HandBrake-0.10.2/ 0000775 0001752 0001752 00000000000 12535641641 014046 5 ustar handbrake handbrake HandBrake-0.10.2/version.txt 0000664 0001752 0001752 00000000533 12535641641 016275 0 ustar handbrake handbrake Path: 0.10.2
URL: svn://svn.handbrake.fr/HandBrake/tags/0.10.2
Relative URL: ^/tags/0.10.2
Repository Root: svn://svn.handbrake.fr/HandBrake
Repository UUID: b64f7644-9d1e-0410-96f1-a4d463321fa5
Revision: 7288
Node Kind: directory
Last Changed Author: sr55
Last Changed Rev: 7288
Last Changed Date: 2015-06-09 21:11:42 +0200 (mar., 09 juin 2015)
HandBrake-0.10.2/libhb/ 0000775 0001752 0001752 00000000000 12535641635 015131 5 ustar handbrake handbrake HandBrake-0.10.2/libhb/declpcm.c 0000664 0001752 0001752 00000032170 12463330511 016673 0 ustar handbrake handbrake /* declpcm.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: <http://handbrake.fr/>.
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
#include "hbffmpeg.h"
#include "audio_resample.h"
/* Per-stream state for the DVD LPCM decoder.
 * LPCM frames (150 ticks of the 90kHz clock each) do not line up with DVD
 * packet boundaries, so the bytes of the frame currently being assembled
 * are accumulated in 'frame' across calls to the work routine. */
struct hb_work_private_s
{
    hb_job_t    *job;
    uint32_t    size;        /* frame size in bytes */
    uint32_t    nchunks;     /* number of samples pairs if paired */
    uint32_t    nsamples;    /* frame size in samples */
    uint32_t    pos;         /* buffer offset for next input data */

    int64_t     next_pts;    /* pts for next output frame */
    int64_t     sequence;

    /* the following is frame info for the frame we're currently accumulating */
    uint64_t    duration;    /* frame duration (in 90kHz ticks) */
    uint32_t    offset;      /* where in buf frame starts */
    uint32_t    samplerate;  /* sample rate in samples/sec (Hz) */
    uint8_t     nchannels;
    uint8_t     sample_size; /* bits per sample */

    uint8_t     frame[HB_DVD_READ_BUFFER_SIZE*2]; /* partial-frame accumulator */
    uint8_t     *data;       /* decoded float samples, realloc'd in Decode() */
    uint32_t    alloc_size;  /* current byte size of 'data' */

    hb_audio_resample_t *resample;
};
/* Decode() converts the accumulated frame in pv->frame to float PCM. */
static hb_buffer_t * Decode( hb_work_object_t * w );
static int  declpcmInit( hb_work_object_t *, hb_job_t * );
static int  declpcmWork( hb_work_object_t *, hb_buffer_t **, hb_buffer_t ** );
static void declpcmClose( hb_work_object_t * );
static int  declpcmBSInfo( hb_work_object_t *, const hb_buffer_t *,
                           hb_work_info_t * );

/* Work object registered for WORK_DECLPCM */
hb_work_object_t hb_declpcm =
{
    WORK_DECLPCM,
    "LPCM decoder",
    declpcmInit,
    declpcmWork,
    declpcmClose,
    0,
    declpcmBSInfo
};
/* Lookup tables indexed by fields of LPCM header byte 4 (see lpcmInfo()):
 * bits 4-5 select the sample rate, bits 6-7 the bits per sample, and
 * bits 0-2 (channel count - 1) select the channel layout. */
static const int hdr2samplerate[] = { 48000, 96000, 44100, 32000 };
static const int hdr2samplesize[] = { 16, 20, 24, 16 };
static const uint64_t hdr2layout[] =
{
    AV_CH_LAYOUT_MONO,         AV_CH_LAYOUT_STEREO,
    AV_CH_LAYOUT_2_1,          AV_CH_LAYOUT_QUAD,
    AV_CH_LAYOUT_5POINT0_BACK, AV_CH_LAYOUT_6POINT0_FRONT,
    AV_CH_LAYOUT_6POINT1,      AV_CH_LAYOUT_7POINT1,
};
/* Parse the 7-byte LPCM packet header of 'in' and fill pv with the frame
 * geometry (offset, duration, chunk/sample counts) for the data it carries. */
static void lpcmInfo( hb_work_object_t *w, hb_buffer_t *in )
{
    hb_work_private_t * pv = w->private_data;

    /*
     * LPCM packets have a 7 byte header (the substream id is stripped off
     * before we get here so it's numbered -1 below)::
     * byte -1  Substream id
     * byte 0   Number of frames that begin in this packet
     *          (last frame may finish in next packet)
     * byte 1,2 offset to first frame that begins in this packet (not including hdr)
     * byte 3:
     *   bits 0-4  continuity counter (increments modulo 20)
     *   bit 5     reserved
     *   bit 6     audio mute on/off
     *   bit 7     audio emphasis on/off
     * byte 4:
     *   bits 0-2  #channels - 1 (e.g., stereo = 1)
     *   bit 3     reserved
     *   bits 4-5  sample rate (0=48K,1=96K,2=44.1K,3=32K)
     *   bits 6-7  bits per sample (0=16 bit, 1=20 bit, 2=24 bit)
     * byte 5   Dynamic range control (0x80 = off)
     *
     * The audio is viewed as "frames" of 150 90KHz ticks each (80 samples @ 48KHz).
     * The frames are laid down continuously without regard to MPEG packet
     * boundaries. E.g., for 48KHz stereo, the first packet will contain 6
     * frames plus the start of the 7th, the second packet will contain the
     * end of the 7th, 8-13 & the start of 14, etc. The frame structure is
     * important because the PTS on the packet gives the time of the first
     * frame that starts in the packet *NOT* the time of the first sample
     * in the packet. Also samples get split across packet boundaries
     * so we can't assume that we can consume all the data in one packet
     * on every call to the work routine.
     */
    /* +2 skips the two offset bytes themselves */
    pv->offset = ( ( in->data[1] << 8 ) | in->data[2] ) + 2;
    if ( pv->offset >= HB_DVD_READ_BUFFER_SIZE )
    {
        hb_log( "declpcm: illegal frame offset %d", pv->offset );
        pv->offset = 2; /*XXX*/
    }
    pv->nchannels   = ( in->data[4] & 7 ) + 1;
    pv->samplerate  = hdr2samplerate[ ( in->data[4] >> 4 ) & 0x3 ];
    pv->sample_size = hdr2samplesize[in->data[4] >> 6];

    // 20 and 24 bit lpcm is always encoded in sample pairs. So take this
    // into account when computing sizes.
    int chunk_size = pv->sample_size / 8;
    int samples_per_chunk = 1;

    switch( pv->sample_size )
    {
        case 20:
            chunk_size = 5;        /* 2 x 20 bits = 5 bytes */
            samples_per_chunk = 2;
            break;
        case 24:
            chunk_size = 6;        /* 2 x 24 bits = 6 bytes */
            samples_per_chunk = 2;
            break;
    }

    /*
     * PCM frames have a constant duration (150 90KHz ticks).
     * We need to convert that to the amount of data expected.  It's the
     * duration divided by the sample rate (to get #samples) times the number
     * of channels times the bits per sample divided by 8 to get bytes.
     * (we have to compute in bits because 20 bit samples are not an integral
     * number of bytes). We do all the multiplies first then the divides to
     * avoid truncation errors.
     */
    /*
     * Don't trust the number of frames given in the header. We've seen
     * streams for which this is incorrect, and it can be computed.
     * pv->duration = in->data[0] * 150;
     */
    int chunks = ( in->size - pv->offset ) / chunk_size;
    int samples = chunks * samples_per_chunk;

    // Calculate number of frames that start in this packet (round up)
    int frames = ( 90000 * samples / ( pv->samplerate * pv->nchannels ) +
                   149 ) / 150;

    pv->duration = frames * 150;
    pv->nchunks  = ( pv->duration * pv->nchannels * pv->samplerate +
                     samples_per_chunk - 1 ) / ( 90000 * samples_per_chunk );
    pv->nsamples = ( pv->duration * pv->samplerate ) / 90000;
    pv->size     = pv->nchunks * chunk_size;

    pv->next_pts = in->s.start;
}
/* Allocate the decoder's private state and set up the resampler that
 * converts decoded float PCM to the job's output mixdown.
 * Returns 0 on success, 1 if the resampler could not be created. */
static int declpcmInit( hb_work_object_t * w, hb_job_t * job )
{
    hb_work_private_t * pv = calloc( 1, sizeof( hb_work_private_t ) );

    w->private_data = pv;
    pv->job         = job;

    /* decoded samples are produced as packed float (AV_SAMPLE_FMT_FLT) */
    pv->resample =
        hb_audio_resample_init(AV_SAMPLE_FMT_FLT,
                               w->audio->config.out.mixdown,
                               w->audio->config.out.normalize_mix_level);
    if (pv->resample == NULL)
    {
        hb_error("declpcmInit: hb_audio_resample_init() failed");
        return 1;
    }

    return 0;
}
/*
 * Convert DVD encapsulated LPCM to floating point PCM audio buffers.
 * The amount of audio in a PCM frame is always <= the amount that will fit
 * in a DVD block (2048 bytes) but the standard doesn't require that the audio
 * frames line up with the DVD frames. Since audio frame boundaries are unrelated
 * to DVD PES boundaries, this routine has to reconstruct then extract the audio
 * frames. Because of the arbitrary alignment, it can output zero, one or two buf's.
 */
static int declpcmWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                        hb_buffer_t ** buf_out )
{
    hb_work_private_t * pv = w->private_data;
    hb_buffer_t *in  = *buf_in;
    hb_buffer_t *buf = NULL;

    if ( in->size <= 0 )
    {
        /* EOF on input stream - send it downstream & say that we're done */
        *buf_out = in;
        *buf_in = NULL;
        return HB_WORK_DONE;
    }

    pv->sequence = in->sequence;

    /* if we have a frame to finish, add enough data from this buf to finish it */
    if ( pv->size )
    {
        /* continuation data starts right after the packet header
         * (NOTE(review): header is described as 7 bytes in lpcmInfo() but
         * 6 are skipped here -- confirm against the demuxer's framing) */
        memcpy( pv->frame + pv->pos, in->data + 6, pv->size - pv->pos );
        buf = Decode( w );
    }
    *buf_out = buf;

    /* save the (rest of) data from this buf in our frame buffer */
    lpcmInfo( w, in );
    int off = pv->offset;
    int amt = in->size - off;
    pv->pos = amt;
    memcpy( pv->frame, in->data + off, amt );

    /* if the new frame is already complete, decode it now; chain it after
     * the continuation frame if one was emitted above */
    if ( amt >= pv->size )
    {
        if ( buf )
        {
            buf->next = Decode( w );
        }
        else
        {
            *buf_out = Decode( w );
        }
        pv->size = 0;
    }
    return HB_WORK_OK;
}
/* Decode the complete LPCM frame accumulated in pv->frame into float
 * samples in [-1.0, 1.0], then hand them to the resampler for mixdown.
 * Returns the resampled output buffer (with pts/duration filled in),
 * or NULL when there is nothing to decode or resampling fails. */
static hb_buffer_t *Decode( hb_work_object_t *w )
{
    hb_work_private_t *pv = w->private_data;
    hb_buffer_t *out;

    if (pv->nsamples == 0)
        return NULL;

    int size = pv->nsamples * pv->nchannels * sizeof( float );
    if (pv->alloc_size != size)
    {
        /* NOTE(review): realloc result is unchecked -- OOM would crash */
        pv->data = realloc( pv->data, size );
        pv->alloc_size = size;
    }
    float *odat = (float *)pv->data;
    /* each iteration below consumes one chunk per channel */
    int count = pv->nchunks / pv->nchannels;

    switch( pv->sample_size )
    {
        case 16: // 2 byte, big endian, signed (the right shift sign extends)
        {
            uint8_t *frm = pv->frame;
            while ( count-- )
            {
                int cc;
                for( cc = 0; cc < pv->nchannels; cc++ )
                {
                    // Shifts below result in sign extension which gives
                    // us proper signed values. The final division adjusts
                    // the range to [-1.0 ... 1.0]
                    *odat++ = (float)( ( (int)( frm[0] << 24 ) >> 16 ) |
                                       frm[1] ) / 32768.0;
                    frm += 2;
                }
            }
        } break;
        case 20:
        {
            // There will always be 2 groups of samples.  A group is
            // a collection of samples that spans all channels.
            // The data for the samples is split.  The first 2 msb
            // bytes for all samples is encoded first, then the remaining
            // lsb bits are encoded.
            uint8_t *frm = pv->frame;
            while ( count-- )
            {
                int gg, cc;
                int shift = 4;   /* alternate nibbles of each lsb byte */
                uint8_t *lsb = frm + 4 * pv->nchannels;
                for( gg = 0; gg < 2; gg++ )
                {
                    for( cc = 0; cc < pv->nchannels; cc++ )
                    {
                        // Shifts below result in sign extension which gives
                        // us proper signed values. The final division adjusts
                        // the range to [-1.0 ... 1.0]
                        *odat = (float)( ( (int)( frm[0] << 24 ) >> 12 ) |
                                         ( frm[1] << 4 ) |
                                         ( ( ( lsb[0] >> shift ) & 0x0f ) ) ) /
                                (16. * 32768.0);
                        odat++;
                        lsb += !shift;   /* advance only after the low nibble */
                        shift ^= 4;
                        frm += 2;
                    }
                }
                frm = lsb;   /* skip past the lsb bytes to the next chunk */
            }
        } break;
        case 24:
        {
            // There will always be 2 groups of samples.  A group is
            // a collection of samples that spans all channels.
            // The data for the samples is split.  The first 2 msb
            // bytes for all samples is encoded first, then the remaining
            // lsb bits are encoded.
            uint8_t *frm = pv->frame;
            while ( count-- )
            {
                int gg, cc;
                uint8_t *lsb = frm + 4 * pv->nchannels;
                for( gg = 0; gg < 2; gg++ )
                {
                    for( cc = 0; cc < pv->nchannels; cc++ )
                    {
                        // Shifts below result in sign extension which gives
                        // us proper signed values. The final division adjusts
                        // the range to [-1.0 ... 1.0]
                        *odat++ = (float)( ( (int)( frm[0] << 24 ) >> 8 ) |
                                           ( frm[1] << 8 ) | lsb[0] ) /
                                  (256. * 32768.0);
                        frm += 2;
                        lsb++;
                    }
                }
                frm = lsb;   /* skip past the lsb bytes to the next chunk */
            }
        } break;
    }

    hb_audio_resample_set_channel_layout(pv->resample,
                                         hdr2layout[pv->nchannels - 1]);
    if (hb_audio_resample_update(pv->resample))
    {
        hb_log("declpcm: hb_audio_resample_update() failed");
        return NULL;
    }
    out = hb_audio_resample(pv->resample, &pv->data, pv->nsamples);

    if (out != NULL)
    {
        /* stamp output timing from the accumulated frame's pts/duration */
        out->s.start    = pv->next_pts;
        out->s.duration = pv->duration;
        pv->next_pts   += pv->duration;
        out->s.stop     = pv->next_pts;
    }
    return out;
}
/* Release the decoder's private state: resampler, sample buffer,
 * and the private struct itself. Safe to call when init failed early. */
static void declpcmClose( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;

    if ( pv != NULL )
    {
        hb_audio_resample_free( pv->resample );
        /* free(NULL) is a no-op, so an unused pv->data is fine */
        free( pv->data );
        free( pv );
        w->private_data = 0;
    }
}
/* Report bitstream parameters for an LPCM packet without decoding it.
 * Decodes the same header byte 4 fields as lpcmInfo(). Always returns 1. */
static int declpcmBSInfo( hb_work_object_t *w, const hb_buffer_t *b,
                          hb_work_info_t *info )
{
    const int nchannels   = ( b->data[4] & 7 ) + 1;
    const int sample_size = hdr2samplesize[b->data[4] >> 6];
    const int rate        = hdr2samplerate[( b->data[4] >> 4 ) & 0x3];
    const int64_t duration = b->data[0] * 150;

    memset( info, 0, sizeof( *info ) );

    info->name      = "LPCM";
    info->rate      = rate;
    info->rate_base = 1;
    info->bitrate   = rate * sample_size * nchannels;
    /* pack header bytes 3..5 into the flags word */
    info->flags     = ( b->data[3] << 16 ) | ( b->data[4] << 8 ) | b->data[5];
    info->matrix_encoding = AV_MATRIX_ENCODING_NONE;
    info->channel_layout  = hdr2layout[nchannels - 1];
    info->channel_map     = &hb_libav_chan_map;
    /* duration is in 90kHz ticks; convert to samples */
    info->samples_per_frame = ( duration * rate ) / 90000;
    return 1;
}
HandBrake-0.10.2/libhb/denoise.c 0000664 0001752 0001752 00000027757 12265031673 016740 0 ustar handbrake handbrake /*
Copyright (c) 2003 Daniel Moreno
Copyright (c) 2012 Loren Merritt
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "hb.h"
#include "hbffmpeg.h"
#define HQDN3D_SPATIAL_LUMA_DEFAULT 4.0f
#define HQDN3D_SPATIAL_CHROMA_DEFAULT 3.0f
#define HQDN3D_TEMPORAL_LUMA_DEFAULT 6.0f
#define ABS(A) ( (A) > 0 ? (A) : -(A) )
#define MIN( a, b ) ( (a) > (b) ? (b) : (a) )
/* hqdn3d state: one precomputed coefficient table per plane/dimension
 * ([0] spatial luma, [1] temporal luma, [2]/[3] chroma Cb, [4]/[5] Cr --
 * see hb_denoise_init()), plus line/frame history buffers. */
struct hb_filter_private_s
{
    short            hqdn3d_coef[6][512*16];
    unsigned short * hqdn3d_line;     /* one line of spatial history (8.8 fixed point) */
    unsigned short * hqdn3d_frame[3]; /* per-plane temporal history, lazily allocated */
};
/* Filter entry points */
static int hb_denoise_init( hb_filter_object_t * filter,
                            hb_filter_init_t * init );

static int hb_denoise_work( hb_filter_object_t * filter,
                            hb_buffer_t ** buf_in,
                            hb_buffer_t ** buf_out );

static void hb_denoise_close( hb_filter_object_t * filter );

/* Filter descriptor registered with HandBrake's filter chain */
hb_filter_object_t hb_filter_denoise =
{
    .id            = HB_FILTER_DENOISE,
    .enforce_order = 1,
    .name          = "Denoise (hqdn3d)",
    .settings      = NULL,
    .init          = hb_denoise_init,
    .work          = hb_denoise_work,
    .close         = hb_denoise_close,
};
/* Build one 8.8 fixed-point low-pass coefficient table from a filter
 * strength.  'dist25' is the pixel difference at which similarity drops
 * to 25%; the table maps a (biased) difference index to the correction
 * that hqdn3d_lowpass_mul() adds.  ct[0] doubles as an enabled flag. */
static void hqdn3d_precalc_coef( short * ct,
                                 double dist25 )
{
    const double gamma =
        log( 0.25 ) / log( 1.0 - MIN( dist25, 252.0 ) / 255.0 - 0.00001 );
    int i;

    for( i = -255 * 16; i <= 255 * 16; i++ )
    {
        /* hqdn3d_lowpass_mul() truncates (not rounds) the diff,
         * so use +15/32 as the midpoint of the truncation interval */
        const double f     = ( i + 15.0 / 32.0 ) / 16.0;
        const double simil = 1.0 - ABS( f ) / 255.0;
        const double c     = pow( simil, gamma ) * 256.0 * f;

        /* round to nearest when narrowing to short */
        ct[16 * 256 + i] = ( c < 0 ) ? ( c - 0.5 ) : ( c + 0.5 );
    }

    /* slot 0 is never touched by the loop (min index is 16) and records
     * whether this dimension of the filter is enabled at all */
    ct[0] = ( dist25 != 0 );
}
/* One tap of the hqdn3d IIR low-pass: nudge the current 8.8 fixed-point
 * value toward the previous one by a table-driven, similarity-weighted
 * correction.  'coef' points at the table midpoint, so the truncated
 * difference may index it negatively. */
static inline unsigned int hqdn3d_lowpass_mul( int prev_mul,
                                               int curr_mul,
                                               short * coef )
{
    const int diff = prev_mul - curr_mul;
    return (unsigned int)( curr_mul + coef[diff >> 4] );
}
/* Temporal-only denoise of one plane: low-pass each pixel against the
 * 8.8 fixed-point history in frame_ant, update the history in place,
 * and write the rounded 8-bit result to frame_dst. */
static void hqdn3d_denoise_temporal( unsigned char * frame_src,
                                     unsigned char * frame_dst,
                                     unsigned short * frame_ant,
                                     int w, int h,
                                     short * temporal)
{
    int x, y;
    /* bias the table pointer so difference-based indices can go negative */
    short * coef = temporal + 0x1000;

    for( y = 0; y < h; y++ )
    {
        for( x = 0; x < w; x++ )
        {
            unsigned int filtered = hqdn3d_lowpass_mul( frame_ant[x],
                                                        frame_src[x] << 8,
                                                        coef );
            frame_ant[x] = filtered;                  /* history for next frame */
            frame_dst[x] = ( filtered + 0x7F ) >> 8;  /* round back to 8 bits */
        }
        frame_src += w;
        frame_dst += w;
        frame_ant += w;
    }
}
/* Combined spatial + temporal denoise of one plane.  Each pixel is first
 * low-passed against its left and top neighbours (via pixel_ant and
 * line_ant, both 8.8 fixed point), then against the previous frame's
 * value in frame_ant.  line_ant and frame_ant are updated in place. */
static void hqdn3d_denoise_spatial( unsigned char * frame_src,
                                    unsigned char * frame_dst,
                                    unsigned short * line_ant,
                                    unsigned short * frame_ant,
                                    int w, int h,
                                    short * spatial,
                                    short * temporal )
{
    int x, y;
    unsigned int pixel_ant;
    unsigned int tmp;

    /* bias both tables to their midpoints for signed indexing */
    spatial  += 0x1000;
    temporal += 0x1000;

    /* First line has no top neighbor. Only left one for each tmp and last frame */
    pixel_ant = frame_src[0]<<8;
    for ( x = 0; x < w; x++)
    {
        line_ant[x] = tmp = pixel_ant = hqdn3d_lowpass_mul( pixel_ant,
                                                            frame_src[x]<<8,
                                                            spatial );
        frame_ant[x] = tmp = hqdn3d_lowpass_mul( frame_ant[x],
                                                 tmp,
                                                 temporal );
        frame_dst[x] = (tmp+0x7F)>>8;
    }

    for( y = 1; y < h; y++ )
    {
        frame_src += w;
        frame_dst += w;
        frame_ant += w;

        pixel_ant = frame_src[0]<<8;
        /* loop stops at w-1: the last column has no right neighbour and
         * is handled after the loop using the final pixel_ant value */
        for ( x = 0; x < w-1; x++ )
        {
            line_ant[x] = tmp = hqdn3d_lowpass_mul( line_ant[x],
                                                    pixel_ant,
                                                    spatial );
            pixel_ant = hqdn3d_lowpass_mul( pixel_ant,
                                            frame_src[x+1]<<8,
                                            spatial );
            frame_ant[x] = tmp = hqdn3d_lowpass_mul( frame_ant[x],
                                                     tmp,
                                                     temporal );
            frame_dst[x] = (tmp+0x7F)>>8;
        }

        /* last column (x == w-1 after the loop) */
        line_ant[x] = tmp = hqdn3d_lowpass_mul( line_ant[x],
                                                pixel_ant,
                                                spatial );
        frame_ant[x] = tmp = hqdn3d_lowpass_mul( frame_ant[x],
                                                 tmp,
                                                 temporal );
        frame_dst[x] = (tmp+0x7F)>>8;
    }
}
/* Denoise one plane, choosing spatial+temporal or temporal-only filtering.
 * On the first call for a plane (*frame_ant_ptr == NULL) the temporal
 * history is allocated and seeded from the source image. */
static void hqdn3d_denoise( unsigned char * frame_src,
                            unsigned char * frame_dst,
                            unsigned short * line_ant,
                            unsigned short ** frame_ant_ptr,
                            int w,
                            int h,
                            short * spatial,
                            short * temporal )
{
    unsigned short * frame_ant = *frame_ant_ptr;

    if( frame_ant == NULL )
    {
        /* first frame: seed history with the source, promoted to 8.8 */
        int x, y;
        unsigned char * src = frame_src;

        frame_ant = malloc( w * h * sizeof( unsigned short ) );
        *frame_ant_ptr = frame_ant;
        for( y = 0; y < h; y++, src += w )
        {
            unsigned short * ant_row = frame_ant + y * w;
            for( x = 0; x < w; x++ )
            {
                ant_row[x] = src[x] << 8;
            }
        }
    }

    /* spatial[0] is the "enabled" flag written by hqdn3d_precalc_coef() */
    if( spatial[0] )
    {
        hqdn3d_denoise_spatial( frame_src, frame_dst, line_ant, frame_ant,
                                w, h, spatial, temporal );
    }
    else
    {
        hqdn3d_denoise_temporal( frame_src, frame_dst, frame_ant,
                                 w, h, temporal );
    }
}
/* Parse the "sl:scb:scr:tl:tcb:tcr" settings string (any suffix may be
 * omitted; missing values are derived from the ones given) and precompute
 * the six hqdn3d coefficient tables.  Returns 0. */
static int hb_denoise_init( hb_filter_object_t * filter,
                            hb_filter_init_t * init )
{
    filter->private_data = calloc( sizeof(struct hb_filter_private_s), 1 );
    hb_filter_private_t * pv = filter->private_data;

    /* Start every strength at its default.  Previously these doubles were
     * left uninitialized and then read by hqdn3d_precalc_coef() whenever
     * filter->settings was NULL (or sscanf matched nothing): undefined
     * behavior.  The switch below overwrites whatever was parsed. */
    double spatial_luma      = HQDN3D_SPATIAL_LUMA_DEFAULT;
    double spatial_chroma_b  = HQDN3D_SPATIAL_CHROMA_DEFAULT;
    double spatial_chroma_r  = spatial_chroma_b;
    double temporal_luma     = HQDN3D_TEMPORAL_LUMA_DEFAULT;
    double temporal_chroma_b = temporal_luma * spatial_chroma_b / spatial_luma;
    double temporal_chroma_r = temporal_chroma_b;

    if( filter->settings )
    {
        /* fall through on the number of values parsed: each case derives
         * the values that were not supplied from the ones that were */
        switch( sscanf( filter->settings, "%lf:%lf:%lf:%lf:%lf:%lf",
                        &spatial_luma, &spatial_chroma_b, &spatial_chroma_r,
                        &temporal_luma, &temporal_chroma_b, &temporal_chroma_r ) )
        {
            case 0:
                spatial_luma     = HQDN3D_SPATIAL_LUMA_DEFAULT;

                spatial_chroma_b = HQDN3D_SPATIAL_CHROMA_DEFAULT;

                spatial_chroma_r = spatial_chroma_b;
                temporal_luma    = HQDN3D_TEMPORAL_LUMA_DEFAULT;

                temporal_chroma_b = temporal_luma *
                                    spatial_chroma_b / spatial_luma;

                temporal_chroma_r = temporal_chroma_b;
                break;

            case 1:
                spatial_chroma_b = HQDN3D_SPATIAL_CHROMA_DEFAULT *
                                   spatial_luma / HQDN3D_SPATIAL_LUMA_DEFAULT;

                spatial_chroma_r = spatial_chroma_b;
                temporal_luma    = HQDN3D_TEMPORAL_LUMA_DEFAULT *
                                   spatial_luma / HQDN3D_SPATIAL_LUMA_DEFAULT;

                temporal_chroma_b = temporal_luma *
                                    spatial_chroma_b / spatial_luma;

                temporal_chroma_r = temporal_chroma_b;
                break;

            case 2:
                spatial_chroma_r = spatial_chroma_b;
                temporal_luma    = HQDN3D_TEMPORAL_LUMA_DEFAULT *
                                   spatial_luma / HQDN3D_SPATIAL_LUMA_DEFAULT;

                temporal_chroma_b = temporal_luma *
                                    spatial_chroma_b / spatial_luma;

                temporal_chroma_r = temporal_chroma_b;
                break;

            case 3:
                temporal_luma    = HQDN3D_TEMPORAL_LUMA_DEFAULT *
                                   spatial_luma / HQDN3D_SPATIAL_LUMA_DEFAULT;

                temporal_chroma_b = temporal_luma *
                                    spatial_chroma_b / spatial_luma;

                temporal_chroma_r = temporal_chroma_b;
                break;

            case 4:
                temporal_chroma_b = temporal_luma *
                                    spatial_chroma_b / spatial_luma;

                temporal_chroma_r = temporal_chroma_b;
                break;

            case 5:
                temporal_chroma_r = temporal_chroma_b;
                break;
        }
    }

    hqdn3d_precalc_coef( pv->hqdn3d_coef[0], spatial_luma );
    hqdn3d_precalc_coef( pv->hqdn3d_coef[1], temporal_luma );
    hqdn3d_precalc_coef( pv->hqdn3d_coef[2], spatial_chroma_b );
    hqdn3d_precalc_coef( pv->hqdn3d_coef[3], temporal_chroma_b );
    hqdn3d_precalc_coef( pv->hqdn3d_coef[4], spatial_chroma_r );
    hqdn3d_precalc_coef( pv->hqdn3d_coef[5], temporal_chroma_r );

    return 0;
}
/* Free the filter's private state: the line buffer, the three per-plane
 * history frames, and the private struct itself. */
static void hb_denoise_close( hb_filter_object_t * filter )
{
    hb_filter_private_t * pv = filter->private_data;
    int i;

    if( !pv )
    {
        return;
    }

    /* free(NULL) is a no-op, so no per-pointer guards are required */
    free( pv->hqdn3d_line );
    pv->hqdn3d_line = NULL;

    for( i = 0; i < 3; i++ )
    {
        free( pv->hqdn3d_frame[i] );
        pv->hqdn3d_frame[i] = NULL;
    }

    free( pv );
    filter->private_data = NULL;
}
/* Filter one video frame: run hqdn3d over each of the three planes,
 * using coefficient pairs (spatial, temporal) selected per plane. */
static int hb_denoise_work( hb_filter_object_t * filter,
                            hb_buffer_t ** buf_in,
                            hb_buffer_t ** buf_out )
{
    hb_filter_private_t * pv = filter->private_data;
    hb_buffer_t * in = *buf_in, * out;

    if ( in->size <= 0 )
    {
        /* EOF: pass the flush buffer downstream unchanged */
        *buf_out = in;
        *buf_in = NULL;
        return HB_FILTER_DONE;
    }

    out = hb_video_buffer_init( in->f.width, in->f.height );

    /* lazily allocate the shared line buffer
     * (NOTE(review): sized from plane 0's stride and reused for all three
     * planes -- assumes plane 0 has the widest stride; malloc unchecked) */
    if( !pv->hqdn3d_line )
    {
        pv->hqdn3d_line = malloc( in->plane[0].stride * sizeof(unsigned short) );
    }

    int c, coef_index;
    for ( c = 0; c < 3; c++ )
    {
        /* coef pairs: [0]/[1] luma, [2]/[3] Cb, [4]/[5] Cr */
        coef_index = c * 2;
        hqdn3d_denoise( in->plane[c].data,
                        out->plane[c].data,
                        pv->hqdn3d_line,
                        &pv->hqdn3d_frame[c],
                        in->plane[c].stride,
                        in->plane[c].height,
                        pv->hqdn3d_coef[coef_index],
                        pv->hqdn3d_coef[coef_index+1] );
    }

    out->s = in->s;
    hb_buffer_move_subs( out, in );

    *buf_out = out;

    return HB_FILTER_OK;
}
HandBrake-0.10.2/libhb/eedi2.h 0000664 0001752 0001752 00000012021 12463330511 016252 0 ustar handbrake handbrake /* eedi2.h
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: <http://handbrake.fr/>.
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
// Used to order a sequence of metrics for median filtering
void eedi2_sort_metrics( int *order, const int length );
// Aping some Windows API functions AviSynth seems to like
// Taken from here: http://www.gidforums.com/t-8543.html
void *eedi2_aligned_malloc(size_t size, size_t align_size);
void eedi2_aligned_free(void *ptr);
// Copies bitmaps
void eedi2_bit_blit( uint8_t * dstp, int dst_pitch, const uint8_t * srcp, int src_pitch,
int row_size, int height );
// Sets up the initial field-sized bitmap EEDI2 interpolates from
void eedi2_fill_half_height_buffer_plane( uint8_t * src, uint8_t * dst, int pitch, int height );
// Simple line doubler
void eedi2_upscale_by_2( uint8_t * srcp, uint8_t * dstp, int height, int pitch );
// Finds places where vertically adjacent pixels abruptly change intensity
void eedi2_build_edge_mask( uint8_t * dstp, int dst_pitch, uint8_t *srcp, int src_pitch,
int mthresh, int lthresh, int vthresh, int height, int width );
// Expands and smooths out the edge mask by considering a pixel
// to be masked if >= dilation threshold adjacent pixels are masked.
void eedi2_dilate_edge_mask( uint8_t *mskp, int msk_pitch, uint8_t *dstp, int dst_pitch,
int dstr, int height, int width );
// Contracts the edge mask by considering a pixel to be masked
// only if > erosion threshold adjacent pixels are masked
void eedi2_erode_edge_mask( uint8_t *mskp, int msk_pitch, uint8_t *dstp, int dst_pitch,
int estr, int height, int width );
// Smooths out horizontally aligned holes in the mask
// If none of the 6 horizontally adjacent pixels are masked,
// don't consider the current pixel masked. If there are any
// masked on both sides, consider the current pixel masked.
void eedi2_remove_small_gaps( uint8_t * mskp, int msk_pitch, uint8_t * dstp, int dst_pitch,
int height, int width );
// Spatial vectors. Looks at maximum_search_distance surrounding pixels
// to guess which angle edges follow. This is EEDI2's timesink, and can be
// thought of as YADIF_CHECK on steroids. Both find edge directions.
void eedi2_calc_directions( const int plane, uint8_t * mskp, int msk_pitch, uint8_t * srcp, int src_pitch,
uint8_t * dstp, int dst_pitch, int maxd, int nt, int height, int width );
void eedi2_filter_map( uint8_t *mskp, int msk_pitch, uint8_t *dmskp, int dmsk_pitch,
uint8_t * dstp, int dst_pitch, int height, int width );
void eedi2_filter_dir_map( uint8_t * mskp, int msk_pitch, uint8_t * dmskp, int dmsk_pitch, uint8_t * dstp,
int dst_pitch, int height, int width );
void eedi2_expand_dir_map( uint8_t * mskp, int msk_pitch, uint8_t *dmskp, int dmsk_pitch, uint8_t * dstp,
int dst_pitch, int height, int width );
void eedi2_mark_directions_2x( uint8_t * mskp, int msk_pitch, uint8_t * dmskp, int dmsk_pitch, uint8_t * dstp,
int dst_pitch, int tff, int height, int width );
void eedi2_filter_dir_map_2x( uint8_t * mskp, int msk_pitch, uint8_t * dmskp, int dmsk_pitch, uint8_t * dstp,
int dst_pitch, int field, int height, int width );
void eedi2_expand_dir_map_2x( uint8_t * mskp, int msk_pitch, uint8_t * dmskp, int dmsk_pitch, uint8_t * dstp,
int dst_pitch, int field, int height, int width );
void eedi2_fill_gaps_2x( uint8_t *mskp, int msk_pitch, uint8_t * dmskp, int dmsk_pitch, uint8_t * dstp,
int dst_pitch, int field, int height, int width );
void eedi2_interpolate_lattice( const int plane, uint8_t * dmskp, int dmsk_pitch, uint8_t * dstp,
int dst_pitch, uint8_t * omskp, int omsk_pitch, int field, int nt,
int height, int width );
void eedi2_post_process( uint8_t * nmskp, int nmsk_pitch, uint8_t * omskp, int omsk_pitch, uint8_t * dstp,
int src_pitch, int field, int height, int width );
void eedi2_gaussian_blur1( uint8_t * src, int src_pitch, uint8_t * tmp, int tmp_pitch, uint8_t * dst,
int dst_pitch, int height, int width );
void eedi2_gaussian_blur_sqrt2( int *src, int *tmp, int *dst, const int pitch,
const int height, const int width );
void eedi2_calc_derivatives( uint8_t *srcp, int src_pitch, int height, int width,
int *x2, int *y2, int *xy);
void eedi2_post_process_corner( int *x2, int *y2, int *xy, const int pitch, uint8_t * mskp, int msk_pitch,
uint8_t * dstp, int dst_pitch, int height, int width, int field );
HandBrake-0.10.2/libhb/decsrtsub.c 0000664 0001752 0001752 00000052072 12463330511 017265 0 ustar handbrake handbrake /* decsrtsub.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: <http://handbrake.fr/>.
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include
#include
#include
#include
#include
#include "hb.h"
#include "colormap.h"
#include "decsrtsub.h"
/* Parsed SRT timing line; times are in milliseconds */
struct start_and_end {
    unsigned long start, end;
};

/* States of the line-oriented SRT parser (see srt_read()) */
enum
{
    k_state_inEntry,
    k_state_inEntry_or_new,
    k_state_potential_new_entry,
    k_state_timecode,
};

/* One subtitle entry being assembled; times in milliseconds */
typedef struct srt_entry_s {
    long offset, duration;
    long start, stop;
    char text[1024];
    int  pos;          /* number of bytes used in text[] */
} srt_entry_t;
/*
 * Store all context in the work private struct,
 */
struct hb_work_private_s
{
    hb_job_t *job;
    FILE     *file;              /* the .srt input file */
    char      buf[1024];         /* raw bytes read from the file */
    int       pos;               /* read offset into buf */
    int       end;               /* number of valid bytes in buf */
    char      utf8_buf[2048];    /* iconv output: UTF-8 bytes */
    int       utf8_pos;          /* read offset into utf8_buf */
    int       utf8_end;          /* number of valid bytes in utf8_buf */
    int       utf8_bom_skipped;  /* nonzero once a leading UTF-8 BOM was handled */
    unsigned long current_time;       /* end time (ms) of the last timecode seen */
    unsigned long number_of_entries;
    unsigned long last_entry_number;  /* entry number parsed from the file */
    unsigned long current_state;      /* one of the k_state_* values */
    srt_entry_t current_entry;        /* entry currently being assembled */
    iconv_t *iconv_context;           /* converts the file's charset to UTF-8 */
    hb_subtitle_t *subtitle;
    uint64_t start_time;              // In HB time
    uint64_t stop_time;               // In HB time

    int line;   // SSA line number
};
/* Translate one SRT/HTML style tag at the start of 'srt' into the
 * equivalent SSA override code.  On a match, returns a malloc'd SSA
 * string (caller frees) and stores the number of input bytes consumed
 * in *len; returns NULL with *len == 0 when no recognized tag starts
 * here.  Both <tag> and {tag} delimiters are accepted. */
static char* srt_markup_to_ssa(char *srt, int *len)
{
    char terminator;
    char color[40];
    uint32_t rgb;

    *len = 0;
    if (srt[0] != '<' && srt[0] != '{')
        return NULL;

    if (srt[0] == '<')
        terminator = '>';
    else
        terminator = '}';

    /* opening italic/bold/underline */
    if (srt[1] == 'i' && srt[2] == terminator)
    {
        *len = 3;
        return hb_strdup_printf("{\\i1}");
    }
    else if (srt[1] == 'b' && srt[2] == terminator)
    {
        *len = 3;
        return hb_strdup_printf("{\\b1}");
    }
    else if (srt[1] == 'u' && srt[2] == terminator)
    {
        *len = 3;
        return hb_strdup_printf("{\\u1}");
    }
    /* closing italic/bold/underline */
    else if (srt[1] == '/' && srt[2] == 'i' && srt[3] == terminator)
    {
        *len = 4;
        return hb_strdup_printf("{\\i0}");
    }
    else if (srt[1] == '/' && srt[2] == 'b' && srt[3] == terminator)
    {
        *len = 4;
        return hb_strdup_printf("{\\b0}");
    }
    else if (srt[1] == '/' && srt[2] == 'u' && srt[3] == terminator)
    {
        *len = 4;
        return hb_strdup_printf("{\\u0}");
    }
    /* <font color="..."> -> SSA primary colour override */
    else if (srt[0] == '<' && !strncmp(srt + 1, "font", 4))
    {
        int match;
        match = sscanf(srt + 1, "font color=\"%39[^\"]\">", color);
        if (match != 1)
        {
            return NULL;
        }
        /* consume up to and including the closing '>'
         * (NOTE(review): assumes a '>' follows, which the successful
         * sscanf above implies for well-formed input) */
        while (srt[*len] != '>') (*len)++;
        (*len)++;
        if (color[0] == '#')
            rgb = strtol(color + 1, NULL, 16);
        else
            rgb = hb_rgb_lookup_by_name(color);
        return hb_strdup_printf("{\\1c&H%X&}", HB_RGB_TO_BGR(rgb));
    }
    /* </font> -> reset colour to white */
    else if (srt[0] == '<' && srt[1] == '/' && !strncmp(srt + 2, "font", 4) &&
             srt[6] == '>')
    {
        *len = 7;
        return hb_strdup_printf("{\\1c&HFFFFFF&}");
    }

    return NULL;
}
/* Convert an SRT subtitle buffer to SSA in place: prepend the SSA event
 * prefix, translate SRT markup tags, and turn line breaks into "\N".
 * 'line' is the SSA line (ReadOrder) number for the event. */
void hb_srt_to_ssa(hb_buffer_t *sub_in, int line)
{
    if (sub_in->size == 0)
        return;

    // null terminate input if not already terminated
    if (sub_in->data[sub_in->size-1] != 0)
    {
        hb_buffer_realloc(sub_in, ++sub_in->size);
        sub_in->data[sub_in->size - 1] = 0;
    }
    char * srt = (char*)sub_in->data;
    // SSA markup expands a little over SRT, so allocate a bit of extra
    // space.  More will be realloc'd if needed.
    hb_buffer_t * sub = hb_buffer_init(sub_in->size + 80);
    char * ssa, *ssa_markup;
    int skip, len, pos, ii;

    // Exchange data between input sub and new ssa_sub
    // After this, sub_in contains ssa data
    hb_buffer_swap_copy(sub_in, sub);
    ssa = (char*)sub_in->data;

    /* SSA event prefix: ReadOrder,Layer,Style,Name,MarginL,MarginR,MarginV,Effect, */
    sprintf((char*)sub_in->data, "%d,,Default,,0,0,0,,", line);
    pos = strlen((char*)sub_in->data);

    ii = 0;
    while (srt[ii] != '\0')
    {
        if ((ssa_markup = srt_markup_to_ssa(srt + ii, &skip)) != NULL)
        {
            len = strlen(ssa_markup);
            hb_buffer_realloc(sub_in, pos + len + 1);
            // After realloc, sub_in->data may change
            ssa = (char*)sub_in->data;
            sprintf(ssa + pos, "%s", ssa_markup);
            free(ssa_markup);
            pos += len;
            ii += skip;
        }
        else
        {
            hb_buffer_realloc(sub_in, pos + 4);
            // After realloc, sub_in->data may change
            ssa = (char*)sub_in->data;
            /* CR, CRLF and LF all become the SSA line break "\N" */
            if (srt[ii] == '\r')
            {
                ssa[pos++] = '\\';
                ssa[pos++] = 'N';
                ii++;
                if (srt[ii] == '\n')
                {
                    ii++;
                }
            }
            else if (srt[ii] == '\n')
            {
                ssa[pos++] = '\\';
                ssa[pos++] = 'N';
                ii++;
            }
            else
            {
                ssa[pos++] = srt[ii++];
            }
        }
    }
    ssa[pos] = '\0';
    sub_in->size = pos + 1;

    hb_buffer_close(&sub);
}
/* Parse an SRT timing line such as "00:00:15,248 --> 00:00:16,545"
 * into start/end times in milliseconds.
 * Returns 1 on success, 0 when the line is not a valid timecode. */
static int
read_time_from_string( const char* timeString, struct start_and_end *result )
{
    long hh1, mm1, ss1, ms1;
    long hh2, mm2, ss2, ms2;

    if (sscanf(timeString, "%ld:%ld:%ld,%ld --> %ld:%ld:%ld,%ld\n",
               &hh1, &mm1, &ss1, &ms1,
               &hh2, &mm2, &ss2, &ms2) != 8)
    {
        return 0;
    }

    result->start = ms1 + 1000 * ( ss1 + 60 * ( mm1 + 60 * hh1 ) );
    result->end   = ms2 + 1000 * ( ss2 + 60 * ( mm2 + 60 * hh2 ) );
    return 1;
}
/* Refill pv->utf8_buf with UTF-8 data converted (via iconv) from the
 * source file's character set, reading more raw bytes as needed.
 * Returns 1 if any converted data is available, 0 on EOF with nothing
 * converted.  A leading UTF-8 BOM is skipped once per file. */
static int utf8_fill( hb_work_private_t * pv )
{
    int bytes, conversion = 0;
    size_t out_size;

    /* Align utf8 data to beginning of the buffer so that we can
     * fill the buffer to its maximum */
    memmove( pv->utf8_buf, pv->utf8_buf + pv->utf8_pos, pv->utf8_end - pv->utf8_pos );
    pv->utf8_end -= pv->utf8_pos;
    pv->utf8_pos = 0;
    out_size = 2048 - pv->utf8_end;
    while( out_size )
    {
        char *p, *q;
        size_t in_size, retval;

        /* raw buffer exhausted: read another chunk from the file */
        if( pv->end == pv->pos )
        {
            bytes = fread( pv->buf, 1, 1024, pv->file );
            pv->pos = 0;
            pv->end = bytes;
            if( bytes == 0 )
            {
                if( conversion )
                    return 1;
                else
                    return 0;
            }
        }

        p = pv->buf + pv->pos;
        q = pv->utf8_buf + pv->utf8_end;
        in_size = pv->end - pv->pos;

        retval = iconv( pv->iconv_context, &p, &in_size, &q, &out_size);
        if( q != pv->utf8_buf + pv->utf8_pos )
            conversion = 1;

        /* purge processed bytes out of the input buffer */
        pv->utf8_end = q - pv->utf8_buf;
        pv->pos = p - pv->buf;

        if ( !pv->utf8_bom_skipped )
        {
            uint8_t *buf = (uint8_t*)pv->utf8_buf;
            /* 0xEF 0xBB 0xBF is the UTF-8 byte order mark */
            if (buf[0] == 0xef && buf[1] == 0xbb && buf[2] == 0xbf)
            {
                pv->utf8_pos = 3;
            }
            pv->utf8_bom_skipped = 1;
        }

        if( ( retval == -1 ) && ( errno == EINVAL ) )
        {
            /* Incomplete multibyte sequence, read more data */
            memmove( pv->buf, p, pv->end - pv->pos );
            pv->end -= pv->pos;
            pv->pos = 0;
            bytes = fread( pv->buf + pv->end, 1, 1024 - pv->end, pv->file );
            if( bytes == 0 )
            {
                if( !conversion )
                    return 0;
                else
                    return 1;
            }
            pv->end += bytes;
        } else if ( ( retval == -1 ) && ( errno == EILSEQ ) )
        {
            hb_error( "Invalid byte for codeset in input, discard byte" );
            /* Try the next byte of the input */
            pv->pos++;
        } else if ( ( retval == -1 ) && ( errno == E2BIG ) )
        {
            /* buffer full */
            return conversion;
        }
    }
    return 1;
}
/* Copy the next newline-terminated line of converted UTF-8 into 'buf'
 * (at most size-1 bytes), refilling the UTF-8 buffer from the file as
 * needed.  Returns 1 while data remains, 0 on end of input.  A line
 * longer than the buffer yields an empty string (buf[0] == '\0'). */
static int get_line( hb_work_private_t * pv, char *buf, int size )
{
    int i;
    char c;

    // clear remnants of the previous line before processing a new one
    memset(buf, '\0', size);

    /* Find newline in converted UTF-8 buffer */
    for( i = 0; i < size - 1; i++ )
    {
        if( pv->utf8_pos >= pv->utf8_end )
        {
            if( !utf8_fill( pv ) )
            {
                if( i )
                    return 1;   /* partial final line, no trailing newline */
                else
                    return 0;   /* clean EOF */
            }
        }
        c = pv->utf8_buf[pv->utf8_pos++];
        if( c == '\n' )
        {
            buf[i] = '\n';
            buf[i+1] = '\0';
            return 1;
        }
        buf[i] = c;
    }
    buf[0] = '\0';
    return 1;
}
/*
 * Read the SRT file and put the entries into the subtitle fifo for all to read
 *
 * Runs a four-state machine over the SRT text:
 *   k_state_potential_new_entry -> expecting an entry number
 *   k_state_timecode            -> expecting "HH:MM:SS,mmm --> HH:MM:SS,mmm"
 *   k_state_inEntry             -> accumulating subtitle text lines
 *   k_state_inEntry_or_new      -> ambiguous line after a blank inside text
 *
 * Returns one hb_buffer_t per completed subtitle entry (text normalized
 * to at most two lines), or NULL when no complete entry is available.
 * Entries outside the [pv->start_time, pv->stop_time) encode window are
 * discarded.
 */
static hb_buffer_t *srt_read( hb_work_private_t *pv )
{
    char line_buffer[1024];
    int reprocess = 0, resync = 0;

    if( !pv->file )
    {
        return NULL;
    }
    // reprocess == 1 means the previous iteration wants the same line
    // fed through the state machine again in a different state.
    while( reprocess || get_line( pv, line_buffer, sizeof( line_buffer ) ) )
    {
        reprocess = 0;
        switch (pv->current_state)
        {
        case k_state_timecode:
        {
            struct start_and_end timing;
            int result;

            result = read_time_from_string( line_buffer, &timing );
            if (!result)
            {
                // Malformed timecode: resynchronize by hunting for the
                // next entry number.
                resync = 1;
                pv->current_state = k_state_potential_new_entry;
                continue;
            }
            pv->current_entry.duration = timing.end - timing.start;
            pv->current_entry.offset = timing.start - pv->current_time;
            pv->current_time = timing.end;
            pv->current_entry.start = timing.start;
            pv->current_entry.stop = timing.end;
            pv->current_state = k_state_inEntry;
            continue;
        }
        case k_state_inEntry_or_new:
        {
            char *endpoint;
            /*
             * Is this really new next entry begin?
             * Look for entry number.
             */
            strtol(line_buffer, &endpoint, 10);
            if (endpoint == line_buffer ||
                (endpoint && *endpoint != '\n' && *endpoint != '\r'))
            {
                /*
                 * Doesn't resemble an entry number
                 * must still be in an entry
                 */
                if (!resync)
                {
                    reprocess = 1;
                    pv->current_state = k_state_inEntry;
                }
                continue;
            }
            // Looks like an entry number; let the new-entry state
            // re-examine this same line.
            reprocess = 1;
            pv->current_state = k_state_potential_new_entry;
            break;
        }
        case k_state_inEntry:
        {
            char *q;
            int size, len;

            // If the current line is empty, we assume this is the
            // seperation betwene two entries. In case we are wrong,
            // the mistake is corrected in the next state.
            if (strcmp(line_buffer, "\n") == 0 || strcmp(line_buffer, "\r\n") == 0) {
                pv->current_state = k_state_potential_new_entry;
                continue;
            }

            // Append the line to current_entry.text, clamped so the
            // 1024-byte buffer always keeps room for the NUL.
            q = pv->current_entry.text + pv->current_entry.pos;
            len = strlen( line_buffer );
            size = MIN(1024 - pv->current_entry.pos - 1, len );
            memcpy(q, line_buffer, size);
            pv->current_entry.pos += size;
            pv->current_entry.text[pv->current_entry.pos] = '\0';
            break;
        }
        case k_state_potential_new_entry:
        {
            char *endpoint;
            long entry_number;
            hb_buffer_t *buffer = NULL;
            /*
             * Is this really new next entry begin?
             */
            entry_number = strtol(line_buffer, &endpoint, 10);
            if (!resync && (*line_buffer == '\n' || *line_buffer == '\r'))
            {
                /*
                 * Well.. looks like we are in the wrong mode.. lets add the
                 * newline we misinterpreted...
                 */
                // NOTE(review): strncat's third argument is the max number
                // of chars to append, not the total buffer size -- this can
                // write past current_entry.text when pos is near 1023, and
                // pos is not advanced afterwards.  Confirm upstream fix.
                strncat(pv->current_entry.text, " ", 1024);
                pv->current_state = k_state_inEntry_or_new;
                continue;
            }
            if (endpoint == line_buffer ||
                (endpoint && *endpoint != '\n' && *endpoint != '\r'))
            {
                /*
                 * Well.. looks like we are in the wrong mode.. lets add the
                 * line we misinterpreted...
                 */
                if (!resync)
                {
                    reprocess = 1;
                    pv->current_state = k_state_inEntry;
                }
                continue;
            }
            /*
             * We found the next entry - or a really rare error condition
             */
            pv->last_entry_number = entry_number;
            resync = 0;
            // If we have accumulated text for the previous entry, clean it
            // up and emit it as a buffer.
            if (*pv->current_entry.text != '\0')
            {
                long length;
                char *p, *q;
                int line = 1;
                // SRT times are in milliseconds; *90 converts to 90kHz ticks.
                uint64_t start_time = ( pv->current_entry.start +
                                        pv->subtitle->config.offset ) * 90;
                uint64_t stop_time = ( pv->current_entry.stop +
                                       pv->subtitle->config.offset ) * 90;

                // Drop entries that fall outside the encoded chapter range.
                if( !( start_time > pv->start_time && stop_time < pv->stop_time ) )
                {
                    hb_deep_log( 3, "Discarding SRT at time start %"PRId64", stop %"PRId64, start_time, stop_time);
                    memset( &pv->current_entry, 0, sizeof( srt_entry_t ) );
                    ++(pv->number_of_entries);
                    pv->current_state = k_state_timecode;
                    continue;
                }

                // Collapse the text to at most two display lines: the first
                // break becomes '\n', later breaks become spaces, and
                // consecutive break characters are dropped.
                length = strlen( pv->current_entry.text );

                for (q = p = pv->current_entry.text; *p != '\0'; p++)
                {
                    if (*p == '\n' || *p == '\r')
                    {
                        if (*(p + 1) == '\n' || *(p + 1) == '\r' ||
                            *(p + 1) == '\0')
                        {
                            // followed by line break or last character, skip it
                            length--;
                            continue;
                        }
                        else if (line == 1)
                        {
                            // replace '\r' with '\n'
                            *q = '\n';
                            line = 2;
                        }
                        else
                        {
                            // all subtitles on two lines tops
                            // replace line breaks with spaces
                            *q = ' ';
                        }
                        q++;
                    }
                    else
                    {
                        *q = *p;
                        q++;
                    }
                }
                *q = '\0';

                buffer = hb_buffer_init( length + 1 );

                if( buffer )
                {
                    // Timestamps are made relative to the encode start.
                    buffer->s.start = start_time - pv->start_time;
                    buffer->s.stop = stop_time - pv->start_time;

                    memcpy( buffer->data, pv->current_entry.text, length + 1 );
                }
            }
            // Reset accumulator for the entry we just started parsing.
            memset( &pv->current_entry, 0, sizeof( srt_entry_t ) );
            ++(pv->number_of_entries);
            pv->current_state = k_state_timecode;
            if( buffer )
            {
                return buffer;
            }
            continue;
        }
        }
    }

    // EOF: flush the final accumulated entry, if any, using the same
    // normalization as above.
    hb_buffer_t *buffer = NULL;
    if (*pv->current_entry.text != '\0')
    {
        long length;
        char *p, *q;
        int line = 1;
        uint64_t start_time = ( pv->current_entry.start +
                                pv->subtitle->config.offset ) * 90;
        uint64_t stop_time = ( pv->current_entry.stop +
                               pv->subtitle->config.offset ) * 90;

        if( !( start_time > pv->start_time && stop_time < pv->stop_time ) )
        {
            hb_deep_log( 3, "Discarding SRT at time start %"PRId64", stop %"PRId64, start_time, stop_time);
            memset( &pv->current_entry, 0, sizeof( srt_entry_t ) );
            return NULL;
        }

        length = strlen( pv->current_entry.text );

        for (q = p = pv->current_entry.text; *p != '\0'; p++)
        {
            if (*p == '\n' || *p == '\r')
            {
                if (*(p + 1) == '\n' || *(p + 1) == '\r' || *(p + 1) == '\0')
                {
                    // followed by line break or last character, skip it
                    length--;
                    continue;
                }
                else if (line == 1)
                {
                    // replace '\r' with '\n'
                    *q = '\n';
                    line = 2;
                }
                else
                {
                    // all subtitles on two lines tops
                    // replace line breaks with spaces
                    *q = ' ';
                }
                q++;
            }
            else
            {
                *q = *p;
                q++;
            }
        }
        *q = '\0';

        buffer = hb_buffer_init( length + 1 );

        if( buffer )
        {
            buffer->s.start = start_time - pv->start_time;
            buffer->s.stop = stop_time - pv->start_time;

            memcpy( buffer->data, pv->current_entry.text, length + 1 );
        }
    }
    memset( &pv->current_entry, 0, sizeof( srt_entry_t ) );
    if( buffer )
    {
        return buffer;
    }

    return NULL;
}
/*
 * Initialize the SRT subtitle decoder work object.
 *
 * Computes the encode's start/stop window from the job's chapter list,
 * opens an iconv conversion from the configured source codeset to UTF-8,
 * and opens the SRT file itself.  Returns 0 on success, non-zero on
 * failure (calloc failure leaves retval at its initial 1).
 */
static int decsrtInit( hb_work_object_t * w, hb_job_t * job )
{
    int retval = 1;
    hb_work_private_t * pv;
    hb_buffer_t *buffer;
    int i;
    hb_chapter_t * chapter;

    pv = calloc( 1, sizeof( hb_work_private_t ) );
    if( pv )
    {
        w->private_data = pv;

        pv->job = job;

        // Seed the input fifo so the work loop gets scheduled.
        buffer = hb_buffer_init( 0 );
        hb_fifo_push( w->fifo_in, buffer);

        pv->current_state = k_state_potential_new_entry;
        pv->number_of_entries = 0;
        pv->last_entry_number = 0;
        pv->current_time = 0;
        pv->subtitle = w->subtitle;

        /*
         * Figure out the start and stop times from the chapters being
         * encoded - drop subtitle not in this range.
         */
        pv->start_time = 0;
        for( i = 1; i < job->chapter_start; ++i )
        {
            chapter = hb_list_item( job->list_chapter, i - 1 );
            if( chapter )
            {
                pv->start_time += chapter->duration;
            } else {
                // NOTE(review): setting retval = 0 here marks success on an
                // error path; preserved as-is since callers may rely on a
                // missing chapter being non-fatal -- confirm intent.
                hb_error( "Could not locate chapter %d for SRT start time", i );
                retval = 0;
            }
        }
        pv->stop_time = pv->start_time;
        for( i = job->chapter_start; i <= job->chapter_end; ++i )
        {
            chapter = hb_list_item( job->list_chapter, i - 1 );
            if( chapter )
            {
                pv->stop_time += chapter->duration;
            } else {
                // This loop accumulates the stop time, not the start time.
                hb_error( "Could not locate chapter %d for SRT stop time", i );
                retval = 0;
            }
        }

        hb_deep_log( 3, "SRT Start time %"PRId64", stop time %"PRId64, pv->start_time, pv->stop_time);

        pv->iconv_context = iconv_open( "utf-8", pv->subtitle->config.src_codeset );

        if( pv->iconv_context == (iconv_t) -1 )
        {
            hb_error("Could not open the iconv library with those file formats\n");
        } else {
            memset( &pv->current_entry, 0, sizeof( srt_entry_t ) );

            pv->file = hb_fopen(w->subtitle->config.src_filename, "r");

            if( !pv->file )
            {
                hb_error("Could not open the SRT subtitle file '%s'\n",
                         w->subtitle->config.src_filename);
            } else {
                retval = 0;
            }
        }
    }
    if (!retval)
    {
        // Generate generic SSA Script Info.
        int height = job->title->height - job->crop[0] - job->crop[1];
        int width = job->title->width - job->crop[2] - job->crop[3];
        hb_subtitle_add_ssa_header(w->subtitle, width, height);
    }
    return retval;
}
/*
 * Work loop for the SRT decoder: pull the next completed subtitle entry
 * from the file, convert it to SSA, and hand it downstream.  Always
 * returns HB_WORK_OK; the input buffer is recycled into our own input
 * fifo only when we produced output.
 */
static int decsrtWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                       hb_buffer_t ** buf_out )
{
    hb_work_private_t * pv = w->private_data;
    hb_buffer_t * out = srt_read( pv );

    if( out == NULL )
    {
        // Nothing decoded this round; leave the input buffer with the caller.
        *buf_out = NULL;
        return HB_WORK_OK;
    }

    hb_srt_to_ssa( out, ++pv->line );
    /*
     * Keep a buffer in our input fifo so that we get run.
     */
    hb_fifo_push( w->fifo_in, *buf_in );
    *buf_in = NULL;
    *buf_out = out;
    return HB_WORK_OK;
}
/*
 * Tear down the SRT decoder.
 *
 * decsrtInit can fail part-way (calloc OK but iconv_open or hb_fopen
 * failed), so each resource is released only if it was acquired;
 * the original unconditional fclose(pv->file) was undefined behavior
 * when pv->file was NULL.
 */
static void decsrtClose( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;

    if( pv == NULL )
    {
        // init never allocated private data
        return;
    }
    if( pv->file != NULL )
    {
        fclose( pv->file );
    }
    if( pv->iconv_context != (iconv_t) -1 )
    {
        iconv_close( pv->iconv_context );
    }
    free( pv );
    w->private_data = NULL;
}
/* Work-object registration for the SRT subtitle decoder: id, display
 * name, and the init/work/close entry points defined above. */
hb_work_object_t hb_decsrtsub =
{
    WORK_DECSRTSUB,
    "SRT Subtitle Decoder",
    decsrtInit,
    decsrtWork,
    decsrtClose
};
HandBrake-0.10.2/libhb/qsv_memory.c 0000664 0001752 0001752 00000011124 12220306351 017455 0 ustar handbrake handbrake /* ********************************************************************* *\
Copyright (C) 2013 Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\* ********************************************************************* */
#ifdef USE_QSV
#include "hb.h"
#include "hbffmpeg.h"
#include "qsv_memory.h"
/*
 * Convert a QSV NV12 frame surface to planar YUV 4:2:0 in an hb_buffer
 * using swscale.
 *
 * When available, the surface is first copied out of (possibly slow)
 * video memory with the core CopyFrame accelerator; if that ever fails
 * we permanently fall back to reading the source surface directly.
 * Returns the sws_scale() result.
 */
int qsv_nv12_to_yuv420(struct SwsContext* sws_context,hb_buffer_t* dst, mfxFrameSurface1* src, mfxCoreInterface *core){
    int ret = 0;
    int in_pitch = src->Data.Pitch;
    int w = AV_QSV_ALIGN16(src->Info.Width);
    int h = (MFX_PICSTRUCT_PROGRESSIVE == src->Info.PicStruct) ? AV_QSV_ALIGN16(src->Info.Height) : AV_QSV_ALIGN32(src->Info.Height);
    uint8_t *in_luma = NULL;
    uint8_t *in_chroma = NULL;
    /* Remembers across calls whether CopyFrame works.  NOTE(review):
     * file-scope lifetime, not thread-safe if two QSV conversions run
     * concurrently -- confirm single-instance usage. */
    static int copyframe_in_use = 1;
    mfxStatus sts = MFX_ERR_NONE;
    mfxFrameSurface1 accel_dst;

    // Zero the whole surface descriptor so CopyFrame never sees
    // indeterminate fields (only a subset is filled in below).
    memset(&accel_dst, 0, sizeof(accel_dst));

    if (copyframe_in_use)
    {
        accel_dst.Info.FourCC = src->Info.FourCC;
        accel_dst.Info.CropH = src->Info.CropH;
        accel_dst.Info.CropW = src->Info.CropW;
        accel_dst.Info.CropY = src->Info.CropY;
        accel_dst.Info.CropX = src->Info.CropX;
        accel_dst.Info.Width = w;
        accel_dst.Info.Height = h;
        accel_dst.Data.Pitch = src->Data.Pitch;
        accel_dst.Data.Y  = calloc( 1, in_pitch*h );
        accel_dst.Data.VU = calloc( 1, in_pitch*h/2 );

        if (accel_dst.Data.Y == NULL || accel_dst.Data.VU == NULL)
        {
            // Treat allocation failure like a CopyFrame failure: fall
            // back to the direct-read path below.
            sts = MFX_ERR_MEMORY_ALLOC;
        }
        else
        {
            sts = core->CopyFrame(core->pthis, &accel_dst, src);
        }
        if (sts < MFX_ERR_NONE)
        {
            free(accel_dst.Data.Y);
            free(accel_dst.Data.VU);
            copyframe_in_use = 0;
        }
        else
        {
            in_luma   = accel_dst.Data.Y  + accel_dst.Info.CropY * in_pitch + accel_dst.Info.CropX;
            in_chroma = accel_dst.Data.VU + accel_dst.Info.CropY / 2 * in_pitch + accel_dst.Info.CropX;
        }
    }

    if (!copyframe_in_use)
    {
        // Direct path: read straight from the source surface.
        in_luma   = src->Data.Y  + src->Info.CropY * in_pitch + src->Info.CropX;
        in_chroma = src->Data.VU + src->Info.CropY / 2 * in_pitch + src->Info.CropX;
    }

    hb_video_buffer_realloc( dst, w, h );

    uint8_t *srcs[]   = { in_luma, in_chroma };
    int srcs_stride[] = { in_pitch, in_pitch };

    uint8_t *dsts[]   = { dst->plane[0].data, dst->plane[1].data, dst->plane[2].data };
    int dsts_stride[] = { dst->plane[0].stride, dst->plane[1].stride, dst->plane[2].stride };

    ret = sws_scale(sws_context, srcs, srcs_stride, 0, h, dsts, dsts_stride );

    if (copyframe_in_use)
    {
        free(accel_dst.Data.Y);
        free(accel_dst.Data.VU);
    }

    return ret;
}
/*
 * Pack a planar YUV 4:2:0 hb_buffer into a QSV NV12 frame surface via
 * swscale.  Returns the sws_scale() result.
 */
int qsv_yuv420_to_nv12(struct SwsContext* sws_context,mfxFrameSurface1* dst, hb_buffer_t* src){
    int height    = src->plane[0].height;
    int out_pitch = dst->Data.Pitch;

    // Three planar inputs (Y, U, V) ...
    uint8_t *src_planes[]  = { src->plane[0].data, src->plane[1].data, src->plane[2].data };
    int src_strides[]      = { src->plane[0].stride, src->plane[1].stride, src->plane[2].stride };

    // ... into NV12's two planes (Y and interleaved UV), both at the
    // surface pitch.
    uint8_t *dst_planes[]  = { dst->Data.Y, dst->Data.VU };
    int dst_strides[]      = { out_pitch, out_pitch };

    return sws_scale(sws_context, src_planes, src_strides, 0, height, dst_planes, dst_strides );
}
#endif // USE_QSV
HandBrake-0.10.2/libhb/vfr.c 0000664 0001752 0001752 00000054512 12463330511 016065 0 ustar handbrake handbrake /* vfr.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
/* Private state for the frame-rate shaper (VFR/CFR/PFR) filter. */
struct hb_filter_private_s
{
    hb_job_t * job;              // owning job (for interjob stats on close)
    int cfr;                     // 0 = VFR, 1 = CFR, 2 = PFR (see mode notes below)
    int input_vrate;             // source frame rate numerator
    int input_vrate_base;        // source frame rate denominator
    int vrate;                   // target frame rate numerator
    int vrate_base;              // target frame rate denominator
    hb_fifo_t * delay_queue;     // holds recent frames so their durations can be rewritten
    int dropped_frames;          // frames dropped upstream that we compensated for
    int extended_frames;         // frames whose durations were stretched
    uint64_t last_start[4];      // start times of the last 4 queued frames
    uint64_t last_stop[4];       // stop times of the last 4 queued frames
    uint64_t lost_time[4];       // per-slot time debt to redistribute
    uint64_t total_lost_time;    // total time lost to dropped frames
    uint64_t total_gained_time;  // total time added back via extension
    int count_frames;      // frames output so far
    double frame_rate;     // 90KHz ticks per frame (for CFR/PFR)
    uint64_t out_last_stop;  // where last frame ended (for CFR/PFR)
    int drops;             // frames dropped (for CFR/PFR)
    int dups;              // frames duped (for CFR/PFR)

    // Duplicate frame detection members
    float max_metric;   // highest motion metric since
                        // last output frame
    float frame_metric; // motion metric of last frame
    float out_metric;   // motion metric of last output frame
    int sync_parity;    // 1 while locked onto a repeat-frame cadence
    unsigned gamma_lut[256];  // scaled gamma table used by sse_block16()
};
static int hb_vfr_init( hb_filter_object_t * filter,
hb_filter_init_t * init );
static int hb_vfr_work( hb_filter_object_t * filter,
hb_buffer_t ** buf_in,
hb_buffer_t ** buf_out );
static void hb_vfr_close( hb_filter_object_t * filter );
static int hb_vfr_info( hb_filter_object_t * filter, hb_filter_info_t * info );
/* Filter registration for the frame-rate shaper.  enforce_order keeps it
 * last in the filter chain, since it rewrites frame timings. */
hb_filter_object_t hb_filter_vfr =
{
    .id            = HB_FILTER_VFR,
    .enforce_order = 1,
    .name          = "Framerate Shaper",
    .settings      = NULL,
    .init          = hb_vfr_init,
    .work          = hb_vfr_work,
    .close         = hb_vfr_close,
    .info          = hb_vfr_info,
};
// Build the gamma lookup table used by the duplicate-frame detector.
// The table holds scaled integers chosen small enough that the squared
// differences accumulated in sse_block16() below cannot overflow; the
// resulting truncation of small values to 0 is acceptable here.
static void build_gamma_lut( hb_filter_private_t * pv )
{
    for( int idx = 0; idx < 256; idx++ )
    {
        pv->gamma_lut[idx] = 4095 * pow( ( (float)idx / (float)255 ), 2.2f );
    }
}
// Splice buffer 'succ' into a buffer chain immediately after 'pred'.
// Both arguments must be non-NULL; returns the newly inserted buffer.
static hb_buffer_t *insert_buffer_in_chain(
    hb_buffer_t *pred,
    hb_buffer_t *succ )
{
    hb_buffer_t *rest = pred->next;

    pred->next = succ;
    succ->next = rest;
    return succ;
}
#define DUP_THRESH_SSE 5.0
// Gamma-weighted sum of squared errors over one 16x16 pixel block.
// Running pixel values through the gamma LUT de-weights differences
// that are less visible to the eye.
static inline unsigned sse_block16( hb_filter_private_t *pv, uint8_t *a, uint8_t *b, int stride )
{
    unsigned *g = pv->gamma_lut;
    unsigned acc = 0;

    for( int row = 0; row < 16; row++ )
    {
        for( int col = 0; col < 16; col++ )
        {
            int d = g[a[col]] - g[b[col]];
            acc += d * d;
        }
        a += stride;
        b += stride;
    }
    return acc;
}
// Mean gamma-weighted SSE between two frames, computed over every
// complete 16x16 block of the Y (luma) plane only, normalized by the
// frame's pixel count.
static float motion_metric( hb_filter_private_t * pv, hb_buffer_t * a, hb_buffer_t * b )
{
    int blocks_w = a->f.width / 16;
    int blocks_h = a->f.height / 16;
    int stride   = a->plane[0].stride;
    uint8_t * pa = a->plane[0].data;
    uint8_t * pb = b->plane[0].data;
    uint64_t total = 0;

    for( int by = 0; by < blocks_h; by++ )
    {
        for( int bx = 0; bx < blocks_w; bx++ )
        {
            int off = by * 16 * stride + bx * 16;
            total += sse_block16( pv, pa + off, pb + off, stride );
        }
    }
    return (float)total / ( a->f.width * a->f.height );
}
// This section of the code implements video frame rate control.
// Since filters are allowed to duplicate and drop frames (which
// changes the timing), this has to be the last thing done in render.
//
// There are three options, selected by the value of cfr:
// 0 - Variable Frame Rate (VFR) or 'same as source': frame times
// are left alone
// 1 - Constant Frame Rate (CFR): Frame timings are adjusted so that all
// frames are exactly vrate_base ticks apart. Frames are dropped
// or duplicated if necessary to maintain this spacing.
// 2 - Peak Frame Rate (PFR): vrate_base is treated as the peak
// average frame rate. I.e., the average frame rate (current frame
// end time divided by number of frames so far) is never allowed to be
// greater than vrate_base and frames are dropped if necessary
// to keep the average under this value. Other than those drops, frame
// times are left alone.
//
/*
 * Apply the selected frame-rate policy (VFR/CFR/PFR, see the comment
 * block above) to one output frame.
 *
 * On entry *buf_out is the candidate frame; on exit it is either the
 * (possibly retimed/duplicated) frame chain to emit, or NULL if the
 * frame was dropped.  Uses the motion metrics to prefer dropping
 * repeated frames when locked onto a telecine-like cadence.
 */
static void adjust_frame_rate( hb_filter_private_t *pv, hb_buffer_t **buf_out )
{
    hb_buffer_t *out = *buf_out;

    if ( out && out->size > 0 )
    {
        if ( pv->cfr == 0 )
        {
            // VFR: leave the timing alone, just track progress.
            ++pv->count_frames;
            pv->out_last_stop = out->s.stop;
            return;
        }

        // compute where this frame would stop if the frame rate were constant
        // (this is our target stopping time for CFR and earliest possible
        // stopping time for PFR).
        double cfr_stop = pv->frame_rate * ( pv->count_frames + 1 );

        hb_buffer_t * next = hb_fifo_see( pv->delay_queue );
        float next_metric = 0;
        if( next )
            next_metric = motion_metric( pv, out, next );

        if( pv->out_last_stop >= out->s.stop )
        {
            // Frame ends before the last emitted frame: it cannot be
            // shown at all, drop it and carry its metric forward.
            ++pv->drops;
            hb_buffer_close( buf_out );

            pv->frame_metric = next_metric;
            if( next_metric > pv->max_metric )
                pv->max_metric = next_metric;

            return;
        }

        if( out->s.start <= pv->out_last_stop &&
            out->s.stop > pv->out_last_stop &&
            next && next->s.stop < cfr_stop )
        {
            // This frame starts before the end of the last output
            // frame and ends after the end of the last output
            // frame (i.e. it straddles it).  Also the next frame
            // ends before the end of the next output frame.  If the
            // next frame is not a duplicate, and we haven't seen
            // a changed frame since the last output frame,
            // then drop this frame.
            //
            // This causes us to sync to the pattern of progressive
            // 23.976 fps content that has been upsampled to
            // progressive 59.94 fps.
            if( pv->out_metric > pv->max_metric &&
                next_metric > pv->max_metric )
            {
                // Pattern: N R R N
                //          o c n
                // N == new frame
                // R == repeat frame
                // o == last output frame
                // c == current frame
                // n == next frame
                // We haven't seen a frame change since the last output
                // frame and the next frame changes. Use the next frame,
                // drop this one.
                ++pv->drops;
                pv->frame_metric = next_metric;
                pv->max_metric = next_metric;
                pv->sync_parity = 1;
                hb_buffer_close( buf_out );
                return;
            }
            else if( pv->sync_parity &&
                     pv->out_metric < pv->max_metric &&
                     pv->max_metric > pv->frame_metric &&
                     pv->frame_metric < next_metric )
            {
                // Pattern: R N R N
                //          o c n
                // N == new frame
                // R == repeat frame
                // o == last output frame
                // c == current frame
                // n == next frame
                // If we see this pattern, we must not use the next
                // frame when straddling the current frame.
                pv->sync_parity = 0;
            }
            else if( pv->sync_parity )
            {
                // The pattern is indeterminate.  Continue dropping
                // frames on the same schedule
                ++pv->drops;
                pv->frame_metric = next_metric;
                pv->max_metric = next_metric;
                pv->sync_parity = 1;
                hb_buffer_close( buf_out );
                return;
            }
        }

        // this frame has to start where the last one stopped.
        out->s.start = pv->out_last_stop;

        pv->out_metric = pv->frame_metric;
        pv->frame_metric = next_metric;
        pv->max_metric = next_metric;

        // at this point we know that this frame doesn't push the average
        // rate over the limit so we just pass it on for PFR. For CFR we're
        // going to return it (with its start & stop times modified) and
        // we may have to dup it.
        ++pv->count_frames;
        if ( pv->cfr > 1 )
        {
            // PFR - we're going to keep the frame but may need to
            // adjust it's stop time to meet the average rate constraint.
            if ( out->s.stop <= cfr_stop )
            {
                out->s.stop = cfr_stop;
            }
            pv->out_last_stop = out->s.stop;
        }
        else
        {
            // we're doing CFR so we have to either trim some time from a
            // buffer that ends too far in the future or, if the buffer is
            // two or more frame times long, split it into multiple pieces,
            // each of which is a frame time long.
            double excess_dur = (double)out->s.stop - cfr_stop;
            out->s.stop = cfr_stop;
            pv->out_last_stop = out->s.stop;
            for ( ; excess_dur >= pv->frame_rate; excess_dur -= pv->frame_rate )
            {
                /* next frame too far ahead - dup current frame */
                hb_buffer_t *dup = hb_buffer_dup( out );
                dup->s.new_chap = 0;
                dup->s.start = cfr_stop;
                cfr_stop += pv->frame_rate;
                dup->s.stop = cfr_stop;
                pv->out_last_stop = dup->s.stop;
                out = insert_buffer_in_chain( out, dup );
                ++pv->dups;
                ++pv->count_frames;
            }
        }
    }
}
/*
 * Initialize the frame-rate shaper.
 *
 * Reads the requested mode and target rate from the filter settings
 * ("cfr:vrate:vrate_base"), sets up the frame delay queue, and reports
 * the effective output frame rate back through 'init'.
 * Returns 0 on success, non-zero on allocation failure.
 */
static int hb_vfr_init(hb_filter_object_t *filter, hb_filter_init_t *init)
{
    filter->private_data = calloc(1, sizeof(struct hb_filter_private_s));
    hb_filter_private_t *pv = filter->private_data;

    if (pv == NULL)
    {
        // Without private data every subsequent work/info/close call
        // would dereference NULL; fail the init instead.
        hb_log("vfr: calloc failed during init");
        return 1;
    }

    build_gamma_lut(pv);
    pv->cfr              = init->cfr;
    pv->input_vrate      = pv->vrate      = init->vrate;
    pv->input_vrate_base = pv->vrate_base = init->vrate_base;
    if (filter->settings != NULL)
    {
        sscanf(filter->settings, "%d:%d:%d",
               &pv->cfr, &pv->vrate, &pv->vrate_base);
    }

    pv->job = init->job;

    /* Set up the FIFO that delays frames so their durations can be
       rewritten before they are emitted */
    pv->delay_queue = hb_fifo_init( 8, 1 );

    /* VFR IVTC needs a bunch of time-keeping variables to track
       how many frames are dropped, how many are extended, what the
       last 4 start and stop times were (so they can be modified),
       how much time has been lost and gained overall, how much time
       the latest 4 frames should be extended by */
    pv->dropped_frames = 0;
    pv->extended_frames = 0;
    pv->last_start[0] = 0;
    pv->last_stop[0] = 0;
    pv->total_lost_time = 0;
    pv->total_gained_time = 0;
    pv->lost_time[0] = 0; pv->lost_time[1] = 0; pv->lost_time[2] = 0; pv->lost_time[3] = 0;
    pv->frame_metric = 1000; // Force first frame

    if (pv->cfr == 2)
    {
        // For PFR, we want the framerate based on the source's actual
        // framerate, unless it's higher than the specified peak framerate.
        double source_fps = (double)init->vrate / init->vrate_base;
        double peak_fps = (double)pv->vrate / pv->vrate_base;
        if (source_fps > peak_fps)
        {
            // peak framerate is lower than the source framerate.
            // so signal that the framerate will be the peak fps.
            init->vrate = pv->vrate;
            init->vrate_base = pv->vrate_base;
        }
    }
    else
    {
        init->vrate = pv->vrate;
        init->vrate_base = pv->vrate_base;
    }
    // Duration of one output frame in 90kHz clock ticks.
    pv->frame_rate = (double)pv->vrate_base * 90000. / pv->vrate;
    init->cfr = pv->cfr;

    return 0;
}
/*
 * Report the filter's output frame rate and a human-readable summary.
 * Returns 0 on success, 1 if the filter has no private data.
 */
static int hb_vfr_info( hb_filter_object_t * filter,
                        hb_filter_info_t * info )
{
    hb_filter_private_t * pv = filter->private_data;
    if( !pv )
        return 1;

    memset( info, 0, sizeof( hb_filter_info_t ) );

    // Default to passing the source rate through.
    info->out.vrate_base = pv->input_vrate_base;
    info->out.vrate      = pv->input_vrate;

    double source_fps = (double)pv->input_vrate / pv->input_vrate_base;
    double target_fps = (double)pv->vrate / pv->vrate_base;

    // CFR always outputs the target rate.  PFR outputs the target rate
    // only when the source is faster than the configured peak; otherwise
    // the source rate stands.
    if (pv->cfr != 2 || source_fps > target_fps)
    {
        info->out.vrate      = pv->vrate;
        info->out.vrate_base = pv->vrate_base;
    }
    info->out.cfr = pv->cfr;

    switch (pv->cfr)
    {
        case 0:
            /* Ensure we're using "Same as source" FPS */
            sprintf( info->human_readable_desc,
                     "frame rate: same as source (around %.3f fps)",
                     (float)pv->vrate / pv->vrate_base );
            break;
        case 2:
            sprintf( info->human_readable_desc,
                     "frame rate: %.3f fps -> peak rate limited to %.3f fps",
                     source_fps , target_fps );
            break;
        default:
            // Constant framerate. Signal the framerate we are using.
            sprintf( info->human_readable_desc,
                     "frame rate: %.3f fps -> constant %.3f fps",
                     source_fps , target_fps );
            break;
    }
    return 0;
}
/*
 * Tear down the frame-rate shaper: log drop/dup statistics, record the
 * output frame count and total duration for 2nd-pass rate accuracy,
 * and release the delay queue and private state.
 */
static void hb_vfr_close( hb_filter_object_t * filter )
{
    hb_filter_private_t * pv = filter->private_data;

    if( !pv )
        return;

    if ( pv->cfr )
    {
        hb_log("render: %d frames output, %d dropped and %d duped for CFR/PFR",
               pv->count_frames, pv->drops, pv->dups );
    }

    if( pv->job )
    {
        hb_interjob_t * interjob = hb_interjob_get( pv->job->h );

        /* Preserve dropped frame count for more accurate
         * framerates in 2nd passes.
         */
        interjob->out_frame_count = pv->count_frames;
        interjob->total_time = pv->out_last_stop;
    }

    hb_log("render: lost time: %"PRId64" (%i frames)",
           pv->total_lost_time, pv->dropped_frames);
    hb_log("render: gained time: %"PRId64" (%i frames) (%"PRId64" not accounted for)",
           pv->total_gained_time, pv->extended_frames,
           pv->total_lost_time - pv->total_gained_time);

    if (pv->dropped_frames)
    {
        hb_log("render: average dropped frame duration: %"PRId64,
               (pv->total_lost_time / pv->dropped_frames) );
    }

    if( pv->delay_queue )
    {
        hb_fifo_close( &pv->delay_queue );
    }

    /* Cleanup render work structure */
    free( pv );
    filter->private_data = NULL;
}
/*
 * Per-frame work routine for the frame-rate shaper.
 *
 * Frames are buffered through a 4-deep delay queue so that when an
 * upstream filter (e.g. detelecine) drops a frame, the lost duration
 * can be redistributed across the neighboring frames before they are
 * released.  Each released frame then goes through adjust_frame_rate()
 * to enforce the CFR/PFR policy.  An empty (EOS) input flushes the
 * queue and returns HB_FILTER_DONE.
 */
static int hb_vfr_work( hb_filter_object_t * filter,
                        hb_buffer_t ** buf_in,
                        hb_buffer_t ** buf_out )
{
    hb_filter_private_t * pv = filter->private_data;
    hb_buffer_t * in = *buf_in;
    hb_buffer_t * out = NULL;

    *buf_in = NULL;
    *buf_out = NULL;
    if( in->size <= 0 )
    {
        hb_buffer_t *head = NULL, *tail = NULL, *next;
        int counter = 2;

        /* If the input buffer is end of stream, send out an empty one
         * to the next stage as well. To avoid losing the contents of
         * the delay queue connect the buffers in the delay queue in
         * the correct order, and add the end of stream buffer to the
         * end.
         */
        while( ( next = hb_fifo_get( pv->delay_queue ) ) != NULL )
        {

            /* We can't use the given time stamps. Previous frames
               might already have been extended, throwing off the
               raw values fed to render.c. Instead, their
               stop and start times are stored in arrays.
               The 4th cached frame will be the to use.
               If it needed its duration extended to make up
               lost time, it will have happened above. */
            next->s.start = pv->last_start[counter];
            next->s.stop = pv->last_stop[counter--];

            adjust_frame_rate( pv, &next );

            // adjust_frame_rate may have dropped the frame (next == NULL)
            // or returned a chain of duplicates; append whatever survived.
            if( next )
            {
                if( !head && !tail )
                {
                    head = next;
                } else {
                    tail->next = next;
                }
                // Move tail to the end of the list that
                // adjust_frame_rate could return
                while (next)
                {
                    tail = next;
                    next = next->next;
                }
            }
        }
        if( tail )
        {
            tail->next = in;
            *buf_out = head;
        } else {
            *buf_out = in;
        }
        return HB_FILTER_DONE;
    }

    // If there is a gap between the last stop and the current start
    // then frame(s) were dropped.
    if ( in->s.start > pv->last_stop[0] )
    {
        /* We need to compensate for the time lost by dropping frame(s).
           Spread its duration out in quarters, because usually dropped frames
           maintain a 1-out-of-5 pattern and this spreads it out amongst
           the remaining ones.  Store these in the lost_time array, which
           has 4 slots in it.  Because not every frame duration divides
           evenly by 4, and we can't lose the remainder, we have to go
           through an awkward process to preserve it in the 4th array index.
        */
        uint64_t temp_duration = in->s.start - pv->last_stop[0];
        pv->lost_time[0] += (temp_duration / 4);
        pv->lost_time[1] += (temp_duration / 4);
        pv->lost_time[2] += (temp_duration / 4);
        // 4th slot absorbs the division remainder so no time is lost
        pv->lost_time[3] += ( temp_duration - 3 * (temp_duration / 4) );

        pv->total_lost_time += temp_duration;
    }
    else if ( in->s.stop <= pv->last_stop[0] )
    {
        // This is generally an error somewhere (bad source or hb bug).
        // But lets do our best to straighten out the mess.
        ++pv->drops;
        hb_buffer_close(&in);
        return HB_FILTER_OK;
    }

    /* Cache frame start and stop times, so we can renumber
       time stamps if dropping frames for VFR.  */
    int i;
    for( i = 3; i >= 1; i-- )
    {
        pv->last_start[i] = pv->last_start[i-1];
        pv->last_stop[i] = pv->last_stop[i-1];
    }

    /* In order to make sure we have continuous time stamps, store
       the current frame's duration as starting when the last one stopped. */
    pv->last_start[0] = pv->last_stop[1];
    pv->last_stop[0] = pv->last_start[0] + (in->s.stop - in->s.start);

    hb_fifo_push( pv->delay_queue, in );

    /*
     * Keep the last three frames in our queue, this ensures that we have
     * the last two always in there should we need to rewrite the
     * durations on them.
     */
    if( hb_fifo_size( pv->delay_queue ) >= 4 )
    {
        out = hb_fifo_get( pv->delay_queue );
    }

    if( out )
    {
        /* The current frame exists. That means it hasn't been dropped by a
         * filter. We may edit its duration if needed.
         */
        if( pv->lost_time[3] > 0 )
        {
            int time_shift = 0;
            for( i = 3; i >= 0; i-- )
            {
                /*
                 * A frame's been dropped earlier by VFR detelecine.
                 * Gotta make up the lost time. This will also
                 * slow down the video.
                 * The dropped frame's has to be accounted for, so
                 * divvy it up amongst the 4 frames left behind.
                 * This is what the delay_queue is for;
                 * telecined sequences start 2 frames before
                 * the dropped frame, so to slow down the right
                 * ones you need a 2 frame delay between
                 * reading input and writing output.
                 */

                /* We want to extend the outputted frame's duration by the value
                  stored in the 4th slot of the lost_time array. Because we need
                  to adjust all the values in the array so they're contiguous,
                  extend the duration inside the array first, before applying
                  it to the current frame buffer. */
                pv->last_start[i] += time_shift;
                pv->last_stop[i] += pv->lost_time[i] + time_shift;

                /* Log how much time has been added back in to the video. */
                pv->total_gained_time += pv->lost_time[i];
                time_shift += pv->lost_time[i];

                pv->lost_time[i] = 0;

                /* Log how many frames have had their durations extended. */
                pv->extended_frames++;
            }
        }

        /* We can't use the given time stamps. Previous frames
           might already have been extended, throwing off the
           raw values fed to render.c. Instead, their
           stop and start times are stored in arrays.
           The 4th cached frame will be the to use.
           If it needed its duration extended to make up
           lost time, it will have happened above. */
        out->s.start = pv->last_start[3];
        out->s.stop = pv->last_stop[3];

        adjust_frame_rate( pv, &out );
    }

    *buf_out = out;
    return HB_FILTER_OK;
}
HandBrake-0.10.2/libhb/dvdnav.c 0000664 0001752 0001752 00000174336 12463330511 016561 0 ustar handbrake handbrake /* dvdnav.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
#include "lang.h"
#include "dvd.h"
#include "dvdnav/dvdnav.h"
#include "dvdread/ifo_read.h"
#include "dvdread/ifo_print.h"
#include "dvdread/nav_read.h"
#define DVD_READ_CACHE 1
static char * hb_dvdnav_name( char * path );
static hb_dvd_t * hb_dvdnav_init( char * path );
static int hb_dvdnav_title_count( hb_dvd_t * d );
static hb_title_t * hb_dvdnav_title_scan( hb_dvd_t * d, int t, uint64_t min_duration );
static int hb_dvdnav_start( hb_dvd_t * d, hb_title_t *title, int chapter );
static void hb_dvdnav_stop( hb_dvd_t * d );
static int hb_dvdnav_seek( hb_dvd_t * d, float f );
static hb_buffer_t * hb_dvdnav_read( hb_dvd_t * d );
static int hb_dvdnav_chapter( hb_dvd_t * d );
static void hb_dvdnav_close( hb_dvd_t ** _d );
static int hb_dvdnav_angle_count( hb_dvd_t * d );
static void hb_dvdnav_set_angle( hb_dvd_t * d, int angle );
static int hb_dvdnav_main_feature( hb_dvd_t * d, hb_list_t * list_title );
/* Method table wiring the dvdnav-backed implementation into HandBrake's
 * generic DVD reader interface (hb_dvd_func_t). */
hb_dvd_func_t hb_dvdnav_func =
{
    hb_dvdnav_init,
    hb_dvdnav_close,
    hb_dvdnav_name,
    hb_dvdnav_title_count,
    hb_dvdnav_title_scan,
    hb_dvdnav_start,
    hb_dvdnav_stop,
    hb_dvdnav_seek,
    hb_dvdnav_read,
    hb_dvdnav_chapter,
    hb_dvdnav_angle_count,
    hb_dvdnav_set_angle,
    hb_dvdnav_main_feature
};
// there can be at most 999 PGCs per title. round that up to the nearest
// power of two.
#define MAX_PGCN 1024
/***********************************************************************
* Local prototypes
**********************************************************************/
static void PgcWalkInit( uint32_t pgcn_map[MAX_PGCN/32] );
static int FindChapterIndex( hb_list_t * list, int pgcn, int pgn );
static int NextPgcn( ifo_handle_t *ifo, int pgcn, uint32_t pgcn_map[MAX_PGCN/32] );
static int FindNextCell( pgc_t *pgc, int cell_cur );
static int dvdtime2msec( dvd_time_t * );
/* Accessor returning the dvdnav method table above; used by callers to
 * select the dvdnav implementation of the DVD reader interface. */
hb_dvd_func_t * hb_dvdnav_methods( void )
{
    return &hb_dvdnav_func;
}
/*
 * Fetch the UDF volume name of the DVD at 'path'.
 *
 * Returns NULL if the disc can't be opened or has no volume info.
 * NOTE: the result points at static storage -- it is overwritten by
 * the next call and is not thread-safe.
 */
static char * hb_dvdnav_name( char * path )
{
    static char name[1024];
    unsigned char unused[1024];

    dvd_reader_t * reader = DVDOpen( path );
    if( reader == NULL )
    {
        return NULL;
    }

    int failed = DVDUDFVolumeInfo( reader, name, sizeof( name ),
                                   unused, sizeof( unused ) );
    DVDClose( reader );

    return failed ? NULL : name;
}
/***********************************************************************
* hb_dvdnav_reset
***********************************************************************
* Once dvdnav has entered the 'stopped' state, it can not be revived
* dvdnav_reset doesn't work because it doesn't remember the path
* So this function re-opens dvdnav
**********************************************************************/
/***********************************************************************
 * hb_dvdnav_reset
 ***********************************************************************
 * Once dvdnav has entered the 'stopped' state, it can not be revived
 * dvdnav_reset doesn't work because it doesn't remember the path
 * So this function re-opens dvdnav
 *
 * Returns 1 on success, 0 on failure.  On failure the dvdnav handle is
 * closed and cleared so later calls don't operate on a stale pointer.
 **********************************************************************/
static int hb_dvdnav_reset( hb_dvdnav_t * d )
{
    char * path_ccp = hb_utf8_to_cp( d->path );

    if ( d->dvdnav )
        dvdnav_close( d->dvdnav );

    /* Open device */
    if( dvdnav_open(&d->dvdnav, path_ccp) != DVDNAV_STATUS_OK )
    {
        /*
         * Not an error, may be a stream - which we'll try in a moment.
         */
        hb_log( "dvd: not a dvd - trying as a stream/file instead" );
        goto fail;
    }

    if (dvdnav_set_readahead_flag(d->dvdnav, DVD_READ_CACHE) !=
        DVDNAV_STATUS_OK)
    {
        hb_error("Error: dvdnav_set_readahead_flag: %s\n",
                 dvdnav_err_to_string(d->dvdnav));
        goto fail;
    }

    /*
     ** set the PGC positioning flag to have position information
     ** relatively to the whole feature instead of just relatively to the
     ** current chapter
     **/
    if (dvdnav_set_PGC_positioning_flag(d->dvdnav, 1) != DVDNAV_STATUS_OK)
    {
        hb_error("Error: dvdnav_set_PGC_positioning_flag: %s\n",
                 dvdnav_err_to_string(d->dvdnav));
        goto fail;
    }

    free( path_ccp );
    return 1;

fail:
    if( d->dvdnav ) dvdnav_close( d->dvdnav );
    // fix: clear the handle after closing so a later hb_dvdnav_reset()
    // or close doesn't call dvdnav_close() on a dangling pointer
    d->dvdnav = NULL;
    free( path_ccp );
    return 0;
}
/***********************************************************************
* hb_dvdnav_init
***********************************************************************
*
**********************************************************************/
static hb_dvd_t * hb_dvdnav_init( char * path )
{
hb_dvd_t * e;
hb_dvdnav_t * d;
int region_mask;
char * path_ccp;
e = calloc( sizeof( hb_dvd_t ), 1 );
d = &(e->dvdnav);
/*
* Convert UTF-8 path to current code page on Windows
* hb_utf8_to_cp() is the same as strdup on non-Windows,
* so no #ifdef required here
*/
path_ccp = hb_utf8_to_cp( path );
/* Log DVD drive region code */
if ( hb_dvd_region( path_ccp, ®ion_mask ) == 0 )
{
hb_log( "dvd: Region mask 0x%02x", region_mask );
if ( region_mask == 0xFF )
{
hb_log( "dvd: Warning, DVD device has no region set" );
}
}
/* Open device */
if( dvdnav_open(&d->dvdnav, path_ccp) != DVDNAV_STATUS_OK )
{
/*
* Not an error, may be a stream - which we'll try in a moment.
*/
hb_log( "dvd: not a dvd - trying as a stream/file instead" );
goto fail;
}
if (dvdnav_set_readahead_flag(d->dvdnav, DVD_READ_CACHE) !=
DVDNAV_STATUS_OK)
{
hb_error("Error: dvdnav_set_readahead_flag: %s\n",
dvdnav_err_to_string(d->dvdnav));
goto fail;
}
/*
** set the PGC positioning flag to have position information
** relatively to the whole feature instead of just relatively to the
** current chapter
**/
if (dvdnav_set_PGC_positioning_flag(d->dvdnav, 1) != DVDNAV_STATUS_OK)
{
hb_error("Error: dvdnav_set_PGC_positioning_flag: %s\n",
dvdnav_err_to_string(d->dvdnav));
goto fail;
}
/* Open device */
if( !( d->reader = DVDOpen( path_ccp ) ) )
{
/*
* Not an error, may be a stream - which we'll try in a moment.
*/
hb_log( "dvd: not a dvd - trying as a stream/file instead" );
goto fail;
}
/* Open main IFO */
if( !( d->vmg = ifoOpen( d->reader, 0 ) ) )
{
hb_error( "dvd: ifoOpen failed" );
goto fail;
}
d->path = strdup( path ); /* hb_dvdnav_title_scan assumes UTF-8 path, so not path_ccp here */
free( path_ccp );
return e;
fail:
if( d->dvdnav ) dvdnav_close( d->dvdnav );
if( d->vmg ) ifoClose( d->vmg );
if( d->reader ) DVDClose( d->reader );
free( e );
free( path_ccp );
return NULL;
}
/***********************************************************************
* hb_dvdnav_title_count
**********************************************************************/
/***********************************************************************
 * hb_dvdnav_title_count
 ***********************************************************************
 * Ask libdvdnav how many titles the open disc reports.
 **********************************************************************/
static int hb_dvdnav_title_count( hb_dvd_t * e )
{
    hb_dvdnav_t * d = &(e->dvdnav);
    int count = 0;

    dvdnav_get_number_of_titles( d->dvdnav, &count );
    return count;
}
/* Compute the playback duration (in 90kHz ticks) of PTT 'pttn' of title
 * 'ttn' by walking the PGC chain starting at the PTT's PGC.  On return
 * *blocks holds the total sector count covered and *last_pgcn the last
 * PGC visited.  Returns 0 when the PTT references an invalid PGC or
 * program number. */
static uint64_t
PttDuration(ifo_handle_t *ifo, int ttn, int pttn, int *blocks, int *last_pgcn)
{
    int pgcn, pgn;
    pgc_t * pgc;
    uint64_t duration = 0;
    int cell_start, cell_end;
    int i;

    *blocks = 0;

    // Initialize map of visited pgc's to prevent loops
    uint32_t pgcn_map[MAX_PGCN/32];
    PgcWalkInit( pgcn_map );

    pgcn = ifo->vts_ptt_srpt->title[ttn-1].ptt[pttn-1].pgcn;
    pgn = ifo->vts_ptt_srpt->title[ttn-1].ptt[pttn-1].pgn;
    if ( pgcn < 1 || pgcn > ifo->vts_pgcit->nr_of_pgci_srp || pgcn >= MAX_PGCN)
    {
        hb_log( "invalid PGC ID %d, skipping", pgcn );
        return 0;
    }
    if( pgn <= 0 || pgn > 99 )
    {
        hb_log( "scan: pgn %d not valid, skipping", pgn );
        return 0;
    }

    do
    {
        pgc = ifo->vts_pgcit->pgci_srp[pgcn-1].pgc;
        if (!pgc)
        {
            // a broken PGC invalidates everything accumulated so far
            *blocks = 0;
            duration = 0;
            hb_log( "scan: pgc not valid, skipping" );
            break;
        }
        if (pgc->cell_playback == NULL)
        {
            *blocks = 0;
            duration = 0;
            hb_log("invalid PGC cell_playback table, skipping");
            break;
        }
        if (pgn > pgc->nr_of_programs)
        {
            // program number is past this PGC; restart numbering and
            // move on ('continue' in a do/while jumps straight to the
            // NextPgcn() condition, advancing pgcn)
            pgn = 1;
            continue;
        }

        duration += 90LL * dvdtime2msec( &pgc->playback_time );

        // sum the sector counts of the cells from this program to the
        // end of the PGC
        cell_start = pgc->program_map[pgn-1] - 1;
        cell_end = pgc->nr_of_cells - 1;
        for(i = cell_start; i <= cell_end; i = FindNextCell(pgc, i))
        {
            *blocks += pgc->cell_playback[i].last_sector + 1 -
                       pgc->cell_playback[i].first_sector;
        }
        *last_pgcn = pgcn;
        pgn = 1;
    } while((pgcn = NextPgcn(ifo, pgcn, pgcn_map)) != 0);
    return duration;
}
/***********************************************************************
* hb_dvdnav_title_scan
**********************************************************************/
/* Scan title 't' (1-based) of the open disc and build an hb_title_t
 * describing it: name, duration, longest PGC, audio tracks, subtitles,
 * chapters, and aspect ratio.  Titles shorter than 'min_duration'
 * (90kHz ticks) or with inconsistent IFO data are rejected.
 * Returns the new title, or NULL when the title is skipped. */
static hb_title_t * hb_dvdnav_title_scan( hb_dvd_t * e, int t, uint64_t min_duration )
{
    hb_dvdnav_t * d = &(e->dvdnav);
    hb_title_t * title;
    ifo_handle_t * ifo = NULL;
    int pgcn, pgn, pgcn_end, i, c;
    int title_pgcn;
    pgc_t * pgc;
    int cell_cur;
    hb_chapter_t * chapter;
    int count;
    uint64_t duration, longest;
    int longest_pgcn, longest_pgn, longest_pgcn_end;
    const char * name;
    const char * codec_name;

    hb_log( "scan: scanning title %d", t );

    title = hb_title_init( d->path, t );
    title->type = HB_DVD_TYPE;
    if (dvdnav_get_title_string(d->dvdnav, &name) == DVDNAV_STATUS_OK)
    {
        // fix: strncpy() does not guarantee NUL termination when the
        // source fills the buffer; snprintf always terminates and
        // matches the fallback path below
        snprintf( title->name, sizeof( title->name ), "%s", name );
    }
    else
    {
        // No disc name available: derive one from the last path
        // component, dropping any file extension.
        char * p_cur, * p_last = d->path;
        for( p_cur = d->path; *p_cur; p_cur++ )
        {
            if( IS_DIR_SEP(p_cur[0]) && p_cur[1] )
            {
                p_last = &p_cur[1];
            }
        }
        snprintf( title->name, sizeof( title->name ), "%s", p_last );
        char *dot_term = strrchr(title->name, '.');
        if (dot_term)
            *dot_term = '\0';
    }

    /* VTS which our title is in */
    title->vts = d->vmg->tt_srpt->title[t-1].title_set_nr;
    if ( !title->vts )
    {
        /* A VTS of 0 means the title wasn't found in the title set */
        hb_log("Invalid VTS (title set) number: %i", title->vts);
        goto fail;
    }

    hb_log( "scan: opening IFO for VTS %d", title->vts );
    if( !( ifo = ifoOpen( d->reader, title->vts ) ) )
    {
        hb_log( "scan: ifoOpen failed" );
        goto fail;
    }

    /* ignore titles with bogus cell addresses so we don't abort later
    ** in libdvdread. */
    for ( i = 0; i < ifo->vts_c_adt->nr_of_vobs; ++i)
    {
        if( (ifo->vts_c_adt->cell_adr_table[i].start_sector & 0xffffff ) ==
            0xffffff )
        {
            hb_log( "scan: cell_adr_table[%d].start_sector invalid (0x%x) "
                    "- skipping title", i,
                    ifo->vts_c_adt->cell_adr_table[i].start_sector );
            goto fail;
        }
        if( (ifo->vts_c_adt->cell_adr_table[i].last_sector & 0xffffff ) ==
            0xffffff )
        {
            hb_log( "scan: cell_adr_table[%d].last_sector invalid (0x%x) "
                    "- skipping title", i,
                    ifo->vts_c_adt->cell_adr_table[i].last_sector );
            goto fail;
        }
    }

    if( global_verbosity_level == 3 )
    {
        ifo_print( d->reader, title->vts );
    }

    /* Position of the title in the VTS */
    title->ttn = d->vmg->tt_srpt->title[t-1].vts_ttn;
    if ( title->ttn < 1 || title->ttn > ifo->vts_ptt_srpt->nr_of_srpts )
    {
        hb_log( "invalid VTS PTT offset %d for title %d, skipping", title->ttn, t );
        goto fail;
    }

    /* Find the longest PTT chain; it is assumed to be the actual
     * content of the title. */
    longest = 0LL;
    longest_pgcn = -1;
    longest_pgn = 1;
    longest_pgcn_end = -1;
    pgcn_end = -1;
    for( i = 0; i < ifo->vts_ptt_srpt->title[title->ttn-1].nr_of_ptts; i++ )
    {
        int blocks = 0;
        duration = PttDuration(ifo, title->ttn, i+1, &blocks, &pgcn_end);
        pgcn = ifo->vts_ptt_srpt->title[title->ttn-1].ptt[i].pgcn;
        pgn = ifo->vts_ptt_srpt->title[title->ttn-1].ptt[i].pgn;
        if( duration > longest )
        {
            longest_pgcn = pgcn;
            longest_pgn = pgn;
            longest_pgcn_end = pgcn_end;
            longest = duration;
            title->block_count = blocks;
        }
        else if (pgcn == longest_pgcn && pgn < longest_pgn)
        {
            // same PGC, earlier program: prefer the earlier start point
            longest_pgn = pgn;
            title->block_count = blocks;
        }
    }

    /* Get duration */
    title->duration = longest;
    title->hours = title->duration / 90000 / 3600;
    title->minutes = ( ( title->duration / 90000 ) % 3600 ) / 60;
    title->seconds = ( title->duration / 90000 ) % 60;
    hb_log( "scan: duration is %02d:%02d:%02d (%"PRId64" ms)",
            title->hours, title->minutes, title->seconds,
            title->duration / 90 );

    /* ignore titles under 10 seconds because they're often stills or
     * clips with no audio & our preview code doesn't currently handle
     * either of these. */
    if( longest < min_duration )
    {
        hb_log( "scan: ignoring title (too short)" );
        goto fail;
    }

    pgcn = longest_pgcn;
    pgcn_end = longest_pgcn_end;
    pgn = longest_pgn;  // fix: removed stray double semicolon
    title_pgcn = pgcn;

    /* Get pgc */
    if ( pgcn < 1 || pgcn > ifo->vts_pgcit->nr_of_pgci_srp || pgcn >= MAX_PGCN)
    {
        hb_log( "invalid PGC ID %d for title %d, skipping", pgcn, t );
        goto fail;
    }

    // Check all pgc's for validity
    uint32_t pgcn_map[MAX_PGCN/32];
    PgcWalkInit( pgcn_map );
    do
    {
        pgc = ifo->vts_pgcit->pgci_srp[pgcn-1].pgc;
        if( !pgc || !pgc->program_map )
        {
            hb_log( "scan: pgc not valid, skipping" );
            goto fail;
        }
        if (pgc->cell_playback == NULL)
        {
            hb_log( "invalid PGC cell_playback table for title %d, skipping", t );
            goto fail;
        }
    } while ((pgcn = NextPgcn(ifo, pgcn, pgcn_map)) != 0);

    pgcn = longest_pgcn;
    pgc = ifo->vts_pgcit->pgci_srp[pgcn-1].pgc;
    hb_log("pgc_id: %d, pgn: %d: pgc: %p", pgcn, pgn, pgc);
    if (pgn > pgc->nr_of_programs)
    {
        hb_log( "invalid PGN %d for title %d, skipping", pgn, t );
        goto fail;
    }

    /* Title start */
    title->cell_start = pgc->program_map[pgn-1] - 1;
    title->block_start = pgc->cell_playback[title->cell_start].first_sector;

    pgc = ifo->vts_pgcit->pgci_srp[pgcn_end-1].pgc;

    /* Title end */
    title->cell_end = pgc->nr_of_cells - 1;
    title->block_end = pgc->cell_playback[title->cell_end].last_sector;

    hb_log( "scan: vts=%d, ttn=%d, cells=%d->%d, blocks=%"PRIu64"->%"PRIu64", "
            "%"PRIu64" blocks", title->vts, title->ttn, title->cell_start,
            title->cell_end, title->block_start, title->block_end,
            title->block_count );

    /* Detect languages */
    for( i = 0; i < ifo->vtsi_mat->nr_of_vts_audio_streams; i++ )
    {
        int audio_format, lang_code, lang_extension, audio_control, position, j;
        hb_audio_t * audio, * audio_tmp;
        iso639_lang_t * lang;

        hb_log( "scan: checking audio %d", i + 1 );

        audio = calloc( sizeof( hb_audio_t ), 1 );

        audio_format = ifo->vtsi_mat->vts_audio_attr[i].audio_format;
        lang_code = ifo->vtsi_mat->vts_audio_attr[i].lang_code;
        lang_extension = ifo->vtsi_mat->vts_audio_attr[i].code_extension;
        audio_control =
            ifo->vts_pgcit->pgci_srp[title_pgcn-1].pgc->audio_control[i];

        if( !( audio_control & 0x8000 ) )
        {
            hb_log( "scan: audio channel is not active" );
            free( audio );
            continue;
        }

        position = ( audio_control & 0x7F00 ) >> 8;

        switch( audio_format )
        {
            case 0x00:
                audio->id = ( ( 0x80 + position ) << 8 ) | 0xbd;
                audio->config.in.codec = HB_ACODEC_AC3;
                audio->config.in.codec_param = AV_CODEC_ID_AC3;
                codec_name = "AC3";
                break;

            case 0x02:
            case 0x03:
                audio->id = 0xc0 + position;
                audio->config.in.codec = HB_ACODEC_FFMPEG;
                audio->config.in.codec_param = AV_CODEC_ID_MP2;
                codec_name = "MPEG";
                break;

            case 0x04:
                audio->id = ( ( 0xa0 + position ) << 8 ) | 0xbd;
                audio->config.in.codec = HB_ACODEC_LPCM;
                codec_name = "LPCM";
                break;

            case 0x06:
                audio->id = ( ( 0x88 + position ) << 8 ) | 0xbd;
                audio->config.in.codec = HB_ACODEC_DCA;
                audio->config.in.codec_param = AV_CODEC_ID_DTS;
                codec_name = "DTS";
                break;

            default:
                audio->id = 0;
                audio->config.in.codec = 0;
                codec_name = "Unknown";
                hb_log( "scan: unknown audio codec (%x)",
                        audio_format );
                break;
        }
        if( !audio->id )
        {
            // fix: the unknown-codec path previously leaked 'audio'
            free( audio );
            continue;
        }

        /* Check for duplicate tracks */
        audio_tmp = NULL;
        for( j = 0; j < hb_list_count( title->list_audio ); j++ )
        {
            audio_tmp = hb_list_item( title->list_audio, j );
            if( audio->id == audio_tmp->id )
            {
                break;
            }
            audio_tmp = NULL;
        }
        if( audio_tmp )
        {
            hb_log( "scan: duplicate audio track" );
            free( audio );
            continue;
        }

        lang = lang_for_code( lang_code );

        audio->config.lang.type = lang_extension;

        snprintf( audio->config.lang.simple,
                  sizeof( audio->config.lang.simple ), "%s",
                  strlen( lang->native_name ) ? lang->native_name : lang->eng_name );
        snprintf( audio->config.lang.iso639_2,
                  sizeof( audio->config.lang.iso639_2 ), "%s", lang->iso639_2 );

        hb_log("scan: id=0x%x, lang=%s (%s), 3cc=%s ext=%i", audio->id,
               audio->config.lang.simple, codec_name,
               audio->config.lang.iso639_2, lang_extension);

        audio->config.in.track = i;

        hb_list_add( title->list_audio, audio );
    }

    /* Check for subtitles */
    for( i = 0; i < ifo->vtsi_mat->nr_of_vts_subp_streams; i++ )
    {
        hb_subtitle_t * subtitle;
        int spu_control;
        int position;
        iso639_lang_t * lang;
        int lang_extension = 0;

        hb_log( "scan: checking subtitle %d", i + 1 );

        spu_control =
            ifo->vts_pgcit->pgci_srp[title_pgcn-1].pgc->subp_control[i];

        if( !( spu_control & 0x80000000 ) )
        {
            hb_log( "scan: subtitle channel is not active" );
            continue;
        }

        // Pick the substream id that matches the display mode the
        // video attributes allow.
        if( ifo->vtsi_mat->vts_video_attr.display_aspect_ratio )
        {
            switch( ifo->vtsi_mat->vts_video_attr.permitted_df )
            {
                case 1:
                    position = spu_control & 0xFF;
                    break;
                case 2:
                    position = ( spu_control >> 8 ) & 0xFF;
                    break;
                default:
                    position = ( spu_control >> 16 ) & 0xFF;
            }
        }
        else
        {
            position = ( spu_control >> 24 ) & 0x7F;
        }

        lang_extension = ifo->vtsi_mat->vts_subp_attr[i].code_extension;

        lang = lang_for_code( ifo->vtsi_mat->vts_subp_attr[i].lang_code );

        subtitle = calloc( sizeof( hb_subtitle_t ), 1 );
        subtitle->track = i+1;
        subtitle->id = ( ( 0x20 + position ) << 8 ) | 0xbd;
        snprintf( subtitle->lang, sizeof( subtitle->lang ), "%s",
                  strlen(lang->native_name) ? lang->native_name : lang->eng_name);
        snprintf( subtitle->iso639_2, sizeof( subtitle->iso639_2 ), "%s",
                  lang->iso639_2);
        subtitle->format = PICTURESUB;
        subtitle->source = VOBSUB;
        subtitle->config.dest = RENDERSUB;  // By default render (burn-in) the VOBSUB.
        subtitle->stream_type = 0xbd;
        subtitle->substream_type = 0x20 + position;
        subtitle->codec = WORK_DECVOBSUB;

        subtitle->type = lang_extension;

        memcpy( subtitle->palette,
                ifo->vts_pgcit->pgci_srp[title_pgcn-1].pgc->palette,
                16 * sizeof( uint32_t ) );
        subtitle->palette_set = 1;

        // NOTE(review): these strcat's assume subtitle->lang has room
        // for the longest suffix — verify against the struct definition
        switch( lang_extension )
        {
            case 2:
                strcat( subtitle->lang, " (Caption with bigger size character)" );
                break;
            case 3:
                strcat( subtitle->lang, " (Caption for Children)" );
                break;
            case 5:
                strcat( subtitle->lang, " (Closed Caption)" );
                break;
            case 6:
                strcat( subtitle->lang, " (Closed Caption with bigger size character)" );
                break;
            case 7:
                strcat( subtitle->lang, " (Closed Caption for Children)" );
                break;
            case 9:
                strcat( subtitle->lang, " (Forced Caption)" );
                break;
            case 13:
                strcat( subtitle->lang, " (Director's Commentary)" );
                break;
            case 14:
                strcat( subtitle->lang, " (Director's Commentary with bigger size character)" );
                break;
            case 15:
                strcat( subtitle->lang, " (Director's Commentary for Children)" );
                break; // fix: make the (previously harmless) fallthrough explicit
            default:
                break;
        }

        hb_log( "scan: id=0x%x, lang=%s, 3cc=%s ext=%i", subtitle->id,
                subtitle->lang, subtitle->iso639_2, lang_extension );

        hb_list_add( title->list_subtitle, subtitle );
    }

    /* Chapters */
    PgcWalkInit( pgcn_map );
    c = 0;
    do
    {
        pgc = ifo->vts_pgcit->pgci_srp[pgcn-1].pgc;

        for (i = pgn; i <= pgc->nr_of_programs; i++)
        {
            char chapter_title[80];
            chapter = calloc( sizeof( hb_chapter_t ), 1 );

            chapter->pgcn = pgcn;
            chapter->pgn = i;
            chapter->index = c + 1;
            sprintf( chapter_title, "Chapter %d", chapter->index );
            hb_chapter_set_title( chapter, chapter_title );
            hb_list_add( title->list_chapter, chapter );
            c++;
        }

        pgn = 1;
    } while ((pgcn = NextPgcn(ifo, pgcn, pgcn_map)) != 0);

    hb_log( "scan: title %d has %d chapters", t, c );

    count = hb_list_count( title->list_chapter );
    for (i = 0; i < count; i++)
    {
        chapter = hb_list_item( title->list_chapter, i );
        pgcn = chapter->pgcn;
        pgn = chapter->pgn;
        pgc = ifo->vts_pgcit->pgci_srp[pgcn-1].pgc;

        /* Start cell */
        chapter->cell_start = pgc->program_map[pgn-1] - 1;
        chapter->block_start = pgc->cell_playback[chapter->cell_start].first_sector;
        // if there are no more programs in this pgc, the end cell is the
        // last cell. Otherwise it's the cell before the start cell of the
        // next program.
        if ( pgn == pgc->nr_of_programs )
        {
            chapter->cell_end = pgc->nr_of_cells - 1;
        }
        else
        {
            chapter->cell_end = pgc->program_map[pgn] - 2; // fix: removed stray double semicolon
        }
        chapter->block_end = pgc->cell_playback[chapter->cell_end].last_sector;

        /* Block count, duration */
        chapter->block_count = 0;
        chapter->duration = 0;

        cell_cur = chapter->cell_start;
        while( cell_cur <= chapter->cell_end )
        {
#define cp pgc->cell_playback[cell_cur]
            chapter->block_count += cp.last_sector + 1 - cp.first_sector;
            chapter->duration += 90LL * dvdtime2msec( &cp.playback_time );
#undef cp
            cell_cur = FindNextCell( pgc, cell_cur );
        }
    }

    for( i = 0; i < hb_list_count( title->list_chapter ); i++ )
    {
        chapter = hb_list_item( title->list_chapter, i );

        int seconds = ( chapter->duration + 45000 ) / 90000;
        chapter->hours = ( seconds / 3600 );
        chapter->minutes = ( seconds % 3600 ) / 60;
        chapter->seconds = ( seconds % 60 );

        hb_log( "scan: chap %d c=%d->%d, b=%"PRIu64"->%"PRIu64" (%"PRIu64"), %"PRId64" ms",
                chapter->index, chapter->cell_start, chapter->cell_end,
                chapter->block_start, chapter->block_end,
                chapter->block_count, chapter->duration / 90 );
    }

    /* Get aspect. We don't get width/height/rate infos here as
       they tend to be wrong */
    switch( ifo->vtsi_mat->vts_video_attr.display_aspect_ratio )
    {
        case 0:
            title->container_aspect = 4. / 3.;
            break;
        case 3:
            title->container_aspect = 16. / 9.;
            break;
        default:
            hb_log( "scan: unknown aspect" );
            goto fail;
    }

    hb_log( "scan: aspect = %g", title->container_aspect );

    /* This title is ok so far */
    goto cleanup;

fail:
    hb_title_close( &title );

cleanup:
    if( ifo ) ifoClose( ifo );

    return title;
}
/***********************************************************************
 * find_title
 **********************************************************************/
/* Return the list index of the hb_title_t whose 'index' field matches
 * 'title', or -1 when no such entry exists. */
static int find_title( hb_list_t * list_title, int title )
{
    int count = hb_list_count( list_title );
    int pos;

    for ( pos = 0; pos < count; pos++ )
    {
        hb_title_t * candidate = hb_list_item( list_title, pos );
        if ( candidate->index == title )
        {
            return pos;
        }
    }
    return -1;
}
/* Read up to 'blocks' blocks from the current dvdnav position looking
 * for a real menu.  Returns 1 when a NAV packet is seen on title 0 with
 * buttons whose activation window exceeds 15 seconds (i.e. what looks
 * like a genuine menu rather than a fake one); returns 0 on read error,
 * DVD stop, an infinite still frame, or when the block budget runs out. */
static int skip_to_menu( dvdnav_t * dvdnav, int blocks )
{
    int ii;
    int result, event, len;
    uint8_t buf[HB_DVD_READ_BUFFER_SIZE];

    for ( ii = 0; ii < blocks; ii++ )
    {
        result = dvdnav_get_next_block( dvdnav, buf, &event, &len );
        if ( result == DVDNAV_STATUS_ERR )
        {
            hb_error("dvdnav: Read Error, %s", dvdnav_err_to_string(dvdnav));
            return 0;
        }
        switch ( event )
        {
        case DVDNAV_BLOCK_OK:
            break;

        case DVDNAV_CELL_CHANGE:
        {
        } break;

        case DVDNAV_STILL_FRAME:
        {
            dvdnav_still_event_t *event;
            event = (dvdnav_still_event_t*)buf;
            dvdnav_still_skip( dvdnav );
            if ( event->length == 255 )
            {
                // Infinite still. Can't be the main feature unless
                // you like watching paint dry.
                return 0;
            }
        } break;

        case DVDNAV_WAIT:
            dvdnav_wait_skip( dvdnav );
            break;

        case DVDNAV_STOP:
            return 0;

        case DVDNAV_HOP_CHANNEL:
            break;

        case DVDNAV_NAV_PACKET:
        {
            pci_t *pci = dvdnav_get_current_nav_pci( dvdnav );
            if ( pci == NULL ) break;

            int buttons = pci->hli.hl_gi.btn_ns;

            int title, part;
            result = dvdnav_current_title_info( dvdnav, &title, &part );
            if (result != DVDNAV_STATUS_OK)
            {
                hb_log("dvdnav title info: %s", dvdnav_err_to_string(dvdnav));
            }
            else if ( title == 0 && buttons > 0 )
            {
                // Check button activation duration to see if this
                // isn't another fake menu.
                if ( pci->hli.hl_gi.btn_se_e_ptm - pci->hli.hl_gi.hli_s_ptm >
                     15 * 90000 )
                {
                    // Found what appears to be a good menu.
                    return 1;
                }
            }
        } break;

        case DVDNAV_VTS_CHANGE:
        {
            dvdnav_vts_change_event_t *event;
            event = (dvdnav_vts_change_event_t*)buf;
            // Some discs initialize the vts with the "first play" item
            // and some don't seem to. So if we see it is uninitialized,
            // set it.
            if ( event->new_vtsN <= 0 )
                result = dvdnav_title_play( dvdnav, 1 );
        } break;

        case DVDNAV_HIGHLIGHT:
            break;

        case DVDNAV_AUDIO_STREAM_CHANGE:
            break;

        case DVDNAV_SPU_STREAM_CHANGE:
            break;

        case DVDNAV_SPU_CLUT_CHANGE:
            break;

        case DVDNAV_NOP:
            break;

        default:
            break;
        }
    }
    return 0;
}
/* Activate button 'button' (0-based) on the current menu of 'dvdnav'
 * and follow playback to see where it leads.  Returns a title number as
 * soon as playback lands on a known title longer than 10 minutes;
 * otherwise returns the longest title encountered during the walk, or
 * -1 when nothing usable is found. */
static int try_button( dvdnav_t * dvdnav, int button, hb_list_t * list_title )
{
    int result, event, len;
    uint8_t buf[HB_DVD_READ_BUFFER_SIZE];
    int ii, jj;
    int32_t cur_title = 0, title, part;
    uint64_t longest_duration = 0;
    int longest = -1;

    pci_t *pci = dvdnav_get_current_nav_pci( dvdnav );

    // dvdnav buttons are 1-based
    result = dvdnav_button_select_and_activate( dvdnav, pci, button + 1 );
    if (result != DVDNAV_STATUS_OK)
    {
        hb_log("dvdnav_button_select_and_activate: %s", dvdnav_err_to_string(dvdnav));
    }

    result = dvdnav_current_title_info( dvdnav, &title, &part );
    if (result != DVDNAV_STATUS_OK)
        hb_log("dvdnav cur title info: %s", dvdnav_err_to_string(dvdnav));

    cur_title = title;

    // Walk up to 10 x 2000 blocks of playback, tracking which title the
    // VM is in after each cell/VTS transition.
    for (jj = 0; jj < 10; jj++)
    {
        for (ii = 0; ii < 2000; ii++)
        {
            result = dvdnav_get_next_block( dvdnav, buf, &event, &len );
            if ( result == DVDNAV_STATUS_ERR )
            {
                hb_error("dvdnav: Read Error, %s", dvdnav_err_to_string(dvdnav));
                goto done;
            }
            switch ( event )
            {
            case DVDNAV_BLOCK_OK:
                break;

            case DVDNAV_CELL_CHANGE:
            {
                result = dvdnav_current_title_info( dvdnav, &title, &part );
                if (result != DVDNAV_STATUS_OK)
                    hb_log("dvdnav title info: %s", dvdnav_err_to_string(dvdnav));

                cur_title = title;
                // Note, some "fake" titles have long advertised durations
                // but then jump to the real title early in playback.
                // So keep reading after finding a long title to detect
                // such cases.
            } break;

            case DVDNAV_STILL_FRAME:
            {
                dvdnav_still_event_t *event;
                event = (dvdnav_still_event_t*)buf;
                dvdnav_still_skip( dvdnav );
                if ( event->length == 255 )
                {
                    // Infinite still. Can't be the main feature unless
                    // you like watching paint dry.
                    goto done;
                }
            } break;

            case DVDNAV_WAIT:
                dvdnav_wait_skip( dvdnav );
                break;

            case DVDNAV_STOP:
                goto done;

            case DVDNAV_HOP_CHANNEL:
                break;

            case DVDNAV_NAV_PACKET:
            {
            } break;

            case DVDNAV_VTS_CHANGE:
            {
                result = dvdnav_current_title_info( dvdnav, &title, &part );
                if (result != DVDNAV_STATUS_OK)
                    hb_log("dvdnav title info: %s", dvdnav_err_to_string(dvdnav));

                cur_title = title;
                // Note, some "fake" titles have long advertised durations
                // but then jump to the real title early in playback.
                // So keep reading after finding a long title to detect
                // such cases.
            } break;

            case DVDNAV_HIGHLIGHT:
                break;

            case DVDNAV_AUDIO_STREAM_CHANGE:
                break;

            case DVDNAV_SPU_STREAM_CHANGE:
                break;

            case DVDNAV_SPU_CLUT_CHANGE:
                break;

            case DVDNAV_NOP:
                break;

            default:
                break;
            }
        }
        // Check if the current title is long enough to qualify
        // as the main feature.
        if ( cur_title > 0 )
        {
            hb_title_t * hbtitle;
            int index;
            index = find_title( list_title, cur_title );
            hbtitle = hb_list_item( list_title, index );
            if ( hbtitle != NULL )
            {
                if ( hbtitle->duration / 90000 > 10 * 60 )
                {
                    hb_deep_log( 3, "dvdnav: Found candidate feature title %d duration %02d:%02d:%02d on button %d",
                                 cur_title, hbtitle->hours, hbtitle->minutes,
                                 hbtitle->seconds, button+1 );
                    return cur_title;
                }
                if ( hbtitle->duration > longest_duration )
                {
                    longest_duration = hbtitle->duration;
                    longest = title;
                }
            }
            // Some titles have long lead-ins. Try skipping it.
            dvdnav_next_pg_search( dvdnav );
        }
    }

done:
    if ( longest != -1 )
    {
        hb_title_t * hbtitle;
        int index;
        index = find_title( list_title, longest );
        hbtitle = hb_list_item( list_title, index );
        if ( hbtitle != NULL )
        {
            hb_deep_log( 3, "dvdnav: Found candidate feature title %d duration %02d:%02d:%02d on button %d",
                         longest, hbtitle->hours, hbtitle->minutes,
                         hbtitle->seconds, button+1 );
        }
    }
    return longest;
}
/* Enter menu 'menu' (or, when DVD_MENU_Escape is passed, whatever menu
 * is currently active — a deliberate hack) and probe each of its
 * buttons via try_button() on a duplicated dvdnav VM.  Returns the
 * number of the longest title reachable from the menu, short-circuiting
 * once a candidate exceeds 75% of 'fallback_duration'; returns -1 when
 * no title is found. */
static int try_menu(
    hb_dvdnav_t * d,
    hb_list_t * list_title,
    DVDMenuID_t menu,
    uint64_t fallback_duration )
{
    int result, event, len;
    uint8_t buf[HB_DVD_READ_BUFFER_SIZE];
    int ii, jj;
    int32_t cur_title, title, part;
    uint64_t longest_duration = 0;
    int longest = -1;

    // A bit of a hack here. Abusing Escape menu to mean use whatever
    // current menu is already set.
    if ( menu != DVD_MENU_Escape )
    {
        result = dvdnav_menu_call( d->dvdnav, menu );
        if ( result != DVDNAV_STATUS_OK )
        {
            // Sometimes the "first play" item doesn't initialize the
            // initial VTS. So do it here.
            result = dvdnav_title_play( d->dvdnav, 1 );
            result = dvdnav_menu_call( d->dvdnav, menu );
            if ( result != DVDNAV_STATUS_OK )
            {
                hb_error("dvdnav: Can not set dvd menu, %s", dvdnav_err_to_string(d->dvdnav));
                goto done;
            }
        }
    }

    result = dvdnav_current_title_info( d->dvdnav, &title, &part );
    if (result != DVDNAV_STATUS_OK)
        hb_log("dvdnav title info: %s", dvdnav_err_to_string(d->dvdnav));

    cur_title = title;

    // Walk up to 4 x 4000 blocks looking for a NAV packet that carries
    // real menu buttons.
    for (jj = 0; jj < 4; jj++)
    {
        for (ii = 0; ii < 4000; ii++)
        {
            result = dvdnav_get_next_block( d->dvdnav, buf, &event, &len );
            if ( result == DVDNAV_STATUS_ERR )
            {
                hb_error("dvdnav: Read Error, %s", dvdnav_err_to_string(d->dvdnav));
                goto done;
            }
            switch ( event )
            {
            case DVDNAV_BLOCK_OK:
                break;

            case DVDNAV_CELL_CHANGE:
            {
                result = dvdnav_current_title_info( d->dvdnav, &title, &part );
                if (result != DVDNAV_STATUS_OK)
                    hb_log("dvdnav title info: %s", dvdnav_err_to_string(d->dvdnav));

                cur_title = title;
            } break;

            case DVDNAV_STILL_FRAME:
            {
                dvdnav_still_event_t *event;
                event = (dvdnav_still_event_t*)buf;
                dvdnav_still_skip( d->dvdnav );
                if ( event->length == 255 )
                {
                    // Infinite still. There won't be any menus after this.
                    goto done;
                }
            } break;

            case DVDNAV_WAIT:
                dvdnav_wait_skip( d->dvdnav );
                break;

            case DVDNAV_STOP:
                goto done;

            case DVDNAV_HOP_CHANNEL:
                break;

            case DVDNAV_NAV_PACKET:
            {
                pci_t *pci = dvdnav_get_current_nav_pci( d->dvdnav );
                int kk;
                int buttons;
                if ( pci == NULL ) break;

                buttons = pci->hli.hl_gi.btn_ns;

                // If we are on a menu that has buttons and
                // the button activation duration is long enough
                // that this isn't another fake menu.
                if ( cur_title == 0 && buttons > 0 &&
                     pci->hli.hl_gi.btn_se_e_ptm - pci->hli.hl_gi.hli_s_ptm >
                     15 * 90000 )
                {
                    // Probe every button on a duplicate VM so the real
                    // VM's position is not disturbed.
                    for (kk = 0; kk < buttons; kk++)
                    {
                        dvdnav_t *dvdnav_copy;

                        result = dvdnav_dup( &dvdnav_copy, d->dvdnav );
                        if (result != DVDNAV_STATUS_OK)
                        {
                            hb_log("dvdnav dup failed: %s", dvdnav_err_to_string(d->dvdnav));
                            goto done;
                        }
                        title = try_button( dvdnav_copy, kk, list_title );
                        dvdnav_free_dup( dvdnav_copy );

                        if ( title >= 0 )
                        {
                            hb_title_t * hbtitle;
                            int index;
                            index = find_title( list_title, title );
                            hbtitle = hb_list_item( list_title, index );
                            if ( hbtitle != NULL )
                            {
                                if ( hbtitle->duration > longest_duration )
                                {
                                    longest_duration = hbtitle->duration;
                                    longest = title;
                                    // Good enough: stop once we reach 75%
                                    // of the fallback title's duration.
                                    if ((float)fallback_duration * 0.75 < longest_duration)
                                        goto done;
                                }
                            }
                        }
                    }
                    goto done;
                }
            } break;

            case DVDNAV_VTS_CHANGE:
            {
                result = dvdnav_current_title_info( d->dvdnav, &title, &part );
                if (result != DVDNAV_STATUS_OK)
                    hb_log("dvdnav title info: %s", dvdnav_err_to_string(d->dvdnav));

                cur_title = title;
            } break;

            case DVDNAV_HIGHLIGHT:
                break;

            case DVDNAV_AUDIO_STREAM_CHANGE:
                break;

            case DVDNAV_SPU_STREAM_CHANGE:
                break;

            case DVDNAV_SPU_CLUT_CHANGE:
                break;

            case DVDNAV_NOP:
                break;

            default:
                break;
            }
        }
        // Sometimes the menu is preceeded by a intro that just
        // gets restarted when hitting the menu button. So
        // try skipping with the skip forward button. Then
        // try hitting the menu again.
        if ( !(jj & 1) )
        {
            dvdnav_next_pg_search( d->dvdnav );
        }
        else
        {
            result = dvdnav_menu_call( d->dvdnav, menu );
        }
    }

done:
    return longest;
}
/* Heuristically determine the disc's main feature.  Tries the
 * first-play path, then the root menu, then the title menu, comparing
 * each candidate against the longest title on the disc (the fallback).
 * Returns a title number; falls back to the longest title when the menu
 * candidates look implausible. */
static int hb_dvdnav_main_feature( hb_dvd_t * e, hb_list_t * list_title )
{
    hb_dvdnav_t * d = &(e->dvdnav);
    int longest_root = -1;
    int longest_title = -1;
    int longest_fallback = 0;
    int ii;
    uint64_t longest_duration_root = 0;
    uint64_t longest_duration_title = 0;
    uint64_t longest_duration_fallback = 0;
    uint64_t avg_duration = 0;
    int avg_cnt = 0;
    hb_title_t * title;
    int index;

    hb_deep_log( 2, "dvdnav: Searching menus for main feature" );

    // Fallback candidate: the longest title overall.  Also average the
    // durations of titles over 30 minutes for the sanity check below.
    for ( ii = 0; ii < hb_list_count( list_title ); ii++ )
    {
        title = hb_list_item( list_title, ii );
        if ( title->duration > longest_duration_fallback )
        {
            longest_duration_fallback = title->duration;
            longest_fallback = title->index;
        }
        if ( title->duration > 90000L * 60 * 30 )
        {
            avg_duration += title->duration;
            avg_cnt++;
        }
    }
    if ( avg_cnt )
        avg_duration /= avg_cnt;

    index = find_title( list_title, longest_fallback );
    title = hb_list_item( list_title, index );
    if ( title )
    {
        hb_deep_log( 2, "dvdnav: Longest title %d duration %02d:%02d:%02d",
                     longest_fallback, title->hours, title->minutes,
                     title->seconds );
    }

    dvdnav_reset( d->dvdnav );

    // First-play path: skip forward to the first real menu and probe it
    // in place (DVD_MENU_Escape = "use current menu").
    if ( skip_to_menu( d->dvdnav, 2000 ) )
    {
        longest_root = try_menu( d, list_title, DVD_MENU_Escape, longest_duration_fallback );
        if ( longest_root >= 0 )
        {
            index = find_title( list_title, longest_root );
            title = hb_list_item( list_title, index );
            if ( title )
            {
                longest_duration_root = title->duration;
                hb_deep_log( 2, "dvdnav: Found first-play title %d duration %02d:%02d:%02d",
                             longest_root, title->hours, title->minutes, title->seconds );
            }
        }
        else
        {
            hb_deep_log( 2, "dvdnav: No first-play menu title found" );
        }
    }

    // Root menu, tried when the first-play result is absent or suspiciously
    // shorter than 70% of the longest title.
    if ( longest_root < 0 ||
         (float)longest_duration_fallback * 0.7 > longest_duration_root)
    {
        longest_root = try_menu( d, list_title, DVD_MENU_Root, longest_duration_fallback );
        if ( longest_root >= 0 )
        {
            index = find_title( list_title, longest_root );
            title = hb_list_item( list_title, index );
            if ( title )
            {
                longest_duration_root = title->duration;
                hb_deep_log( 2, "dvdnav: Found root title %d duration %02d:%02d:%02d",
                             longest_root, title->hours, title->minutes, title->seconds );
            }
        }
        else
        {
            hb_deep_log( 2, "dvdnav: No root menu title found" );
        }
    }

    // Title menu, same escalation rule.
    if ( longest_root < 0 ||
         (float)longest_duration_fallback * 0.7 > longest_duration_root)
    {
        longest_title = try_menu( d, list_title, DVD_MENU_Title, longest_duration_fallback );
        if ( longest_title >= 0 )
        {
            index = find_title( list_title, longest_title );
            title = hb_list_item( list_title, index );
            if ( title )
            {
                longest_duration_title = title->duration;
                hb_deep_log( 2, "dvdnav: found title %d duration %02d:%02d:%02d",
                             longest_title, title->hours, title->minutes,
                             title->seconds );
            }
        }
        else
        {
            hb_deep_log( 2, "dvdnav: No title menu title found" );
        }
    }

    // Pick the better of the root/title menu candidates.
    uint64_t longest_duration;
    int longest;
    if ( longest_duration_root > longest_duration_title )
    {
        longest_duration = longest_duration_root;
        longest = longest_root;
    }
    else
    {
        longest_duration = longest_duration_title;
        longest = longest_title;
    }

    // Sanity check: if the menu candidate is both much shorter than the
    // fallback and under 30 minutes, and it doesn't resemble the average
    // long-title duration, trust the fallback instead.
    // NOTE(review): when no menu candidate was found, longest_duration is 0
    // and the float division below yields inf; factor stays >= 1 so the
    // fallback is NOT taken and -1 can be returned — presumably intended,
    // verify with callers.
    if ((float)longest_duration_fallback * 0.7 > longest_duration &&
        longest_duration < 90000L * 60 * 30 )
    {
        float factor = (float)avg_duration / longest_duration;
        if ( factor > 1 )
            factor = 1 / factor;
        if ( avg_cnt > 10 && factor < 0.7 )
        {
            longest = longest_fallback;
            hb_deep_log( 2, "dvdnav: Using longest title %d", longest );
        }
    }
    return longest;
}
/***********************************************************************
* hb_dvdnav_start
***********************************************************************
* Title and chapter start at 1
**********************************************************************/
/***********************************************************************
 * hb_dvdnav_start
 ***********************************************************************
 * Position the dvdnav VM at chapter 'c' (1-based) of 'title'.
 * Returns 1 on success, 0 on failure.
 **********************************************************************/
static int hb_dvdnav_start( hb_dvd_t * e, hb_title_t *title, int c )
{
    hb_dvdnav_t * d = &(e->dvdnav);
    int idx = title->index;
    hb_chapter_t * chap;
    dvdnav_status_t status;

    d->title_block_count = title->block_count;
    d->list_chapter      = title->list_chapter;

    /* A stopped dvdnav can't be revived in place; re-open it first. */
    if ( d->stopped && !hb_dvdnav_reset( d ) )
    {
        return 0;
    }
    dvdnav_reset( d->dvdnav );

    chap = hb_list_item( title->list_chapter, c - 1 );
    status = ( chap != NULL )
           ? dvdnav_program_play( d->dvdnav, idx, chap->pgcn, chap->pgn )
           : dvdnav_part_play( d->dvdnav, idx, 1 );
    if ( status != DVDNAV_STATUS_OK )
    {
        hb_error( "dvd: dvdnav_*_play failed - %s",
                  dvdnav_err_to_string(d->dvdnav) );
        return 0;
    }

    d->title   = idx;
    d->stopped = 0;
    d->chapter = 0;
    d->cell    = 0;
    return 1;
}
/***********************************************************************
* hb_dvdnav_stop
***********************************************************************
*
**********************************************************************/
/* Intentionally a no-op: nothing needs to happen on stop for the
 * dvdnav reader; the function exists to satisfy hb_dvd_func_t. */
static void hb_dvdnav_stop( hb_dvd_t * e )
{
}
/***********************************************************************
* hb_dvdnav_seek
***********************************************************************
*
**********************************************************************/
/*
 * Seek to fraction f (0.0 .. 1.0) of the current title. Returns 1 on
 * success, 0 on failure or if playback has already stopped.
 */
static int hb_dvdnav_seek( hb_dvd_t * e, float f )
{
    hb_dvdnav_t * d = &(e->dvdnav);
    uint64_t sector = f * d->title_block_count; // absolute target sector
    int result, event, len;
    uint8_t buf[HB_DVD_READ_BUFFER_SIZE];
    int done = 0, ii;

    if (d->stopped)
    {
        return 0;
    }

    // XXX the current version of libdvdnav can't seek outside the current
    // PGC. Check if the place we're seeking to is in a different
    // PGC. Position there & adjust the offset if so.
    uint64_t pgc_offset = 0;
    uint64_t chap_offset = 0;
    hb_chapter_t *pgc_change = hb_list_item(d->list_chapter, 0 );
    for ( ii = 0; ii < hb_list_count( d->list_chapter ); ++ii )
    {
        hb_chapter_t *chapter = hb_list_item( d->list_chapter, ii );
        uint64_t chap_len = chapter->block_end - chapter->block_start + 1;

        if ( chapter->pgcn != pgc_change->pgcn )
        {
            // this chapter's in a different pgc from the previous - note the
            // change so we can make sector offset's be pgc relative.
            pgc_offset = chap_offset;
            pgc_change = chapter;
        }
        if ( chap_offset <= sector && sector < chap_offset + chap_len )
        {
            // this chapter contains the sector we want - see if it's in a
            // different pgc than the one we're currently in.
            int32_t title, pgcn, pgn;
            if (dvdnav_current_title_program( d->dvdnav, &title, &pgcn, &pgn ) != DVDNAV_STATUS_OK)
                hb_log("dvdnav cur pgcn err: %s", dvdnav_err_to_string(d->dvdnav));
            // If we find ourselves in a new title, it means a title
            // transition was made while reading data. Jumping between
            // titles can cause the vm to get into a bad state. So
            // reset the vm in this case.
            if ( d->title != title )
                dvdnav_reset( d->dvdnav );
            if ( d->title != title || chapter->pgcn != pgcn )
            {
                // this chapter is in a different pgc - switch to it.
                if (dvdnav_program_play(d->dvdnav, d->title, chapter->pgcn, chapter->pgn) != DVDNAV_STATUS_OK)
                    hb_log("dvdnav prog play err: %s", dvdnav_err_to_string(d->dvdnav));
            }
            // seek sectors are pgc-relative so remove the pgc start sector.
            sector -= pgc_offset;
            break;
        }
        chap_offset += chap_len;
    }

    // dvdnav will not let you seek or poll current position
    // till it reaches a certain point in parsing. so we
    // have to get blocks until we reach a cell
    // Put an arbitrary limit of 100 blocks on how long we search
    for (ii = 0; ii < 100 && !done; ii++)
    {
        result = dvdnav_get_next_block( d->dvdnav, buf, &event, &len );
        if ( result == DVDNAV_STATUS_ERR )
        {
            hb_error("dvdnav: Read Error, %s", dvdnav_err_to_string(d->dvdnav));
            return 0;
        }
        switch ( event )
        {
        case DVDNAV_BLOCK_OK:
        case DVDNAV_CELL_CHANGE:
            // Reached a cell: dvdnav is now seekable.
            done = 1;
            break;

        case DVDNAV_STILL_FRAME:
            dvdnav_still_skip( d->dvdnav );
            break;

        case DVDNAV_WAIT:
            dvdnav_wait_skip( d->dvdnav );
            break;

        case DVDNAV_STOP:
            hb_log("dvdnav: stop encountered during seek");
            d->stopped = 1;
            return 0;

        case DVDNAV_HOP_CHANNEL:
        case DVDNAV_NAV_PACKET:
        case DVDNAV_VTS_CHANGE:
        case DVDNAV_HIGHLIGHT:
        case DVDNAV_AUDIO_STREAM_CHANGE:
        case DVDNAV_SPU_STREAM_CHANGE:
        case DVDNAV_SPU_CLUT_CHANGE:
        case DVDNAV_NOP:
        default:
            break;
        }
    }

    if (dvdnav_sector_search(d->dvdnav, sector, SEEK_SET) != DVDNAV_STATUS_OK)
    {
        hb_error( "dvd: dvdnav_sector_search failed - %s",
                  dvdnav_err_to_string(d->dvdnav) );
        return 0;
    }
    // Force chapter/cell tracking to restart from the new position.
    d->chapter = 0;
    d->cell = 0;
    return 1;
}
/***********************************************************************
* hb_dvdnav_read
***********************************************************************
*
**********************************************************************/
/*
 * Read the next MPEG block from the title being ripped. Returns a buffer
 * on success, or NULL when the title is finished (STOP event, title/pgc
 * transition, cell repeat) or on an unrecoverable read error.
 */
static hb_buffer_t * hb_dvdnav_read( hb_dvd_t * e )
{
    hb_dvdnav_t * d = &(e->dvdnav);
    int result, event, len;
    int chapter = 0;
    int error_count = 0;
    hb_buffer_t *b = hb_buffer_init( HB_DVD_READ_BUFFER_SIZE );

    while ( 1 )
    {
        if (d->stopped)
        {
            hb_buffer_close( &b );
            return NULL;
        }
        result = dvdnav_get_next_block( d->dvdnav, b->data, &event, &len );
        if ( result == DVDNAV_STATUS_ERR )
        {
            hb_error("dvdnav: Read Error, %s", dvdnav_err_to_string(d->dvdnav));
            // Try to step past the bad sector; give up entirely after 500
            // consecutive failures.
            if (dvdnav_sector_search(d->dvdnav, 1, SEEK_CUR) != DVDNAV_STATUS_OK)
            {
                hb_error( "dvd: dvdnav_sector_search failed - %s",
                          dvdnav_err_to_string(d->dvdnav) );
                hb_buffer_close( &b );
                return NULL;
            }
            error_count++;
            if (error_count > 500)
            {
                hb_error("dvdnav: Error, too many consecutive read errors");
                hb_buffer_close( &b );
                return NULL;
            }
            continue;
        }
        switch ( event )
        {
        case DVDNAV_BLOCK_OK:
            // We have received a regular block of the currently playing
            // MPEG stream.

            // The muxers expect to only get chapter 2 and above
            // They write chapter 1 when chapter 2 is detected.
            if (chapter > 1)
                b->s.new_chap = chapter;
            chapter = 0;
            error_count = 0;
            return b;

        case DVDNAV_NOP:
            /*
             * Nothing to do here.
             */
            break;

        case DVDNAV_STILL_FRAME:
            /*
             * We have reached a still frame. A real player application
             * would wait the amount of time specified by the still's
             * length while still handling user input to make menus and
             * other interactive stills work. A length of 0xff means an
             * indefinite still which has to be skipped indirectly by some
             * user interaction.
             */
            dvdnav_still_skip( d->dvdnav );
            break;

        case DVDNAV_WAIT:
            /*
             * We have reached a point in DVD playback, where timing is
             * critical. Player application with internal fifos can
             * introduce state inconsistencies, because libdvdnav is
             * always the fifo's length ahead in the stream compared to
             * what the application sees. Such applications should wait
             * until their fifos are empty when they receive this type of
             * event.
             */
            dvdnav_wait_skip( d->dvdnav );
            break;

        case DVDNAV_SPU_CLUT_CHANGE:
            /*
             * Player applications should pass the new colour lookup table
             * to their SPU decoder
             */
            break;

        case DVDNAV_SPU_STREAM_CHANGE:
            /*
             * Player applications should inform their SPU decoder to
             * switch channels
             */
            break;

        case DVDNAV_AUDIO_STREAM_CHANGE:
            /*
             * Player applications should inform their audio decoder to
             * switch channels
             */
            break;

        case DVDNAV_HIGHLIGHT:
            /*
             * Player applications should inform their overlay engine to
             * highlight the given button
             */
            break;

        case DVDNAV_VTS_CHANGE:
            /*
             * Some status information like video aspect and video scale
             * permissions do not change inside a VTS. Therefore this
             * event can be used to query such information only when
             * necessary and update the decoding/displaying accordingly.
             */
            {
                int tt = 0, pgcn = 0, pgn = 0;

                dvdnav_current_title_program(d->dvdnav, &tt, &pgcn, &pgn);
                if (tt != d->title)
                {
                    // Transition to another title signals that we are done.
                    hb_buffer_close( &b );
                    return NULL;
                }
            }
            break;

        case DVDNAV_CELL_CHANGE:
            /*
             * Some status information like the current Title and Part
             * numbers do not change inside a cell. Therefore this event
             * can be used to query such information only when necessary
             * and update the decoding/displaying accordingly.
             */
            {
                dvdnav_cell_change_event_t * cell_event;
                int tt = 0, pgcn = 0, pgn = 0, c;

                cell_event = (dvdnav_cell_change_event_t*)b->data;

                dvdnav_current_title_program(d->dvdnav, &tt, &pgcn, &pgn);
                if (tt != d->title)
                {
                    // Transition to another title signals that we are done.
                    hb_buffer_close( &b );
                    return NULL;
                }
                c = FindChapterIndex(d->list_chapter, pgcn, pgn);
                if (c != d->chapter)
                {
                    if (c < d->chapter)
                    {
                        // Some titles end with a 'link' back to the beginning so
                        // a transition to an earlier chapter means we're done.
                        hb_buffer_close( &b );
                        return NULL;
                    }
                    // Remember the new chapter so the next BLOCK_OK or
                    // NAV_PACKET can tag its buffer with it.
                    chapter = d->chapter = c;
                }
                else if ( cell_event->cellN <= d->cell )
                {
                    // Repeating a cell within the same chapter: loop detected.
                    hb_buffer_close( &b );
                    return NULL;
                }
                d->cell = cell_event->cellN;
            }
            break;

        case DVDNAV_NAV_PACKET:
            /*
             * A NAV packet provides PTS discontinuity information, angle
             * linking information and button definitions for DVD menus.
             * Angles are handled completely inside libdvdnav. For the
             * menus to work, the NAV packet information has to be passed
             * to the overlay engine of the player so that it knows the
             * dimensions of the button areas.
             */

            // mpegdemux expects to get these. I don't think it does
            // anything useful with them however.

            // The muxers expect to only get chapter 2 and above
            // They write chapter 1 when chapter 2 is detected.
            if (chapter > 1)
                b->s.new_chap = chapter;
            chapter = 0;
            return b;

            break; // not reached

        case DVDNAV_HOP_CHANNEL:
            /*
             * This event is issued whenever a non-seamless operation has
             * been executed. Applications with fifos should drop the
             * fifos content to speed up responsiveness.
             */
            break;

        case DVDNAV_STOP:
            /*
             * Playback should end here.
             */
            d->stopped = 1;
            hb_buffer_close( &b );
            return NULL;

        default:
            break;
        }
    }
    // not reached: the loop only exits via return
    hb_buffer_close( &b );
    return NULL;
}
/***********************************************************************
* hb_dvdnav_chapter
***********************************************************************
* Returns in which chapter the next block to be read is.
* Chapter numbers start at 1.
**********************************************************************/
static int hb_dvdnav_chapter( hb_dvd_t * e )
{
    hb_dvdnav_t * d = &(e->dvdnav);
    int32_t title_num, pgcn, pgn;

    // Ask libdvdnav where we are; -1 if it can't tell us yet.
    if (dvdnav_current_title_program(d->dvdnav, &title_num, &pgcn, &pgn) !=
        DVDNAV_STATUS_OK)
    {
        return -1;
    }
    // Map the current program chain / program to our chapter list.
    return FindChapterIndex( d->list_chapter, pgcn, pgn );
}
/***********************************************************************
* hb_dvdnav_close
***********************************************************************
* Closes and frees everything
**********************************************************************/
static void hb_dvdnav_close( hb_dvd_t ** _d )
{
    hb_dvdnav_t * d = &((*_d)->dvdnav);

    // Tear down in reverse order of acquisition.
    if ( d->dvdnav != NULL )
    {
        dvdnav_close( d->dvdnav );
    }
    if ( d->vmg != NULL )
    {
        ifoClose( d->vmg );
    }
    if ( d->reader != NULL )
    {
        DVDClose( d->reader );
    }
    free( d->path );
    free( d );
    *_d = NULL; // prevent the caller from using the dangling handle
}
/***********************************************************************
* hb_dvdnav_angle_count
***********************************************************************
* Returns the number of angles supported.
**********************************************************************/
/*
 * Returns the number of angles supported by the current title, falling
 * back to 1 if libdvdnav cannot report angle information.
 */
static int hb_dvdnav_angle_count( hb_dvd_t * e )
{
    hb_dvdnav_t * d = &(e->dvdnav);
    int current, angle_count;

    // Fix: the second argument was the garbled text "¤t" (a broken
    // HTML entity for "&current"), which does not compile.
    if (dvdnav_get_angle_info( d->dvdnav, &current, &angle_count ) !=
        DVDNAV_STATUS_OK)
    {
        hb_log("dvdnav_get_angle_info %s", dvdnav_err_to_string(d->dvdnav));
        angle_count = 1;
    }
    return angle_count;
}
/***********************************************************************
* hb_dvdnav_set_angle
***********************************************************************
* Sets the angle to read
**********************************************************************/
static void hb_dvdnav_set_angle( hb_dvd_t * e, int angle )
{
    hb_dvdnav_t * d = &(e->dvdnav);
    dvdnav_status_t rc;

    rc = dvdnav_angle_change( d->dvdnav, angle );
    if (rc != DVDNAV_STATUS_OK)
    {
        hb_log("dvdnav_angle_change %s", dvdnav_err_to_string(d->dvdnav));
    }
}
/***********************************************************************
 * FindChapterIndex
 ***********************************************************************
 * Returns the 1-based index of the chapter whose program chain (pgcn)
 * and program (pgn) match the arguments, or 0 if no chapter matches.
 **********************************************************************/
static int FindChapterIndex( hb_list_t * list, int pgcn, int pgn )
{
    int ii;
    int count = hb_list_count( list );

    // Linear scan: chapter lists are short.
    for (ii = 0; ii < count; ii++)
    {
        hb_chapter_t * chapter = hb_list_item( list, ii );
        if (chapter->pgcn == pgcn && chapter->pgn == pgn)
        {
            return chapter->index;
        }
    }
    return 0; // no matching chapter
}
/***********************************************************************
* FindNextCell
***********************************************************************
* Assumes pgc and cell_cur are correctly set, and sets cell_next to the
* cell to be read when we will be done with cell_cur.
**********************************************************************/
static int FindNextCell( pgc_t *pgc, int cell_cur )
{
    int next;

    if ( pgc->cell_playback[cell_cur].block_type != BLOCK_TYPE_ANGLE_BLOCK )
    {
        // Ordinary cell: the next one follows immediately.
        return cell_cur + 1;
    }

    // Multi-angle block: skip forward past every cell up to and including
    // the one flagged as the last cell of the block.
    next = cell_cur;
    while ( pgc->cell_playback[next].block_mode != BLOCK_MODE_LAST_CELL )
    {
        next++;
    }
    next++;
    hb_log( "dvd: Skipping multi-angle cells %d-%d",
            cell_cur,
            next - 1 );
    return next;
}
/***********************************************************************
* NextPgcn
***********************************************************************
* Returns the number of the program chain that follows pgcn, or 0 when
* there is no valid, not-yet-visited successor.
* Since pg chains can be circularly linked (either from a read error or
* deliberately) pgcn_map tracks program chains we've already seen.
**********************************************************************/
static int NextPgcn( ifo_handle_t *ifo, int pgcn, uint32_t pgcn_map[MAX_PGCN/32] )
{
    pgc_t *pgc;
    int next;

    // Mark the current pgc as visited in the bit vector.
    pgcn_map[pgcn >> 5] |= (1 << (pgcn & 31));

    pgc  = ifo->vts_pgcit->pgci_srp[pgcn-1].pgc;
    next = pgc->next_pgc_nr;
    if ( next < 1 || next >= MAX_PGCN ||
         next > ifo->vts_pgcit->nr_of_pgci_srp )
    {
        return 0; // no valid successor
    }
    if ( pgcn_map[next >> 5] & (1 << (next & 31)) )
    {
        return 0; // already visited: pgc chain loops back
    }
    return next;
}
/***********************************************************************
* PgcWalkInit
***********************************************************************
* Pgc links can loop. I track which have been visited in a bit vector
* Initialize the bit vector to empty.
**********************************************************************/
static void PgcWalkInit( uint32_t pgcn_map[MAX_PGCN/32] )
{
    int ii;

    // Clear every word of the visited-pgc bit vector.
    for ( ii = 0; ii < MAX_PGCN/32; ii++ )
    {
        pgcn_map[ii] = 0;
    }
}
/***********************************************************************
* dvdtime2msec
***********************************************************************
* From lsdvd
**********************************************************************/
/*
 * Convert a BCD-encoded dvd_time_t to milliseconds (adapted from lsdvd).
 * Each time field packs two decimal digits per byte; for a BCD byte x,
 * ((x & 0xf0) >> 3) * 5 evaluates to (tens digit) * 10.
 */
static int dvdtime2msec(dvd_time_t * dt)
{
    // Frame-rate code lives in the top two bits of frame_u:
    // 1 = 25.00 fps, 3 = 29.97 fps; other codes are invalid (-1.0).
    double frames_per_s[4] = {-1.0, 25.00, -1.0, 29.97};
    double fps = frames_per_s[(dt->frame_u & 0xc0) >> 6];
    long ms;
    ms = (((dt->hour & 0xf0) >> 3) * 5 + (dt->hour & 0x0f)) * 3600000;
    ms += (((dt->minute & 0xf0) >> 3) * 5 + (dt->minute & 0x0f)) * 60000;
    ms += (((dt->second & 0xf0) >> 3) * 5 + (dt->second & 0x0f)) * 1000;
    if( fps > 0 )
    {
        // The frame count uses only the low 6 bits of frame_u (BCD).
        ms += (((dt->frame_u & 0x30) >> 3) * 5 +
                (dt->frame_u & 0x0f)) * 1000.0 / fps;
    }
    return ms;
}
HandBrake-0.10.2/libhb/rotate.c 0000664 0001752 0001752 00000023414 12463330511 016563 0 ustar handbrake handbrake /* rotate.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
#include "hbffmpeg.h"
#include "taskset.h"
#define MODE_DEFAULT 3
// Mode 1: Flip vertically (y0 becomes yN and yN becomes y0)
// Mode 2: Flip horizontally (x0 becomes xN and xN becomes x0)
// Mode 3: Flip both horizontally and vertically (modes 1 and 2 combined)
// Per-segment work item handed to each rotate worker thread: the source
// frame to read from and the destination frame to write into.
typedef struct rotate_arguments_s {
    hb_buffer_t *dst;
    hb_buffer_t *src;
} rotate_arguments_t;
struct hb_filter_private_s
{
    int mode;       // bit 0: flip vertical, bit 1: flip horizontal,
                    // bit 2: rotate 90 degrees clockwise (see MODE_* notes)
    int width;      // output frame dimensions (swapped when rotating 90)
    int height;
    int par_width;  // output pixel aspect ratio
    int par_height;
    int cpu_count;  // worker thread count == number of frame segments

    taskset_t rotate_taskset;             // Threads for Rotate - one per CPU
    rotate_arguments_t *rotate_arguments; // Arguments to thread for work
};
// Filter lifecycle callbacks; definitions follow below.
static int hb_rotate_init( hb_filter_object_t * filter,
                           hb_filter_init_t * init );

static int hb_rotate_work( hb_filter_object_t * filter,
                           hb_buffer_t ** buf_in,
                           hb_buffer_t ** buf_out );

static void hb_rotate_close( hb_filter_object_t * filter );

static int hb_rotate_info( hb_filter_object_t * filter,
                           hb_filter_info_t * info );

// Public descriptor wiring this filter into HandBrake's filter chain.
hb_filter_object_t hb_filter_rotate =
{
    .id            = HB_FILTER_ROTATE,
    .enforce_order = 0,
    .name          = "Rotate (rotate & flip image axes)",
    .settings      = NULL,
    .init          = hb_rotate_init,
    .work          = hb_rotate_work,
    .close         = hb_rotate_close,
    .info          = hb_rotate_info
};
// Startup argument for a single rotate worker thread.
typedef struct rotate_thread_arg_s {
    hb_filter_private_t *pv;
    int segment;    // which horizontal slice of the frame this thread owns
} rotate_thread_arg_t;
/*
* rotate this segment of all three planes in a single thread.
*/
void rotate_filter_thread( void *thread_args_v )
{
    rotate_arguments_t *rotate_work = NULL;
    hb_filter_private_t * pv;
    int run = 1;
    int plane;
    int segment, segment_start, segment_stop;
    rotate_thread_arg_t *thread_args = thread_args_v;
    uint8_t *dst;
    hb_buffer_t *dst_buf;
    hb_buffer_t *src_buf;
    int y;

    pv = thread_args->pv;
    segment = thread_args->segment;

    hb_log("Rotate thread started for segment %d", segment);

    while( run )
    {
        /*
         * Wait here until there is work to do.
         */
        taskset_thread_wait4start( &pv->rotate_taskset, segment );

        if( taskset_thread_stop( &pv->rotate_taskset, segment ) )
        {
            /*
             * No more work to do, exit this thread.
             */
            run = 0;
            goto report_completion;
        }

        rotate_work = &pv->rotate_arguments[segment];
        if( rotate_work->dst == NULL )
        {
            hb_error( "Thread started when no work available" );
            hb_snooze(500);
            goto report_completion;
        }

        /*
         * Process all three planes, but only this segment of it.
         */
        dst_buf = rotate_work->dst;
        src_buf = rotate_work->src;
        for( plane = 0; plane < 3; plane++)
        {
            int dst_stride, src_stride;

            dst = dst_buf->plane[plane].data;
            dst_stride = dst_buf->plane[plane].stride;
            src_stride = src_buf->plane[plane].stride;

            int h = src_buf->plane[plane].height;
            int w = src_buf->plane[plane].width;
            // This thread handles source rows [segment_start, segment_stop);
            // the final segment also takes any remainder rows.
            segment_start = ( h / pv->cpu_count ) * segment;
            if( segment == pv->cpu_count - 1 )
            {
                /*
                 * Final segment
                 */
                segment_stop = h;
            } else {
                segment_stop = ( h / pv->cpu_count ) * ( segment + 1 );
            }

            for( y = segment_start; y < segment_stop; y++ )
            {
                uint8_t * cur;
                int x, xo, yo;

                cur = &src_buf->plane[plane].data[y * src_stride];
                for( x = 0; x < w; x++)
                {
                    // Compute the destination coordinate (xo, yo) for
                    // source pixel (x, y) according to the mode bits.
                    if( pv->mode & 1 )      // flip vertically
                    {
                        yo = h - y - 1;
                    }
                    else
                    {
                        yo = y;
                    }
                    if( pv->mode & 2 )      // flip horizontally
                    {
                        xo = w - x - 1;
                    }
                    else
                    {
                        xo = x;
                    }
                    if( pv->mode & 4 ) // Rotate 90 clockwise
                    {
                        int tmp = xo;
                        xo = h - yo - 1;
                        yo = tmp;
                    }
                    dst[yo*dst_stride + xo] = cur[x];
                }
            }
        }

report_completion:
        /*
         * Finished this segment, let everyone know.
         */
        taskset_thread_complete( &pv->rotate_taskset, segment );
    }
}
/*
* threaded rotate - each thread rotates a single segment of all
* three planes. Where a segment is defined as the frame divided by
* the number of CPUs.
*
* This function blocks until the frame is rotated.
*/
static void rotate_filter(
    hb_filter_private_t * pv,
    hb_buffer_t *out,
    hb_buffer_t *in )
{
    int seg;

    // Hand every worker the same src/dst pair; each worker rotates only
    // its own horizontal slice of the frame.
    for( seg = 0; seg < pv->cpu_count; seg++ )
    {
        pv->rotate_arguments[seg].dst = out;
        pv->rotate_arguments[seg].src = in;
    }

    // Run the taskset once; this returns after all segments complete,
    // so the whole frame is rotated when we get back.
    taskset_cycle( &pv->rotate_taskset );
}
static int hb_rotate_init( hb_filter_object_t * filter,
                           hb_filter_init_t * init )
{
    filter->private_data = calloc( 1, sizeof(struct hb_filter_private_s) );
    hb_filter_private_t * pv = filter->private_data;

    // Mode comes from the filter settings string; default is flip both axes.
    pv->mode = MODE_DEFAULT;
    if( filter->settings )
    {
        sscanf( filter->settings, "%d",
                &pv->mode );
    }

    pv->cpu_count = hb_get_cpu_count();

    /*
     * Create rotate taskset.
     */
    pv->rotate_arguments = malloc( sizeof( rotate_arguments_t ) * pv->cpu_count );
    if( pv->rotate_arguments == NULL ||
        taskset_init( &pv->rotate_taskset, /*thread_count*/pv->cpu_count,
                      sizeof( rotate_thread_arg_t ) ) == 0 )
    {
        // NOTE(review): execution continues after this error, which likely
        // leaves the filter unusable -- confirm this matches the other
        // filters' error-handling convention.
        hb_error( "rotate could not initialize taskset" );
    }

    int i;
    for( i = 0; i < pv->cpu_count; i++ )
    {
        rotate_thread_arg_t *thread_args;

        thread_args = taskset_thread_args( &pv->rotate_taskset, i );

        thread_args->pv = pv;
        thread_args->segment = i;
        // dst == NULL marks "no work" for the worker thread.
        pv->rotate_arguments[i].dst = NULL;

        if( taskset_thread_spawn( &pv->rotate_taskset, i,
                                  "rotate_filter_segment",
                                  rotate_filter_thread,
                                  HB_NORMAL_PRIORITY ) == 0 )
        {
            hb_error( "rotate could not spawn thread" );
        }
    }

    // Set init width/height so the next stage in the pipline
    // knows what it will be getting
    if( pv->mode & 4 )
    {
        // 90 degree rotation, exchange width and height
        int tmp = init->width;
        init->width = init->height;
        init->height = tmp;
        tmp = init->par_width;
        init->par_width = init->par_height;
        init->par_height = tmp;
    }
    pv->width = init->width;
    pv->height = init->height;
    pv->par_width = init->par_width;
    pv->par_height = init->par_height;

    return 0;
}
static int hb_rotate_info( hb_filter_object_t * filter,
                           hb_filter_info_t * info )
{
    hb_filter_private_t * pv = filter->private_data;
    int pos;

    if( !pv )
    {
        return 1;
    }

    memset( info, 0, sizeof( hb_filter_info_t ) );
    info->out.width      = pv->width;
    info->out.height     = pv->height;
    info->out.par_width  = pv->par_width;
    info->out.par_height = pv->par_height;

    // Build a '/'-separated summary such as "flip vertical/rotate 90".
    pos = 0;
    if( pv->mode & 1 )
    {
        pos += sprintf( &info->human_readable_desc[pos], "flip vertical" );
    }
    if( pv->mode & 2 )
    {
        if( pos )
            pos += sprintf( &info->human_readable_desc[pos], "/" );
        pos += sprintf( &info->human_readable_desc[pos], "flip horizontal" );
    }
    if( pv->mode & 4 )
    {
        if( pos )
            pos += sprintf( &info->human_readable_desc[pos], "/" );
        pos += sprintf( &info->human_readable_desc[pos], "rotate 90" );
    }
    return 0;
}
static void hb_rotate_close( hb_filter_object_t * filter )
{
    hb_filter_private_t * pv = filter->private_data;

    if( pv == NULL )
    {
        return;
    }

    // Stop and join the worker threads, then release per-segment state.
    taskset_fini( &pv->rotate_taskset );
    free( pv->rotate_arguments );
    free( pv );
    filter->private_data = NULL;
}
static int hb_rotate_work( hb_filter_object_t * filter,
                           hb_buffer_t ** buf_in,
                           hb_buffer_t ** buf_out )
{
    hb_filter_private_t * pv = filter->private_data;
    hb_buffer_t * in = *buf_in;
    hb_buffer_t * out;
    int out_w, out_h;

    // An empty buffer marks end-of-stream; hand it straight through.
    if ( in->size <= 0 )
    {
        *buf_out = in;
        *buf_in  = NULL;
        return HB_FILTER_DONE;
    }

    // A 90 degree rotation swaps the output dimensions.
    if ( pv->mode & 4 )
    {
        out_w = in->f.height;
        out_h = in->f.width;
    }
    else
    {
        out_w = in->f.width;
        out_h = in->f.height;
    }

    out = hb_video_buffer_init( out_w, out_h );

    // Rotate!
    rotate_filter( pv, out, in );

    // Carry timing/flags and subtitles over to the rotated frame.
    out->s = in->s;
    hb_buffer_move_subs( out, in );

    *buf_out = out;
    return HB_FILTER_OK;
}
HandBrake-0.10.2/libhb/hb.c 0000664 0001752 0001752 00000162023 12463330511 015656 0 ustar handbrake handbrake /* hb.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
#include "opencl.h"
#include "hbffmpeg.h"
#include
#include
#include
#ifdef USE_QSV
#include "qsv_common.h"
#endif
#if defined( SYS_MINGW )
#include
#if defined( PTW32_STATIC_LIB )
#include
#endif
#endif
/* Per-instance libhb state: scan results, the job queue, worker threads,
 * and the status reported back to the UI. */
struct hb_handle_s
{
    int id; // unique instance id (from hb_instance_counter)

    /* The "Check for update" thread */
    int build;
    char version[32];
    hb_thread_t * update_thread;

    /* This thread's only purpose is to check other threads'
       states */
    volatile int die;
    hb_thread_t * main_thread;
    int pid;

    /* DVD/file scan thread */
    hb_title_set_t title_set;
    hb_thread_t * scan_thread;

    /* The thread which processes the jobs. Others threads are launched
       from this one (see work.c) */
    hb_list_t * jobs;
    hb_job_t * current_job;
    int job_count;
    int job_count_permanent;
    volatile int work_die;
    hb_error_code work_error;
    hb_thread_t * work_thread;

    hb_lock_t * state_lock; // guards 'state'
    hb_state_t state;

    int paused;
    hb_lock_t * pause_lock;

    /* For MacGui active queue
       increments each time the scan thread completes*/
    int scanCount;
    volatile int scan_die;

    /* Stash of persistent data between jobs, for stuff
       like correcting frame count and framerate estimates
       on multi-pass encodes where frames get dropped. */
    hb_interjob_t * interjob;

    // power management opaque pointer
    void *system_sleep_opaque;
} ;
// Head of the global linked list of registered work objects (hb_register()).
hb_work_object_t * hb_objects = NULL;
// Monotonically increasing id assigned to each new hb_handle_t.
int hb_instance_counter = 0;

static void thread_func( void * );
// libav lock manager callback: maps the four AVLockOp operations onto
// HandBrake's hb_lock primitives. Always reports success.
static int ff_lockmgr_cb(void **mutex, enum AVLockOp op)
{
    if (op == AV_LOCK_CREATE)
    {
        *mutex = hb_lock_init();
    }
    else if (op == AV_LOCK_DESTROY)
    {
        hb_lock_close( (hb_lock_t**)mutex );
    }
    else if (op == AV_LOCK_OBTAIN)
    {
        hb_lock( (hb_lock_t*)*mutex );
    }
    else if (op == AV_LOCK_RELEASE)
    {
        hb_unlock( (hb_lock_t*)*mutex );
    }
    return 0;
}
/*
 * One-time libav global setup: installs the lock manager and registers
 * all codecs. Call before any other libav usage.
 */
void hb_avcodec_init()
{
    av_lockmgr_register(ff_lockmgr_cb);
    av_register_all();
#ifdef _WIN64
    // avresample's assembly optimizations can cause crashes under Win x86_64
    // (see http://bugzilla.libav.org/show_bug.cgi?id=496)
    // disable AVX and FMA4 as a workaround
    hb_deep_log(2, "hb_avcodec_init: Windows x86_64, disabling AVX and FMA4");
    int cpu_flags = av_get_cpu_flags() & ~AV_CPU_FLAG_AVX & ~AV_CPU_FLAG_FMA4;
    av_set_cpu_flags_mask(cpu_flags);
#endif
}
/*
 * Wrapper around avcodec_open2() that applies HandBrake's threading policy
 * (frame+slice threads for video; HB_FFMPEG_THREADS_AUTO picks a count from
 * the CPU count) and unlocks "experimental" encoders.
 * Returns avcodec_open2()'s result.
 */
int hb_avcodec_open(AVCodecContext *avctx, AVCodec *codec,
                    AVDictionary **av_opts, int thread_count)
{
    int want_threads =
        (thread_count == HB_FFMPEG_THREADS_AUTO || thread_count > 0) &&
        (codec->type == AVMEDIA_TYPE_VIDEO);

    if (want_threads)
    {
        if (thread_count == HB_FFMPEG_THREADS_AUTO)
        {
            avctx->thread_count = hb_get_cpu_count() / 2 + 1;
        }
        else
        {
            avctx->thread_count = thread_count;
        }
        avctx->thread_type = FF_THREAD_FRAME|FF_THREAD_SLICE;
        avctx->thread_safe_callbacks = 1;
    }
    else
    {
        avctx->thread_count = 1;
    }

    if (codec->capabilities & CODEC_CAP_EXPERIMENTAL)
    {
        // "experimental" encoders will not open without this
        avctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
    }

    return avcodec_open2(avctx, codec, av_opts);
}
// Thin wrapper around avcodec_close(); kept so callers go through libhb.
int hb_avcodec_close(AVCodecContext *avctx)
{
    return avcodec_close(avctx);
}
/*
 * Point an AVPicture's data/linesize arrays at an hb_buffer_t's planes.
 * Returns the total byte size computed by libav; logs an error when that
 * size disagrees with the buffer's own size.
 */
int hb_avpicture_fill(AVPicture *pic, hb_buffer_t *buf)
{
    int ret, ii;

    for (ii = 0; ii < 4; ii++)
        pic->linesize[ii] = buf->plane[ii].stride;

    ret = av_image_fill_pointers(pic->data, buf->f.fmt,
                                 buf->plane[0].height_stride,
                                 buf->data, pic->linesize);
    if (ret != buf->size)
    {
        hb_error("Internal error hb_avpicture_fill expected %d, got %d",
                 buf->size, ret);
    }
    return ret;
}
// Rewrite deprecated "JPEG range" pixel formats to their standard
// equivalents in place. Returns 1 when a translation occurred (i.e. the
// input was full-range), 0 otherwise.
static int handle_jpeg(enum AVPixelFormat *format)
{
    int full_range = 1;

    if (*format == AV_PIX_FMT_YUVJ420P)
        *format = AV_PIX_FMT_YUV420P;
    else if (*format == AV_PIX_FMT_YUVJ422P)
        *format = AV_PIX_FMT_YUV422P;
    else if (*format == AV_PIX_FMT_YUVJ444P)
        *format = AV_PIX_FMT_YUV444P;
    else if (*format == AV_PIX_FMT_YUVJ440P)
        *format = AV_PIX_FMT_YUV440P;
    else
        full_range = 0;

    return full_range;
}
/*
 * Allocate and initialize a libswscale context for the given conversion.
 * JPEG-range ("YUVJ") formats are mapped to their standard equivalents and
 * the matching full/limited range flags are set via
 * sws_setColorspaceDetails(). Returns NULL on failure.
 */
struct SwsContext*
hb_sws_get_context(int srcW, int srcH, enum AVPixelFormat srcFormat,
                   int dstW, int dstH, enum AVPixelFormat dstFormat,
                   int flags)
{
    struct SwsContext * ctx;

    ctx = sws_alloc_context();
    if ( ctx )
    {
        int srcRange, dstRange;

        srcRange = handle_jpeg(&srcFormat);
        dstRange = handle_jpeg(&dstFormat);
        /* enable this when implemented in Libav
        flags |= SWS_FULL_CHR_H_INT | SWS_FULL_CHR_H_INP;
         */

        av_opt_set_int(ctx, "srcw", srcW, 0);
        av_opt_set_int(ctx, "srch", srcH, 0);
        av_opt_set_int(ctx, "src_range", srcRange, 0);
        av_opt_set_int(ctx, "src_format", srcFormat, 0);
        av_opt_set_int(ctx, "dstw", dstW, 0);
        av_opt_set_int(ctx, "dsth", dstH, 0);
        av_opt_set_int(ctx, "dst_range", dstRange, 0);
        av_opt_set_int(ctx, "dst_format", dstFormat, 0);
        av_opt_set_int(ctx, "sws_flags", flags, 0);

        sws_setColorspaceDetails( ctx,
                      sws_getCoefficients( SWS_CS_DEFAULT ), // src colorspace
                      srcRange, // src range 0 = MPG, 1 = JPG
                      sws_getCoefficients( SWS_CS_DEFAULT ), // dst colorspace
                      dstRange, // dst range 0 = MPG, 1 = JPG
                      0,         // brightness
                      1 << 16,   // contrast
                      1 << 16 ); // saturation

        if (sws_init_context(ctx, NULL, NULL) < 0) {
            fprintf(stderr, "Cannot initialize resampling context\n");
            sws_freeContext(ctx);
            ctx = NULL;
        }
    }
    return ctx;
}
/*
 * Translate a HandBrake mixdown identifier into the matching libav channel
 * layout. When downmix_mode is non-NULL it receives the matrix encoding to
 * request (Dolby Surround or Pro Logic II), or AV_MATRIX_ENCODING_NONE.
 * Unknown mixdowns fall back to stereo with a log message.
 */
uint64_t hb_ff_mixdown_xlat(int hb_mixdown, int *downmix_mode)
{
    uint64_t ff_layout = 0;
    int mode = AV_MATRIX_ENCODING_NONE;
    switch (hb_mixdown)
    {
        // Passthru
        case HB_AMIXDOWN_NONE:
            break;

        case HB_AMIXDOWN_MONO:
        case HB_AMIXDOWN_LEFT:
        case HB_AMIXDOWN_RIGHT:
            ff_layout = AV_CH_LAYOUT_MONO;
            break;

        case HB_AMIXDOWN_DOLBY:
            ff_layout = AV_CH_LAYOUT_STEREO;
            mode = AV_MATRIX_ENCODING_DOLBY;
            break;

        case HB_AMIXDOWN_DOLBYPLII:
            ff_layout = AV_CH_LAYOUT_STEREO;
            mode = AV_MATRIX_ENCODING_DPLII;
            break;

        case HB_AMIXDOWN_STEREO:
            ff_layout = AV_CH_LAYOUT_STEREO;
            break;

        case HB_AMIXDOWN_5POINT1:
            ff_layout = AV_CH_LAYOUT_5POINT1;
            break;

        case HB_AMIXDOWN_6POINT1:
            ff_layout = AV_CH_LAYOUT_6POINT1;
            break;

        case HB_AMIXDOWN_7POINT1:
            ff_layout = AV_CH_LAYOUT_7POINT1;
            break;

        case HB_AMIXDOWN_5_2_LFE:
            ff_layout = (AV_CH_LAYOUT_5POINT1_BACK|
                         AV_CH_FRONT_LEFT_OF_CENTER|
                         AV_CH_FRONT_RIGHT_OF_CENTER);
            break;

        default:
            // Unknown mixdown: default to stereo and log it.
            ff_layout = AV_CH_LAYOUT_STEREO;
            hb_log("hb_ff_mixdown_xlat: unsupported mixdown %d", hb_mixdown);
            break;
    }
    if (downmix_mode != NULL)
        *downmix_mode = mode;
    return ff_layout;
}
/*
* Set sample format to the request format if supported by the codec.
* The planar/packed variant of the requested format is the next best thing.
*/
void hb_ff_set_sample_fmt(AVCodecContext *context, AVCodec *codec,
                          enum AVSampleFormat request_sample_fmt)
{
    if (context != NULL && codec != NULL &&
        codec->type == AVMEDIA_TYPE_AUDIO && codec->sample_fmts != NULL)
    {
        const enum AVSampleFormat *fmt;
        enum AVSampleFormat next_best_fmt;

        // The planar/packed counterpart of the requested format is the
        // next best thing if the exact format isn't supported.
        next_best_fmt = (av_sample_fmt_is_planar(request_sample_fmt)  ?
                         av_get_packed_sample_fmt(request_sample_fmt) :
                         av_get_planar_sample_fmt(request_sample_fmt));

        context->request_sample_fmt = AV_SAMPLE_FMT_NONE;

        // Scan the codec's supported formats for an exact match; remember
        // the next-best variant as a fallback along the way.
        for (fmt = codec->sample_fmts; *fmt != AV_SAMPLE_FMT_NONE; fmt++)
        {
            if (*fmt == request_sample_fmt)
            {
                context->request_sample_fmt = request_sample_fmt;
                break;
            }
            else if (*fmt == next_best_fmt)
            {
                context->request_sample_fmt = next_best_fmt;
            }
        }

        /*
         * When encoding and AVCodec.sample_fmts exists, avcodec_open2()
         * will error out if AVCodecContext.sample_fmt isn't set.
         */
        if (context->request_sample_fmt == AV_SAMPLE_FMT_NONE)
        {
            context->request_sample_fmt = codec->sample_fmts[0];
        }
        context->sample_fmt = context->request_sample_fmt;
    }
}
/**
* Registers work objects, by adding the work object to a liked list.
* @param w Handle to hb_work_object_t to register.
*/
/**
 * Registers a work object by prepending it to the global linked list.
 * @param w Handle to hb_work_object_t to register.
 */
void hb_register( hb_work_object_t * w )
{
    // The new object becomes the head of the list.
    w->next    = hb_objects;
    hb_objects = w;
}
// Optional log sink set via hb_register_logger(); when non-NULL all log
// output is forwarded to this callback by the redirect thread.
void (*hb_log_callback)(const char* message);
static void redirect_thread_func(void *);

#if defined( SYS_MINGW )
// MinGW has no POSIX pipe(); emulate with _pipe (binary mode, 4K buffer).
#define pipe(phandles) _pipe (phandles, 4096, _O_BINARY)
#endif
/**
* Registers the given function as a logger. All logs will be passed to it.
* @param log_cb The function to register as a logger.
*/
/**
 * Installs log_cb as the logger and starts the stdout/stderr redirect
 * thread that feeds it. All subsequent logs are passed to the callback.
 * @param log_cb The function to register as a logger.
 */
void hb_register_logger( void (*log_cb)(const char* message) )
{
    hb_log_callback = log_cb;
    hb_thread_init("ioredirect", redirect_thread_func, NULL,
                   HB_NORMAL_PRIORITY);
}
/**
* libhb initialization routine.
* @param verbose HB_DEBUG_NONE or HB_DEBUG_ALL.
* @param update_check signals libhb to check for updated version from HandBrake website.
* @return Handle to hb_handle_t for use on all subsequent calls to libhb.
*/
hb_handle_t * hb_init( int verbose, int update_check )
{
    hb_handle_t * h = calloc( sizeof( hb_handle_t ), 1 );
    uint64_t date;

    /* See hb_deep_log() and hb_log() in common.c */
    global_verbosity_level = verbose;
    if( verbose )
        putenv( "HB_DEBUG=1" );

    h->id = hb_instance_counter++;

    /* Check for an update on the website if asked to */
    h->build = -1;

    /* Initialize opaque for PowerManagement purposes */
    h->system_sleep_opaque = hb_system_sleep_opaque_init();

    if( update_check )
    {
        hb_log( "hb_init: checking for updates" );
        date             = hb_get_date();
        h->update_thread = hb_update_init( &h->build, h->version );

        // Poll the update thread for up to one second, then abandon it.
        for( ;; )
        {
            if( hb_thread_has_exited( h->update_thread ) )
            {
                /* Immediate success or failure */
                hb_thread_close( &h->update_thread );
                break;
            }
            if( hb_get_date() > date + 1000 )
            {
                /* Still nothing after one second. Connection problem,
                   let the thread die */
                hb_log( "hb_init: connection problem, not waiting for "
                        "update_thread" );
                break;
            }
            hb_snooze( 500 );
        }
    }

    /*
     * Initialise buffer pool
     */
    hb_buffer_pool_init();

    h->title_set.list_title = hb_list_init();
    h->jobs       = hb_list_init();

    h->state_lock  = hb_lock_init();
    h->state.state = HB_STATE_IDLE;

    h->pause_lock = hb_lock_init();

    h->interjob = calloc( sizeof( hb_interjob_t ), 1 );

    /* Start library thread */
    hb_log( "hb_init: starting libhb thread" );
    h->die         = 0;
    h->main_thread = hb_thread_init( "libhb", thread_func, h,
                                     HB_NORMAL_PRIORITY );
    return h;
}
/**
* libhb initialization routine.
* This version is to use when calling the dylib, the macro hb_init isn't available from a dylib call!
* @param verbose HB_DEBUG_NONE or HB_DEBUG_ALL.
* @param update_check signals libhb to check for updated version from HandBrake website.
* @return Handle to hb_handle_t for use on all subsequent calls to libhb.
*/
hb_handle_t * hb_init_dl( int verbose, int update_check )
{
    hb_handle_t * h = calloc( sizeof( hb_handle_t ), 1 );
    uint64_t date;

    /* See hb_log() in common.c */
    if( verbose > HB_DEBUG_NONE )
    {
        putenv( "HB_DEBUG=1" );
    }

    h->id = hb_instance_counter++;

    /* Check for an update on the website if asked to */
    h->build = -1;

    /* Initialize opaque for PowerManagement purposes */
    h->system_sleep_opaque = hb_system_sleep_opaque_init();

    if( update_check )
    {
        hb_log( "hb_init: checking for updates" );
        date             = hb_get_date();
        h->update_thread = hb_update_init( &h->build, h->version );

        // Poll the update thread for up to one second, then abandon it.
        for( ;; )
        {
            if( hb_thread_has_exited( h->update_thread ) )
            {
                /* Immediate success or failure */
                hb_thread_close( &h->update_thread );
                break;
            }
            if( hb_get_date() > date + 1000 )
            {
                /* Still nothing after one second. Connection problem,
                   let the thread die */
                hb_log( "hb_init: connection problem, not waiting for "
                        "update_thread" );
                break;
            }
            hb_snooze( 500 );
        }
    }

    // NOTE(review): unlike hb_init(), this variant neither calls
    // hb_buffer_pool_init() nor allocates h->interjob -- presumably they
    // are set up elsewhere for dylib callers; confirm before relying on it.
    h->title_set.list_title = hb_list_init();
    h->jobs       = hb_list_init();
    h->current_job = NULL;

    h->state_lock  = hb_lock_init();
    h->state.state = HB_STATE_IDLE;

    h->pause_lock = hb_lock_init();

    /* Start library thread */
    hb_log( "hb_init: starting libhb thread" );
    h->die         = 0;
    h->main_thread = hb_thread_init( "libhb", thread_func, h,
                                     HB_NORMAL_PRIORITY );

    return h;
}
/**
 * Returns current version of libhb.
 * @param h Handle to hb_handle_t (unused; kept for API symmetry).
 * @return Pointer to the static version string (HB_PROJECT_VERSION);
 *         the caller must not free or modify it.
 */
char * hb_get_version( hb_handle_t * h )
{
    return HB_PROJECT_VERSION;
}
/**
 * Returns current build number of libhb.
 * @param h Handle to hb_handle_t (unused; kept for API symmetry).
 * @return Build number (HB_PROJECT_BUILD), fixed at compile time.
 */
int hb_get_build( hb_handle_t * h )
{
    return HB_PROJECT_BUILD;
}
/**
 * Reports the result of the update check started by hb_init.
 * @param h Handle to hb_handle_t.
 * @param version Receives a pointer to the available version string when
 *                an update was found, NULL otherwise (not a copy).
 * @return The build number found by the update check, negative when no
 *         update information is available.
 */
int hb_check_update( hb_handle_t * h, char ** version )
{
    if( h->build < 0 )
    {
        *version = NULL;
    }
    else
    {
        *version = h->version;
    }
    return h->build;
}
/**
 * Deletes the preview files belonging to this instance's titles from the
 * temporary directory.
 * @param h Handle to hb_handle_t.
 */
void hb_remove_previews( hb_handle_t * h )
{
    char            filename[1024];
    char            dirname[1024];
    hb_title_t    * title;
    int             i, count, len;
    DIR           * dir;
    struct dirent * entry;

    memset( dirname, 0, 1024 );
    hb_get_temporary_directory( dirname );
    dir = opendir( dirname );
    if (dir == NULL) return;

    count = hb_list_count( h->title_set.list_title );
    while( ( entry = readdir( dir ) ) )
    {
        if( entry->d_name[0] == '.' )
        {
            continue;
        }
        for( i = 0; i < count; i++ )
        {
            title = hb_list_item( h->title_set.list_title, i );
            /* Preview files are named "<id>_<title>_<preview>" (see
             * hb_save_preview).  Include the trailing '_' in the prefix so
             * that title 1 ("1_1_") cannot also match title 11's files
             * ("1_11_0"), which the old "%d_%d" prefix did. */
            len = snprintf( filename, 1024, "%d_%d_", h->id, title->index );
            if (strncmp(entry->d_name, filename, len) == 0)
            {
                snprintf( filename, 1024, "%s/%s", dirname, entry->d_name );
                unlink( filename );
                break;
            }
        }
    }
    closedir( dir );
}
/**
 * Initializes a scan of the source by calling hb_scan_init.
 * @param h Handle to hb_handle_t
 * @param path location of VIDEO_TS folder.
 * @param title_index Desired title to scan. 0 for all titles.
 * @param preview_count Number of preview images to generate.
 * @param store_previews Whether or not to write previews to disk.
 * @param min_duration Skip titles shorter than this (presumably in 90kHz
 *                     clock ticks — confirm against scan.c).
 */
void hb_scan( hb_handle_t * h, const char * path, int title_index,
              int preview_count, int store_previews, uint64_t min_duration )
{
    hb_title_t * title;

    h->scan_die = 0;

    /* Clean up from previous scan: delete preview files and free titles. */
    hb_remove_previews( h );
    while( ( title = hb_list_item( h->title_set.list_title, 0 ) ) )
    {
        hb_list_rem( h->title_set.list_title, title );
        hb_title_close( &title );
    }

    /* Print CPU info here so that it's in all scan and encode logs */
    const char *cpu_name = hb_get_cpu_name();
    const char *cpu_type = hb_get_cpu_platform_name();
    hb_log("CPU: %s", cpu_name != NULL ? cpu_name : "");
    if (cpu_type != NULL)
    {
        hb_log(" - %s", cpu_type);
    }
    hb_log(" - logical processor count: %d", hb_get_cpu_count());

    /* Print OpenCL info here so that it's in all scan and encode logs */
    hb_opencl_info_print();

#ifdef USE_QSV
    /* Print QSV info here so that it's in all scan and encode logs */
    hb_qsv_info_print();
#endif

    /* Kick off the asynchronous scan; completion is observed by
       thread_func when the scan thread exits. */
    hb_log( "hb_scan: path=%s, title_index=%d", path, title_index );
    h->scan_thread = hb_scan_init( h, &h->scan_die, path, title_index,
                                   &h->title_set, preview_count,
                                   store_previews, min_duration );
}
/**
 * Returns the list of titles found.
 * @param h Handle to hb_handle_t
 * @return Handle to hb_list_t of the title list (owned by libhb; caller
 *         must not free it).
 */
hb_list_t * hb_get_titles( hb_handle_t * h )
{
    return h->title_set.list_title;
}
/**
 * Returns the full title set for this instance.
 * @param h Handle to hb_handle_t
 * @return Pointer to the handle's hb_title_set_t (owned by libhb; caller
 *         must not free it).
 */
hb_title_set_t * hb_get_title_set( hb_handle_t * h )
{
    return &h->title_set;
}
/**
 * Writes a raw planar preview frame to a temp file named
 * "<instance>_<title>_<preview>".
 * @param h       Handle to hb_handle_t (used for the instance id).
 * @param title   Title index the preview belongs to.
 * @param preview Preview index within the title.
 * @param buf     Frame to write; all 3 planes are dumped row by row,
 *                width bytes per row (stride padding is skipped).
 * @return 0 on success, -1 on open or write failure.
 */
int hb_save_preview( hb_handle_t * h, int title, int preview, hb_buffer_t *buf )
{
    FILE * file;
    char   filename[1024];

    hb_get_tempory_filename( h, filename, "%d_%d_%d",
                             hb_get_instance_id(h), title, preview );

    file = hb_fopen(filename, "wb");
    if( !file )
    {
        hb_error( "hb_save_preview: fopen failed (%s)", filename );
        return -1;
    }

    int pp, hh;
    for( pp = 0; pp < 3; pp++ )
    {
        uint8_t *data   = buf->plane[pp].data;
        int      stride = buf->plane[pp].stride;
        int      w      = buf->plane[pp].width;
        /* Renamed from 'h' to avoid shadowing the hb_handle_t parameter. */
        int      height = buf->plane[pp].height;

        for( hh = 0; hh < height; hh++ )
        {
            /* Fail instead of silently producing a truncated preview
             * (the old code ignored fwrite's return value). */
            if( w > 0 && fwrite( data, w, 1, file ) != 1 )
            {
                hb_error( "hb_save_preview: fwrite failed (%s)", filename );
                fclose( file );
                return -1;
            }
            data += stride;
        }
    }
    fclose( file );
    return 0;
}
/**
 * Reads a raw preview frame previously written by hb_save_preview back
 * into a newly allocated YUV420 frame buffer.
 * @param h       Handle to hb_handle_t (used for the instance id).
 * @param title   Title the preview belongs to (index and dimensions used).
 * @param preview Preview index within the title.
 * @return Newly allocated hb_buffer_t (ownership passes to the caller),
 *         or NULL when the file cannot be opened or is truncated.
 */
hb_buffer_t * hb_read_preview(hb_handle_t * h, hb_title_t *title, int preview)
{
    FILE * file;
    char   filename[1024];

    hb_get_tempory_filename(h, filename, "%d_%d_%d",
                            hb_get_instance_id(h), title->index, preview);

    file = hb_fopen(filename, "rb");
    if (!file)
    {
        hb_error( "hb_read_preview: fopen failed (%s)", filename );
        return NULL;
    }

    hb_buffer_t * buf;
    buf = hb_frame_buffer_init(AV_PIX_FMT_YUV420P, title->width, title->height);

    int pp, hh;
    for (pp = 0; pp < 3; pp++)
    {
        uint8_t *data   = buf->plane[pp].data;
        int      stride = buf->plane[pp].stride;
        int      w      = buf->plane[pp].width;
        /* Renamed from 'h' to avoid shadowing the hb_handle_t parameter. */
        int      height = buf->plane[pp].height;

        for (hh = 0; hh < height; hh++)
        {
            /* A short read means a truncated/corrupt preview file; fail
             * instead of returning a partially filled frame (the old code
             * ignored fread's return value). */
            if (w > 0 && fread(data, w, 1, file) != 1)
            {
                hb_error("hb_read_preview: short read (%s)", filename);
                fclose(file);
                hb_buffer_close(&buf);
                return NULL;
            }
            data += stride;
        }
    }
    fclose(file);
    return buf;
}
/**
 * Builds an RGB32 preview image for a title, cropped and scaled according
 * to the requested geometry.
 * @param h           Handle to hb_handle_t.
 * @param title_idx   Index of the title to preview.
 * @param picture     Preview picture index.
 * @param ui_geo      Requested output geometry (size, PAR, crop).
 * @param deinterlace Non-zero to deinterlace the source frame first.
 * @return Newly allocated hb_image_t (ownership passes to the caller),
 *         or NULL on failure.
 */
hb_image_t* hb_get_preview2(hb_handle_t * h, int title_idx, int picture,
                            hb_ui_geometry_t *ui_geo, int deinterlace)
{
    hb_buffer_t * in_buf, * deint_buf = NULL, * preview_buf;
    uint32_t swsflags;
    AVPicture pic_in, pic_preview, pic_deint, pic_crop;
    struct SwsContext * context;

    /* Output storage width is display width: height stays as requested,
       width is stretched by the pixel aspect ratio. */
    int width = ui_geo->width * ui_geo->par.num / ui_geo->par.den;
    int height = ui_geo->height;

    swsflags = SWS_LANCZOS | SWS_ACCURATE_RND;

    hb_title_t * title;
    title = hb_find_title_by_index(h, title_idx);
    if (title == NULL)
    {
        hb_error( "hb_get_preview2: invalid title (%d)", title_idx );
        return NULL;
    }

    in_buf = hb_read_preview( h, title, picture );
    if ( in_buf == NULL )
    {
        return NULL;
    }
    hb_avpicture_fill( &pic_in, in_buf );

    /* Allocate the output frame only after the failure-prone steps above;
     * the old code allocated it first and leaked it on both early returns. */
    preview_buf = hb_frame_buffer_init(AV_PIX_FMT_RGB32, width, height);
    hb_avpicture_fill( &pic_preview, preview_buf );

    if (deinterlace)
    {
        // Deinterlace and crop
        deint_buf = hb_frame_buffer_init( AV_PIX_FMT_YUV420P,
                                          title->width, title->height );
        hb_deinterlace(deint_buf, in_buf);
        hb_avpicture_fill( &pic_deint, deint_buf );

        av_picture_crop(&pic_crop, &pic_deint, AV_PIX_FMT_YUV420P,
                        ui_geo->crop[0], ui_geo->crop[2] );
    }
    else
    {
        // Crop
        av_picture_crop(&pic_crop, &pic_in, AV_PIX_FMT_YUV420P,
                        ui_geo->crop[0], ui_geo->crop[2] );
    }

    // Get scaling context
    context = hb_sws_get_context(
                title->width  - (ui_geo->crop[2] + ui_geo->crop[3]),
                title->height - (ui_geo->crop[0] + ui_geo->crop[1]),
                AV_PIX_FMT_YUV420P, width, height, AV_PIX_FMT_RGB32,
                swsflags);
    if (context == NULL)
    {
        /* Scaler setup failed; release everything instead of crashing
         * in sws_scale below. */
        hb_error( "hb_get_preview2: sws context allocation failed" );
        hb_buffer_close( &in_buf );
        hb_buffer_close( &deint_buf );
        hb_buffer_close( &preview_buf );
        return NULL;
    }

    // Scale
    sws_scale(context,
              (const uint8_t* const *)pic_crop.data, pic_crop.linesize,
              0, title->height - (ui_geo->crop[0] + ui_geo->crop[1]),
              pic_preview.data, pic_preview.linesize);

    // Free context
    sws_freeContext( context );

    hb_image_t *image = hb_buffer_to_image(preview_buf);

    // Clean up
    hb_buffer_close( &in_buf );
    hb_buffer_close( &deint_buf );
    hb_buffer_close( &preview_buf );

    return image;
}
/**
 * Create a preview image of the desired title at the given picture index.
 * @param h Handle to hb_handle_t.
 * @param job Handle to hb_job_t whose title and geometry are used.
 * @param picture Index in title.
 * @param buffer Buffer where the packed RGB32 image will be drawn; must
 *               hold at least job->width * job->height * 4 bytes.
 */
void hb_get_preview( hb_handle_t * h, hb_job_t * job, int picture,
                     uint8_t * buffer )
{
    hb_title_t * title = job->title;
    hb_buffer_t * in_buf, * deint_buf = NULL, * preview_buf;
    uint8_t * pen;
    uint32_t swsflags;
    AVPicture pic_in, pic_preview, pic_deint, pic_crop;
    struct SwsContext * context;
    int i;
    int preview_size;

    swsflags = SWS_LANCZOS | SWS_ACCURATE_RND;

    in_buf = hb_read_preview( h, title, picture );
    if ( in_buf == NULL )
    {
        return;
    }
    hb_avpicture_fill( &pic_in, in_buf );

    /* Allocate the output frame only after the preview was read; the old
     * code allocated it first and leaked it when hb_read_preview failed. */
    preview_buf = hb_frame_buffer_init( AV_PIX_FMT_RGB32,
                                        job->width, job->height );
    hb_avpicture_fill( &pic_preview, preview_buf );

    if( job->deinterlace )
    {
        // Deinterlace and crop
        deint_buf = hb_frame_buffer_init( AV_PIX_FMT_YUV420P,
                                          title->width, title->height );
        hb_deinterlace(deint_buf, in_buf);
        hb_avpicture_fill( &pic_deint, deint_buf );

        av_picture_crop( &pic_crop, &pic_deint, AV_PIX_FMT_YUV420P,
                         job->crop[0], job->crop[2] );
    }
    else
    {
        // Crop
        av_picture_crop( &pic_crop, &pic_in, AV_PIX_FMT_YUV420P,
                         job->crop[0], job->crop[2] );
    }

    // Get scaling context
    context = hb_sws_get_context(title->width  - (job->crop[2] + job->crop[3]),
                                 title->height - (job->crop[0] + job->crop[1]),
                                 AV_PIX_FMT_YUV420P,
                                 job->width, job->height, AV_PIX_FMT_RGB32,
                                 swsflags);
    if ( context == NULL )
    {
        /* Scaler setup failed; bail out without touching the caller's
         * buffer instead of crashing in sws_scale. */
        hb_error( "hb_get_preview: sws context allocation failed" );
        hb_buffer_close( &in_buf );
        hb_buffer_close( &deint_buf );
        hb_buffer_close( &preview_buf );
        return;
    }

    // Scale
    sws_scale(context,
              (const uint8_t* const *)pic_crop.data, pic_crop.linesize,
              0, title->height - (job->crop[0] + job->crop[1]),
              pic_preview.data, pic_preview.linesize);

    // Free context
    sws_freeContext( context );

    /* Copy the scaled rows into the caller's tightly packed buffer
       (the scaler's linesize may be wider than 4 * job->width). */
    preview_size = pic_preview.linesize[0];
    pen = buffer;
    for( i = 0; i < job->height; i++ )
    {
        memcpy( pen, pic_preview.data[0] + preview_size * i, 4 * job->width );
        pen += 4 * job->width;
    }

    // Clean up
    hb_buffer_close( &in_buf );
    hb_buffer_close( &deint_buf );
    hb_buffer_close( &preview_buf );
}
/**
 * Analyzes a frame to detect interlacing artifacts
 * and returns true if interlacing (combing) is found.
 *
 * Code taken from Thomas Oestreich's 32detect filter
 * in the Transcode project, with minor formatting changes.
 *
 * @param buf An hb_buffer structure holding valid frame data
 * @param color_equal Sensitivity for detecting similar colors
 * @param color_diff Sensitivity for detecting different colors
 * @param threshold Sensitivity for flagging planes as combed
 * @param prog_equal Sensitivity for detecting similar colors on progressive frames
 * @param prog_diff Sensitivity for detecting different colors on progressive frames
 * @param prog_threshold Sensitivity for flagging progressive frames as combed
 * @return 1 when combing is detected, 0 otherwise.
 */
int hb_detect_comb( hb_buffer_t * buf, int color_equal, int color_diff, int threshold, int prog_equal, int prog_diff, int prog_threshold )
{
    int j, k, n, off, cc_1, cc_2, cc[3];
    // int flag[3] ; // debugging flag
    uint16_t s1, s2, s3, s4;
    /* NOTE(review): cc_1/cc_2 are only reset here, not per plane, so each
       plane's score also includes the previous planes' counts — looks like
       an upstream quirk; confirm before changing. */
    cc_1 = 0; cc_2 = 0;
    /* Flag bit 16 marks the frame as progressive. */
    if ( buf->s.flags & 16 )
    {
        /* Frame is progressive, be more discerning. */
        color_diff = prog_diff;
        color_equal = prog_equal;
        threshold = prog_threshold;
    }
    /* One pass for Y, one pass for Cb, one pass for Cr */
    for( k = 0; k < 3; k++ )
    {
        uint8_t * data = buf->plane[k].data;
        int width = buf->plane[k].width;
        int stride = buf->plane[k].stride;
        int height = buf->plane[k].height;
        /* Walk each column, comparing vertically adjacent pixels. */
        for( j = 0; j < width; ++j )
        {
            off = 0;
            for( n = 0; n < ( height - 4 ); n = n + 2 )
            {
                /* Look at groups of 4 sequential horizontal lines */
                s1 = ( ( data )[ off + j ] & 0xff );
                s2 = ( ( data )[ off + j + stride ] & 0xff );
                s3 = ( ( data )[ off + j + 2 * stride ] & 0xff );
                s4 = ( ( data )[ off + j + 3 * stride ] & 0xff );
                /* Note if the 1st and 2nd lines are more different in
                   color than the 1st and 3rd lines are similar in color.*/
                if ( ( abs( s1 - s3 ) < color_equal ) &&
                     ( abs( s1 - s2 ) > color_diff ) )
                        ++cc_1;
                /* Note if the 2nd and 3rd lines are more different in
                   color than the 2nd and 4th lines are similar in color.*/
                if ( ( abs( s2 - s4 ) < color_equal ) &&
                     ( abs( s2 - s3 ) > color_diff) )
                        ++cc_2;
                /* Now move down 2 horizontal lines before starting over.*/
                off += 2 * stride;
            }
        }
        // compare results
        /* The final cc score for a plane is the percentage of combed pixels it contains.
           Because sensitivity goes down to hundredths of a percent, multiply by 1000
           so it will be easy to compare against the threshold value which is an integer. */
        cc[k] = (int)( ( cc_1 + cc_2 ) * 1000.0 / ( width * height ) );
    }
    /* HandBrake is all yuv420, so weight the average percentage of all 3 planes accordingly.*/
    int average_cc = ( 2 * cc[0] + ( cc[1] / 2 ) + ( cc[2] / 2 ) ) / 3;
    /* Now see if that average percentage of combed pixels surpasses the threshold percentage given by the user.*/
    if( average_cc > threshold )
    {
#if 0
            hb_log("Average %i combed (Threshold %i) %i/%i/%i | PTS: %"PRId64" (%fs) %s", average_cc, threshold, cc[0], cc[1], cc[2], buf->start, (float)buf->start / 90000, (buf->flags & 16) ? "Film" : "Video" );
#endif
        return 1;
    }
#if 0
    hb_log("SKIPPED Average %i combed (Threshold %i) %i/%i/%i | PTS: %"PRId64" (%fs) %s", average_cc, threshold, cc[0], cc[1], cc[2], buf->start, (float)buf->start / 90000, (buf->flags & 16) ? "Film" : "Video" );
#endif
    /* Reaching this point means no combing detected. */
    return 0;
}
/**
 * Calculates destination width and height for anamorphic content
 *
 * Returns calculated geometry
 * @param src_geo - Pointer to source geometry info
 * @param ui_geo  - Pointer to requested destination parameters
 * @param result  - Receives the computed storage dimensions and PAR
 */
void hb_set_anamorphic_size2(hb_geometry_t *src_geo,
                             hb_ui_geometry_t *ui_geo,
                             hb_geometry_t *result)
{
    hb_rational_t in_par, out_par;
    int keep_display_aspect = !!(ui_geo->keep & HB_KEEP_DISPLAY_ASPECT);
    int keep_height = !!(ui_geo->keep & HB_KEEP_HEIGHT);
    /* Set up some variables to make the math easier to follow. */
    int cropped_width = src_geo->width - ui_geo->crop[2] - ui_geo->crop[3];
    int cropped_height = src_geo->height - ui_geo->crop[0] - ui_geo->crop[1];
    double storage_aspect = (double)cropped_width / cropped_height;
    /* Alignment modulus for output dimensions; forced even, default 2. */
    int mod = ui_geo->modulus ? EVEN(ui_geo->modulus) : 2;
    // Use 64 bits to avoid overflow till the final hb_reduce() call
    hb_reduce(&in_par.num, &in_par.den, ui_geo->par.num, ui_geo->par.den);
    int64_t dst_par_num = in_par.num;
    int64_t dst_par_den = in_par.den;
    hb_rational_t src_par = src_geo->par;
    /* If a source was really NTSC or PAL and the user specified ITU PAR
       values, replace the standard PAR values with the ITU broadcast ones. */
    if (src_geo->width == 720 && ui_geo->itu_par)
    {
        // convert aspect to a scaled integer so we can test for 16:9 & 4:3
        // aspect ratios ignoring insignificant differences in the LSBs of
        // the floating point representation.
        int iaspect = src_geo->width * src_par.num * 9. /
                      (src_geo->height * src_par.den);
        /* Handle ITU PARs */
        if (src_geo->height == 480)
        {
            /* It's NTSC */
            if (iaspect == 16)
            {
                /* It's widescreen */
                dst_par_num = 40;
                dst_par_den = 33;
            }
            else if (iaspect == 12)
            {
                /* It's 4:3 */
                dst_par_num = 10;
                dst_par_den = 11;
            }
        }
        else if (src_geo->height == 576)
        {
            /* It's PAL */
            if (iaspect == 16)
            {
                /* It's widescreen */
                dst_par_num = 16;
                dst_par_den = 11;
            }
            else if (iaspect == 12)
            {
                /* It's 4:3 */
                dst_par_num = 12;
                dst_par_den = 11;
            }
        }
    }
    /*
       3 different ways of deciding output dimensions:
        - 1: Strict anamorphic, preserve source dimensions
        - 2: Loose anamorphic, round to mod16 and preserve storage aspect ratio
        - 3: Power user anamorphic, specify everything
    */
    int width, height;
    int maxWidth, maxHeight;
    /* Clamp the limits to the modulus, with a floor of 32 pixels to avoid
       degenerate output sizes (0 disables the limit). */
    maxWidth = MULTIPLE_MOD_DOWN(ui_geo->maxWidth, mod);
    maxHeight = MULTIPLE_MOD_DOWN(ui_geo->maxHeight, mod);
    if (maxWidth && maxWidth < 32)
        maxWidth = 32;
    if (maxHeight && maxHeight < 32)
        maxHeight = 32;
    switch (ui_geo->mode)
    {
        case HB_ANAMORPHIC_NONE:
        {
            double par, cropped_sar, dar;
            par = (double)src_geo->par.num / src_geo->par.den;
            cropped_sar = (double)cropped_width / cropped_height;
            dar = par * cropped_sar;
            /* "None" anamorphic. a.k.a. non-anamorphic
             *  - Uses mod-compliant dimensions, set by user
             *  - Allows users to set the either width *or* height
             */
            if (keep_display_aspect)
            {
                if (!keep_height)
                {
                    width = MULTIPLE_MOD_UP(ui_geo->width, mod);
                    height = MULTIPLE_MOD(width / dar, mod);
                }
                else
                {
                    height = MULTIPLE_MOD_UP(ui_geo->height, mod);
                    width = MULTIPLE_MOD(height * dar, mod);
                }
            }
            else
            {
                width = MULTIPLE_MOD_UP(ui_geo->width, mod);
                height = MULTIPLE_MOD_UP(ui_geo->height, mod);
            }
            /* Re-derive the other dimension when a max limit kicks in,
               keeping the display aspect. */
            if (maxWidth && (width > maxWidth))
            {
                width = maxWidth;
                height = MULTIPLE_MOD(width / dar, mod);
            }
            if (maxHeight && (height > maxHeight))
            {
                height = maxHeight;
                width = MULTIPLE_MOD(height * dar, mod);
            }
            /* Non-anamorphic output always uses square pixels. */
            dst_par_num = dst_par_den = 1;
        } break;
        default:
        case HB_ANAMORPHIC_STRICT:
        {
            /* "Strict" anamorphic.
             *  - Uses mod2-compliant dimensions,
             *  - Forces title - crop dimensions
             */
            width = MULTIPLE_MOD_UP(cropped_width, 2);
            height = MULTIPLE_MOD_UP(cropped_height, 2);
            /* Adjust the output PAR for new width/height
             * Film AR is the source display width / cropped source height.
             * Output display width is the output height * film AR.
             * Output PAR is the output display width / output storage width.
             *
             * i.e.
             * source_display_width = cropped_width * source PAR
             * AR = source_display_width / cropped_height;
             * output_display_width = height * AR;
             * par = output_display_width / width;
             *
             * When these terms are reduced, you get the following...
             */
            dst_par_num = (int64_t)height * cropped_width  * src_par.num;
            dst_par_den = (int64_t)width  * cropped_height * src_par.den;
        } break;
        case HB_ANAMORPHIC_LOOSE:
        {
            /* "Loose" anamorphic.
             *  - Uses mod-compliant dimensions, set by user
             *  - Allows users to set the either width *or* height
             */
            if (!keep_height)
            {
                width = MULTIPLE_MOD_UP(ui_geo->width, mod);
                height = MULTIPLE_MOD_UP(width / storage_aspect + 0.5, mod);
            }
            else
            {
                height = MULTIPLE_MOD_UP(ui_geo->height, mod);
                width = MULTIPLE_MOD_UP(height * storage_aspect + 0.5, mod);
            }
            /* Re-derive the other dimension when a max limit kicks in,
               keeping the storage aspect. */
            if (maxWidth && (maxWidth < width))
            {
                width = maxWidth;
                height = MULTIPLE_MOD(width / storage_aspect + 0.5, mod);
            }
            if (maxHeight && (maxHeight < height))
            {
                height = maxHeight;
                width = MULTIPLE_MOD(height * storage_aspect + 0.5, mod);
            }
            /* Adjust the output PAR for new width/height
               See comment in HB_ANAMORPHIC_STRICT */
            dst_par_num = (int64_t)height * cropped_width  * src_par.num;
            dst_par_den = (int64_t)width  * cropped_height * src_par.den;
        } break;
        case HB_ANAMORPHIC_CUSTOM:
        {
            /* Anamorphic 3: Power User Jamboree
               - Set everything based on specified values */
            /* Use specified storage dimensions */
            storage_aspect = (double)ui_geo->width / ui_geo->height;
            /* Time to get picture dimensions that divide cleanly.*/
            width  = MULTIPLE_MOD_UP(ui_geo->width, mod);
            height = MULTIPLE_MOD_UP(ui_geo->height, mod);
            /* Bind to max dimensions */
            if (maxWidth && width > maxWidth)
            {
                width = maxWidth;
                // If we are keeping the display aspect, then we are going
                // to be modifying the PAR anyway.  So it's preferred
                // to let the width/height stray some from the original
                // requested storage aspect.
                //
                // But otherwise, PAR and DAR will change the least
                // if we stay as close as possible to the requested
                // storage aspect.
                if (!keep_display_aspect)
                {
                    height = width / storage_aspect + 0.5;
                    height = MULTIPLE_MOD(height, mod);
                }
            }
            if (maxHeight && height > maxHeight)
            {
                height = maxHeight;
                // Ditto, see comment above
                if (!keep_display_aspect)
                {
                    width = height * storage_aspect + 0.5;
                    width = MULTIPLE_MOD(width, mod);
                }
            }
            /* That finishes the storage dimensions. On to display. */
            if (ui_geo->dar.num && ui_geo->dar.den)
            {
                /* We need to adjust the PAR to produce this aspect. */
                dst_par_num = (int64_t)height * ui_geo->dar.num /
                                                ui_geo->dar.den;
                dst_par_den = width;
            }
            else
            {
                if (keep_display_aspect)
                {
                    /* We can ignore the possibility of a PAR change
                     * Adjust the output PAR for new width/height
                     * See comment in HB_ANAMORPHIC_STRICT
                     */
                    dst_par_num = (int64_t)height * cropped_width  *
                                           src_par.num;
                    dst_par_den = (int64_t)width  * cropped_height *
                                           src_par.den;
                }
                else
                {
                    /* If the dimensions were changed by the modulus
                     * or by maxWidth/maxHeight, we also change the
                     * output PAR so that the DAR is unchanged.
                     *
                     * PAR is the requested output display width / storage width
                     * requested output display width is the original
                     * requested width * original requested PAR
                     */
                    dst_par_num = ui_geo->width * dst_par_num;
                    dst_par_den = width * dst_par_den;
                }
            }
        } break;
    }
    /* Pass the results back to the caller */
    result->width = width;
    result->height = height;
    /* While x264 is smart enough to reduce fractions on its own, libavcodec
     * needs some help with the math, so lose superfluous factors. */
    hb_limit_rational64(&dst_par_num, &dst_par_den,
                        dst_par_num, dst_par_den, 65535);
    /* If the user is directly updating PAR, don't override his values */
    hb_reduce(&out_par.num, &out_par.den, dst_par_num, dst_par_den);
    if (ui_geo->mode == HB_ANAMORPHIC_CUSTOM && !keep_display_aspect &&
        out_par.num == in_par.num && out_par.den == in_par.den)
    {
        result->par.num = ui_geo->par.num;
        result->par.den = ui_geo->par.den;
    }
    else
    {
        hb_reduce(&result->par.num, &result->par.den, dst_par_num, dst_par_den);
    }
}
/**
 * Calculates job width and height for anamorphic content.
 * Thin wrapper: packs the job's settings into the geometry structs and
 * delegates to hb_set_anamorphic_size2.
 * @param job Handle to hb_job_t
 * @param output_width Pointer to returned storage width
 * @param output_height Pointer to returned storage height
 * @param output_par_width Pointer to returned pixel width
 * @param output_par_height Pointer to returned pixel height
 */
void hb_set_anamorphic_size( hb_job_t * job,
        int *output_width, int *output_height,
        int *output_par_width, int *output_par_height )
{
    hb_geometry_t    geo_result;
    hb_geometry_t    source;
    hb_ui_geometry_t requested;

    /* Source geometry comes from the title. */
    source.width   = job->title->width;
    source.height  = job->title->height;
    source.par.num = job->title->pixel_aspect_width;
    source.par.den = job->title->pixel_aspect_height;

    /* Requested geometry comes from the job settings. */
    requested.width     = job->width;
    requested.height    = job->height;
    requested.par.num   = job->anamorphic.par_width;
    requested.par.den   = job->anamorphic.par_height;
    requested.modulus   = job->modulus;
    requested.maxWidth  = job->maxWidth;
    requested.maxHeight = job->maxHeight;
    requested.mode      = job->anamorphic.mode;
    requested.keep      = job->anamorphic.keep_display_aspect ?
                          HB_KEEP_DISPLAY_ASPECT : 0;
    requested.itu_par   = job->anamorphic.itu_par;
    requested.dar.num   = job->anamorphic.dar_width;
    requested.dar.den   = job->anamorphic.dar_height;
    memcpy(requested.crop, job->crop, sizeof(int[4]));

    hb_set_anamorphic_size2(&source, &requested, &geo_result);

    *output_width      = geo_result.width;
    *output_height     = geo_result.height;
    *output_par_width  = geo_result.par.num;
    *output_par_height = geo_result.par.den;
}
/**
 * Add a filter to a job's filter list.
 * Filters with enforce_order set are kept sorted by id; attempting to add
 * a filter whose id is already present closes and discards the new one.
 * @param job Handle to hb_job_t
 * @param filter the filter to insert (ownership passes to the job)
 * @param settings_in settings string for the filter (copied), may be NULL
 */
void hb_add_filter( hb_job_t * job, hb_filter_object_t * filter, const char * settings_in )
{
    /* The filter keeps its own copy of the settings string. */
    filter->settings = ( settings_in != NULL ) ? strdup( settings_in ) : NULL;

    if( filter->enforce_order )
    {
        /* Keep the chain sorted by filter id. */
        int pos;
        int count = hb_list_count( job->list_filter );
        for( pos = 0; pos < count; pos++ )
        {
            hb_filter_object_t * cur = hb_list_item( job->list_filter, pos );
            if( cur->id == filter->id )
            {
                /* Don't allow the same filter to be added twice. */
                hb_filter_close( &filter );
                return;
            }
            if( cur->id > filter->id )
            {
                hb_list_insert( job->list_filter, pos, filter );
                return;
            }
        }
    }
    /* No position found or order not enforced for this filter. */
    hb_list_add( job->list_filter, filter );
}
/**
 * Validate and adjust dimensions if necessary.
 * Recomputes the job's storage size and pixel aspect ratio through
 * hb_set_anamorphic_size and writes the results back into the job.
 * @param job Handle to hb_job_t
 */
void hb_validate_size( hb_job_t * job )
{
    int storage_w, storage_h;
    int par_w, par_h;

    hb_set_anamorphic_size( job, &storage_w, &storage_h, &par_w, &par_h );

    job->width                 = storage_w;
    job->height                = storage_h;
    job->anamorphic.par_width  = par_w;
    job->anamorphic.par_height = par_h;
}
/**
 * Returns the number of jobs in the queue.
 * @param h Handle to hb_handle_t.
 * @return Number of jobs currently in h->jobs.
 */
int hb_count( hb_handle_t * h )
{
    return hb_list_count( h->jobs );
}
/**
 * Returns handle to job at index i within the job list.
 * @param h Handle to hb_handle_t.
 * @param i Index of job.
 * @returns Handle to hb_job_t of desired job (presumably NULL when i is
 *          out of range — confirm against hb_list_item).
 */
hb_job_t * hb_job( hb_handle_t * h, int i )
{
    return hb_list_item( h->jobs, i );
}
/**
 * Returns the job currently being processed by the work thread.
 * @param h Handle to hb_handle_t.
 * @return Handle to the in-flight hb_job_t (owned by libhb).
 */
hb_job_t * hb_current_job( hb_handle_t * h )
{
    return( h->current_job );
}
/**
 * Adds a job to the job list.
 * The job is deep-copied (subtitle, chapter, audio, attachment, metadata
 * and filter lists plus the encoder strings), so the caller keeps
 * ownership of the original.
 * @param h Handle to hb_handle_t.
 * @param job Handle to hb_job_t.
 */
void hb_add( hb_handle_t * h, hb_job_t * job )
{
    hb_job_t      * job_copy;
    hb_audio_t    * audio;
    hb_subtitle_t * subtitle;
    int             i;
    char            audio_lang[4];

    /* Copy the job */
    job_copy = calloc( sizeof( hb_job_t ), 1 );
    if( job_copy == NULL )
    {
        /* Fix: the old code dereferenced an unchecked calloc result. */
        hb_error( "hb_add: calloc failed" );
        return;
    }
    memcpy( job_copy, job, sizeof( hb_job_t ) );

    /* If we're doing Foreign Audio Search, copy all subtitles matching the
     * first audio track language we find in the audio list.
     *
     * Otherwise, copy all subtitles found in the input job (which can be
     * manually selected by the user, or added after the Foreign Audio
     * Search pass). */
    memset( audio_lang, 0, sizeof( audio_lang ) );

    if( job->indepth_scan )
    {
        /* Find the first audio language that is being encoded, then add all the
         * matching subtitles for that language. */
        for( i = 0; i < hb_list_count( job->list_audio ); i++ )
        {
            if( ( audio = hb_list_item( job->list_audio, i ) ) )
            {
                /* Copy at most 3 chars so audio_lang (zeroed above) is
                 * always NUL-terminated; the old full-size strncpy could
                 * leave it unterminated.  ISO 639-2 codes are 3 chars. */
                strncpy( audio_lang, audio->config.lang.iso639_2,
                         sizeof( audio_lang ) - 1 );
                break;
            }
        }

        /*
         * If doing a subtitle scan then add all the matching subtitles for this
         * language.
         */
        job_copy->list_subtitle = hb_list_init();

        for( i = 0; i < hb_list_count( job->title->list_subtitle ); i++ )
        {
            subtitle = hb_list_item( job->title->list_subtitle, i );
            if( strcmp( subtitle->iso639_2, audio_lang ) == 0 &&
                hb_subtitle_can_force( subtitle->source ) )
            {
                /* Matched subtitle language with audio language, so add this to
                 * our list to scan.
                 *
                 * We will update the subtitle list on the next pass later, after
                 * the subtitle scan pass has completed. */
                hb_list_add( job_copy->list_subtitle,
                             hb_subtitle_copy( subtitle ) );
            }
        }
    }
    else
    {
        /* Copy all subtitles from the input job to title_copy/job_copy. */
        job_copy->list_subtitle = hb_subtitle_list_copy( job->list_subtitle );
    }

    job_copy->list_chapter = hb_chapter_list_copy( job->list_chapter );
    job_copy->list_audio = hb_audio_list_copy( job->list_audio );
    job_copy->list_attachment = hb_attachment_list_copy( job->list_attachment );
    job_copy->metadata = hb_metadata_copy( job->metadata );

    /* Duplicate the encoder strings so job_copy owns its own copies. */
    if (job->encoder_preset != NULL)
        job_copy->encoder_preset = strdup(job->encoder_preset);
    if (job->encoder_tune != NULL)
        job_copy->encoder_tune = strdup(job->encoder_tune);
    if (job->encoder_options != NULL)
        job_copy->encoder_options = strdup(job->encoder_options);
    if (job->encoder_profile != NULL)
        job_copy->encoder_profile = strdup(job->encoder_profile);
    if (job->encoder_level != NULL)
        job_copy->encoder_level = strdup(job->encoder_level);
    if (job->file != NULL)
        job_copy->file = strdup(job->file);

    job_copy->h     = h;
    job_copy->pause = h->pause_lock;

    /* Copy the job filter list */
    job_copy->list_filter = hb_filter_list_copy( job->list_filter );

    /* Add the job to the list */
    hb_list_add( h->jobs, job_copy );
    h->job_count = hb_count(h);
    h->job_count_permanent++;
}
/**
 * Removes a job from the job list.
 * Note: the job's resources are not freed here (see XXX below).
 * @param h Handle to hb_handle_t.
 * @param job Handle to hb_job_t.
 */
void hb_rem( hb_handle_t * h, hb_job_t * job )
{
    hb_list_rem( h->jobs, job );

    h->job_count = hb_count( h );
    if( h->job_count_permanent != 0 )
    {
        h->job_count_permanent--;
    }

    /* XXX free everything XXX */
}
/**
 * Starts the conversion process.
 * Sets state to HB_STATE_WORKING, resets the progress fields, and calls
 * hb_work_init to launch the work thread; its handle is stored on h.
 * @param h Handle to hb_handle_t.
 */
void hb_start( hb_handle_t * h )
{
    /* XXX Hack */
    h->job_count = hb_list_count( h->jobs );
    h->job_count_permanent = h->job_count;

    /* Reset the "working" progress fields under the state lock
       (spelled out instead of the old #define p macro trick). */
    hb_lock( h->state_lock );
    h->state.state = HB_STATE_WORKING;
    h->state.param.working.progress    = 0.0;
    h->state.param.working.job_cur     = 1;
    h->state.param.working.job_count   = h->job_count;
    h->state.param.working.rate_cur    = 0.0;
    h->state.param.working.rate_avg    = 0.0;
    h->state.param.working.hours       = -1;
    h->state.param.working.minutes     = -1;
    h->state.param.working.seconds     = -1;
    h->state.param.working.sequence_id = 0;
    hb_unlock( h->state_lock );

    h->paused = 0;

    h->work_die    = 0;
    h->work_error  = HB_ERROR_NONE;
    h->work_thread = hb_work_init( h->jobs, &h->work_die, &h->work_error, &h->current_job );
}
/**
 * Pauses the conversion process.
 * No-op when already paused.  Takes the pause lock (released again by
 * hb_resume), records the pause time on the current job and switches the
 * reported state to HB_STATE_PAUSED.
 * @param h Handle to hb_handle_t.
 */
void hb_pause( hb_handle_t * h )
{
    if( h->paused )
    {
        return;
    }

    hb_lock( h->pause_lock );
    h->paused = 1;

    hb_current_job( h )->st_pause_date = hb_get_date();

    hb_lock( h->state_lock );
    h->state.state = HB_STATE_PAUSED;
    hb_unlock( h->state_lock );
}
/**
 * Resumes the conversion process.
 * No-op when not paused.  Accounts the paused interval on the current job
 * and releases the pause lock taken by hb_pause.
 * @param h Handle to hb_handle_t.
 */
void hb_resume( hb_handle_t * h )
{
    if( !h->paused )
    {
        return;
    }

    /* Local variable instead of the old #define job macro. */
    hb_job_t * job = hb_current_job( h );
    if( job->st_pause_date != -1 )
    {
        job->st_paused += hb_get_date() - job->st_pause_date;
    }

    hb_unlock( h->pause_lock );
    h->paused = 0;
}
/**
 * Stops the conversion process.
 * Signals the work thread to die (it is reaped by thread_func) and
 * resumes first if paused so the thread can actually exit.
 * @param h Handle to hb_handle_t.
 */
void hb_stop( hb_handle_t * h )
{
    h->work_die = 1;
    h->job_count = hb_count(h);
    h->job_count_permanent = 0;
    hb_resume( h );
}
/**
 * Stops the scan process.
 * Signals the scan thread to die (it is reaped by thread_func) and
 * resumes first if paused so the thread can actually exit.
 * @param h Handle to hb_handle_t.
 */
void hb_scan_stop( hb_handle_t * h )
{
    h->scan_die = 1;
    h->job_count = hb_count(h);
    h->job_count_permanent = 0;
    hb_resume( h );
}
/**
 * Returns the state of the conversion process.
 * Side effect: reading a SCANDONE or WORKDONE state resets the internal
 * state to IDLE, so each "done" event is delivered to the caller once.
 * @param h Handle to hb_handle_t.
 * @param s Handle to hb_state_t which to copy the state data.
 */
void hb_get_state( hb_handle_t * h, hb_state_t * s )
{
    hb_lock( h->state_lock );
    memcpy( s, &h->state, sizeof( hb_state_t ) );
    if ( h->state.state == HB_STATE_SCANDONE || h->state.state == HB_STATE_WORKDONE )
        h->state.state = HB_STATE_IDLE;
    hb_unlock( h->state_lock );
}
/**
 * Returns the state of the conversion process without consuming
 * SCANDONE/WORKDONE events (unlike hb_get_state).
 * @param h Handle to hb_handle_t.
 * @param s Handle to hb_state_t which receives a copy of the state data.
 */
void hb_get_state2( hb_handle_t * h, hb_state_t * s )
{
    hb_lock( h->state_lock );
    memcpy( s, &h->state, sizeof( hb_state_t ) );
    hb_unlock( h->state_lock );
}
/**
 * Called in MacGui in UpdateUI to check
 * for a new scan being completed to set a new source
 * @param h Handle to hb_handle_t.
 * @return Number of scans completed by this instance so far.
 */
int hb_get_scancount( hb_handle_t * h)
{
    return h->scanCount;
}
/**
 * Closes access to libhb by freeing the hb_handle_t handle obtained in hb_init.
 * Stops the monitor thread, frees all titles, then the lists, locks,
 * power-management opaque and interjob data, and NULLs the caller's pointer.
 * @param _h Pointer to handle to hb_handle_t.
 */
void hb_close( hb_handle_t ** _h )
{
    hb_handle_t * h = *_h;
    hb_title_t * title;

    /* Signal thread_func to exit and wait for it. */
    h->die = 1;
    hb_thread_close( &h->main_thread );

    while( ( title = hb_list_item( h->title_set.list_title, 0 ) ) )
    {
        hb_list_rem( h->title_set.list_title, title );
        hb_title_close( &title );
    }

    hb_list_close( &h->title_set.list_title );
    hb_list_close( &h->jobs );
    hb_lock_close( &h->state_lock );
    hb_lock_close( &h->pause_lock );
    hb_system_sleep_opaque_close(&h->system_sleep_opaque);
    free( h->interjob );
    free( h );
    *_h = NULL;
}
/**
 * Process-level libhb initialization: platform setup, optional QSV probe,
 * libavcodec init, and registration of every work object (muxer, reader,
 * sync, decoders, subtitle handlers, encoders).
 * Call once before creating instances with hb_init.
 * @return 0 on success, -1 when platform or QSV initialization fails.
 */
int hb_global_init()
{
    int result = 0;

    result = hb_platform_init();
    if (result < 0)
    {
        hb_error("Platform specific initialization failed!");
        return -1;
    }

#ifdef USE_QSV
    result = hb_qsv_info_init();
    if (result < 0)
    {
        hb_error("hb_qsv_info_init failed!");
        return -1;
    }
#endif

    /* libavcodec */
    hb_avcodec_init();

    /* HB work objects */
    hb_register(&hb_muxer);
    hb_register(&hb_reader);
    hb_register(&hb_sync_video);
    hb_register(&hb_sync_audio);
    hb_register(&hb_decavcodecv);
    hb_register(&hb_decavcodeca);
    hb_register(&hb_declpcm);
    hb_register(&hb_deccc608);
    hb_register(&hb_decpgssub);
    hb_register(&hb_decsrtsub);
    hb_register(&hb_decssasub);
    hb_register(&hb_dectx3gsub);
    hb_register(&hb_decutf8sub);
    hb_register(&hb_decvobsub);
    hb_register(&hb_encvobsub);
    hb_register(&hb_encavcodec);
    hb_register(&hb_encavcodeca);
#ifdef __APPLE__
    hb_register(&hb_encca_aac);
    hb_register(&hb_encca_haac);
#endif
    hb_register(&hb_enclame);
    hb_register(&hb_enctheora);
    hb_register(&hb_encvorbis);
    hb_register(&hb_encx264);
#ifdef USE_X265
    hb_register(&hb_encx265);
#endif
#ifdef USE_QSV
    hb_register(&hb_encqsv);
#endif

    hb_common_global_init();

    return result;
}
/**
 * Cleans up libhb at a process level. Call before the app closes.
 * Removes the temporary preview directory and its contents.
 */
void hb_global_close()
{
    char dirname[1024];
    DIR * dir;
    struct dirent * entry;

    /* Find and remove temp folder */
    memset( dirname, 0, 1024 );
    hb_get_temporary_directory( dirname );

    dir = opendir( dirname );
    if (dir)
    {
        while( ( entry = readdir( dir ) ) )
        {
            char filename[1024];
            /* Skip ".", ".." and hidden entries. */
            if( entry->d_name[0] == '.' )
            {
                continue;
            }
            /* snprintf NUL-terminates on its own, so the old per-entry
             * memset was redundant; also pass the full buffer size (the
             * size argument includes room for the NUL — the old code's
             * 1023 needlessly wasted a byte). */
            snprintf( filename, sizeof( filename ), "%s/%s",
                      dirname, entry->d_name );
            unlink( filename );
        }
        closedir( dir );
        rmdir( dirname );
    }
}
/**
 * Monitors the state of the update, scan, and work threads.
 * Sets scan done state when scan thread exits.
 * Sets work done state when work thread exits.
 * Also creates the temporary directory on startup and, once h->die is
 * set, stops and reaps any remaining worker threads and removes previews.
 * @param _h Handle to hb_handle_t
 */
static void thread_func( void * _h )
{
    hb_handle_t * h = (hb_handle_t *) _h;
    char dirname[1024];

    h->pid = getpid();

    /* Create folder for temporary files */
    memset( dirname, 0, 1024 );
    hb_get_temporary_directory( dirname );
    hb_mkdir( dirname );

    while( !h->die )
    {
        /* In case the check_update thread hangs, it'll die sooner or
           later. Then, we join it here */
        if( h->update_thread &&
            hb_thread_has_exited( h->update_thread ) )
        {
            hb_thread_close( &h->update_thread );
        }

        /* Check if the scan thread is done */
        if( h->scan_thread &&
            hb_thread_has_exited( h->scan_thread ) )
        {
            hb_thread_close( &h->scan_thread );
            if ( h->scan_die )
            {
                /* Scan was canceled: throw away partial results. */
                hb_title_t * title;
                hb_remove_previews( h );
                while( ( title = hb_list_item( h->title_set.list_title, 0 ) ) )
                {
                    hb_list_rem( h->title_set.list_title, title );
                    hb_title_close( &title );
                }
                hb_log( "hb_scan: canceled" );
            }
            else
            {
                hb_log( "libhb: scan thread found %d valid title(s)",
                        hb_list_count( h->title_set.list_title ) );
            }
            hb_lock( h->state_lock );
            h->state.state = HB_STATE_SCANDONE; //originally state.state
            hb_unlock( h->state_lock );
            /*we increment this sessions scan count by one for the MacGui
              to trigger a new source being set */
            h->scanCount++;
        }

        /* Check if the work thread is done */
        if( h->work_thread &&
            hb_thread_has_exited( h->work_thread ) )
        {
            hb_thread_close( &h->work_thread );
            hb_log( "libhb: work result = %d",
                    h->work_error );
            hb_lock( h->state_lock );
            h->state.state = HB_STATE_WORKDONE;
            h->state.param.workdone.error = h->work_error;
            h->job_count = hb_count(h);
            if (h->job_count < 1)
                h->job_count_permanent = 0;
            hb_unlock( h->state_lock );
        }

        /* Poll the worker threads at 50 ms intervals. */
        hb_snooze( 50 );
    }

    /* Shutting down: stop and reap any thread still running. */
    if( h->scan_thread )
    {
        hb_scan_stop( h );
        hb_thread_close( &h->scan_thread );
    }
    if( h->work_thread )
    {
        hb_stop( h );
        hb_thread_close( &h->work_thread );
    }
    hb_remove_previews( h );
}
/**
 * Redirects stderr to the registered callback function.
 * Reads the write end of a pipe line-by-line and forwards each line
 * to hb_log_callback. Normally runs for the life of the process.
 * @param _data Unused.
 */
static void redirect_thread_func(void * _data)
{
    int pfd[2];

    /* BUGFIX: the pipe() result was ignored; on failure pfd is
     * indeterminate and dup2/fdopen would operate on garbage fds. */
    if (pipe(pfd) != 0)
    {
        hb_error("redirect_thread_func: pipe() failed");
        return;
    }
#if defined( SYS_MINGW )
    // dup2 doesn't work on windows for some stupid reason
    stderr->_file = pfd[1];
#else
    dup2(pfd[1], /*stderr*/ 2);
#endif
    /* BUGFIX: fdopen() can fail; the old code would pass NULL to fgets. */
    FILE * log_f = fdopen(pfd[0], "rb");
    if (log_f == NULL)
    {
        return;
    }

    char line_buffer[500];
    while (fgets(line_buffer, sizeof(line_buffer), log_f) != NULL)
    {
        hb_log_callback(line_buffer);
    }
    /* Only reached if the write end is closed; release the stream
     * (and pfd[0] with it) instead of leaking it. */
    fclose(log_f);
}
/**
 * Returns the PID.
 * The pid field is assigned by thread_func via getpid().
 * @param h Handle to hb_handle_t
 * @returns Process ID of this libhb instance's process.
 */
int hb_get_pid( hb_handle_t * h )
{
    return h->pid;
}
/**
 * Returns the id for the given instance.
 * @param h Handle to hb_handle_t
 * @returns The ID for the given instance
 */
int hb_get_instance_id( hb_handle_t * h )
{
    return h->id;
}
/**
 * Sets the current state.
 * Copies *s into the handle's state and, while WORKING/SEARCHING,
 * patches in job progress counters and the active job's sequence id.
 * Lock order is pause_lock then state_lock — keep it consistent with
 * other callers to avoid deadlock.
 * @param h Handle to hb_handle_t
 * @param s Handle to new hb_state_t
 */
void hb_set_state( hb_handle_t * h, hb_state_t * s )
{
    hb_lock( h->pause_lock );
    hb_lock( h->state_lock );
    memcpy( &h->state, s, sizeof( hb_state_t ) );
    if( h->state.state == HB_STATE_WORKING ||
        h->state.state == HB_STATE_SEARCHING )
    {
        /* XXX Hack: job_count_permanent tracks the queue length at the
         * start of the run so progress can be reported as "job X of Y" */
        if (h->job_count < 1)
            h->job_count_permanent = 1;

        h->state.param.working.job_cur =
            h->job_count_permanent - hb_list_count( h->jobs );
        h->state.param.working.job_count = h->job_count_permanent;

        // Set which job is being worked on
        if (h->current_job)
            h->state.param.working.sequence_id = h->current_job->sequence_id;
        else
            h->state.param.working.sequence_id = 0;
    }
    hb_unlock( h->state_lock );
    hb_unlock( h->pause_lock );
}
/* Re-allow the OS to sleep the system (wrapper over the
 * platform-specific implementation). */
void hb_system_sleep_allow(hb_handle_t *h)
{
    hb_system_sleep_private_enable(h->system_sleep_opaque);
}
/* Prevent the OS from sleeping the system, e.g. while encoding
 * (wrapper over the platform-specific implementation). */
void hb_system_sleep_prevent(hb_handle_t *h)
{
    hb_system_sleep_private_disable(h->system_sleep_opaque);
}
/* Passes a pointer to persistent data shared between consecutive jobs
 * (e.g. two-pass encode state). Owned by the handle; do not free. */
hb_interjob_t * hb_interjob_get( hb_handle_t * h )
{
    return h->interjob;
}
HandBrake-0.10.2/libhb/hb_dict.c 0000664 0001752 0001752 00000014447 12463330511 016667 0 ustar handbrake handbrake /* hb_dict.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
#include "hb_dict.h"
/**
 * Allocates an empty dictionary with room for 'alloc' entries.
 * @param alloc initial capacity; values < 1 are bumped to 1 so that
 *              the doubling growth in hb_dict_set always makes progress.
 * @returns the new dictionary, or NULL on allocation failure.
 */
hb_dict_t * hb_dict_init( int alloc )
{
    hb_dict_t * dict = NULL;
    dict = malloc( sizeof( hb_dict_t ) );
    if( !dict )
    {
        hb_log( "ERROR: could not allocate hb_dict_t" );
        return NULL;
    }
    dict->count = 0;
    /* BUGFIX: guard against alloc < 1 — an alloc of 0 used to survive
     * into hb_dict_set, where doubling 0 stays 0 (malloc(0) followed by
     * an out-of-bounds write). */
    if( alloc < 1 )
        alloc = 1;
    dict->objects = malloc( alloc * sizeof( hb_dict_entry_t ) );
    if( !dict->objects )
    {
        /* BUGFIX: the old code returned a half-initialized dict with
         * alloc == 0 here; fail cleanly instead and don't leak dict. */
        hb_log( "ERROR: could not allocate hb_dict_t objects" );
        free( dict );
        return NULL;
    }
    dict->alloc = alloc;
    return dict;
}
/* Frees a dictionary, all of its keys/values, and NULLs the caller's
 * pointer. Safe to call with *dict_ptr == NULL. */
void hb_dict_free( hb_dict_t ** dict_ptr )
{
    hb_dict_t * dict = *dict_ptr;

    if( dict == NULL )
        return;

    if( dict->objects != NULL )
    {
        int i;
        for( i = 0; i < dict->count; i++ )
        {
            /* free(NULL) is a no-op, so no per-field guards needed */
            free( dict->objects[i].key );
            free( dict->objects[i].value );
        }
        free( dict->objects );
    }
    free( dict );
    *dict_ptr = NULL;
}
/**
 * Inserts or updates the entry for 'key'.
 * An existing entry's value is replaced (empty/NULL value clears it);
 * otherwise a new entry is appended, growing the array as needed.
 * Keys and values are copied; NULL/empty keys are ignored.
 */
void hb_dict_set( hb_dict_t ** dict_ptr, const char * key, const char * value )
{
    hb_dict_t * dict = *dict_ptr;
    if( !dict )
    {
        hb_log( "hb_dict_set: NULL dictionary" );
        return;
    }
    if( !key || !strlen( key ) )
        return;

    hb_dict_entry_t * entry = hb_dict_get( dict, key );
    if( entry )
    {
        /* Key already present: replace its value */
        if( entry->value )
        {
            if( value && !strcmp( value, entry->value ) )
                return; /* unchanged */
            free( entry->value );
            entry->value = NULL;
        }
        if( value && strlen( value ) )
            entry->value = strdup( value );
    }
    else
    {
        if( dict->alloc <= dict->count )
        {
            /* BUGFIX: the old growth was malloc(2 * alloc), which is
             * malloc(0) when alloc == 0 and then wrote past the end of
             * the zero-sized buffer. Grow from a non-zero floor, and use
             * realloc instead of malloc+memcpy+free. */
            int new_alloc = dict->alloc > 0 ? dict->alloc * 2 : 8;
            hb_dict_entry_t * tmp =
                realloc( dict->objects, new_alloc * sizeof( hb_dict_entry_t ) );
            if( !tmp )
            {
                hb_log( "ERROR: could not realloc hb_dict_t objects" );
                return; /* dict->objects is still valid */
            }
            dict->objects = tmp;
            dict->alloc   = new_alloc;
        }
        dict->objects[dict->count].key = strdup( key );
        if( value && strlen( value ) )
            dict->objects[dict->count].value = strdup( value );
        else
            dict->objects[dict->count].value = NULL;
        dict->count++;
    }
}
/**
 * Removes every entry whose key matches 'key', freeing its storage and
 * compacting the array. No-op for NULL/empty dictionaries or keys.
 */
void hb_dict_unset( hb_dict_t ** dict_ptr, const char * key )
{
    hb_dict_t * dict = *dict_ptr;
    if( !dict || !dict->objects || !key || !strlen( key ) )
        return;

    int i = 0;
    while( i < dict->count )
    {
        if( !strcmp( key, dict->objects[i].key ) )
        {
            free( dict->objects[i].key );
            free( dict->objects[i].value ); /* free(NULL) is a no-op */
            dict->count--;
            if( i != dict->count )
                memmove( &dict->objects[i], &dict->objects[i+1],
                         sizeof( hb_dict_entry_t ) * ( dict->count - i ) );
            /* BUGFIX: do not advance i here — the element shifted into
             * slot i has not been examined yet. The old for-loop's i++
             * skipped it, so adjacent duplicate keys could survive. */
        }
        else
        {
            i++;
        }
    }
}
/* Looks up 'key' by linear scan; returns the matching entry or NULL.
 * NULL/empty dictionaries or keys yield NULL. */
hb_dict_entry_t * hb_dict_get( hb_dict_t * dict, const char * key )
{
    int i;

    if( dict == NULL || dict->objects == NULL ||
        key == NULL || *key == '\0' )
        return NULL;

    for( i = 0; i < dict->count; i++ )
    {
        if( strcmp( key, dict->objects[i].key ) == 0 )
            return &dict->objects[i];
    }
    return NULL;
}
/* Iterator helper: pass NULL for the first entry, then the previous
 * return value; returns NULL once all entries have been visited. */
hb_dict_entry_t * hb_dict_next( hb_dict_t * dict, hb_dict_entry_t * previous )
{
    if( dict == NULL || dict->objects == NULL || !dict->count )
        return NULL;

    if( previous == NULL )
        return dict->objects; /* first entry */

    unsigned int next_index = ( previous - dict->objects ) + 1;
    return ( next_index < dict->count ) ? &dict->objects[next_index] : NULL;
}
/* Parses an encoder options string of the form "name=value:name2:..."
 * into a dictionary, normalizing x264/x265 option aliases.
 * Returns NULL for an empty string or allocation failure. */
hb_dict_t * hb_encopts_to_dict( const char * encopts, int encoder )
{
    hb_dict_t * dict = NULL;

    if( encopts && *encopts )
    {
        char *pos, *opts_copy, *value;
        const char *name;

        dict = hb_dict_init( 10 );
        if( !dict )
            return NULL;

        /* Work on a private copy: parsing writes NULs into the buffer */
        pos = opts_copy = strdup( encopts );
        if( opts_copy )
        {
            while( *pos )
            {
                /* Carve out the next ':'-separated "name[=value]" token */
                name = pos;
                pos += strcspn( pos, ":" );
                if( *pos )
                {
                    *pos = 0;
                    pos++;
                }
                value = strchr( name, '=' );
                if( value )
                {
                    *value = 0;
                    value++;
                }
                // x264 has multiple names for some options
                if( encoder == HB_VCODEC_X264 )
                    name = hb_x264_encopt_name( name );
#ifdef USE_X265
                // x265 has multiple names for some options
                if( encoder == HB_VCODEC_X265 )
                    name = hb_x265_encopt_name( name );
#endif
                hb_dict_set( &dict, name, value );
            }
            free( opts_copy );
        }
    }
    return dict;
}
/* Serializes a dictionary back into a ':'-separated
 * "name[=value]" options string. Caller frees the result;
 * returns NULL for an empty dictionary. */
char * hb_dict_to_encopts( hb_dict_t * dict )
{
    char            * result = NULL;
    hb_dict_entry_t * entry  = NULL;
    int               first  = 1;

    while( ( entry = hb_dict_next( dict, entry ) ) != NULL )
    {
        char * piece = hb_strdup_printf( "%s%s%s%s",
                                         first ? "" : ":",
                                         entry->key,
                                         entry->value ? "=" : "",
                                         entry->value ? entry->value : "" );
        if( piece == NULL )
            continue;

        char * grown = hb_strncat_dup( result, piece, strlen( piece ) );
        if( grown != NULL )
        {
            free( result ); /* free(NULL) is a no-op */
            result = grown;
        }
        first = 0;
        free( piece );
    }
    return result;
}
HandBrake-0.10.2/libhb/bits.h 0000664 0001752 0001752 00000004434 12463330511 016234 0 ustar handbrake handbrake /* bits.h
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#ifndef HB_BITS_H
#define HB_BITS_H
/*
 * Returns 1 if every bit in the first num_words 32-bit words of bitmap
 * is set, 0 otherwise. An empty (or negative) word count is trivially
 * "all set".
 */
static inline int
allbits_set(uint32_t *bitmap, int num_words)
{
    /* BUGFIX: the counter was 'unsigned int' while num_words is 'int';
     * the signed/unsigned comparison would turn a negative num_words
     * into a huge unsigned bound and read far out of bounds. */
    int i;
    for( i = 0; i < num_words; i++ )
    {
        if( bitmap[i] != 0xFFFFFFFF )
            return (0);
    }
    return (1);
}
/* Tests whether bit bit_pos of the 32-bit-word bitmap is 1. */
static inline int
bit_is_set( uint32_t *bit_map, int bit_pos )
{
    uint32_t word = bit_map[bit_pos >> 5];
    uint32_t mask = (uint32_t)0x1 << (bit_pos & 0x1F);
    return (word & mask) != 0;
}
/* Tests whether bit bit_pos of the 32-bit-word bitmap is 0. */
static inline int
bit_is_clear( uint32_t *bit_map, int bit_pos )
{
    uint32_t word = bit_map[bit_pos >> 5];
    uint32_t mask = (uint32_t)0x1 << (bit_pos & 0x1F);
    return (word & mask) == 0;
}
/* Sets bit bit_pos of the 32-bit-word bitmap to 1. */
static inline void
bit_set( uint32_t *bit_map, int bit_pos )
{
    uint32_t mask = (uint32_t)0x1 << (bit_pos & 0x1F);
    bit_map[bit_pos >> 5] |= mask;
}
/* Clears bit bit_pos of the 32-bit-word bitmap to 0. */
static inline void
bit_clear(uint32_t *bit_map, int bit_pos)
{
    uint32_t mask = (uint32_t)0x1 << (bit_pos & 0x1F);
    bit_map[bit_pos >> 5] &= ~mask;
}
/* Clears every bit in the inclusive range [start_pos, stop_pos]. */
static inline void
bit_nclear(uint32_t *bit_map, int start_pos, int stop_pos)
{
    int first = start_pos >> 5;
    int last  = stop_pos  >> 5;
    /* bits below start_pos to preserve in the first word */
    uint32_t low_keep  = 0x7FFFFFFF >> ( 31 - (start_pos & 0x1F) );
    /* bits above stop_pos to preserve in the last word */
    uint32_t high_keep = 0xFFFFFFFE << ( stop_pos & 0x1F );

    if ( first == last )
    {
        bit_map[first] &= ( low_keep | high_keep );
        return;
    }

    bit_map[first] &= low_keep;
    for ( int w = first + 1; w < last; w++ )
    {
        bit_map[w] = 0;
    }
    bit_map[last] &= high_keep;
}
/* Sets every bit in the inclusive range [start_pos, stop_pos]. */
static inline void
bit_nset(uint32_t *bit_map, int start_pos, int stop_pos)
{
    int first = start_pos >> 5;
    int last  = stop_pos  >> 5;
    /* bits at/above start_pos in the first word */
    uint32_t low_mask  = 0xFFFFFFFF << ( start_pos & 0x1F );
    /* bits at/below stop_pos in the last word */
    uint32_t high_mask = 0xFFFFFFFF >> ( 31 - ( stop_pos & 0x1F ) );

    if ( first == last )
    {
        bit_map[first] |= ( low_mask & high_mask );
        return;
    }

    bit_map[first] |= low_mask;
    for ( int w = first + 1; w < last; w++ )
    {
        bit_map[w] = 0xFFFFFFFF;
    }
    bit_map[last] |= high_mask;
}
#endif /* HB_BITS_H */
HandBrake-0.10.2/libhb/batch.c 0000664 0001752 0001752 00000007741 12463330511 016353 0 ustar handbrake handbrake /* batch.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
#include "lang.h"
/* A batch scan source: a directory whose regular files are scanned as
 * individual titles. */
struct hb_batch_s
{
    char      * path;      /* directory path (strdup'd copy) */
    hb_list_t * list_file; /* sorted list of strdup'd file paths */
};
/* qsort comparator for an array of char* paths (lexicographic,
 * bounded at PATH_MAX characters). */
static int compare_str(const void *a, const void *b)
{
    const char *lhs = *(const char **)a;
    const char *rhs = *(const char **)b;
    return strncmp(lhs, rhs, PATH_MAX);
}
/***********************************************************************
 * hb_batch_init
 ***********************************************************************
 * Builds a batch source from a directory: collects its regular files,
 * sorts them, and stores the paths. Returns NULL if path is not a
 * directory, contains no regular files, or on allocation failure.
 **********************************************************************/
hb_batch_t * hb_batch_init( char * path )
{
    hb_batch_t * d;
    hb_stat_t sb;
    HB_DIR * dir;
    struct dirent * entry;
    char * filename;
    int count, ii;
    char ** files;

    if ( hb_stat( path, &sb ) )
        return NULL;

    if ( !S_ISDIR( sb.st_mode ) )
        return NULL;

    dir = hb_opendir(path);
    if ( dir == NULL )
        return NULL;

    // First pass: count the total number of entries so the name array
    // can be sized
    count = 0;
    while ( (entry = hb_readdir( dir ) ) )
    {
        count++;
    }
    if ( count == 0 )
    {
        hb_closedir( dir );
        return NULL;
    }
    // BUGFIX: the malloc result was never checked
    files = malloc(count * sizeof(char*));
    if ( files == NULL )
    {
        hb_closedir( dir );
        return NULL;
    }

    // Second pass: keep regular files only.
    // BUGFIX: bound the loop by count — if entries appear in the
    // directory between the two passes, the old code overflowed 'files'.
    ii = 0;
    hb_rewinddir(dir);
    while ( ii < count && (entry = hb_readdir( dir ) ) )
    {
        filename = hb_strdup_printf( "%s" DIR_SEP_STR "%s", path, entry->d_name );
        if ( hb_stat( filename, &sb ) )
        {
            free( filename );
            continue;
        }
        if ( !S_ISREG( sb.st_mode ) )
        {
            free( filename );
            continue;
        }
        files[ii++] = filename;
    }
    hb_closedir( dir );
    count = ii;

    // Sort the files
    qsort(files, count, sizeof(char*), compare_str);

    // Create file list
    // BUGFIX: calloc arguments were swapped (size, nmemb) and the
    // result was unchecked
    d = calloc( 1, sizeof( hb_batch_t ) );
    if ( d == NULL )
    {
        for (ii = 0; ii < count; ii++)
            free( files[ii] );
        free( files );
        return NULL;
    }
    d->list_file = hb_list_init();
    for (ii = 0; ii < count; ii++)
    {
        hb_list_add( d->list_file, files[ii] );
    }
    free(files);

    if ( hb_list_count( d->list_file ) == 0 )
    {
        hb_list_close( &d->list_file );
        free( d );
        return NULL;
    }
    d->path = strdup( path );

    return d;
}
/***********************************************************************
 * hb_batch_title_count
 ***********************************************************************
 * Returns the number of files (candidate titles) in the batch.
 **********************************************************************/
int hb_batch_title_count( hb_batch_t * d )
{
    return hb_list_count( d->list_file );
}
/***********************************************************************
 * hb_batch_title_scan
 ***********************************************************************
 * Scans the t-th file of the batch (t is 1-based) as a stream and
 * returns the resulting title, or NULL if the index is out of range
 * or the file cannot be opened/scanned.
 **********************************************************************/
hb_title_t * hb_batch_title_scan( hb_batch_t * d, int t )
{
    char        * path;
    hb_title_t  * title;
    hb_stream_t * stream;

    if ( t < 0 )
        return NULL;

    path = hb_list_item( d->list_file, t - 1 );
    if ( path == NULL )
        return NULL;

    hb_log( "batch: scanning %s", path );
    title  = hb_title_init( path, 0 );
    stream = hb_stream_open( path, title, 1 );
    if ( stream == NULL )
    {
        hb_title_close( &title );
        return NULL;
    }

    title = hb_stream_title_scan( stream, title );
    hb_stream_close( &stream );

    if ( title != NULL )
    {
        title->index = t;
    }
    return title;
}
/***********************************************************************
 * hb_batch_close
 ***********************************************************************
 * Closes and frees everything: stored paths, the file list, the path
 * copy and the batch itself; NULLs the caller's pointer.
 **********************************************************************/
void hb_batch_close( hb_batch_t ** _d )
{
    hb_batch_t * batch = *_d;
    char       * item;

    /* Drain and free every stored file path */
    while ( ( item = hb_list_item( batch->list_file, 0 ) ) != NULL )
    {
        hb_list_rem( batch->list_file, item );
        free( item );
    }
    hb_list_close( &batch->list_file );
    free( batch->path );
    free( batch );
    *_d = NULL;
}
HandBrake-0.10.2/libhb/vadxva2.h 0000664 0001752 0001752 00000022455 12463330511 016651 0 ustar handbrake handbrake /* vadxva2.h
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
Authors: Peng Gao
Li Cao
*/
#ifdef USE_HWD
#ifndef HB_VA_DXVA2_H
#define HB_VA_DXVA2_H
#include "hbffmpeg.h"
#include "d3d9.h"
#include "libavcodec/dxva2.h"
#include "dxva2api.h"
#include "common.h"
#include "opencl.h"
#include "openclwrapper.h"
/* Pack four characters into a little-endian FOURCC code. */
#define HB_FOURCC( a, b, c, d ) ( ((uint32_t)a) | ( ((uint32_t)b) << 8 ) | ( ((uint32_t)c) << 16 ) | ( ((uint32_t)d) << 24 ) )
#define MAKEFOURCC( a, b, c, d ) ((DWORD)(BYTE)(a) | ((DWORD)(BYTE)(b) << 8) | ((DWORD)(BYTE)(c) << 16) | ((DWORD)(BYTE)(d) << 24 ))
/* Pixel-format codes used by the decoder/extract paths */
#define HB_CODEC_YV12 HB_FOURCC( 'Y', 'V', '1', '2' )
#define HB_CODEC_NV12 HB_FOURCC( 'N', 'V', '1', '2' )
/* DXVA2 HRESULT error codes (facility 4) */
#define DXVA2_E_NOT_INITIALIZED MAKE_HRESULT( 1, 4, 4096 )
#define DXVA2_E_NEW_VIDEO_DEVICE MAKE_HRESULT( 1, 4, 4097 )
#define DXVA2_E_VIDEO_DEVICE_LOCKED MAKE_HRESULT( 1, 4, 4098 )
#define DXVA2_E_NOT_AVAILABLE MAKE_HRESULT( 1, 4, 4099 )
/* Maximum number of hardware surfaces kept in hb_va_dxva2_t */
#define VA_DXVA2_MAX_SURFACE_COUNT (64)
static const GUID DXVA_NoEncrypt = { 0x1b81bed0, 0xa0c7, 0x11d3, {0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5} };
static const GUID IID_IDirectXVideoDecoderService = {0xfc51a551, 0xd5e7, 0x11d9, {0xaf, 0x55, 0x00, 0x05, 0x4e, 0x43, 0xff, 0x02}};
static const GUID DXVA2_ModeMPEG2_MoComp = { 0xe6a9f44b, 0x61b0, 0x4563, {0x9e, 0xa4, 0x63, 0xd2, 0xa3, 0xc6, 0xfe, 0x66} };
static const GUID DXVA2_ModeMPEG2_IDCT = { 0xbf22ad00, 0x03ea, 0x4690, {0x80, 0x77, 0x47, 0x33, 0x46, 0x20, 0x9b, 0x7e} };
static const GUID DXVA2_ModeMPEG2_VLD = { 0xee27417f, 0x5e28, 0x4e65, {0xbe, 0xea, 0x1d, 0x26, 0xb5, 0x08, 0xad, 0xc9} };
static const GUID DXVA2_ModeH264_A = { 0x1b81be64, 0xa0c7, 0x11d3, {0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5} };
static const GUID DXVA2_ModeH264_B = { 0x1b81be65, 0xa0c7, 0x11d3, {0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5} };
static const GUID DXVA2_ModeH264_C = { 0x1b81be66, 0xa0c7, 0x11d3, {0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5} };
static const GUID DXVA2_ModeH264_D = { 0x1b81be67, 0xa0c7, 0x11d3, {0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5} };
static const GUID DXVA2_ModeH264_E = { 0x1b81be68, 0xa0c7, 0x11d3, {0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5} };
static const GUID DXVA2_ModeH264_F = { 0x1b81be69, 0xa0c7, 0x11d3, {0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5} };
static const GUID DXVADDI_Intel_ModeH264_A = { 0x604F8E64, 0x4951, 0x4c54, {0x88, 0xFE, 0xAB, 0xD2, 0x5C, 0x15, 0xB3, 0xD6} };
static const GUID DXVADDI_Intel_ModeH264_C = { 0x604F8E66, 0x4951, 0x4c54, {0x88, 0xFE, 0xAB, 0xD2, 0x5C, 0x15, 0xB3, 0xD6} };
static const GUID DXVADDI_Intel_ModeH264_E = { 0x604F8E68, 0x4951, 0x4c54, {0x88, 0xFE, 0xAB, 0xD2, 0x5C, 0x15, 0xB3, 0xD6} };
static const GUID DXVA2_ModeWMV8_A = { 0x1b81be80, 0xa0c7, 0x11d3, {0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5} };
static const GUID DXVA2_ModeWMV8_B = { 0x1b81be81, 0xa0c7, 0x11d3, {0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5} };
static const GUID DXVA2_ModeWMV9_A = { 0x1b81be90, 0xa0c7, 0x11d3, {0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5} };
static const GUID DXVA2_ModeWMV9_B = { 0x1b81be91, 0xa0c7, 0x11d3, {0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5} };
static const GUID DXVA2_ModeWMV9_C = { 0x1b81be94, 0xa0c7, 0x11d3, {0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5} };
static const GUID DXVA2_ModeVC1_A = { 0x1b81beA0, 0xa0c7, 0x11d3, {0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5} };
static const GUID DXVA2_ModeVC1_B = { 0x1b81beA1, 0xa0c7, 0x11d3, {0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5} };
static const GUID DXVA2_ModeVC1_C = { 0x1b81beA2, 0xa0c7, 0x11d3, {0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5} };
static const GUID DXVA2_ModeVC1_D = { 0x1b81beA3, 0xa0c7, 0x11d3, {0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5} };
/* Video format descriptor: dimensions plus frame rate as a rational. */
typedef struct
{
    int width;
    int height;
    int rate;       /* frame rate numerator */
    int rate_base;  /* frame rate denominator */
}hb_dx_format;
/* One Direct3D decoder surface with a reference count and an age
 * counter used for least-recently-used selection. */
typedef struct
{
    LPDIRECT3DSURFACE9 d3d;
    int refcount;
    unsigned int order;
} hb_va_surface_t;
/* Cache buffer used when copying frames out of video memory;
 * 'buffer' is an aligned pointer into 'base'. */
typedef struct
{
    uint8_t *base;
    uint8_t *buffer;
    size_t size;
} hb_copy_cache_t;
/* Maps a human-readable name to a D3D pixel format and our codec code. */
typedef struct
{
    const char *name;
    D3DFORMAT format;
    uint32_t codec;
} hb_d3d_format_t;
/* Maps a DXVA2 decoder mode GUID to the libavcodec codec id it serves
 * (0 when the mode is unsupported by our decode path). */
typedef struct
{
    const char *name;
    const GUID *guid;
    int codec;
} hb_dx_mode_t;
/* Full DXVA2 hardware-decode context: Direct3D device objects, the
 * negotiated decoder configuration, the surface pool, and OpenCL
 * resources for the NV12->YUV conversion kernel. */
typedef struct
{
    char *description;
    int codec_id;          /* libavcodec codec id being decoded */
    uint32_t i_chroma;
    int width;
    int height;
    /* Dynamically loaded D3D9 / DXVA2 runtime */
    HINSTANCE hd3d9_dll;
    HINSTANCE hdxva2_dll;
    D3DPRESENT_PARAMETERS d3dpp;
    LPDIRECT3D9 d3dobj;
    D3DADAPTER_IDENTIFIER9 d3dai;
    LPDIRECT3DDEVICE9 d3ddev;
    UINT token;
    IDirect3DDeviceManager9 *devmng;
    HANDLE device;
    /* Decoder service and negotiated configuration */
    IDirectXVideoDecoderService *vs;
    GUID input;            /* selected decoder mode GUID */
    D3DFORMAT render;
    DXVA2_ConfigPictureDecode cfg;
    IDirectXVideoDecoder *decoder;
    D3DFORMAT output;
    struct dxva_context hw;   /* handed to libavcodec as hwaccel_context */
    /* Surface pool (LRU selection via surface_order) */
    unsigned surface_count;
    unsigned surface_order;
    int surface_width;
    int surface_height;
    uint32_t surface_chroma;
    hb_va_surface_t surface[VA_DXVA2_MAX_SURFACE_COUNT];
    LPDIRECT3DSURFACE9 hw_surface[VA_DXVA2_MAX_SURFACE_COUNT];
    IDirectXVideoProcessorService *ps;
    IDirectXVideoProcessor *vp;
    int64_t input_pts[2];
    int64_t input_dts;
    int do_job;
    // running nv12toyuv kernel.
    cl_kernel nv12toyuv;
    cl_mem cl_mem_nv12;    /* kernel input buffer */
    cl_mem cl_mem_yuv;     /* kernel output buffer */
    uint8_t * nv12toyuv_tmp_in;
    uint8_t * nv12toyuv_tmp_out;
} hb_va_dxva2_t;
/* Shared buffers passed between OpenCL-accelerated filter stages;
 * the *dataflag fields mark which of the host/cl buffers hold valid
 * data for input and output respectively. */
typedef struct FilterLink_T
{
    cl_mem cl_inbuf;
    cl_mem cl_outbuf;
    uint8_t *mem_inbuf;    /* host-side staging buffers */
    uint8_t *mem_outbuf;
    int width;
    int height;
    int linesizeY;
    int linesizeUV;
    int inmemdataflag;
    int outmemdataflag;
    int incldataflag;
    int outcldataflag;
    int framenum;
    int outputSize;
} T_FilterLink;
/* Candidate render target formats, probed in order; NULL-terminated. */
static const hb_d3d_format_t d3d_formats[] =
{
    { "YV12", MAKEFOURCC( 'Y', 'V', '1', '2' ), HB_CODEC_YV12 },
    { "NV12", MAKEFOURCC( 'N', 'V', '1', '2' ), HB_CODEC_NV12 },
    { NULL, 0, 0 }
};
/* Known DXVA2 decoder modes, listed in order of preference; entries
 * with codec 0 are recognized but not usable by our decode path.
 * NULL-terminated. */
static const hb_dx_mode_t dxva2_modes[] =
{
    { "DXVA2_ModeMPEG2_VLD", &DXVA2_ModeMPEG2_VLD, AV_CODEC_ID_MPEG2VIDEO },
    { "DXVA2_ModeMPEG2_MoComp", &DXVA2_ModeMPEG2_MoComp, 0 },
    { "DXVA2_ModeMPEG2_IDCT", &DXVA2_ModeMPEG2_IDCT, 0 },
    { "H.264 variable-length decoder (VLD), FGT", &DXVA2_ModeH264_F, AV_CODEC_ID_H264 },
    { "H.264 VLD, no FGT", &DXVA2_ModeH264_E, AV_CODEC_ID_H264 },
    { "H.264 VLD, no FGT (Intel)", &DXVADDI_Intel_ModeH264_E, AV_CODEC_ID_H264 },
    { "H.264 IDCT, FGT", &DXVA2_ModeH264_D, 0 },
    { "H.264 inverse discrete cosine transform (IDCT), no FGT", &DXVA2_ModeH264_C, 0 },
    { "H.264 inverse discrete cosine transform (IDCT), no FGT (Intel)", &DXVADDI_Intel_ModeH264_C, 0 },
    { "H.264 MoComp, FGT", &DXVA2_ModeH264_B, 0 },
    { "H.264 motion compensation (MoComp), no FGT", &DXVA2_ModeH264_A, 0 },
    { "H.264 motion compensation (MoComp), no FGT (Intel)", &DXVADDI_Intel_ModeH264_A, 0 },
    { "Windows Media Video 8 MoComp", &DXVA2_ModeWMV8_B, 0 },
    { "Windows Media Video 8 post processing", &DXVA2_ModeWMV8_A, 0 },
    { "Windows Media Video 9 IDCT", &DXVA2_ModeWMV9_C, 0 },
    { "Windows Media Video 9 MoComp", &DXVA2_ModeWMV9_B, 0 },
    { "Windows Media Video 9 post processing", &DXVA2_ModeWMV9_A, 0 },
    { "VC-1 VLD", &DXVA2_ModeVC1_D, AV_CODEC_ID_VC1 },
    { "VC-1 VLD", &DXVA2_ModeVC1_D, AV_CODEC_ID_WMV3 },
    { "VC-1 IDCT", &DXVA2_ModeVC1_C, 0 },
    { "VC-1 MoComp", &DXVA2_ModeVC1_B, 0 },
    { "VC-1 post processing", &DXVA2_ModeVC1_A, 0 },
    { NULL, NULL, 0 }
};
int hb_va_get_frame_buf( hb_va_dxva2_t *dxva2, AVCodecContext *p_context, AVFrame *frame );
int hb_va_extract( hb_va_dxva2_t *dxva2, uint8_t *dst, AVFrame *frame, int job_w, int job_h, int *crop, hb_oclscale_t *os, int use_opencl, int use_decomb, int use_detelecine );
enum PixelFormat hb_ffmpeg_get_format( AVCodecContext *, const enum PixelFormat * );
hb_va_dxva2_t *hb_va_create_dxva2( hb_va_dxva2_t *dxva2, int codec_id );
void hb_va_new_dxva2( hb_va_dxva2_t *dxva2, AVCodecContext *p_context );
void hb_va_release( hb_va_dxva2_t *dxva2, AVFrame *frame );
void hb_va_close( hb_va_dxva2_t *dxva2 );
int hb_check_hwd_fmt( int fmt );
#endif // HB_VA_DXVA2_H
#endif // USE_HWD
HandBrake-0.10.2/libhb/qsv_common.h 0000664 0001752 0001752 00000013402 12463330511 017447 0 ustar handbrake handbrake /* qsv_common.h
*
* Copyright (c) 2003-2015 HandBrake Team
* This file is part of the HandBrake source code.
* Homepage: .
* It may be used under the terms of the GNU General Public License v2.
* For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#ifndef HB_QSV_COMMON_H
#define HB_QSV_COMMON_H
#include "msdk/mfxvideo.h"
#include "msdk/mfxplugin.h"
#include "libavcodec/avcodec.h"
/* Minimum Intel Media SDK version (currently 1.3, for Sandy Bridge support) */
#define HB_QSV_MINVERSION_MAJOR AV_QSV_MSDK_VERSION_MAJOR
#define HB_QSV_MINVERSION_MINOR AV_QSV_MSDK_VERSION_MINOR
/*
* Get & store all available Intel Quick Sync information:
*
* - general availability
* - available implementations (hardware-accelerated, software fallback, etc.)
* - available codecs, filters, etc. for direct access (convenience)
* - supported API version
* - supported resolutions
*/
typedef struct hb_qsv_info_s
{
    // each info struct only corresponds to one CodecId and implementation combo
    const mfxU32 codec_id;
    const mfxIMPL implementation;

    // whether the encoder is available for this implementation
    int available;

    // version-specific or hardware-specific capabilities
    // (bitmask of the HB_QSV_CAP_* flags below)
    uint64_t capabilities;
    // support for API 1.6 or later
#define HB_QSV_CAP_MSDK_API_1_6 (1LL << 0)
    // H.264, H.265: B-frames can be used as references
#define HB_QSV_CAP_B_REF_PYRAMID (1LL << 1)
    // optional rate control methods
#define HB_QSV_CAP_RATECONTROL_LA (1LL << 10)
#define HB_QSV_CAP_RATECONTROL_LAi (1LL << 11)
#define HB_QSV_CAP_RATECONTROL_ICQ (1LL << 12)
    // mfxExtCodingOption2 fields
#define HB_QSV_CAP_OPTION2_MBBRC (1LL << 20)
#define HB_QSV_CAP_OPTION2_EXTBRC (1LL << 21)
#define HB_QSV_CAP_OPTION2_TRELLIS (1LL << 22)
#define HB_QSV_CAP_OPTION2_BREFTYPE (1LL << 23)
#define HB_QSV_CAP_OPTION2_IB_ADAPT (1LL << 24)
#define HB_QSV_CAP_OPTION2_LA_DOWNS (1LL << 25)
#define HB_QSV_CAP_OPTION2_NMBSLICE (1LL << 26)

    // TODO: add maximum encode resolution, etc.
} hb_qsv_info_t;
/* Intel Quick Sync Video utilities */
int hb_qsv_available();
int hb_qsv_video_encoder_is_enabled(int encoder);
int hb_qsv_audio_encoder_is_enabled(int encoder);
int hb_qsv_info_init();
void hb_qsv_info_print();
hb_qsv_info_t* hb_qsv_info_get(int encoder);
/* Intel Quick Sync Video DECODE utilities */
const char* hb_qsv_decode_get_codec_name(enum AVCodecID codec_id);
int hb_qsv_decode_is_enabled(hb_job_t *job);
/*
* mfxCoreInterface::CopyFrame had a bug preventing us from using it, but
* it was fixed in newer drivers - we can use this to determine usability
*/
int hb_qsv_copyframe_is_slow(int encoder);
/* Media SDK parameters handling */
/* Result codes returned by hb_qsv_param_parse and related helpers. */
enum
{
    HB_QSV_PARAM_OK,
    HB_QSV_PARAM_ERROR,
    HB_QSV_PARAM_BAD_NAME,    /* unknown option name */
    HB_QSV_PARAM_BAD_VALUE,   /* name recognized, value invalid */
    HB_QSV_PARAM_UNSUPPORTED, /* valid option, unsupported by this impl */
};
/* Aggregated Media SDK encode parameters: the extended-buffer array
 * attached to the mfxVideoParam plus HandBrake-level GOP and rate
 * control settings. */
typedef struct
{
    /*
     * Supported mfxExtBuffer.BufferId values:
     *
     * MFX_EXTBUFF_AVC_REFLIST_CTRL
     * MFX_EXTBUFF_AVC_TEMPORAL_LAYERS
     * MFX_EXTBUFF_CODING_OPTION
     * MFX_EXTBUFF_CODING_OPTION_SPSPPS
     * MFX_EXTBUFF_CODING_OPTION2
     * MFX_EXTBUFF_ENCODER_CAPABILITY
     * MFX_EXTBUFF_ENCODER_RESET_OPTION
     * MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION
     * MFX_EXTBUFF_PICTURE_TIMING_SEI
     * MFX_EXTBUFF_VIDEO_SIGNAL_INFO
     *
     * This should cover all encode-compatible extended
     * buffers that can be attached to an mfxVideoParam.
     */
#define HB_QSV_ENC_NUM_EXT_PARAM_MAX 10
    mfxExtBuffer* ExtParamArray[HB_QSV_ENC_NUM_EXT_PARAM_MAX];
    mfxExtCodingOption codingOption;
    mfxExtCodingOption2 codingOption2;
    mfxExtVideoSignalInfo videoSignalInfo;
    /* GOP structure settings */
    struct
    {
        int b_pyramid;
        int gop_pic_size;
        int gop_ref_dist;
        int int_ref_cycle_size;
    } gop;
    /* Rate control settings */
    struct
    {
        int icq;             /* intelligent constant quality mode */
        int lookahead;
        int cqp_offsets[3];  /* per-frame-type (I/P/B) QP offsets */
        int vbv_max_bitrate;
        int vbv_buffer_size;
        float vbv_buffer_init;
    } rc;

    // assigned via hb_qsv_param_default, may be shared with another structure
    mfxVideoParam *videoParam;
} hb_qsv_param_t;
/* Preset name tables (NULL-terminated); names2 adds "quality" for
 * implementations that support it. */
static const char* const hb_qsv_preset_names1[] = { "speed", "balanced", NULL, };
static const char* const hb_qsv_preset_names2[] = { "speed", "balanced", "quality", NULL, };
const char* const* hb_qsv_preset_get_names();
const char* const* hb_qsv_profile_get_names(int encoder);
const char* const* hb_qsv_level_get_names(int encoder);
const char* hb_qsv_video_quality_get_name(uint32_t codec);
void hb_qsv_video_quality_get_limits(uint32_t codec, float *low, float *high, float *granularity, int *direction);
/* Clamp val to the inclusive range [min, max].
 * BUGFIX: arguments are now fully parenthesized — the old expansion
 * applied <, > and ?: directly to unparenthesized arguments, so an
 * expression argument (e.g. a ternary) expanded with the wrong
 * precedence. Note: arguments may be evaluated more than once. */
#define HB_QSV_CLIP3(min, max, val) (((val) < (min)) ? (min) : (((val) > (max)) ? (max) : (val)))
int hb_qsv_codingoption_xlat (int val);
const char* hb_qsv_codingoption_get_name(int val);
int hb_qsv_trellisvalue_xlat(int val);
int hb_qsv_atoindex(const char* const *arr, const char *str, int *err);
int hb_qsv_atobool (const char *str, int *err);
int hb_qsv_atoi (const char *str, int *err);
float hb_qsv_atof (const char *str, int *err);
int hb_qsv_param_default_preset(hb_qsv_param_t *param, mfxVideoParam *videoParam, hb_qsv_info_t *info, const char *preset);
int hb_qsv_param_default (hb_qsv_param_t *param, mfxVideoParam *videoParam, hb_qsv_info_t *info);
int hb_qsv_param_parse (hb_qsv_param_t *param, hb_qsv_info_t *info, const char *key, const char *value);
const char* hb_qsv_frametype_name(uint16_t qsv_frametype);
uint8_t hb_qsv_frametype_xlat(uint16_t qsv_frametype, uint16_t *out_flags);
int hb_qsv_impl_set_preferred(const char *name);
const char* hb_qsv_impl_get_name(int impl);
void hb_qsv_force_workarounds(); // for developers only
#endif
HandBrake-0.10.2/libhb/openclkernels.h 0000664 0001752 0001752 00000074746 12463330511 020154 0 ustar handbrake handbrake /* openclkernels.h
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
Authors: Peng Gao
Li Cao
*/
#ifndef USE_EXTERNAL_KERNEL
#define KERNEL( ... )# __VA_ARGS__
char *kernel_src_hscale = KERNEL (
typedef unsigned char fixed8;
/*******************************************************************************************************
dst: Horizontal scale destination;
src: YUV content in opencl buf;
hf_Y: Horizontal filter coefficients for Y planes;
hf_UV: Horizontal filter coefficients for UV planes;
hi_Y: Horizontal filter index for Y planes;
hi_UV: Horizontal filter index for UV planes;
stride: Src width;
filter_len: Length of filter;
********************************************************************************************************/
kernel void frame_h_scale (
global fixed8 *src,
global float *hf_Y,
global float *hf_UV,
global int *hi_Y,
global int *hi_UV,
global fixed8 *dst,
int stride, //src_width
int filter_len
)
{
int x = get_global_id( 0 );
int y = get_global_id( 1 );
int width = get_global_size( 0 );
int height = get_global_size( 1 );
float result_Y = 0, result_U = 0, result_V = 0;
int i = 0;
global fixed8 *src_Y = src;
global fixed8 *src_U = src_Y + stride * height;
global fixed8 *src_V = src_U + (stride >> 1) * (height >> 1);
global fixed8 *dst_Y = dst;
global fixed8 *dst_U = dst_Y + width * height;
global fixed8 *dst_V = dst_U + (width >> 1) * (height >> 1);
int xy = y * width + x;
global fixed8 *rowdata_Y = src_Y + (y * stride);
for( int i = 0; i < filter_len; i++ )
{
result_Y += ( hf_Y[x + i * width] * rowdata_Y[hi_Y[x] + i]);
}
dst_Y[xy] = result_Y;
if( y < (height >> 1) && x < (width >> 1) )
{
int xy = y * (width >> 1) + x;
global fixed8 *rowdata_U = src_U + (y * (stride >> 1));
global fixed8 *rowdata_V = src_V + (y * (stride >> 1));
for( i = 0; i < filter_len; i++ )
{
result_U += ( hf_UV[x + i * (width >> 1)] * rowdata_U[hi_UV[x] + i]);
result_V += ( hf_UV[x + i * (width >> 1)] * rowdata_V[hi_UV[x] + i]);
}
dst_U[xy] = result_U;
dst_V[xy] = result_V;
}
}
);
/*******************************************************************************************************
dst: Vertical scale destination;
src: YUV content in opencl buf;
hf_Y: Vertical filter coefficients for Y planes;
hf_UV: Vertical filter coefficients for UV planes;
hi_Y: Vertical filter index for Y planes;
hi_UV: Vertical filter index for UV planes;
stride: Src height;
filter_len: Length of filter;
********************************************************************************************************/
char *kernel_src_vscale = KERNEL (
kernel void frame_v_scale (
global fixed8 *src,
global float *vf_Y,
global float *vf_UV,
global int *vi_Y,
global int *vi_UV,
global fixed8 *dst,
int src_height,
int filter_len
)
{
int x = get_global_id( 0 );
int y = get_global_id( 1 );
int width = get_global_size( 0 );
int height = get_global_size( 1 );
float result_Y = 0, result_U = 0, result_V = 0;
int i = 0;
global fixed8 *src_Y = src;
global fixed8 *src_U = src_Y + src_height * width;
global fixed8 *src_V = src_U + (src_height >> 1) * (width >> 1);
global fixed8 *dst_Y = dst;
global fixed8 *dst_U = dst_Y + height * width;
global fixed8 *dst_V = dst_U + (height >> 1) * (width >> 1);
int xy = y * width + x;
for( i = 0; i < filter_len; i++ )
{
result_Y += vf_Y[y + i * height] * src_Y[(vi_Y[y] + i) * width + x];
}
dst_Y[xy] = result_Y;
if( y < (height >> 1) && x < (width >> 1) )
{
int xy = y * (width >> 1) + x;
for( i = 0; i < filter_len; i++ )
{
result_U += vf_UV[y + i * (height >> 1)] * src_U[(vi_UV[y] + i) * (width >> 1) + x];
result_V += vf_UV[y + i * (height >> 1)] * src_V[(vi_UV[y] + i) * (width >> 1) + x];
}
dst_U[xy] = result_U;
dst_V[xy] = result_V;
}
}
);
/*******************************************************************************************************
input: Input buffer;
output: Output buffer;
w: Width of frame;
h: Height of frame;
********************************************************************************************************/
/* OpenCL kernel source (macro-stringified by KERNEL(); the body below
 * is data, not compiled C): de-interleaves an NV12 frame into planar
 * YUV — the Y plane is copied through, the interleaved UV pairs are
 * split into separate U and V planes. */
char *kernel_src_nvtoyuv = KERNEL (
kernel void nv12toyuv ( global char *input, global char* output, int w, int h )
{
int x = get_global_id( 0 );
int y = get_global_id( 1 );
int idx = y * (w >> 1) + x;
vstore4((vload4( 0, input + (idx << 2))), 0, output + (idx << 2)); //Y
char2 uv = vload2( 0, input + (idx << 1) + w * h );
output[idx + w * h] = uv.s0;
output[idx + w * h + ((w * h) >> 2)] = uv.s1;
}
);
/*******************************************************************************************************
dst: Horizontal scale destination;
src: YUV content in opencl buf;
yfilter: Opencl memory of horizontal filter coefficients for luma/alpha planes;
yfilterPos: Opencl memory of horizontal filter starting positions for each dst[i] for luma/alpha planes;
yfilterSize: Horizontal filter size for luma/alpha pixels;
cfilter: Opencl memory of horizontal filter coefficients for chroma planes;
cfilterPos: Opencl memory of horizontal filter starting positions for each dst[i] for chroma planes;
cfilterSize: Horizontal filter size for chroma pixels;
dstStride: Width of destination luma/alpha planes;
dstChrStride: Width of destination chroma planes;
********************************************************************************************************/
// General horizontal scaler: each work-item filters two luma pixels on the
// top half, two on the bottom half, and one U plus one V chroma pixel, all
// in one pass.  Output is a 15-bit intermediate (7 fractional bits removed,
// clamped to 0x7FFF) consumed later by the vertical-scale kernels.
// Comments are stripped by the preprocessor before KERNEL stringification.
char *kernel_src_hscaleall = KERNEL (
kernel void hscale_all_opencl (
global short *dst,
const global unsigned char *src,
const global short *yfilter,
const global int *yfilterPos,
int yfilterSize,
const global short *cfilter,
const global int *cfilterPos,
int cfilterSize,
int dstWidth,
int dstHeight,
int srcWidth,
int srcHeight,
int dstStride,
int dstChrStride,
int srcStride,
int srcChrStride)
{
int w = get_global_id(0);
int h = get_global_id(1);
int chrWidth = get_global_size(0);  // half the luma width
int chrHeight = get_global_size(1); // half the luma height
// Four luma taps: (left/right) x (top/bottom halves of the image).
int srcPos1 = h * srcStride + yfilterPos[w];
int srcPos2 = h * srcStride + yfilterPos[w + chrWidth];
int srcPos3 = (h + (srcHeight >> 1)) * srcStride + yfilterPos[w];
int srcPos4 = (h + (srcHeight >> 1)) * srcStride + yfilterPos[w + chrWidth];
// Chroma source positions: U plane follows luma, V follows U.
int srcc1Pos = srcStride * srcHeight + (h) * (srcChrStride) + cfilterPos[w];
int srcc2Pos = srcc1Pos + ((srcChrStride)*(chrHeight));
int val1 = 0;
int val2 = 0;
int val3 = 0;
int val4 = 0;
int val5 = 0;
int val6 = 0;
int filterPos1 = yfilterSize * w;
int filterPos2 = yfilterSize * (w + chrWidth);
int cfilterPos1 = cfilterSize * w;
int j;
// Accumulate all six FIR convolutions in a single coefficient loop.
for (j = 0; j < yfilterSize; j++)
{
val1 += src[srcPos1 + j] * yfilter[filterPos1+ j];
val2 += src[srcPos2 + j] * yfilter[filterPos2 + j];
val3 += src[srcPos3 + j] * yfilter[filterPos1 + j];
val4 += src[srcPos4 + j] * yfilter[filterPos2 + j];
val5 += src[srcc1Pos+j] * cfilter[cfilterPos1 + j];
val6 += src[srcc2Pos+j] * cfilter[cfilterPos1 + j];
}
int dstPos1 = h *dstStride;
int dstPos2 = (h + chrHeight) * dstStride;
// Drop 7 fractional bits and clamp to the int16 maximum (0x7FFF).
dst[dstPos1 + w] = ((val1 >> 7) > ((1 << 15) - 1) ? ((1 << 15) - 1) : (val1 >> 7));
dst[dstPos1 + w + chrWidth] = ((val2 >> 7) > ((1 << 15) - 1) ? ((1 << 15) - 1) : (val2 >> 7));
dst[dstPos2 + w] = ((val3 >> 7) > ((1 << 15) - 1) ? ((1 << 15) - 1) : (val3 >> 7));
dst[dstPos2 + w + chrWidth] = ((val4 >> 7) > ((1 << 15) - 1) ? ((1 << 15) - 1) : (val4 >> 7));
int dstPos3 = h * (dstChrStride) + w + dstStride * dstHeight;
int dstPos4 = h * (dstChrStride) + w + dstStride * dstHeight + ((dstChrStride) * chrHeight);
dst[dstPos3] = ((val5 >> 7) > ((1 << 15) - 1) ? ((1 << 15) - 1) : (val5 >> 7));
dst[dstPos4] = ((val6 >> 7) > ((1 << 15) - 1) ? ((1 << 15) - 1) : (val6 >> 7));
}
);
// "Fast" horizontal scaler: bilinear (two-tap) interpolation driven by a
// 16.16 fixed-point step (xInc for luma, chrXInc for chroma) instead of a
// full FIR filter.  Output layout matches hscale_all_opencl (15-bit values).
// Comments are stripped by the preprocessor before KERNEL stringification.
char *kernel_src_hscalefast = KERNEL (
kernel void hscale_fast_opencl (
global short *dst,
const global unsigned char *src,
int xInc,
int chrXInc,
int dstWidth,
int dstHeight,
int srcWidth,
int srcHeight,
int dstStride,
int dstChrStride,
int srcStride,
int srcChrStride)
{
int w = get_global_id(0);
int h = get_global_id(1);
int chrWidth = get_global_size(0);
int chrHeight = get_global_size(1);
// NOTE(review): xpos1/xpos2 are hard-coded to 0 here, so xx/xalpha are 0
// for the first two stores; one would expect xpos1 = w * xInc and
// xpos2 = (w + chrWidth) * xInc.  Confirm against upstream swscale before
// relying on this path.
int xpos1 = 0;
int xpos2 = 0;
int xx = xpos1 >> 16;               // integer source column
int xalpha = (xpos1 & 0xFFFF) >> 9; // 7-bit interpolation weight
dst[h * dstStride + w] = (src[h * srcStride + xx] << 7) + (src[h * srcStride + xx + 1] -src[h * srcStride + xx]) * xalpha;
int lowpart = h + (chrHeight);
dst[lowpart * dstStride + w] = (src[lowpart * srcStride + xx] << 7) + (src[lowpart * srcStride + xx + 1] - src[lowpart * srcStride + xx]) * xalpha;
// Clamp: pixels that map past the right edge replicate the last column.
int inv_i = w * xInc >> 16;
if( inv_i >= srcWidth - 1)
{
dst[h*dstStride + w] = src[h*srcStride + srcWidth-1]*128;
dst[lowpart*dstStride + w] = src[lowpart*srcStride + srcWidth - 1] * 128;
}
int rightpart = w + (chrWidth);
xx = xpos2 >> 16;
xalpha = (xpos2 & 0xFFFF) >> 9;
dst[h * dstStride + rightpart] = (src[h *srcStride + xx] << 7) + (src[h * srcStride + xx + 1] - src[h * srcStride + xx]) * xalpha;
dst[lowpart * dstStride + rightpart] = (src[lowpart * srcStride + xx] << 7) + (src[lowpart * srcStride + xx + 1] - src[lowpart * srcStride + xx]) * xalpha;
inv_i = rightpart * xInc >> 16;
if( inv_i >= srcWidth - 1)
{
dst[h * dstStride + rightpart] = src[h * srcStride + srcWidth - 1] * 128;
dst[lowpart * dstStride + rightpart] = src[lowpart * srcStride + srcWidth - 1] * 128;
}
// Chroma: advance the base pointers past the luma planes, then bilinear
// interpolate the U plane ...
int xpos = 0;
xpos = chrXInc * w;
xx = xpos >> 16;
xalpha = (xpos & 0xFFFF) >> 9;
src += srcStride * srcHeight;
dst += dstStride * dstHeight;
dst[h * (dstChrStride) + w] = (src[h * (srcChrStride) + xx] * (xalpha^127) + src[h * (srcChrStride) + xx + 1] * xalpha);
inv_i = w * xInc >> 16;
if( inv_i >= (srcWidth >> 1) - 1)
{
dst[h * (dstChrStride) + w] = src[h * (srcChrStride) + (srcWidth >> 1) -1]*128;
}
// ... and the V plane, one chroma-plane stride further on.
xpos = chrXInc * (w);
xx = xpos >> 16;
src += srcChrStride * srcHeight >> 1;
dst += (dstChrStride * chrHeight);
dst[h * (dstChrStride) + w] = (src[h * (srcChrStride) + xx] * (xalpha^127) + src[h * (srcChrStride) + xx + 1 ] * xalpha);
if( inv_i >= (srcWidth >> 1) - 1)
{
//v channel:
dst[h * (dstChrStride) + w] = src[h * (srcChrStride) + (srcWidth >> 1) -1] * 128;
}
}
);
// General vertical scaler with ordered dithering: converts the 15-bit
// horizontal-scale intermediates back to 8-bit output, adding an 8x8
// Bayer-style dither pattern before the final >>19 truncation.
// Comments are stripped by the preprocessor before KERNEL stringification.
char *kernel_src_vscalealldither = KERNEL (
kernel void vscale_all_dither_opencl (
global unsigned char *dst,
const global short *src,
const global short *yfilter,
int yfilterSize,
const global short *cfilter,
int cfilterSize,
const global int *yfilterPos,
const global int *cfilterPos,
int dstWidth,
int dstHeight,
int srcWidth,
int srcHeight,
int dstStride,
int dstChrStride,
int srcStride,
int srcChrStride)
{
// 8x8 ordered-dither matrix centered around 64 (i.e. 128 >> 1).
const unsigned char hb_dither_8x8_128[8][8] = {
{ 36, 68, 60, 92, 34, 66, 58, 90, },
{ 100, 4, 124, 28, 98, 2, 122, 26, },
{ 52, 84, 44, 76, 50, 82, 42, 74, },
{ 116, 20, 108, 12, 114, 18, 106, 10, },
{ 32, 64, 56, 88, 38, 70, 62, 94, },
{ 96, 0, 120, 24, 102, 6, 126, 30, },
{ 48, 80, 40, 72, 54, 86, 46, 78, },
{ 112, 16, 104, 8, 118, 22, 110, 14, },
};
int w = get_global_id(0);
int h = get_global_id(1);
int chrWidth = get_global_size(0);
int chrHeight = get_global_size(1);
// Dither rows for the top-half and bottom-half output rows.
const unsigned char *local_up_dither;
const unsigned char *local_down_dither;
local_up_dither = hb_dither_8x8_128[h & 7];
local_down_dither = hb_dither_8x8_128[(h + chrHeight) & 7];
//yscale;
// Column-wise FIR: yfilterPos[h] selects the first source row per output
// row; six accumulators cover 4 luma pixels plus one U and one V pixel.
int srcPos1 = (yfilterPos[h]) * srcStride + w;
int srcPos2 = (yfilterPos[h]) * srcStride + w + (chrWidth);
int srcPos3 = (yfilterPos[h + chrHeight]) * srcStride + w;
int srcPos4 = (yfilterPos[h + chrHeight]) * srcStride + w + chrWidth;
int src1Pos = dstStride * srcHeight + (cfilterPos[h]) * dstChrStride + (w);
int src2Pos = dstStride * srcHeight + (dstChrStride*(srcHeight>>1)) + (cfilterPos[h]) * dstChrStride + w;
// Seed each accumulator with its dither value, pre-shifted to the 19-bit
// fixed-point position used by the coefficients.
int val1 = (local_up_dither[w & 7] << 12); //y offset is 0;
int val2 = (local_up_dither[(w + chrWidth) & 7] << 12);
int val3 = (local_down_dither[w &7] << 12);
int val4 = (local_down_dither[(w + chrWidth) & 7] << 12);
int val5 = (local_up_dither[w & 7] << 12);
int val6 = (local_up_dither[(w + 3) & 7] << 12); // 3 is offset of the chrome channel.
int j;
int filterPos1 = h * yfilterSize;
int filterPos2 = ( h + chrHeight ) * yfilterSize;
for(j = 0; j < yfilterSize; j++)
{
val1 += src[srcPos1] * yfilter[filterPos1 + j];
srcPos1 += srcStride;
val2 += src[srcPos2] * yfilter[filterPos1 + j];
srcPos2 += srcStride;
val3 += src[srcPos3] * yfilter[filterPos2 + j];
srcPos3 += srcStride;
val4 += src[srcPos4] * yfilter[filterPos2 + j];
srcPos4 += srcStride;
val5 += src[src1Pos] * cfilter[filterPos1 + j];
val6 += src[src2Pos] * cfilter[filterPos1 + j];
src1Pos += dstChrStride;
src2Pos += dstChrStride;
}
// (val>>19) clamped to 0..255: if any bit above 0xFF is set the value is
// out of range; the sign trick yields 0 for negatives, 255 for overflow.
dst[h * dstStride + w] = (((val1 >> 19)&(~0xFF)) ? ((-(val1 >> 19)) >> 31) : (val1 >> 19));
dst[h * dstStride + w + chrWidth] = (((val2 >> 19)&(~0xFF)) ? ((-(val2 >> 19)) >> 31) : (val2 >> 19));
dst[(h + chrHeight) * dstStride + w] = (((val3 >> 19)&(~0xFF)) ? ((-(val3 >> 19)) >> 31) : (val3 >> 19));
dst[(h + chrHeight) * dstStride + w + chrWidth] = (((val4 >> 19)&(~0xFF)) ? ((-(val4 >> 19)) >> 31) : (val4 >> 19));
int dst1Pos = dstStride * dstHeight + h*(dstChrStride)+(w);
int dst2Pos = (dstChrStride * chrHeight) + dst1Pos;
dst[dst1Pos] = (((val5 >> 19)&(~0xFF)) ? ((-(val5 >> 19)) >> 31) : (val5 >> 19));
dst[dst2Pos] = (((val6 >> 19)&(~0xFF)) ? ((-(val6 >> 19)) >> 31) : (val6 >> 19));
}
);
// Vertical scaler without dithering: identical structure to
// vscale_all_dither_opencl, but every "dither" value is the constant 64
// (plain rounding) instead of an 8x8 ordered pattern.
// Comments are stripped by the preprocessor before KERNEL stringification.
char *kernel_src_vscaleallnodither = KERNEL (
kernel void vscale_all_nodither_opencl (
global unsigned char *dst,
const global short *src,
const global short *yfilter,
int yfilterSize,
const global short *cfilter,
int cfilterSize,
const global int *yfilterPos,
const global int *cfilterPos,
int dstWidth,
int dstHeight,
int srcWidth,
int srcHeight,
int dstStride,
int dstChrStride,
int srcStride,
int srcChrStride)
{
// Constant "dither" row: 64 == half of 128, i.e. round-to-nearest bias.
const unsigned char hb_sws_pb_64[8] = {
64, 64, 64, 64, 64, 64, 64, 64
};
int w = get_global_id(0);
int h = get_global_id(1);
int chrWidth = get_global_size(0);
int chrHeight = get_global_size(1);
const unsigned char *local_up_dither;
const unsigned char *local_down_dither;
local_up_dither = hb_sws_pb_64;
local_down_dither = hb_sws_pb_64;
//yscale;
// Column-wise FIR over the 15-bit intermediates (see the dither variant
// for the layout of the six accumulators).
int srcPos1 = (yfilterPos[h]) * srcStride + w;
int srcPos2 = (yfilterPos[h]) * srcStride + w + (chrWidth);
int srcPos3 = (yfilterPos[h + chrHeight]) * srcStride + w;
int srcPos4 = (yfilterPos[h + chrHeight]) * srcStride + w + chrWidth;
int src1Pos = dstStride * srcHeight + (cfilterPos[h]) * dstChrStride + (w);
int src2Pos = dstStride * srcHeight + (dstChrStride*(srcHeight>>1)) + (cfilterPos[h]) * dstChrStride + w;
int val1 = (local_up_dither[w & 7] << 12); //y offset is 0;
int val2 = (local_up_dither[(w + chrWidth) & 7] << 12);
int val3 = (local_down_dither[w &7] << 12);
int val4 = (local_down_dither[(w + chrWidth) & 7] << 12);
int val5 = (local_up_dither[w & 7] << 12);
int val6 = (local_up_dither[(w + 3) & 7] << 12); // 3 is offset of the chrome channel.
int j;
int filterPos1 = h * yfilterSize;
int filterPos2 = ( h + chrHeight ) * yfilterSize;
for(j = 0; j < yfilterSize; j++)
{
val1 += src[srcPos1] * yfilter[filterPos1 + j];
srcPos1 += srcStride;
val2 += src[srcPos2] * yfilter[filterPos1 + j];
srcPos2 += srcStride;
val3 += src[srcPos3] * yfilter[filterPos2 + j];
srcPos3 += srcStride;
val4 += src[srcPos4] * yfilter[filterPos2 + j];
srcPos4 += srcStride;
val5 += src[src1Pos] * cfilter[filterPos1 + j];
val6 += src[src2Pos] * cfilter[filterPos1 + j];
src1Pos += dstChrStride;
src2Pos += dstChrStride;
}
// Clamp (val>>19) to 0..255; the doubled ';' below is a harmless empty
// statement left in the original source.
dst[h * dstStride + w] = (((val1 >> 19)&(~0xFF)) ? ((-(val1 >> 19)) >> 31) : (val1 >> 19));
dst[h * dstStride + w + chrWidth] = (((val2 >> 19)&(~0xFF)) ? ((-(val2 >> 19)) >> 31) : (val2 >> 19));
dst[(h + chrHeight) * dstStride + w] = (((val3 >> 19)&(~0xFF)) ? ((-(val3 >> 19)) >> 31) : (val3 >> 19));
dst[(h + chrHeight) * dstStride + w + chrWidth] = (((val4 >> 19)&(~0xFF)) ? ((-(val4 >> 19)) >> 31) : (val4 >> 19));;
int dst1Pos = dstStride * dstHeight + h * (dstChrStride) + (w);
int dst2Pos = (dstChrStride * chrHeight) + dst1Pos;
dst[dst1Pos] = (((val5 >> 19)&(~0xFF)) ? ((-(val5 >> 19)) >> 31) : (val5 >> 19));
dst[dst2Pos] = (((val6 >> 19)&(~0xFF)) ? ((-(val6 >> 19)) >> 31) : (val6 >> 19));
}
);
// "Fast" vertical scaler: nearest-row selection (yfilterPos/cfilterPos pick
// a single source row, no FIR) with a constant +64 rounding bias, then >>7
// back to 8 bits and a 0..255 clamp.
// Comments are stripped by the preprocessor before KERNEL stringification.
char *kernel_src_vscalefast = KERNEL (
kernel void vscale_fast_opencl (
global unsigned char *dst,
const global short *src,
const global int *yfilterPos,
const global int *cfilterPos,
int dstWidth,
int dstHeight,
int srcWidth,
int srcHeight,
int dstStride,
int dstChrStride,
int srcStride,
int srcChrStride)
{
// Constant rounding row (no real dithering in the fast path).
const unsigned char hb_sws_pb_64[8] = {
64, 64, 64, 64, 64, 64, 64, 64
};
int w = get_global_id(0);
int h = get_global_id(1);
int chrWidth = get_global_size(0);
int chrHeight = get_global_size(1);
const unsigned char *local_up_dither;
const unsigned char *local_down_dither;
local_up_dither = hb_sws_pb_64;
local_down_dither = hb_sws_pb_64;
int rightpart = w + chrWidth;
int bh = h + chrHeight; // bottom part
// Four luma output pixels per work-item: (left/right) x (top/bottom).
short val1 = (src[(yfilterPos[h]) * dstStride + w] + local_up_dither[(w + 0) & 7]) >> 7; //lum offset is 0;
short val2 = (src[(yfilterPos[h]) * dstStride + rightpart] + local_up_dither[rightpart & 7]) >> 7;
short val3 = (src[(yfilterPos[bh]) * dstStride + w] + local_down_dither[w & 7]) >> 7;
short val4 = (src[(yfilterPos[bh]) * dstStride + rightpart] + local_down_dither[rightpart & 7]) >> 7;
// Clamp to 0..255: negative -> 0, >255 -> 255 (sign-shift trick).
dst[h * dstStride + w] = ((val1&(~0xFF)) ? ((-val1) >> 31) : (val1));
dst[h * dstStride + rightpart] = ((val2&(~0xFF)) ? ((-val2) >> 31) : (val2));
dst[bh * dstStride + w] = ((val3&(~0xFF)) ? ((-val3) >> 31) : (val3));
dst[bh * dstStride + rightpart] = ((val4&(~0xFF)) ? ((-val4) >> 31) : (val4));
// U plane: step both pointers past the luma planes.
src += dstStride * srcHeight;
dst += dstStride * dstHeight;
val1 = (src[cfilterPos[h] * (dstChrStride) + w] + local_up_dither[ w & 7]) >> 7;
dst[h * (dstChrStride) + w] = ((val1&(~0xFF)) ? ((-val1) >> 31) : (val1));
// V plane: one chroma plane further on.
src += dstChrStride * (srcHeight >> 1);
dst += dstChrStride * chrHeight;
val1 = (src[cfilterPos[h] * dstChrStride + w] + local_up_dither[ (w + 3) & 7] ) >> 7;
dst[h * dstChrStride + w] = ((val1&(~0xFF)) ? ((-val1) >> 31) : (val1));
}
);
// Combined two-pass scaler: each 64x1 workgroup horizontally filters a
// stripe of source rows into local memory (LDS), synchronizes, then
// vertically filters from LDS into the destination plane.  Both passes use
// 4-tap float filters (xweights/yweights).  z selects the plane
// (0 = luma, 1/2 = half-resolution chroma).
// Comments are stripped by the preprocessor before KERNEL stringification.
char *kernel_src_scale = KERNEL (
__kernel __attribute__((reqd_work_group_size(64, 1, 1))) void frame_scale(__global uchar *dst,
__global const uchar *src,
const float xscale,
const float yscale,
const int srcPlaneOffset0,
const int srcPlaneOffset1,
const int srcPlaneOffset2,
const int dstPlaneOffset0,
const int dstPlaneOffset1,
const int dstPlaneOffset2,
const int srcRowWords0,
const int srcRowWords1,
const int srcRowWords2,
const int dstRowWords0,
const int dstRowWords1,
const int dstRowWords2,
const int srcWidth,
const int srcHeight,
const int dstWidth,
const int dstHeight,
__global const float4* restrict xweights,
__global const float4* restrict yweights
)
{
const int x = get_global_id(0);
const int y = get_global_id(1);
const int z = get_global_id(2);
// Abort work items outside the dst image bounds.
if ((get_group_id(0) * 64 >= (dstWidth >> ((z == 0) ? 0 : 1))) || (get_group_id(1) * 16 >= (dstHeight >> ((z == 0) ? 0 : 1))))
return;
// Select per-plane offset and row stride based on z.
const int srcPlaneOffset = (z == 0) ? srcPlaneOffset0 : ((z == 1) ? srcPlaneOffset1 : srcPlaneOffset2);
const int dstPlaneOffset = (z == 0) ? dstPlaneOffset0 : ((z == 1) ? dstPlaneOffset1 : dstPlaneOffset2);
const int srcRowWords = (z == 0) ? srcRowWords0: ((z == 1) ? srcRowWords1 : srcRowWords2);
const int dstRowWords = (z == 0) ? dstRowWords0: ((z == 1) ? dstRowWords1 : dstRowWords2);
// 64 columns x up to 36 x-filtered rows staged in local memory.
__local uchar pixels[64 * 36];
const int localRowPixels = 64;
const int groupHeight = 16; // src pixel height output by the workgroup
const int ypad = 2;         // extra rows above/below for the 4-tap y filter
const int localx = get_local_id(0);
const int globalStartRow = floor((get_group_id(1) * groupHeight) / yscale);
const int globalRowCount = ceil(groupHeight / yscale) + 2 * ypad;
// 4 source columns (clamped to the image) and their weights for this x.
float4 weights = xweights[x];
int4 woffs = floor(x / xscale);
woffs += (int4)(-1, 0, 1, 2);
woffs = clamp(woffs, 0, (srcWidth >> ((z == 0) ? 0 : 1)) - 1);
const int maxy = (srcHeight >> ((z == 0) ? 0 : 1)) - 1;
// Scale x from global into LDS
for (int i = 0; i <= globalRowCount; ++i) {
int4 offs = srcPlaneOffset + clamp(globalStartRow - ypad + i, 0, maxy) * srcRowWords;
offs += woffs;
pixels[localx + i * localRowPixels] = convert_uchar(clamp(round(dot(weights,
(float4)(src[offs.x], src[offs.y], src[offs.z], src[offs.w]))), 0.0f, 255.0f));
}
barrier(CLK_LOCAL_MEM_FENCE); // all rows staged before the y pass reads them
// Scale y from LDS into global
if (x >= dstWidth >> ((z == 0) ? 0 : 1))
return;
int off = dstPlaneOffset + x + (get_group_id(1) * groupHeight) * dstRowWords;
for (int i = 0; i < groupHeight; ++i) {
if (y >= dstHeight >> ((z == 0) ? 0 : 1))
break;
// Map the output row back to its LDS row (offset by the pad).
int localy = floor((get_group_id(1) * groupHeight + i) / yscale);
localy = localy - globalStartRow + ypad;
int loff = localx + localy * localRowPixels;
dst[off] = convert_uchar(clamp(round(dot(yweights[get_group_id(1) * groupHeight + i],
(float4)(pixels[loff - localRowPixels], pixels[loff], pixels[loff + localRowPixels]
, pixels[loff + localRowPixels * 2]))), 0.0f, 255.0f));
off += dstRowWords;
}
}
);
// Yadif (yet-another-deinterlacing-filter) OpenCL kernel: filter_v6 computes
// one deinterlaced pixel by combining a temporal prediction with the
// best-scoring spatial (edge-directed) interpolation.
// NOTE(review): filter_v6 as captured here appears damaged -- it uses
// `index`, `mrefs`, `c`, `d`, `prev2`, `next2`, `score` and `mode` without
// visible declarations, and the `select(...)` on the prefs line is missing
// its closing parenthesis.  Several lines of the original kernel were
// evidently lost when this archive was converted to text; compare against
// the upstream HandBrake 0.10.x libhb/openclkernels.h before editing.
// Comments are stripped by the preprocessor before KERNEL stringification.
char *kernel_src_yadif_filter = KERNEL(
void filter_v6(
global unsigned char *dst,
global unsigned char *prev,
global unsigned char *cur,
global unsigned char *next,
int x,
int y,
int width,
int height,
int parity,
int inlinesize,
int outlinesize,
int inmode,
int uvflag
)
{
// flag shifts chroma coordinates into the chroma half of the buffer.
int flag = uvflag * (y >=height) * height;
int prefs = select(-(inlinesize), inlinesize,((y+1) - flag) >1;
int e = cur[index + prefs];
// Temporal differences between the previous/next fields and this one.
int temporal_diff0 = abs((prev2[index]) - (next2[index]));
int temporal_diff1 =(abs(prev[index + mrefs] - c) + abs(prev[index + prefs] - e) )>>1;
int temporal_diff2 =(abs(next[index + mrefs] - c) + abs(next[index + prefs] - e) )>>1;
int diff = max(max(temporal_diff0>>1, temporal_diff1), temporal_diff2);
// Start from the plain vertical average, then try four edge directions
// and keep the one with the lowest absolute-difference score.
int spatial_pred = (c+e)>>1;
int spatial_score = abs(cur[index + mrefs-1] - cur[index + prefs-1]) + abs(c-e) + abs(cur[index + mrefs+1] - cur[index + prefs+1]) - 1;
//check -1
score = abs(cur[index + mrefs-2] - cur[index + prefs])
+ abs(cur[index + mrefs-1] - cur[index + prefs+1])
+ abs(cur[index + mrefs] - cur[index + prefs+2]);
if (score < spatial_score)
{
spatial_score= score;
spatial_pred= (cur[index + mrefs-1] + cur[index + prefs+1])>>1;
}
//check -2
score = abs(cur[index + mrefs-3] - cur[index + prefs+1])
+ abs(cur[index + mrefs-2] - cur[index + prefs+2])
+ abs(cur[index + mrefs-1] - cur[index + prefs+3]);
if (score < spatial_score)
{
spatial_score= score;
spatial_pred= (cur[index + mrefs-2] + cur[index + prefs+2])>>1;
}
//check 1
score = abs(cur[index + mrefs] - cur[index + prefs-2])
+ abs(cur[index + mrefs+1] - cur[index + prefs-1])
+ abs(cur[index + mrefs+2] - cur[index + prefs]);
if (score < spatial_score)
{
spatial_score= score;
spatial_pred= (cur[index + mrefs+1] + cur[index + prefs-1])>>1;
}
//check 2
score = abs(cur[index + mrefs+1] - cur[index + prefs-3])
+ abs(cur[index + mrefs+2] - cur[index + prefs-2])
+ abs(cur[index + mrefs+3] - cur[index + prefs-1]);
if (score < spatial_score)
{
spatial_score= score;
spatial_pred= (cur[index + mrefs+2] + cur[index + prefs-2])>>1;
}
// Modes < 2 enable the temporal spatial check (yadif "spatial check").
if (mode < 2)
{
int b = (prev2[index + (mrefs<<1)] + next2[index + (mrefs<<1)])>>1;
int f = (prev2[index + (prefs<<1)] + next2[index + (prefs<<1)])>>1;
int diffmax = max(max(d-e, d-c), min(b-c, f-e));
int diffmin = min(min(d-e, d-c), max(b-c, f-e));
diff = max(max(diff, diffmin), -diffmax);
}
// Clamp the spatial prediction to within +/- diff of the temporal one.
if (spatial_pred > d + diff)
{
spatial_pred = d + diff;
}
else if (spatial_pred < d - diff)
{
spatial_pred = d - diff;
}
dst[outindex] = spatial_pred;
}
// Entry point: the first `width` columns of the NDRange process luma, the
// remaining columns are remapped onto the (interleaved U then V) chroma.
kernel void yadif_filter(
global unsigned char *dst,
global unsigned char *prev,
global unsigned char *cur,
global unsigned char *next,
int parity,
int inlinesizeY,
int inlinesizeUV,
int outlinesizeY,
int outlinesizeUV,
int mode)
{
int x=get_global_id(0);
int y=(get_global_id(1)<<1) + (!parity); // only rows of the missing field
int width=(get_global_size(0)<<1)/3;     // global x covers luma + chroma
int height=get_global_size(1)<<1;
global unsigned char *dst_Y=dst;
global unsigned char *dst_U=dst_Y+height*outlinesizeY;
global unsigned char *prev_Y=prev;
global unsigned char *prev_U=prev_Y+height*inlinesizeY;
global unsigned char *cur_Y=cur;
global unsigned char *cur_U=cur_Y+height*inlinesizeY;
global unsigned char *next_Y=next;
global unsigned char *next_U=next_Y+height*inlinesizeY;
if(x < width)
{
filter_v6(dst_Y,prev_Y,cur_Y,next_Y,x,y,width,height,parity,inlinesizeY,outlinesizeY,mode,0);
}
else
{
x = x - width;
filter_v6(dst_U,prev_U,cur_U,next_U,x,y,width>>1,height>>1,parity,inlinesizeUV,outlinesizeUV,mode,1);
}
}
);
#endif
HandBrake-0.10.2/libhb/decssasub.h 0000664 0001752 0001752 00000002100 12463330511 017233 0 ustar handbrake handbrake /* decssasub.h
*
* Copyright (c) 2003-2015 HandBrake Team
* This file is part of the HandBrake source code
* Homepage: <http://handbrake.fr/>.
* It may be used under the terms of the GNU General Public License v2.
* For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#ifndef __DECSSASUB_H__
#define __DECSSASUB_H__
// Parsed SSA/ASS style attributes for one subtitle text run.
// `flags` is a bitmask of the HB_STYLE_FLAG_* values defined below.
typedef struct
{
uint32_t flags;
uint32_t fg_rgb; // foreground color
uint32_t alt_rgb; // secondary color
uint32_t ol_rgb; // outline color
uint32_t bg_rgb; // background color
uint32_t fg_alpha; // foreground alpha
uint32_t alt_alpha; // secondary alpha
uint32_t ol_alpha; // outline alpha
uint32_t bg_alpha; // background alpha
} hb_subtitle_style_t;
#define HB_STYLE_FLAG_ITALIC 0x0001
#define HB_STYLE_FLAG_BOLD 0x0002
#define HB_STYLE_FLAG_UNDERLINE 0x0004
char * hb_ssa_to_text(char *in, int *consumed, hb_subtitle_style_t *style);
void hb_ssa_style_init(hb_subtitle_style_t *style);
#endif // __DECSSASUB_H__
HandBrake-0.10.2/libhb/ports.h 0000664 0001752 0001752 00000012374 12463330511 016444 0 ustar handbrake handbrake /* ports.h
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: <http://handbrake.fr/>.
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#ifndef HB_PORTS_H
#define HB_PORTS_H
#if defined(_WIN32)
#define DIR_SEP_STR "\\"
#define DIR_SEP_CHAR '\\'
#define IS_DIR_SEP(c) (c == '\\' || c == '/')
#else
#define DIR_SEP_STR "/"
#define DIR_SEP_CHAR '/'
#define IS_DIR_SEP(c) (c == '/')
#endif
/************************************************************************
* CPU info utilities
***********************************************************************/
// CPU microarchitecture detected at runtime (see hb_get_cpu_platform()).
enum hb_cpu_platform
{
// list of microarchitecture codenames
HB_CPU_PLATFORM_UNSPECIFIED = 0,
HB_CPU_PLATFORM_INTEL_BNL, // Bonnell (first-generation Atom)
HB_CPU_PLATFORM_INTEL_SNB, // Sandy Bridge
HB_CPU_PLATFORM_INTEL_IVB, // Ivy Bridge
HB_CPU_PLATFORM_INTEL_SLM, // Silvermont
HB_CPU_PLATFORM_INTEL_HSW, // Haswell
};
int hb_get_cpu_count();
int hb_get_cpu_platform();
const char* hb_get_cpu_name();
const char* hb_get_cpu_platform_name();
/************************************************************************
* Utils
***********************************************************************/
// provide time in ms
uint64_t hb_get_date();
// provide time in us
uint64_t hb_get_time_us();
void hb_snooze( int delay );
int hb_platform_init();
#ifdef SYS_MINGW
char *strtok_r(char *s, const char *delim, char **save_ptr);
#endif
#ifdef SYS_MINGW
// Windows: pair the wide-char directory handle with a scratch dirent.
// NOTE(review): presumably hb_readdir fills `entry` from the _WDIR stream
// so callers get a POSIX-style struct dirent -- confirm in ports.c.
typedef struct
{
_WDIR *wdir;
struct dirent entry;
} HB_DIR;
#else
// POSIX: HB_DIR is just the native DIR handle.
typedef DIR HB_DIR;
#endif
#ifdef SYS_MINGW
// 64-bit stat on Windows so files >2GB report correct sizes.
typedef struct _stat64 hb_stat_t;
#else
typedef struct stat hb_stat_t;
#endif
HB_DIR* hb_opendir(char *path);
int hb_closedir(HB_DIR *dir);
void hb_rewinddir(HB_DIR *dir);
struct dirent * hb_readdir(HB_DIR *dir);
int hb_mkdir(char * name);
int hb_stat(const char *path, hb_stat_t *sb);
FILE * hb_fopen(const char *path, const char *mode);
char * hb_strr_dir_sep(const char *path);
#ifdef __LIBHB__
// Convert utf8 string to current code page.
char * hb_utf8_to_cp(const char *src);
/* Everything from now is only used internally and hidden to the UI */
/************************************************************************
* DVD utils
***********************************************************************/
int hb_dvd_region(char *device, int *region_mask);
/************************************************************************
* File utils
***********************************************************************/
void hb_get_temporary_directory( char path[512] );
void hb_get_tempory_filename( hb_handle_t *, char name[1024],
char * fmt, ... );
/************************************************************************
* Threads
***********************************************************************/
typedef struct hb_thread_s hb_thread_t;
#if defined( SYS_BEOS )
# define HB_LOW_PRIORITY 5
# define HB_NORMAL_PRIORITY 10
#elif defined( SYS_DARWIN )
# define HB_LOW_PRIORITY 0
# define HB_NORMAL_PRIORITY 31
#elif defined( SYS_LINUX ) || defined( SYS_FREEBSD ) || defined ( SYS_SunOS ) || defined ( __FreeBSD_kernel__ )
# define HB_LOW_PRIORITY 0
# define HB_NORMAL_PRIORITY 0
#elif defined( SYS_CYGWIN )
# define HB_LOW_PRIORITY 0
# define HB_NORMAL_PRIORITY 1
#elif defined( SYS_MINGW )
# define HB_LOW_PRIORITY 0
# define HB_NORMAL_PRIORITY 0
#endif
typedef void (thread_func_t)(void *);
hb_thread_t * hb_thread_init( const char * name, thread_func_t *function,
void * arg, int priority );
void hb_thread_close( hb_thread_t ** );
int hb_thread_has_exited( hb_thread_t * );
/************************************************************************
* Mutexes
***********************************************************************/
hb_lock_t * hb_lock_init();
void hb_lock_close( hb_lock_t ** );
void hb_lock( hb_lock_t * );
void hb_unlock( hb_lock_t * );
/************************************************************************
* Condition variables
***********************************************************************/
typedef struct hb_cond_s hb_cond_t;
hb_cond_t * hb_cond_init();
void hb_cond_wait( hb_cond_t *, hb_lock_t * );
void hb_cond_timedwait( hb_cond_t * c, hb_lock_t * lock, int msec );
void hb_cond_signal( hb_cond_t * );
void hb_cond_broadcast( hb_cond_t * c );
void hb_cond_close( hb_cond_t ** );
/************************************************************************
* Network
***********************************************************************/
typedef struct hb_net_s hb_net_t;
hb_net_t * hb_net_open( char * address, int port );
int hb_net_send( hb_net_t *, char * );
int hb_net_recv( hb_net_t *, char *, int );
void hb_net_close( hb_net_t ** );
/************************************************************************
* OS Sleep Allow / Prevent
***********************************************************************/
void* hb_system_sleep_opaque_init();
void hb_system_sleep_opaque_close(void **opaque);
void hb_system_sleep_private_enable(void *opaque);
void hb_system_sleep_private_disable(void *opaque);
#endif /* __LIBHB__ */
#endif
HandBrake-0.10.2/libhb/openclwrapper.c 0000664 0001752 0001752 00000102135 12463330511 020144 0 ustar handbrake handbrake /* openclwrapper.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: <http://handbrake.fr/>.
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
Authors: Peng Gao
Li Cao
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "extras/cl.h"
#include "opencl.h"
#include "openclwrapper.h"
#include "openclkernels.h"
//#define USE_EXTERNAL_KERNEL
#ifdef SYS_MINGW
#include <windows.h>
#endif
#if defined(_MSC_VER)
#define strcasecmp strcmpi
#endif
#define MAX_KERNEL_STRING_LEN 64
#define MAX_CLFILE_NUM 50
#define MAX_CLKERNEL_NUM 200
#define MAX_CLFILE_PATH 255
#define MAX_KERNEL_NUM 50
#define MAX_KERNEL_NAME_LEN 64
#ifndef INVALID_HANDLE_VALUE
#define INVALID_HANDLE_VALUE NULL
#endif
//#define THREAD_PRIORITY_TIME_CRITICAL 15
// Vendor of the OpenCL device selected for GPU acceleration.
enum VENDOR
{
AMD = 0,
Intel,
NVIDIA,
others
};
// Process-wide OpenCL state shared by every module in libhb: the selected
// platform/device, its context and command queue, and the compiled
// programs/kernels keyed by the registration tables below.
typedef struct _GPUEnv
{
//share vb in all modules in hb library
cl_platform_id platform;
cl_device_type dType;
cl_context context;
cl_device_id * devices;
cl_device_id dev;
cl_command_queue command_queue;
cl_kernel kernels[MAX_CLFILE_NUM];
cl_program programs[MAX_CLFILE_NUM]; //one program object maps one kernel source file
char kernelSrcFile[MAX_CLFILE_NUM][256]; //the max len of kernel file name is 256
int file_count; // only one kernel file
// Names registered via HB_OCL_ADD_KERNEL_CFG, parallel to kernel_functions.
char kernel_names[MAX_CLKERNEL_NUM][MAX_KERNEL_STRING_LEN+1];
cl_kernel_function kernel_functions[MAX_CLKERNEL_NUM];
int kernel_count;
int isUserCreated; // 1: created , 0:no create and needed to create by opencl wrapper
enum VENDOR vendor;
}GPUEnv;
// Maps a registered kernel name to its (optional) embedded source string.
typedef struct
{
char kernelName[MAX_KERNEL_NAME_LEN+1];
char * kernelStr;
}hb_kernel_node;
static GPUEnv gpu_env;
static int isInited = 0;
static int useBuffers = 0;
static hb_kernel_node gKernels[MAX_KERNEL_NUM];
/*
 * HB_OCL_ADD_KERNEL_CFG(idx, s, p)
 * Registers kernel name `s` (with optional source string `p`) at slot `idx`
 * in both the static gKernels table and gpu_env.kernel_names, and bumps
 * gpu_env.kernel_count.  `s` must fit in MAX_KERNEL_NAME_LEN /
 * MAX_KERNEL_STRING_LEN characters (strcpy is unbounded).
 *
 * Wrapped in do { } while (0) -- the original bare { } block made
 * `if (cond) HB_OCL_ADD_KERNEL_CFG(...); else ...` a syntax error because
 * the call-site semicolon became a second statement.  The do/while form
 * behaves as a single statement everywhere; call sites are unchanged.
 */
#define HB_OCL_ADD_KERNEL_CFG(idx, s, p)          \
    do {                                          \
        strcpy(gKernels[idx].kernelName, s);      \
        gKernels[idx].kernelStr = p;              \
        strcpy(gpu_env.kernel_names[idx], s);     \
        gpu_env.kernel_count++;                   \
    } while (0)
/**
 * hb_regist_opencl_kernel
 * Resets the kernel registry and registers the two OpenCL kernels libhb
 * uses ("frame_scale" and "yadif_filter") into gKernels / gpu_env.
 * @return 0 always.
 */
int hb_regist_opencl_kernel()
{
//if( !gpu_env.isUserCreated )
// memset( &gpu_env, 0, sizeof(gpu_env) );
// NOTE(review): the memset above is deliberately left disabled -- zeroing
// the whole struct would discard pointers to CL resources that may already
// have been allocated (context, queue, devices).
gpu_env.file_count = 0; //argc;
gpu_env.kernel_count = 0UL; // re-incremented by each ADD_KERNEL_CFG below
HB_OCL_ADD_KERNEL_CFG(0, "frame_scale", NULL);
HB_OCL_ADD_KERNEL_CFG(1, "yadif_filter", NULL);
return 0;
}
/**
 * hb_convert_to_string
 * Reads the entire file `filename` into a freshly malloc'd, NUL-terminated
 * buffer stored in *source.  The caller owns (and must free) the buffer.
 * (The original doc comment mislabeled this as hb_regist_opencl_kernel.)
 * @param filename - path of the kernel source file to read
 * @param source   - out: receives the malloc'd file contents on success
 * @param gpu_info - unused; kept for interface compatibility
 * @param idx      - unused; kept for interface compatibility
 * @return 1 on success, 0 on any failure (open, ftell, OOM, short read)
 */
int hb_convert_to_string( const char *filename, char **source, GPUEnv *gpu_info, int idx )
{
    long file_size;
    size_t result;
    FILE * file = NULL;
    // "rb" instead of "rb+": we never write, and "rb+" fails on read-only files.
    file = fopen( filename, "rb" );
    if( file == NULL )
    {
        return(0);
    }
    fseek( file, 0, SEEK_END );
    file_size = ftell( file );
    if( file_size < 0 )
    {
        // ftell failure was unchecked in the original.
        fclose( file );
        return(0);
    }
    rewind( file );
    *source = (char*)malloc( sizeof(char) * file_size + 1 );
    if( *source == NULL )
    {
        fclose( file ); // the original leaked the handle on this path
        return(0);
    }
    result = fread( *source, 1, file_size, file );
    fclose( file );     // close exactly once, on every remaining path
    if( result != (size_t)file_size )
    {
        free( *source );
        *source = NULL; // don't hand the caller a dangling pointer
        return(0);
    }
    (*source)[file_size] = '\0';
    return(1);
}
/**
 * hb_binary_generated
 * Checks whether a cached compiled binary ("<name> - <device>.bin") exists
 * for the kernel source file on any device of `context`.  On success,
 * *fhandle receives an open read handle for the (last matching) binary.
 * @param context      - OpenCL context whose devices are probed
 * @param cl_file_name - kernel source file name; must contain ".cl"
 * @param fhandle      - out: open FILE* for the cached binary, if found
 * @return nonzero if a cached binary was found and opened, 0 otherwise
 */
int hb_binary_generated( cl_context context, const char * cl_file_name, FILE ** fhandle )
{
    int i = 0;
    cl_int status;
    cl_uint numDevices;
    cl_device_id *devices;
    char * str = NULL;
    FILE * fd = NULL;
    if (hb_ocl == NULL)
    {
        hb_error("hb_binary_generated: OpenCL support not available");
        return 0;
    }
    status = hb_ocl->clGetContextInfo(context, CL_CONTEXT_NUM_DEVICES,
                                      sizeof(numDevices), &numDevices, NULL);
    if( status != CL_SUCCESS )
    {
        hb_log( "OpenCL: Get context info failed" );
        return 0;
    }
    devices = (cl_device_id*)malloc( sizeof(cl_device_id) * numDevices );
    if( devices == NULL )
    {
        hb_log( "OpenCL: No device found" );
        return 0;
    }
    /* grab the handles to all of the devices in the context. */
    status = hb_ocl->clGetContextInfo(context, CL_CONTEXT_DEVICES,
                                      sizeof(cl_device_id) * numDevices,
                                      devices, NULL);
    if( status != CL_SUCCESS )
    {
        // The original silently ignored this failure and then read
        // uninitialized device handles.
        hb_log( "OpenCL: Get context devices failed" );
        free( devices );
        return 0;
    }
    status = 0;
    /* look for a cached binary file for each device. */
    for (i = 0; i < numDevices; i++)
    {
        char fileName[256] = { 0 };
        char cl_name[128] = { 0 };
        if (devices[i])
        {
            char deviceName[1024];
            status = hb_ocl->clGetDeviceInfo(devices[i], CL_DEVICE_NAME,
                                             sizeof(deviceName), deviceName, NULL);
            str = (char*)strstr(cl_file_name, ".cl");
            if (str == NULL || (size_t)(str - cl_file_name) >= sizeof(cl_name))
            {
                // The original dereferenced a NULL strstr result when the
                // file name had no ".cl" suffix.
                status = 0;
                continue;
            }
            memcpy(cl_name, cl_file_name, str - cl_file_name);
            cl_name[str - cl_file_name] = '\0';
            // snprintf: deviceName alone (up to 1023 chars) could overflow
            // the original unbounded sprintf into fileName[256].
            snprintf(fileName, sizeof(fileName), "./%s - %s.bin", cl_name, deviceName);
            if (fd != NULL)
            {
                fclose(fd); // don't leak a handle opened for a prior device
            }
            fd = fopen(fileName, "rb");
            status = fd != NULL;
        }
    }
    if( devices != NULL )
    {
        free( devices );
        devices = NULL;
    }
    if( fd != NULL )
        *fhandle = fd;
    return status;
}
/**
 * hb_write_binary_to_file
 * Writes `numBytes` bytes from `binary` to `fileName`, truncating any
 * existing file.  (Parameter was spelled "birary" in the original.)
 * @param fileName - destination path
 * @param binary   - bytes to write
 * @param numBytes - number of bytes to write
 * @return 1 on success, 0 on failure (open error, short write, or a
 *         failed flush/close -- the original reported those as success)
 */
int hb_write_binary_to_file( const char* fileName, const char* binary, size_t numBytes )
{
    FILE *output = NULL;
    size_t written;
    int close_ok;

    output = fopen( fileName, "wb" );
    if( output == NULL )
        return 0;

    written = fwrite( binary, sizeof(char), numBytes, output );
    close_ok = (fclose( output ) == 0); // fclose flushes; its result matters

    return (written == numBytes && close_ok) ? 1 : 0;
}
/**
* hb_generat_bin_from_kernel_source
* @param program -
* @param cl_file_name -
*/
/**
 * hb_generat_bin_from_kernel_source
 * Dumps the compiled binary of `program` for every device into
 * "./<kernel> - <device>.bin" so later runs can skip compilation.
 * @param program      - built OpenCL program
 * @param cl_file_name - kernel source name; must contain ".cl"
 * @return 1 on success, 0 on failure
 */
int hb_generat_bin_from_kernel_source( cl_program program, const char * cl_file_name )
{
    int i = 0;
    int ret = 0;
    cl_int status;
    cl_uint numDevices = 0;
    size_t *binarySizes = NULL;
    cl_device_id *devices = NULL;
    char **binaries = NULL;
    char *str = NULL;

    if (hb_ocl == NULL)
    {
        hb_error("hb_generat_bin_from_kernel_source: OpenCL support not available");
        return 0;
    }

    status = hb_ocl->clGetProgramInfo(program, CL_PROGRAM_NUM_DEVICES,
                                      sizeof(numDevices), &numDevices, NULL);
    if( status != CL_SUCCESS )
    {
        hb_log("OpenCL: hb_generat_bin_from_kernel_source: clGetProgramInfo for CL_PROGRAM_NUM_DEVICES failed");
        return 0;
    }

    devices = (cl_device_id*)malloc( sizeof(cl_device_id) * numDevices );
    if( devices == NULL )
    {
        hb_log("OpenCL: hb_generat_bin_from_kernel_source: no device found");
        return 0;
    }

    /* grab the handles to all of the devices in the program. */
    status = hb_ocl->clGetProgramInfo(program, CL_PROGRAM_DEVICES,
                                      sizeof(cl_device_id) * numDevices,
                                      devices, NULL);
    if( status != CL_SUCCESS )
    {
        hb_log("OpenCL: hb_generat_bin_from_kernel_source: clGetProgramInfo for CL_PROGRAM_DEVICES failed");
        goto cleanup;   /* bug fix: the original leaked `devices` here */
    }

    /* figure out the sizes of each of the binaries. */
    binarySizes = (size_t*)malloc( sizeof(size_t) * numDevices );
    if( binarySizes == NULL )
    {
        /* bug fix: this malloc result was previously used unchecked */
        goto cleanup;
    }
    status = hb_ocl->clGetProgramInfo(program, CL_PROGRAM_BINARY_SIZES,
                                      sizeof(size_t) * numDevices,
                                      binarySizes, NULL);
    if( status != CL_SUCCESS )
    {
        hb_log("OpenCL: hb_generat_bin_from_kernel_source: clGetProgramInfo for CL_PROGRAM_BINARY_SIZES failed");
        goto cleanup;
    }

    /* copy over all of the generated binaries.
       calloc zeroes the pointer array so cleanup can free() blindly. */
    binaries = (char**)calloc( numDevices, sizeof(char *) );
    if( binaries == NULL )
    {
        hb_log("OpenCL: hb_generat_bin_from_kernel_source: malloc for binaries failed");
        goto cleanup;
    }

    for( i = 0; i < numDevices; i++ )
    {
        if( binarySizes[i] != 0 )
        {
            binaries[i] = (char*)malloc( sizeof(char) * binarySizes[i] );
            if( binaries[i] == NULL )
            {
                hb_log("OpenCL: hb_generat_bin_from_kernel_source: malloc for binaries[%d] failed", i);
                goto cleanup;
            }
        }
        else
        {
            binaries[i] = NULL;
        }
    }

    status = hb_ocl->clGetProgramInfo(program, CL_PROGRAM_BINARIES,
                                      sizeof(char *) * numDevices,
                                      binaries, NULL);
    if( status != CL_SUCCESS )
    {
        hb_log("OpenCL: hb_generat_bin_from_kernel_source: clGetProgramInfo for CL_PROGRAM_BINARIES failed");
        goto cleanup;
    }

    /* dump out each binary into its own separate file. */
    for (i = 0; i < numDevices; i++)
    {
        char fileName[256] = {0};
        char cl_name[128] = {0};
        if (binarySizes[i])
        {
            char deviceName[1024];
            status = hb_ocl->clGetDeviceInfo(devices[i], CL_DEVICE_NAME,
                                             sizeof(deviceName), deviceName,
                                             NULL);
            str = (char*)strstr( cl_file_name, ".cl" );
            if (str == NULL)
            {
                /* bug fix: avoid negative memcpy length when ".cl" absent */
                goto cleanup;
            }
            memcpy(cl_name, cl_file_name, str - cl_file_name);
            cl_name[str - cl_file_name] = '\0';
            /* bug fix: sprintf could overflow fileName */
            snprintf(fileName, sizeof(fileName), "./%s - %s.bin", cl_name, deviceName);
            if (!hb_write_binary_to_file(fileName, binaries[i], binarySizes[i]))
            {
                hb_log("OpenCL: hb_generat_bin_from_kernel_source: unable to write kernel, writing to temporary directory instead.");
                goto cleanup;
            }
        }
    }

    ret = 1;

cleanup:
    // Release all resources and memory
    if( binaries != NULL )
    {
        for( i = 0; i < numDevices; i++ )
        {
            free( binaries[i] );
        }
        free( binaries );
    }
    free( binarySizes );
    free( devices );
    return ret;
}
/**
* hb_init_opencl_attr
* @param env -
*/
/**
 * hb_init_opencl_attr
 * Adopts an externally created OpenCL environment into the global
 * gpu_env, marking it user-created so discovery is skipped later.
 * @param env - externally created OpenCL objects to adopt
 * @return 0 when the environment was adopted, 1 if one already exists
 */
int hb_init_opencl_attr( OpenCLEnv * env )
{
    if( gpu_env.isUserCreated )
    {
        return 1;
    }

    gpu_env.platform      = env->platform;
    gpu_env.context       = env->context;
    gpu_env.dev           = env->devices;
    gpu_env.command_queue = env->command_queue;
    gpu_env.isUserCreated = 1;
    return 0;
}
/**
* hb_create_kernel
* @param kernelname -
* @param env -
*/
/**
 * hb_create_kernel
 * Creates a kernel object from the first compiled program and fills the
 * caller's KernelEnv with the global context and command queue.
 * @param kernelname - name of the kernel function in the program
 * @param env        - receives kernel, context and command queue
 * @return 0 on success, 1 on failure
 */
int hb_create_kernel( char * kernelname, KernelEnv * env )
{
    int status;

    if (hb_ocl == NULL)
    {
        hb_error("hb_create_kernel: OpenCL support not available");
        return 0;
    }

    env->context       = gpu_env.context;
    env->command_queue = gpu_env.command_queue;
    env->kernel        = hb_ocl->clCreateKernel(gpu_env.programs[0],
                                                kernelname, &status);
    return (status == CL_SUCCESS) ? 0 : 1;
}
/**
* hb_release_kernel
* @param env -
*/
/**
 * hb_release_kernel
 * Releases the kernel object held in env.
 * @param env - environment whose kernel is released
 * @return 0 on success, 1 on failure
 */
int hb_release_kernel( KernelEnv * env )
{
    cl_int err;

    if (hb_ocl == NULL)
    {
        hb_error("hb_release_kernel: OpenCL support not available");
        return 0;
    }

    err = hb_ocl->clReleaseKernel(env->kernel);
    return (err == CL_SUCCESS) ? 0 : 1;
}
/**
* hb_init_opencl_env
* @param gpu_info -
*/
/* One-shot guard: hb_init_opencl_env only does real work on its first call. */
static int init_once = 0;

/**
 * hb_init_opencl_env
 * Discovers an OpenCL platform with at least one GPU device, then creates
 * the context, device list and command queue stored in *gpu_info.
 * Discovery is skipped when gpu_info->isUserCreated is set (objects were
 * supplied via hb_init_opencl_attr). Runs at most once per process.
 * @param gpu_info - environment to populate
 * @return 0 on success (or repeat call), 1 on failure
 */
int hb_init_opencl_env( GPUEnv *gpu_info )
{
    size_t length;
    cl_int status;
    cl_uint numPlatforms, numDevices;
    cl_platform_id *platforms;
    cl_context_properties cps[3];
    char platformName[100];
    unsigned int i;
    void *handle = INVALID_HANDLE_VALUE;

    /* Initialize at most once per process. */
    if (init_once != 0)
        return 0;
    else
        init_once = 1;

    if (hb_ocl == NULL)
    {
        hb_error("hb_init_opencl_env: OpenCL support not available");
        return 1;
    }

    /*
     * Have a look at the available platforms.
     */
    if( !gpu_info->isUserCreated )
    {
        status = hb_ocl->clGetPlatformIDs(0, NULL, &numPlatforms);
        if( status != CL_SUCCESS )
        {
            hb_log( "OpenCL: OpenCL device platform not found." );
            return(1);
        }

        gpu_info->platform = NULL;
        if( 0 < numPlatforms )
        {
            platforms = (cl_platform_id*)malloc(
                    numPlatforms * sizeof(cl_platform_id));
            if( platforms == (cl_platform_id*)NULL )
            {
                return(1);
            }
            status = hb_ocl->clGetPlatformIDs(numPlatforms, platforms, NULL);
            if( status != CL_SUCCESS )
            {
                /* NOTE(review): `platforms` leaks on this error path. */
                hb_log( "OpenCL: Specific opencl platform not found." );
                return(1);
            }

            /* Pick the first platform that reports at least one GPU device. */
            for( i = 0; i < numPlatforms; i++ )
            {
                status = hb_ocl->clGetPlatformInfo(platforms[i], CL_PLATFORM_VENDOR,
                                                   sizeof(platformName), platformName, NULL);
                if( status != CL_SUCCESS )
                {
                    continue;
                }
                gpu_info->platform = platforms[i];

                /* Record the vendor; AMD gets special handling elsewhere
                   (see the CL_QUEUE_THREAD_HANDLE_AMD query below). */
                if (!strcmp(platformName, "Advanced Micro Devices, Inc.") ||
                    !strcmp(platformName, "AMD"))
                    gpu_info->vendor = AMD;
                else
                    gpu_info->vendor = others;

                gpu_info->platform = platforms[i];

                /* Count GPU devices on this platform without fetching them. */
                status = hb_ocl->clGetDeviceIDs(gpu_info->platform /* platform */,
                                                CL_DEVICE_TYPE_GPU /* device_type */,
                                                0 /* num_entries */,
                                                NULL /* devices */, &numDevices);
                if( status != CL_SUCCESS )
                {
                    continue;
                }

                if( numDevices )
                    break;
            }
            free( platforms );
        }

        if( NULL == gpu_info->platform )
        {
            hb_log( "OpenCL: No OpenCL-compatible GPU found." );
            return(1);
        }

        if( status != CL_SUCCESS )
        {
            hb_log( "OpenCL: No OpenCL-compatible GPU found." );
            return(1);
        }

        /*
         * Use available platform.
         */
        cps[0] = CL_CONTEXT_PLATFORM;
        cps[1] = (cl_context_properties)gpu_info->platform;
        cps[2] = 0;

        /* Check for GPU; fall back to CPU, then the platform default. */
        gpu_info->dType = CL_DEVICE_TYPE_GPU;
        gpu_info->context = hb_ocl->clCreateContextFromType(cps, gpu_info->dType,
                                                            NULL, NULL, &status);
        if( (gpu_info->context == (cl_context)NULL) || (status != CL_SUCCESS) )
        {
            gpu_info->dType = CL_DEVICE_TYPE_CPU;
            gpu_info->context = hb_ocl->clCreateContextFromType(cps, gpu_info->dType,
                                                                NULL, NULL, &status);
        }
        if( (gpu_info->context == (cl_context)NULL) || (status != CL_SUCCESS) )
        {
            gpu_info->dType = CL_DEVICE_TYPE_DEFAULT;
            gpu_info->context = hb_ocl->clCreateContextFromType(cps, gpu_info->dType,
                                                                NULL, NULL, &status);
        }
        if( (gpu_info->context == (cl_context)NULL) || (status != CL_SUCCESS) )
        {
            hb_log( "OpenCL: Unable to create opencl context." );
            return(1);
        }

        /* Detect OpenCL devices. */
        /* First, get the size of device list data */
        status = hb_ocl->clGetContextInfo(gpu_info->context, CL_CONTEXT_DEVICES,
                                          0, NULL, &length);
        if((status != CL_SUCCESS) || (length == 0))
        {
            hb_log( "OpenCL: Unable to get the list of devices in context." );
            return(1);
        }

        /* Now allocate memory for device list based on the size we got earlier */
        gpu_info->devices = (cl_device_id*)malloc( length );
        if( gpu_info->devices == (cl_device_id*)NULL )
        {
            return(1);
        }

        /* Now, get the device list data */
        status = hb_ocl->clGetContextInfo(gpu_info->context, CL_CONTEXT_DEVICES,
                                          length, gpu_info->devices, NULL);
        if( status != CL_SUCCESS )
        {
            hb_log( "OpenCL: Unable to get the device list data in context." );
            return(1);
        }

        /* Create OpenCL command queue on the first device in the context. */
        gpu_info->command_queue = hb_ocl->clCreateCommandQueue(gpu_info->context,
                                                               gpu_info->devices[0],
                                                               0, &status);
        if( status != CL_SUCCESS )
        {
            hb_log( "OpenCL: Unable to create opencl command queue." );
            return(1);
        }
    }

    /* AMD extension: fetch the queue's worker-thread handle so its priority
       can be raised on Windows. Other vendors simply fail this query. */
    if ((CL_SUCCESS == hb_ocl->clGetCommandQueueInfo(gpu_info->command_queue,
                                                     CL_QUEUE_THREAD_HANDLE_AMD,
                                                     sizeof(handle), &handle, NULL)) &&
        (INVALID_HANDLE_VALUE != handle))
    {
#ifdef SYS_MINGW
        SetThreadPriority( handle, THREAD_PRIORITY_TIME_CRITICAL );
#endif
    }

    return 0;
}
/**
* hb_release_opencl_env
* @param gpu_info -
*/
/**
 * hb_release_opencl_env
 * Releases every compiled program, the command queue and the context held
 * in the global gpu_env, then marks OpenCL as uninitialized.
 * @param gpu_info - environment whose isUserCreated flag is cleared
 * @return 1 on success (or when nothing was initialized), 0 when OpenCL
 *         support is unavailable
 */
int hb_release_opencl_env( GPUEnv *gpu_info )
{
    if( !isInited )
        return 1;
    int i;

    if (hb_ocl == NULL)
    {
        hb_error("hb_release_opencl_env: OpenCL support not available");
        return 0;
    }

    /* bug fix: this loop's text was corrupted in the source ("iclReleaseProgram");
       reconstructed to release each cached program slot. */
    for( i = 0; i < gpu_env.file_count; i++ )
    {
        if( gpu_env.programs[i] )
        {
            hb_ocl->clReleaseProgram(gpu_env.programs[i]);
            gpu_env.programs[i] = NULL;
        }
    }
    if( gpu_env.command_queue )
    {
        hb_ocl->clReleaseCommandQueue(gpu_env.command_queue);
        gpu_env.command_queue = NULL;
    }
    if( gpu_env.context )
    {
        hb_ocl->clReleaseContext(gpu_env.context);
        gpu_env.context = NULL;
    }
    isInited = 0;
    gpu_info->isUserCreated = 0;
    return 1;
}
/**
* hb_register_kernel_wrapper
* @param kernel_name -
* @param function -
*/
/**
 * hb_register_kernel_wrapper
 * Attaches a host-side wrapper function to a registered kernel name.
 * @param kernel_name - name to look up (case-insensitive)
 * @param function    - wrapper to store for that kernel
 * @return 1 if the name was found and the wrapper stored, 0 otherwise
 */
int hb_register_kernel_wrapper( const char *kernel_name, cl_kernel_function function )
{
    int idx;

    for( idx = 0; idx < gpu_env.kernel_count; idx++ )
    {
        if( !strcasecmp( kernel_name, gpu_env.kernel_names[idx] ) )
        {
            gpu_env.kernel_functions[idx] = function;
            return 1;
        }
    }
    return 0;
}
/**
* hb_cached_of_kerner_prg
* @param gpu_env -
* @param cl_file_name -
*/
/**
 * hb_cached_of_kerner_prg
 * Checks whether a program compiled from cl_file_name is already cached
 * in the environment.
 * @param gpu_env      - environment holding source names and programs
 * @param cl_file_name - kernel source name to look up (case-insensitive)
 * @return 1 when a compiled program for that file exists, 0 otherwise
 */
int hb_cached_of_kerner_prg( const GPUEnv *gpu_env, const char * cl_file_name )
{
    int idx;

    for( idx = 0; idx < gpu_env->file_count; idx++ )
    {
        int name_matches = (strcasecmp( gpu_env->kernelSrcFile[idx],
                                        cl_file_name ) == 0);
        if( name_matches && gpu_env->programs[idx] != NULL )
        {
            return 1;
        }
    }
    return 0;
}
/**
* hb_compile_kernel_file
* @param filename -
* @param gpu_info -
* @param indx -
* @param build_option -
*/
/**
 * hb_compile_kernel_file
 * Builds the OpenCL program for `filename` — either from a cached on-disk
 * binary (when hb_binary_generated finds one) or from the embedded kernel
 * source — and stores it in gpu_info->programs[gpu_info->file_count].
 * On build failure the compiler log is dumped to "kernel-build.log".
 * @param filename     - kernel source name, used as the cache key
 * @param gpu_info     - environment holding context/devices/programs
 * @param indx         - unused; the slot used is gpu_info->file_count
 * @param build_option - options string passed to clBuildProgram
 * @return 1 on success (or already compiled), 0 on failure
 */
int hb_compile_kernel_file( const char *filename, GPUEnv *gpu_info,
                            int indx, const char *build_option )
{
    cl_int status;
    size_t length;
    char *source_str;
    const char *source;
    size_t source_size[1];
    char *buildLog = NULL;
    int b_error, binary_status, binaryExisted;
    char * binary;
    cl_uint numDevices;
    cl_device_id *devices;
    FILE * fd;
    FILE * fd1;
    int idx;

    /* Already compiled and cached in memory? Nothing to do. */
    if( hb_cached_of_kerner_prg( gpu_info, filename ) == 1 )
        return (1);

    idx = gpu_info->file_count;

#ifdef USE_EXTERNAL_KERNEL
    status = hb_convert_to_string( filename, &source_str, gpu_info, idx );
    if( status == 0 )
        return(0);
#else
    /* Kernel source is compiled into the binary: concatenate the scale and
       yadif kernel strings into one source blob.
       NOTE(review): the malloc result is used unchecked, and source_str is
       never freed on any path below — a small one-time leak. */
    int kernel_src_size = strlen(kernel_src_scale) + strlen(kernel_src_yadif_filter);
//  char *scale_src;
//  status = hb_convert_to_string("./scale_kernels.cl", &scale_src, gpu_info, idx);
//  if (status != 0)
//      kernel_src_size += strlen(scale_src);
    source_str = (char*)malloc( kernel_src_size + 2 );
    strcpy( source_str, kernel_src_scale );
//  strcat( source_str, scale_src ); //
    strcat( source_str, kernel_src_yadif_filter );
#endif

    source = source_str;
    source_size[0] = strlen( source );

    if (hb_ocl == NULL)
    {
        hb_error("hb_compile_kernel_file: OpenCL support not available");
        return 0;
    }

    /* Prefer a previously generated binary; `fd` is the open cache file. */
    if ((binaryExisted = hb_binary_generated(gpu_info->context, filename, &fd)) == 1)
    {
        status = hb_ocl->clGetContextInfo(gpu_info->context, CL_CONTEXT_NUM_DEVICES,
                                          sizeof(numDevices), &numDevices, NULL);
        if (status != CL_SUCCESS)
        {
            hb_log("OpenCL: Unable to get the number of devices in context.");
            return 0;
        }

        devices = (cl_device_id*)malloc(sizeof(cl_device_id) * numDevices);
        if (devices == NULL)
            return 0;

        /* Measure the cached binary, then read it fully into memory.
           NOTE(review): `fd`, `devices` and `binary` leak on the early
           error returns below. */
        length   = 0;
        b_error  = 0;
        b_error |= fseek(fd, 0, SEEK_END) < 0;
        b_error |= (length = ftell(fd)) <= 0;
        b_error |= fseek(fd, 0, SEEK_SET) < 0;
        if (b_error)
            return 0;

        binary = (char*)calloc(length + 2, sizeof(char));
        if (binary == NULL)
            return 0;

        b_error |= fread(binary, 1, length, fd) != length;
#if 0 // this doesn't work under OS X and/or with some non-AMD GPUs
        if (binary[length-1] != '\n')
            binary[length++] = '\n';
#endif

        if (b_error)
            return 0;

        /* grab the handles to all of the devices in the context. */
        status = hb_ocl->clGetContextInfo(gpu_info->context, CL_CONTEXT_DEVICES,
                                          sizeof(cl_device_id) * numDevices,
                                          devices, NULL);

        gpu_info->programs[idx] = hb_ocl->clCreateProgramWithBinary(gpu_info->context,
                                                                    numDevices,
                                                                    devices,
                                                                    &length,
                                                                    (const unsigned char**)&binary,
                                                                    &binary_status,
                                                                    &status);

        fclose(fd);
        free(devices);
        fd = NULL;
        devices = NULL;
        /* NOTE(review): `binary` is not freed after program creation. */
    }
    else
    {
        /* create a CL program using the kernel source */
        gpu_info->programs[idx] = hb_ocl->clCreateProgramWithSource(gpu_info->context, 1,
                                                                    &source, source_size,
                                                                    &status);
    }

    if((gpu_info->programs[idx] == (cl_program)NULL) || (status != CL_SUCCESS)){
        hb_log( "OpenCL: Unable to get list of devices in context." );
        return(0);
    }

    /* create a cl program executable for all the devices specified;
       user-created environments build against the single supplied device. */
    if( !gpu_info->isUserCreated )
    {
        status = hb_ocl->clBuildProgram(gpu_info->programs[idx], 1, gpu_info->devices,
                                        build_option, NULL, NULL);
    }
    else
    {
        status = hb_ocl->clBuildProgram(gpu_info->programs[idx], 1, &(gpu_info->dev),
                                        build_option, NULL, NULL);
    }

    if( status != CL_SUCCESS )
    {
        /* Build failed: fetch the build log (size first, then contents)
           and dump it to kernel-build.log for diagnosis. */
        if( !gpu_info->isUserCreated )
        {
            status = hb_ocl->clGetProgramBuildInfo(gpu_info->programs[idx],
                                                   gpu_info->devices[0],
                                                   CL_PROGRAM_BUILD_LOG,
                                                   0, NULL, &length);
        }
        else
        {
            status = hb_ocl->clGetProgramBuildInfo(gpu_info->programs[idx],
                                                   gpu_info->dev,
                                                   CL_PROGRAM_BUILD_LOG,
                                                   0, NULL, &length);
        }
        if( status != CL_SUCCESS )
        {
            hb_log( "OpenCL: Unable to get GPU build information." );
            return(0);
        }

        buildLog = (char*)malloc( length );
        if( buildLog == (char*)NULL )
        {
            return(0);
        }
        if( !gpu_info->isUserCreated )
        {
            status = hb_ocl->clGetProgramBuildInfo(gpu_info->programs[idx],
                                                   gpu_info->devices[0],
                                                   CL_PROGRAM_BUILD_LOG,
                                                   length, buildLog, &length);
        }
        else
        {
            status = hb_ocl->clGetProgramBuildInfo(gpu_info->programs[idx],
                                                   gpu_info->dev,
                                                   CL_PROGRAM_BUILD_LOG,
                                                   length, buildLog, &length);
        }

        fd1 = fopen( "kernel-build.log", "w+" );
        if( fd1 != NULL ) {
            fwrite( buildLog, sizeof(char), length, fd1 );
            fclose( fd1 );
        }

        free( buildLog );
        return(0);
    }

    /* Success: remember the source name so future calls hit the cache. */
    strcpy( gpu_env.kernelSrcFile[idx], filename );

    if (binaryExisted != 1)
    {
        //hb_generat_bin_from_kernel_source(gpu_env.programs[idx], filename);
    }

    gpu_info->file_count += 1;
    return(1);
}
/**
* hb_get_kernel_env_and_func
* @param kernel_name -
* @param env -
* @param function -
*/
/**
 * hb_get_kernel_env_and_func
 * Looks up a kernel by name and fills in its execution environment and
 * registered wrapper function.
 * @param kernel_name - name to look up (case-insensitive)
 * @param env         - receives context, queue, program and kernel
 * @param function    - receives the registered wrapper
 * @return 1 when found, 0 otherwise
 */
int hb_get_kernel_env_and_func( const char *kernel_name,
                                KernelEnv *env,
                                cl_kernel_function *function )
{
    int idx;

    for( idx = 0; idx < gpu_env.kernel_count; idx++ )
    {
        if( strcasecmp( kernel_name, gpu_env.kernel_names[idx] ) != 0 )
            continue;

        env->context       = gpu_env.context;
        env->command_queue = gpu_env.command_queue;
        env->program       = gpu_env.programs[0];
        env->kernel        = gpu_env.kernels[idx];
        env->isAMD         = ( gpu_env.vendor == AMD ) ? 1 : 0;
        *function = gpu_env.kernel_functions[idx];
        return 1;
    }
    return 0;
}
/**
* hb_get_kernel_env_and_func
* @param kernel_name -
* @param userdata -
*/
/**
 * hb_run_kernel
 * Runs the registered wrapper function for the named kernel.
 * @param kernel_name - kernel to run
 * @param userdata    - opaque argument array passed to the wrapper
 * @return the wrapper's return value, or 0 when the kernel is unknown
 */
int hb_run_kernel( const char *kernel_name, void **userdata )
{
    KernelEnv env;
    cl_kernel_function function;

    memset( &env, 0, sizeof(KernelEnv) );
    if( hb_get_kernel_env_and_func( kernel_name, &env, &function ) != 1 )
    {
        return 0;
    }
    strcpy( env.kernel_name, kernel_name );
    return function( userdata, &env );
}
/**
* hb_init_opencl_run_env
* @param argc -
* @param argv -
* @param build_option -
*/
/**
 * hb_init_opencl_run_env
 * One-time setup of the OpenCL runtime: registers kernels, initializes the
 * device environment and compiles the kernel file.
 * @param argc         - kernel-file count; rejected when out of range
 * @param argv         - unused here
 * @param build_option - compiler options for the kernel build
 * @return 0 on success (or when already initialized), 1 on failure
 */
int hb_init_opencl_run_env( int argc, char **argv, const char *build_option )
{
    int status;

    if( MAX_CLKERNEL_NUM <= 0 )
        return 1;
    if( argc < 0 || argc > MAX_CLFILE_NUM )
        return 1;
    if( isInited )
        return 0;

    hb_regist_opencl_kernel();

    /* initialize devices, context, command_queue */
    status = hb_init_opencl_env( &gpu_env );
    if( status )
        return 1;

    /* initialize program, kernel_name, kernel_count */
    status = hb_compile_kernel_file( "hb-opencl-kernels.cl",
                                     &gpu_env, 0, build_option );
    if( status == 0 || gpu_env.kernel_count == 0 )
        return 1;

    useBuffers = 1;
    isInited = 1;
    return 0;
}
/**
* hb_release_opencl_run_env
*/
/**
 * hb_release_opencl_run_env
 * Tears down the global OpenCL runtime environment.
 * @return result of hb_release_opencl_env
 */
int hb_release_opencl_run_env()
{
    return hb_release_opencl_env( &gpu_env );
}
/**
* hb_opencl_stats
*/
/**
 * hb_opencl_stats
 * @return non-zero when the OpenCL runtime has been initialized
 */
int hb_opencl_stats()
{
    return isInited;
}
/**
* hb_get_opencl_env
*/
/**
 * hb_get_opencl_env
 * Ensures the global device environment exists.
 * @return result of hb_init_opencl_env (0 on success)
 */
int hb_get_opencl_env()
{
    /* initialize devices, context, command_queue */
    return hb_init_opencl_env( &gpu_env );
}
/**
* hb_create_buffer
* @param cl_inBuf -
* @param flags -
* @param size -
*/
/**
 * hb_create_buffer
 * Allocates an OpenCL buffer in the global context.
 * @param cl_Buf - receives the new buffer handle
 * @param flags  - cl_mem_flags for the allocation
 * @param size   - buffer size in bytes
 * @return 1 on success, 0 on failure
 */
int hb_create_buffer( cl_mem *cl_Buf, int flags, int size )
{
    int err;

    if (hb_ocl == NULL)
    {
        hb_error("hb_create_buffer: OpenCL support not available");
        return 0;
    }

    *cl_Buf = hb_ocl->clCreateBuffer( gpu_env.context, flags, size, NULL, &err );
    if( err != CL_SUCCESS )
    {
        hb_log( "OpenCL: clCreateBuffer error '%d'", err );
        return 0;
    }
    return 1;
}
/**
* hb_read_opencl_buffer
* @param cl_inBuf -
* @param outbuf -
* @param size -
*/
/**
 * hb_read_opencl_buffer
 * Blocking read of an OpenCL buffer into host memory.
 * @param cl_inBuf - device buffer to read from
 * @param outbuf   - host destination
 * @param size     - number of bytes to read
 * @return 1 on success, 0 on failure
 */
int hb_read_opencl_buffer( cl_mem cl_inBuf, unsigned char *outbuf, int size )
{
    int status;

    if (hb_ocl == NULL)
    {
        /* bug fix: message previously read "hb_read_opencl_suffer" */
        hb_error("hb_read_opencl_buffer: OpenCL support not available");
        return 0;
    }

    status = hb_ocl->clEnqueueReadBuffer(gpu_env.command_queue, cl_inBuf,
                                         CL_TRUE, 0, size, outbuf, 0, 0, 0);
    if( status != CL_SUCCESS )
    {
        /* bug fix: message previously named "av_read_opencl_buffer" */
        hb_log( "OpenCL: hb_read_opencl_buffer error '%d'", status );
        return 0;
    }
    return 1;
}
/**
 * hb_cl_create_mapped_buffer
 * Allocates a host-visible (CL_MEM_ALLOC_HOST_PTR) buffer and maps it for
 * read/write, returning both the cl_mem handle and the mapped address.
 * @param mem  - receives the buffer handle
 * @param addr - receives the mapped host pointer
 * @param size - buffer size in bytes
 * @return 1 on success, 0 on failure
 */
int hb_cl_create_mapped_buffer(cl_mem *mem, unsigned char **addr, int size)
{
    int err;
    const int flags = CL_MEM_ALLOC_HOST_PTR;

    if (hb_ocl == NULL)
    {
        hb_error("hb_cl_create_mapped_buffer: OpenCL support not available");
        return 0;
    }

    *mem  = hb_ocl->clCreateBuffer(gpu_env.context, flags, size, NULL, &err);
    *addr = hb_ocl->clEnqueueMapBuffer(gpu_env.command_queue, *mem, CL_TRUE,
                                       CL_MAP_READ|CL_MAP_WRITE, 0, size, 0,
                                       NULL, NULL, &err);
    return (err == CL_SUCCESS) ? 1 : 0;
}
/**
 * hb_cl_free_mapped_buffer
 * Unmaps a previously mapped buffer and waits for the unmap to complete.
 * @param mem  - buffer handle
 * @param addr - host pointer returned by the original map
 * @return 1 on success, 0 on failure
 */
int hb_cl_free_mapped_buffer(cl_mem mem, unsigned char *addr)
{
    cl_event done;
    int err;

    if (hb_ocl == NULL)
    {
        hb_error("hb_cl_free_mapped_buffer: OpenCL support not available");
        return 0;
    }

    err = hb_ocl->clEnqueueUnmapMemObject(gpu_env.command_queue, mem,
                                          addr, 0, NULL, &done);
    if (err != CL_SUCCESS)
    {
        hb_log("hb_free_mapped_buffer: error %d", err);
        return 0;
    }

    hb_ocl->clWaitForEvents(1, &done);
    return 1;
}
/**
 * hb_opencl_init
 * Convenience wrapper: initializes the global OpenCL environment,
 * ignoring the result.
 */
void hb_opencl_init()
{
    (void)hb_get_opencl_env();
}
/**
 * hb_use_buffers
 * @return non-zero when OpenCL buffers should be used
 */
int hb_use_buffers()
{
    return useBuffers;
}
/**
 * hb_copy_buffer
 * Enqueues a device-to-device buffer copy.
 * @param src_buffer - source buffer
 * @param dst_buffer - destination buffer
 * @param src_offset - byte offset into the source
 * @param dst_offset - byte offset into the destination
 * @param cb         - number of bytes to copy
 * @return 1 on success, 0 on failure
 */
int hb_copy_buffer(cl_mem src_buffer,cl_mem dst_buffer,size_t src_offset,size_t dst_offset,size_t cb)
{
    if (hb_ocl == NULL)
    {
        hb_error("hb_copy_buffer: OpenCL support not available");
        return 0;
    }

    int status = hb_ocl->clEnqueueCopyBuffer(gpu_env.command_queue,
                                             src_buffer, dst_buffer,
                                             src_offset, dst_offset,
                                             cb, 0, 0, 0);
    if( status != CL_SUCCESS )
    {
        /* bug fix: message named hb_read_opencl_buffer; also use hb_log
           like every other error path in this file instead of av_log. */
        hb_log( "OpenCL: hb_copy_buffer error '%d'", status );
        return 0;
    }
    return 1;
}
/**
 * hb_read_opencl_frame_buffer
 * Reads a packed YUV frame out of an OpenCL buffer and splits it into the
 * three caller-supplied plane buffers.
 * @param cl_inBuf  - device buffer holding the frame
 * @param Ybuf/Ubuf/Vbuf - destination planes
 * @param linesize0/1/2  - per-plane strides in bytes
 * @param height    - luma height; chroma height is ceil(height/2)
 * @return 1 on success, 0 on allocation failure
 */
int hb_read_opencl_frame_buffer(cl_mem cl_inBuf,unsigned char *Ybuf,unsigned char *Ubuf,unsigned char *Vbuf,int linesize0,int linesize1,int linesize2,int height)
{
    int chrH = -(-height >> 1);   /* ceil(height / 2) */

    /* NOTE(review): the staging buffer is sized linesize0*height +
       linesize1*chrH*2, but the device read below transfers
       (linesize0+linesize1)*height bytes — this assumes
       linesize1*height <= linesize1*chrH*2; confirm against callers. */
    unsigned char *temp = (unsigned char *)av_malloc(sizeof(uint8_t) * (linesize0 * height + linesize1 * chrH * 2));
    if (temp == NULL)
    {
        /* bug fix: av_malloc result was previously used unchecked */
        return 0;
    }

    if(hb_read_opencl_buffer(cl_inBuf,temp,sizeof(uint8_t)*(linesize0 + linesize1)*height))
    {
        memcpy(Ybuf,temp,linesize0 * height);
        memcpy(Ubuf,temp + linesize0 * height,linesize1 *chrH);
        memcpy(Vbuf,temp + linesize0 * height + linesize1 * chrH,linesize2 * chrH);
    }
    av_free(temp);
    return 1;
}
/**
 * hb_write_opencl_frame_buffer
 * Maps an OpenCL buffer and copies the three YUV planes into it back to
 * back, starting at `offset`.
 * @param cl_inBuf  - destination device buffer
 * @param Ybuf/Ubuf/Vbuf - source planes
 * @param linesize0/1/2  - per-plane strides in bytes
 * @param height    - luma height; chroma planes use height/2
 * @param offset    - byte offset into the mapped region
 * @return 1 on success, 0 on failure
 */
int hb_write_opencl_frame_buffer(cl_mem cl_inBuf,unsigned char *Ybuf,unsigned char *Ubuf,unsigned char *Vbuf,int linesize0,int linesize1,int linesize2,int height,int offset)
{
    if (hb_ocl == NULL)
    {
        hb_error("hb_write_opencl_frame_buffer: OpenCL support not available");
        return 0;
    }

    void *mapped = hb_ocl->clEnqueueMapBuffer(gpu_env.command_queue, cl_inBuf,
                                              CL_TRUE,CL_MAP_WRITE, 0,
                                              sizeof(uint8_t) * (linesize0 + linesize1) * height + offset,
                                              0, NULL, NULL, NULL);
    if (mapped == NULL)
    {
        /* bug fix: a failed map previously caused memcpy into NULL+offset */
        hb_error("hb_write_opencl_frame_buffer: clEnqueueMapBuffer failed");
        return 0;
    }

    uint8_t *temp = (uint8_t *)mapped;
    temp += offset;
    memcpy(temp,Ybuf,sizeof(uint8_t) * linesize0 * height);
    memcpy(temp + sizeof(uint8_t) * linesize0 * height,Ubuf,sizeof(uint8_t) * linesize1 * height/2);
    memcpy(temp + sizeof(uint8_t) * (linesize0 * height + linesize1 * height/2),Vbuf,sizeof(uint8_t) * linesize2 * height/2);
    hb_ocl->clEnqueueUnmapMemObject(gpu_env.command_queue, cl_inBuf, mapped, 0, NULL, NULL);
    return 1;
}
/**
 * hb_get_command_queue
 * @return the global OpenCL command queue
 */
cl_command_queue hb_get_command_queue()
{
    return gpu_env.command_queue;
}
/**
 * hb_get_context
 * @return the global OpenCL context
 */
cl_context hb_get_context()
{
    return gpu_env.context;
}
HandBrake-0.10.2/libhb/colormap.h 0000664 0001752 0001752 00000001211 12463330511 017075 0 ustar handbrake handbrake /* colormap.h
*
* Copyright (c) 2003-2015 HandBrake Team
* This file is part of the HandBrake source code
 * Homepage: <http://handbrake.fr/>.
* It may be used under the terms of the GNU General Public License v2.
* For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#ifndef HB_COLORMAP_H
#define HB_COLORMAP_H

/* Swap the first and third bytes of a packed 24-bit color value
   (0xRRGGBB <-> 0xBBGGRR), leaving the middle channel in place. */
#define HB_RGB_TO_BGR(c) (((c & 0xff0000) >> 16) | \
                          ((c & 0x00ff00) ) | \
                          ((c & 0x0000ff) << 16))
/* The byte swap is its own inverse. */
#define HB_BGR_TO_RGB(c) HB_RGB_TO_BGR(c)

/* Look up a color by name; returns a packed 24-bit value. */
uint32_t hb_rgb_lookup_by_name(const char *color);
#endif // HB_COLORMAP_H
HandBrake-0.10.2/libhb/decomb.c 0000664 0001752 0001752 00000254434 12533617362 016540 0 ustar handbrake handbrake /* decomb.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: <http://handbrake.fr/>.
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
The yadif algorithm was created by Michael Niedermayer.
Tritical's work inspired much of the comb detection code:
http://web.missouri.edu/~kes25c/
*/
/*****
Parameters:
Mode : Spatial metric : Motion thresh : Spatial thresh : Mask Filter Mode :
Block thresh : Block width : Block height
Appended for EEDI2:
Magnitude thresh : Variance thresh : Laplacian thresh : Dilation thresh :
Erosion thresh : Noise thresh : Max search distance : Post-processing
Plus:
Parity
Defaults:
391:2:3:3:2:40:16:16:10:20:20:4:2:50:24:1:-1
Original "Faster" settings:
7:2:6:9:1:80:16:16:10:20:20:4:2:50:24:1:-1
*****/
#define MODE_YADIF 1 // Use yadif
#define MODE_BLEND 2 // Use blending interpolation
#define MODE_CUBIC 4 // Use cubic interpolation
#define MODE_EEDI2 8 // Use EEDI2 interpolation
#define MODE_MASK 32 // Output combing masks instead of pictures
#define MODE_BOB 64 // Deinterlace each field to a separate frame
#define MODE_GAMMA 128 // Scale gamma when decombing
#define MODE_FILTER 256 // Filter combing mask
#define MODE_COMPOSITE 512 // Overlay combing mask onto picture
#define FILTER_CLASSIC 1
#define FILTER_ERODE_DILATE 2
/*****
These modes can be layered. For example, Yadif (1) + EEDI2 (8) = 9,
which will feed EEDI2 interpolations to yadif.
** Working combos:
1: Just yadif
2: Just blend
3: Switch between yadif and blend
4: Just cubic interpolate
5: Cubic->yadif
6: Switch between cubic and blend
7: Switch between cubic->yadif and blend
8: Just EEDI2 interpolate
9: EEDI2->yadif
10: Switch between EEDI2 and blend
11: Switch between EEDI2->yadif and blend
...okay I'm getting bored now listing all these different modes
32: Passes through the combing mask for every combed frame (white for combed pixels, otherwise black)
33+: Overlay the combing mask for every combed frame on top of the filtered output (white for combed pixels)
12-15: EEDI2 will override cubic interpolation
*****/
#include "hb.h"
#include "hbffmpeg.h"
#include "eedi2.h"
#include "taskset.h"
#define PARITY_DEFAULT -1
#define ABS(a) ((a) > 0 ? (a) : (-(a)))
#define MIN3(a,b,c) MIN(MIN(a,b),c)
#define MAX3(a,b,c) MAX(MAX(a,b),c)
// Some names to correspond to the pv->eedi_half array's contents
#define SRCPF 0
#define MSKPF 1
#define TMPPF 2
#define DSTPF 3
// Some names to correspond to the pv->eedi_full array's contents
#define DST2PF 0
#define TMP2PF2 1
#define MSK2PF 2
#define TMP2PF 3
#define DST2MPF 4
/* Arguments handed to the yadif worker threads for one frame. */
struct yadif_arguments_s {
    hb_buffer_t *dst;     /* output buffer the workers write into */
    int parity;           /* field parity for this pass */
    int tff;              /* top-field-first flag */
    int is_combed;        /* combing verdict for the current frame */
};

typedef struct yadif_arguments_s yadif_arguments_t;
/* Per-thread argument bundle for the EEDI2 taskset (one thread per plane). */
typedef struct eedi2_thread_arg_s {
    hb_filter_private_t *pv;
    int plane;
} eedi2_thread_arg_t;

/* Per-thread argument bundle for the comb-detection taskset. */
typedef struct decomb_thread_arg_s {
    hb_filter_private_t *pv;
    int segment;             /* index of this thread's segment */
    int segment_start[3];    /* per-plane start row */
    int segment_height[3];   /* per-plane row count */
} decomb_thread_arg_t;

/* Per-thread argument bundle for the yadif taskset. */
typedef struct yadif_thread_arg_s {
    hb_filter_private_t *pv;
    int segment;
    int segment_start[3];
    int segment_height[3];
} yadif_thread_arg_t;
/* All state for one instance of the decomb filter. */
struct hb_filter_private_s
{
    // Decomb parameters (see the mode/threshold table at the top of file)
    int mode;
    int filter_mode;
    int spatial_metric;
    int motion_threshold;
    int spatial_threshold;
    int block_threshold;
    int block_width;
    int block_height;
    int * block_score;          // per-check-thread combing score
    int comb_check_complete;    // set once any thread decides the frame is combed
    int comb_check_nthreads;
    int skip_comb_check;
    int is_combed;

    float gamma_lut[256];       // used when MODE_GAMMA is set

    // EEDI2 parameters
    int magnitude_threshold;
    int variance_threshold;
    int laplacian_threshold;
    int dilation_threshold;
    int erosion_threshold;
    int noise_threshold;
    int maximum_search_distance;
    int post_processing;

    int parity;
    int tff;

    int yadif_ready;

    int deinterlaced_frames;
    int blended_frames;
    int unfiltered_frames;

    hb_buffer_t * ref[3];       // 3-frame reference window (see store_ref)

    /* Make buffers to store a comb masks. */
    hb_buffer_t * mask;
    hb_buffer_t * mask_filtered;
    hb_buffer_t * mask_temp;
    int mask_box_x;             // top-left corner of last combed block
    int mask_box_y;
    uint8_t mask_box_color;     // 1 = deinterlace verdict, 2 = blend verdict

    hb_buffer_t * eedi_half[4]; // indexed by SRCPF/MSKPF/TMPPF/DSTPF
    hb_buffer_t * eedi_full[5]; // indexed by DST2PF/TMP2PF2/MSK2PF/TMP2PF/DST2MPF
    int * cx2;
    int * cy2;
    int * cxy;
    int * tmpc;

    int cpu_count;
    int segment_height[3];

    taskset_t yadif_taskset;         // Threads for Yadif - one per CPU
    yadif_arguments_t *yadif_arguments; // Arguments to thread for work

    taskset_t decomb_filter_taskset; // Threads for comb detection
    taskset_t decomb_check_taskset;  // Threads for comb check
    taskset_t mask_filter_taskset;   // Threads for decomb mask filter
    taskset_t mask_erode_taskset;    // Threads for decomb mask erode
    taskset_t mask_dilate_taskset;   // Threads for decomb mask dilate

    taskset_t eedi2_taskset;         // Threads for eedi2 - one per plane
};
/* 5-tap filter kernel: output = (sum of tap[i] * sample[i]) >> normalize
   (see blend_filter_pixel). */
typedef struct
{
    int tap[5];
    int normalize;
} filter_param_t;
/* Filter entry points (defined later in this file). */
static int hb_decomb_init( hb_filter_object_t * filter,
                           hb_filter_init_t * init );

static int hb_decomb_work( hb_filter_object_t * filter,
                           hb_buffer_t ** buf_in,
                           hb_buffer_t ** buf_out );

static void hb_decomb_close( hb_filter_object_t * filter );

/* Registration record for the decomb filter. */
hb_filter_object_t hb_filter_decomb =
{
    .id            = HB_FILTER_DECOMB,
    .enforce_order = 1,
    .name          = "Decomb",
    .settings      = NULL,
    .init          = hb_decomb_init,
    .work          = hb_decomb_work,
    .close         = hb_decomb_close,
};
// Borrowed from libav
#define times4(x) x, x, x, x
#define times1024(x) times4(times4(times4(times4(times4(x)))))

/* Saturating clamp table: index with (value + 1024). Values below 0 map to
   0x00, 0..255 map to themselves, and values above 255 map to 0xFF. */
static const uint8_t hb_crop_table[256 + 2 * 1024] = {
    times1024(0x00),
    0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,0x0E,0x0F,
    0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17,0x18,0x19,0x1A,0x1B,0x1C,0x1D,0x1E,0x1F,
    0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27,0x28,0x29,0x2A,0x2B,0x2C,0x2D,0x2E,0x2F,
    0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37,0x38,0x39,0x3A,0x3B,0x3C,0x3D,0x3E,0x3F,
    0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47,0x48,0x49,0x4A,0x4B,0x4C,0x4D,0x4E,0x4F,
    0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57,0x58,0x59,0x5A,0x5B,0x5C,0x5D,0x5E,0x5F,
    0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6A,0x6B,0x6C,0x6D,0x6E,0x6F,
    0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7A,0x7B,0x7C,0x7D,0x7E,0x7F,
    0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87,0x88,0x89,0x8A,0x8B,0x8C,0x8D,0x8E,0x8F,
    0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9A,0x9B,0x9C,0x9D,0x9E,0x9F,
    0xA0,0xA1,0xA2,0xA3,0xA4,0xA5,0xA6,0xA7,0xA8,0xA9,0xAA,0xAB,0xAC,0xAD,0xAE,0xAF,
    0xB0,0xB1,0xB2,0xB3,0xB4,0xB5,0xB6,0xB7,0xB8,0xB9,0xBA,0xBB,0xBC,0xBD,0xBE,0xBF,
    0xC0,0xC1,0xC2,0xC3,0xC4,0xC5,0xC6,0xC7,0xC8,0xC9,0xCA,0xCB,0xCC,0xCD,0xCE,0xCF,
    0xD0,0xD1,0xD2,0xD3,0xD4,0xD5,0xD6,0xD7,0xD8,0xD9,0xDA,0xDB,0xDC,0xDD,0xDE,0xDF,
    0xE0,0xE1,0xE2,0xE3,0xE4,0xE5,0xE6,0xE7,0xE8,0xE9,0xEA,0xEB,0xEC,0xED,0xEE,0xEF,
    0xF0,0xF1,0xF2,0xF3,0xF4,0xF5,0xF6,0xF7,0xF8,0xF9,0xFA,0xFB,0xFC,0xFD,0xFE,0xFF,
    times1024(0xFF)
};
/* 4-tap cubic interpolation of the midpoint between y1 and y2 with
   weights (-3, 23, 23, -3)/40, clamped to [0, 255].
   From http://www.neuron2.net/library/cubicinterp.html */
static inline int cubic_interpolate_pixel( int y0, int y1, int y2, int y3 )
{
    int sum = 23 * ( y1 + y2 ) - 3 * ( y0 + y3 );
    return hb_crop_table[( sum / 40 ) + 1024];
}
/* Cubic-interpolates one output row from the rows two above/below it,
   duplicating the nearest available sample at the top/bottom edges. */
static void cubic_interpolate_line(
    uint8_t *dst,
    uint8_t *cur,
    int width,
    int height,
    int stride,
    int y)
{
    int x;

    for( x = 0; x < width; x++ )
    {
        int a = 0, b = 0, c = 0, d = 0;

        /* Two samples above, duplicated near the top edge. */
        if( y >= 3 )
        {
            a = cur[-3*stride];
            b = cur[-stride];
        }
        else if( y >= 1 )
        {
            /* y is 1 or 2: only one sample above, use it twice. */
            a = b = cur[-stride];
        }
        else if( y == 0 )
        {
            /* No samples above, triple up on the one below. */
            a = b = cur[+stride];
        }

        /* Two samples below, duplicated near the bottom edge. */
        if( y <= height - 4 )
        {
            c = cur[+stride];
            d = cur[3*stride];
        }
        else if( y == height - 3 || y == height - 2 )
        {
            c = d = cur[+stride];
        }
        else if( y == height - 1 )
        {
            /* No samples below, triple up on the one above. */
            c = d = cur[-stride];
        }

        *dst++ = cubic_interpolate_pixel( a, b, c, d );
        cur++;
    }
}
/* Draws a one-pixel gray (128) rectangle around the last detected combed
   block on the active combing mask. */
static void draw_mask_box( hb_filter_private_t * pv )
{
    int x = pv->mask_box_x;
    int y = pv->mask_box_y;
    int w = pv->block_width;
    int h = pv->block_height;
    hb_buffer_t * mask = (pv->mode & MODE_FILTER) ? pv->mask_filtered
                                                  : pv->mask;
    uint8_t * mskp  = mask->plane[0].data;
    int       pitch = mask->plane[0].stride;
    int       i;

    /* top and bottom edges */
    for( i = 0; i < w; i++ )
    {
        mskp[y * pitch + x + i]       = 128;
        mskp[(y + h) * pitch + x + i] = 128;
    }
    /* left and right edges */
    for( i = 0; i < h; i++ )
    {
        mskp[pitch * (y + i) + x]     = 128;
        mskp[pitch * (y + i) + x + w] = 128;
    }
}
/* Paints mask values onto one image row: combed pixels (mask 1) become
   white (255), box pixels (mask 128) become gray (128); all other pixels
   are left untouched. */
static void apply_mask_line( uint8_t * srcp,
                             uint8_t * mskp,
                             int width )
{
    int x;

    for( x = 0; x < width; x++ )
    {
        switch( mskp[x] )
        {
            case 1:
                srcp[x] = 255;
                break;
            case 128:
                srcp[x] = 128;
                break;
            default:
                break;
        }
    }
}
/* Renders the combing mask into frame b: in mask mode the luma plane is
   replaced by the mask and chroma is flattened to gray; in composite mode
   the mask is overlaid on top of the existing picture. */
static void apply_mask(hb_filter_private_t * pv, hb_buffer_t * b)
{
    int pp, yy;
    hb_buffer_t * m;

    /* draw_boxes */
    draw_mask_box( pv );

    m = (pv->mode & MODE_FILTER) ? pv->mask_filtered : pv->mask;

    for (pp = 0; pp < 3; pp++)
    {
        uint8_t * dstp = b->plane[pp].data;
        uint8_t * mskp = m->plane[pp].data;

        for( yy = 0; yy < m->plane[pp].height; yy++ )
        {
            if (!(pv->mode & MODE_COMPOSITE))
            {
                /* mask-only output: luma = mask, chroma = flat gray */
                if (pp == 0)
                    memcpy(dstp, mskp, m->plane[pp].width);
                else
                    memset(dstp, 128, m->plane[pp].width);
            }
            if (pp == 0)
            {
                apply_mask_line(dstp, mskp, m->plane[pp].width);
            }

            dstp += b->plane[pp].stride;
            mskp += m->plane[pp].stride;
        }
    }
}
/* Slides the 3-frame reference window: the oldest frame is closed, the
   remaining two shift down, and b becomes the newest reference. */
static void store_ref(hb_filter_private_t * pv, hb_buffer_t * b)
{
    hb_buffer_close(&pv->ref[0]);
    pv->ref[0] = pv->ref[1];
    pv->ref[1] = pv->ref[2];
    pv->ref[2] = b;
}
/* Applies the 5-tap low-pass kernel to one vertical pixel column, then
   shift-normalizes and clamps the result to [0, 255]. */
static inline int blend_filter_pixel(filter_param_t *filter, int up2, int up1, int current, int down1, int down2)
{
    int acc = up2     * filter->tap[0]
            + up1     * filter->tap[1]
            + current * filter->tap[2]
            + down1   * filter->tap[3]
            + down2   * filter->tap[4];

    return hb_crop_table[(acc >> filter->normalize) + 1024];
}
/* Low-pass filters one row vertically with the given 5-tap kernel,
 * clamping the sample offsets at the first/last two rows of the frame.
 * @param filter - tap weights and normalization shift
 * @param dst    - output row
 * @param cur    - input row (rows above/below reached via stride)
 * @param width  - row width in pixels
 * @param height - frame height in rows
 * @param stride - bytes between rows
 * @param y      - index of the row being filtered
 */
static void blend_filter_line(filter_param_t *filter,
                              uint8_t *dst,
                              uint8_t *cur,
                              int width,
                              int height,
                              int stride,
                              int y)
{
    int x;
    int up1, up2, down1, down2;

    if (y > 1 && y < (height - 2))
    {
        up1   = -1 * stride;
        up2   = -2 * stride;
        down1 =  1 * stride;
        down2 =  2 * stride;
    }
    else if (y == 0)
    {
        /* First line, so A and B don't exist.*/
        up1   = up2 = 0;
        down1 = 1 * stride;
        down2 = 2 * stride;
    }
    else if (y == 1)
    {
        /* Second line, no A. */
        up1   = up2 = -1 * stride;
        down1 = 1 * stride;
        down2 = 2 * stride;
    }
    else if (y == (height - 2))
    {
        /* Second to last line, no E. */
        up1   = -1 * stride;
        up2   = -2 * stride;
        down1 = down2 = 1 * stride;
    }
    else if (y == (height - 1))
    {
        /* Last line, no D or E. */
        up1   = -1 * stride;
        up2   = -2 * stride;
        down1 = down2 = 0;
    }
    else
    {
        /* bug fix: message previously read "heigh" */
        hb_error("Invalid value y %d height %d", y, height);
        return;
    }

    for (x = 0; x < width; x++)
    {
        /* Low-pass 5-tap filter */
        dst[0] = blend_filter_pixel(filter, cur[up2], cur[up1], cur[0],
                                    cur[down1], cur[down2]);
        dst++;
        cur++;
    }
}
/* Clears the per-thread combing scores and the early-exit flag before a
   new comb check begins. */
static void reset_combing_results( hb_filter_private_t * pv )
{
    int ii;

    pv->comb_check_complete = 0;
    for( ii = 0; ii < pv->comb_check_nthreads; ii++ )
        pv->block_score[ii] = 0;
}
/* Combines the per-thread block scores into a frame verdict:
   returns 1 (deinterlace) when any score exceeds the threshold,
   2 (blend) when the best score lands between threshold/2 and threshold,
   and 0 (uncombed) otherwise. Also sets mask_box_color accordingly. */
static int check_combing_results( hb_filter_private_t * pv )
{
    int threshold = pv->block_threshold;
    int blend = 0;
    int ii;

    for( ii = 0; ii < pv->comb_check_nthreads; ii++ )
    {
        int score = pv->block_score[ii];

        if( score < threshold / 2 )
            continue;

        if( score > threshold )
        {
            /* Yadif deinterlace video content above the threshold. */
            pv->mask_box_color = 1;
            return 1;
        }

        /* Blend video content that scores between
           ( threshold / 2 ) and threshold. */
        blend = 1;
        pv->mask_box_color = 2;
    }

    /* 2 = blend, 0 = consider this frame uncombed. */
    return blend ? 2 : 0;
}
/* Scan this segment's rows of the *filtered* combing mask in
 * block_width x block_height windows, summing the 0/1 mask pixels in
 * each window, and publish the score in pv->block_score[segment].
 * Once any window exceeds the full threshold, pv->comb_check_complete
 * is raised so sibling threads can stop scanning early. */
static void check_filtered_combing_mask( hb_filter_private_t * pv, int segment, int start, int stop )
{
    /* Go through the mask in X*Y blocks. If any of these windows
       have threshold or more combed pixels, consider the whole
       frame to be combed and send it on to be deinterlaced. */

    /* Block mask threshold -- The number of pixels
       in a block_width * block_height window of
       the mask that need to show combing for the
       whole frame to be seen as such. */
    int threshold = pv->block_threshold;
    int block_width = pv->block_width;
    int block_height = pv->block_height;
    int block_x, block_y;
    int block_score = 0;
    uint8_t * mask_p;
    int x, y, pp;

    /* Only plane 0 (luma) is examined. */
    for( pp = 0; pp < 1; pp++ )
    {
        int stride = pv->mask_filtered->plane[pp].stride;
        int width = pv->mask_filtered->plane[pp].width;

        pv->mask_box_x = -1;
        pv->mask_box_y = -1;
        pv->mask_box_color = 0;

        /* Walk this segment's rows in block-sized steps. */
        for( y = start; y < ( stop - block_height + 1 ); y = y + block_height )
        {
            for( x = 0; x < ( width - block_width ); x = x + block_width )
            {
                /* Sum the 0/1 mask values inside this window. */
                block_score = 0;
                for( block_y = 0; block_y < block_height; block_y++ )
                {
                    int my = y + block_y;
                    mask_p = &pv->mask_filtered->plane[pp].data[my*stride + x];

                    for( block_x = 0; block_x < block_width; block_x++ )
                    {
                        block_score += mask_p[0];
                        mask_p++;
                    }
                }

                if (pv->comb_check_complete)
                {
                    // Some other thread found combing before this one
                    return;
                }

                if( block_score >= ( threshold / 2 ) )
                {
                    /* Remember where combing was found (used to draw the
                       debug mask box) and publish this segment's score. */
                    pv->mask_box_x = x;
                    pv->mask_box_y = y;

                    pv->block_score[segment] = block_score;
                    if( block_score > threshold )
                    {
                        /* Definitely combed: tell everyone to stop early. */
                        pv->comb_check_complete = 1;
                        return;
                    }
                }
            }
        }
    }
}
/* Scan this segment's rows of the *raw* combing mask in
 * block_width x block_height windows and publish the window score in
 * pv->block_score[segment].  A pixel only counts when its horizontal
 * neighbors are also combed (bitwise AND of adjacent mask values),
 * which suppresses single-pixel noise.  Raises pv->comb_check_complete
 * once a window exceeds the full threshold so siblings can stop. */
static void check_combing_mask( hb_filter_private_t * pv, int segment, int start, int stop )
{
    /* Go through the mask in X*Y blocks. If any of these windows
       have threshold or more combed pixels, consider the whole
       frame to be combed and send it on to be deinterlaced. */

    /* Block mask threshold -- The number of pixels
       in a block_width * block_height window of
       the mask that need to show combing for the
       whole frame to be seen as such. */
    int threshold = pv->block_threshold;
    int block_width = pv->block_width;
    int block_height = pv->block_height;
    int block_x, block_y;
    int block_score = 0;
    uint8_t * mask_p;
    int x, y, pp;

    /* Only plane 0 (luma) is examined. */
    for( pp = 0; pp < 1; pp++ )
    {
        int stride = pv->mask->plane[pp].stride;
        int width = pv->mask->plane[pp].width;

        for (y = start; y < (stop - block_height + 1); y = y + block_height)
        {
            for (x = 0; x < (width - block_width); x = x + block_width)
            {
                block_score = 0;

                for( block_y = 0; block_y < block_height; block_y++ )
                {
                    int mask_y = y + block_y;
                    mask_p = &pv->mask->plane[pp].data[mask_y * stride + x];

                    for( block_x = 0; block_x < block_width; block_x++ )
                    {
                        /* We only want to mark a pixel in a block as combed
                           if the adjacent pixels are as well. Got to
                           handle the sides separately. */
                        if( (x + block_x) == 0 )
                        {
                            block_score += mask_p[0] & mask_p[1];
                        }
                        else if( (x + block_x) == (width -1) )
                        {
                            block_score += mask_p[-1] & mask_p[0];
                        }
                        else
                        {
                            block_score += mask_p[-1] & mask_p[0] & mask_p[1];
                        }
                        mask_p++;
                    }
                }

                if (pv->comb_check_complete)
                {
                    // Some other thread found combing before this one
                    return;
                }

                if( block_score >= ( threshold / 2 ) )
                {
                    /* Record the window position for the debug mask box
                       and publish this segment's score. */
                    pv->mask_box_x = x;
                    pv->mask_box_y = y;

                    pv->block_score[segment] = block_score;
                    if( block_score > threshold )
                    {
                        /* Definitely combed: tell sibling threads to stop. */
                        pv->comb_check_complete = 1;
                        return;
                    }
                }
            }
        }
    }
}
/* Precompute a gamma (2.2) transfer lookup for every 8-bit pixel
 * value, mapping 0..255 to 0.0..1.0 for the gamma-space comb checks. */
static void build_gamma_lut( hb_filter_private_t * pv )
{
    int value = 0;

    while (value < 256)
    {
        pv->gamma_lut[value] = pow( ( (float)value / (float)255 ), 2.2f );
        value++;
    }
}
/* Build the combing mask for rows [segment_start, segment_stop) using
 * gamma-corrected pixel values (pv->gamma_lut), so differences in dark
 * areas are weighted closer to how the eye sees them.  Compares the
 * current frame (ref[1]) against its temporal neighbors (ref[0] and
 * ref[2]) and writes a 0/1 per-pixel mask into pv->mask. */
static void detect_gamma_combed_segment( hb_filter_private_t * pv, int segment_start, int segment_stop )
{
    /* A mish-mash of various comb detection tricks
       picked up from neuron2's Decomb plugin for
       AviSynth and tritical's IsCombedT and
       IsCombedTIVTC plugins. */

    /* Comb scoring algorithm */
    /* Motion threshold, rescaled to the LUT's 0..1 value range */
    float mthresh = (float)pv->motion_threshold / (float)255;
    /* Spatial threshold, rescaled likewise */
    float athresh = (float)pv->spatial_threshold / (float)255;
    float athresh6 = 6 *athresh;

    /* One pass for Y only -- U and V are currently skipped (pp < 1). */
    int pp;
    for( pp = 0; pp < 1; pp++ )
    {
        int x, y;
        int stride = pv->ref[0]->plane[pp].stride;
        int width = pv->ref[0]->plane[pp].width;
        int height = pv->ref[0]->plane[pp].height;

        /* Comb detection has to start at y = 2 and end at
           y = height - 2, because it needs to examine
           2 pixels above and 2 below the current pixel. */
        if( segment_start < 2 )
            segment_start = 2;
        if( segment_stop > height - 2 )
            segment_stop = height - 2;

        for( y = segment_start; y < segment_stop; y++ )
        {
            /* These are just to make the buffer locations easier to read. */
            int up_2 = -2 * stride ;
            int up_1 = -1 * stride;
            int down_1 = stride;
            int down_2 = 2 * stride;

            /* We need to examine a column of 5 pixels
               in the prev, cur, and next frames. */
            uint8_t * prev = &pv->ref[0]->plane[pp].data[y * stride];
            uint8_t * cur = &pv->ref[1]->plane[pp].data[y * stride];
            uint8_t * next = &pv->ref[2]->plane[pp].data[y * stride];
            uint8_t * mask = &pv->mask->plane[pp].data[y * stride];

            /* Start from a clean mask row. */
            memset(mask, 0, stride);

            for( x = 0; x < width; x++ )
            {
                float up_diff, down_diff;
                up_diff = pv->gamma_lut[cur[0]] - pv->gamma_lut[cur[up_1]];
                down_diff = pv->gamma_lut[cur[0]] - pv->gamma_lut[cur[down_1]];

                if( ( up_diff > athresh && down_diff > athresh ) ||
                    ( up_diff < -athresh && down_diff < -athresh ) )
                {
                    /* The pixel above and below are different,
                       and they change in the same "direction" too.*/
                    int motion = 0;
                    if( mthresh > 0 )
                    {
                        /* Make sure there's sufficient motion between frame t-1 to frame t+1. */
                        if( fabs( pv->gamma_lut[prev[0]] - pv->gamma_lut[cur[0]] ) > mthresh &&
                            fabs( pv->gamma_lut[cur[up_1]] - pv->gamma_lut[next[up_1]] ) > mthresh &&
                            fabs( pv->gamma_lut[cur[down_1]] - pv->gamma_lut[next[down_1]] ) > mthresh )
                                motion++;

                        if( fabs( pv->gamma_lut[next[0]] - pv->gamma_lut[cur[0]] ) > mthresh &&
                            fabs( pv->gamma_lut[prev[up_1]] - pv->gamma_lut[cur[up_1]] ) > mthresh &&
                            fabs( pv->gamma_lut[prev[down_1]] - pv->gamma_lut[cur[down_1]] ) > mthresh )
                                motion++;
                    }
                    else
                    {
                        /* User doesn't want to check for motion,
                           so move on to the spatial check. */
                        motion = 1;
                    }

                    /* Also run the spatial check unconditionally until the
                       first frame has been classified (all counters zero). */
                    if( motion || ( pv->deinterlaced_frames==0 && pv->blended_frames==0 && pv->unfiltered_frames==0) )
                    {
                        /* Tritical's noise-resistant combing scorer.
                           The check is done on a bob+blur convolution. */
                        float combing = fabs( pv->gamma_lut[cur[up_2]]
                                            + ( 4 * pv->gamma_lut[cur[0]] )
                                            + pv->gamma_lut[cur[down_2]]
                                            - ( 3 * ( pv->gamma_lut[cur[up_1]]
                                                    + pv->gamma_lut[cur[down_1]] ) ) );
                        /* If the frame is sufficiently combed,
                           then mark it down on the mask as 1. */
                        if( combing > athresh6 )
                        {
                            mask[0] = 1;
                        }
                    }
                }

                cur++;
                prev++;
                next++;
                mask++;
            }
        }
    }
}
/* Build the combing mask for rows [segment_start, segment_stop) in
 * plain (non-gamma) pixel space.  Compares the current frame (ref[1])
 * against its temporal neighbors (ref[0]/ref[2]) and writes a 0/1
 * per-pixel mask into pv->mask.  pv->spatial_metric selects one of
 * three scoring methods: 0 = simple 32detect-style, 1 = IsCombed
 * product, 2 = Tritical's bob+blur convolution. */
static void detect_combed_segment( hb_filter_private_t * pv, int segment_start, int segment_stop )
{
    /* A mish-mash of various comb detection tricks
       picked up from neuron2's Decomb plugin for
       AviSynth and tritical's IsCombedT and
       IsCombedTIVTC plugins. */

    /* Comb scoring algorithm */
    int spatial_metric = pv->spatial_metric;
    /* Motion threshold */
    int mthresh = pv->motion_threshold;
    /* Spatial threshold */
    int athresh = pv->spatial_threshold;
    int athresh_squared = athresh * athresh;
    int athresh6 = 6 * athresh;

    /* One pass for Y only -- U and V are currently skipped (pp < 1). */
    int pp;
    for( pp = 0; pp < 1; pp++ )
    {
        int x, y;
        int stride = pv->ref[0]->plane[pp].stride;
        int width = pv->ref[0]->plane[pp].width;
        int height = pv->ref[0]->plane[pp].height;

        /* Comb detection has to start at y = 2 and end at
           y = height - 2, because it needs to examine
           2 pixels above and 2 below the current pixel. */
        if( segment_start < 2 )
            segment_start = 2;
        if( segment_stop > height - 2 )
            segment_stop = height - 2;

        for( y = segment_start; y < segment_stop; y++ )
        {
            /* These are just to make the buffer locations easier to read. */
            int up_2 = -2 * stride ;
            int up_1 = -1 * stride;
            int down_1 = stride;
            int down_2 = 2 * stride;

            /* We need to examine a column of 5 pixels
               in the prev, cur, and next frames. */
            uint8_t * prev = &pv->ref[0]->plane[pp].data[y * stride];
            uint8_t * cur = &pv->ref[1]->plane[pp].data[y * stride];
            uint8_t * next = &pv->ref[2]->plane[pp].data[y * stride];
            uint8_t * mask = &pv->mask->plane[pp].data[y * stride];

            /* Start from a clean mask row. */
            memset(mask, 0, stride);

            for( x = 0; x < width; x++ )
            {
                int up_diff = cur[0] - cur[up_1];
                int down_diff = cur[0] - cur[down_1];

                if( ( up_diff > athresh && down_diff > athresh ) ||
                    ( up_diff < -athresh && down_diff < -athresh ) )
                {
                    /* The pixel above and below are different,
                       and they change in the same "direction" too.*/
                    int motion = 0;
                    if( mthresh > 0 )
                    {
                        /* Make sure there's sufficient motion between frame t-1 to frame t+1. */
                        if( abs( prev[0] - cur[0] ) > mthresh &&
                            abs( cur[up_1] - next[up_1] ) > mthresh &&
                            abs( cur[down_1] - next[down_1] ) > mthresh )
                                motion++;

                        if( abs( next[0] - cur[0] ) > mthresh &&
                            abs( prev[up_1] - cur[up_1] ) > mthresh &&
                            abs( prev[down_1] - cur[down_1] ) > mthresh )
                                motion++;
                    }
                    else
                    {
                        /* User doesn't want to check for motion,
                           so move on to the spatial check. */
                        motion = 1;
                    }

                    /* Also run the spatial check unconditionally until the
                       first frame has been classified (all counters zero). */
                    if( motion || ( pv->deinterlaced_frames==0 && pv->blended_frames==0 && pv->unfiltered_frames==0) )
                    {
                        /* That means it's time for the spatial check.
                           We've got several options here. */
                        if( spatial_metric == 0 )
                        {
                            /* Simple 32detect style comb detection */
                            if( ( abs( cur[0] - cur[down_2] ) < 10 ) &&
                                ( abs( cur[0] - cur[down_1] ) > 15 ) )
                            {
                                mask[0] = 1;
                            }
                        }
                        else if( spatial_metric == 1 )
                        {
                            /* This, for comparison, is what IsCombed uses.
                               It's better, but still noise senstive. */
                            int combing = ( cur[up_1] - cur[0] ) *
                                          ( cur[down_1] - cur[0] );

                            if( combing > athresh_squared )
                            {
                                mask[0] = 1;
                            }
                        }
                        else if( spatial_metric == 2 )
                        {
                            /* Tritical's noise-resistant combing scorer.
                               The check is done on a bob+blur convolution. */
                            int combing = abs( cur[up_2]
                                             + ( 4 * cur[0] )
                                             + cur[down_2]
                                             - ( 3 * ( cur[up_1]
                                                     + cur[down_1] ) ) );

                            /* If the frame is sufficiently combed,
                               then mark it down on the mask as 1. */
                            if( combing > athresh6 )
                            {
                                mask[0] = 1;
                            }
                        }
                    }
                }

                cur++;
                prev++;
                next++;
                mask++;
            }
        }
    }
}
// This function calls all the eedi2 filters in sequence for a given plane.
// It outputs the final interpolated image to pv->eedi_full[DST2PF].
// Stages: build/clean an edge mask on the half-height field, derive a
// direction map from it, upscale 2x vertically, refine the direction
// map at full height, interpolate the lattice, then run the optional
// post-processing passes selected by pv->post_processing.
// NOTE: the call order is the algorithm -- do not reorder.
static void eedi2_interpolate_plane( hb_filter_private_t * pv, int plane )
{
    /* We need all these pointers. No, seriously.
       I swear. It's not a joke. They're used.
       All nine of them. */
    uint8_t * mskp = pv->eedi_half[MSKPF]->plane[plane].data;
    uint8_t * srcp = pv->eedi_half[SRCPF]->plane[plane].data;
    uint8_t * tmpp = pv->eedi_half[TMPPF]->plane[plane].data;
    uint8_t * dstp = pv->eedi_half[DSTPF]->plane[plane].data;
    uint8_t * dst2p = pv->eedi_full[DST2PF]->plane[plane].data;
    uint8_t * tmp2p2 = pv->eedi_full[TMP2PF2]->plane[plane].data;
    uint8_t * msk2p = pv->eedi_full[MSK2PF]->plane[plane].data;
    uint8_t * tmp2p = pv->eedi_full[TMP2PF]->plane[plane].data;
    uint8_t * dst2mp = pv->eedi_full[DST2MPF]->plane[plane].data;
    int * cx2 = pv->cx2;
    int * cy2 = pv->cy2;
    int * cxy = pv->cxy;
    int * tmpc = pv->tmpc;

    int pitch = pv->eedi_full[0]->plane[plane].stride;
    int height = pv->eedi_full[0]->plane[plane].height;
    int width = pv->eedi_full[0]->plane[plane].width;
    int half_height = pv->eedi_half[0]->plane[plane].height;

    // edge mask
    eedi2_build_edge_mask( mskp, pitch, srcp, pitch,
                           pv->magnitude_threshold, pv->variance_threshold, pv->laplacian_threshold,
                           half_height, width );
    /* Erode/dilate/erode cleans up speckle in the edge mask. */
    eedi2_erode_edge_mask( mskp, pitch, tmpp, pitch, pv->erosion_threshold, half_height, width );
    eedi2_dilate_edge_mask( tmpp, pitch, mskp, pitch, pv->dilation_threshold, half_height, width );
    eedi2_erode_edge_mask( mskp, pitch, tmpp, pitch, pv->erosion_threshold, half_height, width );
    eedi2_remove_small_gaps( tmpp, pitch, mskp, pitch, half_height, width );

    // direction mask
    eedi2_calc_directions( plane, mskp, pitch, srcp, pitch, tmpp, pitch,
                           pv->maximum_search_distance, pv->noise_threshold,
                           half_height, width );
    eedi2_filter_dir_map( mskp, pitch, tmpp, pitch, dstp, pitch, half_height, width );
    eedi2_expand_dir_map( mskp, pitch, dstp, pitch, tmpp, pitch, half_height, width );
    eedi2_filter_map( mskp, pitch, tmpp, pitch, dstp, pitch, half_height, width );

    // upscale 2x vertically
    eedi2_upscale_by_2( srcp, dst2p, half_height, pitch );
    eedi2_upscale_by_2( dstp, tmp2p2, half_height, pitch );
    eedi2_upscale_by_2( mskp, msk2p, half_height, pitch );

    // upscale the direction mask
    eedi2_mark_directions_2x( msk2p, pitch, tmp2p2, pitch, tmp2p, pitch, pv->tff, height, width );
    eedi2_filter_dir_map_2x( msk2p, pitch, tmp2p, pitch, dst2mp, pitch, pv->tff, height, width );
    eedi2_expand_dir_map_2x( msk2p, pitch, dst2mp, pitch, tmp2p, pitch, pv->tff, height, width );
    eedi2_fill_gaps_2x( msk2p, pitch, tmp2p, pitch, dst2mp, pitch, pv->tff, height, width );
    eedi2_fill_gaps_2x( msk2p, pitch, dst2mp, pitch, tmp2p, pitch, pv->tff, height, width );

    // interpolate a full-size plane
    eedi2_interpolate_lattice( plane, tmp2p, pitch, dst2p, pitch, tmp2p2, pitch, pv->tff,
                               pv->noise_threshold, height, width );

    if( pv->post_processing == 1 || pv->post_processing == 3 )
    {
        // make sure the edge directions are consistent
        eedi2_bit_blit( tmp2p2, pitch, tmp2p, pitch, width, height );
        eedi2_filter_dir_map_2x( msk2p, pitch, tmp2p, pitch, dst2mp, pitch, pv->tff, height, width );
        eedi2_expand_dir_map_2x( msk2p, pitch, dst2mp, pitch, tmp2p, pitch, pv->tff, height, width );
        eedi2_post_process( tmp2p, pitch, tmp2p2, pitch, dst2p, pitch, pv->tff, height, width );
    }

    if( pv->post_processing == 2 || pv->post_processing == 3 )
    {
        // filter junctions and corners
        eedi2_gaussian_blur1( srcp, pitch, tmpp, pitch, srcp, pitch, half_height, width );
        eedi2_calc_derivatives( srcp, pitch, half_height, width, cx2, cy2, cxy );
        eedi2_gaussian_blur_sqrt2( cx2, tmpc, cx2, pitch, half_height, width);
        eedi2_gaussian_blur_sqrt2( cy2, tmpc, cy2, pitch, half_height, width);
        eedi2_gaussian_blur_sqrt2( cxy, tmpc, cxy, pitch, half_height, width);
        eedi2_post_process_corner( cx2, cy2, cxy, pitch, tmp2p2, pitch, dst2p, pitch, height, width, pv->tff );
    }
}
/*
 * Worker thread: interpolates one eedi2 plane per taskset cycle.
 * Runs until the taskset signals stop.
 */
static void eedi2_filter_thread( void *thread_args_v )
{
    eedi2_thread_arg_t *thread_args = thread_args_v;
    hb_filter_private_t *pv = thread_args->pv;
    int plane = thread_args->plane;

    hb_log("eedi2 thread started for plane %d", plane);

    for (;;)
    {
        /* Block until the main thread kicks off a cycle. */
        taskset_thread_wait4start( &pv->eedi2_taskset, plane );

        if( taskset_thread_stop( &pv->eedi2_taskset, plane ) )
        {
            /* Shutdown requested: leave the work loop. */
            break;
        }

        /* Interpolate this plane. */
        eedi2_interpolate_plane( pv, plane );

        /* Report completion of this cycle. */
        taskset_thread_complete( &pv->eedi2_taskset, plane );
    }

    /* Acknowledge the stop request. */
    taskset_thread_complete( &pv->eedi2_taskset, plane );
}
// Sets up the input field planes for EEDI2 in pv->eedi_half[SRCPF]
// and then runs one eedi2 taskset cycle to interpolate them.
static void eedi2_planer( hb_filter_private_t * pv )
{
    int plane;
    /* The field to keep: row 0 for TFF content, row 1 otherwise. */
    int first_row = !pv->tff;

    /* Copy the first field from the source to a half-height frame. */
    for (plane = 0; plane < 3; plane++)
    {
        int pitch  = pv->ref[1]->plane[plane].stride;
        int height = pv->ref[1]->plane[plane].height;

        eedi2_fill_half_height_buffer_plane(
            &pv->ref[1]->plane[plane].data[pitch * first_row],
            pv->eedi_half[SRCPF]->plane[plane].data, pitch, height );
    }

    /*
     * Now that all data is ready for our threads, fire them off
     * and wait for their completion.
     */
    taskset_cycle( &pv->eedi2_taskset );
}
/* Worker thread: dilates one segment of the filtered combing mask per
 * taskset cycle.  Reads pv->mask_filtered and writes pv->mask_temp.
 * A clear pixel becomes set when at least dilation_threshold (4) of
 * its 8 neighbors are set; set pixels always stay set.  The one-pixel
 * plane border is left untouched. */
static void mask_dilate_thread( void *thread_args_v )
{
    hb_filter_private_t * pv;
    int segment, segment_start, segment_stop;
    decomb_thread_arg_t *thread_args = thread_args_v;

    pv = thread_args->pv;
    segment = thread_args->segment;

    hb_log("mask dilate thread started for segment %d", segment);

    while (1)
    {
        /*
         * Wait here until there is work to do.
         */
        taskset_thread_wait4start( &pv->mask_dilate_taskset, segment );

        if (taskset_thread_stop(&pv->mask_dilate_taskset, segment))
        {
            /*
             * No more work to do, exit this thread.
             */
            break;
        }

        int xx, yy, pp;
        int count;
        int dilation_threshold = 4;

        /* Only plane 0 (luma) carries the mask. */
        for( pp = 0; pp < 1; pp++ )
        {
            int width = pv->mask_filtered->plane[pp].width;
            int height = pv->mask_filtered->plane[pp].height;
            int stride = pv->mask_filtered->plane[pp].stride;

            int start, stop, p, c, n;

            segment_start = thread_args->segment_start[pp];
            segment_stop = segment_start + thread_args->segment_height[pp];

            /* Clamp to rows 1..height-2 so the 3x3 neighborhood stays
               in bounds; p/c/n index the previous/current/next rows. */
            if (segment_start == 0)
            {
                start = 1;
                p = 0;
                c = 1;
                n = 2;
            }
            else
            {
                start = segment_start;
                p = segment_start - 1;
                c = segment_start;
                n = segment_start + 1;
            }

            if (segment_stop == height)
            {
                stop = height -1;
            }
            else
            {
                stop = segment_stop;
            }

            /* The +1 skips the left border column. */
            uint8_t *curp = &pv->mask_filtered->plane[pp].data[p * stride + 1];
            uint8_t *cur = &pv->mask_filtered->plane[pp].data[c * stride + 1];
            uint8_t *curn = &pv->mask_filtered->plane[pp].data[n * stride + 1];
            uint8_t *dst = &pv->mask_temp->plane[pp].data[c * stride + 1];

            for( yy = start; yy < stop; yy++ )
            {
                for( xx = 1; xx < width - 1; xx++ )
                {
                    if (cur[xx])
                    {
                        /* Already set: keep it set. */
                        dst[xx] = 1;
                        continue;
                    }

                    /* Count the set pixels among the 8 neighbors. */
                    count = curp[xx-1] + curp[xx] + curp[xx+1] +
                            cur [xx-1] +            cur [xx+1] +
                            curn[xx-1] + curn[xx] + curn[xx+1];

                    dst[xx] = count >= dilation_threshold;
                }
                curp += stride;
                cur += stride;
                curn += stride;
                dst += stride;
            }
        }
        taskset_thread_complete( &pv->mask_dilate_taskset, segment );
    }

    /*
     * Finished this segment, let everyone know.
     */
    taskset_thread_complete( &pv->mask_dilate_taskset, segment );
}
/* Worker thread: erodes one segment of the combing mask per taskset
 * cycle.  Reads pv->mask_temp and writes pv->mask_filtered.  A set
 * pixel survives only when at least erosion_threshold (2) of its 8
 * neighbors are set; clear pixels stay clear.  The one-pixel plane
 * border is left untouched. */
static void mask_erode_thread( void *thread_args_v )
{
    hb_filter_private_t * pv;
    int segment, segment_start, segment_stop;
    decomb_thread_arg_t *thread_args = thread_args_v;

    pv = thread_args->pv;
    segment = thread_args->segment;

    hb_log("mask erode thread started for segment %d", segment);

    while (1)
    {
        /*
         * Wait here until there is work to do.
         */
        taskset_thread_wait4start( &pv->mask_erode_taskset, segment );

        if( taskset_thread_stop( &pv->mask_erode_taskset, segment ) )
        {
            /*
             * No more work to do, exit this thread.
             */
            break;
        }

        int xx, yy, pp;
        int count;
        int erosion_threshold = 2;

        /* Only plane 0 (luma) carries the mask. */
        for( pp = 0; pp < 1; pp++ )
        {
            int width = pv->mask_filtered->plane[pp].width;
            int height = pv->mask_filtered->plane[pp].height;
            int stride = pv->mask_filtered->plane[pp].stride;

            int start, stop, p, c, n;

            segment_start = thread_args->segment_start[pp];
            segment_stop = segment_start + thread_args->segment_height[pp];

            /* Clamp to rows 1..height-2 so the 3x3 neighborhood stays
               in bounds; p/c/n index the previous/current/next rows. */
            if (segment_start == 0)
            {
                start = 1;
                p = 0;
                c = 1;
                n = 2;
            }
            else
            {
                start = segment_start;
                p = segment_start - 1;
                c = segment_start;
                n = segment_start + 1;
            }

            if (segment_stop == height)
            {
                stop = height -1;
            }
            else
            {
                stop = segment_stop;
            }

            /* The +1 skips the left border column. */
            uint8_t *curp = &pv->mask_temp->plane[pp].data[p * stride + 1];
            uint8_t *cur = &pv->mask_temp->plane[pp].data[c * stride + 1];
            uint8_t *curn = &pv->mask_temp->plane[pp].data[n * stride + 1];
            uint8_t *dst = &pv->mask_filtered->plane[pp].data[c * stride + 1];

            for( yy = start; yy < stop; yy++ )
            {
                for( xx = 1; xx < width - 1; xx++ )
                {
                    if( cur[xx] == 0 )
                    {
                        /* Already clear: keep it clear. */
                        dst[xx] = 0;
                        continue;
                    }

                    /* Count the set pixels among the 8 neighbors. */
                    count = curp[xx-1] + curp[xx] + curp[xx+1] +
                            cur [xx-1] +            cur [xx+1] +
                            curn[xx-1] + curn[xx] + curn[xx+1];

                    dst[xx] = count >= erosion_threshold;
                }
                curp += stride;
                cur += stride;
                curn += stride;
                dst += stride;
            }
        }
        taskset_thread_complete( &pv->mask_erode_taskset, segment );
    }

    /*
     * Finished this segment, let everyone know.
     */
    taskset_thread_complete( &pv->mask_erode_taskset, segment );
}
/* Worker thread: noise-filters one segment of the raw combing mask
 * (pv->mask) per taskset cycle.  A pixel survives only when its
 * horizontal 3-pixel run is all set (FILTER_CLASSIC), or when both the
 * horizontal and vertical 3-pixel runs are all set (erode/dilate
 * mode).  Output goes to pv->mask_filtered in classic mode, otherwise
 * to pv->mask_temp for the subsequent erode/dilate passes. */
static void mask_filter_thread( void *thread_args_v )
{
    hb_filter_private_t * pv;
    int segment, segment_start, segment_stop;
    decomb_thread_arg_t *thread_args = thread_args_v;

    pv = thread_args->pv;
    segment = thread_args->segment;

    hb_log("mask filter thread started for segment %d", segment);

    while (1)
    {
        /*
         * Wait here until there is work to do.
         */
        taskset_thread_wait4start( &pv->mask_filter_taskset, segment );

        if( taskset_thread_stop( &pv->mask_filter_taskset, segment ) )
        {
            /*
             * No more work to do, exit this thread.
             */
            break;
        }

        int xx, yy, pp;

        /* Only plane 0 (luma) carries the mask. */
        for( pp = 0; pp < 1; pp++ )
        {
            int width = pv->mask->plane[pp].width;
            int height = pv->mask->plane[pp].height;
            int stride = pv->mask->plane[pp].stride;

            int start, stop, p, c, n;

            segment_start = thread_args->segment_start[pp];
            segment_stop = segment_start + thread_args->segment_height[pp];

            /* Clamp to rows 1..height-2 so the 3x3 neighborhood stays
               in bounds; p/c/n index the previous/current/next rows. */
            if (segment_start == 0)
            {
                start = 1;
                p = 0;
                c = 1;
                n = 2;
            }
            else
            {
                start = segment_start;
                p = segment_start - 1;
                c = segment_start;
                n = segment_start + 1;
            }

            if (segment_stop == height)
            {
                stop = height - 1;
            }
            else
            {
                stop = segment_stop;
            }

            /* The +1 skips the left border column. */
            uint8_t *curp = &pv->mask->plane[pp].data[p * stride + 1];
            uint8_t *cur = &pv->mask->plane[pp].data[c * stride + 1];
            uint8_t *curn = &pv->mask->plane[pp].data[n * stride + 1];
            uint8_t *dst = (pv->filter_mode == FILTER_CLASSIC ) ?
                &pv->mask_filtered->plane[pp].data[c * stride + 1] :
                &pv->mask_temp->plane[pp].data[c * stride + 1] ;

            for( yy = start; yy < stop; yy++ )
            {
                for( xx = 1; xx < width - 1; xx++ )
                {
                    int h_count, v_count;

                    /* AND of the horizontal and vertical 3-pixel runs
                       centered on (xx, yy). */
                    h_count = cur[xx-1] & cur[xx] & cur[xx+1];
                    v_count = curp[xx] & cur[xx] & curn[xx];

                    if (pv->filter_mode == FILTER_CLASSIC)
                    {
                        dst[xx] = h_count;
                    }
                    else
                    {
                        dst[xx] = h_count & v_count;
                    }
                }
                curp += stride;
                cur += stride;
                curn += stride;
                dst += stride;
            }
        }
        taskset_thread_complete( &pv->mask_filter_taskset, segment );
    }

    /*
     * Finished this segment, let everyone know.
     */
    taskset_thread_complete( &pv->mask_filter_taskset, segment );
}
/*
 * Worker thread: scores one segment of the combing mask per taskset
 * cycle, using the filtered mask when MODE_FILTER is active and the
 * raw mask otherwise.
 */
static void decomb_check_thread( void *thread_args_v )
{
    decomb_thread_arg_t *thread_args = thread_args_v;
    hb_filter_private_t *pv = thread_args->pv;
    int segment = thread_args->segment;
    int segment_start, segment_stop;

    hb_log("decomb check thread started for segment %d", segment);

    for (;;)
    {
        /* Block until the main thread starts a cycle. */
        taskset_thread_wait4start( &pv->decomb_check_taskset, segment );

        if( taskset_thread_stop( &pv->decomb_check_taskset, segment ) )
        {
            /* Shutdown requested: leave the work loop. */
            break;
        }

        segment_start = thread_args->segment_start[0];
        segment_stop  = segment_start + thread_args->segment_height[0];

        /* Score either the filtered or the raw combing mask. */
        if( pv->mode & MODE_FILTER )
        {
            check_filtered_combing_mask(pv, segment, segment_start, segment_stop);
        }
        else
        {
            check_combing_mask(pv, segment, segment_start, segment_stop);
        }

        taskset_thread_complete( &pv->decomb_check_taskset, segment );
    }

    /* Acknowledge the stop request. */
    taskset_thread_complete( &pv->decomb_check_taskset, segment );
}
/*
 * Worker thread: runs comb detection over one segment per taskset
 * cycle, in gamma-corrected or plain pixel space depending on
 * MODE_GAMMA.
 */
static void decomb_filter_thread( void *thread_args_v )
{
    decomb_thread_arg_t *thread_args = thread_args_v;
    hb_filter_private_t *pv = thread_args->pv;
    int segment = thread_args->segment;
    int segment_start, segment_stop;

    hb_log("decomb filter thread started for segment %d", segment);

    for (;;)
    {
        /* Block until the main thread starts a cycle. */
        taskset_thread_wait4start( &pv->decomb_filter_taskset, segment );

        if( taskset_thread_stop( &pv->decomb_filter_taskset, segment ) )
        {
            /* Shutdown requested: leave the work loop. */
            break;
        }

        /* Process segment (for now just from luma) */
        int plane;
        for (plane = 0; plane < 1; plane++)
        {
            segment_start = thread_args->segment_start[plane];
            segment_stop  = segment_start + thread_args->segment_height[plane];

            if( pv->mode & MODE_GAMMA )
            {
                detect_gamma_combed_segment( pv, segment_start, segment_stop );
            }
            else
            {
                detect_combed_segment( pv, segment_start, segment_stop );
            }
        }

        taskset_thread_complete( &pv->decomb_filter_taskset, segment );
    }

    /* Acknowledge the stop request. */
    taskset_thread_complete( &pv->decomb_filter_taskset, segment );
}
/* Run a full comb-detection pass across all worker tasksets and return
 * the frame verdict from check_combing_results():
 * 0 = uncombed, 1 = deinterlace, 2 = blend. */
static int comb_segmenter( hb_filter_private_t * pv )
{
    /*
     * Now that all data for decomb detection is ready for
     * our threads, fire them off and wait for their completion.
     */
    taskset_cycle( &pv->decomb_filter_taskset );

    if (pv->mode & MODE_FILTER)
    {
        /* Noise-filter the raw mask, optionally followed by
           erode/dilate/erode cleanup. */
        taskset_cycle( &pv->mask_filter_taskset );
        if (pv->filter_mode == FILTER_ERODE_DILATE)
        {
            taskset_cycle( &pv->mask_erode_taskset );
            taskset_cycle( &pv->mask_dilate_taskset );
            taskset_cycle( &pv->mask_erode_taskset );
        }
    }

    reset_combing_results( pv );
    taskset_cycle( &pv->decomb_check_taskset );
    return check_combing_results( pv );
}
/* EDDI: Edge Directed Deinterlacing Interpolation
   Checks 4 different slopes to see if there is more similarity along a diagonal
   than there was vertically. If a diagonal is more similar, then it indicates
   an edge, so interpolate along that instead of a vertical line, using either
   linear or cubic interpolation depending on mode. */
/* WARNING: this macro deliberately expands with UNBALANCED opening
   braces.  The matching closers are written out at the call sites in
   yadif_filter_line() (the "}} }}" sequences), which lets the +/-2
   checks nest inside the score test of the +/-1 checks.  Do not "fix"
   the brace count here. */
#define YADIF_CHECK(j) {\
        int score = ABS(cur[-stride-1+j] - cur[+stride-1-j])\
                      + ABS(cur[-stride  +j] - cur[+stride  -j])\
                      + ABS(cur[-stride+1+j] - cur[+stride+1-j]);\
        if( score < spatial_score ){\
            spatial_score = score;\
            if( ( pv->mode & MODE_CUBIC ) && !vertical_edge )\
            {\
                switch(j)\
                {\
                    case -1:\
                        spatial_pred = cubic_interpolate_pixel(cur[-3 * stride - 3], cur[-stride -1], cur[+stride + 1], cur[3* stride + 3] );\
                    break;\
                    case -2:\
                        spatial_pred = cubic_interpolate_pixel( ( ( cur[-3*stride - 4] + cur[-stride - 4] ) / 2 ) , cur[-stride -2], cur[+stride + 2], ( ( cur[3*stride + 4] + cur[stride + 4] ) / 2 ) );\
                    break;\
                    case 1:\
                        spatial_pred = cubic_interpolate_pixel(cur[-3 * stride +3], cur[-stride +1], cur[+stride - 1], cur[3* stride -3] );\
                    break;\
                    case 2:\
                        spatial_pred = cubic_interpolate_pixel(( ( cur[-3*stride + 4] + cur[-stride + 4] ) / 2 ), cur[-stride +2], cur[+stride - 2], ( ( cur[3*stride - 4] + cur[stride - 4] ) / 2 ) );\
                    break;\
                }\
            }\
            else\
            {\
                spatial_pred = ( cur[-stride +j] + cur[+stride -j] ) >>1;\
            }\
/* Yadif-filter one line (row y) of one plane into dst.
 *
 * prev/cur/next point at row y of the previous, current, and next
 * frames.  The spatial prediction comes from eedi2 output, cubic
 * interpolation, or yadif's edge-directed checks, and is then clamped
 * temporally against the adjacent fields.
 *
 * The caller guarantees y is at least 2 rows from the top/bottom of
 * the plane (see yadif_decomb_filter_thread), so the +/-2*stride taps
 * stay in bounds. */
static void yadif_filter_line(
       hb_filter_private_t * pv,
       uint8_t             * dst,
       uint8_t             * prev,
       uint8_t             * cur,
       uint8_t             * next,
       int                   plane,
       int                   width,
       int                   height,
       int                   stride,
       int                   parity,
       int                   y)
{
    /* While prev and next point to the previous and next frames,
       prev2 and next2 will shift depending on the parity, usually 1.
       They are the previous and next fields, the fields temporally adjacent
       to the other field in the current frame--the one not being filtered. */
    uint8_t *prev2 = parity ? prev : cur ;
    uint8_t *next2 = parity ? cur  : next;

    int x;
    int eedi2_mode = ( pv->mode & MODE_EEDI2 );

    /* We can replace spatial_pred with this interpolation*/
    uint8_t * eedi2_guess = NULL;
    if (eedi2_mode)
    {
        eedi2_guess = &pv->eedi_full[DST2PF]->plane[plane].data[y*stride];
    }

    /* Decomb's cubic interpolation can only function when there are
       three samples above and below, so regress to yadif's traditional
       two-tap interpolation when filtering at the top and bottom edges. */
    int vertical_edge = 0;
    if( ( y < 3 ) || ( y > ( height - 4 ) ) )
        vertical_edge = 1;

    for( x = 0; x < width; x++)
    {
        /* Pixel above*/
        int c              = cur[-stride];
        /* Temporal average: the current location in the adjacent fields */
        int d              = (prev2[0] + next2[0])>>1;
        /* Pixel below */
        int e              = cur[+stride];

        /* How the current pixel changes between the adjacent fields */
        int temporal_diff0 = ABS(prev2[0] - next2[0]);
        /* The average of how much the pixels above and below change from the frame before to now. */
        int temporal_diff1 = ( ABS(prev[-stride] - cur[-stride]) + ABS(prev[+stride] - cur[+stride]) ) >> 1;
        /* The average of how much the pixels above and below change from now to the next frame. */
        int temporal_diff2 = ( ABS(next[-stride] - cur[-stride]) + ABS(next[+stride] - cur[+stride]) ) >> 1;
        /* For the actual difference, use the largest of the previous average diffs. */
        int diff           = MAX3(temporal_diff0>>1, temporal_diff1, temporal_diff2);

        int spatial_pred;

        if( eedi2_mode )
        {
            /* Who needs yadif's spatial predictions when we can have EEDI2's? */
            spatial_pred = eedi2_guess[0];
            eedi2_guess++;
        }
        else // Yadif spatial interpolation
        {
            /* SAD of how the pixel-1, the pixel, and the pixel+1 change from the line above to below. */
            int spatial_score  = ABS(cur[-stride-1] - cur[+stride-1]) + ABS(cur[-stride]-cur[+stride]) +
                                 ABS(cur[-stride+1] - cur[+stride+1]) - 1;

            /* Spatial pred is either a bilinear or cubic vertical interpolation. */
            if( ( pv->mode & MODE_CUBIC ) && !vertical_edge)
            {
                spatial_pred = cubic_interpolate_pixel( cur[-3*stride], cur[-stride], cur[+stride], cur[3*stride] );
            }
            else
            {
                spatial_pred = (c+e)>>1;
            }

            // YADIF_CHECK requires a margin to avoid invalid memory access.
            // In MODE_CUBIC, margin needed is 2 + ABS(param).
            // Else, the margin needed is 1 + ABS(param).
            int margin = 2;
            if (pv->mode & MODE_CUBIC)
                margin = 3;

            /* NOTE: YADIF_CHECK expands with unbalanced opening braces;
               the "}} }}" below closes the braces opened by the nested
               expansions.  The -2/2 checks only run when the -1/1 check
               improved the score. */
            if (x >= margin && x <= width - (margin + 1))
            {
                YADIF_CHECK(-1)
                if (x >= margin + 1 && x <= width - (margin + 2))
                    YADIF_CHECK(-2) }} }}
            }
            if (x >= margin && x <= width - (margin + 1))
            {
                YADIF_CHECK(1)
                if (x >= margin + 1 && x <= width - (margin + 2))
                    YADIF_CHECK(2) }} }}
            }
        }

        /* Temporally adjust the spatial prediction by
           comparing against lines in the adjacent fields. */
        int b = (prev2[-2*stride] + next2[-2*stride])>>1;
        int f = (prev2[+2*stride] + next2[+2*stride])>>1;

        /* Find the median value */
        int max = MAX3(d-e, d-c, MIN(b-c, f-e));
        int min = MIN3(d-e, d-c, MAX(b-c, f-e));
        diff = MAX3( diff, min, -max );

        /* Clamp the spatial prediction into the temporal window. */
        if( spatial_pred > d + diff )
        {
            spatial_pred = d + diff;
        }
        else if( spatial_pred < d - diff )
        {
            spatial_pred = d - diff;
        }

        dst[0] = spatial_pred;

        dst++;
        cur++;
        prev++;
        next++;
        prev2++;
        next2++;
    }
}
/*
 * deinterlace this segment of all three planes in a single thread.
 *
 * Per taskset cycle: for each plane, the "bad" field lines of this
 * segment are rebuilt according to is_combed (2 = blend-filter,
 * 1 = cubic or yadif deinterlace, 0 = plain copy), then the opposite
 * field's lines are copied through unfiltered.
 */
static void yadif_decomb_filter_thread( void *thread_args_v )
{
    yadif_arguments_t *yadif_work = NULL;
    hb_filter_private_t * pv;
    int segment, segment_start, segment_stop;
    yadif_thread_arg_t *thread_args = thread_args_v;

    /* 5-tap low-pass used when lines are blend-filtered (is_combed==2).
       Taps sum to 8; normalize shifts the sum right by 3. */
    filter_param_t filter;
    filter.tap[0] = -1;
    filter.tap[1] = 2;
    filter.tap[2] = 6;
    filter.tap[3] = 2;
    filter.tap[4] = -1;
    filter.normalize = 3;

    pv = thread_args->pv;
    segment = thread_args->segment;

    hb_log("yadif thread started for segment %d", segment);

    while (1)
    {
        /*
         * Wait here until there is work to do.
         */
        taskset_thread_wait4start( &pv->yadif_taskset, segment );

        if( taskset_thread_stop( &pv->yadif_taskset, segment ) )
        {
            /*
             * No more work to do, exit this thread.
             */
            break;
        }

        yadif_work = &pv->yadif_arguments[segment];

        /*
         * Process all three planes, but only this segment of it.
         */
        hb_buffer_t *dst;
        int parity, tff, is_combed;

        is_combed = pv->yadif_arguments[segment].is_combed;
        dst = yadif_work->dst;
        tff = yadif_work->tff;
        parity = yadif_work->parity;

        int pp;
        for (pp = 0; pp < 3; pp++)
        {
            int yy;
            int width = dst->plane[pp].width;
            int stride = dst->plane[pp].stride;
            /* NOTE(review): height_stride (the padded plane height) is
               used here rather than height -- presumably so the edge
               filters may touch padding rows; confirm against the
               hb_buffer plane layout. */
            int height = dst->plane[pp].height_stride;
            int penultimate = height - 2;

            segment_start = thread_args->segment_start[pp];
            segment_stop = segment_start + thread_args->segment_height[pp];

            // Filter parity lines
            int start = parity ? (segment_start + 1) & ~1 : segment_start | 1;
            uint8_t *dst2 = &dst->plane[pp].data[start * stride];
            uint8_t *prev = &pv->ref[0]->plane[pp].data[start * stride];
            uint8_t *cur  = &pv->ref[1]->plane[pp].data[start * stride];
            uint8_t *next = &pv->ref[2]->plane[pp].data[start * stride];

            if( is_combed == 2 )
            {
                /* These will be useful if we ever do temporal blending. */
                for( yy = start; yy < segment_stop; yy += 2 )
                {
                    /* This line gets blend filtered, not yadif filtered. */
                    blend_filter_line(&filter, dst2, cur, width, height, stride, yy);
                    dst2 += stride * 2;
                    cur += stride * 2;
                }
            }
            else if (pv->mode == MODE_CUBIC && is_combed)
            {
                for( yy = start; yy < segment_stop; yy += 2 )
                {
                    /* Just apply vertical cubic interpolation */
                    cubic_interpolate_line(dst2, cur, width, height, stride, yy);
                    dst2 += stride * 2;
                    cur += stride * 2;
                }
            }
            else if ((pv->mode & MODE_YADIF) && is_combed == 1)
            {
                for( yy = start; yy < segment_stop; yy += 2 )
                {
                    if( yy > 1 && yy < penultimate )
                    {
                        // This isn't the top or bottom,
                        // proceed as normal to yadif
                        yadif_filter_line(pv, dst2, prev, cur, next, pp,
                                          width, height, stride,
                                          parity ^ tff, yy);
                    }
                    else
                    {
                        // Top/bottom rows: yadif's taps would run off the
                        // plane, so copy the nearest opposite-field line:
                        // parity == 0 (TFF), y1 = y0
                        // parity == 1 (BFF), y0 = y1
                        // parity == 0 (TFF), yu = yp
                        // parity == 1 (BFF), yp = yu
                        int yp = (yy ^ parity) * stride;
                        memcpy(dst2, &pv->ref[1]->plane[pp].data[yp], width);
                    }
                    dst2 += stride * 2;
                    prev += stride * 2;
                    cur += stride * 2;
                    next += stride * 2;
                }
            }
            else
            {
                // No combing, copy frame
                for( yy = start; yy < segment_stop; yy += 2 )
                {
                    memcpy(dst2, cur, width);
                    dst2 += stride * 2;
                    cur += stride * 2;
                }
            }

            // Copy unfiltered lines
            start = !parity ? (segment_start + 1) & ~1 : segment_start | 1;
            dst2 = &dst->plane[pp].data[start * stride];
            prev = &pv->ref[0]->plane[pp].data[start * stride];
            cur  = &pv->ref[1]->plane[pp].data[start * stride];
            next = &pv->ref[2]->plane[pp].data[start * stride];

            for( yy = start; yy < segment_stop; yy += 2 )
            {
                memcpy(dst2, cur, width);
                dst2 += stride * 2;
                cur += stride * 2;
            }
        }
        taskset_thread_complete( &pv->yadif_taskset, segment );
    }

    /*
     * Finished this segment, let everyone know.
     */
    taskset_thread_complete( &pv->yadif_taskset, segment );
}
/*
 * Produce one output frame from the three cached reference frames.
 *
 * Runs comb detection (unless pv->skip_comb_check is set), reconciles the
 * detector's verdict with the enabled mode bits, updates the per-category
 * frame statistics, and then either: copies a finished EEDI2 interpolation
 * into dst, cycles the yadif taskset over the frame, or copies pv->ref[1]
 * through untouched when no filtering is needed.
 *
 * pv     - filter private state (references, tasksets, mode bits)
 * dst    - destination frame buffer, filled in place
 * parity - which field to interpolate for this output frame
 * tff    - nonzero when the stream is top-field-first
 */
static void yadif_filter( hb_filter_private_t * pv,
                          hb_buffer_t * dst,
                          int parity,
                          int tff)
{
    /* If we're running comb detection, do it now, otherwise default to true. */
    int is_combed;

    if (!pv->skip_comb_check)
    {
        // spatial_metric < 0 disables detection: treat every frame as combed.
        is_combed = pv->spatial_metric >= 0 ? comb_segmenter( pv ) : 1;
    }
    else
    {
        // Reuse the verdict stored by the previous call (hb_decomb_work sets
        // skip_comb_check for the second field of a bob pair).
        is_combed = pv->is_combed;
    }

    /* The comb detector suggests three different values:
       0: Don't comb this frame.
       1: Deinterlace this frame.
       2: Blend this frame.
       Since that might conflict with the filter's mode,
       it may be necessary to adjust this value. */
    if( is_combed == 1 && (pv->mode == MODE_BLEND) )
    {
        /* All combed frames are getting blended */
        is_combed = 2;
    }
    else if( is_combed == 2 && !( pv->mode & MODE_BLEND ) )
    {
        /* Blending is disabled, so force interpolation of these frames. */
        is_combed = 1;
    }
    if( is_combed == 1 &&
        ( pv->mode & MODE_BLEND ) &&
        !( pv->mode & ( MODE_YADIF | MODE_EEDI2 | MODE_CUBIC ) ) )
    {
        /* Deinterlacers are disabled, blending isn't, so blend these frames. */
        is_combed = 2;
    }
    else if( is_combed &&
             !( pv->mode & ( MODE_BLEND | MODE_YADIF | MODE_EEDI2 | MODE_CUBIC | MODE_MASK ) ) )
    {
        /* No deinterlacer or mask chosen, pass the frame through. */
        is_combed = 0;
    }

    /* Per-category counters, reported by hb_decomb_close(). */
    if( is_combed == 1 )
    {
        pv->deinterlaced_frames++;
    }
    else if( is_combed == 2 )
    {
        pv->blended_frames++;
    }
    else
    {
        pv->unfiltered_frames++;
    }

    if( is_combed == 1 && ( pv->mode & MODE_EEDI2 ) )
    {
        /* Generate an EEDI2 interpolation */
        eedi2_planer( pv );
    }

    // Remember the verdict; hb_decomb_work reads it to decide whether the
    // second bob field needs a fresh comb check and whether to emit it.
    pv->is_combed = is_combed;
    if( is_combed )
    {
        if( ( pv->mode & MODE_EEDI2 ) && !( pv->mode & MODE_YADIF ) && is_combed == 1 )
        {
            // Just pass through the EEDI2 interpolation
            int pp;
            for( pp = 0; pp < 3; pp++ )
            {
                uint8_t * ref = pv->eedi_full[DST2PF]->plane[pp].data;
                int ref_stride = pv->eedi_full[DST2PF]->plane[pp].stride;
                uint8_t * dest = dst->plane[pp].data;
                int width = dst->plane[pp].width;
                int height = dst->plane[pp].height;
                int stride = dst->plane[pp].stride;
                int yy;
                for( yy = 0; yy < height; yy++ )
                {
                    memcpy(dest, ref, width);
                    dest += stride;
                    ref += ref_stride;
                }
            }
        }
        else
        {
            int segment;

            for( segment = 0; segment < pv->cpu_count; segment++ )
            {
                /*
                 * Setup the work for this plane.
                 */
                pv->yadif_arguments[segment].parity = parity;
                pv->yadif_arguments[segment].tff = tff;
                pv->yadif_arguments[segment].dst = dst;
                pv->yadif_arguments[segment].is_combed = is_combed;
            }

            /*
             * Allow the taskset threads to make one pass over the data.
             */
            taskset_cycle( &pv->yadif_taskset );

            /*
             * Entire frame is now deinterlaced.
             */
        }
    }
    else
    {
        /* Just passing through... */
        pv->yadif_arguments[0].is_combed = is_combed; // 0
        hb_buffer_copy(dst, pv->ref[1]);
    }
}
/*
 * Initialize the decomb filter.
 *
 * Parses the colon-separated settings string (17 integer fields), sizes the
 * per-thread work segments so each begins on an even row, allocates the comb
 * mask buffers and optional EEDI2 scratch buffers, and spawns the worker
 * tasksets: yadif, comb detection, comb check, optional mask
 * filter/erode/dilate, and optional EEDI2.
 *
 * BUG FIX: the comb-check taskset setup loop previously re-declared
 * decomb_prev_thread_args inside the loop body, shadowing the outer variable.
 * The shadow was reset to NULL every iteration, so segment_start was never
 * accumulated and every comb-check thread scanned the first segment's rows.
 * The shadowing declaration has been removed so the outer pointer carries the
 * previous iteration's args, matching the other taskset setup loops.
 *
 * Returns 0.  Taskset/thread failures are logged via hb_error() but do not
 * abort initialization (pre-existing behavior).
 */
static int hb_decomb_init( hb_filter_object_t * filter,
                           hb_filter_init_t * init )
{
    filter->private_data = calloc( 1, sizeof(struct hb_filter_private_s) );
    hb_filter_private_t * pv = filter->private_data;

    build_gamma_lut( pv );

    pv->deinterlaced_frames = 0;
    pv->blended_frames = 0;
    pv->unfiltered_frames = 0;
    pv->yadif_ready = 0;

    /* Defaults; overridden by the settings string below when present. */
    pv->mode = MODE_YADIF | MODE_BLEND | MODE_CUBIC |
               MODE_GAMMA | MODE_FILTER;
    pv->filter_mode = FILTER_ERODE_DILATE;
    pv->spatial_metric = 2;
    pv->motion_threshold = 3;
    pv->spatial_threshold = 3;
    pv->block_threshold = 40;
    pv->block_width = 16;
    pv->block_height = 16;
    /* EEDI2 tuning parameters. */
    pv->magnitude_threshold = 10;
    pv->variance_threshold = 20;
    pv->laplacian_threshold = 20;
    pv->dilation_threshold = 4;
    pv->erosion_threshold = 2;
    pv->noise_threshold = 50;
    pv->maximum_search_distance = 24;
    pv->post_processing = 1;
    pv->parity = PARITY_DEFAULT;

    if( filter->settings )
    {
        sscanf( filter->settings, "%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d",
                &pv->mode,
                &pv->spatial_metric,
                &pv->motion_threshold,
                &pv->spatial_threshold,
                &pv->filter_mode,
                &pv->block_threshold,
                &pv->block_width,
                &pv->block_height,
                &pv->magnitude_threshold,
                &pv->variance_threshold,
                &pv->laplacian_threshold,
                &pv->dilation_threshold,
                &pv->erosion_threshold,
                &pv->noise_threshold,
                &pv->maximum_search_distance,
                &pv->post_processing,
                &pv->parity );
    }

    pv->cpu_count = hb_get_cpu_count();

    // Make segment sizes an even number of lines
    int height = hb_image_height(init->pix_fmt, init->height, 0);
    // Each segment must begin on the even "parity" row.
    // I.e. each segment of each plane must begin on an even row.
    pv->segment_height[0] = (height / pv->cpu_count) & ~3;
    pv->segment_height[1] = hb_image_height(init->pix_fmt, pv->segment_height[0], 1);
    pv->segment_height[2] = hb_image_height(init->pix_fmt, pv->segment_height[0], 2);

    /* Allocate buffers to store comb masks. */
    pv->mask = hb_frame_buffer_init(init->pix_fmt, init->width, init->height);
    pv->mask_filtered = hb_frame_buffer_init(init->pix_fmt, init->width, init->height);
    pv->mask_temp = hb_frame_buffer_init(init->pix_fmt, init->width, init->height);
    memset(pv->mask->data, 0, pv->mask->size);
    memset(pv->mask_filtered->data, 0, pv->mask_filtered->size);
    memset(pv->mask_temp->data, 0, pv->mask_temp->size);

    int ii;
    if( pv->mode & MODE_EEDI2 )
    {
        /* Allocate half-height eedi2 buffers */
        for( ii = 0; ii < 4; ii++ )
        {
            pv->eedi_half[ii] = hb_frame_buffer_init(
                    init->pix_fmt, init->width, init->height / 2);
        }

        /* Allocate full-height eedi2 buffers */
        for( ii = 0; ii < 5; ii++ )
        {
            pv->eedi_full[ii] = hb_frame_buffer_init(
                    init->pix_fmt, init->width, init->height);
        }
    }

    /*
     * Setup yadif taskset.
     */
    pv->yadif_arguments = malloc( sizeof( yadif_arguments_t ) * pv->cpu_count );
    if( pv->yadif_arguments == NULL ||
        taskset_init( &pv->yadif_taskset, pv->cpu_count,
                      sizeof( yadif_thread_arg_t ) ) == 0 )
    {
        hb_error( "yadif could not initialize taskset" );
    }

    yadif_thread_arg_t *yadif_prev_thread_args = NULL;
    for( ii = 0; ii < pv->cpu_count; ii++ )
    {
        yadif_thread_arg_t *thread_args;

        thread_args = taskset_thread_args( &pv->yadif_taskset, ii );
        thread_args->pv = pv;
        thread_args->segment = ii;

        int pp;
        for (pp = 0; pp < 3; pp++)
        {
            // First segment keeps segment_start == 0 (taskset args are
            // assumed zero-initialized by taskset_init, as in the loops
            // below -- TODO confirm against taskset implementation).
            if (yadif_prev_thread_args != NULL)
            {
                thread_args->segment_start[pp] =
                    yadif_prev_thread_args->segment_start[pp] +
                    yadif_prev_thread_args->segment_height[pp];
            }
            if( ii == pv->cpu_count - 1 )
            {
                /*
                 * Final segment
                 */
                thread_args->segment_height[pp] =
                    ((hb_image_height(init->pix_fmt, init->height, pp)
                     + 3) & ~3) - thread_args->segment_start[pp];
            } else {
                thread_args->segment_height[pp] = pv->segment_height[pp];
            }
        }
        pv->yadif_arguments[ii].dst = NULL;
        if( taskset_thread_spawn( &pv->yadif_taskset, ii,
                                  "yadif_filter_segment",
                                  yadif_decomb_filter_thread,
                                  HB_NORMAL_PRIORITY ) == 0 )
        {
            hb_error( "yadif could not spawn thread" );
        }
        yadif_prev_thread_args = thread_args;
    }

    /*
     * Create comb detection taskset.
     */
    if( taskset_init( &pv->decomb_filter_taskset, pv->cpu_count,
                      sizeof( decomb_thread_arg_t ) ) == 0 )
    {
        hb_error( "decomb could not initialize taskset" );
    }

    decomb_thread_arg_t *decomb_prev_thread_args = NULL;
    for( ii = 0; ii < pv->cpu_count; ii++ )
    {
        decomb_thread_arg_t *thread_args;

        thread_args = taskset_thread_args( &pv->decomb_filter_taskset, ii );
        thread_args->pv = pv;
        thread_args->segment = ii;

        int pp;
        for (pp = 0; pp < 3; pp++)
        {
            if (decomb_prev_thread_args != NULL)
            {
                thread_args->segment_start[pp] =
                    decomb_prev_thread_args->segment_start[pp] +
                    decomb_prev_thread_args->segment_height[pp];
            }
            if( ii == pv->cpu_count - 1 )
            {
                /*
                 * Final segment
                 */
                thread_args->segment_height[pp] =
                    hb_image_height(init->pix_fmt, init->height, pp) -
                    thread_args->segment_start[pp];
            } else {
                thread_args->segment_height[pp] = pv->segment_height[pp];
            }
        }
        if( taskset_thread_spawn( &pv->decomb_filter_taskset, ii,
                                  "decomb_filter_segment",
                                  decomb_filter_thread,
                                  HB_NORMAL_PRIORITY ) == 0 )
        {
            hb_error( "decomb could not spawn thread" );
        }
        decomb_prev_thread_args = thread_args;
    }

    // One comb-check thread per block row, capped at the CPU count.
    pv->comb_check_nthreads = init->height / pv->block_height;
    if (pv->comb_check_nthreads > pv->cpu_count)
        pv->comb_check_nthreads = pv->cpu_count;
    pv->block_score = calloc(pv->comb_check_nthreads, sizeof(int));

    /*
     * Create comb check taskset.
     */
    if( taskset_init( &pv->decomb_check_taskset, pv->comb_check_nthreads,
                      sizeof( decomb_thread_arg_t ) ) == 0 )
    {
        hb_error( "decomb check could not initialize taskset" );
    }

    decomb_prev_thread_args = NULL;
    for( ii = 0; ii < pv->comb_check_nthreads; ii++ )
    {
        // Do NOT re-declare decomb_prev_thread_args here: an inner
        // declaration would shadow the outer pointer, reset it to NULL each
        // iteration, and leave every segment_start at 0 so all comb-check
        // threads scanned the same rows.
        decomb_thread_arg_t *thread_args;

        thread_args = taskset_thread_args( &pv->decomb_check_taskset, ii );
        thread_args->pv = pv;
        thread_args->segment = ii;

        int pp;
        for (pp = 0; pp < 3; pp++)
        {
            if (decomb_prev_thread_args != NULL)
            {
                thread_args->segment_start[pp] =
                    decomb_prev_thread_args->segment_start[pp] +
                    decomb_prev_thread_args->segment_height[pp];
            }

            // Make segment height a multiple of block_height
            int h = hb_image_height(init->pix_fmt, init->height, pp) / pv->comb_check_nthreads;
            h = h / pv->block_height * pv->block_height;
            if (h == 0)
                h = pv->block_height;

            if (ii == pv->comb_check_nthreads - 1)
            {
                /*
                 * Final segment
                 */
                thread_args->segment_height[pp] =
                    hb_image_height(init->pix_fmt, init->height, pp) -
                    thread_args->segment_start[pp];
            } else {
                thread_args->segment_height[pp] = h;
            }
        }
        if( taskset_thread_spawn( &pv->decomb_check_taskset, ii,
                                  "decomb_check_segment",
                                  decomb_check_thread,
                                  HB_NORMAL_PRIORITY ) == 0 )
        {
            hb_error( "decomb check could not spawn thread" );
        }
        decomb_prev_thread_args = thread_args;
    }

    if( pv->mode & MODE_FILTER )
    {
        if( taskset_init( &pv->mask_filter_taskset, pv->cpu_count,
                          sizeof( decomb_thread_arg_t ) ) == 0 )
        {
            hb_error( "mask filter could not initialize taskset" );
        }

        decomb_prev_thread_args = NULL;
        for( ii = 0; ii < pv->cpu_count; ii++ )
        {
            decomb_thread_arg_t *thread_args;

            thread_args = taskset_thread_args( &pv->mask_filter_taskset, ii );
            thread_args->pv = pv;
            thread_args->segment = ii;

            int pp;
            for (pp = 0; pp < 3; pp++)
            {
                if (decomb_prev_thread_args != NULL)
                {
                    thread_args->segment_start[pp] =
                        decomb_prev_thread_args->segment_start[pp] +
                        decomb_prev_thread_args->segment_height[pp];
                }
                if( ii == pv->cpu_count - 1 )
                {
                    /*
                     * Final segment
                     */
                    thread_args->segment_height[pp] =
                        hb_image_height(init->pix_fmt, init->height, pp) -
                        thread_args->segment_start[pp];
                } else {
                    thread_args->segment_height[pp] = pv->segment_height[pp];
                }
            }
            if( taskset_thread_spawn( &pv->mask_filter_taskset, ii,
                                      "mask_filter_segment",
                                      mask_filter_thread,
                                      HB_NORMAL_PRIORITY ) == 0 )
            {
                hb_error( "mask filter could not spawn thread" );
            }
            decomb_prev_thread_args = thread_args;
        }

        if( pv->filter_mode == FILTER_ERODE_DILATE )
        {
            if( taskset_init( &pv->mask_erode_taskset, pv->cpu_count,
                              sizeof( decomb_thread_arg_t ) ) == 0 )
            {
                hb_error( "mask erode could not initialize taskset" );
            }

            decomb_prev_thread_args = NULL;
            for( ii = 0; ii < pv->cpu_count; ii++ )
            {
                decomb_thread_arg_t *thread_args;

                thread_args = taskset_thread_args( &pv->mask_erode_taskset, ii );
                thread_args->pv = pv;
                thread_args->segment = ii;

                int pp;
                for (pp = 0; pp < 3; pp++)
                {
                    if (decomb_prev_thread_args != NULL)
                    {
                        thread_args->segment_start[pp] =
                            decomb_prev_thread_args->segment_start[pp] +
                            decomb_prev_thread_args->segment_height[pp];
                    }
                    if( ii == pv->cpu_count - 1 )
                    {
                        /*
                         * Final segment
                         */
                        thread_args->segment_height[pp] =
                            hb_image_height(init->pix_fmt, init->height, pp) -
                            thread_args->segment_start[pp];
                    } else {
                        thread_args->segment_height[pp] = pv->segment_height[pp];
                    }
                }
                if( taskset_thread_spawn( &pv->mask_erode_taskset, ii,
                                          "mask_erode_segment",
                                          mask_erode_thread,
                                          HB_NORMAL_PRIORITY ) == 0 )
                {
                    hb_error( "mask erode could not spawn thread" );
                }
                decomb_prev_thread_args = thread_args;
            }

            if( taskset_init( &pv->mask_dilate_taskset, pv->cpu_count,
                              sizeof( decomb_thread_arg_t ) ) == 0 )
            {
                hb_error( "mask dilate could not initialize taskset" );
            }

            decomb_prev_thread_args = NULL;
            for( ii = 0; ii < pv->cpu_count; ii++ )
            {
                decomb_thread_arg_t *thread_args;

                thread_args = taskset_thread_args( &pv->mask_dilate_taskset, ii );
                thread_args->pv = pv;
                thread_args->segment = ii;

                int pp;
                for (pp = 0; pp < 3; pp++)
                {
                    if (decomb_prev_thread_args != NULL)
                    {
                        thread_args->segment_start[pp] =
                            decomb_prev_thread_args->segment_start[pp] +
                            decomb_prev_thread_args->segment_height[pp];
                    }
                    if( ii == pv->cpu_count - 1 )
                    {
                        /*
                         * Final segment
                         */
                        thread_args->segment_height[pp] =
                            hb_image_height(init->pix_fmt, init->height, pp) -
                            thread_args->segment_start[pp];
                    } else {
                        thread_args->segment_height[pp] = pv->segment_height[pp];
                    }
                }
                if( taskset_thread_spawn( &pv->mask_dilate_taskset, ii,
                                          "mask_dilate_segment",
                                          mask_dilate_thread,
                                          HB_NORMAL_PRIORITY ) == 0 )
                {
                    hb_error( "mask dilate could not spawn thread" );
                }
                decomb_prev_thread_args = thread_args;
            }
        }
    }

    if( pv->mode & MODE_EEDI2 )
    {
        /*
         * Create eedi2 taskset.
         */
        if( taskset_init( &pv->eedi2_taskset, /*thread_count*/3,
                          sizeof( eedi2_thread_arg_t ) ) == 0 )
        {
            hb_error( "eedi2 could not initialize taskset" );
        }

        if( pv->post_processing > 1 )
        {
            /* Derivative scratch arrays used by EEDI2 post-processing. */
            int stride = hb_image_stride(init->pix_fmt, init->width, 0);

            pv->cx2 = (int*)eedi2_aligned_malloc(
                    init->height * stride * sizeof(int), 16);
            pv->cy2 = (int*)eedi2_aligned_malloc(
                    init->height * stride * sizeof(int), 16);
            pv->cxy = (int*)eedi2_aligned_malloc(
                    init->height * stride * sizeof(int), 16);
            pv->tmpc = (int*)eedi2_aligned_malloc(
                    init->height * stride * sizeof(int), 16);

            if( !pv->cx2 || !pv->cy2 || !pv->cxy || !pv->tmpc )
                hb_log("EEDI2: failed to malloc derivative arrays");
            else
                hb_log("EEDI2: successfully malloced derivative arrays");
        }

        for( ii = 0; ii < 3; ii++ )
        {
            eedi2_thread_arg_t *eedi2_thread_args;

            eedi2_thread_args = taskset_thread_args( &pv->eedi2_taskset, ii );
            eedi2_thread_args->pv = pv;
            eedi2_thread_args->plane = ii;

            if( taskset_thread_spawn( &pv->eedi2_taskset, ii,
                                      "eedi2_filter_segment",
                                      eedi2_filter_thread,
                                      HB_NORMAL_PRIORITY ) == 0 )
            {
                hb_error( "eedi2 could not spawn thread" );
            }
        }
    }

    return 0;
}
/*
 * Tear down the decomb filter: report frame statistics, stop every taskset
 * created in hb_decomb_init, and release all buffers and scratch memory
 * owned by the private data.  Safe to call with no private data.
 */
static void hb_decomb_close( hb_filter_object_t * filter )
{
    hb_filter_private_t * pv = filter->private_data;
    int i;

    if( !pv )
    {
        return;
    }

    hb_log("decomb: deinterlaced %i | blended %i | unfiltered %i | total %i", pv->deinterlaced_frames, pv->blended_frames, pv->unfiltered_frames, pv->deinterlaced_frames + pv->blended_frames + pv->unfiltered_frames);

    /* Shut down worker tasksets; optional ones only if they were created. */
    taskset_fini( &pv->yadif_taskset );
    taskset_fini( &pv->decomb_filter_taskset );
    taskset_fini( &pv->decomb_check_taskset );
    if( pv->mode & MODE_FILTER )
    {
        taskset_fini( &pv->mask_filter_taskset );
        if( pv->filter_mode == FILTER_ERODE_DILATE )
        {
            taskset_fini( &pv->mask_erode_taskset );
            taskset_fini( &pv->mask_dilate_taskset );
        }
    }
    if( pv->mode & MODE_EEDI2 )
    {
        taskset_fini( &pv->eedi2_taskset );
    }

    /* Release the three yadif reference frames. */
    for (i = 0; i < 3; i++)
    {
        hb_buffer_close(&pv->ref[i]);
    }

    /* Release the combing mask buffers. */
    hb_buffer_close(&pv->mask);
    hb_buffer_close(&pv->mask_filtered);
    hb_buffer_close(&pv->mask_temp);

    if( pv->mode & MODE_EEDI2 )
    {
        /* Half-height EEDI2 work buffers. */
        for( i = 0; i < 4; i++ )
        {
            hb_buffer_close(&pv->eedi_half[i]);
        }
        /* Full-height EEDI2 work buffers. */
        for( i = 0; i < 5; i++ )
        {
            hb_buffer_close(&pv->eedi_full[i]);
        }
        /* Derivative arrays only exist when post-processing was enabled. */
        if( pv->post_processing > 1 )
        {
            if (pv->cx2) eedi2_aligned_free(pv->cx2);
            if (pv->cy2) eedi2_aligned_free(pv->cy2);
            if (pv->cxy) eedi2_aligned_free(pv->cxy);
            if (pv->tmpc) eedi2_aligned_free(pv->tmpc);
        }
    }

    free(pv->block_score);
    free( pv->yadif_arguments );
    free( pv );
    filter->private_data = NULL;
}
// Fill rows above height with copy of last row to prevent color distortion
// during blending.  Duplicates the final visible row of each plane into the
// three padding rows that follow it.
static void fill_stride(hb_buffer_t * buf)
{
    int plane;

    for (plane = 0; plane < 3; plane++)
    {
        int       stride   = buf->plane[plane].stride;
        uint8_t * last_row = buf->plane[plane].data +
                             (buf->plane[plane].height - 1) * stride;
        uint8_t * pad      = last_row + stride;
        int       row;

        for (row = 0; row < 3; row++)
        {
            memcpy(pad, last_row, stride);
            pad += stride;
        }
    }
}
/*
 * Filter one input frame.
 *
 * Pushes the frame into the 3-deep yadif reference cache, then runs
 * yadif_filter once (or twice when bobbing) against the middle reference
 * pv->ref[1].  Returns HB_FILTER_DELAY for the very first frame, since
 * yadif needs prev/cur/next before it can produce output, HB_FILTER_DONE
 * on the EOF buffer, and HB_FILTER_OK otherwise.  Ownership of *buf_in is
 * taken in all cases.
 */
static int hb_decomb_work( hb_filter_object_t * filter,
                           hb_buffer_t ** buf_in,
                           hb_buffer_t ** buf_out )
{
    hb_filter_private_t * pv = filter->private_data;
    hb_buffer_t * in = *buf_in;
    hb_buffer_t * last = NULL, * out = NULL;

    // Zero-size buffer marks end of stream: pass it through and stop.
    if ( in->size <= 0 )
    {
        *buf_out = in;
        *buf_in = NULL;
        return HB_FILTER_DONE;
    }

    /* Store current frame in yadif cache */
    *buf_in = NULL;
    fill_stride(in);
    store_ref(pv, in);

    // yadif requires 3 buffers, prev, cur, and next. For the first
    // frame, there can be no prev, so we duplicate the first frame.
    if (!pv->yadif_ready)
    {
        // If yadif is not ready, store another ref and return HB_FILTER_DELAY
        store_ref(pv, hb_buffer_dup(in));
        pv->yadif_ready = 1;
        // Wait for next
        return HB_FILTER_DELAY;
    }

    /* Determine if top-field first layout */
    int tff;
    if( pv->parity < 0 )
    {
        // Auto mode: take field order from the stream flags.
        tff = !!(in->s.flags & PIC_FLAG_TOP_FIELD_FIRST);
    }
    else
    {
        // Forced parity: 0 means TFF, 1 means BFF.
        tff = (pv->parity & 1) ^ 1;
    }

    /* deinterlace both fields if bob */
    int frame, num_frames = 1;
    if (pv->mode & MODE_BOB)
    {
        num_frames = 2;
    }

    // Will need up to 2 buffers simultaneously
    int idx = 0;
    hb_buffer_t * o_buf[2] = {NULL,};

    /* Perform yadif filtering */
    for( frame = 0; frame < num_frames; frame++ )
    {
        // Field parity for this output: alternates across the bob pair.
        int parity = frame ^ tff ^ 1;

        /* Skip the second run if the frame is uncombed */
        if (frame && pv->is_combed == 0)
        {
            break;
        }

        // tff for eedi2
        pv->tff = !parity;

        // Allocate (or reuse an unconsumed) output buffer.
        if (o_buf[idx] == NULL)
        {
            o_buf[idx] = hb_video_buffer_init(in->f.width, in->f.height);
        }

        // Only the first field of a bob pair needs a fresh comb check;
        // the second reuses pv->is_combed inside yadif_filter.
        if (frame)
            pv->skip_comb_check = 1;
        else
            pv->skip_comb_check = 0;

        yadif_filter(pv, o_buf[idx], parity, tff);

        // If bob, add all frames to output
        // else, if not combed, add frame to output
        // else if final iteration, add frame to output
        if ((pv->mode & MODE_BOB) ||
            pv->is_combed == 0 ||
            frame == num_frames - 1)
        {
            if ( out == NULL )
            {
                last = out = o_buf[idx];
            }
            else
            {
                last->next = o_buf[idx];
                last = last->next;
            }
            last->next = NULL;

            // Indicate that buffer was consumed
            o_buf[idx] = NULL;

            /* Copy buffered settings to output buffer settings */
            last->s = pv->ref[1]->s;

            idx ^= 1;

            // Optionally overlay the comb-detection mask for debugging.
            if ((pv->mode & MODE_MASK) && pv->spatial_metric >= 0 )
            {
                if (pv->mode == MODE_MASK ||
                    ((pv->mode & MODE_MASK) && (pv->mode & MODE_FILTER)) ||
                    ((pv->mode & MODE_MASK) && (pv->mode & MODE_GAMMA)) ||
                    pv->is_combed)
                {
                    apply_mask(pv, last);
                }
            }
        }
    }

    // Copy subs only to first output buffer
    hb_buffer_move_subs( out, pv->ref[1] );

    // Release any allocated-but-unconsumed scratch buffers.
    hb_buffer_close(&o_buf[0]);
    hb_buffer_close(&o_buf[1]);

    /* if this frame was deinterlaced and bob mode is engaged, halve
       the duration of the saved timestamps. */
    if ((pv->mode & MODE_BOB) && pv->is_combed)
    {
        out->s.stop -= (out->s.stop - out->s.start) / 2LL;
        last->s.start = out->s.stop;
        last->s.new_chap = 0;
    }

    *buf_out = out;
    return HB_FILTER_OK;
}
/*
 * One-shot blend deinterlace of src into dst, all three planes.
 * Even rows are copied straight through; each odd row is replaced by a
 * vertical blend of its neighborhood in src using tap weights
 * {-1, 4, 2, 4, -1} with a normalize shift of 3.
 */
void hb_deinterlace(hb_buffer_t *dst, hb_buffer_t *src)
{
    filter_param_t blend = {
        .tap       = { -1, 4, 2, 4, -1 },
        .normalize = 3,
    };
    int plane;

    // Duplicate the last row into the padding rows so the filter taps do
    // not read stale data near the bottom edge.
    fill_stride(src);

    for (plane = 0; plane < 3; plane++)
    {
        int width  = src->plane[plane].width;
        int stride = src->plane[plane].stride;
        int height = src->plane[plane].height_stride;
        uint8_t *out = &dst->plane[plane].data[0];
        uint8_t *in  = &src->plane[plane].data[0];
        int row;

        for (row = 0; row < height - 1; row += 2)
        {
            // Even row: straight copy.
            memcpy(out, in, width);
            out += stride;
            in  += stride;
            // Odd row: vertical blend.
            blend_filter_line(&blend, out, in, width, height, stride, row + 1);
            out += stride;
            in  += stride;
        }
    }
}
HandBrake-0.10.2/libhb/sync.c 0000664 0001752 0001752 00000160073 12463330511 016244 0 ustar handbrake handbrake /* sync.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: <http://handbrake.fr/>.
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
#include "hbffmpeg.h"
#include
#include "samplerate.h"
#ifdef INT64_MIN
#undef INT64_MIN /* Because it isn't defined correctly in Zeta */
#endif
#define INT64_MIN (-9223372036854775807LL-1)
/* State shared between the video sync work object and all audio sync work
 * objects of one job.  Reference counted via 'ref'; shared fields are
 * accessed under 'mutex'. */
typedef struct
{
    hb_lock_t * mutex;        /* guards the shared fields below */
    int ref; /* Reference count to tell us when it's unused */
    int count_frames;         /* video frames processed so far */
    int64_t audio_pts_slip;   /* pts correction applied on the audio side */
    int64_t video_pts_slip;   /* pts correction subtracted from video/subtitle times */
    int64_t pts_offset;       /* common pts origin; INT64_MIN until determined */
    /* Frame based point-to-point support */
    int64_t audio_pts_thresh; /* audio-side start threshold; AV_NOPTS_VALUE until set */
    int start_found;          /* nonzero once the p-to-p start point is reached */
    hb_cond_t * next_frame;   /* signaled when pts_offset / start state changes */
    int pts_count;            /* number of streams tracked in first_pts */
    int64_t * first_pts;      /* per-stream first pts; INT64_MAX until seen */
} hb_sync_common_t;
/* Per-audio-track sync state (one per audio sync work object). */
typedef struct
{
    int index;              /* audio track index */
    double next_start;   /* start time of next output frame */
    int64_t first_drop;  /* PTS of first 'went backwards' frame dropped */
    int drop_count;      /* count of 'time went backwards' drops */
    /* Raw */
    SRC_STATE * state;      /* libsamplerate converter instance */
    SRC_DATA data;          /* libsamplerate in/out descriptor */
    int silence_size;       /* size of silence_buf in bytes -- see InsertSilence() (body not visible here) */
    uint8_t * silence_buf;  /* presumably zeroed samples for gap filling; verify in InsertSilence() */
    int drop_video_to_sync; /* NOTE(review): semantics defined in audio sync code not visible here */
    double gain_factor;     /* linear gain applied to output samples -- TODO confirm in audio path */
} hb_sync_audio_t;
/* Per-subtitle-track state used by sanitizeSubtitle()/mergeSubtitles(). */
typedef struct
{
    int link;                   /* fill in missing stop times from the next sub's start */
    int merge;                  /* merge overlapping subs (text passthru into MP4/tx3g) */
    hb_buffer_t * list_current; /* head of the pending-subtitle list */
    hb_buffer_t * last;         /* tail of the pending-subtitle list */
} subtitle_sanitizer_t;
/* Video-side sync state, embedded in hb_work_private_s for the video
 * sync work object. */
typedef struct
{
    /* Video */
    int first_frame;        /* set at init; usage is outside this view */
    int64_t pts_skip;
    int64_t next_start;     /* start time of next output frame */
    int64_t first_drop;     /* PTS of first 'went backwards' frame dropped */
    int drop_count;         /* count of 'time went backwards' drops */
    int drops;              /* frames dropped to make a cbr video stream */
    int dups;               /* frames duplicated to make a cbr video stream */
    int video_sequence;
    int count_frames_max;   /* expected total frames (estimate, or exact from pass 1) */
    int chap_mark;          /* to propagate chapter mark across a drop */
    hb_buffer_t * cur;      /* The next picture to process */
    subtitle_sanitizer_t *subtitle_sanitizer; /* array, one entry per subtitle track */
    /* Statistics */
    uint64_t st_counts[4];
    uint64_t st_dates[4];
    uint64_t st_first;
} hb_sync_video_t;
/* Private data for the sync work objects.  'common' is shared and
 * reference-counted between the video sync object and every audio sync
 * object of the same job; 'type' holds the role-specific state. */
struct hb_work_private_s
{
    hb_job_t * job;
    hb_sync_common_t * common; /* shared, ref-counted; freed by last closer */
    union
    {
        hb_sync_video_t video; /* valid for the video sync object */
        hb_sync_audio_t audio; /* valid for audio sync objects */
    } type;
};
/***********************************************************************
* Local prototypes
**********************************************************************/
static void getPtsOffset( hb_work_object_t * w );
static int checkPtsOffset( hb_work_object_t * w );
static void InitAudio( hb_job_t * job, hb_sync_common_t * common, int i );
static void InitSubtitle( hb_job_t * job, hb_sync_video_t * sync, int i );
static void InsertSilence( hb_work_object_t * w, int64_t d );
static void UpdateState( hb_work_object_t * w );
static void UpdateSearchState( hb_work_object_t * w, int64_t start );
static hb_buffer_t * OutputAudioFrame( hb_audio_t *audio, hb_buffer_t *buf,
hb_sync_audio_t *sync );
/***********************************************************************
 * hb_work_sync_init
 ***********************************************************************
 * Initialize the work object
 *
 * Creates the video sync work object plus the shared (ref-counted)
 * common state, estimates the expected frame count, initializes one
 * audio sync object per audio track, and sets up the per-track
 * subtitle sanitizers.  Returns the video sync work object.
 **********************************************************************/
hb_work_object_t * hb_sync_init( hb_job_t * job )
{
    hb_title_t        * title = job->title;
    hb_chapter_t      * chapter;
    int                 i;
    uint64_t            duration;
    hb_work_private_t * pv;
    hb_sync_video_t   * sync;
    hb_work_object_t  * w;
    hb_work_object_t  * ret = NULL;

    pv = calloc( 1, sizeof( hb_work_private_t ) );
    sync = &pv->type.video;
    pv->common = calloc( 1, sizeof( hb_sync_common_t ) );
    // One reference for this (video) object; InitAudio adds more.
    pv->common->ref++;
    pv->common->mutex = hb_lock_init();
    pv->common->audio_pts_thresh = AV_NOPTS_VALUE;
    pv->common->next_frame = hb_cond_init();
    pv->common->pts_count = 1;
    // Point-to-point encodes must first locate their start point.
    if ( job->frame_to_start || job->pts_to_start )
    {
        pv->common->start_found = 0;
    }
    else
    {
        pv->common->start_found = 1;
    }

    ret = w = hb_get_work( WORK_SYNC_VIDEO );
    w->private_data = pv;
    w->fifo_in = job->fifo_raw;
    // When doing subtitle indepth scan, the pipeline ends at sync
    if ( !job->indepth_scan )
        w->fifo_out = job->fifo_sync;
    else
        w->fifo_out = NULL;

    pv->job            = job;
    // INT64_MIN is the "not yet determined" sentinel for pts_offset.
    pv->common->pts_offset   = INT64_MIN;
    sync->first_frame = 1;

    if( job->pass == 2 )
    {
        /* We already have an accurate frame count from pass 1 */
        hb_interjob_t * interjob = hb_interjob_get( job->h );
        sync->count_frames_max = interjob->frame_count;
    }
    else
    {
        /* Calculate how many video frames we are expecting */
        if ( job->pts_to_stop )
        {
            // Add a second of slop to the requested stop time.
            duration = job->pts_to_stop + 90000;
        }
        else if( job->frame_to_stop )
        {
            /* Set the duration to a rough estimate */
            duration = ( job->frame_to_stop / ( title->rate / title->rate_base ) ) * 90000;
        }
        else
        {
            // Sum the durations of the selected chapter range.
            duration = 0;
            for( i = job->chapter_start; i <= job->chapter_end; i++ )
            {
                chapter   = hb_list_item( job->list_chapter, i - 1 );
                duration += chapter->duration;
            }
        }
        sync->count_frames_max = duration * title->rate / title->rate_base / 90000;
    }
    hb_log( "sync: expecting %d video frames", sync->count_frames_max );

    /* Initialize libsamplerate for every audio track we have */
    if ( ! job->indepth_scan )
    {
        for( i = 0; i < hb_list_count( job->list_audio ); i++ )
        {
            InitAudio( job, pv->common, i );
        }
    }
    // One first-pts slot per stream, initialized to "not seen yet".
    pv->common->first_pts = malloc( sizeof(int64_t) * pv->common->pts_count );
    for ( i = 0; i < pv->common->pts_count; i++ )
        pv->common->first_pts[i] = INT64_MAX;

    // One sanitizer per subtitle track.
    int count = hb_list_count(job->list_subtitle);
    sync->subtitle_sanitizer = calloc(count, sizeof(subtitle_sanitizer_t));
    for( i = 0; i < count; i++ )
    {
        InitSubtitle(job, sync, i);
    }
    return ret;
}
/*
 * Configure the subtitle sanitizer for track i:
 *  - merge: text subs passed through into an MP4 mux get overlapping
 *    entries merged, since the tx3g sample format cannot represent them;
 *  - link:  passthru subs (except PGS) get missing stop times filled in
 *    from the following subtitle's start time.
 */
static void InitSubtitle( hb_job_t * job, hb_sync_video_t * sync, int i )
{
    hb_subtitle_t * subtitle;

    subtitle = hb_list_item( job->list_subtitle, i );
    if (subtitle->format == TEXTSUB &&
        subtitle->config.dest == PASSTHRUSUB &&
        (job->mux & HB_MUX_MASK_MP4))
    {
        // Merge overlapping subtitles since mp4/tx3g does not support them
        sync->subtitle_sanitizer[i].merge = 1;
    }
    // PGS subtitles don't need to be linked because there are explicit
    // "clear" subtitle packets that indicate the end time of the
    // previous subtitle
    if (subtitle->config.dest == PASSTHRUSUB &&
        subtitle->source != PGSSUB)
    {
        // Fill in stop time when it is missing
        sync->subtitle_sanitizer[i].link = 1;
    }
}
/* Free any subtitles still queued in track ii's sanitizer list
 * (hb_buffer_close releases the whole chained list). */
static void CloseSubtitle(hb_sync_video_t * sync, int ii)
{
    hb_buffer_close(&sync->subtitle_sanitizer[ii].list_current);
}
/***********************************************************************
 * Close Video
 ***********************************************************************
 * Tear down the video sync object: wake any waiting audio sync
 * threads, log frame statistics, persist the frame count for a second
 * pass, release the subtitle sanitizers, and drop this object's
 * reference on the shared common state (freeing it when last).
 **********************************************************************/
void syncVideoClose( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;
    hb_job_t          * job   = pv->job;
    hb_sync_video_t   * sync = &pv->type.video;
    int ii;

    // Wake up audio sync if it's still waiting on condition.
    pv->common->pts_offset = 0;
    pv->common->start_found = 1;
    hb_cond_broadcast( pv->common->next_frame );

    if( sync->cur )
    {
        hb_buffer_close( &sync->cur );
    }

    hb_log( "sync: got %d frames, %d expected",
            pv->common->count_frames, sync->count_frames_max );

    /* save data for second pass */
    if( job->pass == 1 )
    {
        /* Preserve frame count for better accuracy in pass 2 */
        hb_interjob_t * interjob = hb_interjob_get( job->h );
        interjob->frame_count = pv->common->count_frames;
        interjob->last_job = job->sequence_id;
    }

    if (sync->drops || sync->dups )
    {
        hb_log( "sync: %d frames dropped, %d duplicated",
                sync->drops, sync->dups );
    }

    // Release any subtitles still pending in the sanitizers.
    int count = hb_list_count(job->list_subtitle);
    for( ii = 0; ii < count; ii++ )
    {
        CloseSubtitle(sync, ii);
    }
    free(sync->subtitle_sanitizer);

    // Drop our reference on the shared state; the last closer frees it.
    hb_lock( pv->common->mutex );
    if ( --pv->common->ref == 0 )
    {
        hb_unlock( pv->common->mutex );
        hb_cond_close( &pv->common->next_frame );
        hb_lock_close( &pv->common->mutex );
        free( pv->common->first_pts );
        free( pv->common );
    }
    else
    {
        hb_unlock( pv->common->mutex );
    }

    free( pv );
    w->private_data = NULL;
}
#define ABS(a) ((a) < 0 ? -(a) : (a))

/*
 * Combine two overlapping SSA subtitle events into one new buffer.
 * The result keeps a's metadata and timing (buf->s = a->s) and contains
 * a's full event followed by a newline and the dialogue text of b (the
 * field after the 8th comma of b's event line).  If b is malformed
 * (fewer than 8 commas), a is copied through unchanged.  Caller owns
 * the returned buffer; a and b are not freed here.
 */
static hb_buffer_t * merge_ssa(hb_buffer_t *a, hb_buffer_t *b)
{
    hb_buffer_t *out = hb_buffer_init(a->size + b->size);
    out->s = a->s;

    // Skip the 8 leading comma-separated fields of the second SSA event
    // to reach its dialogue text.
    char *payload = (char*)b->data;
    int   commas  = 0;
    while (commas < 8 && payload != NULL)
    {
        payload = strchr(payload, ',');
        if (payload != NULL)
        {
            payload++;
            commas++;
        }
    }

    if (payload == NULL)
    {
        // Malformed second event: pass the first sub through unchanged.
        memcpy(out->data, a->data, a->size);
        out->size = a->size;
    }
    else
    {
        // Both strings fit: out was sized a->size + b->size above.
        int len = sprintf((char*)out->data, "%s\n%s", a->data, payload);
        if (len >= 0)
        {
            out->size = len + 1;
        }
    }
    return out;
}
/*
 * Drain the sanitizer's pending list, emitting every subtitle whose
 * timing is resolved and (when sanitizer->merge is set) merging entries
 * that overlap in time.  'end' forces the final pending entry out at
 * end of stream.  Returns a chained list of output buffers (or NULL).
 */
static hb_buffer_t * mergeSubtitles(subtitle_sanitizer_t *sanitizer, int end)
{
    hb_buffer_t *a, *b, *buf, *out = NULL, *last = NULL;

    do
    {
        // a = oldest pending sub, b = the one after it.
        a = sanitizer->list_current;
        b = a != NULL ? a->next : NULL;

        buf = NULL;
        if (a != NULL && b == NULL && end)
        {
            // End of stream: flush the last pending sub as-is.
            sanitizer->list_current = a->next;
            if (sanitizer->list_current == NULL)
                sanitizer->last = NULL;
            a->next = NULL;
            buf = a;
        }
        else if (a != NULL && a->s.stop != AV_NOPTS_VALUE)
        {
            if (!sanitizer->merge)
            {
                // No merging: any sub with a known stop time can go out.
                sanitizer->list_current = a->next;
                if (sanitizer->list_current == NULL)
                    sanitizer->last = NULL;
                a->next = NULL;
                buf = a;
            }
            else if (b != NULL && a->s.stop > b->s.start)
            {
                // Overlap
                if (ABS(a->s.start - b->s.start) <= 18000)
                {
                    // subtitles start within 1/5 second of eachother, merge
                    if (a->s.stop > b->s.stop)
                    {
                        // a continues after b, reorder the list and swap
                        hb_buffer_t *tmp = a;
                        a->next = b->next;
                        b->next = a;
                        if (sanitizer->last == b)
                        {
                            sanitizer->last = a;
                        }
                        a = b;
                        b = tmp;
                        sanitizer->list_current = a;
                    }
                    a->next = NULL;
                    // b now begins where the merged span ends.
                    b->s.start = a->s.stop;
                    // Replace a with the merged text of a+b.
                    buf = merge_ssa(a, b);
                    hb_buffer_close(&a);
                    a = buf;
                    buf = NULL;
                    sanitizer->list_current = a;
                    if (b->s.stop != AV_NOPTS_VALUE &&
                        ABS(b->s.stop - b->s.start) <= 18000)
                    {
                        // b and a completely overlap, remove b
                        a->next = b->next;
                        b->next = NULL;
                        if (sanitizer->last == b)
                        {
                            sanitizer->last = a;
                        }
                        hb_buffer_close(&b);
                    }
                    else
                    {
                        a->next = b;
                    }
                }
                else
                {
                    // a starts before b, output copy of a and
                    // trim the remaining a to begin at b's start.
                    buf = hb_buffer_dup(a);
                    buf->s.stop = b->s.start;
                    a->s.start = b->s.start;
                }
            }
            else if (b != NULL && a->s.stop <= b->s.start)
            {
                // No overlap with the next sub: a is fully resolved.
                sanitizer->list_current = a->next;
                if (sanitizer->list_current == NULL)
                    sanitizer->last = NULL;
                a->next = NULL;
                buf = a;
            }
        }

        if (buf != NULL)
        {
            // Finalize duration and append to the output chain.
            if (buf->s.stop != AV_NOPTS_VALUE)
                buf->s.duration = buf->s.stop - buf->s.start;
            else
                buf->s.duration = AV_NOPTS_VALUE;
            if (last == NULL)
            {
                out = last = buf;
            }
            else
            {
                last->next = buf;
                last = buf;
            }
        }
    } while (buf != NULL);

    return out;
}
/*
 * Normalize one incoming subtitle for track i: rebase its timestamps by
 * the shared video_pts_slip, then either pass it straight through (no
 * link/merge needed for this track) or queue it in the sanitizer and
 * return whatever mergeSubtitles() can emit so far.  A NULL sub flushes
 * the sanitizer at end of stream.  Takes ownership of 'sub'.
 */
static hb_buffer_t * sanitizeSubtitle(
    hb_work_private_t * pv,
    int                 i,
    hb_buffer_t       * sub)
{
    hb_sync_video_t      * sync;
    subtitle_sanitizer_t * sanitizer;

    sync = &pv->type.video;
    sanitizer = &sync->subtitle_sanitizer[i];

    if (!sanitizer->link && !sanitizer->merge)
    {
        // Fast path: just rebase the timestamps and pass through.
        if (sub != NULL)
        {
            if (sub->s.stop != AV_NOPTS_VALUE)
                sub->s.duration = sub->s.stop - sub->s.start;
            else
                sub->s.duration = 0;
            sub->s.start -= pv->common->video_pts_slip;
            if (sub->s.stop != AV_NOPTS_VALUE)
                sub->s.stop -= pv->common->video_pts_slip;
            if (sub->s.renderOffset != AV_NOPTS_VALUE)
                sub->s.renderOffset -= pv->common->video_pts_slip;
        }
        return sub;
    }

    if (sub == NULL)
    {
        // End of stream: force out anything still pending.
        return mergeSubtitles(sanitizer, 1);
    }

    // video_pts_slip is shared state; read it under the common mutex.
    hb_lock( pv->common->mutex );
    sub->s.start -= pv->common->video_pts_slip;
    if (sub->s.stop != AV_NOPTS_VALUE)
        sub->s.stop -= pv->common->video_pts_slip;
    if (sub->s.renderOffset != AV_NOPTS_VALUE)
        sub->s.renderOffset -= pv->common->video_pts_slip;
    hb_unlock( pv->common->mutex );

    // Link: the previous sub's missing stop time is this sub's start.
    if (sanitizer->last != NULL && sanitizer->last->s.stop == AV_NOPTS_VALUE)
    {
        sanitizer->last->s.stop = sub->s.start;
    }

    if (sub->s.start == sub->s.stop)
    {
        // Used to indicate "clear" subtitles when the duration
        // of subtitles is not encoded in the stream
        hb_buffer_close(&sub);
    }
    if (sub != NULL)
    {
        // Append to the sanitizer's pending list.
        if (sanitizer->last == NULL)
        {
            sanitizer->list_current = sanitizer->last = sub;
        }
        else
        {
            sanitizer->last->next = sub;
            sanitizer->last = sub;
        }
    }

    return mergeSubtitles(sanitizer, 0);
}
/***********************************************************************
* syncVideoWork
***********************************************************************
*
**********************************************************************/
/* Process one video frame for A/V sync.
 *
 * Holds on to the current frame until the next one arrives (the next
 * frame's start time determines the current frame's duration), applies
 * the shared pts offset/slip, handles point-to-point start/stop
 * conditions, and forwards sanitized subtitles that apply to this
 * video frame.
 *
 * Returns HB_WORK_OK to continue, or HB_WORK_DONE at end of stream /
 * end of the point-to-point range.
 *
 * Fixes vs. previous revision: removed a stray double semicolon and a
 * redundant inner declaration of `sync` that shadowed the outer one
 * (same object, flagged by -Wshadow).
 */
int syncVideoWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                   hb_buffer_t ** buf_out )
{
    hb_buffer_t       * cur, * next, * sub = NULL;
    hb_work_private_t * pv = w->private_data;
    hb_job_t          * job = pv->job;
    hb_subtitle_t     * subtitle;
    hb_sync_video_t   * sync = &pv->type.video;
    int i;
    int64_t next_start;

    *buf_out = NULL;
    next = *buf_in;
    *buf_in = NULL;

    /* Wait till we can determine the initial pts of all streams */
    if( next->size != 0 && pv->common->pts_offset == INT64_MIN )
    {
        pv->common->first_pts[0] = next->s.start;
        hb_lock( pv->common->mutex );
        while( pv->common->pts_offset == INT64_MIN && !*w->done )
        {
            // Full fifos will make us wait forever, so get the
            // pts offset from the available streams if full
            if ( hb_fifo_is_full( job->fifo_raw ) )
            {
                getPtsOffset( w );
                hb_cond_broadcast( pv->common->next_frame );
            }
            else if ( checkPtsOffset( w ) )
                hb_cond_broadcast( pv->common->next_frame );
            else
                hb_cond_timedwait( pv->common->next_frame, pv->common->mutex, 200 );
        }
        hb_unlock( pv->common->mutex );
    }

    /* Snapshot the slipped start time under the lock; the audio
     * threads may be updating video_pts_slip concurrently. */
    hb_lock( pv->common->mutex );
    next_start = next->s.start - pv->common->video_pts_slip;
    hb_unlock( pv->common->mutex );

    /* Wait for start of point-to-point encoding */
    if( !pv->common->start_found )
    {
        if( next->size == 0 )
        {
            /* EOF before the start point was ever reached. */
            *buf_out = next;
            pv->common->start_found = 1;
            pv->common->first_pts[0] = INT64_MAX - 1;
            hb_cond_broadcast( pv->common->next_frame );

            /*
             * Push through any subtitle EOFs in case they
             * were not synced through.
             */
            for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
            {
                subtitle = hb_list_item( job->list_subtitle, i );
                if( subtitle->config.dest == PASSTHRUSUB )
                {
                    hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
                }
            }
            return HB_WORK_DONE;
        }
        if ( pv->common->count_frames < job->frame_to_start ||
             next->s.start < job->pts_to_start )
        {
            // Flush any subtitles that have pts prior to the
            // current frame
            for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
            {
                subtitle = hb_list_item( job->list_subtitle, i );
                while( ( sub = hb_fifo_see( subtitle->fifo_raw ) ) )
                {
                    if ( sub->s.start > next->s.start )
                        break;
                    sub = hb_fifo_get( subtitle->fifo_raw );
                    hb_buffer_close( &sub );
                }
            }

            hb_lock( pv->common->mutex );
            if (job->frame_to_start > 0)
            {
                // When doing frame based p-to-p we must update the audio
                // start point with each frame skipped.
                //
                // Tell the audio threads what must be dropped
                pv->common->audio_pts_thresh = next->s.start;
            }
            hb_cond_broadcast( pv->common->next_frame );
            hb_unlock( pv->common->mutex );

            UpdateSearchState( w, next_start );
#ifdef USE_QSV
            // reclaim QSV resources before dropping the buffer
            // when decoding without QSV, the QSV atom will be NULL
            if (job != NULL && job->qsv.ctx != NULL &&
                next->qsv_details.qsv_atom != NULL)
            {
                av_qsv_stage *stage = av_qsv_get_last_stage(next->qsv_details.qsv_atom);
                if (stage != NULL)
                {
                    av_qsv_wait_on_sync(job->qsv.ctx, stage);
                    if (stage->out.sync->in_use > 0)
                    {
                        ff_qsv_atomic_dec(&stage->out.sync->in_use);
                    }
                    if (stage->out.p_surface->Data.Locked > 0)
                    {
                        ff_qsv_atomic_dec(&stage->out.p_surface->Data.Locked);
                    }
                }
                av_qsv_flush_stages(job->qsv.ctx->pipes,
                                    &next->qsv_details.qsv_atom);
            }
#endif
            hb_buffer_close( &next );
            return HB_WORK_OK;
        }

        /* Start point reached: rebase the clocks so encoding starts
         * at time zero and reset the frame counter. */
        hb_lock( pv->common->mutex );
        pv->common->audio_pts_thresh = 0;
        pv->common->audio_pts_slip += next_start;
        pv->common->video_pts_slip += next_start;
        next_start = 0;
        pv->common->start_found = 1;
        pv->common->count_frames = 0;
        hb_cond_broadcast( pv->common->next_frame );
        hb_unlock( pv->common->mutex );
        sync->st_first = 0;
    }

    if( !sync->cur )
    {
        sync->cur = next;
        if (next->size == 0)
        {
            /* we got an end-of-stream as our first video packet?
             * Feed it downstream & signal that we're done.
             */
            *buf_out = next;
            sync->cur = NULL;
            pv->common->start_found = 1;
            pv->common->first_pts[0] = INT64_MAX - 1;
            hb_cond_broadcast( pv->common->next_frame );

            /*
             * Push through any subtitle EOFs in case they
             * were not synced through.
             */
            for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
            {
                subtitle = hb_list_item( job->list_subtitle, i );
                if( subtitle->config.dest == PASSTHRUSUB )
                {
                    hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
                }
            }
            return HB_WORK_DONE;
        }
        return HB_WORK_OK;
    }
    cur = sync->cur;
    /* At this point we have a frame to process. Let's check
        1) if we will be able to push into the fifo ahead
        2) if the next frame is there already, since we need it to
           compute the duration of the current frame*/
    if( next->size == 0 )
    {
        hb_buffer_close( &next );

        pv->common->first_pts[0] = INT64_MAX - 1;
        cur->s.start = sync->next_start;
        /* Synthesize a one-frame duration for the last frame from the
         * job's frame rate. */
        cur->s.stop = cur->s.start + 90000. / ((double)job->vrate / (double)job->vrate_base);
        sync->next_start += cur->s.stop - cur->s.start;  /* (stray ';;' removed) */

        /* Make sure last frame is reflected in frame count */
        pv->common->count_frames++;

        /* Push the frame to the renderer */
        *buf_out = cur;
        sync->cur = NULL;

        /* we got an end-of-stream. Feed it downstream & signal that
         * we're done. Note that this means we drop the final frame of
         * video (we don't know its duration). On DVDs the final frame
         * is often strange and dropping it seems to be a good idea. */
        (*buf_out)->next = hb_buffer_init( 0 );

        /*
         * Push through any subtitle EOFs in case they were not synced through.
         */
        for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
        {
            subtitle = hb_list_item( job->list_subtitle, i );
            // flush out any pending subtitle buffers in the sanitizer
            hb_buffer_t *out = sanitizeSubtitle(pv, i, NULL);
            if (out != NULL)
                hb_fifo_push( subtitle->fifo_out, out );
            if( subtitle->config.dest == PASSTHRUSUB )
            {
                hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
            }
        }
        pv->common->start_found = 1;
        hb_cond_broadcast( pv->common->next_frame );
        return HB_WORK_DONE;
    }

    /* Check for end of point-to-point frame encoding */
    if( job->frame_to_stop && pv->common->count_frames > job->frame_to_stop )
    {
        // Drop an empty buffer into our output to ensure that things
        // get flushed all the way out.
        hb_buffer_close( &sync->cur );
        hb_buffer_close( &next );
        *buf_out = hb_buffer_init( 0 );
        hb_log( "sync: reached %d frames, exiting early",
                pv->common->count_frames );

        /*
         * Push through any subtitle EOFs in case they were not synced through.
         */
        for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
        {
            subtitle = hb_list_item( job->list_subtitle, i );
            // flush out any pending subtitle buffers in the sanitizer
            hb_buffer_t *out = sanitizeSubtitle(pv, i, NULL);
            if (out != NULL)
                hb_fifo_push( subtitle->fifo_out, out );
            if( subtitle->config.dest == PASSTHRUSUB )
            {
                hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
            }
        }
        return HB_WORK_DONE;
    }

    /* Check for end of point-to-point pts encoding */
    if( job->pts_to_stop && sync->next_start >= job->pts_to_stop )
    {
        // Drop an empty buffer into our output to ensure that things
        // get flushed all the way out.
        hb_log( "sync: reached pts %"PRId64", exiting early", cur->s.start );
        hb_buffer_close( &sync->cur );
        hb_buffer_close( &next );
        *buf_out = hb_buffer_init( 0 );

        /*
         * Push through any subtitle EOFs in case they were not synced through.
         */
        for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
        {
            subtitle = hb_list_item( job->list_subtitle, i );
            // flush out any pending subtitle buffers in the sanitizer
            hb_buffer_t *out = sanitizeSubtitle(pv, i, NULL);
            if (out != NULL)
                hb_fifo_push( subtitle->fifo_out, out );
            if( subtitle->config.dest == PASSTHRUSUB )
            {
                hb_fifo_push( subtitle->fifo_out, hb_buffer_init( 0 ) );
            }
        }
        return HB_WORK_DONE;
    }

    if( sync->first_frame )
    {
        /* This is our first frame */
        if ( cur->s.start > 0 )
        {
            /*
             * The first pts from a dvd should always be zero but
             * can be non-zero with a transport or program stream since
             * we're not guaranteed to start on an IDR frame. If we get
             * a non-zero initial PTS extend its duration so it behaves
             * as if it started at zero so that our audio timing will
             * be in sync.
             */
            hb_log( "sync: first pts is %"PRId64, cur->s.start );
            cur->s.start = 0;
        }
        sync->first_frame = 0;
    }

    /*
     * since the first frame is always 0 and the upstream reader code
     * is taking care of adjusting for pts discontinuities, we just have
     * to deal with the next frame's start being in the past. This can
     * happen when the PTS is adjusted after data loss but video frame
     * reordering causes some frames with the old clock to appear after
     * the clock change. This creates frames that overlap in time which
     * looks to us like time going backward. The downstream muxing code
     * can deal with overlaps of up to a frame time but anything larger
     * we handle by dropping frames here.
     */
    if ( next_start - cur->s.start <= 0 )
    {
        if ( sync->first_drop == 0 )
        {
            sync->first_drop = next_start;
        }
        ++sync->drop_count;
        if ( next->s.new_chap )
        {
            // don't drop a chapter mark when we drop the buffer
            sync->chap_mark = next->s.new_chap;
        }

#ifdef USE_QSV
        // reclaim QSV resources before dropping the buffer
        // when decoding without QSV, the QSV atom will be NULL
        if (job != NULL && job->qsv.ctx != NULL &&
            next->qsv_details.qsv_atom != NULL)
        {
            av_qsv_stage *stage = av_qsv_get_last_stage(next->qsv_details.qsv_atom);
            if (stage != NULL)
            {
                av_qsv_wait_on_sync(job->qsv.ctx, stage);
                if (stage->out.sync->in_use > 0)
                {
                    ff_qsv_atomic_dec(&stage->out.sync->in_use);
                }
                if (stage->out.p_surface->Data.Locked > 0)
                {
                    ff_qsv_atomic_dec(&stage->out.p_surface->Data.Locked);
                }
            }
            av_qsv_flush_stages(job->qsv.ctx->pipes,
                                &next->qsv_details.qsv_atom);
        }
#endif

        hb_buffer_close( &next );
        return HB_WORK_OK;
    }
    if ( sync->first_drop )
    {
        hb_log( "sync: video time didn't advance - dropped %d frames "
                "(delta %d ms, current %"PRId64", next %"PRId64", dur %d)",
                sync->drop_count, (int)( cur->s.start - sync->first_drop ) / 90,
                cur->s.start, next_start, (int)( next_start - cur->s.start ) );
        sync->first_drop = 0;
        sync->drop_count = 0;
    }

    /*
     * Track the video sequence number locally so that we can sync the audio
     * to it using the sequence number as well as the PTS.
     */
    sync->video_sequence = cur->sequence;

    /* Process subtitles that apply to this video frame */
    // NOTE: There is no logic in either subtitle-sync algorithm that waits
    // for the subtitle-decoder if it is lagging behind the video-decoder.
    //
    // Therefore there is the implicit assumption that the subtitle-decoder
    // is always faster than the video-decoder. This assumption is definitely
    // incorrect in some cases where the SSA subtitle decoder is used.
    for( i = 0; i < hb_list_count( job->list_subtitle ); i++)
    {
        hb_buffer_t *out;

        subtitle = hb_list_item( job->list_subtitle, i );

        // Sanitize subtitle start and stop times, then pass to
        // muxer or renderer filter.
        while ( ( sub = hb_fifo_get( subtitle->fifo_raw ) ) != NULL )
        {
            if (sub->size > 0)
            {
                out = sanitizeSubtitle(pv, i, sub);
                if (out != NULL)
                    hb_fifo_push( subtitle->fifo_out, out );
            }
            else
            {
                // Push the end of stream marker
                hb_fifo_push( subtitle->fifo_out, sub );
            }
        }
    }

    /*
     * Adjust the pts of the current frame so that it's contiguous
     * with the previous frame. The start time of the current frame
     * has to be the end time of the previous frame and the stop
     * time has to be the start of the next frame. We don't
     * make any adjustments to the source timestamps other than removing
     * the clock offsets (which also removes pts discontinuities).
     * This means we automatically encode at the source's frame rate.
     * MP2 uses an implicit duration (frames end when the next frame
     * starts) but more advanced containers like MP4 use an explicit
     * duration. Since we're looking ahead one frame we set the
     * explicit stop time from the start time of the next frame.
     */
    *buf_out = cur;
    int64_t duration = next_start - cur->s.start;
    sync->cur = cur = next;
    cur->sub = NULL;
    cur->s.start -= pv->common->video_pts_slip;
    if (cur->s.renderOffset != AV_NOPTS_VALUE)
        cur->s.renderOffset -= pv->common->video_pts_slip;
    cur->s.stop -= pv->common->video_pts_slip;
    sync->pts_skip = 0;
    if ( duration <= 0 )
    {
        hb_log( "sync: invalid video duration %"PRId64", start %"PRId64", next %"PRId64"",
                duration, cur->s.start, next_start );
    }

    (*buf_out)->s.start = sync->next_start;
    sync->next_start += duration;
    (*buf_out)->s.stop = sync->next_start;

    if ( sync->chap_mark )
    {
        // we have a pending chapter mark from a recent drop - put it on this
        // buffer (this may make it one frame late but we can't do any better).
        (*buf_out)->s.new_chap = sync->chap_mark;
        sync->chap_mark = 0;
    }

    /* Update UI */
    UpdateState( w );

    return HB_WORK_OK;
}
// sync*Init does nothing because sync has a special initializer
// that takes care of initializing video and all audio tracks
/* No-op: all real setup happens in sync's special initializer (see the
 * comment above). Void-cast the parameters to silence unused-parameter
 * warnings under -Wextra. */
int syncVideoInit( hb_work_object_t * w, hb_job_t * job)
{
    (void)w;
    (void)job;
    return 0;
}
/* Work object descriptor for the video sync thread: id, display name,
 * then the init/work/close entry points. */
hb_work_object_t hb_sync_video =
{
    WORK_SYNC_VIDEO,
    "Video Synchronization",
    syncVideoInit,
    syncVideoWork,
    syncVideoClose
};
/***********************************************************************
* Close Audio
***********************************************************************
*
**********************************************************************/
/* Tear down one audio sync thread.
 *
 * Frees this thread's private resources (silence buffer, resampler
 * state) and drops its reference on the shared common block; the last
 * thread out destroys the common block itself.
 */
void syncAudioClose( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;
    hb_sync_audio_t   * sync = &pv->type.audio;

    /* free(NULL) is a no-op, so no guard is needed (C11 7.22.3.3). */
    free( sync->silence_buf );

    /* src_delete() is only called with a valid libsamplerate state;
     * keep the guard since it is not documented to accept NULL. */
    if ( sync->state )
    {
        src_delete( sync->state );
    }

    /* The common block is shared by the video thread and every audio
     * thread; whoever decrements the refcount to zero destroys it. */
    hb_lock( pv->common->mutex );
    if ( --pv->common->ref == 0 )
    {
        hb_unlock( pv->common->mutex );
        hb_cond_close( &pv->common->next_frame );
        hb_lock_close( &pv->common->mutex );
        free( pv->common->first_pts );
        free( pv->common );
    }
    else
    {
        hb_unlock( pv->common->mutex );
    }

    free( pv );
    w->private_data = NULL;
}
/* No-op: all real setup happens in sync's special initializer (see
 * InitAudio below). Void-cast the parameters to silence
 * unused-parameter warnings under -Wextra. */
int syncAudioInit( hb_work_object_t * w, hb_job_t * job)
{
    (void)w;
    (void)job;
    return 0;
}
/***********************************************************************
* SyncAudio
***********************************************************************
*
**********************************************************************/
/* Process one audio buffer for A/V sync.
 *
 * Waits until the global pts offset is known and (for point-to-point)
 * until the video thread has found the start point, drops audio that
 * is in the past, fills gaps with silence (or slips the clocks when
 * drop_video_to_sync is set), and retimes the surviving buffer via
 * OutputAudioFrame(). Returns HB_WORK_OK / HB_WORK_DONE.
 */
static int syncAudioWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                       hb_buffer_t ** buf_out )
{
    hb_work_private_t * pv = w->private_data;
    hb_job_t          * job = pv->job;
    hb_sync_audio_t   * sync = &pv->type.audio;
    hb_buffer_t       * buf;
    int64_t start;

    *buf_out = NULL;
    buf = *buf_in;
    *buf_in = NULL;
    /* if the next buffer is an eof send it downstream */
    if ( buf->size <= 0 )
    {
        hb_buffer_close( &buf );
        *buf_out = hb_buffer_init( 0 );
        /* Mark this stream "done" so the pts-offset scan can't stall
         * on it (slot 0 is video; audio track i uses slot i+1). */
        pv->common->first_pts[sync->index+1] = INT64_MAX - 1;
        return HB_WORK_DONE;
    }

    /* Wait till we can determine the initial pts of all streams */
    if( pv->common->pts_offset == INT64_MIN )
    {
        pv->common->first_pts[sync->index+1] = buf->s.start;
        hb_lock( pv->common->mutex );
        while( pv->common->pts_offset == INT64_MIN && !*w->done)
        {
            // Full fifos will make us wait forever, so get the
            // pts offset from the available streams if full
            if (hb_fifo_is_full(w->fifo_in))
            {
                getPtsOffset( w );
                hb_cond_broadcast( pv->common->next_frame );
            }
            else if ( checkPtsOffset( w ) )
                hb_cond_broadcast( pv->common->next_frame );
            else
                hb_cond_timedwait( pv->common->next_frame, pv->common->mutex, 200 );
        }
        hb_unlock( pv->common->mutex );
    }

    // Wait for start frame if doing point-to-point
    //
    // When doing p-to-p, video leads the way. The video thead will set
    // start_found when we have reached the start point.
    //
    // When doing frame based p-to-p, as each video frame is processed
    // it advances audio_pts_thresh which informs us how much audio must
    // be dropped.
    hb_lock( pv->common->mutex );
    while ( !pv->common->start_found && !*w->done )
    {
        if ( pv->common->audio_pts_thresh < 0 )
        {
            // I would initialize this in hb_sync_init, but
            // job->pts_to_start can be modified by reader
            // after hb_sync_init is called.
            pv->common->audio_pts_thresh = job->pts_to_start;
        }
        if ( buf->s.start < pv->common->audio_pts_thresh )
        {
            /* Audio is before the start point: discard it. */
            hb_buffer_close( &buf );
            hb_unlock( pv->common->mutex );
            return HB_WORK_OK;
        }
        // We should only get here when doing frame based p-to-p.
        // In frame based p-to-p, the video sync thread updates
        // audio_pts_thresh as it discards frames. So wait here
        // until the current audio frame needs to be discarded
        // or start point is found.
        while ( !pv->common->start_found &&
                buf->s.start >= pv->common->audio_pts_thresh && !*w->done )
        {
            hb_cond_timedwait( pv->common->next_frame, pv->common->mutex, 10 );
            // There is an unfortunate unavoidable deadlock that can occur.
            // Since we need to wait for a specific frame in syncVideoWork,
            // syncAudioWork can be stalled indefinitely. The video decoder
            // often drops multiple of the initial frames after starting
            // because they require references that have not been decoded yet.
            // This allows a lot of audio to be queued in the fifo and the
            // audio fifo fills before we get a single video frame. So we
            // must drop some audio to unplug the pipeline and allow the first
            // video frame to be decoded.
            if ( hb_fifo_is_full(w->fifo_in) )
            {
                // Drain the input fifo, keeping only the newest buffer.
                // NOTE(review): the buffer previously held in `buf` is
                // overwritten here without hb_buffer_close() — looks like
                // a leak of one buffer per drain; confirm against upstream.
                hb_buffer_t *tmp;
                tmp = buf = hb_fifo_get( w->fifo_in );
                while ( tmp )
                {
                    tmp = hb_fifo_get( w->fifo_in );
                    if ( tmp )
                    {
                        hb_buffer_close( &buf );
                        buf = tmp;
                    }
                }
            }
        }
    }
    /* Apply the accumulated audio clock slip (still under the lock). */
    start = buf->s.start - pv->common->audio_pts_slip;
    hb_unlock( pv->common->mutex );

    // When doing p-to-p, video determines when the start point has been
    // found. Since audio and video are processed asynchronously, there
    // may yet be audio packets in the pipe that are before the start point.
    // These packets will have negative start times after applying
    // audio_pts_slip.
    if (start < 0)
    {
        hb_buffer_close(&buf);
        return HB_WORK_OK;
    }

    /* End of point-to-point range: emit EOF downstream. */
    if( job->frame_to_stop && pv->common->count_frames >= job->frame_to_stop )
    {
        hb_buffer_close( &buf );
        *buf_out = hb_buffer_init( 0 );
        return HB_WORK_DONE;
    }

    if( job->pts_to_stop && sync->next_start >= job->pts_to_stop )
    {
        hb_buffer_close( &buf );
        *buf_out = hb_buffer_init( 0 );
        return HB_WORK_DONE;
    }

    // audio time went backwards.
    // If our output clock is more than a half frame ahead of the
    // input clock drop this frame to move closer to sync.
    // Otherwise drop frames until the input clock matches the output clock.
    if ( sync->next_start - start > 90*15 )   /* 15 ms in 90 kHz ticks */
    {
        // Discard data that's in the past.
        if ( sync->first_drop == 0 )
        {
            sync->first_drop = start;
        }
        ++sync->drop_count;
        hb_buffer_close( &buf );
        return HB_WORK_OK;
    }
    if ( sync->first_drop )
    {
        // we were dropping old data but input buf time is now current
        hb_log( "sync: audio 0x%x time went backwards %d ms, dropped %d frames "
                "(start %"PRId64", next %"PRId64")", w->audio->id,
                (int)( sync->next_start - sync->first_drop ) / 90,
                sync->drop_count, sync->first_drop, (int64_t)sync->next_start );
        sync->first_drop = 0;
        sync->drop_count = 0;
    }
    if ( start - sync->next_start >= (90 * 70) )   /* gap >= 70 ms */
    {
        if ( start - sync->next_start > (90000LL * 60) )
        {
            // there's a gap of more than a minute between the last
            // frame and this. assume we got a corrupted timestamp
            // and just drop the next buf.
            hb_log( "sync: %d minute time gap in audio 0x%x - dropping buf"
                    " start %"PRId64", next %"PRId64,
                    (int)((start - sync->next_start) / (90000*60)),
                    w->audio->id, start, (int64_t)sync->next_start );
            hb_buffer_close( &buf );
            return HB_WORK_OK;
        }
        /*
         * there's a gap of at least 70ms between the last
         * frame we processed & the next. Fill it with silence.
         * Or in the case of DCA, skip some frames from the
         * other streams.
         */
        if ( sync->drop_video_to_sync )
        {
            hb_log( "sync: audio gap %d ms. Skipping frames. Audio 0x%x"
                    " start %"PRId64", next %"PRId64,
                    (int)((start - sync->next_start) / 90),
                    w->audio->id, start, (int64_t)sync->next_start );
            hb_lock( pv->common->mutex );
            pv->common->audio_pts_slip += (start - sync->next_start);
            pv->common->video_pts_slip += (start - sync->next_start);
            hb_unlock( pv->common->mutex );
            *buf_out = OutputAudioFrame( w->audio, buf, sync );
            return HB_WORK_OK;
        }
        hb_log( "sync: adding %d ms of silence to audio 0x%x"
                " start %"PRId64", next %"PRId64,
                (int)((start - sync->next_start) / 90),
                w->audio->id, start, (int64_t)sync->next_start );
        InsertSilence( w, start - sync->next_start );
    }

    /*
     * When we get here we've taken care of all the dups and gaps in the
     * audio stream and are ready to inject the next input frame into
     * the output stream.
     */
    *buf_out = OutputAudioFrame( w->audio, buf, sync );
    return HB_WORK_OK;
}
/* Work object descriptor for an audio sync thread: id, display name,
 * then the init/work/close entry points. The name now matches the
 * spacing convention of "Video Synchronization" above. */
hb_work_object_t hb_sync_audio =
{
    WORK_SYNC_AUDIO,
    "Audio Synchronization",
    syncAudioInit,
    syncAudioWork,
    syncAudioClose
};
/* Create and register the sync work object for audio track i.
 *
 * Allocates the thread's private data, wires up the input/output
 * fifos, and prepares gap-filling state:
 *  - AC-3/AAC passthru: pre-encode one frame of silence with the
 *    matching ffmpeg encoder so gaps can be filled in the passthru
 *    stream; on any encoder failure fall back to dropping video to
 *    resync instead.
 *  - other passthru codecs: always drop video to sync (no way to
 *    synthesize silence).
 *  - decoded audio: create a libsamplerate converter for rate changes.
 */
static void InitAudio( hb_job_t * job, hb_sync_common_t * common, int i )
{
    hb_work_object_t  * w;
    hb_work_private_t * pv;
    hb_sync_audio_t   * sync;

    pv = calloc( 1, sizeof( hb_work_private_t ) );
    sync = &pv->type.audio;
    sync->index = i;
    pv->job    = job;
    pv->common = common;
    /* One more participant in the shared sync state (see syncAudioClose
     * for the matching decrement). */
    pv->common->ref++;
    pv->common->pts_count++;

    w = hb_get_work( WORK_SYNC_AUDIO );
    w->private_data = pv;
    w->audio = hb_list_item( job->list_audio, i );
    w->fifo_in = w->audio->priv.fifo_raw;
    /* Passthru audio skips the encoder, so it feeds fifo_out directly;
     * otherwise sync output goes to the encoder via fifo_sync. */
    if ( w->audio->config.out.codec & HB_ACODEC_PASS_FLAG )
    {
        w->fifo_out = w->audio->priv.fifo_out;
    }
    else
    {
        w->fifo_out = w->audio->priv.fifo_sync;
    }

    if( w->audio->config.out.codec == HB_ACODEC_AC3_PASS ||
        w->audio->config.out.codec == HB_ACODEC_AAC_PASS )
    {
        /* Have a silent AC-3/AAC frame ready in case we have to fill a
           gap */
        AVCodec        * codec;
        AVCodecContext * c;

        switch ( w->audio->config.out.codec )
        {
            case HB_ACODEC_AC3_PASS:
            {
                codec = avcodec_find_encoder( AV_CODEC_ID_AC3 );
            } break;
            case HB_ACODEC_AAC_PASS:
            {
                codec = avcodec_find_encoder_by_name("aac");
            } break;
            default:
            {
                // Never gets here
                codec = NULL; // Silence compiler warning
            } break;
        }

        /* Configure the encoder to match the incoming passthru stream
         * so the silent frame splices in cleanly. */
        c = avcodec_alloc_context3(codec);
        c->bit_rate = w->audio->config.in.bitrate;
        c->sample_rate = w->audio->config.in.samplerate;
        c->channels =
            av_get_channel_layout_nb_channels(w->audio->config.in.channel_layout);
        hb_ff_set_sample_fmt(c, codec, AV_SAMPLE_FMT_FLT);
        if (w->audio->config.in.channel_layout == AV_CH_LAYOUT_STEREO_DOWNMIX)
        {
            c->channel_layout = AV_CH_LAYOUT_STEREO;
        }
        else
        {
            c->channel_layout = w->audio->config.in.channel_layout;
        }

        if (hb_avcodec_open(c, codec, NULL, 0) < 0)
        {
            hb_log("sync: track %d, hb_avcodec_open() failed, dropping video to sync",
                   w->audio->config.out.track);
            sync->drop_video_to_sync = 1;
        }
        else
        {
            // Prepare input frame
            AVFrame frame = { .nb_samples = c->frame_size, .pts = 0, };
            int input_size = av_samples_get_buffer_size(NULL, c->channels,
                                                        frame.nb_samples,
                                                        c->sample_fmt, 1);
            /* All-zero samples == silence for float PCM. */
            uint8_t *zeros = calloc(1, input_size);
            avcodec_fill_audio_frame(&frame, c->channels, c->sample_fmt, zeros,
                                     input_size, 1);

            // Allocate enough space for the encoded silence
            // The output should be < the input
            sync->silence_buf = malloc( input_size );

            // There is some delay in getting output from some audio encoders.
            // So encode a few packets till we get output.
            int ii;
            for ( ii = 0; ii < 10; ii++ )
            {
                // Prepare output packet
                AVPacket pkt;
                int got_packet;
                av_init_packet(&pkt);
                pkt.data = sync->silence_buf;
                pkt.size = input_size;

                int ret = avcodec_encode_audio2( c, &pkt, &frame, &got_packet);
                if ( ret < 0 )
                {
                    hb_log("sync: track %d, avcodec_encode_audio() failed, dropping video to sync",
                           w->audio->config.out.track);
                    sync->drop_video_to_sync = 1;
                    break;
                }

                if ( got_packet )
                {
                    sync->silence_size = pkt.size;
                    break;
                }
                else if (ii + 1 == 10)
                {
                    /* Encoder never produced output: give up on
                     * silence insertion for this track. */
                    hb_log("sync: track %d, failed to get output packet, dropping video to sync",
                           w->audio->config.out.track);
                    sync->drop_video_to_sync = 1;
                }
            }
            free( zeros );
            hb_avcodec_close( c );
        }
        av_free( c );
    }
    else
    {
        if( w->audio->config.out.codec & HB_ACODEC_PASS_FLAG )
        {
            /* Passthru codec we can't synthesize silence for. */
            sync->drop_video_to_sync = 1;
        }
        else
        {
            /* Not passthru, initialize libsamplerate */
            int error;
            sync->state = src_new( SRC_SINC_MEDIUM_QUALITY,
                                   hb_mixdown_get_discrete_channel_count( w->audio->config.out.mixdown ),
                                   &error );
            sync->data.end_of_input = 0;
        }
    }

    /* Convert the user's gain (dB) to a linear multiplier once. */
    sync->gain_factor = pow(10, w->audio->config.out.gain / 20);

    hb_list_add( job->list_work, w );
}
/* Retime (and for decoded audio, resample and apply gain to) one audio
 * buffer, stamping it onto the track's continuous output clock.
 *
 * Consumes `buf` when resampling replaces it. Returns the buffer to
 * send downstream, or NULL when the resampler produced no output.
 * Advances sync->next_start by the (possibly fractional) duration.
 */
static hb_buffer_t * OutputAudioFrame( hb_audio_t *audio, hb_buffer_t *buf,
                                       hb_sync_audio_t *sync )
{
    /* next_start accumulates fractional durations; truncate to 90 kHz
     * ticks for the stamped start time. */
    int64_t start = (int64_t)sync->next_start;

    // Can't count of buf->s.stop - buf->s.start for accurate duration
    // due to integer rounding, so use buf->s.duration when it is set
    // (which should be always if I didn't miss anything)
    double duration;
    if ( buf->s.duration > 0 )
        duration = buf->s.duration;
    else
        duration = buf->s.stop - buf->s.start;

    if ( !( audio->config.out.codec & HB_ACODEC_PASS_FLAG ) )
    {
        // Audio is not passthru. Check if we need to modify the audio
        // in any way.
        if( audio->config.in.samplerate != audio->config.out.samplerate )
        {
            /* do sample rate conversion */
            int count_in, count_out;
            hb_buffer_t * buf_raw = buf;
            /* Samples here are interleaved floats, one per channel. */
            int sample_size = hb_mixdown_get_discrete_channel_count( audio->config.out.mixdown ) *
                              sizeof( float );

            count_in  = buf_raw->size / sample_size;
            /*
             * When using stupid rates like 44.1 there will always be some
             * truncation error. E.g., a 1536 sample AC3 frame will turn into a
             * 1536*44.1/48.0 = 1411.2 sample frame. If we just truncate the .2
             * the error will build up over time and eventually the audio will
             * substantially lag the video. libsamplerate will keep track of the
             * fractional sample & give it to us when appropriate if we give it
             * an extra sample of space in the output buffer.
             */
            count_out = ( duration * audio->config.out.samplerate ) / 90000 + 1;

            sync->data.input_frames = count_in;
            sync->data.output_frames = count_out;
            sync->data.src_ratio = (double)audio->config.out.samplerate /
                                   (double)audio->config.in.samplerate;

            buf = hb_buffer_init( count_out * sample_size );
            sync->data.data_in  = (float *) buf_raw->data;
            sync->data.data_out = (float *) buf->data;
            if( src_process( sync->state, &sync->data ) )
            {
                /* XXX If this happens, we're screwed */
                hb_log( "sync: audio 0x%x src_process failed", audio->id );
            }
            hb_buffer_close( &buf_raw );

            if (sync->data.output_frames_gen <= 0)
            {
                // XXX: don't send empty buffers downstream (EOF)
                // possibly out-of-sync audio is better than no audio at all
                hb_buffer_close(&buf);
                return NULL;
            }
            buf->size = sync->data.output_frames_gen * sample_size;
            /* Recompute duration from the frames actually generated. */
            duration = (double)( sync->data.output_frames_gen * 90000 ) /
                       audio->config.out.samplerate;
        }
        if( audio->config.out.gain > 0.0 )
        {
            /* Positive gain can clip, so clamp each sample to [-1, 1]. */
            int count, ii;

            count  = buf->size / sizeof(float);
            for ( ii = 0; ii < count; ii++ )
            {
                double sample;

                sample = (double)*(((float*)buf->data)+ii);
                sample *= sync->gain_factor;
                if (sample > 0)
                    sample = MIN(sample, 1.0);
                else
                    sample = MAX(sample, -1.0);
                *(((float*)buf->data)+ii) = sample;
            }
        }
        else if( audio->config.out.gain < 0.0 )
        {
            /* Attenuation can't clip; no clamping required. */
            int count, ii;

            count  = buf->size / sizeof(float);
            for ( ii = 0; ii < count; ii++ )
            {
                double sample;

                sample = (double)*(((float*)buf->data)+ii);
                sample *= sync->gain_factor;
                *(((float*)buf->data)+ii) = sample;
            }
        }
    }

    buf->s.type = AUDIO_BUF;
    buf->s.frametype = HB_FRAME_AUDIO;

    buf->s.start = start;
    sync->next_start += duration;
    buf->s.stop  = (int64_t)sync->next_start;
    return buf;
}
/* Fill an audio gap of `duration` 90 kHz ticks with silence.
 *
 * For passthru tracks this pushes copies of the pre-encoded silent
 * frame built in InitAudio(); for decoded tracks it pushes zeroed
 * float PCM frames, shrinking the final frame to fit the remainder.
 */
static void InsertSilence( hb_work_object_t * w, int64_t duration )
{
    hb_work_private_t * pv = w->private_data;
    hb_sync_audio_t *sync = &pv->type.audio;
    hb_buffer_t     *buf;
    hb_fifo_t       *fifo;
    int frame_dur;

    // to keep pass-thru and regular audio in sync we generate silence in
    // frame-sized units. If the silence duration isn't an integer multiple
    // of the frame duration we will truncate or round up depending on
    // which minimizes the timing error.
    if( w->audio->config.out.codec & HB_ACODEC_PASS_FLAG )
    {
        frame_dur = ( 90000 * w->audio->config.in.samples_per_frame ) /
                              w->audio->config.in.samplerate;
    }
    else
    {
        /* NOTE(review): mixes out.samples_per_frame with in.samplerate;
         * presumably intentional because OutputAudioFrame() resamples —
         * confirm before changing. */
        frame_dur = ( 90000 * w->audio->config.out.samples_per_frame ) /
                              w->audio->config.in.samplerate;
    }

    /* Keep emitting frames while at least a quarter frame remains. */
    while (duration >= frame_dur >> 2)
    {
        if( w->audio->config.out.codec & HB_ACODEC_PASS_FLAG )
        {
            buf        = hb_buffer_init( sync->silence_size );
            buf->s.start = sync->next_start;
            buf->s.stop  = buf->s.start + frame_dur;
            memcpy( buf->data, sync->silence_buf, buf->size );
            fifo = w->audio->priv.fifo_out;
            duration -= frame_dur;
        }
        else
        {
            int channel_count = hb_mixdown_get_discrete_channel_count( w->audio->config.out.mixdown );
            int size = sizeof( float ) *
                       w->audio->config.out.samples_per_frame *
                       channel_count;
            if (frame_dur > duration)
            {
                /* Last partial frame: shrink it to the remaining gap. */
                int samples = duration * w->audio->config.in.samplerate / 90000;
                if (samples == 0)
                {
                    break;
                }
                size = sizeof(float) * samples * channel_count;
                frame_dur = (90000 * samples) / w->audio->config.in.samplerate;
            }
            buf = hb_buffer_init(size);
            buf->s.start = sync->next_start;
            buf->s.duration = frame_dur;
            buf->s.stop  = buf->s.start + frame_dur;
            memset( buf->data, 0, buf->size );
            fifo = w->audio->priv.fifo_sync;
            duration -= frame_dur;
        }
        /* Retime via the normal output path so next_start advances. */
        buf = OutputAudioFrame( w->audio, buf, sync );
        hb_fifo_push( fifo, buf );
    }
}
/* Update encode-progress state (fps, ETA) once per video frame.
 *
 * Keeps a 4-entry sliding window of (date, frame-count) samples, one
 * per second, to compute the current rate; the average rate and ETA
 * only become meaningful a few seconds in.
 */
static void UpdateState( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;
    hb_sync_video_t   * sync = &pv->type.video;
    hb_state_t state;

    if( !pv->common->count_frames )
    {
        /* First frame: start the clocks. */
        sync->st_first = hb_get_date();
        pv->job->st_pause_date = -1;
        pv->job->st_paused = 0;
    }
    pv->common->count_frames++;

    if (pv->job->indepth_scan)
    {
        // Progress for indept scan is handled by reader
        // pv->common->count_frames is used during indepth_scan
        // to find start & end points.
        return;
    }

    /* Slide the sampling window forward at most once per second. */
    if( hb_get_date() > sync->st_dates[3] + 1000 )
    {
        memmove( &sync->st_dates[0], &sync->st_dates[1],
                 3 * sizeof( uint64_t ) );
        memmove( &sync->st_counts[0], &sync->st_counts[1],
                 3 * sizeof( uint64_t ) );
        sync->st_dates[3]  = hb_get_date();
        sync->st_counts[3] = pv->common->count_frames;
    }

#define p state.param.working
    state.state = HB_STATE_WORKING;
    p.progress  = (float) pv->common->count_frames / (float) sync->count_frames_max;
    if( p.progress > 1.0 )
    {
        p.progress = 1.0;
    }
    /* NOTE(review): if the window hasn't filled yet, st_dates[0] may
     * equal st_dates[3], making this a float divide by zero — confirm
     * whether callers tolerate inf here. */
    p.rate_cur   = 1000.0 *
        (float) ( sync->st_counts[3] - sync->st_counts[0] ) /
        (float) ( sync->st_dates[3] - sync->st_dates[0] );
    if( hb_get_date() > sync->st_first + 4000 )
    {
        int eta;
        p.rate_avg = 1000.0 * (float) sync->st_counts[3] /
            (float) ( sync->st_dates[3] - sync->st_first - pv->job->st_paused);
        eta = (float) ( sync->count_frames_max - sync->st_counts[3] ) /
            p.rate_avg;
        p.hours   = eta / 3600;
        p.minutes = ( eta % 3600 ) / 60;
        p.seconds = eta % 60;
    }
    else
    {
        /* Too early for a stable average. */
        p.rate_avg = 0.0;
        p.hours    = -1;
        p.minutes  = -1;
        p.seconds  = -1;
    }
#undef p

    hb_set_state( pv->job->h, &state );
}
/* Update progress state while searching for a point-to-point start
 * point (frame- or pts-based).
 *
 * `start` is the slipped pts of the frame just examined; progress is
 * measured against job->frame_to_start or job->pts_to_start.
 *
 * Fix: `state` is now zero-initialized. Previously p.rate_cur was
 * never assigned, and p.rate_avg was left unassigned on the
 * `now > st_first` branch, so indeterminate stack values were passed
 * to hb_set_state().
 */
static void UpdateSearchState( hb_work_object_t * w, int64_t start )
{
    hb_work_private_t * pv = w->private_data;
    hb_sync_video_t   * sync = &pv->type.video;
    hb_state_t state = {0};  /* ensure rate_cur/rate_avg are defined */
    uint64_t now;
    double avg;

    now = hb_get_date();
    if( !pv->common->count_frames )
    {
        /* First frame examined: start the clocks. */
        sync->st_first = now;
        pv->job->st_pause_date = -1;
        pv->job->st_paused = 0;
    }
    pv->common->count_frames++;

    if (pv->job->indepth_scan)
    {
        // Progress for indept scan is handled by reader
        // pv->common->count_frames is used during indepth_scan
        // to find start & end points.
        return;
    }

#define p state.param.working
    state.state = HB_STATE_SEARCHING;
    if ( pv->job->frame_to_start )
        p.progress  = (float) pv->common->count_frames /
                      (float) pv->job->frame_to_start;
    else if ( pv->job->pts_to_start )
        p.progress  = (float) start / (float) pv->job->pts_to_start;
    else
        p.progress = 0;
    if( p.progress > 1.0 )
    {
        p.progress = 1.0;
    }
    if (now > sync->st_first)
    {
        int eta = 0;

        if ( pv->job->frame_to_start )
        {
            avg = 1000.0 * (double)pv->common->count_frames / (now - sync->st_first);
            eta = ( pv->job->frame_to_start - pv->common->count_frames ) / avg;
        }
        else if ( pv->job->pts_to_start )
        {
            avg = 1000.0 * (double)start / (now - sync->st_first);
            eta = ( pv->job->pts_to_start - start ) / avg;
        }
        p.hours   = eta / 3600;
        p.minutes = ( eta % 3600 ) / 60;
        p.seconds = eta % 60;
    }
    else
    {
        p.rate_avg = 0.0;
        p.hours    = -1;
        p.minutes  = -1;
        p.seconds  = -1;
    }
#undef p

    hb_set_state( pv->job->h, &state );
}
/* Compute the global pts offset as the minimum first-pts reported
 * across all streams, and seed the audio/video slip values with it. */
static void getPtsOffset( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;
    int64_t             min_pts = INT64_MAX;
    int                 idx;

    for ( idx = 0; idx < pv->common->pts_count; idx++ )
    {
        int64_t pts = pv->common->first_pts[idx];
        if ( pts < min_pts )
        {
            min_pts = pts;
        }
    }
    pv->common->pts_offset     = min_pts;
    pv->common->audio_pts_slip = min_pts;
    pv->common->video_pts_slip = min_pts;
}
/* Return 1 (after computing the pts offset) once every stream has
 * reported its first pts; return 0 while any stream is still pending.
 * A slot still holding INT64_MAX means that stream has not yet
 * delivered its first packet. */
static int checkPtsOffset( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;
    int                 idx;

    for ( idx = 0; idx < pv->common->pts_count; idx++ )
    {
        if ( pv->common->first_pts[idx] != INT64_MAX )
        {
            continue;
        }
        return 0;
    }
    getPtsOffset( w );
    return 1;
}
HandBrake-0.10.2/libhb/lang.h 0000664 0001752 0001752 00000002525 12463330511 016213 0 ustar handbrake handbrake /* lang.h
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#ifndef HB_LANG_H
#define HB_LANG_H
/* One entry in HandBrake's ISO-639 language table; looked up by the
 * lang_for_* functions declared below. */
typedef struct iso639_lang_t
{
    char * eng_name;        /* Description in English */
    char * native_name;     /* Description in native language */
    char * iso639_1;        /* ISO-639-1 (2 characters) code */
    char * iso639_2;        /* ISO-639-2/t (3 character) code */
    char * iso639_2b;       /* ISO-639-2/b code (if different from above) */
} iso639_lang_t;
#ifdef __cplusplus
extern "C" {
#endif
/* find language associated with ISO-639-1 language code */
iso639_lang_t * lang_for_code( int code );
/* find language associated with ISO-639-2 language code */
iso639_lang_t * lang_for_code2( const char *code2 );
/* ISO-639-1 code for language */
int lang_to_code(const iso639_lang_t *lang);
iso639_lang_t * lang_for_english( const char * english );
/*
* Get the next language in the list.
* Returns NULL if there are no more languages.
* Pass NULL to get the first language in the list.
*/
const iso639_lang_t* lang_get_next(const iso639_lang_t *last);
#ifdef __cplusplus
}
#endif
#endif
HandBrake-0.10.2/libhb/oclnv12toyuv.h 0000664 0001752 0001752 00000001554 12463330511 017666 0 ustar handbrake handbrake /* oclnv12toyuv.h
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: <http://handbrake.fr/>.
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
Authors: Peng Gao
Li Cao
*/
#ifndef HB_OCLNV12TOYUV_H
#define HB_OCLNV12TOYUV_H
#include "common.h"
#include "extras/cl.h"
#include "openclwrapper.h"
/*
* nv12 to yuv interface
* bufi is input frame of nv12, w is input frame width, h is input frame height
*/
int hb_ocl_nv12toyuv(uint8_t *bufi[], int p, int w, int h, int *crop, hb_va_dxva2_t *dxva2, int decomb, int detelecine);
#endif // HB_OCLNV12TOYUV_H
HandBrake-0.10.2/libhb/deblock.c 0000664 0001752 0001752 00000026502 12265031673 016700 0 ustar handbrake handbrake /*
Copyright (C) 2005 Michael Niedermayer
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "hb.h"
#include "hbffmpeg.h"
#define PP7_QP_DEFAULT 5
#define PP7_MODE_DEFAULT 2
#define XMIN(a,b) ((a) < (b) ? (a) : (b))
#define XMAX(a,b) ((a) > (b) ? (a) : (b))
typedef short DCTELEM;
//===========================================================================//
static const uint8_t __attribute__((aligned(8))) pp7_dither[8][8] =
{
{ 0, 48, 12, 60, 3, 51, 15, 63, },
{ 32, 16, 44, 28, 35, 19, 47, 31, },
{ 8, 56, 4, 52, 11, 59, 7, 55, },
{ 40, 24, 36, 20, 43, 27, 39, 23, },
{ 2, 50, 14, 62, 1, 49, 13, 61, },
{ 34, 18, 46, 30, 33, 17, 45, 29, },
{ 10, 58, 6, 54, 9, 57, 5, 53, },
{ 42, 26, 38, 22, 41, 25, 37, 21, },
};
/* Private state of the pp7 deblock filter. */
struct hb_filter_private_s
{
    int pp7_qp;             /* fixed quantizer from settings; 0 = use per-MB qp map */
    int pp7_mode;           /* thresholding mode: 0 hard, 1 soft, 2 medium */
    int pp7_mpeg2;          /* non-zero: halve qp values read from the qp map */
    int pp7_temp_stride;    /* 16-aligned padded stride of the scratch buffer */
    uint8_t * pp7_src;      /* padded scratch image, allocated in hb_deblock_init() */
};
static int hb_deblock_init( hb_filter_object_t * filter,
hb_filter_init_t * init );
static int hb_deblock_work( hb_filter_object_t * filter,
hb_buffer_t ** buf_in,
hb_buffer_t ** buf_out );
static void hb_deblock_close( hb_filter_object_t * filter );
/* Filter descriptor registered with libhb; settings string is "qp:mode". */
hb_filter_object_t hb_filter_deblock =
{
    .id = HB_FILTER_DEBLOCK,
    .enforce_order = 1,
    .name = "Deblock (pp7)",
    .settings = NULL,
    .init = hb_deblock_init,
    .work = hb_deblock_work,
    .close = hb_deblock_close,
};
/* First (vertical) pass of the pp7 transform: a 4-point butterfly applied
 * down each of four adjacent columns of the strided source image, writing
 * groups of four coefficients into dst. */
static inline void pp7_dct_a( DCTELEM * dst, uint8_t * src, int stride )
{
    int col;

    for( col = 0; col < 4; col++ )
    {
        int a = src[0*stride] + src[6*stride];
        int b = src[1*stride] + src[5*stride];
        int c = src[2*stride] + src[4*stride];
        int d = src[3*stride];
        int t = d + d;

        d = t - a;
        a = t + a;
        t = c + b;
        c = c - b;

        dst[0] = a + t;
        dst[2] = a - t;
        dst[1] = 2*d + c;
        dst[3] = d - c*2;

        src++;
        dst += 4;
    }
}
/* Second (horizontal) pass: the same 4-point butterfly as pp7_dct_a, but
 * operating on rows of the 4-wide coefficient array produced by pass one. */
static void pp7_dct_b( DCTELEM * dst, DCTELEM * src )
{
    int col;

    for( col = 0; col < 4; col++ )
    {
        int a = src[0*4] + src[6*4];
        int b = src[1*4] + src[5*4];
        int c = src[2*4] + src[4*4];
        int d = src[3*4];
        int t = d + d;

        d = t - a;
        a = t + a;
        t = c + b;
        c = c - b;

        dst[0*4] = a + t;
        dst[2*4] = a - t;
        dst[1*4] = 2*d + c;
        dst[3*4] = d - c*2;

        src++;
        dst++;
    }
}
#define N (1<<16)
#define N0 4
#define N1 5
#define N2 10
#define SN0 2
#define SN1 2.2360679775
#define SN2 3.16227766017
static const int pp7_factor[16] =
{
N/(N0*N0), N/(N0*N1), N/(N0*N0),N/(N0*N2),
N/(N1*N0), N/(N1*N1), N/(N1*N0),N/(N1*N2),
N/(N0*N0), N/(N0*N1), N/(N0*N0),N/(N0*N2),
N/(N2*N0), N/(N2*N1), N/(N2*N0),N/(N2*N2),
};
static int pp7_threshold[99][16];
/* Precompute the 99x16 requantization threshold table for every possible
 * qp value.  Must run before any pp7_*_threshold() call. */
static void pp7_init_threshold( void )
{
    int q, k;
    int bias = 0; /* retained from the original mplayer pp7 code */

    for( q = 0; q < 99; q++ )
    {
        for( k = 0; k < 16; k++ )
        {
            /* Kept as a single expression: the double->int truncation point
               matters for bit-exact results. */
            pp7_threshold[q][k] =
                ((k&1)?SN2:SN0) * ((k&4)?SN2:SN0) *
                XMAX(1,q) * (1<<2) - 1 - bias;
        }
    }
}
/* Hard thresholding requantizer: keep coefficients whose magnitude exceeds
 * the per-qp threshold, zero the rest, and return the weighted, rounded
 * reconstruction value. */
static int pp7_hard_threshold( DCTELEM * src, int qp )
{
    int k;
    int acc = src[0] * pp7_factor[0];

    for( k = 1; k < 16; k++ )
    {
        unsigned int t1 = pp7_threshold[qp][k];
        unsigned int t2 = t1 << 1;
        int coef = src[k];

        /* Single unsigned compare covers both positive and negative coef. */
        if( (unsigned)(coef + t1) > t2 )
        {
            acc += coef * pp7_factor[k];
        }
    }
    return (acc + (1<<11)) >> 12;
}
/* Medium thresholding requantizer: coefficients well above the threshold are
   kept unchanged, coefficients in a transition band just above it are shrunk
   toward zero, everything below is dropped.  Returns the weighted, rounded
   reconstruction value. */
static int pp7_medium_threshold( DCTELEM * src, int qp )
{
    int i;
    int a;
    a = src[0] * pp7_factor[0];
    for( i = 1; i < 16; i++ )
    {
        unsigned int threshold1 = pp7_threshold[qp][i];
        unsigned int threshold2 = (threshold1<<1);
        int level= src[i];
        /* Unsigned wrap trick: true when |level| exceeds threshold1. */
        if( ((unsigned)(level+threshold1)) > threshold2 )
        {
            if( ((unsigned)(level+2*threshold1)) > 2*threshold2 )
            {
                /* Far above threshold: keep the coefficient as-is. */
                a += level * pp7_factor[i];
            }
            else
            {
                /* Transition band: shrink toward zero by threshold1, scaled. */
                if( level>0 )
                {
                    a += 2*(level - (int)threshold1) * pp7_factor[i];
                }
                else
                {
                    a += 2*(level + (int)threshold1) * pp7_factor[i];
                }
            }
        }
    }
    /* Round and scale back down (pp7_factor entries are N = 1<<16 based). */
    return (a + (1<<11)) >> 12;
}
/* Soft thresholding requantizer: surviving coefficients are shrunk toward
 * zero by the threshold before accumulation; sub-threshold ones are dropped.
 * Returns the weighted, rounded reconstruction value. */
static int pp7_soft_threshold( DCTELEM * src, int qp )
{
    int k;
    int acc = src[0] * pp7_factor[0];

    for( k = 1; k < 16; k++ )
    {
        unsigned int t1 = pp7_threshold[qp][k];
        unsigned int t2 = t1 << 1;
        int coef = src[k];

        /* Single unsigned compare covers both positive and negative coef. */
        if( (unsigned)(coef + t1) > t2 )
        {
            if( coef > 0 )
            {
                acc += (coef - (int)t1) * pp7_factor[k];
            }
            else
            {
                acc += (coef + (int)t1) * pp7_factor[k];
            }
        }
    }
    return (acc + (1<<11)) >> 12;
}
/* Active requantizer, selected in hb_deblock_init() from the mode setting;
   defaults to hard thresholding. */
static int ( * pp7_requantize )( DCTELEM * src, int qp ) = pp7_hard_threshold;
/* Run the pp7 deblocking pass over one plane.
 *
 * dst/src are tightly packed with row pitch == `width` (the caller passes the
 * plane stride as width).  When pv->pp7_qp is 0, qp_store/qp_stride supply a
 * per-macroblock quantizer map; is_luma selects luma vs chroma geometry. */
static void pp7_filter( hb_filter_private_t * pv,
                        uint8_t * dst,
                        uint8_t * src,
                        int width,
                        int height,
                        uint8_t * qp_store,
                        int qp_stride,
                        int is_luma)
{
    int x, y;
    const int stride = is_luma ? pv->pp7_temp_stride : ((width+16+15)&(~15));
    /* The padded work image begins 8 rows into the scratch buffer; the first
       bytes of the same buffer double as DCT work areas (block/temp). */
    uint8_t * p_src = pv->pp7_src + 8*stride;
    DCTELEM * block = (DCTELEM *)(pv->pp7_src);
    DCTELEM * temp = (DCTELEM *)(pv->pp7_src + 32);
    if( !src || !dst )
    {
        return;
    }
    /* Copy the plane into the work buffer, mirroring 8 columns at each edge. */
    for( y = 0; y < height; y++ )
    {
        int index = 8 + 8*stride + y*stride;
        memcpy( p_src + index, src + y*width, width );
        for( x = 0; x < 8; x++ )
        {
            p_src[index - x - 1] = p_src[index + x ];
            p_src[index + width + x ] = p_src[index + width - x - 1];
        }
    }
    /* Mirror 8 rows above and below the image as well. */
    for( y = 0; y < 8; y++ )
    {
        memcpy( p_src + ( 7-y)*stride,
                p_src + ( y+8)*stride, stride );
        memcpy( p_src + (height+8+y)*stride,
                p_src + (height-y+7)*stride, stride );
    }
    for( y = 0; y < height; y++ )
    {
        /* Prime the column-transform cache for the 8 columns left of x=0. */
        for( x = -8; x < 0; x += 4 )
        {
            const int index = x + y*stride + (8-3)*(1+stride) + 8;
            uint8_t * src = p_src + index;
            DCTELEM * tp = temp+4*x;
            pp7_dct_a( tp+4*8, src, stride );
        }
        for( x = 0; x < width; )
        {
            const int qps = 3 + is_luma;
            int end = XMIN(x+8, width);
            int qp;
            /* Fixed qp from the filter settings, or the per-MB qp map. */
            if( pv->pp7_qp )
            {
                qp = pv->pp7_qp;
            }
            else
            {
                qp = qp_store[ (XMIN(x, width-1)>>qps) +
                               (XMIN(y, height-1)>>qps) * qp_stride ];
                if( pv->pp7_mpeg2 )
                {
                    qp >>= 1;
                }
            }
            for( ; x < end; x++ )
            {
                const int index = x + y*stride + (8-3)*(1+stride) + 8;
                uint8_t * src = p_src + index;
                DCTELEM * tp = temp+4*x;
                int v;
                /* The column transform is recomputed once every 4 pixels and
                   reused from the cache otherwise. */
                if( (x&3) == 0 )
                {
                    pp7_dct_a( tp+4*8, src, stride );
                }
                pp7_dct_b( block, tp );
                v = pp7_requantize( block, qp );
                v = (v + pp7_dither[y&7][x&7]) >> 6;
                /* Clamp to 0..255: negative -> 0, >255 -> 255 (assumes
                   arithmetic right shift of negative ints). */
                if( (unsigned)v > 255 )
                {
                    v = (-v) >> 31;
                }
                dst[x + y*width] = v;
            }
        }
    }
}
/* Initialize the pp7 deblock filter.
 *
 * Parses the "qp:mode" settings string, builds the threshold table, selects
 * the requantizer and allocates the padded scratch buffer.
 * Returns 0 on success, -1 on allocation failure. */
static int hb_deblock_init( hb_filter_object_t * filter,
                            hb_filter_init_t * init )
{
    filter->private_data = calloc( sizeof(struct hb_filter_private_s), 1 );
    hb_filter_private_t * pv = filter->private_data;
    if( pv == NULL )
    {
        return -1;
    }

    pv->pp7_qp    = PP7_QP_DEFAULT;
    pv->pp7_mode  = PP7_MODE_DEFAULT;
    pv->pp7_mpeg2 = 1; /*mpi->qscale_type;*/

    if( filter->settings )
    {
        sscanf( filter->settings, "%d:%d", &pv->pp7_qp, &pv->pp7_mode );
    }

    /* Clamp qp into the valid range of pp7_threshold (99 rows); an
       out-of-range user setting previously caused an out-of-bounds read
       in the requantizers. */
    if( pv->pp7_qp < 0 )
    {
        pv->pp7_qp = 0;
    }
    else if( pv->pp7_qp > 98 )
    {
        pv->pp7_qp = 98;
    }

    pp7_init_threshold();

    switch( pv->pp7_mode )
    {
        case 0:
            pp7_requantize = pp7_hard_threshold;
            break;
        case 1:
            pp7_requantize = pp7_soft_threshold;
            break;
        case 2:
            pp7_requantize = pp7_medium_threshold;
            break;
        default:
            /* Unknown mode: use the default (medium) rather than inheriting
               whatever a previous job left in the global pp7_requantize. */
            pp7_requantize = pp7_medium_threshold;
            break;
    }

    /* Scratch buffer: 16-aligned stride, 16 rows of padding plus 8 extra
       rows used as DCT work space. */
    int h = (init->height+16+15)&(~15);
    pv->pp7_temp_stride = (init->width+16+15)&(~15);
    pv->pp7_src = (uint8_t*)malloc( pv->pp7_temp_stride*(h+8)*sizeof(uint8_t) );
    if( pv->pp7_src == NULL )
    {
        free( pv );
        filter->private_data = NULL;
        return -1;
    }
    return 0;
}
/* Release the filter's private state.
 * Fix: pp7_src (allocated in hb_deblock_init) was previously never freed,
 * leaking the padded scratch buffer on every job. */
static void hb_deblock_close( hb_filter_object_t * filter )
{
    hb_filter_private_t * pv = filter->private_data;
    if( !pv )
    {
        return;
    }
    free( pv->pp7_src );
    free( pv );
    filter->private_data = NULL;
}
/* Process one video buffer: run pp7 over all three planes when a fixed qp
 * is configured, otherwise pass the buffer through untouched. */
static int hb_deblock_work( hb_filter_object_t * filter,
                            hb_buffer_t ** buf_in,
                            hb_buffer_t ** buf_out )
{
    hb_filter_private_t * pv = filter->private_data;
    hb_buffer_t * in = *buf_in;

    /* An empty buffer marks end-of-stream: hand it straight through. */
    if ( in->size <= 0 )
    {
        *buf_out = in;
        *buf_in = NULL;
        return HB_FILTER_DONE;
    }

    if( /*TODO: mpi->qscale ||*/ pv->pp7_qp )
    {
        hb_buffer_t * out = hb_video_buffer_init( in->f.width, in->f.height );
        int plane;

        /* Plane 0 is luma, planes 1 and 2 are chroma. */
        for( plane = 0; plane < 3; plane++ )
        {
            pp7_filter( pv,
                        out->plane[plane].data,
                        in->plane[plane].data,
                        in->plane[plane].stride,
                        in->plane[plane].height,
                        NULL, /* TODO: mpi->qscale*/
                        0,    /* TODO: mpi->qstride*/
                        plane == 0 );
        }

        out->s = in->s;
        hb_buffer_move_subs( out, in );
        *buf_out = out;
    }
    else
    {
        *buf_in = NULL;
        *buf_out = in;
    }
    return HB_FILTER_OK;
}
HandBrake-0.10.2/libhb/qsv_filter_pp.h 0000664 0001752 0001752 00000010126 12205472744 020154 0 ustar handbrake handbrake /* ********************************************************************* *\
Copyright (C) 2013 Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\* ********************************************************************* */
#ifndef QSV_FILTER_PP_H
#define QSV_FILTER_PP_H
#include "msdk/mfxplugin.h"
extern hb_buffer_t *link_buf_list( hb_filter_private_t *pv );
struct qsv_filter_task_s;
/* Per-task processing hooks handed to each plugin task. */
typedef struct{
    mfxFrameAllocator *alloc;   /* frame allocator supplied by the SDK core */
    mfxStatus (*process)(struct qsv_filter_task_s*,void*); /* processing callback */
    mfxCoreInterface *core;     /* Media SDK core interface */
}qsv_filter_processor_t;
/* One in-flight plugin task: an input/output surface pair plus its processor. */
typedef struct qsv_filter_task_s{
    mfxFrameSurface1 *in;
    mfxFrameSurface1 *out;
    int busy;                   /* non-zero while the task is scheduled; see get_free_task() */
    hb_filter_private_t *pv;
    qsv_filter_processor_t processor;
} qsv_filter_task_t;
/* Plugin-wide state registered with Media SDK. */
typedef struct qsv_filter_private_s{
    int is_init_done;
    mfxCoreInterface *core;
    mfxVideoParam *videoparam;
    mfxPluginParam pluginparam;
    hb_filter_private_t *pv;
    mfxPlugin plug;
    hb_list_t *tasks;           /* task pool; entries presumably qsv_filter_task_t — confirm */
} qsv_filter_t;
/* Handshake between the filter thread and the plugin for one frame. */
typedef struct hb_qsv_sync_s{
    int frame_go;               /* signal: a frame is ready to process */
    int status;
    hb_cond_t *frame_completed;
    hb_lock_t *frame_completed_lock;
    hb_buffer_t *in;
    hb_buffer_t *out;
} hb_qsv_sync_t;
/* Private state of the QSV post-processing filter. */
typedef struct hb_filter_private_s
{
    hb_job_t *job;
    hb_list_t *list;
    hb_qsv_sync_t pre;          /* pre-processing handoff */
    hb_qsv_sync_t pre_busy;
    hb_qsv_sync_t post;         /* post-processing handoff */
    hb_qsv_sync_t post_busy;
    av_qsv_space *vpp_space;
    hb_list_t *qsv_user;
    struct SwsContext* sws_context_to_nv12;   /* swscale ctx used on input */
    struct SwsContext* sws_context_from_nv12; /* swscale ctx used on output */
} hb_filter_private_t_qsv;
// methods to be called by Media SDK
mfxStatus MFX_CDECL qsv_PluginInit(mfxHDL pthis, mfxCoreInterface *core);
mfxStatus MFX_CDECL qsv_PluginClose (mfxHDL pthis);
mfxStatus MFX_CDECL qsv_GetPluginParam(mfxHDL pthis, mfxPluginParam *par);
mfxStatus MFX_CDECL qsv_Submit(mfxHDL pthis, const mfxHDL *in, mfxU32 in_num, const mfxHDL *out, mfxU32 out_num, mfxThreadTask *task);
mfxStatus MFX_CDECL qsv_Execute(mfxHDL pthis, mfxThreadTask task, mfxU32 uid_p, mfxU32 uid_a);
mfxStatus MFX_CDECL qsv_FreeResources(mfxHDL pthis, mfxThreadTask task, mfxStatus sts);
// methods to be called by us
mfxStatus plugin_init(qsv_filter_t*,mfxVideoParam*);
mfxStatus plugin_close(qsv_filter_t*);
//internal functions
mfxExtBuffer* get_ext_buffer(mfxExtBuffer**, mfxU32, mfxU32);
int get_free_task(hb_list_t*);
mfxStatus process_filter(qsv_filter_task_t*,void*);
mfxStatus lock_frame(mfxFrameAllocator *,mfxFrameSurface1*);
mfxStatus unlock_frame(mfxFrameAllocator *,mfxFrameSurface1*);
#endif //QSV_FILTER_PP_H
HandBrake-0.10.2/libhb/vadxva2.c 0000664 0001752 0001752 00000057570 12463330511 016652 0 ustar handbrake handbrake /* vadxva2.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: <http://handbrake.fr/>.
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
Authors: Peng Gao
Li Cao
*/
#ifdef USE_HWD
#include "vadxva2.h"
#include "extras/cl.h"
#include "oclnv12toyuv.h"
static int hb_va_setup( hb_va_dxva2_t *dxva2, void **hw, int width, int height );
static int hb_va_get( hb_va_dxva2_t *dxva2, AVFrame *frame );
static int hb_d3d_create_device( hb_va_dxva2_t *dxva2 );
static void hb_d3d_destroy_device( hb_va_dxva2_t *dxvva2 );
static int hb_d3d_create_device_manager( hb_va_dxva2_t *dxva2 );
static void hb_d3d_destroy_device_manager( hb_va_dxva2_t *dxva2 );
static int hb_dx_create_video_service( hb_va_dxva2_t *dxva2 );
static void hb_dx_destroy_video_service( hb_va_dxva2_t *dxva2 );
static int hb_dx_find_video_service_conversion( hb_va_dxva2_t *dxva2, GUID *input, D3DFORMAT *output );
static int hb_dx_create_video_decoder( hb_va_dxva2_t *dxva2, int codec_id, const hb_title_t* fmt );
static void hb_dx_create_video_conversion( hb_va_dxva2_t *dxva2 );
static const hb_d3d_format_t *hb_d3d_find_format( D3DFORMAT format );
static const hb_dx_mode_t *hb_dx_find_mode( const GUID *guid );
static void hb_dx_destroy_video_decoder( hb_va_dxva2_t *dxva2 );
/**
* It destroys a Direct3D device manager
*/
/* Release the Direct3D device manager, if one was created. */
static void hb_d3d_destroy_device_manager( hb_va_dxva2_t *dxva2 )
{
    if( dxva2->devmng == NULL )
    {
        return;
    }
    IDirect3DDeviceManager9_Release( dxva2->devmng );
}
/**
* It releases a Direct3D device and its resources.
*/
/* Release the Direct3D device and then the interface object that created it. */
static void hb_d3d_destroy_device( hb_va_dxva2_t *dxva2 )
{
    if( dxva2->d3ddev != NULL )
    {
        IDirect3DDevice9_Release( dxva2->d3ddev );
    }
    if( dxva2->d3dobj != NULL )
    {
        IDirect3D9_Release( dxva2->d3dobj );
    }
}
/**
* It destroys a DirectX video service
*/
/* Close the device handle and release the DXVA decoder service. */
static void hb_dx_destroy_video_service( hb_va_dxva2_t *dxva2 )
{
    if( dxva2->device != NULL )
    {
        IDirect3DDeviceManager9_CloseDeviceHandle( dxva2->devmng, dxva2->device );
    }
    if( dxva2->vs != NULL )
    {
        IDirectXVideoDecoderService_Release( dxva2->vs );
    }
}
/* Look up a D3D pixel format in the d3d_formats table (terminated by an
 * entry with a NULL name).  Returns NULL when the format is unknown. */
static const hb_d3d_format_t *hb_d3d_find_format( D3DFORMAT format )
{
    const hb_d3d_format_t *entry;

    for( entry = d3d_formats; entry->name != NULL; entry++ )
    {
        if( entry->format == format )
        {
            return entry;
        }
    }
    return NULL;
}
/* Choose the output FOURCC for the decoder's render format: NV12 surfaces
 * are converted to planar YV12, anything else is passed through unchanged. */
static void hb_dx_create_video_conversion( hb_va_dxva2_t *dxva2 )
{
    if( dxva2->render == MAKEFOURCC( 'N', 'V', '1', '2' ) )
    {
        dxva2->output = MAKEFOURCC( 'Y', 'V', '1', '2' );
    }
    else
    {
        dxva2->output = dxva2->render;
    }
}
/* Drop one reference from the surface backing the given frame. */
void hb_va_release( hb_va_dxva2_t *dxva2, AVFrame *frame )
{
    LPDIRECT3DSURFACE9 d3d = (LPDIRECT3DSURFACE9)(uintptr_t)frame->data[3];
    unsigned idx;

    /* The surface pointer is stashed in frame->data[3] by hb_va_get(). */
    for( idx = 0; idx < dxva2->surface_count; idx++ )
    {
        if( dxva2->surface[idx].d3d == d3d )
        {
            dxva2->surface[idx].refcount--;
        }
    }
}
/* Tear down the whole DXVA2 context: decoder, services, device, DLLs and
 * the temporary conversion buffers, then the context itself. */
void hb_va_close( hb_va_dxva2_t *dxva2 )
{
    hb_dx_destroy_video_decoder( dxva2 );
    hb_dx_destroy_video_service( dxva2 );
    hb_d3d_destroy_device_manager( dxva2 );
    hb_d3d_destroy_device( dxva2 );

    if( dxva2->hdxva2_dll )
        FreeLibrary( dxva2->hdxva2_dll );
    if( dxva2->hd3d9_dll )
        FreeLibrary( dxva2->hd3d9_dll );

    /* free(NULL) is a no-op, so the previous NULL guards were redundant. */
    free( dxva2->nv12toyuv_tmp_in );
    free( dxva2->nv12toyuv_tmp_out );

    dxva2->description = NULL;
    free( dxva2 );
}
/**
* It creates a DXVA2 decoder using the given video format
*/
static int hb_dx_create_video_decoder( hb_va_dxva2_t *dxva2, int codec_id, const hb_title_t* fmt )
{
    dxva2->width = fmt->width;
    dxva2->height = fmt->height;
    /* Decoder surfaces must be 16-pixel aligned. */
    dxva2->surface_width = (fmt->width + 15) & ~15;
    dxva2->surface_height = (fmt->height + 15) & ~15;
    switch( codec_id )
    {
        case AV_CODEC_ID_H264:
            /* H.264 can reference up to 16 frames, plus one work surface. */
            dxva2->surface_count = 16 + 1;
            break;
        default:
            dxva2->surface_count = 2 + 1;
            break;
    }
    LPDIRECT3DSURFACE9 surface_list[VA_DXVA2_MAX_SURFACE_COUNT];
    if( FAILED( IDirectXVideoDecoderService_CreateSurface( dxva2->vs,
                                                           dxva2->surface_width,
                                                           dxva2->surface_height,
                                                           dxva2->surface_count - 1,
                                                           dxva2->render,
                                                           D3DPOOL_DEFAULT,
                                                           0,
                                                           DXVA2_VideoDecoderRenderTarget,
                                                           surface_list, NULL )))
    {
        hb_log( "dxva2:IDirectXVideoAccelerationService_CreateSurface failed" );
        dxva2->surface_count = 0;
        return HB_WORK_ERROR;
    }
    unsigned i;
    /* Fixed: the loop bound was garbled ("isurface_count") and did not
       compile; restored to iterate over all allocated surfaces. */
    for( i = 0; i < dxva2->surface_count; i++ )
    {
        hb_va_surface_t *surface = &dxva2->surface[i];
        surface->d3d = surface_list[i];
        surface->refcount = 0;
        surface->order = 0;
    }
    hb_log( "dxva2:CreateSurface succeed with %d, fmt (%dx%d) surfaces (%dx%d)", dxva2->surface_count,
            fmt->width, fmt->height, dxva2->surface_width, dxva2->surface_height );
    DXVA2_VideoDesc dsc;
    memset( &dsc, 0, sizeof(dsc));
    dsc.SampleWidth = fmt->width;
    dsc.SampleHeight = fmt->height;
    dsc.Format = dxva2->render;
    if( fmt->rate> 0 && fmt->rate_base> 0 )
    {
        dsc.InputSampleFreq.Numerator = fmt->rate;
        dsc.InputSampleFreq.Denominator = fmt->rate_base;
    }
    else
    {
        dsc.InputSampleFreq.Numerator = 0;
        dsc.InputSampleFreq.Denominator = 0;
    }
    dsc.OutputFrameFreq = dsc.InputSampleFreq;
    dsc.UABProtectionLevel = FALSE;
    dsc.Reserved = 0;
    /* FIXME I am unsure we can let unknown everywhere */
    DXVA2_ExtendedFormat *ext = &dsc.SampleFormat;
    ext->SampleFormat = 0; //DXVA2_SampleUnknown;
    ext->VideoChromaSubsampling = 0; //DXVA2_VideoChromaSubsampling_Unknown;
    ext->NominalRange = 0; //DXVA2_NominalRange_Unknown;
    ext->VideoTransferMatrix = 0; //DXVA2_VideoTransferMatrix_Unknown;
    ext->VideoLighting = 0; //DXVA2_VideoLighting_Unknown;
    ext->VideoPrimaries = 0; //DXVA2_VideoPrimaries_Unknown;
    ext->VideoTransferFunction = 0; //DXVA2_VideoTransFunc_Unknown;
    /* List all configurations available for the decoder */
    UINT cfg_count = 0;
    DXVA2_ConfigPictureDecode *cfg_list = NULL;
    if( FAILED( IDirectXVideoDecoderService_GetDecoderConfigurations( dxva2->vs, &dxva2->input, &dsc, NULL, &cfg_count, &cfg_list )))
    {
        hb_log( "dxva2:IDirectXVideoDecoderService_GetDecoderConfigurations failed" );
        return HB_WORK_ERROR;
    }
    hb_log( "dxva2:we got %d decoder configurations", cfg_count );
    /* Select the best decoder configuration: prefer raw bitstream input,
       with a bonus for unencrypted configurations. */
    int cfg_score = 0;
    for( i = 0; i < cfg_count; i++ )
    {
        const DXVA2_ConfigPictureDecode *cfg = &cfg_list[i];
        hb_log( "dxva2:configuration[%d] ConfigBitstreamRaw %d", i, cfg->ConfigBitstreamRaw );
        int score;
        if( cfg->ConfigBitstreamRaw == 1 )
            score = 1;
        else if( codec_id == AV_CODEC_ID_H264 && cfg->ConfigBitstreamRaw == 2 )
            score = 2;
        else
            continue;
        if( IsEqualGUID( &cfg->guidConfigBitstreamEncryption, &DXVA_NoEncrypt ))
            score += 16;
        if( cfg_score < score )
        {
            dxva2->cfg = *cfg;
            cfg_score = score;
        }
    }
    /* NOTE(review): cfg_list is never released (CoTaskMemFree) — pre-existing leak. */
    //my_release(cfg_list);
    if( cfg_score <= 0 )
    {
        hb_log( "dxva2:Failed to find a supported decoder configuration" );
        return HB_WORK_ERROR;
    }
    /* Create the decoder */
    IDirectXVideoDecoder *decoder;
    if( FAILED( IDirectXVideoDecoderService_CreateVideoDecoder( dxva2->vs, &dxva2->input, &dsc, &dxva2->cfg, surface_list, dxva2->surface_count, &decoder )))
    {
        hb_log( "dxva2:IDirectXVideoDecoderService_CreateVideoDecoder failed" );
        return HB_WORK_ERROR;
    }
    dxva2->decoder = decoder;
    hb_log( "dxva2:IDirectXVideoDecoderService_CreateVideoDecoder succeed" );
    return HB_WORK_OK;
}
typedef HWND (WINAPI *PROCGETSHELLWND)();
/**
* It creates and initializes a Direct3D 9 device
*/
static int hb_d3d_create_device( hb_va_dxva2_t *dxva2 )
{
    /* Resolve Direct3DCreate9 dynamically so d3d9.dll is only a runtime dependency. */
    LPDIRECT3D9 (WINAPI *Create9)( UINT SDKVersion );
    Create9 = (void*)GetProcAddress( dxva2->hd3d9_dll, TEXT( "Direct3DCreate9" ));
    if( !Create9 )
    {
        hb_log( "dxva2:Cannot locate reference to Direct3DCreate9 ABI in DLL" );
        return HB_WORK_ERROR;
    }
    LPDIRECT3D9 d3dobj;
    d3dobj = Create9( D3D_SDK_VERSION );
    if( !d3dobj )
    {
        hb_log( "dxva2:Direct3DCreate9 failed" );
        return HB_WORK_ERROR;
    }
    dxva2->d3dobj = d3dobj;
    /* Adapter identification failure is non-fatal; just zero the info. */
    D3DADAPTER_IDENTIFIER9 *d3dai = &dxva2->d3dai;
    if( FAILED( IDirect3D9_GetAdapterIdentifier( dxva2->d3dobj, D3DADAPTER_DEFAULT, 0, d3dai )))
    {
        hb_log( "dxva2:IDirect3D9_GetAdapterIdentifier failed" );
        memset( d3dai, 0, sizeof(*d3dai));
    }
    /* GetShellWindow supplies a window handle to create the device against,
       since transcoding has no window of its own. */
    PROCGETSHELLWND GetShellWindow;
    HMODULE hUser32 = GetModuleHandle( "user32" );
    GetShellWindow = (PROCGETSHELLWND)
                     GetProcAddress( hUser32, "GetShellWindow" );
    D3DPRESENT_PARAMETERS *d3dpp = &dxva2->d3dpp;
    memset( d3dpp, 0, sizeof(*d3dpp));
    d3dpp->Flags = D3DPRESENTFLAG_VIDEO;
    d3dpp->Windowed = TRUE;
    d3dpp->hDeviceWindow = NULL;
    d3dpp->SwapEffect = D3DSWAPEFFECT_DISCARD;
    d3dpp->MultiSampleType = D3DMULTISAMPLE_NONE;
    d3dpp->PresentationInterval = D3DPRESENT_INTERVAL_DEFAULT;
    d3dpp->BackBufferCount = 0; /* FIXME what to put here */
    d3dpp->BackBufferFormat = D3DFMT_X8R8G8B8; /* FIXME what to put here */
    d3dpp->BackBufferWidth = 0;
    d3dpp->BackBufferHeight = 0;
    d3dpp->EnableAutoDepthStencil = FALSE;
    LPDIRECT3DDEVICE9 d3ddev;
    //if (FAILED(IDirect3D9_CreateDevice(d3dobj, D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, GetShellWindow(), D3DCREATE_SOFTWARE_VERTEXPROCESSING|D3DCREATE_MULTITHREADED, d3dpp, &d3ddev)))
    if( FAILED( IDirect3D9_CreateDevice( d3dobj,
                                         D3DADAPTER_DEFAULT,
                                         D3DDEVTYPE_HAL,
                                         GetShellWindow(),
                                         D3DCREATE_HARDWARE_VERTEXPROCESSING|D3DCREATE_MULTITHREADED,
                                         d3dpp,
                                         &d3ddev )))
    {
        hb_log( "dxva2:IDirect3D9_CreateDevice failed" );
        return HB_WORK_ERROR;
    }
    dxva2->d3ddev = d3ddev;
    return HB_WORK_OK;
}
/**
* It creates a Direct3D device manager
*/
static int hb_d3d_create_device_manager( hb_va_dxva2_t *dxva2 )
{
    /* DXVA2CreateDirect3DDeviceManager9 is resolved at runtime from dxva2.dll. */
    HRESULT(WINAPI *CreateDeviceManager9)( UINT *pResetToken, IDirect3DDeviceManager9 ** );
    CreateDeviceManager9 = (void*)GetProcAddress( dxva2->hdxva2_dll, TEXT( "DXVA2CreateDirect3DDeviceManager9" ));
    if( !CreateDeviceManager9 )
    {
        hb_log( "dxva2:cannot load function" );
        return HB_WORK_ERROR;
    }
    UINT token;
    IDirect3DDeviceManager9 *devmng;
    if( FAILED( CreateDeviceManager9( &token, &devmng )))
    {
        hb_log( "dxva2:OurDirect3DCreateDeviceManager9 failed" );
        return HB_WORK_ERROR;
    }
    dxva2->token = token;
    dxva2->devmng = devmng;
    /* Attach our D3D device to the manager so DXVA services can use it. */
    long hr = IDirect3DDeviceManager9_ResetDevice( devmng, dxva2->d3ddev, token );
    if( FAILED( hr ))
    {
        hb_log( "dxva2:IDirect3DDeviceManager9_ResetDevice failed: %08x", (unsigned)hr );
        return HB_WORK_ERROR;
    }
    return HB_WORK_OK;
}
/**
* It creates a DirectX video service
*/
static int hb_dx_create_video_service( hb_va_dxva2_t *dxva2 )
{
    /* DXVA2CreateVideoService is resolved at runtime from dxva2.dll. */
    HRESULT (WINAPI *CreateVideoService)( IDirect3DDevice9 *, REFIID riid, void **ppService );
    CreateVideoService = (void*)GetProcAddress( dxva2->hdxva2_dll, TEXT( "DXVA2CreateVideoService" ));
    if( !CreateVideoService )
    {
        hb_log( "dxva2:cannot load function" );
        return HB_WORK_ERROR;
    }
    HRESULT hr;
    /* Open a device handle on the manager... */
    HANDLE device;
    hr = IDirect3DDeviceManager9_OpenDeviceHandle( dxva2->devmng, &device );
    if( FAILED( hr ))
    {
        hb_log( "dxva2:OpenDeviceHandle failed" );
        return HB_WORK_ERROR;
    }
    dxva2->device = device;
    /* ...and obtain the decoder service interface through it. */
    IDirectXVideoDecoderService *vs;
    hr = IDirect3DDeviceManager9_GetVideoService( dxva2->devmng, device, &IID_IDirectXVideoDecoderService, (void*)&vs );
    if( FAILED( hr ))
    {
        hb_log( "dxva2:GetVideoService failed" );
        return HB_WORK_ERROR;
    }
    dxva2->vs = vs;
    return HB_WORK_OK;
}
/**
* Find the best suited decoder mode GUID and render format.
*/
static int hb_dx_find_video_service_conversion( hb_va_dxva2_t *dxva2, GUID *input, D3DFORMAT *output )
{
    /* Query the decoder GUIDs the device supports. */
    unsigned int input_count = 0;
    GUID *input_list = NULL;
    if( FAILED( IDirectXVideoDecoderService_GetDecoderDeviceGuids( dxva2->vs, &input_count, &input_list )))
    {
        hb_log( "dxva2:IDirectXVideoDecoderService_GetDecoderDeviceGuids failed" );
        return HB_WORK_ERROR;
    }
    unsigned i, j;
    /* NOTE(review): the result of this lookup is unused — presumably kept
       for debugging; confirm before removing. */
    for( i = 0; i < input_count; i++ )
    {
        const GUID *g = &input_list[i];
        const hb_dx_mode_t *mode = hb_dx_find_mode( g );
    }
    /* Walk our known decoder modes in table (preference) order. */
    for( i = 0; dxva2_modes[i].name; i++ )
    {
        const hb_dx_mode_t *mode = &dxva2_modes[i];
        if( !mode->codec || mode->codec != dxva2->codec_id )
            continue;
        /* Check the mode against the GUIDs the device actually offers. */
        int is_suported = 0;
        const GUID *g;
        for( g = &input_list[0]; !is_suported && g < &input_list[input_count]; g++ )
        {
            is_suported = IsEqualGUID( mode->guid, g );
        }
        if( !is_suported )
            continue;
        unsigned int output_count = 0;
        D3DFORMAT *output_list = NULL;
        if( FAILED( IDirectXVideoDecoderService_GetDecoderRenderTargets( dxva2->vs, mode->guid, &output_count, &output_list )))
        {
            hb_log( "dxva2:IDirectXVideoDecoderService_GetDecoderRenderTargets failed" );
            continue;
        }
        /* Log every render target the device offers for this mode. */
        for( j = 0; j < output_count; j++ )
        {
            const D3DFORMAT f = output_list[j];
            const hb_d3d_format_t *format = hb_d3d_find_format( f );
            if( format )
            {
                //hb_log( "dxva2:%s is supported for output", format->name );
            }
            else
            {
                /* NOTE(review): "dxvar2" typo is in the log string itself. */
                hb_log( "dxvar2:%d is supported for output (%4.4s)", f, (const char*)&f );
            }
        }
        /* Pick the first of our preferred render formats the device offers. */
        for( j = 0; d3d_formats[j].name; j++ )
        {
            const hb_d3d_format_t *format = &d3d_formats[j];
            int is_suported = 0;
            unsigned k;
            for( k = 0; !is_suported && k < output_count; k++ )
            {
                is_suported = format->format == output_list[k];
            }
            if( !is_suported )
                continue;
            *input = *mode->guid;
            *output = format->format;
            return HB_WORK_OK;
        }
    }
    return HB_WORK_ERROR;
}
static const hb_dx_mode_t *hb_dx_find_mode( const GUID *guid )
{
unsigned i;
for( i = 0; dxva2_modes[i].name; i++ )
{
if( IsEqualGUID( dxva2_modes[i].guid, guid ))
return &dxva2_modes[i];
}
return NULL;
}
/* Release the DXVA decoder and all of its render-target surfaces.
 * Fix: the surface loop bound was garbled ("isurface_count") and did not
 * compile; restored to iterate over all allocated surfaces. */
static void hb_dx_destroy_video_decoder( hb_va_dxva2_t *dxva2 )
{
    if( dxva2->decoder )
        IDirectXVideoDecoder_Release( dxva2->decoder );
    dxva2->decoder = NULL;
    unsigned i;
    for( i = 0; i < dxva2->surface_count; i++ )
        IDirect3DSurface9_Release( dxva2->surface[i].d3d );
    dxva2->surface_count = 0;
}
/**
* setup dxva2
*/
static int hb_va_setup( hb_va_dxva2_t *dxva2, void **hw, int width, int height )
{
    /* Reuse the existing decoder when the dimensions have not changed. */
    if( dxva2->width == width && dxva2->height == height && dxva2->decoder )
        goto ok;
    hb_dx_destroy_video_decoder( dxva2 );
    *hw = NULL;
    dxva2->i_chroma = 0;
    if( width <= 0 || height <= 0 )
        return HB_WORK_ERROR;
    /* Build a minimal title descriptor carrying just the frame geometry. */
    hb_title_t fmt;
    memset( &fmt, 0, sizeof(fmt));
    fmt.width = width;
    fmt.height = height;
    if( hb_dx_create_video_decoder( dxva2, dxva2->codec_id, &fmt ) == HB_WORK_ERROR )
        return HB_WORK_ERROR;
    /* Publish the decoder and its surfaces through the hwaccel context. */
    dxva2->hw.decoder = dxva2->decoder;
    dxva2->hw.cfg = &dxva2->cfg;
    dxva2->hw.surface_count = dxva2->surface_count;
    dxva2->hw.surface = dxva2->hw_surface;
    unsigned i;
    for( i = 0; i < dxva2->surface_count; i++ )
        dxva2->hw.surface[i] = dxva2->surface[i].d3d;
    hb_dx_create_video_conversion( dxva2 );
ok:
    *hw = &dxva2->hw;
    const hb_d3d_format_t *output = hb_d3d_find_format( dxva2->output );
    dxva2->i_chroma = output->codec;
    return HB_WORK_OK;
}
static int hb_va_get( hb_va_dxva2_t *dxva2, AVFrame *frame )
{
    unsigned i, old;
    /* Prefer an unreferenced surface; otherwise fall back to the least
       recently used one (smallest order). */
    for( i = 0, old = 0; i < dxva2->surface_count; i++ )
    {
        hb_va_surface_t *surface = &dxva2->surface[i];
        if( !surface->refcount )
            break;
        if( surface->order < dxva2->surface[old].order )
            old = i;
    }
    if( i >= dxva2->surface_count )
        i = old;
    hb_va_surface_t *surface = &dxva2->surface[i];
    surface->refcount = 1;
    surface->order = dxva2->surface_order++;
    /* The D3D surface pointer is placed in data[0] and data[3]. */
    for( i = 0; i < 4; i++ )
    {
        frame->data[i] = NULL;
        frame->linesize[i] = 0;
        if( i == 0 || i == 3 )
            frame->data[i] = (void*)surface->d3d;
    }
    return HB_WORK_OK;
}
/**
* nv12 to yuv of c reference
*/
/* De-interleave an NV12 image (planar Y plus interleaved UV at half
 * resolution) into a contiguous planar layout: Y plane, then U, then V.
 * src[0]/src[1] are the Y and UV planes with their respective pitches;
 * dst is packed with row pitch == width. */
static void hb_copy_from_nv12( uint8_t *dst, uint8_t *src[2], size_t src_pitch[2], unsigned width, unsigned height )
{
    uint8_t *dst_u = dst + width * height;
    uint8_t *dst_v = dst_u + width * height / 4;
    unsigned uv_w = width / 2;
    unsigned uv_h = height / 2;
    unsigned row, col;

    /* Luma rows copy straight across, dropping the source pitch padding. */
    for( row = 0; row < height; row++ )
    {
        memcpy( dst + row * width, src[0] + row * src_pitch[0], width );
    }
    /* Chroma: even bytes are U samples, odd bytes are V samples. */
    for( row = 0; row < uv_h; row++ )
    {
        const uint8_t *uv = src[1] + row * src_pitch[1];
        for( col = 0; col < uv_w; col++ )
        {
            dst_u[row * uv_w + col] = uv[2 * col];
            dst_v[row * uv_w + col] = uv[2 * col + 1];
        }
    }
}
/**
* lock frame data form surface.
* nv12 to yuv with opencl and with C reference
* scale with opencl
*/
int hb_va_extract( hb_va_dxva2_t *dxva2, uint8_t *dst, AVFrame *frame, int job_w, int job_h, int *crop, hb_oclscale_t *os, int use_opencl, int use_decomb, int use_detelecine )
{
    /* Copy decoded pixels out of the D3D surface into dst (planar layout).
       NOTE(review): job_w/job_h/crop/os/use_opencl/use_decomb/use_detelecine
       are unused in this code path — presumably consumed by an OpenCL path
       compiled elsewhere; confirm before relying on them. */
    LPDIRECT3DSURFACE9 d3d = (LPDIRECT3DSURFACE9)(uintptr_t)frame->data[3];
    D3DLOCKED_RECT lock;
    if( FAILED( IDirect3DSurface9_LockRect( d3d, &lock, NULL, D3DLOCK_READONLY )))
    {
        hb_log( "dxva2:Failed to lock surface" );
        return HB_WORK_ERROR;
    }
    if( dxva2->render == MAKEFOURCC( 'N', 'V', '1', '2' ))
    {
        /* NV12: luma plane followed by interleaved chroma at the same pitch. */
        uint8_t *plane[2] =
        {
            lock.pBits,
            (uint8_t*)lock.pBits + lock.Pitch * dxva2->surface_height
        };
        size_t pitch[2] =
        {
            lock.Pitch,
            lock.Pitch,
        };
        hb_copy_from_nv12( dst, plane, pitch, dxva2->width, dxva2->height );
    }
    IDirect3DSurface9_UnlockRect( d3d );
    return HB_WORK_OK;
}
/**
* create dxva2 service
* load library D3D9.dll
*/
hb_va_dxva2_t * hb_va_create_dxva2( hb_va_dxva2_t *dxva2, int codec_id )
{
    /* Any pre-existing context is torn down first; this function always
       builds a fresh one and returns it (or NULL on failure). */
    if( dxva2 )
    {
        hb_va_close( dxva2 );
        dxva2 = NULL;
    }
    hb_va_dxva2_t *dxva = calloc( 1, sizeof(*dxva) );
    if( !dxva ) return NULL;
    dxva->codec_id = codec_id;
    /* Load d3d9/dxva2 at runtime so non-DXVA systems can still run. */
    dxva->hd3d9_dll = LoadLibrary( TEXT( "D3D9.DLL" ) );
    if( !dxva->hd3d9_dll )
    {
        hb_log( "dxva2:cannot load d3d9.dll" );
        goto error;
    }
    dxva->hdxva2_dll = LoadLibrary( TEXT( "DXVA2.DLL" ) );
    if( !dxva->hdxva2_dll )
    {
        hb_log( "dxva2:cannot load DXVA2.dll" );
        goto error;
    }
    /* Device -> device manager -> video service -> decoder mode/format. */
    if( hb_d3d_create_device( dxva ) == HB_WORK_ERROR )
    {
        hb_log( "dxva2:Failed to create Direct3D device" );
        goto error;
    }
    if( hb_d3d_create_device_manager( dxva ) == HB_WORK_ERROR )
    {
        hb_log( "dxva2:D3dCreateDeviceManager failed" );
        goto error;
    }
    if( hb_dx_create_video_service( dxva ) == HB_WORK_ERROR )
    {
        hb_log( "dxva2:DxCreateVideoService failed" );
        goto error;
    }
    if( hb_dx_find_video_service_conversion( dxva, &dxva->input, &dxva->render ) == HB_WORK_ERROR )
    {
        hb_log( "dxva2:DxFindVideoServiceConversion failed" );
        goto error;
    }
    dxva->do_job = HB_WORK_OK;
    dxva->description = "DXVA2";
    return dxva;
error:
    hb_va_close( dxva );
    return NULL;
}
void hb_va_new_dxva2( hb_va_dxva2_t *dxva2, AVCodecContext *p_context )
{
    /* Attach the DXVA2 context to the codec's hwaccel_context once the
       frame dimensions are known. */
    if( p_context->width > 0 && p_context->height > 0 )
    {
        if( hb_va_setup( dxva2, &p_context->hwaccel_context, p_context->width, p_context->height ) == HB_WORK_ERROR )
        {
            hb_log( "dxva2:hb_va_Setup failed" );
            hb_va_close( dxva2 );
            /* NOTE(review): this only clears the local copy of the pointer;
               the caller's dxva2 still points at freed memory — verify that
               callers handle this failure path. */
            dxva2 = NULL;
        }
    }
    if( dxva2 )
    {
        dxva2->input_pts[0] = 0;
        dxva2->input_pts[1] = 0;
        if( dxva2->description )
            hb_log( "dxva2:Using %s for hardware decoding", dxva2->description );
        /* Disable slice drawing; frames are fetched from D3D surfaces instead. */
        p_context->draw_horiz_band = NULL;
    }
}
char* hb_get_pix_fmt_name( int pix_fmt )
{
static const char *ppsz_name[AV_PIX_FMT_NB] =
{
[AV_PIX_FMT_VDPAU_H264] = "AV_PIX_FMT_VDPAU_H264",
[AV_PIX_FMT_VAAPI_IDCT] = "AV_PIX_FMT_VAAPI_IDCT",
[AV_PIX_FMT_VAAPI_VLD] = "AV_PIX_FMT_VAAPI_VLD",
[AV_PIX_FMT_VAAPI_MOCO] = "AV_PIX_FMT_VAAPI_MOCO",
[AV_PIX_FMT_DXVA2_VLD] = "AV_PIX_FMT_DXVA2_VLD",
[AV_PIX_FMT_YUYV422] = "AV_PIX_FMT_YUYV422",
[AV_PIX_FMT_YUV420P] = "AV_PIX_FMT_YUV420P",
};
return ppsz_name[pix_fmt];
}
/**
 * libavcodec get_format callback: prefer the DXVA2 hardware surface
 * format when the decoder offers it, otherwise defer to the default
 * software selection.
 */
enum PixelFormat hb_ffmpeg_get_format( AVCodecContext *p_context, const enum PixelFormat *pi_fmt )
{
    const enum PixelFormat *fmt;

    for( fmt = pi_fmt; *fmt != AV_PIX_FMT_NONE; fmt++ )
    {
        char *name = hb_get_pix_fmt_name( *fmt );
        hb_log( "dxva2:Available decoder output format %d (%s)", *fmt, name ? name : "Unknown" );
        if( *fmt == AV_PIX_FMT_DXVA2_VLD )
        {
            return *fmt;
        }
    }
    return avcodec_default_get_format( p_context, pi_fmt );
}
/**
 * get_buffer-style hook: hand libavcodec a DXVA2 surface for this frame.
 * Returns HB_WORK_OK on success, HB_WORK_ERROR when no surface is free.
 */
int hb_va_get_frame_buf( hb_va_dxva2_t *dxva2, AVCodecContext *p_context, AVFrame *frame )
{
    frame->type = FF_BUFFER_TYPE_USER;
    if( hb_va_get( dxva2, frame ) != HB_WORK_ERROR )
    {
        return HB_WORK_OK;
    }
    hb_log( "VaGrabSurface failed" );
    return HB_WORK_ERROR;
}
/**
 * Report whether a pixel format is eligible for hardware decode.
 * High-bit-depth (9/10/16-bit) and GBR planar formats are not; anything
 * else is.  Returns 1 when hardware decode is allowed, 0 otherwise.
 */
int hb_check_hwd_fmt( int fmt )
{
    switch ( fmt )
    {
        case AV_PIX_FMT_YUV420P16LE:
        case AV_PIX_FMT_YUV420P16BE:
        case AV_PIX_FMT_YUV422P16LE:
        case AV_PIX_FMT_YUV422P16BE:
        case AV_PIX_FMT_YUV444P16LE:
        case AV_PIX_FMT_YUV444P16BE:
        case AV_PIX_FMT_YUV420P9BE:
        case AV_PIX_FMT_YUV420P9LE:
        case AV_PIX_FMT_YUV420P10BE:
        case AV_PIX_FMT_YUV420P10LE:
        case AV_PIX_FMT_YUV422P10BE:
        case AV_PIX_FMT_YUV422P10LE:
        case AV_PIX_FMT_YUV444P9BE:
        case AV_PIX_FMT_YUV444P9LE:
        case AV_PIX_FMT_YUV444P10BE:
        case AV_PIX_FMT_YUV444P10LE:
        case AV_PIX_FMT_YUV422P9BE:
        case AV_PIX_FMT_YUV422P9LE:
        case AV_PIX_FMT_GBRP9BE:
        case AV_PIX_FMT_GBRP9LE:
        case AV_PIX_FMT_GBRP10BE:
        case AV_PIX_FMT_GBRP10LE:
        case AV_PIX_FMT_GBRP16BE:
        case AV_PIX_FMT_GBRP16LE:
        case AV_PIX_FMT_YUVA420P9BE:
        case AV_PIX_FMT_YUVA420P9LE:
        case AV_PIX_FMT_YUVA422P9BE:
        case AV_PIX_FMT_YUVA422P9LE:
        case AV_PIX_FMT_YUVA444P9BE:
        case AV_PIX_FMT_YUVA444P9LE:
        case AV_PIX_FMT_YUVA420P10BE:
        case AV_PIX_FMT_YUVA420P10LE:
            return 0;
        default:
            return 1;
    }
}
#endif // USE_HWD
HandBrake-0.10.2/libhb/encavcodec.c 0000664 0001752 0001752 00000046036 12463330511 017364 0 ustar handbrake handbrake /* encavcodec.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
#include "hb_dict.h"
#include "hbffmpeg.h"
/*
* The frame info struct remembers information about each frame across calls
* to avcodec_encode_video. Since frames are uniquely identified by their
* frame number, we use this as an index.
*
* The size of the array is chosen so that two frames can't use the same
* slot during the encoder's max frame delay (set by the standard as 16
* frames) and so that, up to some minimum frame rate, frames are guaranteed
* to map to * different slots.
*/
#define FRAME_INFO_SIZE 32
#define FRAME_INFO_MASK (FRAME_INFO_SIZE - 1)
/* Private state for the libavcodec video encoder work object. */
struct hb_work_private_s
{
    hb_job_t           * job;
    AVCodecContext     * context;
    FILE               * file;          /* 2-pass stats log (pass 1 only) */
    int                  frameno_in;    /* frames fed to the encoder */
    int                  frameno_out;   /* frames emitted downstream */
    hb_buffer_t        * delay_head;    /* buffers queued until dts_delay is known */
    hb_buffer_t        * delay_tail;
    int64_t              dts_delay;     /* pts of frame N used as the DTS offset */
    /* Per-frame start/duration cached across the encoder's reordering;
     * indexed by frame number & FRAME_INFO_MASK (see comment above). */
    struct {
        int64_t start;
        int64_t duration;
    } frame_info[FRAME_INFO_SIZE];
};

int  encavcodecInit( hb_work_object_t *, hb_job_t * );
int  encavcodecWork( hb_work_object_t *, hb_buffer_t **, hb_buffer_t ** );
void encavcodecClose( hb_work_object_t * );

/* Work-object registration record consumed by the HandBrake pipeline. */
hb_work_object_t hb_encavcodec =
{
    WORK_ENCAVCODEC,
    "FFMPEG encoder (libavcodec)",
    encavcodecInit,
    encavcodecWork,
    encavcodecClose
};
/**
 * Set up a libavcodec video encoder (MPEG-4 ASP, MPEG-2 or VP8).
 *
 * Configures the frame rate (snapping to known rates / codec-supported
 * rates), rate control (ABR or constant quality), 2-pass logging and
 * container flags, then opens the codec.
 *
 * @return 0 on success, 1 on failure.
 *
 * Fix: the 2-pass stats file handling previously dereferenced the
 * hb_fopen() result without a NULL check and ignored malloc()/fread()
 * failures — a missing or unreadable pass-1 log crashed the encoder.
 */
int encavcodecInit( hb_work_object_t * w, hb_job_t * job )
{
    AVCodec * codec;
    AVCodecContext * context;
    AVRational fps;

    hb_work_private_t * pv = calloc( 1, sizeof( hb_work_private_t ) );
    w->private_data = pv;
    pv->job = job;

    switch ( w->codec_param )
    {
        case AV_CODEC_ID_MPEG4:
        {
            hb_log("encavcodecInit: MPEG-4 ASP encoder");
        } break;
        case AV_CODEC_ID_MPEG2VIDEO:
        {
            hb_log("encavcodecInit: MPEG-2 encoder");
        } break;
        case AV_CODEC_ID_VP8:
        {
            hb_log("encavcodecInit: VP8 encoder");
        } break;
        default:
        {
            hb_error("encavcodecInit: unsupported encoder!");
            return 1;
        }
    }

    codec = avcodec_find_encoder( w->codec_param );
    if( !codec )
    {
        hb_log( "encavcodecInit: avcodec_find_encoder "
                "failed" );
        return 1;
    }
    context = avcodec_alloc_context3( codec );

    // Set things in context that we will allow the user to
    // override with advanced settings.
    if( job->pass == 2 )
    {
        /* Pass 2 must reuse the frame rate measured during pass 1. */
        hb_interjob_t * interjob = hb_interjob_get( job->h );
        fps.den = interjob->vrate_base;
        fps.num = interjob->vrate;
    }
    else
    {
        fps.den = job->vrate_base;
        fps.num = job->vrate;
    }

    // If the fps.num is 27000000, there's a good chance this is
    // a standard rate that we have in our hb_video_rates table.
    // Because of rounding errors and approximations made while
    // measuring framerate, the actual value may not be exact.  So
    // we look for rates that are "close" and make an adjustment
    // to fps.den.
    if (fps.num == 27000000)
    {
        const hb_rate_t *video_framerate = NULL;
        while ((video_framerate = hb_video_framerate_get_next(video_framerate)) != NULL)
        {
            if (abs(fps.den - video_framerate->rate) < 10)
            {
                fps.den = video_framerate->rate;
                break;
            }
        }
    }
    hb_reduce(&fps.den, &fps.num, fps.den, fps.num);

    // Check that the framerate is supported.  If not, pick the closest.
    // The mpeg2 codec only supports a specific list of frame rates.
    if (codec->supported_framerates)
    {
        AVRational supported_fps;
        supported_fps = codec->supported_framerates[av_find_nearest_q_idx(fps, codec->supported_framerates)];
        if (supported_fps.num != fps.num || supported_fps.den != fps.den)
        {
            hb_log( "encavcodec: framerate %d / %d is not supported. Using %d / %d.",
                    fps.num, fps.den, supported_fps.num, supported_fps.den );
            fps = supported_fps;
        }
    }
    else if ((fps.num & ~0xFFFF) || (fps.den & ~0xFFFF))
    {
        // This may only be required for mpeg4 video. But since
        // our only supported options are mpeg2 and mpeg4, there is
        // no need to check codec type.
        hb_log( "encavcodec: truncating framerate %d / %d",
                fps.num, fps.den );
        while ((fps.num & ~0xFFFF) || (fps.den & ~0xFFFF))
        {
            fps.num >>= 1;
            fps.den >>= 1;
        }
    }

    context->time_base.den = fps.num;
    context->time_base.num = fps.den;
    /* Roughly one keyframe per 10 seconds of video. */
    context->gop_size = 10 * (int)( (double)job->vrate / (double)job->vrate_base + 0.5 );

    /* place job->encoder_options in an hb_dict_t for convenience */
    hb_dict_t * lavc_opts = NULL;
    if (job->encoder_options != NULL && *job->encoder_options)
    {
        lavc_opts = hb_encopts_to_dict(job->encoder_options, job->vcodec);
    }
    /* iterate through lavc_opts and have avutil parse the options for us */
    AVDictionary * av_opts = NULL;
    hb_dict_entry_t * entry = NULL;
    while( ( entry = hb_dict_next( lavc_opts, entry ) ) )
    {
        /* Here's where the strings are passed to avutil for parsing. */
        av_dict_set( &av_opts, entry->key, entry->value, 0 );
    }
    hb_dict_free( &lavc_opts );

    // Now set the things in context that we don't want to allow
    // the user to override.
    if( job->vquality < 0.0 )
    {
        /* Average bitrate */
        context->bit_rate = 1000 * job->vbitrate;
        // ffmpeg's mpeg2 encoder requires that the bit_rate_tolerance be >=
        // bitrate * fps
        context->bit_rate_tolerance = context->bit_rate * av_q2d(fps) + 1;
    }
    else
    {
        /* Constant quantizer */
        // These settings produce better image quality than
        // what was previously used
        context->flags |= CODEC_FLAG_QSCALE;
        context->global_quality = FF_QP2LAMBDA * job->vquality + 0.5;
        //Set constant quality for libvpx
        if ( w->codec_param == AV_CODEC_ID_VP8 )
        {
            char quality[7];
            snprintf(quality, 7, "%.2f", job->vquality);
            av_dict_set( &av_opts, "crf", quality, 0 );
            //Setting the deadline to good and cpu-used to 0
            //causes the encoder to balance video quality and
            //encode time, with a bias to video quality.
            av_dict_set( &av_opts, "deadline", "good", 0);
            av_dict_set( &av_opts, "cpu-used", "0", 0);
            //This value was chosen to make the bitrate high enough
            //for libvpx to "turn off" the maximum bitrate feature
            //that is normally applied to constant quality.
            context->bit_rate = job->width*job->height*( (double)fps.num / (double)fps.den );
            hb_log( "encavcodec: encoding at CQ %.2f", job->vquality );
        }
        else
        {
            hb_log( "encavcodec: encoding at constant quantizer %d",
                    context->global_quality );
        }
    }
    context->width   = job->width;
    context->height  = job->height;
    context->pix_fmt = AV_PIX_FMT_YUV420P;

    if( job->anamorphic.mode )
    {
        context->sample_aspect_ratio.num = job->anamorphic.par_width;
        context->sample_aspect_ratio.den = job->anamorphic.par_height;
        hb_log( "encavcodec: encoding with stored aspect %d/%d",
                job->anamorphic.par_width, job->anamorphic.par_height );
    }
    if( job->mux & HB_MUX_MASK_MP4 )
    {
        context->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    if( job->grayscale )
    {
        context->flags |= CODEC_FLAG_GRAY;
    }

    if( job->pass != 0 && job->pass != -1 )
    {
        char filename[1024]; memset( filename, 0, 1024 );
        hb_get_tempory_filename( job->h, filename, "ffmpeg.log" );

        if( job->pass == 1 )
        {
            pv->file = hb_fopen(filename, "wb");
            if( pv->file == NULL )
            {
                hb_error( "encavcodecInit: Failed to open pass 1 stats file" );
                return 1;
            }
            context->flags |= CODEC_FLAG_PASS1;
        }
        else
        {
            int size;
            char * log;

            pv->file = hb_fopen(filename, "rb");
            if( pv->file == NULL )
            {
                hb_error( "encavcodecInit: Failed to open pass 1 stats file for reading" );
                return 1;
            }
            fseek( pv->file, 0, SEEK_END );
            size = ftell( pv->file );
            fseek( pv->file, 0, SEEK_SET );
            log = malloc( size + 1 );
            if( log == NULL ||
                ( size > 0 && fread( log, size, 1, pv->file ) != 1 ) )
            {
                hb_error( "encavcodecInit: Failed to read pass 1 stats" );
                free( log );
                fclose( pv->file );
                pv->file = NULL;
                return 1;
            }
            log[size] = '\0';
            fclose( pv->file );
            pv->file = NULL;
            context->flags    |= CODEC_FLAG_PASS2;
            context->stats_in  = log;
        }
    }

    if (hb_avcodec_open(context, codec, &av_opts, HB_FFMPEG_THREADS_AUTO))
    {
        hb_log( "encavcodecInit: avcodec_open failed" );
    }
    // avcodec_open populates the opts dictionary with the
    // things it didn't recognize.
    AVDictionaryEntry *t = NULL;
    while( ( t = av_dict_get( av_opts, "", t, AV_DICT_IGNORE_SUFFIX ) ) )
    {
        hb_log( "encavcodecInit: Unknown avcodec option %s", t->key );
    }
    av_dict_free( &av_opts );

    pv->context = context;

    job->areBframes = 0;
    if ( context->has_b_frames )
    {
        job->areBframes = 1;
    }

    /* MP4 needs the codec extradata (global headers) for the muxer. */
    if( ( job->mux & HB_MUX_MASK_MP4 ) && job->pass != 1 )
    {
        w->config->mpeg4.length = context->extradata_size;
        memcpy( w->config->mpeg4.bytes, context->extradata,
                context->extradata_size );
    }
    return 0;
}
/***********************************************************************
 * Close
 ***********************************************************************
 * Tear down the encoder and free private state.
 *
 * Fix: the AVCodecContext was only freed when context->codec was set,
 * so a failed avcodec_open leaked the context; it is now freed whether
 * or not the codec was ever opened.
 **********************************************************************/
void encavcodecClose( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;

    if( pv == NULL )
    {
        /* Init never allocated private data; nothing to release. */
        return;
    }
    if( pv->context )
    {
        if( pv->context->codec )
        {
            hb_deep_log( 2, "encavcodec: closing libavcodec" );
            avcodec_flush_buffers( pv->context );
            hb_avcodec_close( pv->context );
        }
        av_free( pv->context );
    }
    if( pv->file )
    {
        fclose( pv->file );
    }
    free( pv );
    w->private_data = NULL;
}
/*
* see comments in definition of 'frame_info' in pv struct for description
* of what these routines are doing.
*/
/* Cache this input frame's start time and duration, keyed by its frame
 * number, so they survive the encoder's frame reordering. */
static void save_frame_info( hb_work_private_t * pv, hb_buffer_t * in )
{
    int slot = pv->frameno_in & FRAME_INFO_MASK;

    pv->frame_info[slot].start    = in->s.start;
    pv->frame_info[slot].duration = in->s.stop - in->s.start;
}
/* Look up the cached start time for frame number `frameno`. */
static int64_t get_frame_start( hb_work_private_t * pv, int64_t frameno )
{
    int slot = frameno & FRAME_INFO_MASK;
    return pv->frame_info[slot].start;
}
/* Look up the cached duration for frame number `frameno`. */
static int64_t get_frame_duration( hb_work_private_t * pv, int64_t frameno )
{
    int slot = frameno & FRAME_INFO_MASK;
    return pv->frame_info[slot].duration;
}
/* Once the encoder's delay-th frame arrives, record its pts as the DTS
 * offset used to shift renderOffset for all output, and publish it as
 * the muxer's initial delay. */
static void compute_dts_offset( hb_work_private_t * pv, hb_buffer_t * buf )
{
    if ( pv->job->areBframes &&
         pv->frameno_in == pv->job->areBframes )
    {
        pv->dts_delay = buf->s.start;
        pv->job->config.h264.init_delay = pv->dts_delay;
    }
}
/* Translate a libavcodec picture type into HandBrake's frame type,
 * setting HB_FRAME_REF in *sflags for reference (intra/key) frames.
 * `pkt_flag_key` distinguishes IDR/keyframes from plain intra frames. */
static uint8_t convert_pict_type( int pict_type, char pkt_flag_key, uint16_t* sflags )
{
    uint8_t frametype;

    switch ( pict_type )
    {
        case AV_PICTURE_TYPE_P:
        case AV_PICTURE_TYPE_S:
        case AV_PICTURE_TYPE_SP:
            frametype = HB_FRAME_P;
            break;

        case AV_PICTURE_TYPE_B:
            frametype = HB_FRAME_B;
            break;

        case AV_PICTURE_TYPE_BI:
        case AV_PICTURE_TYPE_SI:
        case AV_PICTURE_TYPE_I:
            *sflags |= HB_FRAME_REF;
            frametype = pkt_flag_key ? HB_FRAME_IDR : HB_FRAME_I;
            break;

        default:
            if ( pkt_flag_key )
            {
                *sflags |= HB_FRAME_REF;
                frametype = HB_FRAME_KEY;
            }
            else
            {
                frametype = HB_FRAME_REF;
            }
            break;
    }
    return frametype;
}
// Generate DTS by rearranging PTS in this sequence:
// pts0 - delay, pts1 - delay, pts2 - delay, pts1, pts2, pts3...
//
// Where pts0 - ptsN are in decoded monotonically increasing presentation
// order and delay == pts1 (1 being the number of frames the decoder must
// delay before it has sufficient information to decode). The number of
// frames to delay is set by job->areBframes, so it is configurable.
// This guarantees that DTS <= PTS for any frame.
//
// This is similar to how x264 generates DTS
//
// Returns NULL while buffers are being queued (dts_delay not yet known),
// the single stamped buffer once the delay is known, or the whole queued
// chain (with `buf` appended) on the first call after the delay is set.
static hb_buffer_t * process_delay_list( hb_work_private_t * pv, hb_buffer_t * buf )
{
    if ( pv->job->areBframes )
    {
        // Has dts_delay been set yet?
        if ( pv->frameno_in <= pv->job->areBframes )
        {
            // dts_delay not yet set. queue up buffers till it is set.
            if ( pv->delay_tail == NULL )
            {
                pv->delay_head = pv->delay_tail = buf;
            }
            else
            {
                pv->delay_tail->next = buf;
                pv->delay_tail = buf;
            }
            return NULL;
        }

        // We have dts_delay. Apply it to any queued buffers renderOffset
        // and return all queued buffers.
        if ( pv->delay_tail == NULL && buf != NULL )
        {
            // Use the cached frame info to get the start time of Nth frame
            // Note that start Nth frame != start time this buffer since the
            // output buffers have rearranged start times.
            if (pv->frameno_out < pv->job->areBframes)
            {
                // Within the initial delay window: shift early so DTS <= PTS.
                int64_t start = get_frame_start( pv, pv->frameno_out );
                buf->s.renderOffset = start - pv->dts_delay;
            }
            else
            {
                // Past the window: DTS of frame N is the PTS of frame N - delay.
                buf->s.renderOffset =
                    get_frame_start(pv, pv->frameno_out - pv->job->areBframes);
            }
            pv->frameno_out++;
            return buf;
        }
        else
        {
            // First call after dts_delay became known: stamp and flush the
            // whole queue (buf, possibly NULL, is appended to its tail).
            pv->delay_tail->next = buf;
            buf = pv->delay_head;
            while ( buf )
            {
                // Use the cached frame info to get the start time of Nth frame
                // Note that start Nth frame != start time this buffer since the
                // output buffers have rearranged start times.
                if (pv->frameno_out < pv->job->areBframes)
                {
                    int64_t start = get_frame_start( pv, pv->frameno_out );
                    buf->s.renderOffset = start - pv->dts_delay;
                }
                else
                {
                    buf->s.renderOffset = get_frame_start(pv,
                                            pv->frameno_out - pv->job->areBframes);
                }
                buf = buf->next;
                pv->frameno_out++;
            }
            buf = pv->delay_head;
            pv->delay_head = pv->delay_tail = NULL;
            return buf;
        }
    }
    else if ( buf )
    {
        // No B-frames: DTS == PTS, no reordering needed.
        buf->s.renderOffset = buf->s.start;
        return buf;
    }
    return NULL;
}
/***********************************************************************
 * Work
 ***********************************************************************
 * Encode one input frame, or flush the encoder when *buf_in is the EOF
 * buffer (size <= 0).  Output buffers are stamped with timing from the
 * per-frame cache and chained; on the final call the EOF buffer is
 * appended so downstream sees it.
 *
 * Fixes:
 *  - buf->s.stop was computed as `stop + duration`, adding to the
 *    buffer's uninitialized stop value; it must be `start + duration`.
 *  - buf_last now tracks the TAIL of any chain returned by
 *    process_delay_list (and is left untouched when NULL is returned),
 *    so subsequent appends cannot clobber the chain.
 **********************************************************************/
int encavcodecWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                    hb_buffer_t ** buf_out )
{
    hb_work_private_t * pv = w->private_data;
    hb_job_t * job = pv->job;
    AVFrame * frame;
    hb_buffer_t * in = *buf_in, * buf;
    char final_flushing_call = (in->size <= 0);

    if ( final_flushing_call )
    {
        //make a flushing call to encode for codecs that can encode out of order
        /* EOF on input - send it downstream & say we're done */
        *buf_in = NULL;
        frame = NULL;
    }
    else
    {
        frame = av_frame_alloc();
        frame->data[0]     = in->plane[0].data;
        frame->data[1]     = in->plane[1].data;
        frame->data[2]     = in->plane[2].data;
        frame->linesize[0] = in->plane[0].stride;
        frame->linesize[1] = in->plane[1].stride;
        frame->linesize[2] = in->plane[2].stride;

        // For constant quality, setting the quality in AVCodecContext
        // doesn't do the trick. It must be set in the AVFrame.
        frame->quality = pv->context->global_quality;

        // Remember info about this frame that we need to pass across
        // the avcodec_encode_video call (since it reorders frames).
        save_frame_info( pv, in );
        compute_dts_offset( pv, in );

        // Bizarro ffmpeg appears to require the input AVFrame.pts to be
        // set to a frame number. Setting it to an actual pts causes
        // jerky video.
        // frame->pts = in->s.start;
        frame->pts = pv->frameno_in++;
    }

    if ( pv->context->codec )
    {
        int ret;
        AVPacket pkt;
        int got_packet;
        char still_flushing = final_flushing_call;
        hb_buffer_t* buf_head = NULL;
        hb_buffer_t* buf_last = NULL;

        do
        {
            av_init_packet(&pkt);
            /* Should be way too large */
            buf = hb_video_buffer_init( job->width, job->height );
            pkt.data = buf->data;
            pkt.size = buf->alloc;

            ret = avcodec_encode_video2( pv->context, &pkt, frame, &got_packet );
            if ( ret < 0 || pkt.size <= 0 || !got_packet )
            {
                hb_buffer_close( &buf );
                still_flushing = 0;
            }
            else
            {
                // AVFrame.pts was the input frame number, so pkt.pts is
                // the frame number of the packet we just got back.
                int64_t frameno   = pkt.pts;
                buf->size         = pkt.size;
                buf->s.start      = get_frame_start( pv, frameno );
                buf->s.duration   = get_frame_duration( pv, frameno );
                buf->s.stop       = buf->s.start + buf->s.duration;
                buf->s.flags     &= ~HB_FRAME_REF;
                buf->s.frametype  = convert_pict_type( pv->context->coded_frame->pict_type, pkt.flags & AV_PKT_FLAG_KEY, &buf->s.flags );
                buf = process_delay_list( pv, buf );

                if (buf_head == NULL)
                {
                    buf_head = buf;
                }
                else if (buf != NULL)
                {
                    buf_last->next = buf;
                }
                if (buf != NULL)
                {
                    // process_delay_list may return a whole queued chain;
                    // keep buf_last at the real tail.
                    hb_buffer_t *tail = buf;
                    while (tail->next != NULL)
                    {
                        tail = tail->next;
                    }
                    buf_last = tail;
                }
            }
            /* Write stats */
            if (job->pass == 1 && pv->context->stats_out != NULL)
            {
                fprintf( pv->file, "%s", pv->context->stats_out );
            }
        } while (still_flushing);

        if (buf_last != NULL && final_flushing_call)
        {
            buf_last->next = in;
            buf = buf_head;
        }
        else if (final_flushing_call)
        {
            buf = in;
        }
    }
    else
    {
        buf = NULL;
        hb_error( "encavcodec: codec context has uninitialized codec; skipping frame" );
    }

    av_frame_free( &frame );

    *buf_out = buf;

    return final_flushing_call? HB_WORK_DONE : HB_WORK_OK;
}
HandBrake-0.10.2/libhb/detelecine.c 0000664 0001752 0001752 00000063157 12463330511 017376 0 ustar handbrake handbrake /* detelecine.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
#include "hbffmpeg.h"
/*
*
* PULLUP DEFINITIONS
*
*/
#define PULLUP_FMT_Y 1
#define PULLUP_HAVE_BREAKS 1
#define PULLUP_HAVE_AFFINITY 2
#define PULLUP_BREAK_LEFT 1
#define PULLUP_BREAK_RIGHT 2
#define PULLUP_ABS( a ) (((a)^((a)>>31))-((a)>>31))
#ifndef PIC_FLAG_REPEAT_FIRST_FIELD
#define PIC_FLAG_REPEAT_FIRST_FIELD 256
#endif
/* One frame-sized pixel buffer.  Each buffer holds two interleaved
 * fields; lock[0]/lock[1] reference-count the even/odd field halves
 * independently so one field can be shared between output frames. */
struct pullup_buffer
{
    int lock[2];              /* per-parity reference counts */
    unsigned char **planes;   /* one malloc'd plane per context nplanes */
    int *size;                /* byte size of each plane */
};

/* One field in the circular analysis queue. */
struct pullup_field
{
    int parity;                     /* 0 = top field, 1 = bottom field */
    struct pullup_buffer *buffer;   /* locked buffer this field lives in */
    unsigned int flags;             /* PULLUP_HAVE_BREAKS / PULLUP_HAVE_AFFINITY */
    int breaks;                     /* PULLUP_BREAK_LEFT / PULLUP_BREAK_RIGHT */
    int affinity;                   /* -1 pairs with prev, +1 pairs with next */
    int *diffs;                     /* per-block inter-field difference metric */
    int *comb;                      /* per-block combing metric */
    int *var;                       /* per-block intra-field variance metric */
    struct pullup_field *prev, *next;   /* circular doubly-linked list */
};

/* A reconstructed progressive frame built from 1-3 source fields. */
struct pullup_frame
{
    int lock;                 /* held while the caller owns the frame */
    int length;               /* number of input fields consumed (1-3) */
    int parity;               /* parity of the first input field */
    struct pullup_buffer **ifields, *ofields[2];  /* input fields; chosen output field per parity */
    struct pullup_buffer *buffer;   /* packed progressive result, if built */
};

/* All state for one pullup (inverse telecine) instance. */
struct pullup_context
{
    /* Public interface */
    int format;               /* only PULLUP_FMT_Y is implemented here */
    int nplanes;
    int *bpp, *w, *h, *stride, *background;  /* per-plane geometry set by caller */
    unsigned int cpu;
    int junk_left, junk_right, junk_top, junk_bottom;  /* margins excluded from metrics */
    int verbose;
    int metric_plane;         /* plane index used for the metrics */
    int strict_breaks;
    int strict_pairs;
    int parity;               /* forced field parity, or -1 for auto */
    /* Internal data */
    struct pullup_field *first, *last, *head;  /* circular field queue */
    struct pullup_buffer *buffers;
    int nbuffers;
    int (*diff)(unsigned char *, unsigned char *, int);
    int (*comb)(unsigned char *, unsigned char *, int);
    int (*var)(unsigned char *, unsigned char *, int);
    int metric_w, metric_h, metric_len, metric_offset;
    struct pullup_frame *frame;
};
/*
*
* DETELECINE FILTER DEFINITIONS
*
*/
/* Per-filter state: the pullup context plus bookkeeping used while the
 * field queue fills and while duplicate frames are being dropped. */
struct hb_filter_private_s
{
    struct pullup_context * pullup_ctx;
    int                     pullup_fakecount;
    int                     pullup_skipflag;
};

static int hb_detelecine_init( hb_filter_object_t * filter,
                               hb_filter_init_t * init );

static int hb_detelecine_work( hb_filter_object_t * filter,
                               hb_buffer_t ** buf_in,
                               hb_buffer_t ** buf_out );

static void hb_detelecine_close( hb_filter_object_t * filter );

/* Filter registration record consumed by the HandBrake filter chain. */
hb_filter_object_t hb_filter_detelecine =
{
    .id            = HB_FILTER_DETELECINE,
    .enforce_order = 1,
    .name          = "Detelecine (pullup)",
    .settings      = NULL,
    .init          = hb_detelecine_init,
    .work          = hb_detelecine_work,
    .close         = hb_detelecine_close,
};
/*
*
* PULLUP STATIC FUNCTIONS
*
*/
/* Sum of absolute differences between two 8x4 luma blocks, where `s` is
 * the field stride (rows of the same field are `s` bytes apart). */
static int pullup_diff_y( unsigned char *a, unsigned char * b, int s )
{
    int row, col;
    int total = 0;

    for( row = 0; row < 4; row++ )
    {
        for( col = 0; col < 8; col++ )
        {
            int d = a[col] - b[col];
            total += ( d < 0 ) ? -d : d;
        }
        a += s;
        b += s;
    }
    return total;
}
/* Combing metric for an 8x4 block: measures how far each line deviates
 * from the average of the two opposite-field lines surrounding it.
 * `a` and `b` are opposite fields with field stride `s`; b[-s] and
 * a[4*s .. 4*s+7] must be readable (they lie inside the frame). */
static int pullup_licomb_y( unsigned char * a, unsigned char * b, int s )
{
    int row, col;
    int total = 0;

    for( row = 0; row < 4; row++ )
    {
        for( col = 0; col < 8; col++ )
        {
            int d1 = ( a[col] << 1 ) - b[col - s] - b[col];
            int d2 = ( b[col] << 1 ) - a[col] - a[col + s];
            total += ( ( d1 < 0 ) ? -d1 : d1 )
                   + ( ( d2 < 0 ) ? -d2 : d2 );
        }
        a += s;
        b += s;
    }
    return total;
}
/* Intra-field variance of an 8-wide block: 4x the summed absolute
 * line-to-line differences over three row pairs of field `a` (stride s).
 * The second field pointer is unused but kept for the shared metric
 * function signature. */
static int pullup_var_y( unsigned char * a, unsigned char * b, int s )
{
    int row, col;
    int total = 0;

    (void)b;
    for( row = 0; row < 3; row++ )
    {
        for( col = 0; col < 8; col++ )
        {
            int d = a[col] - a[col + s];
            total += ( d < 0 ) ? -d : d;
        }
        a += s;
    }
    return 4 * total;
}
/* Allocate the three zeroed per-block metric arrays for one field. */
static void pullup_alloc_metrics( struct pullup_context * c,
                                  struct pullup_field * f )
{
    size_t len = c->metric_len;

    f->diffs = calloc( len, sizeof(int) );
    f->comb  = calloc( len, sizeof(int) );
    f->var   = calloc( len, sizeof(int) );
}
/* Fill dest[] with one metric value per 8x8 metric block, comparing
 * field `pa` of `fa` against field `pb` of `fb` with `func`.  The junk
 * margins configured in the context are skipped via metric_offset and
 * the metric_w/metric_h block counts.  No-op if either field has no
 * buffer; all zeros for a field compared against itself (RFF dups). */
static void pullup_compute_metric( struct pullup_context * c,
                                   struct pullup_field * fa, int pa,
                                   struct pullup_field * fb, int pb,
                                   int (* func)( unsigned char *,
                                                 unsigned char *, int),
                                   int * dest )
{
    unsigned char *a, *b;
    int x, y;
    int mp    = c->metric_plane;
    int xstep = c->bpp[mp];
    int ystep = c->stride[mp]<<3;   /* 8 frame lines = one block row */
    int s     = c->stride[mp]<<1;   /* field stride */
    int w     = c->metric_w*xstep;

    if( !fa->buffer || !fb->buffer ) return;

    /* Shortcut for duplicate fields (e.g. from RFF flag) */
    if( fa->buffer == fb->buffer && pa == pb )
    {
        memset( dest, 0, c->metric_len * sizeof(int) );
        return;
    }

    /* Start of the requested field, past the junk margins. */
    a = fa->buffer->planes[mp] + pa * c->stride[mp] + c->metric_offset;
    b = fb->buffer->planes[mp] + pb * c->stride[mp] + c->metric_offset;

    for( y = c->metric_h; y; y-- )
    {
        for( x = 0; x < w; x += xstep )
        {
            *dest++ = func( a + x, b + x, s );
        }
        a += ystep; b += ystep;
    }
}
/* Build a circular doubly-linked queue of len+1 fields, each with its
 * metric arrays allocated, and return its head. */
static struct pullup_field * pullup_make_field_queue( struct pullup_context * c,
                                                      int len )
{
    struct pullup_field *head, *cur;

    head = calloc( 1, sizeof(struct pullup_field) );
    pullup_alloc_metrics( c, head );

    cur = head;
    while( len-- > 0 )
    {
        struct pullup_field *node = calloc( 1, sizeof(struct pullup_field) );
        node->prev = cur;
        cur->next  = node;
        cur = node;
        pullup_alloc_metrics( c, cur );
    }
    /* Close the ring. */
    cur->next  = head;
    head->prev = cur;
    return head;
}
/* Grow the circular queue by one node when the write head has caught up
 * with the oldest unconsumed field. */
static void pullup_check_field_queue( struct pullup_context * c )
{
    struct pullup_field *node;

    if( c->head->next != c->first )
        return;

    node = calloc( 1, sizeof(struct pullup_field) );
    pullup_alloc_metrics( c, node );
    /* Splice the new node between head and first. */
    node->prev = c->head;
    node->next = c->first;
    c->head->next  = node;
    c->first->prev = node;
}
/* Copy one field (every other line, selected by parity) of src into the
 * corresponding lines of dest, across all planes. */
static void pullup_copy_field( struct pullup_context * c,
                               struct pullup_buffer * dest,
                               struct pullup_buffer * src,
                               int parity )
{
    int plane;

    for( plane = 0; plane < c->nplanes; plane++ )
    {
        int stride          = c->stride[plane];
        unsigned char *from = src->planes[plane]  + parity * stride;
        unsigned char *to   = dest->planes[plane] + parity * stride;
        int rows            = c->h[plane] >> 1;

        while( rows-- )
        {
            memcpy( to, from, stride );
            from += stride << 1;
            to   += stride << 1;
        }
    }
}
static int pullup_queue_length( struct pullup_field * begin,
struct pullup_field * end )
{
int count = 1;
struct pullup_field * f;
if( !begin || !end ) return 0;
for( f = begin; f != end; f = f->next ) count++;
return count;
}
/* Distance (1-based) to the first field boundary marked as a break,
 * scanning at most `max` fields forward from f; 0 if none found. */
static int pullup_find_first_break( struct pullup_field * f, int max )
{
    int i;

    for( i = 1; i <= max; i++ )
    {
        if( ( f->breaks & PULLUP_BREAK_RIGHT ) ||
            ( f->next->breaks & PULLUP_BREAK_LEFT ) )
        {
            return i;
        }
        f = f->next;
    }
    return 0;
}
/* Decide whether a scene/cadence break falls around field f0 by looking
 * at the next three fields.  Marks PULLUP_BREAK_LEFT/RIGHT on f1/f2 and
 * sets PULLUP_HAVE_BREAKS on f0 so the work is done at most once. */
static void pullup_compute_breaks( struct pullup_context * c,
                                   struct pullup_field * f0 )
{
    int i;
    struct pullup_field *f1 = f0->next;
    struct pullup_field *f2 = f1->next;
    struct pullup_field *f3 = f2->next;
    int l, max_l=0, max_r=0;

    if( f0->flags & PULLUP_HAVE_BREAKS ) return;
    f0->flags |= PULLUP_HAVE_BREAKS;

    /* Special case when fields are 100% identical */
    if( f0->buffer == f2->buffer && f1->buffer != f3->buffer )
    {
        f2->breaks |= PULLUP_BREAK_RIGHT;
        return;
    }
    if( f0->buffer != f2->buffer && f1->buffer == f3->buffer )
    {
        f1->breaks |= PULLUP_BREAK_LEFT;
        return;
    }

    /* Find the strongest one-sided difference between the f2 and f3
     * same-parity diff metrics, per 8x8 block. */
    for( i = 0; i < c->metric_len; i++ )
    {
        l = f2->diffs[i] - f3->diffs[i];
        if( l > max_l) max_l = l;
        if( -l > max_r) max_r = -l;
    }

    /* Don't get tripped up when differences are mostly quant error */
    if( max_l + max_r < 128 ) return;
    /* A strongly one-sided difference marks a break on that side. */
    if( max_l > 4*max_r ) f1->breaks |= PULLUP_BREAK_LEFT;
    if( max_r > 4*max_l ) f2->breaks |= PULLUP_BREAK_RIGHT;
}
/* Decide which neighbour field f pairs with: affinity -1 means "belongs
 * with prev", +1 means "belongs with next", 0 undecided.  Based on the
 * combing metric relative to the intra-field variance of each side.
 * Sets PULLUP_HAVE_AFFINITY so the work is done at most once. */
static void pullup_compute_affinity( struct pullup_context * c,
                                     struct pullup_field * f )
{
    int i;
    int max_l = 0, max_r = 0, l;

    if( f->flags & PULLUP_HAVE_AFFINITY )
    {
        return;
    }
    f->flags |= PULLUP_HAVE_AFFINITY;

    /* A repeated field two positions ahead pins the whole triplet. */
    if( f->buffer == f->next->next->buffer )
    {
        f->affinity             =  1;
        f->next->affinity       =  0;
        f->next->next->affinity = -1;
        f->next->flags       |= PULLUP_HAVE_AFFINITY;
        f->next->next->flags |= PULLUP_HAVE_AFFINITY;
        return;
    }

    for( i = 0; i < c->metric_len; i++ )
    {
        int lv = f->prev->var[i];
        int rv = f->next->var[i];
        int v  = f->var[i];
        /* Combing above what the fields' own detail explains. */
        int lc = f->comb[i]       - (v+lv) + PULLUP_ABS( v-lv );
        int rc = f->next->comb[i] - (v+rv) + PULLUP_ABS( v-rv );
        lc = (lc > 0) ? lc : 0;
        rc = (rc > 0) ? rc : 0;
        l = lc - rc;
        if( l > max_l ) max_l = l;
        if( -l > max_r ) max_r = -l;
    }

    /* Too small a signal to call either way. */
    if( max_l + max_r < 64 )
    {
        return;
    }
    if( max_r > 6*max_l )
    {
        f->affinity = -1;    /* combs badly with next -> pair with prev */
    }
    else if( max_l > 6*max_r )
    {
        f->affinity = 1;     /* combs badly with prev -> pair with next */
    }
}
/* Run break and affinity analysis over the queued fields: breaks need
 * four fields of lookahead, affinity needs two. */
static void pullup_foo( struct pullup_context * c )
{
    struct pullup_field *f = c->first;
    int total = pullup_queue_length( f, c->last );
    int i;

    for( i = 0; i < total - 1; i++ )
    {
        if( i < total - 3 )
        {
            pullup_compute_breaks( c, f );
        }
        pullup_compute_affinity( c, f );
        f = f->next;
    }
}
/* Decide how many queued fields (1-3) make up the next output frame,
 * using the break and affinity analysis.  Returns 0 when fewer than
 * four fields are queued (not enough lookahead to decide). */
static int pullup_decide_frame_length( struct pullup_context * c )
{
    struct pullup_field *f0 = c->first;
    struct pullup_field *f1 = f0->next;
    struct pullup_field *f2 = f1->next;
    int l;

    if( pullup_queue_length( c->first, c->last ) < 4 )
    {
        return 0;
    }

    /* Make sure breaks/affinity are computed for the queued fields. */
    pullup_foo( c );

    /* f0 pairs backwards: it completes the previous frame on its own. */
    if( f0->affinity == -1 ) return 1;

    l = pullup_find_first_break( f0, 3 );
    if( l == 1 && c->strict_breaks < 0 ) l = 0;

    switch (l)
    {
        case 1:
            /* Break right after f0: frame is f0 alone, unless a clear
             * f0/f1 pairing overrides a non-strict break. */
            if ( c->strict_breaks < 1 &&
                 f0->affinity == 1 &&
                 f1->affinity == -1 )
            {
                return 2;
            }
            else
            {
                return 1;
            }
        case 2:
            /* FIXME: strictly speaking, f0->prev is no longer valid... :) */
            if( c->strict_pairs &&
                (f0->prev->breaks & PULLUP_BREAK_RIGHT) &&
                (f2->breaks & PULLUP_BREAK_LEFT) &&
                (f0->affinity != 1 || f1->affinity != -1) )
            {
                return 1;
            }
            if( f1->affinity == 1 )
            {
                return 1;
            }
            else
            {
                return 2;
            }
        case 3:
            if( f2->affinity == 1 )
            {
                return 2;
            }
            else
            {
                return 3;
            }
        default:
            /* No break found: fall back to affinities alone.
             * 9 possibilities covered before switch */
            if( f1->affinity == 1 )
            {
                return 1; /* covers 6 */
            }
            else if( f1->affinity == -1 )
            {
                return 2; /* covers 6 */
            }
            else if( f2->affinity == -1 )
            {
                /* covers 2 */
                if( f0->affinity == 1 )
                {
                    return 3;
                }
                else
                {
                    return 1;
                }
            }
            else
            {
                return 2; /* the remaining 6 */
            }
    }
}
/* Verbose-mode debug dump of the affinity and break flags of the next
 * four queued fields.
 *
 * Fix: the "affinity: " label previously went through hb_log (its own
 * stream/format with a trailing newline) while the values used printf,
 * so label and data ended up separated; everything now goes through
 * printf on the same line, matching the "breaks:" output below. */
static void pullup_print_aff_and_breaks(struct pullup_context * c,
                                        struct pullup_field * f )
{
    int i;
    struct pullup_field * f0 = f;
    const char aff_l[] = "+..", aff_r[] = "..+";

    printf( "affinity: " );
    for( i = 0; i < 4; i++ )
    {
        printf( "%c%d%c",
                aff_l[1+f->affinity],
                i,
                aff_r[1+f->affinity] );
        f = f->next;
    }
    f = f0;
    printf("\nbreaks: ");
    for( i = 0; i < 4; i++ )
    {
        printf( "%c%d%c",
                f->breaks & PULLUP_BREAK_LEFT  ? '|' : '.',
                i,
                f->breaks & PULLUP_BREAK_RIGHT ? '|' : '.' );
        f = f->next;
    }
    printf("\n");
}
/*
*
* PULLUP CONTEXT FUNCTIONS
*
*/
/* Allocate a zero-initialized pullup context; the caller fills in the
 * public fields and then runs preinit/init. */
struct pullup_context * pullup_alloc_context( void )
{
    return calloc( 1, sizeof(struct pullup_context) );
}
/* Allocate the per-plane geometry arrays; c->nplanes must already be
 * set.  The caller fills these in before pullup_init_context. */
void pullup_preinit_context( struct pullup_context * c )
{
    size_t n = c->nplanes;

    c->bpp        = calloc( n, sizeof(int) );
    c->w          = calloc( n, sizeof(int) );
    c->h          = calloc( n, sizeof(int) );
    c->stride     = calloc( n, sizeof(int) );
    c->background = calloc( n, sizeof(int) );
}
/* Finish context setup once the per-plane geometry is known: size the
 * metric grid (8x8 blocks of the metric plane, minus junk margins),
 * allocate the buffer pool, the field queue and the output frame, and
 * hook up the metric functions for the configured format. */
void pullup_init_context( struct pullup_context * c )
{
    int mp = c->metric_plane;
    if ( c->nbuffers < 10 )
    {
        c->nbuffers = 10;
    }
    c->buffers = calloc( c->nbuffers, sizeof (struct pullup_buffer) );

    /* Metric grid: 8x8 blocks; junk_left/right are in 8-pixel units,
     * junk_top/bottom in pairs of lines. */
    c->metric_w      = (c->w[mp] - ((c->junk_left + c->junk_right) << 3)) >> 3;
    c->metric_h      = (c->h[mp] - ((c->junk_top + c->junk_bottom) << 1)) >> 3;
    c->metric_offset = c->junk_left*c->bpp[mp] + (c->junk_top<<1)*c->stride[mp];
    c->metric_len    = c->metric_w * c->metric_h;

    c->head = pullup_make_field_queue( c, 8 );

    c->frame = calloc( 1, sizeof (struct pullup_frame) );
    c->frame->ifields = calloc( 3, sizeof (struct pullup_buffer *) );

    if( c->format == PULLUP_FMT_Y )
    {
        c->diff = pullup_diff_y;
        c->comb = pullup_licomb_y;
        c->var  = pullup_var_y;
    }
}
/* Free everything owned by the context.
 *
 * Fixes several leaks: the `var` metric array (allocated alongside
 * diffs/comb in pullup_alloc_metrics) was never freed, nor were the
 * buffer plane data, the frame's ifields array, or the per-plane
 * geometry arrays from pullup_preinit_context. */
void pullup_free_context( struct pullup_context * c )
{
    struct pullup_field * f;
    int i, p;

    /* Buffer pool: each allocated buffer owns its planes/size arrays. */
    for( i = 0; i < c->nbuffers; i++ )
    {
        if( c->buffers[i].planes )
        {
            for( p = 0; p < c->nplanes; p++ )
            {
                free( c->buffers[i].planes[p] );
            }
            free( c->buffers[i].planes );
            free( c->buffers[i].size );
        }
    }
    free( c->buffers );

    /* Circular field queue: free each node's metrics, then the node. */
    f = c->head->next;
    while( f != c->head )
    {
        free( f->diffs );
        free( f->comb );
        free( f->var );
        f = f->next;
        free( f->prev );
    }
    free( f->diffs );
    free( f->comb );
    free( f->var );
    free( f );

    /* Per-plane geometry arrays from pullup_preinit_context. */
    free( c->bpp );
    free( c->w );
    free( c->h );
    free( c->stride );
    free( c->background );

    free( c->frame->ifields );
    free( c->frame );
    free( c );
}
/*
*
* PULLUP BUFFER FUNCTIONS
*
*/
/* Lazily allocate the plane storage for a pool buffer; a no-op if the
 * buffer already has planes. */
static void pullup_alloc_buffer( struct pullup_context * c,
                                 struct pullup_buffer * b )
{
    int p;

    if( b->planes != NULL ) return;

    b->planes = calloc( c->nplanes, sizeof(unsigned char *) );
    b->size   = calloc( c->nplanes, sizeof(int) );
    for ( p = 0; p < c->nplanes; p++ )
    {
        int bytes = c->h[p] * c->stride[p];
        b->size[p]   = bytes;
        b->planes[p] = malloc( bytes );
        /* Fill with the plane's neutral background (chroma neutral is
         * 128, not 0). */
        memset( b->planes[p], c->background[p], bytes );
    }
}
struct pullup_buffer * pullup_lock_buffer( struct pullup_buffer * b,
int parity )
{
if( !b ) return 0;
if( (parity+1) & 1 ) b->lock[0]++;
if( (parity+1) & 2 ) b->lock[1]++;
return b;
}
void pullup_release_buffer( struct pullup_buffer * b,
int parity )
{
if( !b ) return;
if( (parity+1) & 1 ) b->lock[0]--;
if( (parity+1) & 2 ) b->lock[1]--;
}
/* Hand out a locked buffer from the pool for a field of the given
 * parity (2 = need both fields free).  Returns 0 when the pool is
 * exhausted. */
struct pullup_buffer * pullup_get_buffer( struct pullup_context * c,
                                          int parity )
{
    int i;

    /* Try first to get the sister buffer for the previous field */
    if( parity < 2 &&
        c->last &&
        parity != c->last->parity &&
        !c->last->buffer->lock[parity])
    {
        pullup_alloc_buffer( c, c->last->buffer );
        return pullup_lock_buffer( c->last->buffer, parity );
    }

    /* Prefer a buffer with both fields open */
    for( i = 0; i < c->nbuffers; i++ )
    {
        if( c->buffers[i].lock[0] ) continue;
        if( c->buffers[i].lock[1] ) continue;
        pullup_alloc_buffer( c, &c->buffers[i] );
        return pullup_lock_buffer( &c->buffers[i], parity );
    }

    /* A request for both fields can only use a fully free buffer. */
    if( parity == 2 ) return 0;

    /* Search for any half-free buffer */
    for( i = 0; i < c->nbuffers; i++ )
    {
        if( ((parity+1) & 1) && c->buffers[i].lock[0] ) continue;
        if( ((parity+1) & 2) && c->buffers[i].lock[1] ) continue;
        pullup_alloc_buffer( c, &c->buffers[i] );
        return pullup_lock_buffer( &c->buffers[i], parity );
    }

    return 0;
}
/*
*
* PULLUP FRAME FUNCTIONS
*
*/
/* Pull the next complete frame (1-3 fields) off the front of the field
 * queue.  Returns 0 when there aren't enough fields to decide or the
 * frame slot is still locked by the caller.  On success the frame's
 * ifields hold the consumed fields, ofields[] the field chosen for each
 * parity, and, when both parities come from the same buffer, `buffer`
 * is set to that already-progressive buffer. */
struct pullup_frame * pullup_get_frame( struct pullup_context * c )
{
    int i;
    struct pullup_frame * fr = c->frame;
    int n = pullup_decide_frame_length( c );
    int aff = c->first->next->affinity;

    if ( !n ) return 0;
    if ( fr->lock ) return 0;

    if ( c->verbose )
    {
        pullup_print_aff_and_breaks(c, c->first);
        printf("duration: %d \n", n);
    }

    fr->lock++;
    fr->length = n;
    fr->parity = c->first->parity;
    fr->buffer = 0;
    for( i = 0; i < n; i++ )
    {
        /* We cheat and steal the buffer without release+relock */
        fr->ifields[i] = c->first->buffer;
        c->first->buffer = 0;
        c->first = c->first->next;
    }

    if( n == 1 )
    {
        /* Single field: the other parity must be filled in by pack. */
        fr->ofields[fr->parity] = fr->ifields[0];
        fr->ofields[fr->parity^1] = 0;
    }
    else if( n == 2 )
    {
        fr->ofields[fr->parity] = fr->ifields[0];
        fr->ofields[fr->parity^1] = fr->ifields[1];
    }
    else if( n == 3 )
    {
        /* Three fields: the middle field's affinity picks which of the
         * outer (same-parity) fields to keep. */
        if( aff == 0 )
        {
            aff = (fr->ifields[0] == fr->ifields[1]) ? -1 : 1;
        }
        fr->ofields[fr->parity] = fr->ifields[1+aff];
        fr->ofields[fr->parity^1] = fr->ifields[1];
    }

    pullup_lock_buffer( fr->ofields[0], 0 );
    pullup_lock_buffer( fr->ofields[1], 1 );

    /* Both fields already share one buffer: frame is ready as-is.
     * (The early return is redundant with the fall-through but kept.) */
    if( fr->ofields[0] == fr->ofields[1] )
    {
        fr->buffer = fr->ofields[0];
        pullup_lock_buffer(fr->buffer, 2);
        return fr;
    }
    return fr;
}
/* Combine the frame's two chosen fields into a single progressive
 * buffer.  Reuses one of the field buffers in place when its other
 * field half is free; otherwise copies both fields into a fresh
 * buffer from the pool. */
void pullup_pack_frame( struct pullup_context * c, struct pullup_frame * fr)
{
    int i;
    if (fr->buffer) return;             /* already packed */
    if (fr->length < 2) return; /* FIXME: deal with this */
    for( i = 0; i < 2; i++ )
    {
        /* Can't write into a field half someone else still holds. */
        if( fr->ofields[i]->lock[i^1] ) continue;
        fr->buffer = fr->ofields[i];
        pullup_lock_buffer(fr->buffer, 2);
        pullup_copy_field( c, fr->buffer, fr->ofields[i^1], i^1 );
        return;
    }
    /* Neither field buffer is reusable: build the frame in a new one. */
    fr->buffer = pullup_get_buffer( c, 2 );
    pullup_copy_field( c, fr->buffer, fr->ofields[0], 0 );
    pullup_copy_field( c, fr->buffer, fr->ofields[1], 1 );
}
/*
 * Drop all buffer references held by a frame (input fields, output
 * fields, and the packed buffer if any), then clear the frame lock so
 * pullup_get_frame() can reuse the slot.
 */
void pullup_release_frame( struct pullup_frame * fr )
{
    int i;
    for( i = 0; i < fr->length; i++ )
    {
        /* field parities alternate, starting from the frame's parity */
        pullup_release_buffer( fr->ifields[i], fr->parity ^ (i&1) );
    }
    pullup_release_buffer( fr->ofields[0], 0 );
    pullup_release_buffer( fr->ofields[1], 1 );
    if (fr->buffer) pullup_release_buffer( fr->buffer, 2 );
    fr->lock--;
}
/*
*
* PULLUP FIELD FUNCTIONS
*
*/
/*
 * Append one field of the given parity to the circular field queue and
 * compute its comparison metrics:
 *   diffs - vs. the previous field of the same parity (f->prev->prev)
 *   comb  - vs. the adjacent opposite-parity field (order fixed by parity)
 *   var   - field compared against itself (self metric)
 * A field arriving with the same parity as the previous one is dropped.
 */
void pullup_submit_field( struct pullup_context * c,
                          struct pullup_buffer * b,
                          int parity )
{
    struct pullup_field * f;

    /* Grow the circular list if needed */
    pullup_check_field_queue( c );

    /* Cannot have two fields of same parity in a row; drop the new one */
    if( c->last && c->last->parity == parity ) return;

    f = c->head;
    f->parity = parity;
    f->buffer = pullup_lock_buffer( b, parity );
    f->flags = 0;
    f->breaks = 0;
    f->affinity = 0;

    /* Argument order matters: comb always compares (top, bottom). */
    pullup_compute_metric( c, f, parity, f->prev->prev,
                           parity, c->diff, f->diffs );
    pullup_compute_metric( c, parity?f->prev:f, 0,
                           parity?f:f->prev, 1, c->comb, f->comb );
    pullup_compute_metric( c, f, parity, f,
                           -1, c->var, f->var );

    /* Advance the circular list */
    if( !c->first ) c->first = c->head;
    c->last = c->head;
    c->head = c->head->next;
}
/*
 * Drop every queued field: walk from the oldest entry up to (but not
 * including) the queue head, releasing each field's buffer reference,
 * then mark the queue empty.
 */
void pullup_flush_fields( struct pullup_context * c )
{
    struct pullup_field * cur = c->first;
    while( cur && cur != c->head )
    {
        pullup_release_buffer( cur->buffer, cur->parity );
        cur->buffer = 0;
        cur = cur->next;
    }
    c->first = 0;
    c->last = 0;
}
/*
*
* DETELECINE FILTER FUNCTIONS
*
*/
/*
 * Initialize the detelecine (pullup) filter: allocate the private state
 * and a pullup context, apply user settings ("l:r:t:b:strict:plane:parity"),
 * and describe the four planes (luma, two chroma planes, and a
 * per-macroblock metrics workspace) to pullup.  Returns 0 on success.
 */
static int hb_detelecine_init( hb_filter_object_t * filter,
                               hb_filter_init_t * init )
{
    filter->private_data = calloc( sizeof(struct hb_filter_private_s), 1 );
    hb_filter_private_t * pv = filter->private_data;

    struct pullup_context * ctx;
    pv->pullup_ctx = ctx = pullup_alloc_context();

    /* Defaults: ignore 1 column left/right and 4 rows top/bottom of
       "junk" when computing metrics; autodetect breaks and parity. */
    ctx->junk_left = ctx->junk_right  = 1;
    ctx->junk_top  = ctx->junk_bottom = 4;
    ctx->strict_breaks = -1;
    ctx->metric_plane  = 0;
    ctx->parity        = -1;

    if( filter->settings )
    {
        sscanf( filter->settings, "%d:%d:%d:%d:%d:%d:%d",
                &ctx->junk_left,
                &ctx->junk_right,
                &ctx->junk_top,
                &ctx->junk_bottom,
                &ctx->strict_breaks,
                &ctx->metric_plane,
                &ctx->parity );
    }

    ctx->format  = PULLUP_FMT_Y;
    ctx->nplanes = 4;
    pullup_preinit_context( ctx );
    ctx->bpp[0] = ctx->bpp[1] = ctx->bpp[2] = 8;
    ctx->background[1] = ctx->background[2] = 128;

    /* Plane 0: luma */
    ctx->w[0]      = init->width;
    ctx->h[0]      = hb_image_height( init->pix_fmt, init->height, 0 );
    ctx->stride[0] = hb_image_stride( init->pix_fmt, init->width, 0 );

    /* Planes 1 & 2: chroma at half width */
    ctx->w[1]      = init->width >> 1;
    ctx->h[1]      = hb_image_height( init->pix_fmt, init->height, 1 );
    ctx->stride[1] = hb_image_stride( init->pix_fmt, init->width, 1 );

    /* Bug fix: this line previously re-assigned ctx->w[1], leaving
       ctx->w[2] unset so the second chroma plane had no width. */
    ctx->w[2]      = init->width >> 1;
    ctx->h[2]      = hb_image_height( init->pix_fmt, init->height, 2 );
    ctx->stride[2] = hb_image_stride( init->pix_fmt, init->width, 2 );

    /* Plane 3: one entry per 16x16 macroblock, two rows of workspace */
    ctx->w[3]      = ((init->width+15)/16) * ((init->height+15)/16);
    ctx->h[3]      = 2;
    ctx->stride[3] = ctx->w[3];

#if 0
    ctx->verbose = 1;
#endif

    pullup_init_context( ctx );

    /* Pass the first frame through untouched to prime the pipeline. */
    pv->pullup_fakecount = 1;
    pv->pullup_skipflag  = 0;

    return 0;
}
/*
 * Free the detelecine filter's private state, including its pullup
 * context, and clear the filter's private-data pointer.
 */
static void hb_detelecine_close( hb_filter_object_t * filter )
{
    hb_filter_private_t * pv = filter->private_data;

    if( pv == NULL )
    {
        return;
    }

    if( pv->pullup_ctx != NULL )
    {
        pullup_free_context( pv->pullup_ctx );
    }

    free( pv );
    filter->private_data = NULL;
}
/*
 * Per-frame filter entry point.  Copies the input picture into a pullup
 * buffer, submits its fields (honoring TFF/RFF flags and any user parity
 * override), then asks pullup for a completed output frame.  Short
 * (dropped) frames are skipped; when no frame is ready the input is
 * either passed through (priming) or discarded.
 */
static int hb_detelecine_work( hb_filter_object_t * filter,
                               hb_buffer_t ** buf_in,
                               hb_buffer_t ** buf_out )
{
    hb_filter_private_t * pv = filter->private_data;
    hb_buffer_t * in = *buf_in, * out;

    /* An empty buffer signals end-of-stream: pass it through. */
    if ( in->size <= 0 )
    {
        *buf_out = in;
        *buf_in = NULL;
        return HB_FILTER_DONE;
    }

    struct pullup_context * ctx = pv->pullup_ctx;
    struct pullup_buffer  * buf;
    struct pullup_frame   * frame;

    /* Get a whole-frame (slot 2) buffer from the pullup pool. */
    buf = pullup_get_buffer( ctx, 2 );
    if( !buf )
    {
        frame = pullup_get_frame( ctx );
        /* NOTE(review): frame may be NULL here -- confirm that
           pullup_release_frame tolerates a NULL argument. */
        pullup_release_frame( frame );
        hb_log( "Could not get buffer from pullup!" );
        return HB_FILTER_FAILED;
    }

    /* Copy input buffer into pullup buffer */
    memcpy( buf->planes[0], in->plane[0].data, buf->size[0] );
    memcpy( buf->planes[1], in->plane[1].data, buf->size[1] );
    memcpy( buf->planes[2], in->plane[2].data, buf->size[2] );

    /* Submit buffer fields based on buffer flags.
       Detelecine assumes BFF when the TFF flag isn't present. */
    int parity = 1;
    if( in->s.flags & PIC_FLAG_TOP_FIELD_FIRST )
    {
        /* Source signals TFF */
        parity = 0;
    }
    else if( ctx->parity == 0 )
    {
        /* Many non-MPEG-2 sources lack parity flags even though
           they are TFF, so this allow users to override. */
        parity = 0;
    }
    if( ctx->parity == 1 )
    {
        /* Override autodetected parity with BFF */
        parity = 1;
    }
    pullup_submit_field( ctx, buf, parity );
    pullup_submit_field( ctx, buf, parity^1 );
    if( in->s.flags & PIC_FLAG_REPEAT_FIRST_FIELD )
    {
        /* RFF: the first field is displayed again -> submit a 3rd field. */
        pullup_submit_field( ctx, buf, parity );
    }
    pullup_release_buffer( buf, 2 );

    /* Get frame and check if pullup is ready */
    frame = pullup_get_frame( ctx );
    if( !frame )
    {
        if( pv->pullup_fakecount )
        {
            /* Pullup needs a few fields before it can emit output;
               pass the first input straight through to prime the pipe. */
            pv->pullup_fakecount--;
            *buf_in = NULL;
            *buf_out = in;
            goto output_frame;
        }
        else
        {
            goto discard_frame;
        }
    }

    /* Check to see if frame should be dropped: frames shorter than two
       fields are duplicates created by telecine, so skip up to a few of
       them and try to fetch a full frame instead. */
    if( frame->length < 2 )
    {
        pullup_release_frame( frame );
        frame = pullup_get_frame( ctx );
        if (!frame)
        {
            goto discard_frame;
        }
        if( frame->length < 2 )
        {
            pullup_release_frame( frame );
            if( !(in->s.flags & PIC_FLAG_REPEAT_FIRST_FIELD) )
            {
                goto discard_frame;
            }
            frame = pullup_get_frame( ctx );
            if( !frame )
            {
                goto discard_frame;
            }
            if( frame->length < 2 )
            {
                pullup_release_frame( frame );
                goto discard_frame;
            }
        }
    }

    /* Check to see if frame buffer is ready for export */
    if( !frame->buffer )
    {
        pullup_pack_frame( ctx, frame );
    }

    out = hb_video_buffer_init( in->f.width, in->f.height );

    /* Copy pullup frame buffer into output buffer */
    memcpy( out->plane[0].data, frame->buffer->planes[0], frame->buffer->size[0] );
    memcpy( out->plane[1].data, frame->buffer->planes[1], frame->buffer->size[1] );
    memcpy( out->plane[2].data, frame->buffer->planes[2], frame->buffer->size[2] );

    pullup_release_frame( frame );

    out->s = in->s;
    hb_buffer_move_subs( out, in );

    *buf_out = out;

output_frame:
    return HB_FILTER_OK;

/* This and all discard_frame calls shown above are
   the result of me restoring the functionality in
   pullup that huevos_rancheros disabled because
   HB couldn't handle it. */
discard_frame:
    /* No output this call; *buf_in is untouched, caller keeps 'in'. */
    return HB_FILTER_OK;
}
HandBrake-0.10.2/libhb/deccc608sub.h 0000664 0001752 0001752 00000006321 12463330511 017301 0 ustar handbrake handbrake /* deccc608sub.h
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
/*
* From ccextractor...
*/
#ifndef __DECCC608SUB_H__
#define __DECCC608SUB_H__
#include "common.h"
struct s_write;
#define CC608_SCREEN_WIDTH 32
/* EIA-608 caption display modes. */
enum cc_modes
{
    MODE_POPUP = 0,        // pop-on captions (composed off-screen, then shown)
    MODE_ROLLUP_2 = 1,     // roll-up, 2-row window
    MODE_ROLLUP_3 = 2,     // roll-up, 3-row window
    MODE_ROLLUP_4 = 3,     // roll-up, 4-row window
    MODE_TEXT = 4          // text service mode
};

/* EIA-608 caption colors. */
enum color_code
{
    COL_WHITE = 0,
    COL_GREEN = 1,
    COL_BLUE = 2,
    COL_CYAN = 3,
    COL_RED = 4,
    COL_YELLOW = 5,
    COL_MAGENTA = 6,
    COL_USERDEFINED = 7
};

/* Font style bits; italics and underline may combine. */
enum font_bits
{
    FONT_REGULAR = 0,
    FONT_ITALICS = 1,
    FONT_UNDERLINED = 2,
    FONT_UNDERLINED_ITALICS = 3
};

/* Mask covering every style bit defined above. */
#define FONT_STYLE_MASK FONT_UNDERLINED_ITALICS
/* One CC screen: 15 rows x 32 columns (CC608_SCREEN_WIDTH) of cells. */
struct eia608_screen // A CC buffer
{
    unsigned char characters[15][33]; // 32 columns + trailing 0 per row
    unsigned char colors[15][33];     // per-cell color (presumably enum color_code)
    unsigned char fonts[15][33]; // Extra char at the end for a 0
    int row_used[15]; // Any data in row?
    int empty; // Buffer completely empty?
    int dirty; // Flag indicates buffer has changed since written
};
/* Decoder state for one EIA-608 caption stream. */
struct eia608
{
    struct eia608_screen buffer1;   // two screens: one visible,
    struct eia608_screen buffer2;   // one composed off-screen
    int cursor_row, cursor_column;
    int visible_buffer;             // which of the two buffers is shown
    int ssa_counter; // Number of subs currently written
    int screenfuls_counter; // Number of meaningful screenfuls written
    int64_t current_visible_start_ms; // At what time did the current visible buffer became so?
    enum cc_modes mode;
    unsigned char last_c1, last_c2; // previous control-code byte pair
                                    // (presumably for doubled-command detection -- confirm)
    int channel; // Currently selected channel
    unsigned char color; // Color we are currently using to write
    unsigned char font; // Font we are currently using to write
    int rollup_base_row;            // presumably the bottom row of the roll-up window
};
/* Working/output state for the CC608 subtitle decoder (from ccextractor). */
struct s_write {
    struct eia608 *data608;       // per-stream 608 decoder state
    FILE *fh;                     // output file handle
    unsigned char *subline;       // scratch line buffer
    int new_sentence;
    int new_channel;
    int in_xds_mode;
    hb_buffer_t *hb_buffer;       // subtitle buffer being assembled
    hb_buffer_t *hb_last_buffer;  // most recently emitted buffer
    uint64_t last_pts;
    unsigned char *enc_buffer; // Generic general purpose buffer
    unsigned enc_buffer_used;
    unsigned enc_buffer_capacity;

    int clear_sub_needed; // Indicates that we need to send a null
                          // subtitle to clear the current subtitle

    int rollup_cr; // Flag indicates if CR command performed by rollup
    int direct_rollup;
    int line; // SSA line number
    int width;   // video geometry; presumably used to position rendered
    int height;  // subtitles -- confirm against the decoder
    int crop[4];
    uint8_t prev_font_style;
    uint8_t prev_font_color;
};
/* EIA-608 control commands recognized by the decoder. */
enum command_code
{
    COM_UNKNOWN = 0,
    COM_ERASEDISPLAYEDMEMORY = 1,
    COM_RESUMECAPTIONLOADING = 2,
    COM_ENDOFCAPTION = 3,
    COM_TABOFFSET1 = 4,
    COM_TABOFFSET2 = 5,
    COM_TABOFFSET3 = 6,
    COM_ROLLUP2 = 7,
    COM_ROLLUP3 = 8,
    COM_ROLLUP4 = 9,
    COM_CARRIAGERETURN = 10,
    COM_ERASENONDISPLAYEDMEMORY = 11,
    COM_BACKSPACE = 12,
    COM_RESUMETEXTDISPLAY = 13
};

/* Character encodings for emitted subtitle text. */
enum encoding_type
{
    ENC_UNICODE = 0,
    ENC_LATIN_1 = 1,
    ENC_UTF_8 = 2
};

/* Subtitle output formats (inherited from ccextractor). */
enum output_format
{
    OF_RAW = 0,
    OF_SRT = 1,
    OF_SAMI = 2,
    OF_TRANSCRIPT = 3,
    OF_RCWT = 4
};
#endif // __DECCC608SUB_H__
HandBrake-0.10.2/libhb/reader.c 0000664 0001752 0001752 00000070471 12511064504 016534 0 ustar handbrake handbrake /* reader.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
static int hb_reader_init( hb_work_object_t * w, hb_job_t * job );
static void hb_reader_close( hb_work_object_t * w );
/*
 * Reader work object.  Unlike the encoder objects (cf. hb_encavcodeca,
 * which fills init/work/close in the same positions), the reader has no
 * per-buffer work function -- it runs as its own thread via ReadLoop --
 * so the work slot and trailing slots are NULL.
 */
hb_work_object_t hb_reader =
{
    WORK_READER,
    "Reader",
    hb_reader_init,
    NULL,
    hb_reader_close,
    NULL,
    NULL
};
/*
 * Per-elementary-stream timing state, used to bridge SCR discontinuities
 * (see the comment block above id_to_st).  Kept in an array terminated
 * by an entry with id == -1; slot 0 is always the video stream.
 */
typedef struct
{
    int     startup;          // packets left in the fast-adapting startup phase
    double  average;          // average time between packets
    double  filtered_average; // same, but with outliers (>1.5x average) ignored
    int64_t last;             // last timestamp seen on this stream
    int     id;               // stream id
    int     is_audio;         // != 0 if this is an audio stream
    int     valid;            // Stream timing is not valid until next scr.
} stream_timing_t;
/* Private state for the reader thread. */
struct hb_work_private_s
{
    hb_job_t     * job;
    hb_title_t   * title;
    volatile int * die;             // set externally to abort the read loop

    hb_bd_t      * bd;              // exactly one of bd/dvd/stream is
    hb_dvd_t     * dvd;             // non-NULL, per title->type
    hb_stream_t  * stream;

    stream_timing_t *stream_timing; // per-stream timing, id == -1 terminated
    int64_t        scr_offset;      // current SCR -> output clock offset
    int            sub_scr_set;     // scr_offset was derived from a subtitle pts
    hb_psdemux_t   demux;
    int            scr_changes;     // demux.scr_changes we've accounted for
    uint32_t       sequence;        // monotonically increasing buffer sequence
    uint8_t        st_slots;        // size (in slots) of stream_timing array
    uint8_t        saw_video;       // != 0 if we've seen video
    uint8_t        saw_audio;       // != 0 if we've seen audio
    int            start_found;     // found pts_to_start point
    int64_t        pts_to_start;
    uint64_t       st_first;        // timestamp of first progress update (units of hb_get_date)
    uint64_t       duration;        // expected encode duration, 90kHz ticks
    hb_fifo_t    * fifos[100];      // scratch for GetFifoForId (NULL terminated)
};
/***********************************************************************
* Local prototypes
**********************************************************************/
static hb_fifo_t ** GetFifoForId( hb_work_private_t * r, int id );
static void UpdateState( hb_work_private_t * r, int64_t start);
/***********************************************************************
* hb_reader_init
***********************************************************************
*
**********************************************************************/
/*
 * Open the media source named by the title: Blu-ray, DVD, or a
 * generic/libav stream.  Returns 0 on success, 1 on open failure or an
 * unrecognized title type.
 */
static int hb_reader_open( hb_work_private_t * r )
{
    switch ( r->title->type )
    {
        case HB_BD_TYPE:
            r->bd = hb_bd_init( r->title->path );
            return r->bd == NULL;

        case HB_DVD_TYPE:
            r->dvd = hb_dvd_init( r->title->path );
            return r->dvd == NULL;

        case HB_STREAM_TYPE:
        case HB_FF_STREAM_TYPE:
            r->stream = hb_stream_open( r->title->path, r->title, 0 );
            return r->stream == NULL;

        default:
            // Unknown type, should never happen
            return 1;
    }
}
/*
 * Allocate the reader's private state, seed the stream-timing array with
 * the video stream, compute start-point and duration bookkeeping, and
 * open the media source.  Returns 0 on success, 1 on failure.
 */
static int hb_reader_init( hb_work_object_t * w, hb_job_t * job )
{
    hb_work_private_t * r;

    r = calloc( sizeof( hb_work_private_t ), 1 );
    w->private_data = r;

    r->job = job;
    r->title = job->title;
    r->die = job->die;
    r->sequence = 0;

    /* Slot 0 is reserved for video; a slot with id == -1 terminates
       the array (see id_to_st). */
    r->st_slots = 4;
    r->stream_timing = calloc( sizeof(stream_timing_t), r->st_slots );
    r->stream_timing[0].id = r->title->video_id;
    /* average inter-packet time in 90kHz ticks, from the job frame rate */
    r->stream_timing[0].average = 90000. * (double)job->vrate_base /
                                  (double)job->vrate;
    r->stream_timing[0].filtered_average = r->stream_timing[0].average;
    r->stream_timing[0].last = -r->stream_timing[0].average;
    r->stream_timing[0].valid = 1;
    r->stream_timing[0].startup = 10;
    r->stream_timing[1].id = -1;

    r->demux.last_scr = AV_NOPTS_VALUE;

    if ( !job->pts_to_start )
        r->start_found = 1;
    else
    {
        // The frame at the actual start time may not be an i-frame
        // so can't be decoded without starting a little early.
        // sync.c will drop early frames.
        // Starting a little over 10 seconds early
        r->pts_to_start = MAX(0, job->pts_to_start - 1000000);
    }

    /* Expected duration (90kHz ticks), for progress/ETA reporting:
       explicit pts range, frame count, or sum of selected chapters. */
    if (job->pts_to_stop)
    {
        r->duration = job->pts_to_start + job->pts_to_stop;
    }
    else if (job->frame_to_stop)
    {
        int frames = job->frame_to_start + job->frame_to_stop;
        r->duration = (int64_t)frames * job->title->rate_base * 90000 / job->title->rate;
    }
    else
    {
        hb_chapter_t *chapter;
        int ii;
        r->duration = 0;
        /* NOTE(review): the loop bound is ii < chapter_end, so the final
           chapter's duration is excluded -- confirm whether that is
           intentional (only affects progress/ETA, not the encode). */
        for (ii = job->chapter_start; ii < job->chapter_end; ii++)
        {
            chapter = hb_list_item( job->title->list_chapter, ii - 1);
            r->duration += chapter->duration;
        }
    }

    // The stream needs to be open before starting the reader thead
    // to prevent a race with decoders that may share information
    // with the reader. Specifically avcodec needs this.
    if ( hb_reader_open( r ) )
    {
        free( r->stream_timing );
        free( r );
        return 1;
    }
    return 0;
}
/*
 * Tear down the reader: stop and close whichever source was opened,
 * then free the stream-timing array and the private struct.
 */
static void hb_reader_close( hb_work_object_t * w )
{
    hb_work_private_t * r = w->private_data;

    if ( r->bd != NULL )
    {
        hb_bd_stop( r->bd );
        hb_bd_close( &r->bd );
    }
    else if ( r->dvd != NULL )
    {
        hb_dvd_stop( r->dvd );
        hb_dvd_close( &r->dvd );
    }
    else if ( r->stream != NULL )
    {
        hb_stream_close( &r->stream );
    }

    /* free(NULL) is a no-op, so no guard is needed */
    free( r->stream_timing );
    free( r );
}
/*
 * Push 'buf' onto 'fifo', waiting until there is room.  If the job dies
 * or completes before space opens up, the buffer is closed instead.
 * Ownership of 'buf' always passes to this function.
 */
static void push_buf( const hb_work_private_t *r, hb_fifo_t *fifo, hb_buffer_t *buf )
{
    int pushed = 0;

    while ( !*r->die && !r->job->done )
    {
        if ( hb_fifo_full_wait( fifo ) )
        {
            hb_fifo_push( fifo, buf );
            pushed = 1;
            break;
        }
    }

    if ( !pushed )
    {
        hb_buffer_close( &buf );
    }
}
/* Return nonzero if 'id' matches one of the title's audio streams. */
static int is_audio( hb_work_private_t *r, int id )
{
    int idx = 0;
    hb_audio_t *audio;

    while ( ( audio = hb_list_item( r->title->list_audio, idx ) ) != NULL )
    {
        if ( audio->id == id )
        {
            return 1;
        }
        ++idx;
    }
    return 0;
}
/* Return nonzero if 'id' matches one of the title's subtitle streams. */
static int is_subtitle( hb_work_private_t *r, int id )
{
    int idx = 0;
    hb_subtitle_t *sub;

    while ( ( sub = hb_list_item( r->title->list_subtitle, idx ) ) != NULL )
    {
        if ( sub->id == id )
        {
            return 1;
        }
        ++idx;
    }
    return 0;
}
// The MPEG STD (Standard Target Decoder) essentially requires that we keep
// per-stream timing so that when there's a timing discontinuity we can
// seemlessly join packets on either side of the discontinuity. This join
// requires that we know the timestamp of the previous packet and the
// average inter-packet time (since we position the new packet at the end
// of the previous packet). The next four routines keep track of this
// per-stream timing.
// find or create the per-stream timing state for 'buf'
// find or create the per-stream timing state for 'buf'
static stream_timing_t *id_to_st( hb_work_private_t *r, const hb_buffer_t *buf, int valid )
{
    stream_timing_t *st = r->stream_timing;
    /* linear scan; array terminated by id == -1 */
    while ( st->id != buf->s.id && st->id != -1)
    {
        ++st;
    }
    // if we haven't seen this stream add it.
    if ( st->id == -1 )
    {
        // we keep the steam timing info in an array with some power-of-two
        // number of slots. If we don't have two slots left (one for our new
        // entry plus one for the "-1" eol) we need to expand the array.
        int slot = st - r->stream_timing;
        if ( slot + 1 >= r->st_slots )
        {
            r->st_slots *= 2;
            // NOTE(review): realloc result is unchecked; on OOM this
            // crashes rather than recovers (consistent with surrounding
            // code, but worth knowing).
            r->stream_timing = realloc( r->stream_timing, r->st_slots *
                                        sizeof(*r->stream_timing) );
            st = r->stream_timing + slot;
        }
        st->id = buf->s.id;
        st->average = 30.*90.;   // initial guess: 30ms expressed in 90kHz ticks
        st->filtered_average = st->average;
        st->startup = 10;
        st->last = -st->average;
        if ( ( st->is_audio = is_audio( r, buf->s.id ) ) != 0 )
        {
            r->saw_audio = 1;
        }
        st[1].id = -1;           // keep the array terminated
        st->valid = valid;
    }
    return st;
}
// update the average inter-packet time of the stream associated with 'buf'
// using a recursive low-pass filter with a 16 packet time constant.
// update the average inter-packet time of the stream associated with 'buf'
// using a recursive low-pass filter (gain 1/4 during startup, 1/32 after).
static void update_ipt( hb_work_private_t *r, const hb_buffer_t *buf )
{
    stream_timing_t *st = id_to_st( r, buf, 1 );

    if (buf->s.renderOffset == AV_NOPTS_VALUE)
    {
        // No DTS on this packet: advance 'last' by the filtered average
        // so the stream's clock keeps moving forward.
        st->last += st->filtered_average;
        return;
    }

    double dt = buf->s.renderOffset - st->last;

    // Protect against spurious bad timestamps
    // timestamps should only move forward and by reasonable increments
    // (less than 5 seconds in 90kHz ticks)
    if ( dt > 0 && dt < 5 * 90000LL )
    {
        if( st->startup )
        {
            // adapt quickly for the first few packets...
            st->average += ( dt - st->average ) * (1./4.);
            st->startup--;
        }
        else
        {
            // ...then settle into the slow filter.
            st->average += ( dt - st->average ) * (1./32.);
        }
        // Ignore outliers
        if (dt < 1.5 * st->average)
        {
            st->filtered_average += ( dt - st->filtered_average ) * (1./32.);
        }
    }
    st->last = buf->s.renderOffset;
    st->valid = 1;
}
// use the per-stream state associated with 'buf' to compute a new scr_offset
// such that 'buf' will follow the previous packet of this stream separated
// by the average packet time of the stream.
// use the per-stream state associated with 'buf' to compute a new scr_offset
// such that 'buf' will follow the previous packet of this stream separated
// by the average packet time of the stream.
static void new_scr_offset( hb_work_private_t *r, hb_buffer_t *buf )
{
    stream_timing_t *st = id_to_st( r, buf, 1 );
    int64_t last;

    if ( !st->valid )
    {
        // !valid means we've not received any previous data
        // for this stream. There is no 'last' packet time.
        // So approximate it with video's last time.
        last = r->stream_timing[0].last;
        st->valid = 1;
    }
    else
    {
        last = st->last;
    }

    /* position this packet one average packet-time after the last one */
    int64_t nxt = last + st->filtered_average;
    r->scr_offset = buf->s.renderOffset - nxt;

    // This log is handy when you need to debug timing problems...
    //hb_log("id %x last %"PRId64" avg %g nxt %"PRId64" renderOffset %"PRId64
    //       " scr_offset %"PRId64"",
    //       buf->s.id, last, st->filtered_average, nxt,
    //       buf->s.renderOffset, r->scr_offset);

    /* record that this SCR change has now been accounted for */
    r->scr_changes = r->demux.scr_changes;
}
/***********************************************************************
* ReaderFunc
***********************************************************************
*
**********************************************************************/
/*
 * Reader thread main loop: position the source (BD, DVD, or stream) at
 * the requested start point, then repeatedly read raw packets, demux
 * them into elementary-stream buffers, regenerate clean timestamps
 * across SCR discontinuities, and push each buffer to the fifo(s) of
 * the decoder(s) that consume that stream.  On exit, zero-length
 * buffers are sent downstream as end-of-stream markers.
 */
void ReadLoop( void * _w )
{
    hb_work_object_t * w = _w;
    hb_work_private_t * r = w->private_data;
    hb_fifo_t ** fifos;
    hb_buffer_t * buf = NULL;
    hb_list_t * list;
    int n;
    int chapter = -1;
    int chapter_end = r->job->chapter_end;
    uint8_t done = 0;

    /* ---- position the source at the requested start point ---- */
    if (r->bd)
    {
        if( !hb_bd_start( r->bd, r->title ) )
        {
            hb_bd_close( &r->bd );
            return;
        }
        if ( r->job->start_at_preview )
        {
            // XXX code from DecodePreviews - should go into its own routine
            hb_bd_seek( r->bd, (float)r->job->start_at_preview /
                        ( r->job->seek_points ? ( r->job->seek_points + 1.0 ) : 11.0 ) );
        }
        else if ( r->job->pts_to_start )
        {
            // Note, bd seeks always put us to an i-frame. no need
            // to start decoding early using r->pts_to_start
            hb_bd_seek_pts( r->bd, r->job->pts_to_start );
            r->duration -= r->job->pts_to_start;
            r->job->pts_to_start = 0;
            r->start_found = 1;
        }
        else
        {
            hb_bd_seek_chapter( r->bd, r->job->chapter_start );
        }
        if (r->job->angle > 1)
        {
            hb_bd_set_angle( r->bd, r->job->angle - 1 );
        }
    }
    else if (r->dvd)
    {
        /*
         * XXX this code is a temporary hack that should go away if/when
         * chapter merging goes away in libhb/dvd.c
         * map the start and end chapter numbers to on-media chapter
         * numbers since chapter merging could cause the handbrake numbers
         * to diverge from the media numbers and, if our chapter_end is after
         * a media chapter that got merged, we'll stop ripping too early.
         */
        int start = r->job->chapter_start;
        hb_chapter_t *chap = hb_list_item( r->job->list_chapter, chapter_end - 1 );
        chapter_end = chap->index;
        if (start > 1)
        {
            chap = hb_list_item( r->job->list_chapter, start - 1 );
            start = chap->index;
        }
        /* end chapter mapping XXX */
        if( !hb_dvd_start( r->dvd, r->title, start ) )
        {
            hb_dvd_close( &r->dvd );
            return;
        }
        if (r->job->angle)
        {
            hb_dvd_set_angle( r->dvd, r->job->angle );
        }
        if ( r->job->start_at_preview )
        {
            // XXX code from DecodePreviews - should go into its own routine
            hb_dvd_seek( r->dvd, (float)r->job->start_at_preview /
                         ( r->job->seek_points ? ( r->job->seek_points + 1.0 ) : 11.0 ) );
        }
    }
    else if ( r->stream && r->job->start_at_preview )
    {
        // XXX code from DecodePreviews - should go into its own routine
        hb_stream_seek( r->stream, (float)( r->job->start_at_preview - 1 ) /
                        ( r->job->seek_points ? ( r->job->seek_points + 1.0 ) : 11.0 ) );
    }
    else if ( r->stream && r->job->pts_to_start )
    {
        int64_t pts_to_start = r->job->pts_to_start;
        // Find out what the first timestamp of the stream is
        // and then seek to the appropriate offset from it
        if ( ( buf = hb_stream_read( r->stream ) ) )
        {
            if (buf->s.start != AV_NOPTS_VALUE)
            {
                pts_to_start += buf->s.start;
            }
        }
        if ( hb_stream_seek_ts( r->stream, pts_to_start ) >= 0 )
        {
            // Seek takes us to the nearest I-frame before the timestamp
            // that we want. So we will retrieve the start time of the
            // first packet we get, subtract that from pts_to_start, and
            // inspect the reset of the frames in sync.
            r->start_found = 2;
            r->duration -= r->job->pts_to_start;
            r->job->pts_to_start = pts_to_start;
            hb_buffer_close(&buf);
        }
        // hb_stream_seek_ts does nothing for TS streams and will return
        // an error. In this case, the current buf remains valid and
        // gets processed below.
    }
    else if( r->stream )
    {
        /*
         * Standard stream, seek to the starting chapter, if set, and track the
         * end chapter so that we end at the right time.
         */
        int start = r->job->chapter_start;
        hb_chapter_t *chap = hb_list_item( r->job->list_chapter, chapter_end - 1 );
        chapter_end = chap->index;
        if (start > 1)
        {
            chap = hb_list_item( r->job->list_chapter, start - 1 );
            start = chap->index;
        }
        /*
         * Seek to the start chapter.
         */
        hb_stream_seek_chapter( r->stream, start );
    }

    list = hb_list_init();

    /* ---- main read / demux / dispatch loop ---- */
    while(!*r->die && !r->job->done && !done)
    {
        /* Stop at end of title or once past the last requested chapter. */
        if (r->bd)
            chapter = hb_bd_chapter( r->bd );
        else if (r->dvd)
            chapter = hb_dvd_chapter( r->dvd );
        else if (r->stream)
            chapter = hb_stream_chapter( r->stream );

        if( chapter < 0 )
        {
            hb_log( "reader: end of the title reached" );
            break;
        }
        if( chapter > chapter_end )
        {
            hb_log( "reader: end of chapter %d (media %d) reached at media chapter %d",
                    r->job->chapter_end, chapter_end, chapter );
            break;
        }

        /* buf may already hold a packet left over from the seek logic above */
        if (buf == NULL)
        {
            if (r->bd)
            {
                if( (buf = hb_bd_read( r->bd )) == NULL )
                {
                    break;
                }
            }
            else if (r->dvd)
            {
                if( (buf = hb_dvd_read( r->dvd )) == NULL )
                {
                    break;
                }
            }
            else if (r->stream)
            {
                if ( (buf = hb_stream_read( r->stream )) == NULL )
                {
                    break;
                }
            }
        }

        /* Demux the raw packet into per-stream buffers appended to 'list'. */
        (hb_demux[r->title->demuxer])( buf, list, &r->demux );

        while( ( buf = hb_list_item( list, 0 ) ) )
        {
            hb_list_rem( list, buf );
            fifos = GetFifoForId( r, buf->s.id );

            if (fifos && r->stream && r->start_found == 2 )
            {
                // We will inspect the timestamps of each frame in sync
                // to skip from this seek point to the timestamp we
                // want to start at.
                if (buf->s.start != AV_NOPTS_VALUE &&
                    buf->s.start < r->job->pts_to_start)
                {
                    r->job->pts_to_start -= buf->s.start;
                }
                else if ( buf->s.start >= r->job->pts_to_start )
                {
                    r->job->pts_to_start = 0;
                }
                r->start_found = 1;
            }

            if ( fifos && ! r->saw_video && !r->job->indepth_scan )
            {
                // The first data packet with a PTS from an audio or video stream
                // that we're decoding defines 'time zero'. Discard packets until
                // we get one.
                if (buf->s.start != AV_NOPTS_VALUE &&
                    buf->s.renderOffset != AV_NOPTS_VALUE &&
                    (buf->s.id == r->title->video_id ||
                     is_audio( r, buf->s.id)))
                {
                    // force a new scr offset computation
                    r->scr_changes = r->demux.scr_changes - 1;
                    // create a stream state if we don't have one so the
                    // offset will get computed correctly.
                    id_to_st( r, buf, 1 );
                    r->saw_video = 1;
                    hb_log( "reader: first SCR %"PRId64" id 0x%x DTS %"PRId64,
                            r->demux.last_scr, buf->s.id, buf->s.renderOffset );
                }
                else
                {
                    fifos = NULL;
                }
            }

            if ( r->job->indepth_scan || fifos )
            {
                if ( buf->s.renderOffset != AV_NOPTS_VALUE )
                {
                    if ( r->scr_changes != r->demux.scr_changes )
                    {
                        // This is the first audio or video packet after an SCR
                        // change. Compute a new scr offset that would make this
                        // packet follow the last of this stream with the
                        // correct average spacing.
                        stream_timing_t *st = id_to_st( r, buf, 0 );
                        // if this is the video stream and we don't have
                        // audio yet or this is an audio stream
                        // generate a new scr
                        if ( st->is_audio ||
                             ( st == r->stream_timing && !r->saw_audio ) )
                        {
                            new_scr_offset( r, buf );
                            r->sub_scr_set = 0;
                        }
                        else
                        {
                            // defer the scr change until we get some
                            // audio since audio has a timestamp per
                            // frame but video & subtitles don't. Clear
                            // the timestamps so the decoder will generate
                            // them from the frame durations.
                            if (is_subtitle(r, buf->s.id) &&
                                buf->s.start != AV_NOPTS_VALUE)
                            {
                                if (!r->sub_scr_set)
                                {
                                    // We can't generate timestamps in the
                                    // subtitle decoder as we can for
                                    // audio & video. So we need to make
                                    // the closest guess that we can
                                    // for the subtitles start time here.
                                    int64_t last = r->stream_timing[0].last;
                                    r->scr_offset = buf->s.start - last;
                                    r->sub_scr_set = 1;
                                }
                            }
                            else
                            {
                                buf->s.start = AV_NOPTS_VALUE;
                                buf->s.renderOffset = AV_NOPTS_VALUE;
                            }
                        }
                    }
                }
                if ( buf->s.start != AV_NOPTS_VALUE )
                {
                    int64_t start = buf->s.start - r->scr_offset;
                    if (!r->start_found || r->job->indepth_scan)
                    {
                        UpdateState( r, start );
                    }
                    if (r->job->indepth_scan && r->job->pts_to_stop &&
                        start >= r->pts_to_start + r->job->pts_to_stop)
                    {
                        // sync normally would terminate p-to-p
                        // but sync doesn't run during indepth scan
                        hb_log( "reader: reached pts %"PRId64", exiting early", start );
                        done = 1;
                        break;
                    }
                    if (!r->start_found && start >= r->pts_to_start)
                    {
                        // pts_to_start point found
                        r->start_found = 1;
                        if (r->stream)
                        {
                            // libav multi-threaded decoders can get into
                            // a bad state if the initial data is not
                            // decodable. So try to improve the chances of
                            // a good start by waiting for an initial iframe
                            hb_stream_set_need_keyframe(r->stream, 1);
                            hb_buffer_close( &buf );
                            continue;
                        }
                    }
                    // This log is handy when you need to debug timing problems
                    //hb_log("id %x scr_offset %"PRId64
                    //       " start %"PRId64" --> %"PRId64"",
                    //        buf->s.id, r->scr_offset, buf->s.start,
                    //        buf->s.start - r->scr_offset);
                    buf->s.start -= r->scr_offset;
                    if ( buf->s.stop != AV_NOPTS_VALUE )
                    {
                        buf->s.stop -= r->scr_offset;
                    }
                }
                if ( buf->s.renderOffset != AV_NOPTS_VALUE )
                {
                    // This packet is referenced to the same SCR as the last.
                    // Adjust timestamp to remove the System Clock Reference
                    // offset then update the average inter-packet time
                    // for this stream.
                    buf->s.renderOffset -= r->scr_offset;
                    update_ipt( r, buf );
                }
#if 0
                // JAS: This was added to fix a rare "audio time went backward"
                // sync error I found in one sample. But it has a bad side
                // effect on DVDs, causing frequent "adding silence" sync
                // errors. So I am disabling it.
                else
                {
                    update_ipt( r, buf );
                }
#endif
            }

            if( fifos )
            {
                if ( !r->start_found )
                {
                    hb_buffer_close( &buf );
                    continue;
                }
                buf->sequence = r->sequence++;
                /* if there are mutiple output fifos, send a copy of the
                 * buffer down all but the first (we have to not ship the
                 * original buffer or we'll race with the thread that's
                 * consuming the buffer & inject garbage into the data stream). */
                for( n = 1; fifos[n] != NULL; n++)
                {
                    hb_buffer_t *buf_copy = hb_buffer_init( buf->size );
                    buf_copy->s = buf->s;
                    memcpy( buf_copy->data, buf->data, buf->size );
                    push_buf( r, fifos[n], buf_copy );
                }
                push_buf( r, fifos[0], buf );
                buf = NULL;
            }
            else
            {
                hb_buffer_close( &buf );
            }
        }
    }

    // send empty buffers downstream to video & audio decoders to signal we're done.
    if( !*r->die && !r->job->done )
    {
        push_buf( r, r->job->fifo_mpeg2, hb_buffer_init(0) );
        hb_audio_t *audio;
        for( n = 0; (audio = hb_list_item( r->job->list_audio, n)); ++n )
        {
            if ( audio->priv.fifo_in )
                push_buf( r, audio->priv.fifo_in, hb_buffer_init(0) );
        }
        hb_subtitle_t *subtitle;
        for( n = 0; (subtitle = hb_list_item( r->job->list_subtitle, n)); ++n )
        {
            if ( subtitle->fifo_in && subtitle->source == VOBSUB)
                push_buf( r, subtitle->fifo_in, hb_buffer_init(0) );
        }
    }
    hb_list_empty( &list );
    hb_log( "reader: done. %d scr changes", r->demux.scr_changes );
    if ( r->demux.dts_drops )
    {
        hb_log( "reader: %d drops because DTS out of range", r->demux.dts_drops );
    }
}
/*
 * Report reader progress/ETA to the UI.  While searching for a
 * point-to-point start we report SEARCHING progress relative to
 * pts_to_start; during an in-depth scan we report WORKING progress
 * relative to the total expected duration.
 */
static void UpdateState( hb_work_private_t * r, int64_t start)
{
    hb_state_t state;
    uint64_t now;
    double avg;

    now = hb_get_date();
    if( !r->st_first )
    {
        r->st_first = now;   /* remember when progress reporting began */
    }

/* shorthand for the 'working' member of the state parameter union */
#define p state.param.working
    if ( !r->job->indepth_scan )
    {
        state.state = HB_STATE_SEARCHING;
        p.progress = (float) start / (float) r->job->pts_to_start;
    }
    else
    {
        state.state = HB_STATE_WORKING;
        p.progress = (float) start / (float) r->duration;
    }
    if( p.progress > 1.0 )
    {
        p.progress = 1.0;
    }
    p.rate_cur = 0.0;
    p.rate_avg = 0.0;
    if (now > r->st_first)
    {
        int eta;

        /* avg = pts ticks processed per unit of wall-clock time */
        avg = 1000.0 * (double)start / (now - r->st_first);
        if ( !r->job->indepth_scan )
            eta = ( r->job->pts_to_start - start ) / avg;
        else
            eta = ( r->duration - start ) / avg;
        p.hours   = eta / 3600;
        p.minutes = ( eta % 3600 ) / 60;
        p.seconds = eta % 60;
    }
    else
    {
        /* no elapsed time yet: ETA unknown */
        p.hours   = -1;
        p.minutes = -1;
        p.seconds = -1;
    }
#undef p

    hb_set_state( r->job->h, &state );
}
/***********************************************************************
* GetFifoForId
***********************************************************************
*
**********************************************************************/
/*
 * Map a demuxed stream id to the decoder fifo(s) that consume it.
 * Returns a NULL-terminated array (r->fifos) or NULL when nothing wants
 * this stream.  A single id may feed several subtitle fifos.
 */
static hb_fifo_t ** GetFifoForId( hb_work_private_t * r, int id )
{
    hb_job_t * job = r->job;
    hb_title_t * title = job->title;
    hb_audio_t * audio;
    hb_subtitle_t * subtitle;
    int i, n, count;

    memset(r->fifos, 0, sizeof(r->fifos));

    if( id == title->video_id )
    {
        if (job->indepth_scan && !job->frame_to_stop)
        {
            /*
             * Ditch the video here during the indepth scan until
             * we can improve the MPEG2 decode performance.
             *
             * But if we specify a stop frame, we must decode the
             * frames in order to count them.
             */
            return NULL;
        }
        else
        {
            r->fifos[0] = job->fifo_mpeg2;
            return r->fifos;
        }
    }

    /* Clamp to 99 so the terminating NULL always fits in fifos[100]. */
    count = hb_list_count( job->list_subtitle );
    count = count > 99 ? 99 : count;
    for( i = n = 0; i < count; i++ )
    {
        subtitle = hb_list_item( job->list_subtitle, i );
        if (id == subtitle->id)
        {
            /* pass the subtitles to be processed */
            r->fifos[n++] = subtitle->fifo_in;
        }
    }
    if ( n != 0 )
    {
        return r->fifos;
    }

    /* Audio is not decoded during an in-depth (subtitle) scan. */
    if( !job->indepth_scan )
    {
        for( i = n = 0; i < hb_list_count( job->list_audio ); i++ )
        {
            audio = hb_list_item( job->list_audio, i );
            if( id == audio->id )
            {
                r->fifos[n++] = audio->priv.fifo_in;
            }
        }
        if( n != 0 )
        {
            return r->fifos;
        }
    }
    return NULL;
}
HandBrake-0.10.2/libhb/encavcodecaudio.c 0000664 0001752 0001752 00000035665 12463330511 020414 0 ustar handbrake handbrake /* encavcodecaudio.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
#include "hbffmpeg.h"
/* Private state for the libavcodec audio encoder work object. */
struct hb_work_private_s
{
    hb_job_t       * job;
    AVCodecContext * context;

    int              out_discrete_channels; // discrete channel count for the output mixdown
    int              samples_per_frame;     // presumably per-channel samples per encoder frame -- confirm
    unsigned long    max_output_bytes;
    unsigned long    input_samples;
    uint8_t        * output_buf;
    uint8_t        * input_buf;
    hb_list_t      * list;

    AVAudioResampleContext *avresample;     // libavresample context for fmt/layout conversion
};
static int encavcodecaInit( hb_work_object_t *, hb_job_t * );
static int encavcodecaWork( hb_work_object_t *, hb_buffer_t **, hb_buffer_t ** );
static void encavcodecaClose( hb_work_object_t * );
/* Work object registration for the libavcodec audio encoder
 * (id, name, init, work, close). */
hb_work_object_t hb_encavcodeca =
{
    WORK_ENCAVCODEC_AUDIO,
    "AVCodec Audio encoder (libavcodec)",
    encavcodecaInit,
    encavcodecaWork,
    encavcodecaClose
};
// Set up the libavcodec audio encoder selected by the job's output
// codec: pick the codec, configure the context (layout, rate, bitrate
// or quality), open it, and prepare the input/output buffers plus an
// optional float -> codec-sample-format converter.
// Returns 0 on success, 1 on any failure.
static int encavcodecaInit(hb_work_object_t *w, hb_job_t *job)
{
    AVCodec *codec;
    AVCodecContext *context;
    hb_audio_t *audio = w->audio;

    hb_work_private_t *pv = calloc(1, sizeof(hb_work_private_t));
    w->private_data = pv;
    pv->job = job;
    pv->list = hb_list_init();

    // channel count, layout and matrix encoding
    int matrix_encoding;
    uint64_t channel_layout = hb_ff_mixdown_xlat(audio->config.out.mixdown,
                                                 &matrix_encoding);
    pv->out_discrete_channels =
        hb_mixdown_get_discrete_channel_count(audio->config.out.mixdown);

    // default settings and options
    AVDictionary *av_opts = NULL;
    const char *codec_name = NULL;
    enum AVCodecID codec_id = AV_CODEC_ID_NONE;
    enum AVSampleFormat sample_fmt = AV_SAMPLE_FMT_FLTP;
    int bits_per_raw_sample = 0;
    int profile = FF_PROFILE_UNKNOWN;

    // override with encoder-specific values
    switch (audio->config.out.codec)
    {
        case HB_ACODEC_AC3:
            codec_id = AV_CODEC_ID_AC3;
            if (matrix_encoding != AV_MATRIX_ENCODING_NONE)
                av_dict_set(&av_opts, "dsur_mode", "on", 0);
            break;

        case HB_ACODEC_FDK_AAC:
        case HB_ACODEC_FDK_HAAC:
            codec_name = "libfdk_aac";
            sample_fmt = AV_SAMPLE_FMT_S16;
            bits_per_raw_sample = 16;
            switch (audio->config.out.codec)
            {
                case HB_ACODEC_FDK_HAAC:
                    profile = FF_PROFILE_AAC_HE;
                    break;
                default:
                    profile = FF_PROFILE_AAC_LOW;
                    break;
            }
            // Libav's libfdk-aac wrapper expects back channels for 5.1
            // audio, and will error out unless we translate the layout
            if (channel_layout == AV_CH_LAYOUT_5POINT1)
                channel_layout = AV_CH_LAYOUT_5POINT1_BACK;
            break;

        case HB_ACODEC_FFAAC:
            codec_name = "aac";
            av_dict_set(&av_opts, "stereo_mode", "ms_off", 0);
            break;

        case HB_ACODEC_FFFLAC:
        case HB_ACODEC_FFFLAC24:
            codec_id = AV_CODEC_ID_FLAC;
            switch (audio->config.out.codec)
            {
                case HB_ACODEC_FFFLAC24:
                    sample_fmt = AV_SAMPLE_FMT_S32;
                    bits_per_raw_sample = 24;
                    break;
                default:
                    sample_fmt = AV_SAMPLE_FMT_S16;
                    bits_per_raw_sample = 16;
                    break;
            }
            break;

        default:
            hb_error("encavcodecaInit: unsupported codec (0x%x)",
                     audio->config.out.codec);
            return 1;
    }

    // Prefer lookup by name when one was chosen above (selects a
    // specific implementation, e.g. libfdk_aac over the native AAC).
    if (codec_name != NULL)
    {
        codec = avcodec_find_encoder_by_name(codec_name);
        if (codec == NULL)
        {
            hb_error("encavcodecaInit: avcodec_find_encoder_by_name(%s) failed",
                     codec_name);
            return 1;
        }
    }
    else
    {
        codec = avcodec_find_encoder(codec_id);
        if (codec == NULL)
        {
            hb_error("encavcodecaInit: avcodec_find_encoder(%d) failed",
                     codec_id);
            return 1;
        }
    }

    // allocate the context and apply the settings
    context = avcodec_alloc_context3(codec);
    hb_ff_set_sample_fmt(context, codec, sample_fmt);
    context->bits_per_raw_sample = bits_per_raw_sample;
    context->profile = profile;
    context->channel_layout = channel_layout;
    context->channels = pv->out_discrete_channels;
    context->sample_rate = audio->config.out.samplerate;

    // Either a constant bitrate (kbps in HB config -> bps for libav)
    // or a quality-based mode, mutually exclusive.
    if (audio->config.out.bitrate > 0)
    {
        context->bit_rate = audio->config.out.bitrate * 1000;
    }
    else if (audio->config.out.quality >= 0)
    {
        context->global_quality = audio->config.out.quality * FF_QP2LAMBDA;
        context->flags |= CODEC_FLAG_QSCALE;
    }

    if (audio->config.out.compression_level >= 0)
    {
        context->compression_level = audio->config.out.compression_level;
    }

    // For some codecs, libav requires the following flag to be set
    // so that it fills extradata with global header information.
    // If this flag is not set, it inserts the data into each
    // packet instead.
    context->flags |= CODEC_FLAG_GLOBAL_HEADER;

    if (hb_avcodec_open(context, codec, &av_opts, 0))
    {
        hb_error("encavcodecaInit: hb_avcodec_open() failed");
        return 1;
    }
    // avcodec_open populates the opts dictionary with the
    // things it didn't recognize.
    AVDictionaryEntry *t = NULL;
    while ((t = av_dict_get(av_opts, "", t, AV_DICT_IGNORE_SUFFIX)))
    {
        hb_log("encavcodecaInit: Unknown avcodec option %s", t->key);
    }
    av_dict_free(&av_opts);

    pv->context = context;
    audio->config.out.samples_per_frame =
    pv->samples_per_frame = context->frame_size;
    pv->input_samples = context->frame_size * context->channels;
    // Input side is always interleaved float samples.
    pv->input_buf = malloc(pv->input_samples * sizeof(float));
    // Some encoders in libav (e.g. fdk-aac) fail if the output buffer
    // size is not some minimum value.  8K seems to be enough :(
    pv->max_output_bytes = MAX(FF_MIN_BUFFER_SIZE,
                               (pv->input_samples *
                                av_get_bytes_per_sample(context->sample_fmt)));

    // sample_fmt conversion: when the codec can't take interleaved
    // float directly, set up an avresample context (format conversion
    // only, no rate change) and a separate output staging buffer.
    if (context->sample_fmt != AV_SAMPLE_FMT_FLT)
    {
        pv->output_buf = malloc(pv->max_output_bytes);
        pv->avresample = avresample_alloc_context();
        if (pv->avresample == NULL)
        {
            hb_error("encavcodecaInit: avresample_alloc_context() failed");
            return 1;
        }
        av_opt_set_int(pv->avresample, "in_sample_fmt",
                       AV_SAMPLE_FMT_FLT, 0);
        av_opt_set_int(pv->avresample, "out_sample_fmt",
                       context->sample_fmt, 0);
        av_opt_set_int(pv->avresample, "in_channel_layout",
                       context->channel_layout, 0);
        av_opt_set_int(pv->avresample, "out_channel_layout",
                       context->channel_layout, 0);
        if (hb_audio_dither_is_supported(audio->config.out.codec))
        {
            // dithering needs the sample rate
            av_opt_set_int(pv->avresample, "in_sample_rate",
                           context->sample_rate, 0);
            av_opt_set_int(pv->avresample, "out_sample_rate",
                           context->sample_rate, 0);
            av_opt_set_int(pv->avresample, "dither_method",
                           audio->config.out.dither_method, 0);
        }
        if (avresample_open(pv->avresample))
        {
            hb_error("encavcodecaInit: avresample_open() failed");
            avresample_free(&pv->avresample);
            return 1;
        }
    }
    else
    {
        // No conversion needed: encode straight from the input buffer.
        pv->avresample = NULL;
        pv->output_buf = pv->input_buf;
    }

    // Publish the codec's global header (if any) to the muxer.
    if (context->extradata != NULL)
    {
        memcpy(w->config->extradata.bytes, context->extradata,
               context->extradata_size);
        w->config->extradata.length = context->extradata_size;
    }

    // Encoder delay, rescaled from the codec timebase to 90 kHz ticks.
    audio->config.out.delay = av_rescale_q(context->delay, context->time_base,
                                           (AVRational){1, 90000});

    return 0;
}
/***********************************************************************
* Close
***********************************************************************
*
**********************************************************************/
// Some encoders (e.g. flac) require a final NULL encode in order to
// finalize things.
static void Finalize(hb_work_object_t *w)
{
    hb_work_private_t *pv = w->private_data;

    // Finalize with NULL input needed by FLAC to generate md5sum
    // in context extradata

    // Prepare output packet
    AVPacket pkt;
    int got_packet;
    // Scratch buffer: the encoded output of the flush call is discarded;
    // only the side effect on the codec's extradata matters here.
    hb_buffer_t *buf = hb_buffer_init(pv->max_output_bytes);
    av_init_packet(&pkt);
    pkt.data = buf->data;
    pkt.size = buf->alloc;

    avcodec_encode_audio2(pv->context, &pkt, NULL, &got_packet);
    hb_buffer_close(&buf);

    // Then we need to recopy the header since it was modified
    if (pv->context->extradata != NULL)
    {
        memcpy(w->config->extradata.bytes, pv->context->extradata,
               pv->context->extradata_size);
        w->config->extradata.length = pv->context->extradata_size;
    }
}
// Tear down the audio encoder: finalize the codec, free the libav
// context, the sample buffers, the pending-input list and the
// resample context.
static void encavcodecaClose(hb_work_object_t * w)
{
    hb_work_private_t * pv = w->private_data;

    if (pv != NULL)
    {
        if (pv->context != NULL)
        {
            // Flush the encoder first so extradata (e.g. FLAC md5) is final.
            Finalize(w);
            hb_deep_log(2, "encavcodeca: closing libavcodec");
            if (pv->context->codec != NULL)
                avcodec_flush_buffers(pv->context);
            hb_avcodec_close(pv->context);
            av_free( pv->context );
        }
        if (pv->output_buf != NULL)
        {
            free(pv->output_buf);
        }
        // input_buf may alias output_buf (no-conversion path); only free
        // it when it is a distinct allocation.
        if (pv->input_buf != NULL && pv->input_buf != pv->output_buf)
        {
            free(pv->input_buf);
        }
        pv->output_buf = pv->input_buf = NULL;
        if (pv->list != NULL)
        {
            hb_list_empty(&pv->list);
        }
        if (pv->avresample != NULL)
        {
            avresample_free(&pv->avresample);
        }
        free(pv);
        w->private_data = NULL;
    }
}
// Pull one frame's worth of float samples from the pending-input list,
// convert them to the codec's sample format if needed, encode them and
// return the encoded packet as an hb_buffer_t with 90 kHz timestamps.
// Returns NULL when there is not yet enough queued input (or on error).
static hb_buffer_t* Encode(hb_work_object_t *w)
{
    hb_work_private_t *pv = w->private_data;
    hb_audio_t *audio = w->audio;
    uint64_t pts, pos;

    // Not enough buffered input for a complete frame yet.
    if (hb_list_bytes(pv->list) < pv->input_samples * sizeof(float))
    {
        return NULL;
    }

    // pts/pos identify where this frame's samples start in the stream.
    hb_list_getbytes(pv->list, pv->input_buf, pv->input_samples * sizeof(float),
                     &pts, &pos);

    // Prepare input frame
    int out_linesize;
    int out_size = av_samples_get_buffer_size(&out_linesize,
                                              pv->context->channels,
                                              pv->samples_per_frame,
                                              pv->context->sample_fmt, 1);
    AVFrame frame = { .nb_samples = pv->samples_per_frame, };
    // Point the frame's data planes at output_buf (which aliases
    // input_buf when no sample-format conversion is required).
    avcodec_fill_audio_frame(&frame,
                             pv->context->channels, pv->context->sample_fmt,
                             pv->output_buf, out_size, 1);
    if (pv->avresample != NULL)
    {
        // Convert interleaved float -> codec sample_fmt in place into
        // the frame's planes (no sample-rate change).
        int in_linesize;
        av_samples_get_buffer_size(&in_linesize, pv->context->channels,
                                   frame.nb_samples, AV_SAMPLE_FMT_FLT, 1);
        int out_samples = avresample_convert(pv->avresample,
                                             frame.extended_data, out_linesize,
                                             frame.nb_samples,
                                             &pv->input_buf, in_linesize,
                                             frame.nb_samples);
        if (out_samples != pv->samples_per_frame)
        {
            // we're not doing sample rate conversion, so this shouldn't happen
            hb_log("encavcodecaWork: avresample_convert() failed");
            return NULL;
        }
    }

    // Libav requires that timebase of audio frames be in sample_rate units.
    // First adjust the buffer pts by the byte offset within the buffer,
    // then rescale 90 kHz ticks -> sample counts.
    frame.pts = pts + (90000 * pos / (sizeof(float) *
                                      pv->out_discrete_channels *
                                      audio->config.out.samplerate));
    frame.pts = av_rescale(frame.pts, pv->context->sample_rate, 90000);

    // Prepare output packet
    AVPacket pkt;
    int got_packet;
    hb_buffer_t *out = hb_buffer_init(pv->max_output_bytes);
    av_init_packet(&pkt);
    pkt.data = out->data;
    pkt.size = out->alloc;

    // Encode
    int ret = avcodec_encode_audio2(pv->context, &pkt, &frame, &got_packet);
    if (ret < 0)
    {
        hb_log("encavcodeca: avcodec_encode_audio failed");
        hb_buffer_close(&out);
        return NULL;
    }
    if (got_packet && pkt.size)
    {
        out->size = pkt.size;
        // The output pts from libav is in context->time_base. Convert it back
        // to our timebase.
        out->s.start = av_rescale_q(pkt.pts, pv->context->time_base,
                                    (AVRational){1, 90000});
        out->s.duration = (double)90000 * pv->samples_per_frame /
                          audio->config.out.samplerate;
        out->s.stop = out->s.start + out->s.duration;
        out->s.type = AUDIO_BUF;
        out->s.frametype = HB_FRAME_AUDIO;
    }
    else
    {
        // Encoder consumed input but produced no packet yet (codec delay);
        // recurse to try encoding the next queued frame.
        hb_buffer_close(&out);
        return Encode(w);
    }
    return out;
}
// Drain the encoder: emit every remaining complete frame as a chain of
// buffers terminated by a zero-length EOF buffer.
static hb_buffer_t * Flush( hb_work_object_t * w )
{
    hb_buffer_t *head = Encode( w );
    hb_buffer_t *tail = head;

    // Keep encoding until no complete frame is left, chaining results.
    while (tail != NULL && (tail->next = Encode(w)) != NULL)
    {
        tail = tail->next;
    }

    if (tail == NULL)
    {
        // Nothing was pending; the EOF marker is the whole output.
        return hb_buffer_init(0);
    }

    // Terminate the chain with the EOF marker.
    tail->next = hb_buffer_init(0);
    return head;
}
/***********************************************************************
* Work
***********************************************************************
*
**********************************************************************/
// Queue the incoming audio buffer and emit any frames the encoder can
// now produce. On EOF, flush the encoder and signal completion.
static int encavcodecaWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                            hb_buffer_t ** buf_out )
{
    hb_work_private_t * pv = w->private_data;
    hb_buffer_t       * in = *buf_in;

    if (in->size <= 0)
    {
        /* EOF on input - send it downstream & say we're done */
        *buf_out = Flush(w);
        return HB_WORK_DONE;
    }

    if (pv->context == NULL || pv->context->codec == NULL)
    {
        // No encoder context. Nothing we can do.
        return HB_WORK_OK;
    }

    // Take ownership of the input buffer and queue it for encoding.
    hb_list_add(pv->list, in);
    *buf_in = NULL;

    // Encode as many complete frames as the queued data allows,
    // chaining them on buf_out.
    hb_buffer_t *chain = Encode(w);
    *buf_out = chain;
    while (chain != NULL)
    {
        chain->next = Encode(w);
        chain = chain->next;
    }

    return HB_WORK_OK;
}
HandBrake-0.10.2/libhb/hbffmpeg.h 0000664 0001752 0001752 00000002406 12463330511 017046 0 ustar handbrake handbrake /* hbffmpeg.h
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavutil/channel_layout.h"
#include "libavutil/imgutils.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/avutil.h"
#include "libavutil/downmix_info.h"
#include "libswscale/swscale.h"
#include "libavresample/avresample.h"
#include "common.h"
#define HB_FFMPEG_THREADS_AUTO (-1) // let hb_avcodec_open() decide thread_count
void hb_avcodec_init(void);
int hb_avcodec_open(AVCodecContext *, AVCodec *, AVDictionary **, int);
int hb_avcodec_close(AVCodecContext *);
uint64_t hb_ff_mixdown_xlat(int hb_mixdown, int *downmix_mode);
void hb_ff_set_sample_fmt(AVCodecContext *, AVCodec *, enum AVSampleFormat);
struct SwsContext*
hb_sws_get_context(int srcW, int srcH, enum AVPixelFormat srcFormat,
int dstW, int dstH, enum AVPixelFormat dstFormat,
int flags);
int hb_avpicture_fill(AVPicture *pic, hb_buffer_t *buf);
HandBrake-0.10.2/libhb/rendersub.c 0000664 0001752 0001752 00000065417 12463330511 017267 0 ustar handbrake handbrake /* rendersub.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
#include "hbffmpeg.h"
#include <ass/ass.h>
struct hb_filter_private_s
{
    // Common
    int            crop[4];   // crop applied to the video: [0]=top [1]=bottom [2]=left [3]=right
    int            type;      // subtitle source type (VOBSUB, SSASUB, PGSSUB, ...)

    // VOBSUB
    hb_list_t    * sub_list; // List of active subs

    // SSA
    ASS_Library  * ssa;       // libass library handle
    ASS_Renderer * renderer;  // libass renderer
    ASS_Track    * ssaTrack;  // track receiving SSA events
    uint8_t        script_initialized; // nonzero once the ASS header was processed

    // SRT
    int            line;        // sequence number for synthesized SSA chunks
    hb_buffer_t  * current_sub; // active sub whose stop time is not yet known (e.g. CC)
};
// Per-source-type handlers; hb_rendersub_* dispatches to one of these
// sets based on the burned-in subtitle's source type.

// VOBSUB
static int vobsub_init( hb_filter_object_t * filter, hb_filter_init_t * init );
static int vobsub_work( hb_filter_object_t * filter,
                        hb_buffer_t ** buf_in,
                        hb_buffer_t ** buf_out );
static void vobsub_close( hb_filter_object_t * filter );

// SSA
static int ssa_init( hb_filter_object_t * filter, hb_filter_init_t * init );
static int ssa_work( hb_filter_object_t * filter,
                     hb_buffer_t ** buf_in,
                     hb_buffer_t ** buf_out );
static void ssa_close( hb_filter_object_t * filter );

// SRT
static int textsub_init( hb_filter_object_t * filter, hb_filter_init_t * init );
static int textsub_work( hb_filter_object_t * filter,
                         hb_buffer_t ** buf_in,
                         hb_buffer_t ** buf_out );
static void textsub_close( hb_filter_object_t * filter );

// PGS
static int pgssub_init ( hb_filter_object_t * filter, hb_filter_init_t * init );
static int pgssub_work ( hb_filter_object_t * filter,
                         hb_buffer_t ** buf_in,
                         hb_buffer_t ** buf_out );
static void pgssub_close( hb_filter_object_t * filter );

// Entry points
static int hb_rendersub_init( hb_filter_object_t * filter,
                              hb_filter_init_t * init );
static int hb_rendersub_work( hb_filter_object_t * filter,
                              hb_buffer_t ** buf_in,
                              hb_buffer_t ** buf_out );
static void hb_rendersub_close( hb_filter_object_t * filter );

// Filter object registered with the HandBrake pipeline: burns the
// selected subtitle track into the video frames.
hb_filter_object_t hb_filter_render_sub =
{
    .id            = HB_FILTER_RENDER_SUB,
    .enforce_order = 1,
    .name          = "Subtitle renderer",
    .settings      = NULL,
    .init          = hb_rendersub_init,
    .work          = hb_rendersub_work,
    .close         = hb_rendersub_close,
};
// Alpha-blend the subtitle overlay `src` (planar YUV + alpha plane)
// into video frame `dst` at position (left, top), clipping to the
// frame edges. Chroma subsampling shifts are derived from the plane
// sizes, so src and dst must use the same pixel format.
static void blend( hb_buffer_t *dst, hb_buffer_t *src, int left, int top )
{
    int xx, yy;
    int ww, hh;
    int x0, y0;
    uint8_t *y_in, *y_out;
    uint8_t *u_in, *u_out;
    uint8_t *v_in, *v_out;
    uint8_t *a_in, alpha;

    // Clip against the top/left frame edges: start partway into the
    // overlay when it hangs off the frame.
    x0 = y0 = 0;
    if( left < 0 )
    {
        x0 = -left;
    }
    if( top < 0 )
    {
        y0 = -top;
    }

    // Clip against the right/bottom frame edges.
    ww = src->f.width;
    if( src->f.width - x0 > dst->f.width - left )
    {
        ww = dst->f.width - left + x0;
    }
    hh = src->f.height;
    if( src->f.height - y0 > dst->f.height - top )
    {
        hh = dst->f.height - top + y0;
    }

    // Blend luma
    for( yy = y0; yy < hh; yy++ )
    {
        y_in  = src->plane[0].data + yy * src->plane[0].stride;
        y_out = dst->plane[0].data + ( yy + top ) * dst->plane[0].stride;
        a_in  = src->plane[3].data + yy * src->plane[3].stride;
        for( xx = x0; xx < ww; xx++ )
        {
            alpha = a_in[xx];
            /*
             * Merge the luminance and alpha with the picture
             */
            y_out[left + xx] =
                ( (uint16_t)y_out[left + xx] * ( 255 - alpha ) +
                  (uint16_t)y_in[xx] * alpha ) / 255;
        }
    }

    // Blend U & V
    // Assumes source and dest are the same PIX_FMT
    int hshift = 0;
    int wshift = 0;
    // Derive the chroma subsampling shift from the plane dimensions.
    if( dst->plane[1].height < dst->plane[0].height )
        hshift = 1;
    if( dst->plane[1].width < dst->plane[0].width )
        wshift = 1;

    for( yy = y0 >> hshift; yy < hh >> hshift; yy++ )
    {
        u_in  = src->plane[1].data + yy * src->plane[1].stride;
        u_out = dst->plane[1].data + ( yy + ( top >> hshift ) ) * dst->plane[1].stride;
        v_in  = src->plane[2].data + yy * src->plane[2].stride;
        v_out = dst->plane[2].data + ( yy + ( top >> hshift ) ) * dst->plane[2].stride;
        // Alpha is full resolution; sample the corresponding luma row.
        a_in  = src->plane[3].data + ( yy << hshift ) * src->plane[3].stride;
        for( xx = x0 >> wshift; xx < ww >> wshift; xx++ )
        {
            alpha = a_in[xx << wshift];

            // Blend averge U and alpha
            u_out[(left >> wshift) + xx] =
                ( (uint16_t)u_out[(left >> wshift) + xx] * ( 255 - alpha ) +
                  (uint16_t)u_in[xx] * alpha ) / 255;

            // Blend V and alpha
            v_out[(left >> wshift) + xx] =
                ( (uint16_t)v_out[(left >> wshift) + xx] * ( 255 - alpha ) +
                  (uint16_t)v_in[xx] * alpha ) / 255;
        }
    }
}
// Assumes that the input buffer has the same dimensions
// as the original title dimensions.
// Blend one subtitle image into the frame. For non-SSA sources the
// position is nudged so the subtitle stays out of the cropped zones
// and keeps a small margin; SSA positions are trusted as-is because
// libass already renders at the cropped frame size.
static void ApplySub( hb_filter_private_t * pv, hb_buffer_t * buf, hb_buffer_t * sub )
{
    int top, left, margin_top, margin_percent;

    if ( !pv->ssa )
    {
        /*
         * Percent of height of picture that form a margin that subtitles
         * should not be displayed within.
         */
        margin_percent = 2;

        /*
         * If necessary, move the subtitle so it is not in a cropped zone.
         * When it won't fit, we center it so we lose as much on both ends.
         * Otherwise we try to leave a 20px or 2% margin around it.
         */
        margin_top = ( ( buf->f.height - pv->crop[0] - pv->crop[1] ) *
                       margin_percent ) / 100;

        if( margin_top > 20 )
        {
            /*
             * A maximum margin of 20px regardless of height of the picture.
             */
            margin_top = 20;
        }

        if( sub->f.height > buf->f.height - pv->crop[0] - pv->crop[1] -
            ( margin_top * 2 ) )
        {
            /*
             * The subtitle won't fit in the cropped zone, so center
             * it vertically so we fit in as much as we can.
             */
            top = pv->crop[0] + ( buf->f.height - pv->crop[0] -
                                  pv->crop[1] - sub->f.height ) / 2;
        }
        else if( sub->f.y < pv->crop[0] + margin_top )
        {
            /*
             * The subtitle fits in the cropped zone, but is currently positioned
             * within our top margin, so move it outside of our margin.
             */
            top = pv->crop[0] + margin_top;
        }
        else if( sub->f.y > buf->f.height - pv->crop[1] - margin_top - sub->f.height )
        {
            /*
             * The subtitle fits in the cropped zone, and is not within the top
             * margin but is within the bottom margin, so move it to be above
             * the margin.
             */
            top = buf->f.height - pv->crop[1] - margin_top - sub->f.height;
        }
        else
        {
            /*
             * The subtitle is fine where it is.
             */
            top = sub->f.y;
        }

        // Same idea horizontally, with a fixed 20px margin.
        if( sub->f.width > buf->f.width - pv->crop[2] - pv->crop[3] - 40 )
            left = pv->crop[2] + ( buf->f.width - pv->crop[2] -
                                   pv->crop[3] - sub->f.width ) / 2;
        else if( sub->f.x < pv->crop[2] + 20 )
            left = pv->crop[2] + 20;
        else if( sub->f.x > buf->f.width - pv->crop[3] - 20 - sub->f.width )
            left = buf->f.width - pv->crop[3] - 20 - sub->f.width;
        else
            left = sub->f.x;
    }
    else
    {
        // SSA coordinates are already correct for the cropped frame.
        top = sub->f.y;
        left = sub->f.x;
    }

    blend( buf, sub, left, top );
}
// Assumes that the input buffer has the same dimensions
// as the original title dimensions.
// Walk the active VOBSUB list: drop subtitles that have expired before
// this frame, render those currently active, and stop at the first
// subtitle that starts in the future (the list is in start order).
static void ApplyVOBSubs( hb_filter_private_t * pv, hb_buffer_t * buf )
{
    int ii;
    hb_buffer_t *sub, *next;

    for( ii = 0; ii < hb_list_count(pv->sub_list); )
    {
        sub = hb_list_item( pv->sub_list, ii );
        if (ii + 1 < hb_list_count(pv->sub_list))
            next = hb_list_item( pv->sub_list, ii + 1 );
        else
            next = NULL;

        // A sub with an unknown stop time is implicitly ended by the
        // start of the next sub.
        if ((sub->s.stop != AV_NOPTS_VALUE && sub->s.stop <= buf->s.start) ||
            (next != NULL && sub->s.stop == AV_NOPTS_VALUE && next->s.start <= buf->s.start))
        {
            // Subtitle stop is in the past, delete it
            hb_list_rem( pv->sub_list, sub );
            hb_buffer_close( &sub );
        }
        else if( sub->s.start <= buf->s.start )
        {
            // The subtitle has started before this frame and ends
            // after it.  Render the subtitle into the frame.
            while ( sub )
            {
                ApplySub( pv, buf, sub );
                sub = sub->next;
            }
            ii++;
        }
        else
        {
            // The subtitle starts in the future.  No need to continue.
            break;
        }
    }
}
// Set up VOBSUB rendering state: just an empty list of active subs.
static int vobsub_init( hb_filter_object_t * filter,
                        hb_filter_init_t * init )
{
    hb_filter_private_t *priv = filter->private_data;

    priv->sub_list = hb_list_init();

    return 0;
}
// Release VOBSUB filter state, including any subtitles still queued.
static void vobsub_close( hb_filter_object_t * filter )
{
    hb_filter_private_t *priv = filter->private_data;

    if (priv == NULL)
    {
        return;
    }
    if (priv->sub_list != NULL)
    {
        hb_list_empty(&priv->sub_list);
    }
    free(priv);
    filter->private_data = NULL;
}
// Burn any active VOBSUB subtitles into the incoming video frame.
static int vobsub_work( hb_filter_object_t * filter,
                        hb_buffer_t ** buf_in,
                        hb_buffer_t ** buf_out )
{
    hb_filter_private_t *priv = filter->private_data;
    hb_buffer_t *frame = *buf_in;
    hb_buffer_t *pending;

    if (frame->size <= 0)
    {
        // EOF: pass the flush buffer straight through.
        *buf_in  = NULL;
        *buf_out = frame;
        return HB_FILTER_DONE;
    }

    // Move every pending subtitle from the fifo onto the active list.
    while ((pending = hb_fifo_get(filter->subtitle->fifo_out)) != NULL)
    {
        hb_list_add(priv->sub_list, pending);
    }

    ApplyVOBSubs(priv, frame);

    *buf_in  = NULL;
    *buf_out = frame;
    return HB_FILTER_OK;
}
// Compute the blend alpha for one pixel of a rendered libass image.
// The low byte of frame->color is the layer transparency (0 = opaque);
// frame->bitmap holds per-pixel glyph coverage.
static uint8_t ssaAlpha( ASS_Image *frame, int x, int y )
{
    unsigned frameA = ( frame->color ) & 0xff;
    unsigned gliphA = frame->bitmap[y*frame->stride + x];

    // Alpha for this pixel is the frame opacity (255 - frameA)
    // multiplied by the gliph alfa (gliphA) for this pixel
    // (the >> 8 approximates division by 255)
    unsigned alpha = (255 - frameA) * gliphA >> 8;

    return (uint8_t)alpha;
}
// Convert one libass image into a YUVA420P overlay buffer positioned
// for the (possibly cropped) video frame. Returns NULL on allocation
// failure.
static hb_buffer_t * RenderSSAFrame( hb_filter_private_t * pv, ASS_Image * frame )
{
    // The whole overlay shares a single color; convert it to YUV once.
    unsigned r = (frame->color >> 24) & 0xff;
    unsigned g = (frame->color >> 16) & 0xff;
    unsigned b = (frame->color >>  8) & 0xff;
    int yuv = hb_rgb2yuv((r << 16) | (g << 8) | b);
    unsigned frameY = (yuv >> 16) & 0xff;
    unsigned frameV = (yuv >>  8) & 0xff;
    unsigned frameU = (yuv >>  0) & 0xff;

    hb_buffer_t *sub = hb_frame_buffer_init(AV_PIX_FMT_YUVA420P,
                                            frame->w, frame->h);
    if (sub == NULL)
    {
        return NULL;
    }

    uint8_t *y_row = sub->plane[0].data;
    uint8_t *u_row = sub->plane[1].data;
    uint8_t *v_row = sub->plane[2].data;
    uint8_t *a_row = sub->plane[3].data;
    int row, col;

    for (row = 0; row < frame->h; row++)
    {
        // Chroma is vertically subsampled: only even rows carry U/V.
        int chroma_row = ((row & 1) == 0);

        for (col = 0; col < frame->w; col++)
        {
            y_row[col] = frameY;
            if (chroma_row)
            {
                u_row[col >> 1] = frameU;
                v_row[col >> 1] = frameV;
            }
            // Per-pixel alpha comes from the glyph coverage bitmap.
            a_row[col] = ssaAlpha(frame, col, row);
        }

        y_row += sub->plane[0].stride;
        if (chroma_row)
        {
            u_row += sub->plane[1].stride;
            v_row += sub->plane[2].stride;
        }
        a_row += sub->plane[3].stride;
    }

    sub->f.width  = frame->w;
    sub->f.height = frame->h;
    // libass positions are relative to the cropped frame; shift back
    // into full-frame coordinates.
    sub->f.x = frame->dst_x + pv->crop[2];
    sub->f.y = frame->dst_y + pv->crop[0];

    return sub;
}
// Render the SSA track at this frame's timestamp and blend every
// resulting libass image into the video buffer.
static void ApplySSASubs( hb_filter_private_t * pv, hb_buffer_t * buf )
{
    // libass wants the presentation time in ms (90 kHz ticks / 90).
    ASS_Image *img = ass_render_frame( pv->renderer, pv->ssaTrack,
                                       buf->s.start / 90, NULL );

    for ( ; img != NULL; img = img->next )
    {
        hb_buffer_t *overlay = RenderSSAFrame( pv, img );
        if ( overlay != NULL )
        {
            ApplySub( pv, buf, overlay );
            hb_buffer_close( &overlay );
        }
    }
}
// libass message callback: forward messages to the HandBrake log,
// filtered at libass' default verbosity threshold.
static void ssa_log(int level, const char *fmt, va_list args, void *data)
{
    // Levels >= 5 would be suppressed by libass' default handler too.
    if (level < 5)
    {
        hb_valog(1, "[ass]", fmt, args);
    }
}
// Initialize libass for SSA rendering: library, logging, embedded
// fonts, renderer, default font, the event track, and the frame
// geometry (cropped size and pixel aspect ratio).
// Returns 0 on success, 1 on failure.
static int ssa_init( hb_filter_object_t * filter,
                     hb_filter_init_t * init )
{
    hb_filter_private_t * pv = filter->private_data;

    pv->ssa = ass_library_init();
    if ( !pv->ssa ) {
        hb_error( "decssasub: libass initialization failed\n" );
        return 1;
    }

    // Redirect libass output to hb_log
    ass_set_message_cb( pv->ssa, ssa_log, NULL );

    // Load embedded fonts
    hb_list_t * list_attachment = init->job->list_attachment;
    int i;
    for ( i = 0; i < hb_list_count(list_attachment); i++ )
    {
        hb_attachment_t * attachment = hb_list_item( list_attachment, i );
        if ( attachment->type == FONT_TTF_ATTACH )
        {
            ass_add_font(
                pv->ssa,
                attachment->name,
                attachment->data,
                attachment->size );
        }
    }

    ass_set_extract_fonts( pv->ssa, 1 );
    ass_set_style_overrides( pv->ssa, NULL );

    pv->renderer = ass_renderer_init( pv->ssa );
    if ( !pv->renderer ) {
        hb_log( "decssasub: renderer initialization failed\n" );
        return 1;
    }

    ass_set_use_margins( pv->renderer, 0 );
    ass_set_hinting( pv->renderer, ASS_HINTING_LIGHT ); // VLC 1.0.4 uses this
    ass_set_font_scale( pv->renderer, 1.0 );
    ass_set_line_spacing( pv->renderer, 1.0 );

    // Setup default font family
    //
    // SSA v4.00 requires that "Arial" be the default font
    const char *font = NULL;
    const char *family = "Arial";
    // NOTE: This can sometimes block for several *seconds*.
    // It seems that process_fontdata() for some embedded fonts is slow.
    ass_set_fonts( pv->renderer, font, family, /*haveFontConfig=*/1, NULL, 1 );

    // Setup track state
    pv->ssaTrack = ass_new_track( pv->ssa );
    if ( !pv->ssaTrack ) {
        hb_log( "decssasub: ssa track initialization failed\n" );
        return 1;
    }

    // Render at the post-crop frame size so subtitle positions land in
    // the visible picture.
    int width = init->width - ( pv->crop[2] + pv->crop[3] );
    int height = init->height - ( pv->crop[0] + pv->crop[1] );
    ass_set_frame_size( pv->renderer, width, height);

    double par = (double)init->par_width / init->par_height;
    ass_set_aspect_ratio( pv->renderer, 1, par );

    return 0;
}
// Tear down all libass state (track, renderer, library) and the
// filter's private data.
static void ssa_close( hb_filter_object_t * filter )
{
    hb_filter_private_t *priv = filter->private_data;

    if (priv == NULL)
    {
        return;
    }
    if (priv->ssaTrack != NULL)
    {
        ass_free_track(priv->ssaTrack);
    }
    if (priv->renderer != NULL)
    {
        ass_renderer_done(priv->renderer);
    }
    if (priv->ssa != NULL)
    {
        ass_library_done(priv->ssa);
    }
    free(priv);
    filter->private_data = NULL;
}
// Feed pending MKV-SSA subtitle packets to libass and render the
// active events onto the incoming video frame.
static int ssa_work( hb_filter_object_t * filter,
                     hb_buffer_t ** buf_in,
                     hb_buffer_t ** buf_out )
{
    hb_filter_private_t * pv = filter->private_data;
    hb_buffer_t * in = *buf_in;
    hb_buffer_t * sub;

    if (!pv->script_initialized)
    {
        // NOTE: The codec extradata is expected to be in MKV format
        // I would like to initialize this in ssa_init, but when we are
        // transcoding text subtitles to SSA, the extradata does not
        // get initialized until the decoder is initialized.  Since
        // decoder initialization happens after filter initialization,
        // we need to postpone this.
        ass_process_codec_private(pv->ssaTrack,
                                  (char*)filter->subtitle->extradata,
                                  filter->subtitle->extradata_size);
        pv->script_initialized = 1;
    }

    if ( in->size <= 0 )
    {
        // EOF: pass the flush buffer through unchanged.
        *buf_in = NULL;
        *buf_out = in;
        return HB_FILTER_DONE;
    }

    // Get any pending subtitles and add them to the active
    // subtitle list
    while( ( sub = hb_fifo_get( filter->subtitle->fifo_out ) ) )
    {
        // Parse MKV-SSA packet
        // SSA subtitles always have an explicit stop time, so we
        // do not need to do special processing for stop == AV_NOPTS_VALUE
        // (times are converted from 90 kHz ticks to ms for libass)
        ass_process_chunk( pv->ssaTrack, (char*)sub->data, sub->size,
                           sub->s.start / 90,
                           (sub->s.stop - sub->s.start) / 90 );
        hb_buffer_close(&sub);
    }

    ApplySSASubs( pv, in );
    *buf_in = NULL;
    *buf_out = in;
    return HB_FILTER_OK;
}
// Initialize rendering for text subtitle formats (SRT/CC/UTF8/TX3G).
// These are converted to SSA internally, so after rewriting the dummy
// ASS header with the post-crop dimensions this defers to ssa_init().
static int textsub_init( hb_filter_object_t * filter,
                         hb_filter_init_t * init )
{
    hb_filter_private_t *priv = filter->private_data;
    int render_w = init->width  - (priv->crop[2] + priv->crop[3]);
    int render_h = init->height - (priv->crop[0] + priv->crop[1]);

    // Text subtitles for which we create a dummy ASS header need
    // to have the header rewritten with the correct dimensions.
    hb_subtitle_add_ssa_header(filter->subtitle, render_w, render_h);

    return ssa_init(filter, init);
}
// Tear down text-subtitle state; everything lives in the shared SSA
// state, so this simply delegates to ssa_close().
static void textsub_close( hb_filter_object_t * filter )
{
    // Was `return ssa_close(filter);` — returning a void expression
    // from a void function violates C99 6.8.6.4 (accepted only as a
    // compiler extension), so call and return separately.
    ssa_close(filter);
}
// Feed one text subtitle event to libass, rewriting its sequence
// number so repeated events are still accepted as unique chunks.
static void process_sub(hb_filter_private_t *pv, hb_buffer_t *sub)
{
    int64_t start, dur;
    char *ssa, *tmp;

    // libass expects every chunk to have a unique sequence number
    // since we are repeating subs in some cases, we need to replace
    // the sequence number.
    tmp = strchr((char*)sub->data, ',');
    if (tmp == NULL)
        return;
    ssa = hb_strdup_printf("%d%s", ++pv->line, tmp);

    // Parse MKV-SSA packet
    // SSA subtitles always have an explicit stop time, so we
    // do not need to do special processing for stop == AV_NOPTS_VALUE
    start = sub->s.start;
    dur = sub->s.stop - sub->s.start;
    // Pass the rewritten string's own length: the new sequence number
    // can have a different digit count than the original, so sub->size
    // no longer matches the data handed to libass.
    ass_process_chunk(pv->ssaTrack, ssa, strlen(ssa), start, dur);
    free(ssa);
}
// Process pending text subtitles (times converted to ms for libass),
// tracking one "open" subtitle whose stop time is unknown until a
// clear event arrives, then render the active events onto the frame.
static int textsub_work(hb_filter_object_t * filter,
                        hb_buffer_t ** buf_in,
                        hb_buffer_t ** buf_out)
{
    hb_filter_private_t * pv = filter->private_data;
    hb_buffer_t * in = *buf_in;
    hb_buffer_t * sub;

    if (!pv->script_initialized)
    {
        // Extradata is only available once the decoder has run; see the
        // matching note in ssa_work.
        ass_process_codec_private(pv->ssaTrack,
                                  (char*)filter->subtitle->extradata,
                                  filter->subtitle->extradata_size);
        pv->script_initialized = 1;
    }

    if (in->size <= 0)
    {
        // EOF: pass the flush buffer through unchanged.
        *buf_in = NULL;
        *buf_out = in;
        return HB_FILTER_DONE;
    }

    int in_start_ms = in->s.start / 90;

    // Get any pending subtitles and add them to the active
    // subtitle list
    while ((sub = hb_fifo_get(filter->subtitle->fifo_out)))
    {
        // libass expects times in ms.  So to make the math easy,
        // convert to ms immediately.
        sub->s.start /= 90;
        if (sub->s.stop != AV_NOPTS_VALUE)
        {
            sub->s.stop /= 90;
        }

        // Subtitle formats such as CC can have stop times
        // that are not known until an "erase display" command
        // is encountered in the stream.  For these formats
        // current_sub is the currently active subtitle for which
        // we do not yet know the stop time.  We do not currently
        // support overlapping subtitles of this type.
        if (pv->current_sub != NULL)
        {
            // Next sub start time tells us the stop time of the
            // current sub when it is not known in advance.
            pv->current_sub->s.stop = sub->s.start;
            process_sub(pv, pv->current_sub);
            hb_buffer_close(&pv->current_sub);
        }

        if (sub->s.start == sub->s.stop)
        {
            // Zero duration sub used to "clear" previous sub that had
            // an unknown duration
            hb_buffer_close(&sub);
        }
        else if (sub->s.stop == AV_NOPTS_VALUE)
        {
            // We don't know the duration of this sub.  So we will
            // apply it to every video frame until we see a "clear" sub.
            pv->current_sub = sub;
            pv->current_sub->s.stop = pv->current_sub->s.start;
        }
        else
        {
            // Duration of this subtitle is known, so we can just
            // process it normally.
            process_sub(pv, sub);
            hb_buffer_close(&sub);
        }
    }

    if (pv->current_sub != NULL && pv->current_sub->s.start <= in_start_ms)
    {
        // We don't know the duration of this subtitle, but we know
        // that it started before the current video frame and that
        // it is still active.  So render it on this video frame.
        // (Feed it to libass as a 1 ms event covering this frame.)
        pv->current_sub->s.start = pv->current_sub->s.stop;
        pv->current_sub->s.stop = in_start_ms + 1;
        process_sub(pv, pv->current_sub);
    }

    ApplySSASubs(pv, in);
    *buf_in = NULL;
    *buf_out = in;
    return HB_FILTER_OK;
}
// Render the active PGS subtitle (if any) onto the frame. Each PGS
// subtitle supersedes everything before it, so older entries are
// discarded once a newer one becomes active.
static void ApplyPGSSubs( hb_filter_private_t * pv, hb_buffer_t * buf )
{
    int index;
    hb_buffer_t * old_sub;
    hb_buffer_t * sub;

    // Each PGS subtitle supersedes anything that preceded it.
    // Find the active subtitle (if there is one), and delete
    // everything before it.
    for( index = hb_list_count( pv->sub_list ) - 1; index > 0; index-- )
    {
        sub = hb_list_item( pv->sub_list, index);
        if ( sub->s.start <= buf->s.start )
        {
            // This sub is active: drop all earlier ones.
            while ( index > 0 )
            {
                old_sub = hb_list_item( pv->sub_list, index - 1);
                hb_list_rem( pv->sub_list, old_sub );
                hb_buffer_close( &old_sub );
                index--;
            }
        }
    }

    // Some PGS subtitles have no content and only serve to clear
    // the screen.  If any of these are at the front of our list,
    // we can now get rid of them.
    while ( hb_list_count( pv->sub_list ) > 0 )
    {
        sub = hb_list_item( pv->sub_list, 0 );
        if (sub->f.width != 0 && sub->f.height != 0)
            break;
        hb_list_rem( pv->sub_list, sub );
        hb_buffer_close( &sub );
    }

    // Check to see if there's an active subtitle, and apply it.
    if ( hb_list_count( pv->sub_list ) > 0)
    {
        sub = hb_list_item( pv->sub_list, 0 );
        if ( sub->s.start <= buf->s.start )
        {
            // A PGS subtitle may carry multiple images chained on ->sub.
            while ( sub )
            {
                ApplySub( pv, buf, sub );
                sub = sub->sub;
            }
        }
    }
}
// Set up PGS rendering state: just an empty list of active subs.
static int pgssub_init( hb_filter_object_t * filter,
                        hb_filter_init_t * init )
{
    hb_filter_private_t *priv = filter->private_data;

    priv->sub_list = hb_list_init();

    return 0;
}
// Release PGS filter state, including any subtitles still queued.
static void pgssub_close( hb_filter_object_t * filter )
{
    hb_filter_private_t *priv = filter->private_data;

    if (priv == NULL)
    {
        return;
    }
    if (priv->sub_list != NULL)
    {
        hb_list_empty(&priv->sub_list);
    }
    free(priv);
    filter->private_data = NULL;
}
// Burn the active PGS subtitle into the incoming video frame.
static int pgssub_work( hb_filter_object_t * filter,
                        hb_buffer_t ** buf_in,
                        hb_buffer_t ** buf_out)
{
    hb_filter_private_t *priv = filter->private_data;
    hb_buffer_t *frame = *buf_in;
    hb_buffer_t *pending;

    if (frame->size <= 0)
    {
        // EOF: pass the flush buffer straight through.
        *buf_in  = NULL;
        *buf_out = frame;
        return HB_FILTER_DONE;
    }

    // Move every pending subtitle from the fifo onto the active list.
    while ((pending = hb_fifo_get(filter->subtitle->fifo_out)) != NULL)
    {
        hb_list_add(priv->sub_list, pending);
    }

    ApplyPGSSubs(priv, frame);

    *buf_in  = NULL;
    *buf_out = frame;
    return HB_FILTER_OK;
}
// Filter entry point: parse the crop settings, locate the subtitle
// track marked for burn-in, and dispatch to the matching per-type
// initializer. Returns 0 on success, 1 on failure.
static int hb_rendersub_init( hb_filter_object_t * filter,
                              hb_filter_init_t * init )
{
    filter->private_data = calloc( 1, sizeof(struct hb_filter_private_s) );
    hb_filter_private_t * pv = filter->private_data;
    hb_subtitle_t *subtitle;
    int ii;

    // Settings string carries the crop: "top:bottom:left:right".
    if( filter->settings )
    {
        sscanf( filter->settings, "%d:%d:%d:%d",
                &pv->crop[0],
                &pv->crop[1],
                &pv->crop[2],
                &pv->crop[3]);
    }

    // Find the subtitle we need
    for( ii = 0; ii < hb_list_count(init->job->list_subtitle); ii++ )
    {
        subtitle = hb_list_item( init->job->list_subtitle, ii );
        if( subtitle && subtitle->config.dest == RENDERSUB )
        {
            // Found it
            filter->subtitle = subtitle;
            pv->type = subtitle->source;
            break;
        }
    }
    if( filter->subtitle == NULL )
    {
        hb_log("rendersub: no subtitle marked for burn");
        return 1;
    }

    switch( pv->type )
    {
        case VOBSUB:
        {
            return vobsub_init( filter, init );
        } break;

        case SSASUB:
        {
            return ssa_init( filter, init );
        } break;

        case SRTSUB:
        case CC608SUB:
        case UTF8SUB:
        case TX3GSUB:
        {
            return textsub_init( filter, init );
        } break;

        case PGSSUB:
        {
            return pgssub_init( filter, init );
        } break;

        default:
        {
            hb_log("rendersub: unsupported subtitle format %d", pv->type );
            return 1;
        } break;
    }
}
// Dispatch per-frame work to the handler for the burned-in subtitle's
// source type.
static int hb_rendersub_work( hb_filter_object_t * filter,
                              hb_buffer_t ** buf_in,
                              hb_buffer_t ** buf_out )
{
    hb_filter_private_t *pv = filter->private_data;

    switch (pv->type)
    {
        case VOBSUB:
            return vobsub_work(filter, buf_in, buf_out);
        case SSASUB:
            return ssa_work(filter, buf_in, buf_out);
        case SRTSUB:
        case CC608SUB:
        case UTF8SUB:
        case TX3GSUB:
            return textsub_work(filter, buf_in, buf_out);
        case PGSSUB:
            return pgssub_work(filter, buf_in, buf_out);
        default:
            hb_error("rendersub: unsupported subtitle format %d", pv->type );
            return 1;
    }
}
// Dispatch teardown to the handler for the burned-in subtitle's
// source type.
static void hb_rendersub_close( hb_filter_object_t * filter )
{
    hb_filter_private_t *pv = filter->private_data;

    switch (pv->type)
    {
        case VOBSUB:
            vobsub_close(filter);
            break;
        case SSASUB:
            ssa_close(filter);
            break;
        case SRTSUB:
        case CC608SUB:
        case UTF8SUB:
        case TX3GSUB:
            textsub_close(filter);
            break;
        case PGSSUB:
            pgssub_close(filter);
            break;
        default:
            hb_error("rendersub: unsupported subtitle format %d", pv->type );
            break;
    }
}
HandBrake-0.10.2/libhb/qsv_memory.h 0000664 0001752 0001752 00000004375 12205472744 017511 0 ustar handbrake handbrake /* ********************************************************************* *\
Copyright (C) 2013 Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\* ********************************************************************* */
#ifndef QSV_MEMORY_H
#define QSV_MEMORY_H
#include "libavcodec/qsv.h"
#include "msdk/mfxplugin.h"
/* Scratch descriptor for copying frames between QSV (NV12) surfaces and
 * HandBrake (YUV420) buffers. */
typedef struct{
    struct{
        // NV12 surface layout: two planes, Y and interleaved UV
        uint8_t *data[2];
        int strides[2];
    } qsv_data;
    struct{
        // planar YUV420 layout: one plane each for Y, U and V
        uint8_t *data[3];
        int strides[3];
    } data;
    int width;   // frame width in pixels
    int height;  // frame height in pixels
} qsv_memory_copy_t;
/* Convert an NV12 QSV surface into a YUV420 hb_buffer_t via swscale. */
int qsv_nv12_to_yuv420(struct SwsContext* sws_context,hb_buffer_t* dst, mfxFrameSurface1* src,mfxCoreInterface *core);
/* Convert a YUV420 hb_buffer_t into an NV12 QSV surface via swscale. */
int qsv_yuv420_to_nv12(struct SwsContext* sws_context,mfxFrameSurface1* dst, hb_buffer_t* src);
#endif // QSV_MEMORY_H
HandBrake-0.10.2/libhb/decpgssub.c 0000664 0001752 0001752 00000045056 12463330511 017252 0 ustar handbrake handbrake /* decpgssub.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
#include "hbffmpeg.h"
/* Per-instance state for the PGS subtitle decoder work object. */
struct hb_work_private_s
{
    AVCodecContext * context;   // libav PGS decoder context
    hb_job_t       * job;       // owning job (mux type, scan flags)
    // For PGS subs, when doing passthru, we don't know if we need a
    // packet until we have processed several packets. So we cache
    // all the packets we see until libav returns a subtitle with
    // the information we need.
    hb_buffer_t    * list_pass_buffer;
    hb_buffer_t    * last_pass_buffer;
    // It is possible for multiple subtitles to be enncapsulated in
    // one packet. This won't happen for PGS subs, but may for other
    // types of subtitles. Since I plan to generalize this code to handle
    // other than PGS, we will need to keep a list of all subtitles seen
    // while parsing an input packet.
    hb_buffer_t    * list_buffer;
    hb_buffer_t    * last_buffer;
    // XXX: we may occasionally see subtitles with broken timestamps
    //      while this should really get fixed elsewhere,
    //      dropping subtitles should be avoided as much as possible
    int64_t          last_pts;
    // for PGS subs, we need to pass 'empty' subtitles through (they clear the
    // display) - when doing forced-only extraction, only pass empty subtitles
    // through if we've seen a forced sub and haven't seen any empty sub since
    uint8_t          seen_forced_sub;
    // if we start encoding partway through the source, we may encounter empty
    // subtitles before we see any actual subtitle content - discard them
    uint8_t          discard_subtitle;
};
/*
 * Initialize the PGS subtitle decoder: look up the libav HDMV PGS decoder,
 * allocate its context and the private work state, and open the codec.
 *
 * Returns 0 on success, 1 on failure.
 *
 * Fix vs. original: the results of avcodec_find_decoder(),
 * avcodec_alloc_context3() and calloc() were used unchecked, so a missing
 * decoder or allocation failure dereferenced NULL.
 */
static int decsubInit( hb_work_object_t * w, hb_job_t * job )
{
    AVCodec *codec = avcodec_find_decoder( AV_CODEC_ID_HDMV_PGS_SUBTITLE );
    if ( codec == NULL )
    {
        hb_log( "decsubInit: HDMV PGS decoder not found" );
        return 1;
    }
    AVCodecContext *context = avcodec_alloc_context3( codec );
    if ( context == NULL )
    {
        hb_log( "decsubInit: avcodec_alloc_context3 failed" );
        return 1;
    }
    context->codec = codec;

    hb_work_private_t * pv;
    pv = calloc( 1, sizeof( hb_work_private_t ) );
    if ( pv == NULL )
    {
        return 1;
    }
    w->private_data = pv;

    pv->discard_subtitle = 1;   // drop leading "clear display" subs
    pv->seen_forced_sub  = 0;
    pv->last_pts         = 0;
    pv->context          = context;
    pv->job              = job;

    // Set decoder opts...
    AVDictionary * av_opts = NULL;
    // e.g. av_dict_set( &av_opts, "refcounted_frames", "1", 0 );

    if (hb_avcodec_open(pv->context, codec, &av_opts, 0))
    {
        av_dict_free( &av_opts );
        hb_log("decsubInit: avcodec_open failed");
        return 1;
    }
    av_dict_free( &av_opts );

    return 0;
}
/*
 * Rewrite, in place, the presentation composition segment of a PGS display
 * set so that it contains zero composition objects - i.e. it clears the
 * screen instead of showing a subtitle.  Used during forced-only passthru
 * to neutralize non-forced subtitles without dropping their packets.
 */
static void make_empty_pgs( hb_buffer_t * buf )
{
    hb_buffer_t * b = buf;
    uint8_t done = 0;

    // Each buffer is composed of 1 or more segments.
    // Segment header is:
    //      type    - 1 byte
    //      length  - 2 bytes
    // We want to modify the presentation segment which is type 0x16
    //
    // Note that every pgs display set is required to have a presentation
    // segment, so we will only have to look at one display set.
    while ( b && !done )
    {
        int ii = 0;

        while (ii + 3 <= b->size)
        {
            uint8_t type;
            int len;
            int segment_len_pos;

            type = b->data[ii++];
            segment_len_pos = ii;
            len = ((int)b->data[ii] << 8) + b->data[ii+1];
            ii += 2;

            if (type == 0x16 && ii + len <= b->size)
            {
                int obj_count;
                int kk, jj = ii;
                int obj_start;

                // Skip
                // video descriptor 5 bytes
                // composition descriptor 3 bytes
                // palette update flg 1 byte
                // palette id ref 1 byte
                jj += 10;

                // Set number of composition objects to 0
                obj_count = b->data[jj];
                b->data[jj] = 0;
                jj++;
                obj_start = jj;

                // And remove all the composition objects
                for (kk = 0; kk < obj_count; kk++)
                {
                    uint8_t crop;
                    crop = b->data[jj + 3];
                    // skip
                    // object id - 2 bytes
                    // window id - 1 byte
                    // object/forced flag - 1 byte
                    // x pos - 2 bytes
                    // y pos - 2 bytes
                    jj += 8;
                    if (crop & 0x80)
                    {
                        // skip
                        // crop x - 2 bytes
                        // crop y - 2 bytes
                        // crop w - 2 bytes
                        // crop h - 2 bytes
                        jj += 8;
                    }
                }
                if (jj < b->size)
                {
                    // Shift any trailing segments down over the removed
                    // composition objects.
                    memmove(b->data + obj_start, b->data + jj, b->size - jj);
                }
                b->size = obj_start + ( b->size - jj );
                done = 1;
                // Patch the segment length to match the shrunken payload.
                len = obj_start - (segment_len_pos + 2);
                b->data[segment_len_pos] = len >> 8;
                b->data[segment_len_pos+1] = len & 0xff;
                break;
            }
            ii += len;
        }
        b = b->next;
    }
}
/*
 * PGS subtitle decoder work function.
 *
 * Feeds each input packet to libav's PGS decoder and, depending on the
 * subtitle destination, either:
 *   - PASSTHRUSUB: caches raw packets in list_pass_buffer until libav tells
 *     us whether they belong to a usable subtitle, then forwards them
 *     (merged into a single packet for containers that require it), or
 *   - render path: composites all subtitle rectangles into one YUVA420P
 *     frame buffer covering their bounding box.
 *
 * Handles forced-only extraction, empty ("clear display") subtitles, and
 * broken/missing timestamps.  Returns HB_WORK_OK, or HB_WORK_DONE on EOF.
 */
static int decsubWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                       hb_buffer_t ** buf_out )
{
    hb_work_private_t * pv = w->private_data;
    hb_buffer_t * in = *buf_in;

    if ( in->size <= 0 )
    {
        /* EOF on input stream - send it downstream & say that we're done */
        if ( pv->list_buffer == NULL )
        {
            pv->list_buffer = pv->last_buffer = in;
        }
        else
        {
            pv->last_buffer->next = in;
        }
        *buf_in = NULL;
        *buf_out = pv->list_buffer;
        pv->list_buffer = NULL;
        return HB_WORK_DONE;
    }

    if ( !pv->job->indepth_scan &&
         w->subtitle->config.dest == PASSTHRUSUB &&
         hb_subtitle_can_pass( PGSSUB, pv->job->mux ) )
    {
        // Append to buffer list.  It will be sent to fifo after we determine
        // if this is a packet we need.
        if ( pv->list_pass_buffer == NULL )
        {
            pv->list_pass_buffer = pv->last_pass_buffer = in;
        }
        else
        {
            pv->last_pass_buffer->next = in;
            pv->last_pass_buffer = in;
        }
        // We are keeping the buffer, so prevent the filter loop from
        // deleting it.
        *buf_in = NULL;
    }

    AVSubtitle subtitle;
    memset( &subtitle, 0, sizeof(subtitle) );

    AVPacket avp;
    av_init_packet( &avp );
    avp.data = in->data;
    avp.size = in->size;
    // libav wants pkt pts in AV_TIME_BASE units
    if (in->s.start != AV_NOPTS_VALUE)
    {
        avp.pts = av_rescale(in->s.start, AV_TIME_BASE, 90000);
    }
    else
    {
        avp.pts = AV_NOPTS_VALUE;
    }

    int has_subtitle = 0;

    // A single input packet may decode to several subtitles; loop until
    // the packet is fully consumed.
    do
    {
        int usedBytes = avcodec_decode_subtitle2( pv->context, &subtitle, &has_subtitle, &avp );
        if (usedBytes < 0)
        {
            // NOTE(review): on a decode error, packets cached in
            // pv->list_pass_buffer are not released here; they remain until
            // a later usable/unusable decision or close - confirm intended.
            hb_log("unable to decode subtitle with %d bytes.", avp.size);
            return HB_WORK_OK;
        }

        if (usedBytes <= avp.size)
        {
            avp.data += usedBytes;
            avp.size -= usedBytes;
        }
        else
        {
            avp.size = 0;
        }

        /* Subtitles are "usable" if:
         *   1. Libav returned a subtitle (has_subtitle) AND
         *   2. we're not doing Foreign Audio Search (!pv->job->indepth_scan) AND
         *   3. the sub is non-empty or we've seen one such sub before (!pv->discard_subtitle)
         * For forced-only extraction, usable subtitles also need to:
         *   a. be forced (subtitle.rects[0]->flags & AV_SUBTITLE_FLAG_FORCED) OR
         *   b. follow a forced sub (pv->seen_forced_sub) */
        uint8_t forced_sub = 0;
        uint8_t useable_sub = 0;
        uint8_t clear_subtitle = 0;

        if (has_subtitle)
        {
            // subtitle statistics
            if (subtitle.num_rects)
            {
                w->subtitle->hits++;
                if (subtitle.rects[0]->flags & AV_SUBTITLE_FLAG_FORCED)
                {
                    forced_sub = 1;
                    w->subtitle->forced_hits++;
                }
            }
            else
            {
                // no rectangles == an empty sub that clears the display
                clear_subtitle = 1;
            }
            // are we doing Foreign Audio Search?
            if (!pv->job->indepth_scan)
            {
                // do we want to discard this subtitle?
                pv->discard_subtitle = pv->discard_subtitle && clear_subtitle;
                // do we need this subtitle?
                useable_sub = (!pv->discard_subtitle &&
                               (!w->subtitle->config.force ||
                                forced_sub || pv->seen_forced_sub));
                // do we need to create an empty subtitle?
                if (w->subtitle->config.force &&
                    useable_sub && !forced_sub && !clear_subtitle)
                {
                    // We are forced-only and need to output this subtitle, but
                    // it's neither forced nor empty.
                    //
                    // If passthru, create an empty subtitle.
                    // Also, flag an empty subtitle for subtitle RENDER.
                    make_empty_pgs(pv->list_pass_buffer);
                    clear_subtitle = 1;
                }
                // is the subtitle forced?
                pv->seen_forced_sub = forced_sub;
            }
        }

        if (useable_sub)
        {
            // Work out the output pts, falling back to the input pts and
            // finally to a synthesized value when timestamps are broken.
            int64_t pts = AV_NOPTS_VALUE;
            hb_buffer_t * out = NULL;

            if (subtitle.pts != AV_NOPTS_VALUE)
            {
                pts = av_rescale(subtitle.pts, 90000, AV_TIME_BASE);
            }
            else
            {
                if (in->s.start >= 0)
                {
                    pts = in->s.start;
                }
                else
                {
                    // XXX: a broken pts will cause us to drop this subtitle,
                    //      which is bad; use a default duration of 3 seconds
                    //
                    //      A broken pts is only generated when a pgs packet
                    //      occurs after a discontinuity and before the
                    //      next audio or video packet which re-establishes
                    //      timing (afaik).
                    pts = pv->last_pts + 3 * 90000LL;
                    hb_log("[warning] decpgssub: track %d, invalid PTS",
                           w->subtitle->out_track);
                }
            }
            // work around broken timestamps
            if (pts < pv->last_pts)
            {
                // XXX: this should only happen if the prevous pts
                // was unknown and our 3 second default duration
                // overshot the next pgs pts.
                //
                // assign a 1 second duration
                pts = pv->last_pts + 1 * 90000LL;
                hb_log("[warning] decpgssub: track %d, non-monotically increasing PTS",
                       w->subtitle->out_track);
            }
            pv->last_pts = pts;

            if ( w->subtitle->config.dest == PASSTHRUSUB &&
                 hb_subtitle_can_pass( PGSSUB, pv->job->mux ) )
            {
                /* PGS subtitles are spread across multiple packets (1 per segment).
                 * In the MKV container, all segments are found in the same packet
                 * (this is expected by some devices, such as the WD TV Live).
                 * So if there are multiple packets, merge them. */
                if (pv->list_pass_buffer->next == NULL)
                {
                    // packets already merged (e.g. MKV sources)
                    out = pv->list_pass_buffer;
                    pv->list_pass_buffer = NULL;
                }
                else
                {
                    int size = 0;
                    uint8_t * data;
                    hb_buffer_t * b;

                    // Sum the segment sizes, then copy them back to back
                    // into one contiguous packet.
                    b = pv->list_pass_buffer;
                    while (b != NULL)
                    {
                        size += b->size;
                        b = b->next;
                    }
                    out = hb_buffer_init( size );
                    data = out->data;
                    b = pv->list_pass_buffer;
                    while (b != NULL)
                    {
                        memcpy( data, b->data, b->size );
                        data += b->size;
                        b = b->next;
                    }
                    hb_buffer_close( &pv->list_pass_buffer );

                    out->s = in->s;
                    out->sequence = in->sequence;
                }
                out->s.frametype = HB_FRAME_SUBTITLE;
                out->s.renderOffset = AV_NOPTS_VALUE;
                out->s.stop = AV_NOPTS_VALUE;
                out->s.start = pts;
            }
            else
            {
                if (!clear_subtitle)
                {
                    unsigned ii, x0, y0, x1, y1, w, h;

                    x0 = subtitle.rects[0]->x;
                    y0 = subtitle.rects[0]->y;
                    x1 = subtitle.rects[0]->x + subtitle.rects[0]->w;
                    y1 = subtitle.rects[0]->y + subtitle.rects[0]->h;

                    // First, find total bounding rectangle
                    for (ii = 1; ii < subtitle.num_rects; ii++)
                    {
                        if (subtitle.rects[ii]->x < x0)
                            x0 = subtitle.rects[ii]->x;
                        if (subtitle.rects[ii]->y < y0)
                            y0 = subtitle.rects[ii]->y;
                        if (subtitle.rects[ii]->x + subtitle.rects[ii]->w > x1)
                            x1 = subtitle.rects[ii]->x + subtitle.rects[ii]->w;
                        if (subtitle.rects[ii]->y + subtitle.rects[ii]->h > y1)
                            y1 = subtitle.rects[ii]->y + subtitle.rects[ii]->h;
                    }
                    w = x1 - x0;
                    h = y1 - y0;

                    out = hb_frame_buffer_init(AV_PIX_FMT_YUVA420P, w, h);
                    memset(out->data, 0, out->size);

                    out->s.frametype = HB_FRAME_SUBTITLE;
                    out->s.id = in->s.id;
                    out->sequence = in->sequence;
                    out->s.start = pts;
                    out->s.stop = AV_NOPTS_VALUE;
                    out->s.renderOffset = AV_NOPTS_VALUE;
                    out->f.x = x0;
                    out->f.y = y0;

                    // Paint each paletted rectangle into the YUVA420P
                    // bounding-box frame, converting RGB palette entries
                    // to YUV and subsampling chroma 2x2.
                    for (ii = 0; ii < subtitle.num_rects; ii++)
                    {
                        AVSubtitleRect *rect = subtitle.rects[ii];

                        int off_x = rect->x - x0;
                        int off_y = rect->y - y0;
                        uint8_t *lum = out->plane[0].data;
                        uint8_t *chromaU = out->plane[1].data;
                        uint8_t *chromaV = out->plane[2].data;
                        uint8_t *alpha = out->plane[3].data;

                        lum += off_y * out->plane[0].stride + off_x;
                        alpha += off_y * out->plane[3].stride + off_x;
                        chromaU += (off_y >> 1) * out->plane[1].stride + (off_x >> 1);
                        chromaV += (off_y >> 1) * out->plane[2].stride + (off_x >> 1);

                        int xx, yy;
                        for (yy = 0; yy < rect->h; yy++)
                        {
                            for (xx = 0; xx < rect->w; xx++)
                            {
                                uint32_t argb, yuv;
                                int pixel;
                                uint8_t color;

                                pixel = yy * rect->w + xx;
                                color = rect->pict.data[0][pixel];
                                argb = ((uint32_t*)rect->pict.data[1])[color];
                                yuv = hb_rgb2yuv(argb);

                                lum[xx] = (yuv >> 16) & 0xff;
                                alpha[xx] = (argb >> 24) & 0xff;
                                if ((xx & 1) == 0 && (yy & 1) == 0)
                                {
                                    chromaV[xx>>1] = (yuv >> 8) & 0xff;
                                    chromaU[xx>>1] = yuv & 0xff;
                                }
                            }
                            lum += out->plane[0].stride;
                            if ((yy & 1) == 0)
                            {
                                chromaU += out->plane[1].stride;
                                chromaV += out->plane[2].stride;
                            }
                            alpha += out->plane[3].stride;
                        }
                    }

                    if ( pv->list_buffer == NULL )
                    {
                        pv->list_buffer = pv->last_buffer = out;
                    }
                    else
                    {
                        pv->last_buffer->next = out;
                        pv->last_buffer = out;
                    }
                    out = NULL;
                }
                else
                {
                    // Empty subtitle (clears the display): 1-byte sentinel
                    // buffer with zero-sized rectangle.
                    out = hb_buffer_init( 1 );

                    out->s.frametype = HB_FRAME_SUBTITLE;
                    out->s.id = in->s.id;
                    out->s.start = pts;
                    out->s.stop = pts;
                    out->f.x = 0;
                    out->f.y = 0;
                    out->f.width = 0;
                    out->f.height = 0;
                }
            }
            // Append "out" (may be NULL if the rendered frame was already
            // appended above) and re-sync last_buffer to the list tail.
            if ( pv->list_buffer == NULL )
            {
                pv->list_buffer = pv->last_buffer = out;
            }
            else
            {
                pv->last_buffer->next = out;
            }
            while (pv->last_buffer && pv->last_buffer->next)
            {
                pv->last_buffer = pv->last_buffer->next;
            }
        }
        else if ( has_subtitle )
        {
            // Subtitle decoded but not wanted: drop any cached passthru packets.
            hb_buffer_close( &pv->list_pass_buffer );
            pv->list_pass_buffer = NULL;
        }
        if ( has_subtitle )
        {
            avsubtitle_free(&subtitle);
        }
    } while (avp.size > 0);

    *buf_out = pv->list_buffer;
    pv->list_buffer = NULL;

    return HB_WORK_OK;
}
/*
 * Close the PGS subtitle decoder: flush and close the libav context, free
 * any cached buffer lists, and free the private state.
 *
 * Fix vs. original: the private state (and any packets still cached in
 * list_pass_buffer / list_buffer) was never freed, and w->private_data was
 * left dangling.
 */
static void decsubClose( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;

    if ( pv != NULL )
    {
        // Release packets cached for passthru / render that were never sent.
        if ( pv->list_pass_buffer != NULL )
        {
            hb_buffer_close( &pv->list_pass_buffer );
        }
        if ( pv->list_buffer != NULL )
        {
            hb_buffer_close( &pv->list_buffer );
        }
        avcodec_flush_buffers( pv->context );
        avcodec_close( pv->context );
        free( pv );
    }
    w->private_data = NULL;
}
/* Work-object descriptor registering the PGS subtitle decoder with the
 * pipeline: id, display name, and init/work/close entry points. */
hb_work_object_t hb_decpgssub =
{
    WORK_DECPGSSUB,
    "PGS decoder",
    decsubInit,
    decsubWork,
    decsubClose
};
HandBrake-0.10.2/libhb/audio_resample.h 0000664 0001752 0001752 00000007223 12463330511 020263 0 ustar handbrake handbrake /* audio_resample.h
*
* Copyright (c) 2003-2015 HandBrake Team
* This file is part of the HandBrake source code
* Homepage:
* It may be used under the terms of the GNU General Public License v2.
* For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
/* Implements a libavresample wrapper for convenience.
*
* Supports sample_fmt and channel_layout conversion.
*
* sample_rate conversion will come later (libavresample doesn't support
* sample_rate conversion with float samples yet). */
#ifndef AUDIO_RESAMPLE_H
#define AUDIO_RESAMPLE_H
#include
#include
#include "libavutil/channel_layout.h"
#include "libavresample/avresample.h"
/* Default mix level for center and surround channels */
#define HB_MIXLEV_DEFAULT ((double)M_SQRT1_2)
/* Default mix level for LFE channel */
#define HB_MIXLEV_ZERO ((double)0.0)
/* State for a libavresample-backed audio converter (sample format and
 * channel-layout conversion; see file header - no sample-rate conversion). */
typedef struct
{
    int dual_mono_downmix;     // treat dual mono as a downmix source
    int dual_mono_right_only;  // when downmixing dual mono, keep right channel
    int resample_needed;       // nonzero when in/out characteristics differ
    AVAudioResampleContext *avresample;
    /* Current input characteristics, set via the setter functions below. */
    struct
    {
        uint64_t channel_layout;
        double lfe_mix_level;
        double center_mix_level;
        double surround_mix_level;
        enum AVSampleFormat sample_fmt;
    } in;
    /* Characteristics the avresample context was last configured with. */
    struct
    {
        int channels;
        uint64_t channel_layout;
        double lfe_mix_level;
        double center_mix_level;
        double surround_mix_level;
        enum AVSampleFormat sample_fmt;
    } resample;
    /* Requested output characteristics (fixed at init). */
    struct
    {
        int channels;
        int sample_size;           // bytes per output sample
        int normalize_mix_level;
        uint64_t channel_layout;
        enum AVSampleFormat sample_fmt;
        enum AVMatrixEncoding matrix_encoding;
    } out;
} hb_audio_resample_t;
/* Initialize an hb_audio_resample_t for converting audio to the requested
* sample_fmt and mixdown.
*
* Also sets the default audio input characteristics, so that they are the same
* as the output characteristics (no conversion needed).
*/
hb_audio_resample_t* hb_audio_resample_init(enum AVSampleFormat sample_fmt,
int hb_amixdown, int normalize_mix);
/* The following functions set the audio input characteristics.
*
* They should be called whenever the relevant characteristic(s) differ from the
* requested output characteristics, or if they may have changed in the source.
*/
void hb_audio_resample_set_channel_layout(hb_audio_resample_t *resample,
uint64_t channel_layout);
void hb_audio_resample_set_mix_levels(hb_audio_resample_t *resample,
double surround_mix_level,
double center_mix_level,
double lfe_mix_level);
void hb_audio_resample_set_sample_fmt(hb_audio_resample_t *resample,
enum AVSampleFormat sample_fmt);
/* Update an hb_audio_resample_t.
*
* Must be called after using any of the above functions.
*/
int hb_audio_resample_update(hb_audio_resample_t *resample);
/* Free an hb_audio_remsample_t. */
void hb_audio_resample_free(hb_audio_resample_t *resample);
/* Convert input samples to the requested output characteristics
* (sample_fmt and channel_layout + matrix_encoding).
*
* Returns an hb_buffer_t with the converted output.
*
* resampling is only done when necessary.
*/
hb_buffer_t* hb_audio_resample(hb_audio_resample_t *resample,
uint8_t **samples, int nsamples);
#endif /* AUDIO_RESAMPLE_H */
HandBrake-0.10.2/libhb/decvobsub.c 0000664 0001752 0001752 00000046655 12463330511 017255 0 ustar handbrake handbrake /* decvobsub.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
/*
* Decoder for DVD bitmap subtitles, also known as "VOB subtitles" within the HandBrake source code.
*
* Input format of the subtitle packets is described here:
* http://sam.zoy.org/writings/dvd/subtitles/
*
* An auxiliary input is the color palette lookup table, in 'subtitle->palette'.
* The demuxer implementation must fill out this table appropriately.
* - In the case of a true DVD input, the palette is read from the IFO file.
* - In the case of an MKV file input, the palette is read from the codec private data of the subtitle track.
*
* Output format of this decoder is PICTURESUB, which is:
* struct PictureSubPacket {
* uint8_t lum[pixelCount]; // Y
* uint8_t alpha[pixelCount]; // alpha (max = 16)
* uint8_t chromaU[pixelCount]; // Cb
* uint8_t chromaV[pixelCount]; // Cr
* }
*/
#include "hb.h"
/* Per-instance state for the VOB (DVD bitmap) subtitle decoder. */
struct hb_work_private_s
{
    hb_job_t    * job;

    hb_buffer_t * buf;         // accumulates one complete subtitle packet
    int           size_sub;    // total packet size (from packet header)
    int           size_got;    // bytes accumulated so far
    int           size_rle;    // offset where control data starts / RLE data size
    int64_t       pts;         // pts of the packet being accumulated
    int64_t       pts_start;   // display start (set by ParseControls)
    int64_t       pts_stop;    // display stop (set by ParseControls)
    int           pts_forced;  // nonzero if the sub carried FSTA_DSP (forced)
    int           x;           // display area, from SET_DAREA
    int           y;
    int           width;
    int           height;
    int           stream_id;
    int           offsets[2];  // even/odd field RLE data offsets, from SET_DSPXA
    uint8_t       lum[4];      // palette: luma per 2-bit pixel value
    uint8_t       chromaU[4];
    uint8_t       chromaV[4];
    uint8_t       alpha[4];    // contrast (alpha) per 2-bit pixel value
    uint8_t       palette_set; // nonzero when a usable input palette exists
};
static hb_buffer_t * Decode( hb_work_object_t * );
/*
 * Initialize the VOB subtitle decoder and validate the input color palette.
 * Returns 0 on success.
 */
int decsubInit( hb_work_object_t * w, hb_job_t * job )
{
    hb_work_private_t * pv = calloc( 1, sizeof( hb_work_private_t ) );

    w->private_data = pv;
    pv->job = job;
    pv->pts = 0;

    /* The palette only counts as "set" when at least one of its sixteen
     * entries is non-zero; an all-zero palette is treated as missing. */
    pv->palette_set = 0;
    if ( w->subtitle->palette_set )
    {
        int entry;
        for ( entry = 0; entry < 16; entry++ )
        {
            if ( w->subtitle->palette[entry] )
            {
                pv->palette_set = 1;
                break;
            }
        }
    }
    if ( !pv->palette_set )
    {
        hb_log( "decvobsub: input color palette is empty!" );
    }
    return 0;
}
/*
 * VOB subtitle decoder work function.
 *
 * Subtitle packets may span several input buffers; accumulate them in
 * pv->buf using the sizes from the packet header (first 4 bytes: total
 * size, then RLE data size).  Once a complete packet has been gathered,
 * hand it to Decode() and emit the result.
 */
int decsubWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                hb_buffer_t ** buf_out )
{
    hb_work_private_t * pv = w->private_data;
    hb_buffer_t * in = *buf_in;
    int size_sub, size_rle;

    if ( in->size <= 0 )
    {
        /* EOF on input stream - send it downstream & say that we're done */
        *buf_out = in;
        *buf_in = NULL;
        return HB_WORK_DONE;
    }

    pv->stream_id = in->s.id;

    // Packet header: 16-bit total size followed by 16-bit RLE data size.
    size_sub = ( in->data[0] << 8 ) | in->data[1];
    size_rle = ( in->data[2] << 8 ) | in->data[3];

    if( !pv->size_sub )
    {
        /* We are looking for the start of a new subtitle */
        if( size_sub && size_rle && size_sub > size_rle &&
            in->size <= size_sub )
        {
            /* Looks all right so far */
            pv->size_sub = size_sub;
            pv->size_rle = size_rle;

            // 0xFFFF is the maximum possible packet size (16-bit size field).
            pv->buf      = hb_buffer_init( 0xFFFF );
            memcpy( pv->buf->data, in->data, in->size );
            pv->buf->s.id = in->s.id;
            pv->buf->s.frametype = HB_FRAME_SUBTITLE;
            pv->buf->sequence = in->sequence;
            pv->size_got = in->size;
            if( in->s.start >= 0 )
            {
                pv->pts = in->s.start;
            }
        }
    }
    else
    {
        /* We are waiting for the end of the current subtitle */
        if( in->size <= pv->size_sub - pv->size_got )
        {
            memcpy( pv->buf->data + pv->size_got, in->data, in->size );
            pv->buf->s.id = in->s.id;
            pv->buf->sequence = in->sequence;
            pv->size_got += in->size;
            if( in->s.start >= 0 )
            {
                pv->pts = in->s.start;
            }
        }
        else
        {
            // bad size, must have lost sync
            // force re-sync
            if ( pv->buf != NULL )
                hb_buffer_close( &pv->buf );
            pv->size_sub = 0;
        }

    }

    *buf_out = NULL;

    if( pv->size_sub && pv->size_sub == pv->size_got )
    {
        pv->buf->size = pv->size_sub;

        /* We got a complete subtitle, decode it */
        *buf_out = Decode( w );

        // NOTE(review): buf_out is a function parameter and is always
        // non-NULL here; only the *buf_out check is meaningful.
        if( buf_out && *buf_out )
        {
            (*buf_out)->s.id = in->s.id;
            (*buf_out)->sequence = in->sequence;
        }

        /* Wait for the next one */
        pv->size_sub = 0;
        pv->size_got = 0;
        pv->size_rle = 0;

        if ( pv->pts_stop != AV_NOPTS_VALUE )
        {
            // If we don't get a valid next timestamp, use the stop time
            // of the current sub as the start of the next.
            // This can happen if reader invalidates timestamps while
            // waiting for an audio to update the SCR.
            pv->pts = pv->pts_stop;
        }
    }

    return HB_WORK_OK;
}
/*
 * Close the VOB subtitle decoder: free any partially accumulated packet
 * and the private state.
 *
 * Fix vs. original: tolerate a NULL private_data (init failure path) and
 * clear w->private_data so no dangling pointer remains.
 */
void decsubClose( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;

    if ( pv != NULL )
    {
        if ( pv->buf )
            hb_buffer_close( &pv->buf );
        free( pv );
    }
    w->private_data = NULL;
}
/* Work-object descriptor registering the VOB subtitle decoder with the
 * pipeline: id, display name, and init/work/close entry points. */
hb_work_object_t hb_decvobsub =
{
    WORK_DECVOBSUB,
    "VOBSUB decoder",
    decsubInit,
    decsubWork,
    decsubClose
};
/***********************************************************************
* ParseControls
***********************************************************************
* Get the start and end dates (relative to the PTS from the PES
* header), the width and height of the subpicture and the colors and
* alphas used in it
**********************************************************************/
static void ParseControls( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;
    uint8_t * buf = pv->buf->data;

    int i;
    int command;
    int date, next;

    pv->pts_start = AV_NOPTS_VALUE;
    pv->pts_stop  = AV_NOPTS_VALUE;
    pv->pts_forced  = 0;

    pv->alpha[3] = 0;
    pv->alpha[2] = 0;
    pv->alpha[1] = 0;
    pv->alpha[0] = 0;

    // Control data starts after the RLE data; each SP_DCSQ begins with a
    // 16-bit delay ("date", in 90kHz/1024 units) and a 16-bit offset of
    // the next SP_DCSQ.
    // NOTE(review): there is no bounds checking of i against pv->buf->size;
    // malformed control sequences could read past the packet - confirm the
    // demuxer guarantees well-formed packets.
    for( i = pv->size_rle; ; )
    {
        date = ( buf[i] << 8 ) | buf[i+1]; i += 2;
        next = ( buf[i] << 8 ) | buf[i+1]; i += 2;

        for( ;; )
        {
            command = buf[i++];

            /*
             * There are eight commands available for
             * Sub-Pictures. The first SP_DCSQ should contain, as a
             * minimum, SET_COLOR, SET_CONTR, SET_DAREA, and
             * SET_DSPXA
             */

            if( command == 0xFF ) // 0xFF - CMD_END - ends one SP_DCSQ
            {
                break;
            }

            switch( command )
            {
                case 0x00: // 0x00 - FSTA_DSP - Forced Start Display, no arguments
                    pv->pts_start = pv->pts + date * 1024;
                    pv->pts_forced = 1;
                    w->subtitle->hits++;
                    w->subtitle->forced_hits++;
                    break;

                case 0x01: // 0x01 - STA_DSP - Start Display, no arguments
                    pv->pts_start = pv->pts + date * 1024;
                    pv->pts_forced  = 0;
                    w->subtitle->hits++;
                    break;

                case 0x02: // 0x02 - STP_DSP - Stop Display, no arguments
                    if(pv->pts_stop == AV_NOPTS_VALUE)
                        pv->pts_stop = pv->pts + date * 1024;
                    break;

                case 0x03: // 0x03 - SET_COLOR - Set Colour indices
                {
                    /*
                     * SET_COLOR - provides four indices into the CLUT
                     * for the current PGC to associate with the four
                     * pixel values
                     */
                    int colors[4];
                    int j;

                    colors[0] = (buf[i+0]>>4)&0x0f;
                    colors[1] = (buf[i+0])&0x0f;
                    colors[2] = (buf[i+1]>>4)&0x0f;
                    colors[3] = (buf[i+1])&0x0f;

                    for( j = 0; j < 4; j++ )
                    {
                        /*
                         * Not sure what is happening here, in theory
                         * the palette is in YCbCr. And we want YUV.
                         *
                         * However it looks more like YCrCb (according
                         * to pgcedit). And the scalers for YCrCb don't
                         * work, but I get the right colours by doing
                         * no conversion.
                         */
                        uint32_t color = w->subtitle->palette[colors[j]];
                        uint8_t Cr, Cb, y;
                        y = (color>>16) & 0xff;
                        Cr = (color>>8) & 0xff;
                        Cb = (color) & 0xff;
                        pv->lum[3-j] = y;
                        pv->chromaU[3-j] = Cb;
                        pv->chromaV[3-j] = Cr;
                        /* hb_log("color[%d] y = %d, u = %d, v = %d",
                               3-j,
                               pv->lum[3-j],
                               pv->chromaU[3-j],
                               pv->chromaV[3-j]);
                        */
                    }
                    i += 2;
                    break;
                }
                case 0x04: // 0x04 - SET_CONTR - Set Contrast
                {
                    /*
                     * SET_CONTR - directly provides the four contrast
                     * (alpha blend) values to associate with the four
                     * pixel values
                     */
                    uint8_t alpha[4];
                    alpha[3] = ((buf[i+0] >> 4) & 0x0f) << 4;
                    alpha[2] = ((buf[i+0]     ) & 0x0f) << 4;
                    alpha[1] = ((buf[i+1] >> 4) & 0x0f) << 4;
                    alpha[0] = ((buf[i+1]     ) & 0x0f) << 4;

                    int lastAlpha = pv->alpha[3] + pv->alpha[2] + pv->alpha[1] + pv->alpha[0];
                    int currAlpha = alpha[3] + alpha[2] + alpha[1] + alpha[0];

                    // fading-in, save the highest alpha value
                    if( currAlpha > lastAlpha )
                    {
                        pv->alpha[3] = alpha[3];
                        pv->alpha[2] = alpha[2];
                        pv->alpha[1] = alpha[1];
                        pv->alpha[0] = alpha[0];
                    }

                    // fading-out
                    if (currAlpha < lastAlpha && pv->pts_stop == AV_NOPTS_VALUE)
                    {
                        pv->pts_stop = pv->pts + date * 1024;
                    }

                    i += 2;
                    break;
                }
                case 0x05: // 0x05 - SET_DAREA - defines the display area
                {
                    // Packed 12-bit coordinates: x start/end, then y start/end.
                    pv->x     = (buf[i+0]<<4) | ((buf[i+1]>>4)&0x0f);
                    pv->width = (((buf[i+1]&0x0f)<<8)| buf[i+2]) - pv->x + 1;
                    pv->y     = (buf[i+3]<<4)| ((buf[i+4]>>4)&0x0f);
                    pv->height = (((buf[i+4]&0x0f)<<8)| buf[i+5]) - pv->y + 1;
                    i += 6;
                    break;
                }
                case 0x06: // 0x06 - SET_DSPXA - defines the pixel data addresses
                {
                    pv->offsets[0] = ( buf[i] << 8 ) | buf[i+1]; i += 2;
                    pv->offsets[1] = ( buf[i] << 8 ) | buf[i+1]; i += 2;
                    break;
                }
            }
        }

        // An SP_DCSQ pointing at itself (or backwards) marks the last one.
        if( i > next )
        {
            break;
        }
        i = next;
    }

    // Generate timestamps if they are not set
    if( pv->pts_start == AV_NOPTS_VALUE )
    {
        // Set pts to end of last sub if the start time is unknown.
        pv->pts_start = pv->pts;
    }
}
/***********************************************************************
* CropSubtitle
***********************************************************************
* Given a raw decoded subtitle, detects transparent borders and
* returns a cropped subtitle in a hb_buffer_t ready to be used by
* the renderer, or NULL if the subtitle was completely transparent
**********************************************************************/
/* Return 1 when every alpha value in a row of width pv->width is zero. */
static int LineIsTransparent( hb_work_object_t * w, uint8_t * p )
{
    hb_work_private_t * pv = w->private_data;
    uint8_t * end = p + pv->width;

    while ( p < end )
    {
        if ( *p++ )
        {
            return 0;
        }
    }
    return 1;
}
/* Return 1 when every alpha value in a column (stride pv->width, height
 * pv->height) is zero. */
static int ColumnIsTransparent( hb_work_object_t * w, uint8_t * p )
{
    hb_work_private_t * pv = w->private_data;
    int row;

    for ( row = 0; row < pv->height; row++, p += pv->width )
    {
        if ( *p )
        {
            return 0;
        }
    }
    return 1;
}
// Brain dead resampler. This should really use swscale...
// Uses Bresenham algo to pick source samples for averaging
// Brain dead resampler. This should really use swscale...
// Uses Bresenham algo to pick source samples for averaging
static void resample( uint8_t * dst, uint8_t * src, int dst_w, int src_w )
{
    int di, si, acc, count, last, bres;

    if ( dst_w < src_w )
    {
        /* Downsample: average each Bresenham-selected run of source
         * samples into one destination sample. */
        bres  = src_w / 2;
        acc   = 0;
        count = 0;
        last  = 0;
        di    = 0;
        for ( si = 0; si < src_w; si++ )
        {
            acc  += src[si];
            count++;
            bres -= dst_w;
            if ( bres < 0 )
            {
                last      = acc / count;
                dst[di++] = last;
                acc       = 0;
                count     = 0;
                bres     += src_w;
            }
        }
        /* Pad any leftover destination samples with the last average. */
        while ( di < dst_w )
        {
            dst[di++] = last;
        }
    }
    else
    {
        /* Upsample (or 1:1 copy): repeat Bresenham-selected source
         * samples until the destination is full. */
        bres = dst_w / 2;
        si   = 0;
        for ( di = 0; di < dst_w; di++ )
        {
            dst[di] = src[si];
            bres   -= src_w;
            if ( bres < 0 )
            {
                si++;
                bres += dst_w;
            }
        }
    }
}
static hb_buffer_t * CropSubtitle( hb_work_object_t * w, uint8_t * raw )
{
    hb_work_private_t * pv = w->private_data;
    int i;
    int crop[4] = { -1,-1,-1,-1 };  // top, bottom, left, right (inclusive)
    uint8_t * alpha;
    int realwidth, realheight;
    hb_buffer_t * buf;
    uint8_t * lum_in, * alpha_in, * u_in, * v_in;

    // "raw" layout: 4 consecutive width*height planes - lum, alpha, U, V
    // (see the PICTURESUB description in the file header).
    alpha = raw + pv->width * pv->height;

    /* Top */
    for( i = 0; i < pv->height; i++ )
    {
        if( !LineIsTransparent( w, &alpha[i*pv->width] ) )
        {
            crop[0] = i;
            break;
        }
    }

    if( crop[0] < 0 )
    {
        /* Empty subtitle */
        return NULL;
    }

    /* Bottom */
    for( i = pv->height - 1; i >= 0; i-- )
    {
        if( !LineIsTransparent( w, &alpha[i*pv->width] ) )
        {
            crop[1] = i;
            break;
        }
    }

    /* Left */
    for( i = 0; i < pv->width; i++ )
    {
        if( !ColumnIsTransparent( w, &alpha[i] ) )
        {
            crop[2] = i;
            break;
        }
    }

    /* Right */
    for( i = pv->width - 1; i >= 0; i-- )
    {
        if( !ColumnIsTransparent( w, &alpha[i] ) )
        {
            crop[3] = i;
            break;
        }
    }

    realwidth = crop[3] - crop[2] + 1;
    realheight = crop[1] - crop[0] + 1;

    buf = hb_frame_buffer_init( AV_PIX_FMT_YUVA420P, realwidth, realheight );
    buf->s.frametype = HB_FRAME_SUBTITLE;
    buf->s.start = pv->pts_start;
    buf->s.stop = pv->pts_stop;
    buf->s.type = SUBTITLE_BUF;

    // Shift the subtitle position by the amount cropped off top/left.
    buf->f.x = pv->x + crop[2];
    buf->f.y = pv->y + crop[0];

    lum_in    = raw + crop[0] * pv->width + crop[2];
    alpha_in  = lum_in + pv->width * pv->height;
    u_in      = alpha_in + pv->width * pv->height;
    v_in      = u_in + pv->width * pv->height;

    uint8_t *dst;
    for( i = 0; i < realheight; i++ )
    {
        // Luma
        dst = buf->plane[0].data + buf->plane[0].stride * i;
        memcpy( dst, lum_in, realwidth );
        if( ( i & 1 ) == 0 )
        {
            // chroma U (resample to YUV420)
            dst = buf->plane[1].data + buf->plane[1].stride * ( i >> 1 );
            resample( dst, u_in, buf->plane[1].width, realwidth );

            // chroma V (resample to YUV420)
            dst = buf->plane[2].data + buf->plane[2].stride * ( i >> 1 );
            resample( dst, v_in, buf->plane[2].width, realwidth );
        }
        // Alpha
        dst = buf->plane[3].data + buf->plane[3].stride * i;
        memcpy( dst, alpha_in, realwidth );

        lum_in    += pv->width;
        alpha_in  += pv->width;
        u_in      += pv->width;
        v_in      += pv->width;
    }

    return buf;
}
/*
 * Decode one complete VOB subtitle packet held in pv->buf.
 *
 * Parses the control sequences (timing, palette, display area, RLE data
 * offsets), then RLE-decodes the interlaced bitmap into four planes
 * (lum/alpha/U/V) and crops transparent borders.  Returns the decoded
 * subtitle buffer, the raw packet (passthru), or NULL (scan / non-forced
 * during forced-only / fully transparent).
 */
static hb_buffer_t * Decode( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;
    int code, line, col;
    int offsets[2];
    int * offset;
    hb_buffer_t * buf;
    uint8_t * buf_raw = NULL;
    hb_job_t * job = pv->job;

    /* Get infos about the subtitle */
    ParseControls( w );

    if( job->indepth_scan || ( w->subtitle->config.force && pv->pts_forced == 0 ) )
    {
        /*
         * Don't encode subtitles when doing a scan.
         *
         * When forcing subtitles, ignore all those that don't
         * have the forced flag set.
         */
        hb_buffer_close( &pv->buf );
        return NULL;
    }

    if (w->subtitle->config.dest == PASSTHRUSUB)
    {
        pv->buf->s.start = pv->pts_start;
        pv->buf->s.stop  = pv->pts_stop;
        buf = pv->buf;
        pv->buf = NULL;
        return buf;
    }

    /* Do the actual decoding now */
    buf_raw = malloc( ( pv->width * pv->height ) * 4 );

// Pull the next 4-bit nibble of RLE data at *offset (a nibble index into
// pv->buf->data) into the low bits of "code".
#define GET_NEXT_NIBBLE code = ( code << 4 ) | ( ( ( *offset & 1 ) ? \
( pv->buf->data[((*offset)>>1)] & 0xF ) : ( pv->buf->data[((*offset)>>1)] >> 4 ) ) ); \
(*offset)++

    // SET_DSPXA offsets are byte offsets; convert to nibble offsets.
    offsets[0] = pv->offsets[0] * 2;
    offsets[1] = pv->offsets[1] * 2;

    for( line = 0; line < pv->height; line++ )
    {
        /* Select even or odd field */
        offset = ( line & 1 ) ? &offsets[1] : &offsets[0];

        for( col = 0; col < pv->width; col += code >> 2 )
        {
            uint8_t * lum, * alpha, * chromaU, * chromaV;

            // RLE codes are 4-16 bits: high bits are the run length,
            // low 2 bits the pixel value; a length of 0 means "to end
            // of line".
            code = 0;
            GET_NEXT_NIBBLE;
            if( code < 0x4 )
            {
                GET_NEXT_NIBBLE;
                if( code < 0x10 )
                {
                    GET_NEXT_NIBBLE;
                    if( code < 0x40 )
                    {
                        GET_NEXT_NIBBLE;
                        if( code < 0x100 )
                        {
                            /* End of line */
                            code |= ( pv->width - col ) << 2;
                        }
                    }
                }
            }

            lum     = buf_raw;
            alpha   = lum + pv->width * pv->height;
            chromaU = alpha + pv->width * pv->height;
            chromaV = chromaU + pv->width * pv->height;

            memset( lum + line * pv->width + col,
                    pv->lum[code & 3], code >> 2 );
            memset( alpha + line * pv->width + col,
                    pv->alpha[code & 3], code >> 2 );
            memset( chromaU + line * pv->width + col,
                    pv->chromaU[code & 3], code >> 2 );
            memset( chromaV + line * pv->width + col,
                    pv->chromaV[code & 3], code >> 2 );
        }

        /* Byte-align */
        if( *offset & 1 )
        {
            (*offset)++;
        }
    }

    hb_buffer_close( &pv->buf );

    /* Crop subtitle (remove transparent borders) */
    buf = CropSubtitle( w, buf_raw );

    free( buf_raw );

    return buf;
}
HandBrake-0.10.2/libhb/decutf8sub.c 0000664 0001752 0001752 00000004020 12463330511 017331 0 ustar handbrake handbrake /* decutf8sub.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
/*
* Decoder for UTF-8 subtitles obtained from file input-sources.
*
* Input and output packet format is UTF-8 encoded text,
* with limited HTML-style markup (only , , and ).
*
* @author David Foster (davidfstr)
*/
#include
#include
#include "hb.h"
#include "decsrtsub.h"
/* Per-instance state for the UTF-8 subtitle decoder. */
struct hb_work_private_s
{
    int line;   // SSA line number, incremented per emitted subtitle
};
/*
 * Initialize the UTF-8 subtitle decoder: allocate private state and attach
 * a generic SSA Script Info header sized to the cropped title dimensions.
 * Returns 0 on success, 1 on allocation failure.
 */
static int decutf8Init(hb_work_object_t *w, hb_job_t *job)
{
    hb_work_private_t *pv = calloc(1, sizeof(hb_work_private_t));

    if (pv == NULL)
    {
        return 1;
    }
    w->private_data = pv;

    // Generate generic SSA Script Info.
    int ssa_h = job->title->height - job->crop[0] - job->crop[1];
    int ssa_w = job->title->width  - job->crop[2] - job->crop[3];
    hb_subtitle_add_ssa_header(w->subtitle, ssa_w, ssa_h);
    return 0;
}
/*
 * UTF-8 subtitle decoder work function: packets pass through unchanged
 * except that their SRT-style markup is rewritten as SSA.
 */
static int decutf8Work(hb_work_object_t * w,
                       hb_buffer_t **buf_in, hb_buffer_t **buf_out)
{
    hb_work_private_t *pv  = w->private_data;
    hb_buffer_t       *buf = *buf_in;

    buf->s.frametype = HB_FRAME_SUBTITLE;

    // Warn if the subtitle's duration has not been passed through by the
    // demuxer, which will prevent the subtitle from displaying at all
    if (buf->s.stop == 0)
    {
        hb_log("decutf8sub: subtitle packet lacks duration");
    }

    pv->line++;
    hb_srt_to_ssa(buf, pv->line);

    *buf_in  = NULL;
    *buf_out = buf;

    // A zero-size packet is the EOF marker.
    return (buf->size == 0) ? HB_WORK_DONE : HB_WORK_OK;
}
/* Release the UTF-8 subtitle decoder's private state. */
static void decutf8Close(hb_work_object_t *w)
{
    hb_work_private_t *pv = w->private_data;

    free(pv);
}
/* Work-object descriptor registering the UTF-8 subtitle decoder with the
 * pipeline: id, display name, and init/work/close entry points. */
hb_work_object_t hb_decutf8sub =
{
    WORK_DECUTF8SUB,
    "UTF-8 Subtitle Decoder",
    decutf8Init,
    decutf8Work,
    decutf8Close
};
HandBrake-0.10.2/libhb/nal_units.c 0000664 0001752 0001752 00000012264 12463330511 017262 0 ustar handbrake handbrake /* nal_units.c
*
* Copyright (c) 2003-2015 HandBrake Team
* This file is part of the HandBrake source code.
* Homepage: .
* It may be used under the terms of the GNU General Public License v2.
* For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include
#include
#include "common.h"
#include "nal_units.h"
/* 4-byte Annex B start code prefix (00 00 00 01). */
static const uint8_t hb_annexb_startcode[] = { 0x00, 0x00, 0x00, 0x01, };

/*
 * Write a NAL unit to buf in Annex B form (start code prefix + payload).
 *
 * When buf is NULL nothing is written; the function only computes the
 * space required, so callers can size their buffer with a first pass.
 *
 * Returns the number of bytes (that would be) written.
 */
size_t hb_nal_unit_write_annexb(uint8_t *buf,
                                const uint8_t *nal_unit,
                                const size_t nal_unit_size)
{
    const size_t prefix_size = sizeof(hb_annexb_startcode);

    if (buf != NULL)
    {
        memcpy(buf, hb_annexb_startcode, prefix_size);
        memcpy(buf + prefix_size, nal_unit, nal_unit_size);
    }

    return prefix_size + nal_unit_size;
}
/*
 * Write a NAL unit to buf in ISO MP4 form: a 4-byte big-endian length
 * field (replacing the Annex B start code prefix) followed by the payload.
 *
 * When buf is NULL nothing is written; only the required size is computed.
 *
 * Returns the number of bytes (that would be) written.
 */
size_t hb_nal_unit_write_isomp4(uint8_t *buf,
                                const uint8_t *nal_unit,
                                const size_t nal_unit_size)
{
    uint8_t length[4];

    if (buf != NULL)
    {
        /* serialize the length big-endian, most significant byte first */
        length[0] = (nal_unit_size >> 24) & 0xff;
        length[1] = (nal_unit_size >> 16) & 0xff;
        length[2] = (nal_unit_size >>  8) & 0xff;
        length[3] = (nal_unit_size      ) & 0xff;
        memcpy(buf, length, sizeof(length));
        memcpy(buf + sizeof(length), nal_unit, nal_unit_size);
    }

    return sizeof(length) + nal_unit_size;
}
uint8_t* hb_annexb_find_next_nalu(const uint8_t *start, size_t *size)
{
uint8_t *nal = NULL;
uint8_t *buf = (uint8_t*)start;
uint8_t *end = (uint8_t*)start + *size;
/* Look for an Annex B start code prefix (3-byte sequence == 1) */
while (end - buf > 3)
{
if (!buf[0] && !buf[1] && buf[2] == 1)
{
nal = (buf += 3); // NAL unit begins after start code
break;
}
buf++;
}
if (nal == NULL)
{
*size = 0;
return NULL;
}
/*
* Start code prefix found, look for the next one to determine the size
*
* A 4-byte sequence == 1 is also a start code, so check for a 3-byte
* sequence == 0 too (start code emulation prevention will prevent such a
* sequence from occurring outside of a start code prefix)
*/
while (end - buf > 3)
{
if (!buf[0] && !buf[1] && (!buf[2] || buf[2] == 1))
{
end = buf;
break;
}
buf++;
}
*size = end - nal;
return nal;
}
/*
 * Convert an Annex B bitstream to ISO MP4 form (length-prefixed NAL units).
 *
 * Two passes: the first measures the output, the second writes it into a
 * freshly allocated hb_buffer_t. The caller owns the returned buffer.
 * Returns NULL if allocation fails.
 */
hb_buffer_t* hb_nal_bitstream_annexb_to_mp4(const uint8_t *data,
                                            const size_t size)
{
    hb_buffer_t *out;
    uint8_t *nal, *end;
    size_t total, remaining;

    /* Pass 1: compute the total converted size. */
    total     = 0;
    remaining = size;
    nal       = (uint8_t*)data;
    end       = (uint8_t*)data + size;
    while ((nal = hb_annexb_find_next_nalu(nal, &remaining)) != NULL)
    {
        total    += hb_nal_unit_write_isomp4(NULL, nal, remaining);
        remaining = end - nal;
    }

    out = hb_buffer_init(total);
    if (out == NULL)
    {
        hb_error("hb_nal_bitstream_annexb_to_mp4: hb_buffer_init failed");
        return NULL;
    }

    /* Pass 2: write the length-prefixed NAL units. */
    total     = 0;
    remaining = size;
    nal       = (uint8_t*)data;
    end       = (uint8_t*)data + size;
    while ((nal = hb_annexb_find_next_nalu(nal, &remaining)) != NULL)
    {
        total    += hb_nal_unit_write_isomp4(out->data + total, nal, remaining);
        remaining = end - nal;
    }

    return out;
}
/*
 * Decode the big-endian length field that precedes each NAL unit in MP4
 * bitstreams (nal_length_size is 2-4 bytes). Stores the decoded value in
 * *nal_unit_length and returns the number of header bytes consumed.
 */
static size_t mp4_nal_unit_length(const uint8_t *data,
                                  const size_t nal_length_size,
                                  size_t *nal_unit_length)
{
    size_t length = 0;
    uint8_t byte_index;

    for (byte_index = 0; byte_index < nal_length_size; byte_index++)
    {
        length = (length << 8) | data[byte_index];
    }
    *nal_unit_length = length;

    return nal_length_size;
}
/*
 * Convert an MP4 bitstream (length-prefixed NAL units) to Annex B form
 * (start-code-prefixed NAL units).
 *
 * Two passes: the first validates the bitstream and measures the output,
 * the second writes it into a freshly allocated hb_buffer_t. The caller
 * owns the returned buffer. Returns NULL on a truncated bitstream or if
 * allocation fails.
 */
hb_buffer_t* hb_nal_bitstream_mp4_to_annexb(const uint8_t *data,
                                            const size_t size,
                                            const uint8_t nal_length_size)
{
    hb_buffer_t *out;
    uint8_t *buf, *end;
    size_t out_size, nal_size;

    out_size = 0;
    buf = (uint8_t*)data;
    end = (uint8_t*)data + size;

    while (end - buf > nal_length_size)
    {
        buf += mp4_nal_unit_length(buf, nal_length_size, &nal_size);
        /* end - buf is positive here, so converting to size_t is safe and
         * makes the signed/unsigned comparison explicit */
        if ((size_t)(end - buf) < nal_size)
        {
            /* Cast the arguments: end - buf is ptrdiff_t and nal_size is
             * size_t; passing either directly to %lu is undefined behavior
             * on platforms where those types are not unsigned long
             * (e.g. 64-bit Windows). */
            hb_log("hb_nal_bitstream_mp4_to_annexb: truncated bitstream"
                   " (remaining: %lu, expected: %lu)",
                   (unsigned long)(end - buf), (unsigned long)nal_size);
            return NULL;
        }
        out_size += hb_nal_unit_write_annexb(NULL, buf, nal_size);
        buf += nal_size;
    }

    out = hb_buffer_init(out_size);
    if (out == NULL)
    {
        hb_error("hb_nal_bitstream_mp4_to_annexb: hb_buffer_init failed");
        return NULL;
    }

    /* Second pass: the stream validated above, so no truncation checks. */
    out_size = 0;
    buf = (uint8_t*)data;
    end = (uint8_t*)data + size;

    while (end - buf > nal_length_size)
    {
        buf += mp4_nal_unit_length(buf, nal_length_size, &nal_size);
        out_size += hb_nal_unit_write_annexb(out->data + out_size, buf, nal_size);
        buf += nal_size;
    }

    return out;
}
HandBrake-0.10.2/libhb/extras/ 0000775 0001752 0001752 00000000000 12535641635 016437 5 ustar handbrake handbrake HandBrake-0.10.2/libhb/extras/cl.h 0000664 0001752 0001752 00000166655 12230760063 017216 0 ustar handbrake handbrake /*******************************************************************************
* Copyright (c) 2008 - 2012 The Khronos Group Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and/or associated documentation files (the
* "Materials"), to deal in the Materials without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Materials, and to
* permit persons to whom the Materials are furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Materials.
*
* THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
******************************************************************************/
#ifndef __OPENCL_CL_H
#define __OPENCL_CL_H
#include "cl_platform.h"
#ifdef __cplusplus
extern "C" {
#endif
/******************************************************************************/
typedef struct _cl_platform_id * cl_platform_id;
typedef struct _cl_device_id * cl_device_id;
typedef struct _cl_context * cl_context;
typedef struct _cl_command_queue * cl_command_queue;
typedef struct _cl_mem * cl_mem;
typedef struct _cl_program * cl_program;
typedef struct _cl_kernel * cl_kernel;
typedef struct _cl_event * cl_event;
typedef struct _cl_sampler * cl_sampler;
typedef cl_uint cl_bool; /* WARNING! Unlike cl_ types in cl_platform.h, cl_bool is not guaranteed to be the same size as the bool in kernels. */
typedef cl_ulong cl_bitfield;
typedef cl_bitfield cl_device_type;
typedef cl_uint cl_platform_info;
typedef cl_uint cl_device_info;
typedef cl_bitfield cl_device_fp_config;
typedef cl_uint cl_device_mem_cache_type;
typedef cl_uint cl_device_local_mem_type;
typedef cl_bitfield cl_device_exec_capabilities;
typedef cl_bitfield cl_command_queue_properties;
typedef intptr_t cl_device_partition_property;
typedef cl_bitfield cl_device_affinity_domain;
typedef intptr_t cl_context_properties;
typedef cl_uint cl_context_info;
typedef cl_uint cl_command_queue_info;
typedef cl_uint cl_channel_order;
typedef cl_uint cl_channel_type;
typedef cl_bitfield cl_mem_flags;
typedef cl_uint cl_mem_object_type;
typedef cl_uint cl_mem_info;
typedef cl_bitfield cl_mem_migration_flags;
typedef cl_uint cl_image_info;
typedef cl_uint cl_buffer_create_type;
typedef cl_uint cl_addressing_mode;
typedef cl_uint cl_filter_mode;
typedef cl_uint cl_sampler_info;
typedef cl_bitfield cl_map_flags;
typedef cl_uint cl_program_info;
typedef cl_uint cl_program_build_info;
typedef cl_uint cl_program_binary_type;
typedef cl_int cl_build_status;
typedef cl_uint cl_kernel_info;
typedef cl_uint cl_kernel_arg_info;
typedef cl_uint cl_kernel_arg_address_qualifier;
typedef cl_uint cl_kernel_arg_access_qualifier;
typedef cl_bitfield cl_kernel_arg_type_qualifier;
typedef cl_uint cl_kernel_work_group_info;
typedef cl_uint cl_event_info;
typedef cl_uint cl_command_type;
typedef cl_uint cl_profiling_info;
/* Image format descriptor: channel layout plus per-channel data type
 * (see the cl_channel_order and cl_channel_type constants below). */
typedef struct _cl_image_format {
cl_channel_order image_channel_order;
cl_channel_type image_channel_data_type;
} cl_image_format;
/* Type and dimensions of an image or image array, passed to clCreateImage
 * (OpenCL 1.2). Fields not relevant to the chosen image_type are ignored
 * per the OpenCL specification. */
typedef struct _cl_image_desc {
cl_mem_object_type image_type; /* one of the CL_MEM_OBJECT_IMAGE* values */
size_t image_width;
size_t image_height;
size_t image_depth;
size_t image_array_size;
size_t image_row_pitch;
size_t image_slice_pitch;
cl_uint num_mip_levels;
cl_uint num_samples;
cl_mem buffer;
} cl_image_desc;
/* Sub-buffer region descriptor used with clCreateSubBuffer and
 * CL_BUFFER_CREATE_TYPE_REGION: offset and size within the parent buffer. */
typedef struct _cl_buffer_region {
size_t origin;
size_t size;
} cl_buffer_region;
/******************************************************************************/
/* Error Codes */
#define CL_SUCCESS 0
#define CL_DEVICE_NOT_FOUND -1
#define CL_DEVICE_NOT_AVAILABLE -2
#define CL_COMPILER_NOT_AVAILABLE -3
#define CL_MEM_OBJECT_ALLOCATION_FAILURE -4
#define CL_OUT_OF_RESOURCES -5
#define CL_OUT_OF_HOST_MEMORY -6
#define CL_PROFILING_INFO_NOT_AVAILABLE -7
#define CL_MEM_COPY_OVERLAP -8
#define CL_IMAGE_FORMAT_MISMATCH -9
#define CL_IMAGE_FORMAT_NOT_SUPPORTED -10
#define CL_BUILD_PROGRAM_FAILURE -11
#define CL_MAP_FAILURE -12
#define CL_MISALIGNED_SUB_BUFFER_OFFSET -13
#define CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST -14
#define CL_COMPILE_PROGRAM_FAILURE -15
#define CL_LINKER_NOT_AVAILABLE -16
#define CL_LINK_PROGRAM_FAILURE -17
#define CL_DEVICE_PARTITION_FAILED -18
#define CL_KERNEL_ARG_INFO_NOT_AVAILABLE -19
#define CL_INVALID_VALUE -30
#define CL_INVALID_DEVICE_TYPE -31
#define CL_INVALID_PLATFORM -32
#define CL_INVALID_DEVICE -33
#define CL_INVALID_CONTEXT -34
#define CL_INVALID_QUEUE_PROPERTIES -35
#define CL_INVALID_COMMAND_QUEUE -36
#define CL_INVALID_HOST_PTR -37
#define CL_INVALID_MEM_OBJECT -38
#define CL_INVALID_IMAGE_FORMAT_DESCRIPTOR -39
#define CL_INVALID_IMAGE_SIZE -40
#define CL_INVALID_SAMPLER -41
#define CL_INVALID_BINARY -42
#define CL_INVALID_BUILD_OPTIONS -43
#define CL_INVALID_PROGRAM -44
#define CL_INVALID_PROGRAM_EXECUTABLE -45
#define CL_INVALID_KERNEL_NAME -46
#define CL_INVALID_KERNEL_DEFINITION -47
#define CL_INVALID_KERNEL -48
#define CL_INVALID_ARG_INDEX -49
#define CL_INVALID_ARG_VALUE -50
#define CL_INVALID_ARG_SIZE -51
#define CL_INVALID_KERNEL_ARGS -52
#define CL_INVALID_WORK_DIMENSION -53
#define CL_INVALID_WORK_GROUP_SIZE -54
#define CL_INVALID_WORK_ITEM_SIZE -55
#define CL_INVALID_GLOBAL_OFFSET -56
#define CL_INVALID_EVENT_WAIT_LIST -57
#define CL_INVALID_EVENT -58
#define CL_INVALID_OPERATION -59
#define CL_INVALID_GL_OBJECT -60
#define CL_INVALID_BUFFER_SIZE -61
#define CL_INVALID_MIP_LEVEL -62
#define CL_INVALID_GLOBAL_WORK_SIZE -63
#define CL_INVALID_PROPERTY -64
#define CL_INVALID_IMAGE_DESCRIPTOR -65
#define CL_INVALID_COMPILER_OPTIONS -66
#define CL_INVALID_LINKER_OPTIONS -67
#define CL_INVALID_DEVICE_PARTITION_COUNT -68
/* OpenCL Version */
#define CL_VERSION_1_0 1
#define CL_VERSION_1_1 1
#define CL_VERSION_1_2 1
/* cl_bool */
#define CL_FALSE 0
#define CL_TRUE 1
#define CL_BLOCKING CL_TRUE
#define CL_NON_BLOCKING CL_FALSE
/* cl_platform_info */
#define CL_PLATFORM_PROFILE 0x0900
#define CL_PLATFORM_VERSION 0x0901
#define CL_PLATFORM_NAME 0x0902
#define CL_PLATFORM_VENDOR 0x0903
#define CL_PLATFORM_EXTENSIONS 0x0904
/* cl_device_type - bitfield */
#define CL_DEVICE_TYPE_DEFAULT (1 << 0)
#define CL_DEVICE_TYPE_CPU (1 << 1)
#define CL_DEVICE_TYPE_GPU (1 << 2)
#define CL_DEVICE_TYPE_ACCELERATOR (1 << 3)
#define CL_DEVICE_TYPE_CUSTOM (1 << 4)
#define CL_DEVICE_TYPE_ALL 0xFFFFFFFF
/* cl_device_info */
#define CL_DEVICE_TYPE 0x1000
#define CL_DEVICE_VENDOR_ID 0x1001
#define CL_DEVICE_MAX_COMPUTE_UNITS 0x1002
#define CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS 0x1003
#define CL_DEVICE_MAX_WORK_GROUP_SIZE 0x1004
#define CL_DEVICE_MAX_WORK_ITEM_SIZES 0x1005
#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_CHAR 0x1006
#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_SHORT 0x1007
#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT 0x1008
#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_LONG 0x1009
#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_FLOAT 0x100A
#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_DOUBLE 0x100B
#define CL_DEVICE_MAX_CLOCK_FREQUENCY 0x100C
#define CL_DEVICE_ADDRESS_BITS 0x100D
#define CL_DEVICE_MAX_READ_IMAGE_ARGS 0x100E
#define CL_DEVICE_MAX_WRITE_IMAGE_ARGS 0x100F
#define CL_DEVICE_MAX_MEM_ALLOC_SIZE 0x1010
#define CL_DEVICE_IMAGE2D_MAX_WIDTH 0x1011
#define CL_DEVICE_IMAGE2D_MAX_HEIGHT 0x1012
#define CL_DEVICE_IMAGE3D_MAX_WIDTH 0x1013
#define CL_DEVICE_IMAGE3D_MAX_HEIGHT 0x1014
#define CL_DEVICE_IMAGE3D_MAX_DEPTH 0x1015
#define CL_DEVICE_IMAGE_SUPPORT 0x1016
#define CL_DEVICE_MAX_PARAMETER_SIZE 0x1017
#define CL_DEVICE_MAX_SAMPLERS 0x1018
#define CL_DEVICE_MEM_BASE_ADDR_ALIGN 0x1019
#define CL_DEVICE_MIN_DATA_TYPE_ALIGN_SIZE 0x101A
#define CL_DEVICE_SINGLE_FP_CONFIG 0x101B
#define CL_DEVICE_GLOBAL_MEM_CACHE_TYPE 0x101C
#define CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE 0x101D
#define CL_DEVICE_GLOBAL_MEM_CACHE_SIZE 0x101E
#define CL_DEVICE_GLOBAL_MEM_SIZE 0x101F
#define CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE 0x1020
#define CL_DEVICE_MAX_CONSTANT_ARGS 0x1021
#define CL_DEVICE_LOCAL_MEM_TYPE 0x1022
#define CL_DEVICE_LOCAL_MEM_SIZE 0x1023
#define CL_DEVICE_ERROR_CORRECTION_SUPPORT 0x1024
#define CL_DEVICE_PROFILING_TIMER_RESOLUTION 0x1025
#define CL_DEVICE_ENDIAN_LITTLE 0x1026
#define CL_DEVICE_AVAILABLE 0x1027
#define CL_DEVICE_COMPILER_AVAILABLE 0x1028
#define CL_DEVICE_EXECUTION_CAPABILITIES 0x1029
#define CL_DEVICE_QUEUE_PROPERTIES 0x102A
#define CL_DEVICE_NAME 0x102B
#define CL_DEVICE_VENDOR 0x102C
#define CL_DRIVER_VERSION 0x102D
#define CL_DEVICE_PROFILE 0x102E
#define CL_DEVICE_VERSION 0x102F
#define CL_DEVICE_EXTENSIONS 0x1030
#define CL_DEVICE_PLATFORM 0x1031
#define CL_DEVICE_DOUBLE_FP_CONFIG 0x1032
/* 0x1033 reserved for CL_DEVICE_HALF_FP_CONFIG */
#define CL_DEVICE_PREFERRED_VECTOR_WIDTH_HALF 0x1034
#define CL_DEVICE_HOST_UNIFIED_MEMORY 0x1035
#define CL_DEVICE_NATIVE_VECTOR_WIDTH_CHAR 0x1036
#define CL_DEVICE_NATIVE_VECTOR_WIDTH_SHORT 0x1037
#define CL_DEVICE_NATIVE_VECTOR_WIDTH_INT 0x1038
#define CL_DEVICE_NATIVE_VECTOR_WIDTH_LONG 0x1039
#define CL_DEVICE_NATIVE_VECTOR_WIDTH_FLOAT 0x103A
#define CL_DEVICE_NATIVE_VECTOR_WIDTH_DOUBLE 0x103B
#define CL_DEVICE_NATIVE_VECTOR_WIDTH_HALF 0x103C
#define CL_DEVICE_OPENCL_C_VERSION 0x103D
#define CL_DEVICE_LINKER_AVAILABLE 0x103E
#define CL_DEVICE_BUILT_IN_KERNELS 0x103F
#define CL_DEVICE_IMAGE_MAX_BUFFER_SIZE 0x1040
#define CL_DEVICE_IMAGE_MAX_ARRAY_SIZE 0x1041
#define CL_DEVICE_PARENT_DEVICE 0x1042
#define CL_DEVICE_PARTITION_MAX_SUB_DEVICES 0x1043
#define CL_DEVICE_PARTITION_PROPERTIES 0x1044
#define CL_DEVICE_PARTITION_AFFINITY_DOMAIN 0x1045
#define CL_DEVICE_PARTITION_TYPE 0x1046
#define CL_DEVICE_REFERENCE_COUNT 0x1047
#define CL_DEVICE_PREFERRED_INTEROP_USER_SYNC 0x1048
#define CL_DEVICE_PRINTF_BUFFER_SIZE 0x1049
#define CL_DEVICE_IMAGE_PITCH_ALIGNMENT 0x104A
#define CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT 0x104B
/* cl_device_fp_config - bitfield */
#define CL_FP_DENORM (1 << 0)
#define CL_FP_INF_NAN (1 << 1)
#define CL_FP_ROUND_TO_NEAREST (1 << 2)
#define CL_FP_ROUND_TO_ZERO (1 << 3)
#define CL_FP_ROUND_TO_INF (1 << 4)
#define CL_FP_FMA (1 << 5)
#define CL_FP_SOFT_FLOAT (1 << 6)
#define CL_FP_CORRECTLY_ROUNDED_DIVIDE_SQRT (1 << 7)
/* cl_device_mem_cache_type */
#define CL_NONE 0x0
#define CL_READ_ONLY_CACHE 0x1
#define CL_READ_WRITE_CACHE 0x2
/* cl_device_local_mem_type */
#define CL_LOCAL 0x1
#define CL_GLOBAL 0x2
/* cl_device_exec_capabilities - bitfield */
#define CL_EXEC_KERNEL (1 << 0)
#define CL_EXEC_NATIVE_KERNEL (1 << 1)
/* cl_command_queue_properties - bitfield */
#define CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE (1 << 0)
#define CL_QUEUE_PROFILING_ENABLE (1 << 1)
/* cl_context_info */
#define CL_CONTEXT_REFERENCE_COUNT 0x1080
#define CL_CONTEXT_DEVICES 0x1081
#define CL_CONTEXT_PROPERTIES 0x1082
#define CL_CONTEXT_NUM_DEVICES 0x1083
/* cl_context_properties */
#define CL_CONTEXT_PLATFORM 0x1084
#define CL_CONTEXT_INTEROP_USER_SYNC 0x1085
/* cl_device_partition_property */
#define CL_DEVICE_PARTITION_EQUALLY 0x1086
#define CL_DEVICE_PARTITION_BY_COUNTS 0x1087
#define CL_DEVICE_PARTITION_BY_COUNTS_LIST_END 0x0
#define CL_DEVICE_PARTITION_BY_AFFINITY_DOMAIN 0x1088
/* cl_device_affinity_domain */
#define CL_DEVICE_AFFINITY_DOMAIN_NUMA (1 << 0)
#define CL_DEVICE_AFFINITY_DOMAIN_L4_CACHE (1 << 1)
#define CL_DEVICE_AFFINITY_DOMAIN_L3_CACHE (1 << 2)
#define CL_DEVICE_AFFINITY_DOMAIN_L2_CACHE (1 << 3)
#define CL_DEVICE_AFFINITY_DOMAIN_L1_CACHE (1 << 4)
#define CL_DEVICE_AFFINITY_DOMAIN_NEXT_PARTITIONABLE (1 << 5)
/* cl_command_queue_info */
#define CL_QUEUE_CONTEXT 0x1090
#define CL_QUEUE_DEVICE 0x1091
#define CL_QUEUE_REFERENCE_COUNT 0x1092
#define CL_QUEUE_PROPERTIES 0x1093
/* cl_mem_flags - bitfield */
#define CL_MEM_READ_WRITE (1 << 0)
#define CL_MEM_WRITE_ONLY (1 << 1)
#define CL_MEM_READ_ONLY (1 << 2)
#define CL_MEM_USE_HOST_PTR (1 << 3)
#define CL_MEM_ALLOC_HOST_PTR (1 << 4)
#define CL_MEM_COPY_HOST_PTR (1 << 5)
// reserved (1 << 6)
#define CL_MEM_HOST_WRITE_ONLY (1 << 7)
#define CL_MEM_HOST_READ_ONLY (1 << 8)
#define CL_MEM_HOST_NO_ACCESS (1 << 9)
/* cl_mem_migration_flags - bitfield */
#define CL_MIGRATE_MEM_OBJECT_HOST (1 << 0)
#define CL_MIGRATE_MEM_OBJECT_CONTENT_UNDEFINED (1 << 1)
/* cl_channel_order */
#define CL_R 0x10B0
#define CL_A 0x10B1
#define CL_RG 0x10B2
#define CL_RA 0x10B3
#define CL_RGB 0x10B4
#define CL_RGBA 0x10B5
#define CL_BGRA 0x10B6
#define CL_ARGB 0x10B7
#define CL_INTENSITY 0x10B8
#define CL_LUMINANCE 0x10B9
#define CL_Rx 0x10BA
#define CL_RGx 0x10BB
#define CL_RGBx 0x10BC
#define CL_DEPTH 0x10BD
#define CL_DEPTH_STENCIL 0x10BE
/* cl_channel_type */
#define CL_SNORM_INT8 0x10D0
#define CL_SNORM_INT16 0x10D1
#define CL_UNORM_INT8 0x10D2
#define CL_UNORM_INT16 0x10D3
#define CL_UNORM_SHORT_565 0x10D4
#define CL_UNORM_SHORT_555 0x10D5
#define CL_UNORM_INT_101010 0x10D6
#define CL_SIGNED_INT8 0x10D7
#define CL_SIGNED_INT16 0x10D8
#define CL_SIGNED_INT32 0x10D9
#define CL_UNSIGNED_INT8 0x10DA
#define CL_UNSIGNED_INT16 0x10DB
#define CL_UNSIGNED_INT32 0x10DC
#define CL_HALF_FLOAT 0x10DD
#define CL_FLOAT 0x10DE
#define CL_UNORM_INT24 0x10DF
/* cl_mem_object_type */
#define CL_MEM_OBJECT_BUFFER 0x10F0
#define CL_MEM_OBJECT_IMAGE2D 0x10F1
#define CL_MEM_OBJECT_IMAGE3D 0x10F2
#define CL_MEM_OBJECT_IMAGE2D_ARRAY 0x10F3
#define CL_MEM_OBJECT_IMAGE1D 0x10F4
#define CL_MEM_OBJECT_IMAGE1D_ARRAY 0x10F5
#define CL_MEM_OBJECT_IMAGE1D_BUFFER 0x10F6
/* cl_mem_info */
#define CL_MEM_TYPE 0x1100
#define CL_MEM_FLAGS 0x1101
#define CL_MEM_SIZE 0x1102
#define CL_MEM_HOST_PTR 0x1103
#define CL_MEM_MAP_COUNT 0x1104
#define CL_MEM_REFERENCE_COUNT 0x1105
#define CL_MEM_CONTEXT 0x1106
#define CL_MEM_ASSOCIATED_MEMOBJECT 0x1107
#define CL_MEM_OFFSET 0x1108
/* cl_image_info */
#define CL_IMAGE_FORMAT 0x1110
#define CL_IMAGE_ELEMENT_SIZE 0x1111
#define CL_IMAGE_ROW_PITCH 0x1112
#define CL_IMAGE_SLICE_PITCH 0x1113
#define CL_IMAGE_WIDTH 0x1114
#define CL_IMAGE_HEIGHT 0x1115
#define CL_IMAGE_DEPTH 0x1116
#define CL_IMAGE_ARRAY_SIZE 0x1117
#define CL_IMAGE_BUFFER 0x1118
#define CL_IMAGE_NUM_MIP_LEVELS 0x1119
#define CL_IMAGE_NUM_SAMPLES 0x111A
/* cl_addressing_mode */
#define CL_ADDRESS_NONE 0x1130
#define CL_ADDRESS_CLAMP_TO_EDGE 0x1131
#define CL_ADDRESS_CLAMP 0x1132
#define CL_ADDRESS_REPEAT 0x1133
#define CL_ADDRESS_MIRRORED_REPEAT 0x1134
/* cl_filter_mode */
#define CL_FILTER_NEAREST 0x1140
#define CL_FILTER_LINEAR 0x1141
/* cl_sampler_info */
#define CL_SAMPLER_REFERENCE_COUNT 0x1150
#define CL_SAMPLER_CONTEXT 0x1151
#define CL_SAMPLER_NORMALIZED_COORDS 0x1152
#define CL_SAMPLER_ADDRESSING_MODE 0x1153
#define CL_SAMPLER_FILTER_MODE 0x1154
/* cl_map_flags - bitfield */
#define CL_MAP_READ (1 << 0)
#define CL_MAP_WRITE (1 << 1)
#define CL_MAP_WRITE_INVALIDATE_REGION (1 << 2)
/* cl_program_info */
#define CL_PROGRAM_REFERENCE_COUNT 0x1160
#define CL_PROGRAM_CONTEXT 0x1161
#define CL_PROGRAM_NUM_DEVICES 0x1162
#define CL_PROGRAM_DEVICES 0x1163
#define CL_PROGRAM_SOURCE 0x1164
#define CL_PROGRAM_BINARY_SIZES 0x1165
#define CL_PROGRAM_BINARIES 0x1166
#define CL_PROGRAM_NUM_KERNELS 0x1167
#define CL_PROGRAM_KERNEL_NAMES 0x1168
/* cl_program_build_info */
#define CL_PROGRAM_BUILD_STATUS 0x1181
#define CL_PROGRAM_BUILD_OPTIONS 0x1182
#define CL_PROGRAM_BUILD_LOG 0x1183
#define CL_PROGRAM_BINARY_TYPE 0x1184
/* cl_program_binary_type */
#define CL_PROGRAM_BINARY_TYPE_NONE 0x0
#define CL_PROGRAM_BINARY_TYPE_COMPILED_OBJECT 0x1
#define CL_PROGRAM_BINARY_TYPE_LIBRARY 0x2
#define CL_PROGRAM_BINARY_TYPE_EXECUTABLE 0x4
/* cl_build_status */
#define CL_BUILD_SUCCESS 0
#define CL_BUILD_NONE -1
#define CL_BUILD_ERROR -2
#define CL_BUILD_IN_PROGRESS -3
/* cl_kernel_info */
#define CL_KERNEL_FUNCTION_NAME 0x1190
#define CL_KERNEL_NUM_ARGS 0x1191
#define CL_KERNEL_REFERENCE_COUNT 0x1192
#define CL_KERNEL_CONTEXT 0x1193
#define CL_KERNEL_PROGRAM 0x1194
#define CL_KERNEL_ATTRIBUTES 0x1195
/* cl_kernel_arg_info */
#define CL_KERNEL_ARG_ADDRESS_QUALIFIER 0x1196
#define CL_KERNEL_ARG_ACCESS_QUALIFIER 0x1197
#define CL_KERNEL_ARG_TYPE_NAME 0x1198
#define CL_KERNEL_ARG_TYPE_QUALIFIER 0x1199
#define CL_KERNEL_ARG_NAME 0x119A
/* cl_kernel_arg_address_qualifier */
#define CL_KERNEL_ARG_ADDRESS_GLOBAL 0x119B
#define CL_KERNEL_ARG_ADDRESS_LOCAL 0x119C
#define CL_KERNEL_ARG_ADDRESS_CONSTANT 0x119D
#define CL_KERNEL_ARG_ADDRESS_PRIVATE 0x119E
/* cl_kernel_arg_access_qualifier */
#define CL_KERNEL_ARG_ACCESS_READ_ONLY 0x11A0
#define CL_KERNEL_ARG_ACCESS_WRITE_ONLY 0x11A1
#define CL_KERNEL_ARG_ACCESS_READ_WRITE 0x11A2
#define CL_KERNEL_ARG_ACCESS_NONE 0x11A3
/* cl_kernel_arg_type_qualifer */
#define CL_KERNEL_ARG_TYPE_NONE 0
#define CL_KERNEL_ARG_TYPE_CONST (1 << 0)
#define CL_KERNEL_ARG_TYPE_RESTRICT (1 << 1)
#define CL_KERNEL_ARG_TYPE_VOLATILE (1 << 2)
/* cl_kernel_work_group_info */
#define CL_KERNEL_WORK_GROUP_SIZE 0x11B0
#define CL_KERNEL_COMPILE_WORK_GROUP_SIZE 0x11B1
#define CL_KERNEL_LOCAL_MEM_SIZE 0x11B2
#define CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE 0x11B3
#define CL_KERNEL_PRIVATE_MEM_SIZE 0x11B4
#define CL_KERNEL_GLOBAL_WORK_SIZE 0x11B5
/* cl_event_info */
#define CL_EVENT_COMMAND_QUEUE 0x11D0
#define CL_EVENT_COMMAND_TYPE 0x11D1
#define CL_EVENT_REFERENCE_COUNT 0x11D2
#define CL_EVENT_COMMAND_EXECUTION_STATUS 0x11D3
#define CL_EVENT_CONTEXT 0x11D4
/* cl_command_type */
#define CL_COMMAND_NDRANGE_KERNEL 0x11F0
#define CL_COMMAND_TASK 0x11F1
#define CL_COMMAND_NATIVE_KERNEL 0x11F2
#define CL_COMMAND_READ_BUFFER 0x11F3
#define CL_COMMAND_WRITE_BUFFER 0x11F4
#define CL_COMMAND_COPY_BUFFER 0x11F5
#define CL_COMMAND_READ_IMAGE 0x11F6
#define CL_COMMAND_WRITE_IMAGE 0x11F7
#define CL_COMMAND_COPY_IMAGE 0x11F8
#define CL_COMMAND_COPY_IMAGE_TO_BUFFER 0x11F9
#define CL_COMMAND_COPY_BUFFER_TO_IMAGE 0x11FA
#define CL_COMMAND_MAP_BUFFER 0x11FB
#define CL_COMMAND_MAP_IMAGE 0x11FC
#define CL_COMMAND_UNMAP_MEM_OBJECT 0x11FD
#define CL_COMMAND_MARKER 0x11FE
#define CL_COMMAND_ACQUIRE_GL_OBJECTS 0x11FF
#define CL_COMMAND_RELEASE_GL_OBJECTS 0x1200
#define CL_COMMAND_READ_BUFFER_RECT 0x1201
#define CL_COMMAND_WRITE_BUFFER_RECT 0x1202
#define CL_COMMAND_COPY_BUFFER_RECT 0x1203
#define CL_COMMAND_USER 0x1204
#define CL_COMMAND_BARRIER 0x1205
#define CL_COMMAND_MIGRATE_MEM_OBJECTS 0x1206
#define CL_COMMAND_FILL_BUFFER 0x1207
#define CL_COMMAND_FILL_IMAGE 0x1208
/* command execution status */
#define CL_COMPLETE 0x0
#define CL_RUNNING 0x1
#define CL_SUBMITTED 0x2
#define CL_QUEUED 0x3
/* cl_buffer_create_type */
#define CL_BUFFER_CREATE_TYPE_REGION 0x1220
/* cl_profiling_info */
#define CL_PROFILING_COMMAND_QUEUED 0x1280
#define CL_PROFILING_COMMAND_SUBMIT 0x1281
#define CL_PROFILING_COMMAND_START 0x1282
#define CL_PROFILING_COMMAND_END 0x1283
/********************************************************************************************************/
/* Platform API */
extern CL_API_ENTRY cl_int CL_API_CALL
clGetPlatformIDs(cl_uint /* num_entries */,
cl_platform_id * /* platforms */,
cl_uint * /* num_platforms */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clGetPlatformInfo(cl_platform_id /* platform */,
cl_platform_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
/* Device APIs */
extern CL_API_ENTRY cl_int CL_API_CALL
clGetDeviceIDs(cl_platform_id /* platform */,
cl_device_type /* device_type */,
cl_uint /* num_entries */,
cl_device_id * /* devices */,
cl_uint * /* num_devices */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clGetDeviceInfo(cl_device_id /* device */,
cl_device_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clCreateSubDevices(cl_device_id /* in_device */,
const cl_device_partition_property * /* properties */,
cl_uint /* num_devices */,
cl_device_id * /* out_devices */,
cl_uint * /* num_devices_ret */) CL_API_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_int CL_API_CALL
clRetainDevice(cl_device_id /* device */) CL_API_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_int CL_API_CALL
clReleaseDevice(cl_device_id /* device */) CL_API_SUFFIX__VERSION_1_2;
/* Context APIs */
extern CL_API_ENTRY cl_context CL_API_CALL
clCreateContext(const cl_context_properties * /* properties */,
cl_uint /* num_devices */,
const cl_device_id * /* devices */,
void (CL_CALLBACK * /* pfn_notify */)(const char *, const void *, size_t, void *),
void * /* user_data */,
cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_context CL_API_CALL
clCreateContextFromType(const cl_context_properties * /* properties */,
cl_device_type /* device_type */,
void (CL_CALLBACK * /* pfn_notify*/ )(const char *, const void *, size_t, void *),
void * /* user_data */,
cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clRetainContext(cl_context /* context */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clReleaseContext(cl_context /* context */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clGetContextInfo(cl_context /* context */,
cl_context_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
/* Command Queue APIs */
extern CL_API_ENTRY cl_command_queue CL_API_CALL
clCreateCommandQueue(cl_context /* context */,
cl_device_id /* device */,
cl_command_queue_properties /* properties */,
cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clRetainCommandQueue(cl_command_queue /* command_queue */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clReleaseCommandQueue(cl_command_queue /* command_queue */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clGetCommandQueueInfo(cl_command_queue /* command_queue */,
cl_command_queue_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
/* Memory Object APIs */
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateBuffer(cl_context /* context */,
cl_mem_flags /* flags */,
size_t /* size */,
void * /* host_ptr */,
cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateSubBuffer(cl_mem /* buffer */,
cl_mem_flags /* flags */,
cl_buffer_create_type /* buffer_create_type */,
const void * /* buffer_create_info */,
cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_1;
extern CL_API_ENTRY cl_mem CL_API_CALL
clCreateImage(cl_context /* context */,
cl_mem_flags /* flags */,
const cl_image_format * /* image_format */,
const cl_image_desc * /* image_desc */,
void * /* host_ptr */,
cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_int CL_API_CALL
clRetainMemObject(cl_mem /* memobj */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clReleaseMemObject(cl_mem /* memobj */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clGetSupportedImageFormats(cl_context /* context */,
cl_mem_flags /* flags */,
cl_mem_object_type /* image_type */,
cl_uint /* num_entries */,
cl_image_format * /* image_formats */,
cl_uint * /* num_image_formats */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clGetMemObjectInfo(cl_mem /* memobj */,
cl_mem_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clGetImageInfo(cl_mem /* image */,
cl_image_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clSetMemObjectDestructorCallback( cl_mem /* memobj */,
void (CL_CALLBACK * /*pfn_notify*/)( cl_mem /* memobj */, void* /*user_data*/),
void * /*user_data */ ) CL_API_SUFFIX__VERSION_1_1;
/* Sampler APIs */
extern CL_API_ENTRY cl_sampler CL_API_CALL
clCreateSampler(cl_context /* context */,
cl_bool /* normalized_coords */,
cl_addressing_mode /* addressing_mode */,
cl_filter_mode /* filter_mode */,
cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clRetainSampler(cl_sampler /* sampler */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clReleaseSampler(cl_sampler /* sampler */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clGetSamplerInfo(cl_sampler /* sampler */,
cl_sampler_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
/* Program Object APIs */
extern CL_API_ENTRY cl_program CL_API_CALL
clCreateProgramWithSource(cl_context /* context */,
cl_uint /* count */,
const char ** /* strings */,
const size_t * /* lengths */,
cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_program CL_API_CALL
clCreateProgramWithBinary(cl_context /* context */,
cl_uint /* num_devices */,
const cl_device_id * /* device_list */,
const size_t * /* lengths */,
const unsigned char ** /* binaries */,
cl_int * /* binary_status */,
cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_program CL_API_CALL
clCreateProgramWithBuiltInKernels(cl_context /* context */,
cl_uint /* num_devices */,
const cl_device_id * /* device_list */,
const char * /* kernel_names */,
cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_int CL_API_CALL
clRetainProgram(cl_program /* program */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clReleaseProgram(cl_program /* program */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clBuildProgram(cl_program /* program */,
cl_uint /* num_devices */,
const cl_device_id * /* device_list */,
const char * /* options */,
void (CL_CALLBACK * /* pfn_notify */)(cl_program /* program */, void * /* user_data */),
void * /* user_data */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clCompileProgram(cl_program /* program */,
cl_uint /* num_devices */,
const cl_device_id * /* device_list */,
const char * /* options */,
cl_uint /* num_input_headers */,
const cl_program * /* input_headers */,
const char ** /* header_include_names */,
void (CL_CALLBACK * /* pfn_notify */)(cl_program /* program */, void * /* user_data */),
void * /* user_data */) CL_API_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_program CL_API_CALL
clLinkProgram(cl_context /* context */,
cl_uint /* num_devices */,
const cl_device_id * /* device_list */,
const char * /* options */,
cl_uint /* num_input_programs */,
const cl_program * /* input_programs */,
void (CL_CALLBACK * /* pfn_notify */)(cl_program /* program */, void * /* user_data */),
void * /* user_data */,
cl_int * /* errcode_ret */ ) CL_API_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_int CL_API_CALL
clUnloadPlatformCompiler(cl_platform_id /* platform */) CL_API_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_int CL_API_CALL
clGetProgramInfo(cl_program /* program */,
cl_program_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clGetProgramBuildInfo(cl_program /* program */,
cl_device_id /* device */,
cl_program_build_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
/* Kernel Object APIs */
extern CL_API_ENTRY cl_kernel CL_API_CALL
clCreateKernel(cl_program /* program */,
const char * /* kernel_name */,
cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clCreateKernelsInProgram(cl_program /* program */,
cl_uint /* num_kernels */,
cl_kernel * /* kernels */,
cl_uint * /* num_kernels_ret */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clRetainKernel(cl_kernel /* kernel */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clReleaseKernel(cl_kernel /* kernel */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clSetKernelArg(cl_kernel /* kernel */,
cl_uint /* arg_index */,
size_t /* arg_size */,
const void * /* arg_value */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clGetKernelInfo(cl_kernel /* kernel */,
cl_kernel_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clGetKernelArgInfo(cl_kernel /* kernel */,
cl_uint /* arg_indx */,
cl_kernel_arg_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_int CL_API_CALL
clGetKernelWorkGroupInfo(cl_kernel /* kernel */,
cl_device_id /* device */,
cl_kernel_work_group_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
/* Event Object APIs */
extern CL_API_ENTRY cl_int CL_API_CALL
clWaitForEvents(cl_uint /* num_events */,
const cl_event * /* event_list */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clGetEventInfo(cl_event /* event */,
cl_event_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_event CL_API_CALL
clCreateUserEvent(cl_context /* context */,
cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_1;
extern CL_API_ENTRY cl_int CL_API_CALL
clRetainEvent(cl_event /* event */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clReleaseEvent(cl_event /* event */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clSetUserEventStatus(cl_event /* event */,
cl_int /* execution_status */) CL_API_SUFFIX__VERSION_1_1;
extern CL_API_ENTRY cl_int CL_API_CALL
clSetEventCallback( cl_event /* event */,
cl_int /* command_exec_callback_type */,
void (CL_CALLBACK * /* pfn_notify */)(cl_event, cl_int, void *),
void * /* user_data */) CL_API_SUFFIX__VERSION_1_1;
/* Profiling APIs */
extern CL_API_ENTRY cl_int CL_API_CALL
clGetEventProfilingInfo(cl_event /* event */,
cl_profiling_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
/* Flush and Finish APIs */
extern CL_API_ENTRY cl_int CL_API_CALL
clFlush(cl_command_queue /* command_queue */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clFinish(cl_command_queue /* command_queue */) CL_API_SUFFIX__VERSION_1_0;
/* Enqueued Commands APIs */
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueReadBuffer(cl_command_queue /* command_queue */,
cl_mem /* buffer */,
cl_bool /* blocking_read */,
size_t /* offset */,
size_t /* size */,
void * /* ptr */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueReadBufferRect(cl_command_queue /* command_queue */,
cl_mem /* buffer */,
cl_bool /* blocking_read */,
const size_t * /* buffer_offset */,
const size_t * /* host_offset */,
const size_t * /* region */,
size_t /* buffer_row_pitch */,
size_t /* buffer_slice_pitch */,
size_t /* host_row_pitch */,
size_t /* host_slice_pitch */,
void * /* ptr */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_1;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueWriteBuffer(cl_command_queue /* command_queue */,
cl_mem /* buffer */,
cl_bool /* blocking_write */,
size_t /* offset */,
size_t /* size */,
const void * /* ptr */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueWriteBufferRect(cl_command_queue /* command_queue */,
cl_mem /* buffer */,
cl_bool /* blocking_write */,
const size_t * /* buffer_offset */,
const size_t * /* host_offset */,
const size_t * /* region */,
size_t /* buffer_row_pitch */,
size_t /* buffer_slice_pitch */,
size_t /* host_row_pitch */,
size_t /* host_slice_pitch */,
const void * /* ptr */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_1;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueFillBuffer(cl_command_queue /* command_queue */,
cl_mem /* buffer */,
const void * /* pattern */,
size_t /* pattern_size */,
size_t /* offset */,
size_t /* size */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueCopyBuffer(cl_command_queue /* command_queue */,
cl_mem /* src_buffer */,
cl_mem /* dst_buffer */,
size_t /* src_offset */,
size_t /* dst_offset */,
size_t /* size */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueCopyBufferRect(cl_command_queue /* command_queue */,
cl_mem /* src_buffer */,
cl_mem /* dst_buffer */,
const size_t * /* src_origin */,
const size_t * /* dst_origin */,
const size_t * /* region */,
size_t /* src_row_pitch */,
size_t /* src_slice_pitch */,
size_t /* dst_row_pitch */,
size_t /* dst_slice_pitch */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_1;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueReadImage(cl_command_queue /* command_queue */,
cl_mem /* image */,
cl_bool /* blocking_read */,
const size_t * /* origin[3] */,
const size_t * /* region[3] */,
size_t /* row_pitch */,
size_t /* slice_pitch */,
void * /* ptr */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueWriteImage(cl_command_queue /* command_queue */,
cl_mem /* image */,
cl_bool /* blocking_write */,
const size_t * /* origin[3] */,
const size_t * /* region[3] */,
size_t /* input_row_pitch */,
size_t /* input_slice_pitch */,
const void * /* ptr */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueFillImage(cl_command_queue /* command_queue */,
cl_mem /* image */,
const void * /* fill_color */,
const size_t * /* origin[3] */,
const size_t * /* region[3] */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueCopyImage(cl_command_queue /* command_queue */,
cl_mem /* src_image */,
cl_mem /* dst_image */,
const size_t * /* src_origin[3] */,
const size_t * /* dst_origin[3] */,
const size_t * /* region[3] */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueCopyImageToBuffer(cl_command_queue /* command_queue */,
cl_mem /* src_image */,
cl_mem /* dst_buffer */,
const size_t * /* src_origin[3] */,
const size_t * /* region[3] */,
size_t /* dst_offset */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueCopyBufferToImage(cl_command_queue /* command_queue */,
cl_mem /* src_buffer */,
cl_mem /* dst_image */,
size_t /* src_offset */,
const size_t * /* dst_origin[3] */,
const size_t * /* region[3] */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY void * CL_API_CALL
clEnqueueMapBuffer(cl_command_queue /* command_queue */,
cl_mem /* buffer */,
cl_bool /* blocking_map */,
cl_map_flags /* map_flags */,
size_t /* offset */,
size_t /* size */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */,
cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY void * CL_API_CALL
clEnqueueMapImage(cl_command_queue /* command_queue */,
cl_mem /* image */,
cl_bool /* blocking_map */,
cl_map_flags /* map_flags */,
const size_t * /* origin[3] */,
const size_t * /* region[3] */,
size_t * /* image_row_pitch */,
size_t * /* image_slice_pitch */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */,
cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueUnmapMemObject(cl_command_queue /* command_queue */,
cl_mem /* memobj */,
void * /* mapped_ptr */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueMigrateMemObjects(cl_command_queue /* command_queue */,
cl_uint /* num_mem_objects */,
const cl_mem * /* mem_objects */,
cl_mem_migration_flags /* flags */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueNDRangeKernel(cl_command_queue /* command_queue */,
cl_kernel /* kernel */,
cl_uint /* work_dim */,
const size_t * /* global_work_offset */,
const size_t * /* global_work_size */,
const size_t * /* local_work_size */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueTask(cl_command_queue /* command_queue */,
cl_kernel /* kernel */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueNativeKernel(cl_command_queue /* command_queue */,
void (CL_CALLBACK * /*user_func*/)(void *),
void * /* args */,
size_t /* cb_args */,
cl_uint /* num_mem_objects */,
const cl_mem * /* mem_list */,
const void ** /* args_mem_loc */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueMarkerWithWaitList(cl_command_queue /* command_queue */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_2;
extern CL_API_ENTRY cl_int CL_API_CALL
clEnqueueBarrierWithWaitList(cl_command_queue /* command_queue */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_2;
/* Extension function access
*
* Returns the extension function address for the given function name,
* or NULL if a valid function can not be found. The client must
* check to make sure the address is not NULL, before using or
* calling the returned function address.
*/
extern CL_API_ENTRY void * CL_API_CALL
clGetExtensionFunctionAddressForPlatform(cl_platform_id /* platform */,
const char * /* func_name */) CL_API_SUFFIX__VERSION_1_2;
// Deprecated OpenCL 1.1 APIs
extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL
clCreateImage2D(cl_context /* context */,
cl_mem_flags /* flags */,
const cl_image_format * /* image_format */,
size_t /* image_width */,
size_t /* image_height */,
size_t /* image_row_pitch */,
void * /* host_ptr */,
cl_int * /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL
clCreateImage3D(cl_context /* context */,
cl_mem_flags /* flags */,
const cl_image_format * /* image_format */,
size_t /* image_width */,
size_t /* image_height */,
size_t /* image_depth */,
size_t /* image_row_pitch */,
size_t /* image_slice_pitch */,
void * /* host_ptr */,
cl_int * /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_int CL_API_CALL
clEnqueueMarker(cl_command_queue /* command_queue */,
cl_event * /* event */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_int CL_API_CALL
clEnqueueWaitForEvents(cl_command_queue /* command_queue */,
cl_uint /* num_events */,
const cl_event * /* event_list */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_int CL_API_CALL
clEnqueueBarrier(cl_command_queue /* command_queue */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_int CL_API_CALL
clUnloadCompiler(void) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED void * CL_API_CALL
clGetExtensionFunctionAddress(const char * /* func_name */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
#ifdef __cplusplus
}
#endif
#endif /* __OPENCL_CL_H */
HandBrake-0.10.2/libhb/extras/cl_platform.h 0000664 0001752 0001752 00000121075 12230760063 021105 0 ustar handbrake handbrake /**********************************************************************************
* Copyright (c) 2008-2012 The Khronos Group Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and/or associated documentation files (the
* "Materials"), to deal in the Materials without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Materials, and to
* permit persons to whom the Materials are furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Materials.
*
* THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
**********************************************************************************/
/* $Revision: 11803 $ on $Date: 2010-06-25 10:02:12 -0700 (Fri, 25 Jun 2010) $ */
#ifndef __CL_PLATFORM_H
#define __CL_PLATFORM_H
#ifdef __APPLE__
/* Contains #defines for AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER below */
/* NOTE(review): the header name was stripped in this copy of the file;
 * restored per the upstream Khronos cl_platform.h. */
#include <AvailabilityMacros.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
#if defined(_WIN32)
#define CL_API_ENTRY
#define CL_API_CALL __stdcall
#define CL_CALLBACK __stdcall
#else
#define CL_API_ENTRY
#define CL_API_CALL
#define CL_CALLBACK
#endif
#ifdef __APPLE__
#define CL_EXTENSION_WEAK_LINK __attribute__((weak_import))
#ifndef UNAVAILABLE_ATTRIBUTE
#define UNAVAILABLE_ATTRIBUTE
#endif
#ifdef AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER
#define CL_API_SUFFIX__VERSION_1_0 AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER
#define CL_EXT_SUFFIX__VERSION_1_0 CL_EXTENSION_WEAK_LINK AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER
#else
#define CL_API_SUFFIX__VERSION_1_0 UNAVAILABLE_ATTRIBUTE
#define CL_EXT_SUFFIX__VERSION_1_0 CL_EXTENSION_WEAK_LINK UNAVAILABLE_ATTRIBUTE
#endif
/* Availability suffixes for OpenCL 1.1 entry points on OS X: weak-linked and
 * version-gated when 10.7 availability macros exist, unavailable otherwise. */
#ifdef AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER
#define CL_API_SUFFIX__VERSION_1_1 AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER
#define GCL_API_SUFFIX__VERSION_1_1 AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER
#define CL_EXT_SUFFIX__VERSION_1_1 CL_EXTENSION_WEAK_LINK AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER
#define CL_EXT_SUFFIX__VERSION_1_0_DEPRECATED CL_EXTENSION_WEAK_LINK AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER_BUT_DEPRECATED_IN_MAC_OS_X_VERSION_10_7
#else
#define CL_API_SUFFIX__VERSION_1_1 UNAVAILABLE_ATTRIBUTE
#define GCL_API_SUFFIX__VERSION_1_1 UNAVAILABLE_ATTRIBUTE
#define CL_EXT_SUFFIX__VERSION_1_1 CL_EXTENSION_WEAK_LINK UNAVAILABLE_ATTRIBUTE
/* Fix: was misspelled "CL_EXT_SUFFIX__VERSION_1_0_DEPRECATE" (missing final D),
 * leaving CL_EXT_SUFFIX__VERSION_1_0_DEPRECATED undefined on this branch while
 * the 10.7+ branch above defines it correctly. */
#define CL_EXT_SUFFIX__VERSION_1_0_DEPRECATED CL_EXT_SUFFIX__VERSION_1_0
#endif
#ifdef AVAILABLE_MAC_OS_X_VERSION_10_8_AND_LATER
#define CL_API_SUFFIX__VERSION_1_2 AVAILABLE_MAC_OS_X_VERSION_10_8_AND_LATER
#define GCL_API_SUFFIX__VERSION_1_2 AVAILABLE_MAC_OS_X_VERSION_10_8_AND_LATER
#define CL_EXT_SUFFIX__VERSION_1_2 CL_EXTENSION_WEAK_LINK AVAILABLE_MAC_OS_X_VERSION_10_8_AND_LATER
#define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
#define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED CL_EXTENSION_WEAK_LINK AVAILABLE_MAC_OS_X_VERSION_10_7_AND_LATER_BUT_DEPRECATED_IN_MAC_OS_X_VERSION_10_8
#else
#define CL_API_SUFFIX__VERSION_1_2 UNAVAILABLE_ATTRIBUTE
#define GCL_API_SUFFIX__VERSION_1_2 UNAVAILABLE_ATTRIBUTE
#define CL_EXT_SUFFIX__VERSION_1_2 CL_EXTENSION_WEAK_LINK UNAVAILABLE_ATTRIBUTE
#define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
#define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED CL_EXT_SUFFIX__VERSION_1_1
#endif
#else
#define CL_EXTENSION_WEAK_LINK
#define CL_API_SUFFIX__VERSION_1_0
#define CL_EXT_SUFFIX__VERSION_1_0
#define CL_API_SUFFIX__VERSION_1_1
#define CL_EXT_SUFFIX__VERSION_1_1
#define CL_API_SUFFIX__VERSION_1_2
#define CL_EXT_SUFFIX__VERSION_1_2
#ifdef __GNUC__
#ifdef CL_USE_DEPRECATED_OPENCL_1_0_APIS
#define CL_EXT_SUFFIX__VERSION_1_0_DEPRECATED
#define CL_EXT_PREFIX__VERSION_1_0_DEPRECATED
#else
#define CL_EXT_SUFFIX__VERSION_1_0_DEPRECATED __attribute__((deprecated))
#define CL_EXT_PREFIX__VERSION_1_0_DEPRECATED
#endif
#ifdef CL_USE_DEPRECATED_OPENCL_1_1_APIS
#define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
#define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
#else
#define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED __attribute__((deprecated))
#define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
#endif
#elif _WIN32
#ifdef CL_USE_DEPRECATED_OPENCL_1_0_APIS
#define CL_EXT_SUFFIX__VERSION_1_0_DEPRECATED
#define CL_EXT_PREFIX__VERSION_1_0_DEPRECATED
#else
#define CL_EXT_SUFFIX__VERSION_1_0_DEPRECATED
#define CL_EXT_PREFIX__VERSION_1_0_DEPRECATED __declspec(deprecated)
#endif
#ifdef CL_USE_DEPRECATED_OPENCL_1_1_APIS
#define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
#define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
#else
#define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
#define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED __declspec(deprecated)
#endif
#else
#define CL_EXT_SUFFIX__VERSION_1_0_DEPRECATED
#define CL_EXT_PREFIX__VERSION_1_0_DEPRECATED
#define CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED
#define CL_EXT_PREFIX__VERSION_1_1_DEPRECATED
#endif
#endif
#if (defined (_WIN32) && defined(_MSC_VER))
/* scalar types */
/* MSVC branch: map the OpenCL scalar types to MSVC's sized __intN types
 * (this header predates reliable <stdint.h> availability in MSVC). */
typedef signed __int8 cl_char;
typedef unsigned __int8 cl_uchar;
typedef signed __int16 cl_short;
typedef unsigned __int16 cl_ushort;
typedef signed __int32 cl_int;
typedef unsigned __int32 cl_uint;
typedef signed __int64 cl_long;
typedef unsigned __int64 cl_ulong;
/* cl_half is a 16-bit storage-only type; no half arithmetic is provided here. */
typedef unsigned __int16 cl_half;
typedef float cl_float;
typedef double cl_double;
/* Macro names and corresponding values defined by OpenCL */
#define CL_CHAR_BIT 8
#define CL_SCHAR_MAX 127
#define CL_SCHAR_MIN (-127-1)
#define CL_CHAR_MAX CL_SCHAR_MAX
#define CL_CHAR_MIN CL_SCHAR_MIN
#define CL_UCHAR_MAX 255
#define CL_SHRT_MAX 32767
#define CL_SHRT_MIN (-32767-1)
#define CL_USHRT_MAX 65535
#define CL_INT_MAX 2147483647
#define CL_INT_MIN (-2147483647-1)
#define CL_UINT_MAX 0xffffffffU
#define CL_LONG_MAX ((cl_long) 0x7FFFFFFFFFFFFFFFLL)
#define CL_LONG_MIN ((cl_long) -0x7FFFFFFFFFFFFFFFLL - 1LL)
#define CL_ULONG_MAX ((cl_ulong) 0xFFFFFFFFFFFFFFFFULL)
#define CL_FLT_DIG 6
#define CL_FLT_MANT_DIG 24
#define CL_FLT_MAX_10_EXP +38
#define CL_FLT_MAX_EXP +128
#define CL_FLT_MIN_10_EXP -37
#define CL_FLT_MIN_EXP -125
#define CL_FLT_RADIX 2
#define CL_FLT_MAX 340282346638528859811704183484516925440.0f
#define CL_FLT_MIN 1.175494350822287507969e-38f
#define CL_FLT_EPSILON 0x1.0p-23f
#define CL_DBL_DIG 15
#define CL_DBL_MANT_DIG 53
#define CL_DBL_MAX_10_EXP +308
#define CL_DBL_MAX_EXP +1024
#define CL_DBL_MIN_10_EXP -307
#define CL_DBL_MIN_EXP -1021
#define CL_DBL_RADIX 2
#define CL_DBL_MAX 179769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458953514382464234321326889464182768467546703537516986049910576551282076245490090389328944075868508455133942304583236903222948165808559332123348274797826204144723168738177180919299881250404026184124858368.0
#define CL_DBL_MIN 2.225073858507201383090e-308
#define CL_DBL_EPSILON 2.220446049250313080847e-16
#define CL_M_E 2.718281828459045090796
#define CL_M_LOG2E 1.442695040888963387005
#define CL_M_LOG10E 0.434294481903251816668
#define CL_M_LN2 0.693147180559945286227
#define CL_M_LN10 2.302585092994045901094
#define CL_M_PI 3.141592653589793115998
#define CL_M_PI_2 1.570796326794896557999
#define CL_M_PI_4 0.785398163397448278999
#define CL_M_1_PI 0.318309886183790691216
#define CL_M_2_PI 0.636619772367581382433
#define CL_M_2_SQRTPI 1.128379167095512558561
#define CL_M_SQRT2 1.414213562373095145475
#define CL_M_SQRT1_2 0.707106781186547572737
#define CL_M_E_F 2.71828174591064f
#define CL_M_LOG2E_F 1.44269502162933f
#define CL_M_LOG10E_F 0.43429449200630f
#define CL_M_LN2_F 0.69314718246460f
#define CL_M_LN10_F 2.30258512496948f
#define CL_M_PI_F 3.14159274101257f
#define CL_M_PI_2_F 1.57079637050629f
#define CL_M_PI_4_F 0.78539818525314f
#define CL_M_1_PI_F 0.31830987334251f
#define CL_M_2_PI_F 0.63661974668503f
#define CL_M_2_SQRTPI_F 1.12837922573090f
#define CL_M_SQRT2_F 1.41421353816986f
#define CL_M_SQRT1_2_F 0.70710676908493f
#define CL_NAN (CL_INFINITY - CL_INFINITY)
#define CL_HUGE_VALF ((cl_float) 1e50)
#define CL_HUGE_VAL ((cl_double) 1e500)
#define CL_MAXFLOAT CL_FLT_MAX
#define CL_INFINITY CL_HUGE_VALF
#else
/* NOTE(review): header name stripped in this copy; the int8_t/uint64_t
 * typedefs that follow require <stdint.h> (restored per upstream header). */
#include <stdint.h>
/* scalar types */
/* Non-MSVC branch: map OpenCL scalar types onto <stdint.h> fixed-width types.
 * The aligned() attributes pin each type's alignment to its size, matching
 * the natural-alignment requirement OpenCL imposes on scalar types. */
typedef int8_t cl_char;
typedef uint8_t cl_uchar;
typedef int16_t cl_short __attribute__((aligned(2)));
typedef uint16_t cl_ushort __attribute__((aligned(2)));
typedef int32_t cl_int __attribute__((aligned(4)));
typedef uint32_t cl_uint __attribute__((aligned(4)));
typedef int64_t cl_long __attribute__((aligned(8)));
typedef uint64_t cl_ulong __attribute__((aligned(8)));
/* cl_half is a 16-bit storage-only type; no half arithmetic is provided here. */
typedef uint16_t cl_half __attribute__((aligned(2)));
typedef float cl_float __attribute__((aligned(4)));
typedef double cl_double __attribute__((aligned(8)));
/* Macro names and corresponding values defined by OpenCL */
#define CL_CHAR_BIT 8
#define CL_SCHAR_MAX 127
#define CL_SCHAR_MIN (-127-1)
#define CL_CHAR_MAX CL_SCHAR_MAX
#define CL_CHAR_MIN CL_SCHAR_MIN
#define CL_UCHAR_MAX 255
#define CL_SHRT_MAX 32767
#define CL_SHRT_MIN (-32767-1)
#define CL_USHRT_MAX 65535
#define CL_INT_MAX 2147483647
#define CL_INT_MIN (-2147483647-1)
#define CL_UINT_MAX 0xffffffffU
#define CL_LONG_MAX ((cl_long) 0x7FFFFFFFFFFFFFFFLL)
#define CL_LONG_MIN ((cl_long) -0x7FFFFFFFFFFFFFFFLL - 1LL)
#define CL_ULONG_MAX ((cl_ulong) 0xFFFFFFFFFFFFFFFFULL)
#define CL_FLT_DIG 6
#define CL_FLT_MANT_DIG 24
#define CL_FLT_MAX_10_EXP +38
#define CL_FLT_MAX_EXP +128
#define CL_FLT_MIN_10_EXP -37
#define CL_FLT_MIN_EXP -125
#define CL_FLT_RADIX 2
#define CL_FLT_MAX 0x1.fffffep127f
#define CL_FLT_MIN 0x1.0p-126f
#define CL_FLT_EPSILON 0x1.0p-23f
#define CL_DBL_DIG 15
#define CL_DBL_MANT_DIG 53
#define CL_DBL_MAX_10_EXP +308
#define CL_DBL_MAX_EXP +1024
#define CL_DBL_MIN_10_EXP -307
#define CL_DBL_MIN_EXP -1021
#define CL_DBL_RADIX 2
#define CL_DBL_MAX 0x1.fffffffffffffp1023
#define CL_DBL_MIN 0x1.0p-1022
#define CL_DBL_EPSILON 0x1.0p-52
#define CL_M_E 2.718281828459045090796
#define CL_M_LOG2E 1.442695040888963387005
#define CL_M_LOG10E 0.434294481903251816668
#define CL_M_LN2 0.693147180559945286227
#define CL_M_LN10 2.302585092994045901094
#define CL_M_PI 3.141592653589793115998
#define CL_M_PI_2 1.570796326794896557999
#define CL_M_PI_4 0.785398163397448278999
#define CL_M_1_PI 0.318309886183790691216
#define CL_M_2_PI 0.636619772367581382433
#define CL_M_2_SQRTPI 1.128379167095512558561
#define CL_M_SQRT2 1.414213562373095145475
#define CL_M_SQRT1_2 0.707106781186547572737
#define CL_M_E_F 2.71828174591064f
#define CL_M_LOG2E_F 1.44269502162933f
#define CL_M_LOG10E_F 0.43429449200630f
#define CL_M_LN2_F 0.69314718246460f
#define CL_M_LN10_F 2.30258512496948f
#define CL_M_PI_F 3.14159274101257f
#define CL_M_PI_2_F 1.57079637050629f
#define CL_M_PI_4_F 0.78539818525314f
#define CL_M_1_PI_F 0.31830987334251f
#define CL_M_2_PI_F 0.63661974668503f
#define CL_M_2_SQRTPI_F 1.12837922573090f
#define CL_M_SQRT2_F 1.41421353816986f
#define CL_M_SQRT1_2_F 0.70710676908493f
#if defined( __GNUC__ )
#define CL_HUGE_VALF __builtin_huge_valf()
#define CL_HUGE_VAL __builtin_huge_val()
#define CL_NAN __builtin_nanf( "" )
#else
#define CL_HUGE_VALF ((cl_float) 1e50)
#define CL_HUGE_VAL ((cl_double) 1e500)
float nanf( const char * );
#define CL_NAN nanf( "" )
#endif
#define CL_MAXFLOAT CL_FLT_MAX
#define CL_INFINITY CL_HUGE_VALF
#endif
/* NOTE(review): header name stripped in this copy; restored per upstream
 * header (size_t/ptrdiff_t are used by the platform layer). */
#include <stddef.h>
/* Mirror types to GL types. Mirror types allow us to avoid deciding which 87s to load based on whether we are using GL or GLES here. */
typedef unsigned int cl_GLuint;
typedef int cl_GLint;
typedef unsigned int cl_GLenum;
/*
* Vector types
*
* Note: OpenCL requires that all types be naturally aligned.
* This means that vector types must be naturally aligned.
* For example, a vector of four floats must be aligned to
* a 16 byte boundary (calculated as 4 * the natural 4-byte
* alignment of the float). The alignment qualifiers here
* will only function properly if your compiler supports them
* and if you don't actively work to defeat them. For example,
* in order for a cl_float4 to be 16 byte aligned in a struct,
* the start of the struct must itself be 16-byte aligned.
*
* Maintaining proper alignment is the user's responsibility.
*/
/* Define basic vector types */
/* AltiVec (PowerPC) native vector types backing the __cl_* wrappers. */
#if defined( __VEC__ )
#include <altivec.h> /* may be omitted depending on compiler. AltiVec spec provides no way to detect whether the header is required. */
typedef vector unsigned char __cl_uchar16;
typedef vector signed char __cl_char16;
typedef vector unsigned short __cl_ushort8;
typedef vector signed short __cl_short8;
typedef vector unsigned int __cl_uint4;
typedef vector signed int __cl_int4;
typedef vector float __cl_float4;
#define __CL_UCHAR16__ 1
#define __CL_CHAR16__ 1
#define __CL_USHORT8__ 1
#define __CL_SHORT8__ 1
#define __CL_UINT4__ 1
#define __CL_INT4__ 1
#define __CL_FLOAT4__ 1
#endif
/* SSE native 4-float vector type backing __cl_float4.
 * NOTE(review): include names were stripped in this copy; restored per the
 * upstream Khronos header (<intrin.h> on MinGW-w64, <xmmintrin.h> elsewhere). */
#if defined( __SSE__ )
#if defined( __MINGW64__ )
#include <intrin.h>
#else
#include <xmmintrin.h>
#endif
#if defined( __GNUC__ )
typedef float __cl_float4 __attribute__((vector_size(16)));
#else
typedef __m128 __cl_float4;
#endif
#define __CL_FLOAT4__ 1
#endif
/* SSE2 native 128-bit vector types backing the __cl_* wrappers.
 * NOTE(review): include names were stripped in this copy; restored per the
 * upstream Khronos header (<intrin.h> on MinGW-w64, <emmintrin.h> elsewhere). */
#if defined( __SSE2__ )
#if defined( __MINGW64__ )
#include <intrin.h>
#else
#include <emmintrin.h>
#endif
#if defined( __GNUC__ )
typedef cl_uchar __cl_uchar16 __attribute__((vector_size(16)));
typedef cl_char __cl_char16 __attribute__((vector_size(16)));
typedef cl_ushort __cl_ushort8 __attribute__((vector_size(16)));
typedef cl_short __cl_short8 __attribute__((vector_size(16)));
typedef cl_uint __cl_uint4 __attribute__((vector_size(16)));
typedef cl_int __cl_int4 __attribute__((vector_size(16)));
typedef cl_ulong __cl_ulong2 __attribute__((vector_size(16)));
typedef cl_long __cl_long2 __attribute__((vector_size(16)));
typedef cl_double __cl_double2 __attribute__((vector_size(16)));
#else
typedef __m128i __cl_uchar16;
typedef __m128i __cl_char16;
typedef __m128i __cl_ushort8;
typedef __m128i __cl_short8;
typedef __m128i __cl_uint4;
typedef __m128i __cl_int4;
typedef __m128i __cl_ulong2;
typedef __m128i __cl_long2;
typedef __m128d __cl_double2;
#endif
#define __CL_UCHAR16__ 1
#define __CL_CHAR16__ 1
#define __CL_USHORT8__ 1
#define __CL_SHORT8__ 1
#define __CL_INT4__ 1
#define __CL_UINT4__ 1
#define __CL_ULONG2__ 1
#define __CL_LONG2__ 1
#define __CL_DOUBLE2__ 1
#endif
/* MMX native 64-bit vector types backing the __cl_* wrappers.
 * NOTE(review): include name was stripped in this copy; restored per the
 * upstream Khronos header (<mmintrin.h>). */
#if defined( __MMX__ )
#include <mmintrin.h>
#if defined( __GNUC__ )
typedef cl_uchar __cl_uchar8 __attribute__((vector_size(8)));
typedef cl_char __cl_char8 __attribute__((vector_size(8)));
typedef cl_ushort __cl_ushort4 __attribute__((vector_size(8)));
typedef cl_short __cl_short4 __attribute__((vector_size(8)));
typedef cl_uint __cl_uint2 __attribute__((vector_size(8)));
typedef cl_int __cl_int2 __attribute__((vector_size(8)));
typedef cl_ulong __cl_ulong1 __attribute__((vector_size(8)));
typedef cl_long __cl_long1 __attribute__((vector_size(8)));
typedef cl_float __cl_float2 __attribute__((vector_size(8)));
#else
typedef __m64 __cl_uchar8;
typedef __m64 __cl_char8;
typedef __m64 __cl_ushort4;
typedef __m64 __cl_short4;
typedef __m64 __cl_uint2;
typedef __m64 __cl_int2;
typedef __m64 __cl_ulong1;
typedef __m64 __cl_long1;
typedef __m64 __cl_float2;
#endif
#define __CL_UCHAR8__ 1
#define __CL_CHAR8__ 1
#define __CL_USHORT4__ 1
#define __CL_SHORT4__ 1
#define __CL_INT2__ 1
#define __CL_UINT2__ 1
#define __CL_ULONG1__ 1
#define __CL_LONG1__ 1
#define __CL_FLOAT2__ 1
#endif
/* AVX native 256-bit vector types backing __cl_float8 / __cl_double4.
 * NOTE(review): include names were stripped in this copy; restored per the
 * upstream Khronos header (<intrin.h> on MinGW-w64, <immintrin.h> elsewhere). */
#if defined( __AVX__ )
#if defined( __MINGW64__ )
#include <intrin.h>
#else
#include <immintrin.h>
#endif
#if defined( __GNUC__ )
typedef cl_float __cl_float8 __attribute__((vector_size(32)));
typedef cl_double __cl_double4 __attribute__((vector_size(32)));
#else
typedef __m256 __cl_float8;
typedef __m256d __cl_double4;
#endif
#define __CL_FLOAT8__ 1
#define __CL_DOUBLE4__ 1
#endif
/* Define alignment keys */
/* CL_ALIGNED(n) requests n-byte alignment for the union wrappers below;
 * it is a no-op everywhere except GCC-compatible compilers. */
#if defined( __GNUC__ )
#define CL_ALIGNED(_x) __attribute__ ((aligned(_x)))
#elif defined( _WIN32) && (_MSC_VER)
/* Alignment keys neutered on windows because MSVC can't swallow function arguments with alignment requirements */
/* http://msdn.microsoft.com/en-us/library/373ak2y1%28VS.71%29.aspx */
/* NOTE(review): commented-out header name below restored from the upstream Khronos header. */
/* #include <crtdefs.h> */
/* #define CL_ALIGNED(_x) _CRT_ALIGN(_x) */
#define CL_ALIGNED(_x)
#else
#warning Need to implement some method to align data here
#define CL_ALIGNED(_x)
#endif
/* Indicate whether .xyzw, .s0123 and .hi.lo are supported */
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
/* .xyzw and .s0123...{f|F} are supported */
#define CL_HAS_NAMED_VECTOR_FIELDS 1
/* .hi and .lo are supported */
#define CL_HAS_HI_LO_VECTOR_FIELDS 1
#endif
/* Define cl_vector types */
/* ---- cl_charn ---- */
/* 2-element signed char vector. The s[] array is the canonical view; the
 * anonymous structs (.x/.y, .s0/.s1, .lo/.hi) are GNU-only aliases of the
 * same bytes, and v2 overlays a native vector when one exists. */
typedef union
{
cl_char CL_ALIGNED(2) s[2];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_char x, y; };
__extension__ struct{ cl_char s0, s1; };
__extension__ struct{ cl_char lo, hi; };
#endif
#if defined( __CL_CHAR2__)
__cl_char2 v2;
#endif
}cl_char2;
/* 4-element signed char vector; same layout scheme as cl_char2, plus
 * sub-vector views (v2[2]) when a native 2-element type exists. */
typedef union
{
cl_char CL_ALIGNED(4) s[4];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_char x, y, z, w; };
__extension__ struct{ cl_char s0, s1, s2, s3; };
__extension__ struct{ cl_char2 lo, hi; };
#endif
#if defined( __CL_CHAR2__)
__cl_char2 v2[2];
#endif
#if defined( __CL_CHAR4__)
__cl_char4 v4;
#endif
}cl_char4;
/* cl_char3 is identical in size, alignment and behavior to cl_char4. See section 6.1.5. */
typedef cl_char4 cl_char3;
/* 8-element signed char vector. Only the first four lanes get .x/.y/.z/.w
 * names (upstream convention); use .s0..s7 for all lanes. */
typedef union
{
cl_char CL_ALIGNED(8) s[8];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_char x, y, z, w; };
__extension__ struct{ cl_char s0, s1, s2, s3, s4, s5, s6, s7; };
__extension__ struct{ cl_char4 lo, hi; };
#endif
#if defined( __CL_CHAR2__)
__cl_char2 v2[4];
#endif
#if defined( __CL_CHAR4__)
__cl_char4 v4[2];
#endif
#if defined( __CL_CHAR8__ )
__cl_char8 v8;
#endif
}cl_char8;
/* 16-element signed char vector. __spacer4..__spacer9 pad the .x/.y/.z/.w
 * view out to lane sa..sf; use .s0..sF for uniform lane access. */
typedef union
{
cl_char CL_ALIGNED(16) s[16];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_char x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
__extension__ struct{ cl_char s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
__extension__ struct{ cl_char8 lo, hi; };
#endif
#if defined( __CL_CHAR2__)
__cl_char2 v2[8];
#endif
#if defined( __CL_CHAR4__)
__cl_char4 v4[4];
#endif
#if defined( __CL_CHAR8__ )
__cl_char8 v8[2];
#endif
#if defined( __CL_CHAR16__ )
__cl_char16 v16;
#endif
}cl_char16;
/* ---- cl_ucharn ---- */
typedef union
{
cl_uchar CL_ALIGNED(2) s[2];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_uchar x, y; };
__extension__ struct{ cl_uchar s0, s1; };
__extension__ struct{ cl_uchar lo, hi; };
#endif
#if defined( __cl_uchar2__)
__cl_uchar2 v2;
#endif
}cl_uchar2;
typedef union
{
cl_uchar CL_ALIGNED(4) s[4];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_uchar x, y, z, w; };
__extension__ struct{ cl_uchar s0, s1, s2, s3; };
__extension__ struct{ cl_uchar2 lo, hi; };
#endif
#if defined( __CL_UCHAR2__)
__cl_uchar2 v2[2];
#endif
#if defined( __CL_UCHAR4__)
__cl_uchar4 v4;
#endif
}cl_uchar4;
/* cl_uchar3 is identical in size, alignment and behavior to cl_uchar4. See section 6.1.5. */
typedef cl_uchar4 cl_uchar3;
typedef union
{
cl_uchar CL_ALIGNED(8) s[8];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_uchar x, y, z, w; };
__extension__ struct{ cl_uchar s0, s1, s2, s3, s4, s5, s6, s7; };
__extension__ struct{ cl_uchar4 lo, hi; };
#endif
#if defined( __CL_UCHAR2__)
__cl_uchar2 v2[4];
#endif
#if defined( __CL_UCHAR4__)
__cl_uchar4 v4[2];
#endif
#if defined( __CL_UCHAR8__ )
__cl_uchar8 v8;
#endif
}cl_uchar8;
typedef union
{
cl_uchar CL_ALIGNED(16) s[16];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_uchar x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
__extension__ struct{ cl_uchar s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
__extension__ struct{ cl_uchar8 lo, hi; };
#endif
#if defined( __CL_UCHAR2__)
__cl_uchar2 v2[8];
#endif
#if defined( __CL_UCHAR4__)
__cl_uchar4 v4[4];
#endif
#if defined( __CL_UCHAR8__ )
__cl_uchar8 v8[2];
#endif
#if defined( __CL_UCHAR16__ )
__cl_uchar16 v16;
#endif
}cl_uchar16;
/* ---- cl_shortn ---- */
typedef union
{
cl_short CL_ALIGNED(4) s[2];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_short x, y; };
__extension__ struct{ cl_short s0, s1; };
__extension__ struct{ cl_short lo, hi; };
#endif
#if defined( __CL_SHORT2__)
__cl_short2 v2;
#endif
}cl_short2;
typedef union
{
cl_short CL_ALIGNED(8) s[4];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_short x, y, z, w; };
__extension__ struct{ cl_short s0, s1, s2, s3; };
__extension__ struct{ cl_short2 lo, hi; };
#endif
#if defined( __CL_SHORT2__)
__cl_short2 v2[2];
#endif
#if defined( __CL_SHORT4__)
__cl_short4 v4;
#endif
}cl_short4;
/* cl_short3 is identical in size, alignment and behavior to cl_short4. See section 6.1.5. */
typedef cl_short4 cl_short3;
typedef union
{
cl_short CL_ALIGNED(16) s[8];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_short x, y, z, w; };
__extension__ struct{ cl_short s0, s1, s2, s3, s4, s5, s6, s7; };
__extension__ struct{ cl_short4 lo, hi; };
#endif
#if defined( __CL_SHORT2__)
__cl_short2 v2[4];
#endif
#if defined( __CL_SHORT4__)
__cl_short4 v4[2];
#endif
#if defined( __CL_SHORT8__ )
__cl_short8 v8;
#endif
}cl_short8;
typedef union
{
cl_short CL_ALIGNED(32) s[16];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_short x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
__extension__ struct{ cl_short s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
__extension__ struct{ cl_short8 lo, hi; };
#endif
#if defined( __CL_SHORT2__)
__cl_short2 v2[8];
#endif
#if defined( __CL_SHORT4__)
__cl_short4 v4[4];
#endif
#if defined( __CL_SHORT8__ )
__cl_short8 v8[2];
#endif
#if defined( __CL_SHORT16__ )
__cl_short16 v16;
#endif
}cl_short16;
/* ---- cl_ushortn ---- */
typedef union
{
cl_ushort CL_ALIGNED(4) s[2];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_ushort x, y; };
__extension__ struct{ cl_ushort s0, s1; };
__extension__ struct{ cl_ushort lo, hi; };
#endif
#if defined( __CL_USHORT2__)
__cl_ushort2 v2;
#endif
}cl_ushort2;
typedef union
{
cl_ushort CL_ALIGNED(8) s[4];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_ushort x, y, z, w; };
__extension__ struct{ cl_ushort s0, s1, s2, s3; };
__extension__ struct{ cl_ushort2 lo, hi; };
#endif
#if defined( __CL_USHORT2__)
__cl_ushort2 v2[2];
#endif
#if defined( __CL_USHORT4__)
__cl_ushort4 v4;
#endif
}cl_ushort4;
/* cl_ushort3 is identical in size, alignment and behavior to cl_ushort4. See section 6.1.5. */
typedef cl_ushort4 cl_ushort3;
typedef union
{
cl_ushort CL_ALIGNED(16) s[8];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_ushort x, y, z, w; };
__extension__ struct{ cl_ushort s0, s1, s2, s3, s4, s5, s6, s7; };
__extension__ struct{ cl_ushort4 lo, hi; };
#endif
#if defined( __CL_USHORT2__)
__cl_ushort2 v2[4];
#endif
#if defined( __CL_USHORT4__)
__cl_ushort4 v4[2];
#endif
#if defined( __CL_USHORT8__ )
__cl_ushort8 v8;
#endif
}cl_ushort8;
typedef union
{
cl_ushort CL_ALIGNED(32) s[16];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_ushort x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
__extension__ struct{ cl_ushort s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
__extension__ struct{ cl_ushort8 lo, hi; };
#endif
#if defined( __CL_USHORT2__)
__cl_ushort2 v2[8];
#endif
#if defined( __CL_USHORT4__)
__cl_ushort4 v4[4];
#endif
#if defined( __CL_USHORT8__ )
__cl_ushort8 v8[2];
#endif
#if defined( __CL_USHORT16__ )
__cl_ushort16 v16;
#endif
}cl_ushort16;
/* ---- cl_intn ---- */
typedef union
{
cl_int CL_ALIGNED(8) s[2];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_int x, y; };
__extension__ struct{ cl_int s0, s1; };
__extension__ struct{ cl_int lo, hi; };
#endif
#if defined( __CL_INT2__)
__cl_int2 v2;
#endif
}cl_int2;
typedef union
{
cl_int CL_ALIGNED(16) s[4];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_int x, y, z, w; };
__extension__ struct{ cl_int s0, s1, s2, s3; };
__extension__ struct{ cl_int2 lo, hi; };
#endif
#if defined( __CL_INT2__)
__cl_int2 v2[2];
#endif
#if defined( __CL_INT4__)
__cl_int4 v4;
#endif
}cl_int4;
/* cl_int3 is identical in size, alignment and behavior to cl_int4. See section 6.1.5. */
typedef cl_int4 cl_int3;
typedef union
{
cl_int CL_ALIGNED(32) s[8];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_int x, y, z, w; };
__extension__ struct{ cl_int s0, s1, s2, s3, s4, s5, s6, s7; };
__extension__ struct{ cl_int4 lo, hi; };
#endif
#if defined( __CL_INT2__)
__cl_int2 v2[4];
#endif
#if defined( __CL_INT4__)
__cl_int4 v4[2];
#endif
#if defined( __CL_INT8__ )
__cl_int8 v8;
#endif
}cl_int8;
typedef union
{
cl_int CL_ALIGNED(64) s[16];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_int x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
__extension__ struct{ cl_int s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
__extension__ struct{ cl_int8 lo, hi; };
#endif
#if defined( __CL_INT2__)
__cl_int2 v2[8];
#endif
#if defined( __CL_INT4__)
__cl_int4 v4[4];
#endif
#if defined( __CL_INT8__ )
__cl_int8 v8[2];
#endif
#if defined( __CL_INT16__ )
__cl_int16 v16;
#endif
}cl_int16;
/* ---- cl_uintn ---- */
typedef union
{
cl_uint CL_ALIGNED(8) s[2];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_uint x, y; };
__extension__ struct{ cl_uint s0, s1; };
__extension__ struct{ cl_uint lo, hi; };
#endif
#if defined( __CL_UINT2__)
__cl_uint2 v2;
#endif
}cl_uint2;
typedef union
{
cl_uint CL_ALIGNED(16) s[4];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_uint x, y, z, w; };
__extension__ struct{ cl_uint s0, s1, s2, s3; };
__extension__ struct{ cl_uint2 lo, hi; };
#endif
#if defined( __CL_UINT2__)
__cl_uint2 v2[2];
#endif
#if defined( __CL_UINT4__)
__cl_uint4 v4;
#endif
}cl_uint4;
/* cl_uint3 is identical in size, alignment and behavior to cl_uint4. See section 6.1.5. */
typedef cl_uint4 cl_uint3;
typedef union
{
cl_uint CL_ALIGNED(32) s[8];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_uint x, y, z, w; };
__extension__ struct{ cl_uint s0, s1, s2, s3, s4, s5, s6, s7; };
__extension__ struct{ cl_uint4 lo, hi; };
#endif
#if defined( __CL_UINT2__)
__cl_uint2 v2[4];
#endif
#if defined( __CL_UINT4__)
__cl_uint4 v4[2];
#endif
#if defined( __CL_UINT8__ )
__cl_uint8 v8;
#endif
}cl_uint8;
typedef union
{
cl_uint CL_ALIGNED(64) s[16];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_uint x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
__extension__ struct{ cl_uint s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
__extension__ struct{ cl_uint8 lo, hi; };
#endif
#if defined( __CL_UINT2__)
__cl_uint2 v2[8];
#endif
#if defined( __CL_UINT4__)
__cl_uint4 v4[4];
#endif
#if defined( __CL_UINT8__ )
__cl_uint8 v8[2];
#endif
#if defined( __CL_UINT16__ )
__cl_uint16 v16;
#endif
}cl_uint16;
/* ---- cl_longn ---- */
typedef union
{
cl_long CL_ALIGNED(16) s[2];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_long x, y; };
__extension__ struct{ cl_long s0, s1; };
__extension__ struct{ cl_long lo, hi; };
#endif
#if defined( __CL_LONG2__)
__cl_long2 v2;
#endif
}cl_long2;
typedef union
{
cl_long CL_ALIGNED(32) s[4];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_long x, y, z, w; };
__extension__ struct{ cl_long s0, s1, s2, s3; };
__extension__ struct{ cl_long2 lo, hi; };
#endif
#if defined( __CL_LONG2__)
__cl_long2 v2[2];
#endif
#if defined( __CL_LONG4__)
__cl_long4 v4;
#endif
}cl_long4;
/* cl_long3 is identical in size, alignment and behavior to cl_long4. See section 6.1.5. */
typedef cl_long4 cl_long3;
typedef union
{
cl_long CL_ALIGNED(64) s[8];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_long x, y, z, w; };
__extension__ struct{ cl_long s0, s1, s2, s3, s4, s5, s6, s7; };
__extension__ struct{ cl_long4 lo, hi; };
#endif
#if defined( __CL_LONG2__)
__cl_long2 v2[4];
#endif
#if defined( __CL_LONG4__)
__cl_long4 v4[2];
#endif
#if defined( __CL_LONG8__ )
__cl_long8 v8;
#endif
}cl_long8;
typedef union
{
cl_long CL_ALIGNED(128) s[16];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_long x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
__extension__ struct{ cl_long s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
__extension__ struct{ cl_long8 lo, hi; };
#endif
#if defined( __CL_LONG2__)
__cl_long2 v2[8];
#endif
#if defined( __CL_LONG4__)
__cl_long4 v4[4];
#endif
#if defined( __CL_LONG8__ )
__cl_long8 v8[2];
#endif
#if defined( __CL_LONG16__ )
__cl_long16 v16;
#endif
}cl_long16;
/* ---- cl_ulongn ---- */
typedef union
{
cl_ulong CL_ALIGNED(16) s[2];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_ulong x, y; };
__extension__ struct{ cl_ulong s0, s1; };
__extension__ struct{ cl_ulong lo, hi; };
#endif
#if defined( __CL_ULONG2__)
__cl_ulong2 v2;
#endif
}cl_ulong2;
typedef union
{
cl_ulong CL_ALIGNED(32) s[4];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_ulong x, y, z, w; };
__extension__ struct{ cl_ulong s0, s1, s2, s3; };
__extension__ struct{ cl_ulong2 lo, hi; };
#endif
#if defined( __CL_ULONG2__)
__cl_ulong2 v2[2];
#endif
#if defined( __CL_ULONG4__)
__cl_ulong4 v4;
#endif
}cl_ulong4;
/* cl_ulong3 is identical in size, alignment and behavior to cl_ulong4. See section 6.1.5. */
typedef cl_ulong4 cl_ulong3;
typedef union
{
cl_ulong CL_ALIGNED(64) s[8];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_ulong x, y, z, w; };
__extension__ struct{ cl_ulong s0, s1, s2, s3, s4, s5, s6, s7; };
__extension__ struct{ cl_ulong4 lo, hi; };
#endif
#if defined( __CL_ULONG2__)
__cl_ulong2 v2[4];
#endif
#if defined( __CL_ULONG4__)
__cl_ulong4 v4[2];
#endif
#if defined( __CL_ULONG8__ )
__cl_ulong8 v8;
#endif
}cl_ulong8;
typedef union
{
cl_ulong CL_ALIGNED(128) s[16];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_ulong x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
__extension__ struct{ cl_ulong s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
__extension__ struct{ cl_ulong8 lo, hi; };
#endif
#if defined( __CL_ULONG2__)
__cl_ulong2 v2[8];
#endif
#if defined( __CL_ULONG4__)
__cl_ulong4 v4[4];
#endif
#if defined( __CL_ULONG8__ )
__cl_ulong8 v8[2];
#endif
#if defined( __CL_ULONG16__ )
__cl_ulong16 v16;
#endif
}cl_ulong16;
/* --- cl_floatn ---- */
/* 2-element float vector; 8-byte aligned to match OpenCL's float2.
 * The anonymous structs expose .x/.y, .s0/.s1 and .lo/.hi accessors only
 * on GNU C (non-strict-ANSI); .v2 is the native SIMD type when available. */
typedef union
{
    cl_float CL_ALIGNED(8) s[2];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
    __extension__ struct{ cl_float x, y; };
    __extension__ struct{ cl_float s0, s1; };
    __extension__ struct{ cl_float lo, hi; };
#endif
#if defined( __CL_FLOAT2__)
    __cl_float2 v2;
#endif
}cl_float2;
/* 4-element float vector; 16-byte aligned. .lo/.hi split into cl_float2
 * halves; .v2[]/.v4 give native SIMD views when the platform defines them. */
typedef union
{
    cl_float CL_ALIGNED(16) s[4];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
    __extension__ struct{ cl_float x, y, z, w; };
    __extension__ struct{ cl_float s0, s1, s2, s3; };
    __extension__ struct{ cl_float2 lo, hi; };
#endif
#if defined( __CL_FLOAT2__)
    __cl_float2 v2[2];
#endif
#if defined( __CL_FLOAT4__)
    __cl_float4 v4;
#endif
}cl_float4;
/* cl_float3 is identical in size, alignment and behavior to cl_float4. See section 6.1.5. */
typedef cl_float4 cl_float3;
/* 8-element float vector; 32-byte aligned. Note: per the upstream Khronos
 * header, only the first four components get .x/.y/.z/.w names. */
typedef union
{
    cl_float CL_ALIGNED(32) s[8];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
    __extension__ struct{ cl_float x, y, z, w; };
    __extension__ struct{ cl_float s0, s1, s2, s3, s4, s5, s6, s7; };
    __extension__ struct{ cl_float4 lo, hi; };
#endif
#if defined( __CL_FLOAT2__)
    __cl_float2 v2[4];
#endif
#if defined( __CL_FLOAT4__)
    __cl_float4 v4[2];
#endif
#if defined( __CL_FLOAT8__ )
    __cl_float8 v8;
#endif
}cl_float16;
/* 16-element float vector; 64-byte aligned. __spacer4..9 pad the named-field
 * view so that sa..sf land on components 10..15. */
typedef union
{
    cl_float CL_ALIGNED(64) s[16];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
    __extension__ struct{ cl_float x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
    __extension__ struct{ cl_float s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
    __extension__ struct{ cl_float8 lo, hi; };
#endif
#if defined( __CL_FLOAT2__)
    __cl_float2 v2[8];
#endif
#if defined( __CL_FLOAT4__)
    __cl_float4 v4[4];
#endif
#if defined( __CL_FLOAT8__ )
    __cl_float8 v8[2];
#endif
#if defined( __CL_FLOAT16__ )
    __cl_float16 v16;
#endif
}cl_float16;
/* --- cl_doublen ---- */
typedef union
{
cl_double CL_ALIGNED(16) s[2];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_double x, y; };
__extension__ struct{ cl_double s0, s1; };
__extension__ struct{ cl_double lo, hi; };
#endif
#if defined( __CL_DOUBLE2__)
__cl_double2 v2;
#endif
}cl_double2;
typedef union
{
cl_double CL_ALIGNED(32) s[4];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_double x, y, z, w; };
__extension__ struct{ cl_double s0, s1, s2, s3; };
__extension__ struct{ cl_double2 lo, hi; };
#endif
#if defined( __CL_DOUBLE2__)
__cl_double2 v2[2];
#endif
#if defined( __CL_DOUBLE4__)
__cl_double4 v4;
#endif
}cl_double4;
/* cl_double3 is identical in size, alignment and behavior to cl_double4. See section 6.1.5. */
typedef cl_double4 cl_double3;
typedef union
{
cl_double CL_ALIGNED(64) s[8];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_double x, y, z, w; };
__extension__ struct{ cl_double s0, s1, s2, s3, s4, s5, s6, s7; };
__extension__ struct{ cl_double4 lo, hi; };
#endif
#if defined( __CL_DOUBLE2__)
__cl_double2 v2[4];
#endif
#if defined( __CL_DOUBLE4__)
__cl_double4 v4[2];
#endif
#if defined( __CL_DOUBLE8__ )
__cl_double8 v8;
#endif
}cl_double8;
typedef union
{
cl_double CL_ALIGNED(128) s[16];
#if defined( __GNUC__) && ! defined( __STRICT_ANSI__ )
__extension__ struct{ cl_double x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; };
__extension__ struct{ cl_double s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; };
__extension__ struct{ cl_double8 lo, hi; };
#endif
#if defined( __CL_DOUBLE2__)
__cl_double2 v2[8];
#endif
#if defined( __CL_DOUBLE4__)
__cl_double4 v4[4];
#endif
#if defined( __CL_DOUBLE8__ )
__cl_double8 v8[2];
#endif
#if defined( __CL_DOUBLE16__ )
__cl_double16 v16;
#endif
}cl_double16;
/* Macro to facilitate debugging
* Usage:
* Place CL_PROGRAM_STRING_DEBUG_INFO on the line before the first line of your source.
* The first line ends with: CL_PROGRAM_STRING_DEBUG_INFO \"
* Each line thereafter of OpenCL C source must end with: \n\
* The last line ends in ";
*
* Example:
*
* const char *my_program = CL_PROGRAM_STRING_DEBUG_INFO "\
* kernel void foo( int a, float * b ) \n\
* { \n\
* // my comment \n\
* *b[ get_global_id(0)] = a; \n\
* } \n\
* ";
*
* This should correctly set up the line, (column) and file information for your source
* string so you can do source level debugging.
*/
/* Two-step stringification so that macro arguments (here __LINE__) are
 * expanded to their values BEFORE being turned into a string literal. */
#define __CL_STRINGIFY( _x ) # _x
#define _CL_STRINGIFY( _x ) __CL_STRINGIFY( _x )
/* Emits a "#line N \"file\"" directive into an OpenCL C source string so
 * compiler diagnostics and debuggers map back to the host source file. */
#define CL_PROGRAM_STRING_DEBUG_INFO "#line " _CL_STRINGIFY(__LINE__) " \"" __FILE__ "\" \n\n"
#ifdef __cplusplus
}
#endif
#endif /* __CL_PLATFORM_H */
HandBrake-0.10.2/libhb/hb_dict.h 0000664 0001752 0001752 00000003260 12463330511 016663 0 ustar handbrake handbrake /* hb_dict.h
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#if !defined(HB_DICT_H)
#define HB_DICT_H
typedef struct hb_dict_entry_s hb_dict_entry_t;
typedef struct hb_dict_s hb_dict_t;
/* Basic dictionary implementation.
*
* an hb_dict_t must be initialized with hb_dict_init() before use.
*
* "key" must be a string with non-zero length (NULL and "" are invalid keys).
* "value" can be NULL (the zero-length string "" is mapped to NULL).
*
* hb_dict_next( dict, NULL ) returns the first key in the dictionary.
* hb_dict_next( dict, previous ) returns key directly following previous, or
* NULL if the end of the dictionary was reached.
*
* hb_encopts_to_dict() converts an op1=val1:opt2=val2:opt3=val3 type string to
* an hb_dict_t dictionary. */
/* Allocate an empty dictionary with room for 'alloc' entries
 * (storage grows as needed when more keys are set). */
hb_dict_t * hb_dict_init( int alloc );
/* Release the dictionary and its entries via a double pointer
 * (presumably also NULLs *dict_ptr -- confirm against the implementation). */
void hb_dict_free( hb_dict_t ** dict_ptr );
/* Insert or replace the entry for 'key'. Per the contract above, 'key'
 * must be non-empty; a NULL or zero-length 'value' is stored as NULL. */
void hb_dict_set( hb_dict_t ** dict_ptr, const char * key, const char * value );
/* Remove the entry for 'key', if present. */
void hb_dict_unset( hb_dict_t ** dict_ptr, const char * key );
/* Look up 'key'; returns its entry, or NULL when not found. */
hb_dict_entry_t * hb_dict_get( hb_dict_t * dict, const char * key );
/* Iterate: pass NULL for the first entry, then the previous entry;
 * returns NULL once the end of the dictionary is reached. */
hb_dict_entry_t * hb_dict_next( hb_dict_t * dict, hb_dict_entry_t * previous );
/* Parse an "opt1=val1:opt2=val2" encoder-options string into a dictionary. */
hb_dict_t * hb_encopts_to_dict( const char * encopts, int encoder );
/* Inverse of hb_encopts_to_dict(); returns a newly built options string
 * (presumably heap-allocated, caller frees -- confirm in common.c). */
char * hb_dict_to_encopts( hb_dict_t * dict );
/* A single key/value pair. */
struct hb_dict_entry_s
{
    char * key;   // non-empty string (see contract above)
    char * value; // may be NULL ("" is mapped to NULL on insert)
};
/* Flat array-backed dictionary. */
struct hb_dict_s
{
    int alloc;                 // number of entry slots allocated
    int count;                 // number of slots currently in use
    hb_dict_entry_t * objects; // entry array: 'count' valid of 'alloc'
};
#endif // !defined(HB_DICT_H)
HandBrake-0.10.2/libhb/module.defs 0000664 0001752 0001752 00000011343 12417602031 017245 0 ustar handbrake handbrake __deps__ := A52DEC BZIP2 LIBVPX FFMPEG FONTCONFIG FREETYPE LAME LIBASS LIBDCA \
LIBDVDREAD LIBDVDNAV LIBICONV LIBOGG LIBSAMPLERATE LIBTHEORA LIBVORBIS LIBXML2 \
PTHREADW32 X264 X265 ZLIB LIBBLURAY FDKAAC LIBMFX LIBGNURX
$(eval $(call import.MODULE.defs,LIBHB,libhb,$(__deps__)))
$(eval $(call import.GCC,LIBHB))
###############################################################################
LIBHB.src/ = $(SRC/)libhb/
LIBHB.build/ = $(BUILD/)libhb/
LIBHB.m4.in = $(wildcard $(LIBHB.src/)*.m4)
LIBHB.m4.out = $(patsubst $(LIBHB.src/)%.m4,$(LIBHB.build/)%,$(LIBHB.m4.in))
LIBHB.c = $(wildcard $(LIBHB.src/)*.c)
LIBHB.c.o = $(patsubst $(SRC/)%.c,$(BUILD/)%.o,$(LIBHB.c))
LIBHB.d = $(LIBHB.m4.out) $(LIBHB.h.out) \
$(foreach n,$(LIBHB.prerequisites),$($n.INSTALL.target) )
LIBHB.h.in = $(wildcard $(LIBHB.src/)*.h)
LIBHB.h.in += $(wildcard $(LIBHB.src/)extras/*.h)
LIBHB.h.out = $(patsubst $(SRC/)%,$(BUILD/)%,$(LIBHB.h.in))
###############################################################################
LIBHB.a = $(LIBHB.build/)$(call TARGET.archive,handbrake)
###############################################################################
LIBHB.out += $(LIBHB.m4.out)
LIBHB.out += $(LIBHB.c.o)
LIBHB.out += $(LIBHB.h.out)
LIBHB.out += $(LIBHB.a)
###############################################################################
ifeq (1,$(FEATURE.hwd))
LIBHB.GCC.D += USE_HWD
endif
ifeq (1,$(FEATURE.libav_aac))
LIBHB.GCC.D += USE_LIBAV_AAC
endif
LIBHB.GCC.D += __LIBHB__ USE_PTHREAD
LIBHB.GCC.I += $(LIBHB.build/) $(CONTRIB.build/)include
ifeq ($(BUILD.system),cygwin)
LIBHB.GCC.D += SYS_CYGWIN
else ifeq ($(BUILD.system),darwin)
LIBHB.GCC.D += SYS_DARWIN
LIBHB.c += $(wildcard $(LIBHB.src/)platform/macosx/*.c)
else ifeq ($(BUILD.system),linux)
LIBHB.GCC.D += SYS_LINUX _LARGEFILE_SOURCE _FILE_OFFSET_BITS=64
else ifeq ($(BUILD.system),mingw)
LIBHB.GCC.D += SYS_MINGW PTW32_STATIC_LIB
LIBHB.GCC.args.extra.dylib++ += -Wl,--enable-auto-import -static
else ifeq ($(BUILD.system),solaris)
LIBHB.GCC.D += SYS_SunOS _LARGEFILE_SOURCE _FILE_OFFSET_BITS=64 _POSIX_C_SOURCE=200112L __EXTENSIONS__
else
LIBHB.platform.D = SYS_UNKNOWN
endif
ifeq (1,$(FEATURE.qsv))
LIBHB.GCC.D += USE_QSV HAVE_THREADS=1
endif
ifeq (1,$(FEATURE.x265))
LIBHB.GCC.D += USE_X265
endif
## required for
ifneq (,$(filter $(BUILD.arch),ppc ppc64))
LIBHB.GCC.D += WORDS_BIGENDIAN
endif
###############################################################################
## when defined this gives us the subdir name, or flavor of asm implementation
ifneq (disabled,$(FEATURE.asm))
LIBHB.yasm.src/ = $(LIBHB.src/)$(FEATURE.asm)/
LIBHB.yasm.build/ = $(LIBHB.build/)$(FEATURE.asm)/
LIBHB.yasm.asm = $(LIBHB.yasm.src/)deinterlace-a.asm
LIBHB.yasm.o = $(LIBHB.yasm.asm:$(LIBHB.yasm.src/)%.asm=$(LIBHB.yasm.build/)%.o)
## fix: was $(LIBHB.yasmsrc/) -- an undefined variable that expands to the
## empty string, making the wildcard scan the wrong directory for headers
LIBHB.yasm.d = $(wildcard $(LIBHB.yasm.src/)*.h)
LIBHB.YASM.I = $(LIBHB.yasm.src/)
LIBHB.YASM.ASM_O = $(strip $(YASM.exe) \
    -f $(LIBHB.YASM.f) \
    -m $(LIBHB.YASM.m) \
    $(LIBHB.YASM.D:%=-D%) \
    $(LIBHB.YASM.I:%=-I%) \
    -o $(1) $(2))
LIBHB.out += $(LIBHB.yasm.o)
endif
###############################################################################
ifeq (1-mingw,$(BUILD.cross)-$(BUILD.system))
LIBHB.dll = $(LIBHB.build/)hb.dll
LIBHB.lib = $(LIBHB.build/)hb.lib
LIBHB.dll.libs = $(foreach n, \
ass avcodec avformat avutil avresample dvdnav dvdread \
fontconfig freetype mp3lame \
ogg samplerate swscale vpx theora vorbis vorbisenc x264 xml2 bluray, \
$(CONTRIB.build/)lib/lib$(n).a )
ifeq (1,$(FEATURE.fdk_aac))
LIBHB.dll.libs += $(CONTRIB.build/)lib/libfdk-aac.a
endif
ifeq (1,$(FEATURE.qsv))
LIBHB.dll.libs += $(CONTRIB.build/)lib/libmfx.a
endif
ifeq (1,$(FEATURE.x265))
LIBHB.dll.libs += $(CONTRIB.build/)lib/libx265.a
endif
ifneq ($(HAS.iconv),1)
LIBHB.dll.libs += $(CONTRIB.build/)lib/libiconv.a
else
LIBHB.GCC.l += iconv
endif
ifneq ($(HAS.regex),1)
LIBHB.dll.libs += $(CONTRIB.build/)lib/libregex.a
else
LIBHB.GCC.l += regex
endif
ifeq (1,$(FRIBIDI.enabled))
LIBHB.dll.libs += $(CONTRIB.build/)lib/libfribidi.a
else
LIBHB.GCC.l += fribidi
endif
ifneq ($(HAS.pthread),1)
LIBHB.dll.libs += $(CONTRIB.build/)lib/libpthreadGC2.a
else
LIBHB.GCC.l += pthreadGC2
endif
ifneq ($(HAS.bz2),1)
LIBHB.dll.libs += $(CONTRIB.build/)lib/libbz2.a
else
LIBHB.GCC.l += bz2
endif
ifneq ($(HAS.libz),1)
LIBHB.dll.libs += $(CONTRIB.build/)lib/libz.a
else
LIBHB.GCC.l += z
endif
LIBHB.GCC.args.extra.dylib++ += -Wl,--out-implib,$(LIBHB.lib)
LIBHB.GCC.l += ws2_32
ifeq ($(HAS.dlfcn),1)
LIBHB.GCC.l += dl
endif
LIBHB.out += $(LIBHB.dll) $(LIBHB.lib)
endif
###############################################################################
BUILD.out += $(LIBHB.out)
HandBrake-0.10.2/libhb/h264_common.h 0000664 0001752 0001752 00000003212 12463330511 017317 0 ustar handbrake handbrake /* h264_common.h
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#ifndef HB_H264_COMMON_H
#define HB_H264_COMMON_H
/* Name/value tables for H.264 profiles, levels and VUI parameters.
 * Each name list is NULL-terminated. */
static const char * const hb_h264_profile_names[] = { "auto", "high", "main", "baseline", NULL, };
static const char * const hb_h264_level_names[] = { "auto", "1.0", "1b", "1.1", "1.2", "1.3", "2.0", "2.1", "2.2", "3.0", "3.1", "3.2", "4.0", "4.1", "4.2", "5.0", "5.1", "5.2", NULL, };
/* Numeric level_idc values, index-aligned with hb_h264_level_names
 * ("auto" == -1, "1b" == 9); terminated by 0.
 * Fix: dropped the duplicate qualifier in "const int const" -- redundant in
 * C99 and a constraint violation in C89; the intent was a single const. */
static const int hb_h264_level_values[] = { -1, 10, 9, 11, 12, 13, 20, 21, 22, 30, 31, 32, 40, 41, 42, 50, 51, 52, 0, };
// stolen from libx264's x264.h
// VUI name tables: list position corresponds to the coded enum value, so
// reserved codes appear as "" placeholders.
static const char * const hb_h264_fullrange_names[] = { "off", "on", NULL, };
static const char * const hb_h264_vidformat_names[] = { "component", "pal", "ntsc", "secam", "mac", "undef", NULL, };
static const char * const hb_h264_colorprim_names[] = { "", "bt709", "undef", "", "bt470m", "bt470bg", "smpte170m", "smpte240m", "film", "bt2020", NULL, };
static const char * const hb_h264_transfer_names[] = { "", "bt709", "undef", "", "bt470m", "bt470bg", "smpte170m", "smpte240m", "linear", "log100", "log316", "iec61966-2-4", "bt1361e", "iec61966-2-1", "bt2020-10", "bt2020-12", NULL, };
static const char * const hb_h264_colmatrix_names[] = { "GBR", "bt709", "undef", "", "fcc", "bt470bg", "smpte170m", "smpte240m", "YCgCo", "bt2020nc", "bt2020c", NULL, };
#endif //HB_H264_COMMON_H
HandBrake-0.10.2/libhb/common.c 0000664 0001752 0001752 00000404043 12470166275 016572 0 ustar handbrake handbrake /* common.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include
#include
#include
#include
#include "hb.h"
#include "x264.h"
#include "lang.h"
#include "common.h"
#include "h264_common.h"
#include "h265_common.h"
#ifdef USE_QSV
#include "qsv_common.h"
#endif
#ifdef USE_X265
#include "x265.h"
#endif
#ifdef SYS_MINGW
#include
#endif
/**********************************************************************
* Global variables
*********************************************************************/
static hb_error_handler_t *error_handler = NULL;
/* Generic IDs for encoders, containers, etc. */
/* Generic group IDs: each encoder/muxer entry below carries one, so that
 * legacy aliases and their current replacements share a group (e.g. the
 * disabled "ffmpeg" alias and the active "mpeg4" encoder are both
 * HB_GID_VCODEC_MPEG4) and lookups can map old names to the enabled item. */
enum
{
    HB_GID_NONE = -1, // encoders must NEVER use it
    HB_GID_VCODEC_H264,
    HB_GID_VCODEC_H265,
    HB_GID_VCODEC_MPEG2,
    HB_GID_VCODEC_MPEG4,
    HB_GID_VCODEC_THEORA,
    HB_GID_VCODEC_VP8,
    HB_GID_ACODEC_AAC,
    HB_GID_ACODEC_AAC_HE,
    HB_GID_ACODEC_AAC_PASS,
    HB_GID_ACODEC_AC3,
    HB_GID_ACODEC_AC3_PASS,
    HB_GID_ACODEC_AUTO_PASS,
    HB_GID_ACODEC_DTS_PASS,
    HB_GID_ACODEC_DTSHD_PASS,
    HB_GID_ACODEC_FLAC,
    HB_GID_ACODEC_MP3,
    HB_GID_ACODEC_MP3_PASS,
    HB_GID_ACODEC_VORBIS,
    HB_GID_MUX_MKV,
    HB_GID_MUX_MP4,
};
/* Wraps a public hb_rate_t with list-management state: 'next' links the
 * item into the public list and 'enabled' gates whether it is exposed at
 * all (legacy entries are kept for name lookups but disabled). */
typedef struct
{
    hb_rate_t item;
    hb_rate_t *next;    // next enabled item in the public list
    int enabled;        // 0: legacy/hidden, 1: exposed
} hb_rate_internal_t;
// Head/tail of the public linked list of video framerates (presumably
// built at init time from hb_video_rates[] -- confirm in the setup code).
hb_rate_t *hb_video_rates_first_item = NULL;
hb_rate_t *hb_video_rates_last_item = NULL;
/* Video framerate table. The numeric rate is the frame duration in 27MHz
 * clock ticks, i.e. 27000000 / fps (e.g. "5" -> 5400000, "24" -> 1125000);
 * the NTSC rates use the exact rational form (23.976 -> 1126125). */
hb_rate_internal_t hb_video_rates[] =
{
    // legacy framerates (disabled)
    { { "23.976 (NTSC Film)", 1126125, }, NULL, 0, },
    { { "25 (PAL Film/Video)", 1080000, }, NULL, 0, },
    { { "29.97 (NTSC Video)", 900900, }, NULL, 0, },
    // actual framerates
    { { "5", 5400000, }, NULL, 1, },
    { { "10", 2700000, }, NULL, 1, },
    { { "12", 2250000, }, NULL, 1, },
    { { "15", 1800000, }, NULL, 1, },
    { { "23.976", 1126125, }, NULL, 1, },
    { { "24", 1125000, }, NULL, 1, },
    { { "25", 1080000, }, NULL, 1, },
    { { "29.97", 900900, }, NULL, 1, },
    { { "30", 900000, }, NULL, 1, },
    { { "50", 540000, }, NULL, 1, },
    { { "59.94", 450450, }, NULL, 1, },
    { { "60", 450000, }, NULL, 1, },
};
int hb_video_rates_count = sizeof(hb_video_rates) / sizeof(hb_video_rates[0]);
// Head/tail of the public linked list of audio sample rates.
hb_rate_t *hb_audio_rates_first_item = NULL;
hb_rate_t *hb_audio_rates_last_item = NULL;
/* Audio sample rate table: display name in kHz, numeric value in Hz. */
hb_rate_internal_t hb_audio_rates[] =
{
    { { "8", 8000, }, NULL, 1, },
    { { "11.025", 11025, }, NULL, 1, },
    { { "12", 12000, }, NULL, 1, },
    { { "16", 16000, }, NULL, 1, },
    { { "22.05", 22050, }, NULL, 1, },
    { { "24", 24000, }, NULL, 1, },
    { { "32", 32000, }, NULL, 1, },
    { { "44.1", 44100, }, NULL, 1, },
    { { "48", 48000, }, NULL, 1, },
};
int hb_audio_rates_count = sizeof(hb_audio_rates) / sizeof(hb_audio_rates[0]);
// Head/tail of the public linked list of audio bitrates.
hb_rate_t *hb_audio_bitrates_first_item = NULL;
hb_rate_t *hb_audio_bitrates_last_item = NULL;
/* Audio bitrate table, values in kbit/s. */
hb_rate_internal_t hb_audio_bitrates[] =
{
    // AC3-compatible bitrates
    { { "32", 32, }, NULL, 1, },
    { { "40", 40, }, NULL, 1, },
    { { "48", 48, }, NULL, 1, },
    { { "56", 56, }, NULL, 1, },
    { { "64", 64, }, NULL, 1, },
    { { "80", 80, }, NULL, 1, },
    { { "96", 96, }, NULL, 1, },
    { { "112", 112, }, NULL, 1, },
    { { "128", 128, }, NULL, 1, },
    { { "160", 160, }, NULL, 1, },
    { { "192", 192, }, NULL, 1, },
    { { "224", 224, }, NULL, 1, },
    { { "256", 256, }, NULL, 1, },
    { { "320", 320, }, NULL, 1, },
    { { "384", 384, }, NULL, 1, },
    { { "448", 448, }, NULL, 1, },
    { { "512", 512, }, NULL, 1, },
    { { "576", 576, }, NULL, 1, },
    { { "640", 640, }, NULL, 1, },
    // additional bitrates
    { { "768", 768, }, NULL, 1, },
    { { "960", 960, }, NULL, 1, },
    { { "1152", 1152, }, NULL, 1, },
    { { "1344", 1344, }, NULL, 1, },
    { { "1536", 1536, }, NULL, 1, },
};
int hb_audio_bitrates_count = sizeof(hb_audio_bitrates) / sizeof(hb_audio_bitrates[0]);
/* Wraps a public hb_dither_t with list state (same pattern as
 * hb_rate_internal_t): 'next' links enabled items, 'enabled' gates exposure. */
typedef struct
{
    hb_dither_t item;
    hb_dither_t *next;
    int enabled;
} hb_dither_internal_t;
// Head/tail of the public list of audio dither methods.
hb_dither_t *hb_audio_dithers_first_item = NULL;
hb_dither_t *hb_audio_dithers_last_item = NULL;
/* Audio dither methods, mapped onto libavresample's AV_RESAMPLE_DITHER_*
 * values; "auto" uses AV_RESAMPLE_DITHER_NONE - 1 as an out-of-band marker. */
hb_dither_internal_t hb_audio_dithers[] =
{
    { { "default", "auto", AV_RESAMPLE_DITHER_NONE - 1, }, NULL, 1, },
    { { "none", "none", AV_RESAMPLE_DITHER_NONE, }, NULL, 1, },
    { { "rectangular", "rectangular", AV_RESAMPLE_DITHER_RECTANGULAR, }, NULL, 1, },
    { { "triangular", "triangular", AV_RESAMPLE_DITHER_TRIANGULAR, }, NULL, 1, },
    { { "triangular with high pass", "triangular_hp", AV_RESAMPLE_DITHER_TRIANGULAR_HP, }, NULL, 1, },
    { { "triangular with noise shaping", "triangular_ns", AV_RESAMPLE_DITHER_TRIANGULAR_NS, }, NULL, 1, },
};
int hb_audio_dithers_count = sizeof(hb_audio_dithers) / sizeof(hb_audio_dithers[0]);
/* Wraps a public hb_mixdown_t with list state (same pattern as
 * hb_rate_internal_t): 'next' links enabled items, 'enabled' gates exposure. */
typedef struct
{
    hb_mixdown_t item;
    hb_mixdown_t *next;
    int enabled;
} hb_mixdown_internal_t;
// Head/tail of the public list of audio mixdowns.
hb_mixdown_t *hb_audio_mixdowns_first_item = NULL;
hb_mixdown_t *hb_audio_mixdowns_last_item = NULL;
/* Mixdown table: display name, short (CLI) name, HB_AMIXDOWN_* id.
 * Disabled entries keep old display names resolvable for legacy presets. */
hb_mixdown_internal_t hb_audio_mixdowns[] =
{
    // legacy mixdowns, back to HB 0.9.4 whenever possible (disabled)
    { { "AC3 Passthru", "", HB_AMIXDOWN_NONE, }, NULL, 0, },
    { { "DTS Passthru", "", HB_AMIXDOWN_NONE, }, NULL, 0, },
    { { "DTS-HD Passthru", "", HB_AMIXDOWN_NONE, }, NULL, 0, },
    { { "6-channel discrete", "6ch", HB_AMIXDOWN_5POINT1, }, NULL, 0, },
    // actual mixdowns
    { { "None", "none", HB_AMIXDOWN_NONE, }, NULL, 1, },
    { { "Mono", "mono", HB_AMIXDOWN_MONO, }, NULL, 1, },
    { { "Mono (Left Only)", "left_only", HB_AMIXDOWN_LEFT, }, NULL, 1, },
    { { "Mono (Right Only)", "right_only", HB_AMIXDOWN_RIGHT, }, NULL, 1, },
    { { "Stereo", "stereo", HB_AMIXDOWN_STEREO, }, NULL, 1, },
    { { "Dolby Surround", "dpl1", HB_AMIXDOWN_DOLBY, }, NULL, 1, },
    { { "Dolby Pro Logic II", "dpl2", HB_AMIXDOWN_DOLBYPLII, }, NULL, 1, },
    { { "5.1 Channels", "5point1", HB_AMIXDOWN_5POINT1, }, NULL, 1, },
    { { "6.1 Channels", "6point1", HB_AMIXDOWN_6POINT1, }, NULL, 1, },
    { { "7.1 Channels", "7point1", HB_AMIXDOWN_7POINT1, }, NULL, 1, },
    { { "7.1 (5F/2R/LFE)", "5_2_lfe", HB_AMIXDOWN_5_2_LFE, }, NULL, 1, },
};
int hb_audio_mixdowns_count = sizeof(hb_audio_mixdowns) / sizeof(hb_audio_mixdowns[0]);
typedef struct
{
hb_encoder_t item;
hb_encoder_t *next;
int enabled;
int gid;
} hb_encoder_internal_t;
hb_encoder_t *hb_video_encoders_first_item = NULL;
hb_encoder_t *hb_video_encoders_last_item = NULL;
hb_encoder_internal_t hb_video_encoders[] =
{
// legacy encoders, back to HB 0.9.4 whenever possible (disabled)
{ { "FFmpeg", "ffmpeg", NULL, HB_VCODEC_FFMPEG_MPEG4, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 0, HB_GID_VCODEC_MPEG4, },
{ { "MPEG-4 (FFmpeg)", "ffmpeg4", NULL, HB_VCODEC_FFMPEG_MPEG4, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 0, HB_GID_VCODEC_MPEG4, },
{ { "MPEG-2 (FFmpeg)", "ffmpeg2", NULL, HB_VCODEC_FFMPEG_MPEG2, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 0, HB_GID_VCODEC_MPEG2, },
{ { "VP3 (Theora)", "libtheora", NULL, HB_VCODEC_THEORA, HB_MUX_MASK_MKV, }, NULL, 0, HB_GID_VCODEC_THEORA, },
// actual encoders
{ { "H.264 (x264)", "x264", "H.264 (libx264)", HB_VCODEC_X264, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 1, HB_GID_VCODEC_H264, },
{ { "H.264 (Intel QSV)", "qsv_h264", "H.264 (Intel Media SDK)", HB_VCODEC_QSV_H264, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 1, HB_GID_VCODEC_H264, },
{ { "H.265 (x265)", "x265", "H.265 (libx265)", HB_VCODEC_X265, HB_MUX_AV_MP4|HB_MUX_AV_MKV, }, NULL, 1, HB_GID_VCODEC_H265, },
{ { "MPEG-4", "mpeg4", "MPEG-4 (libavcodec)", HB_VCODEC_FFMPEG_MPEG4, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 1, HB_GID_VCODEC_MPEG4, },
{ { "MPEG-2", "mpeg2", "MPEG-2 (libavcodec)", HB_VCODEC_FFMPEG_MPEG2, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 1, HB_GID_VCODEC_MPEG2, },
{ { "VP8", "VP8", "VP8 (libvpx)", HB_VCODEC_FFMPEG_VP8, HB_MUX_MASK_MKV, }, NULL, 1, HB_GID_VCODEC_VP8, },
{ { "Theora", "theora", "Theora (libtheora)", HB_VCODEC_THEORA, HB_MUX_MASK_MKV, }, NULL, 1, HB_GID_VCODEC_THEORA, },
};
int hb_video_encoders_count = sizeof(hb_video_encoders) / sizeof(hb_video_encoders[0]);
/*
 * Returns nonzero when the given video encoder is available in this build.
 * QSV encoders defer to the QSV-specific runtime check when USE_QSV is set.
 */
static int hb_video_encoder_is_enabled(int encoder)
{
#ifdef USE_QSV
    if (encoder & HB_VCODEC_QSV_MASK)
    {
        return hb_qsv_video_encoder_is_enabled(encoder);
    }
#endif
    // these encoders are compiled in unconditionally
    if (encoder == HB_VCODEC_X264         ||
        encoder == HB_VCODEC_THEORA       ||
        encoder == HB_VCODEC_FFMPEG_MPEG4 ||
        encoder == HB_VCODEC_FFMPEG_MPEG2 ||
        encoder == HB_VCODEC_FFMPEG_VP8)
    {
        return 1;
    }
#ifdef USE_X265
    if (encoder == HB_VCODEC_X265)
    {
        return 1;
    }
#endif
    return 0;
}
// Linked-list endpoints for enabled audio encoders (see hb_common_global_init).
hb_encoder_t *hb_audio_encoders_first_item = NULL;
hb_encoder_t *hb_audio_encoders_last_item = NULL;
// Fields: { { name, short_name, long_name, codec, muxers }, next, enabled, gid }
// Note: the legacy "faac" row carries codec 0 (no codec); its gid lets it fall
// back to another AAC encoder during hb_common_global_init().
hb_encoder_internal_t hb_audio_encoders[] =
{
    // legacy encoders, back to HB 0.9.4 whenever possible (disabled)
    { { "", "dts", NULL, HB_ACODEC_DCA_PASS, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 0, HB_GID_ACODEC_DTS_PASS, },
    { { "AAC (faac)", "faac", NULL, 0, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 0, HB_GID_ACODEC_AAC, },
    { { "AAC (ffmpeg)", "ffaac", NULL, HB_ACODEC_FFAAC, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 0, HB_GID_ACODEC_AAC, },
    { { "AC3 (ffmpeg)", "ffac3", NULL, HB_ACODEC_AC3, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 0, HB_GID_ACODEC_AC3, },
    { { "MP3 (lame)", "lame", NULL, HB_ACODEC_LAME, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 0, HB_GID_ACODEC_MP3, },
    { { "Vorbis (vorbis)", "libvorbis", NULL, HB_ACODEC_VORBIS, HB_MUX_MASK_MKV, }, NULL, 0, HB_GID_ACODEC_VORBIS, },
    { { "FLAC (ffmpeg)", "ffflac", NULL, HB_ACODEC_FFFLAC, HB_MUX_MASK_MKV, }, NULL, 0, HB_GID_ACODEC_FLAC, },
    { { "FLAC (24-bit)", "ffflac24", NULL, HB_ACODEC_FFFLAC24, HB_MUX_MASK_MKV, }, NULL, 0, HB_GID_ACODEC_FLAC, },
    // actual encoders
    { { "AAC (CoreAudio)", "ca_aac", "AAC (Apple AudioToolbox)", HB_ACODEC_CA_AAC, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 1, HB_GID_ACODEC_AAC, },
    { { "HE-AAC (CoreAudio)", "ca_haac", "HE-AAC (Apple AudioToolbox)", HB_ACODEC_CA_HAAC, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 1, HB_GID_ACODEC_AAC_HE, },
    { { "AAC (avcodec)", "av_aac", "AAC (libavcodec)", HB_ACODEC_FFAAC, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 1, HB_GID_ACODEC_AAC, },
    { { "AAC (FDK)", "fdk_aac", "AAC (libfdk_aac)", HB_ACODEC_FDK_AAC, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 1, HB_GID_ACODEC_AAC, },
    { { "HE-AAC (FDK)", "fdk_haac", "HE-AAC (libfdk_aac)", HB_ACODEC_FDK_HAAC, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 1, HB_GID_ACODEC_AAC_HE, },
    { { "AAC Passthru", "copy:aac", "AAC Passthru", HB_ACODEC_AAC_PASS, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 1, HB_GID_ACODEC_AAC_PASS, },
    { { "AC3", "ac3", "AC3 (libavcodec)", HB_ACODEC_AC3, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 1, HB_GID_ACODEC_AC3, },
    { { "AC3 Passthru", "copy:ac3", "AC3 Passthru", HB_ACODEC_AC3_PASS, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 1, HB_GID_ACODEC_AC3_PASS, },
    { { "DTS Passthru", "copy:dts", "DTS Passthru", HB_ACODEC_DCA_PASS, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 1, HB_GID_ACODEC_DTS_PASS, },
    { { "DTS-HD Passthru", "copy:dtshd", "DTS-HD Passthru", HB_ACODEC_DCA_HD_PASS, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 1, HB_GID_ACODEC_DTSHD_PASS, },
    { { "MP3", "mp3", "MP3 (libmp3lame)", HB_ACODEC_LAME, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 1, HB_GID_ACODEC_MP3, },
    { { "MP3 Passthru", "copy:mp3", "MP3 Passthru", HB_ACODEC_MP3_PASS, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 1, HB_GID_ACODEC_MP3_PASS, },
    { { "Vorbis", "vorbis", "Vorbis (libvorbis)", HB_ACODEC_VORBIS, HB_MUX_MASK_MKV, }, NULL, 1, HB_GID_ACODEC_VORBIS, },
    { { "FLAC 16-bit", "flac16", "FLAC 16-bit (libavcodec)", HB_ACODEC_FFFLAC, HB_MUX_MASK_MKV, }, NULL, 1, HB_GID_ACODEC_FLAC, },
    { { "FLAC 24-bit", "flac24", "FLAC 24-bit (libavcodec)", HB_ACODEC_FFFLAC24, HB_MUX_MASK_MKV, }, NULL, 1, HB_GID_ACODEC_FLAC, },
    { { "Auto Passthru", "copy", "Auto Passthru", HB_ACODEC_AUTO_PASS, HB_MUX_MASK_MP4|HB_MUX_MASK_MKV, }, NULL, 1, HB_GID_ACODEC_AUTO_PASS, },
};
int hb_audio_encoders_count = sizeof(hb_audio_encoders) / sizeof(hb_audio_encoders[0]);
/*
 * Returns nonzero when the given audio encoder is available.
 * libavcodec-backed encoders are probed at runtime via avcodec_find_encoder*.
 */
static int hb_audio_encoder_is_enabled(int encoder)
{
    if (encoder & HB_ACODEC_PASS_FLAG)
    {
        // Passthru encoders are always enabled
        return 1;
    }
#ifdef __APPLE__
    if (encoder == HB_ACODEC_CA_AAC || encoder == HB_ACODEC_CA_HAAC)
    {
        return 1;
    }
#endif
#ifdef USE_LIBAV_AAC
    if (encoder == HB_ACODEC_FFAAC)
    {
        return avcodec_find_encoder_by_name("aac") != NULL;
    }
#endif
    if (encoder == HB_ACODEC_FDK_AAC || encoder == HB_ACODEC_FDK_HAAC)
    {
        return avcodec_find_encoder_by_name("libfdk_aac") != NULL;
    }
    if (encoder == HB_ACODEC_AC3)
    {
        return avcodec_find_encoder(AV_CODEC_ID_AC3) != NULL;
    }
    if (encoder == HB_ACODEC_FFFLAC || encoder == HB_ACODEC_FFFLAC24)
    {
        return avcodec_find_encoder(AV_CODEC_ID_FLAC) != NULL;
    }
    // lame and vorbis are always enabled; anything else is unavailable
    return (encoder == HB_ACODEC_LAME || encoder == HB_ACODEC_VORBIS);
}
// Internal wrapper for hb_container_t; gid groups equivalent muxers so a
// disabled entry can fall back to an enabled one from the same group.
typedef struct
{
    hb_container_t item;
    hb_container_t *next;
    int enabled;
    int gid;
} hb_container_internal_t;
// Linked-list endpoints for enabled containers (see hb_common_global_init).
hb_container_t *hb_containers_first_item = NULL;
hb_container_t *hb_containers_last_item = NULL;
// Fields: { { name, short_name, long_name, default_extension, format }, next, enabled, gid }
hb_container_internal_t hb_containers[] =
{
    // legacy muxers, back to HB 0.9.4 whenever possible (disabled)
    { { "M4V file", "m4v", NULL, "m4v", 0, }, NULL, 0, HB_GID_MUX_MP4, },
    { { "MP4 file", "mp4", NULL, "mp4", 0, }, NULL, 0, HB_GID_MUX_MP4, },
    { { "MKV file", "mkv", NULL, "mkv", 0, }, NULL, 0, HB_GID_MUX_MKV, },
    // actual muxers
    { { "MPEG-4 (avformat)", "av_mp4", "MPEG-4 (libavformat)", "mp4", HB_MUX_AV_MP4, }, NULL, 1, HB_GID_MUX_MP4, },
    { { "MPEG-4 (mp4v2)", "mp4v2", "MPEG-4 (libmp4v2)", "mp4", HB_MUX_MP4V2, }, NULL, 1, HB_GID_MUX_MP4, },
    { { "Matroska (avformat)", "av_mkv", "Matroska (libavformat)", "mkv", HB_MUX_AV_MKV, }, NULL, 1, HB_GID_MUX_MKV, },
    { { "Matroska (libmkv)", "libmkv", "Matroska (libmkv)", "mkv", HB_MUX_LIBMKV, }, NULL, 1, HB_GID_MUX_MKV, },
};
int hb_containers_count = sizeof(hb_containers) / sizeof(hb_containers[0]);
/* Only the libavformat muxers are available in this build. */
static int hb_container_is_enabled(int format)
{
    return (format == HB_MUX_AV_MP4 || format == HB_MUX_AV_MKV);
}
/*
 * One-time initialization of the public lookup lists.
 *
 * For each internal table (rates, bitrates, dithers, mixdowns, encoders,
 * containers) this threads a singly-linked list through the enabled entries
 * and records the first/last items in the corresponding globals. For the
 * encoder and container tables it additionally re-checks availability at
 * runtime and rewires disabled entries to an enabled fallback with the same
 * gid, so legacy names keep resolving to something usable.
 *
 * Not thread-safe: guarded only by a static flag, so it must be called
 * before any concurrent use of the lists.
 */
void hb_common_global_init()
{
    static int common_init_done = 0;
    if (common_init_done)
        return;
    int i, j;
    // video framerates
    for (i = 0; i < hb_video_rates_count; i++)
    {
        if (hb_video_rates[i].enabled)
        {
            if (hb_video_rates_first_item == NULL)
            {
                hb_video_rates_first_item = &hb_video_rates[i].item;
            }
            else
            {
                // append to the tail of the enabled-items list
                ((hb_rate_internal_t*)hb_video_rates_last_item)->next =
                    &hb_video_rates[i].item;
            }
            hb_video_rates_last_item = &hb_video_rates[i].item;
        }
    }
    // fallbacks are static for now (no setup required)
    // audio samplerates
    for (i = 0; i < hb_audio_rates_count; i++)
    {
        if (hb_audio_rates[i].enabled)
        {
            if (hb_audio_rates_first_item == NULL)
            {
                hb_audio_rates_first_item = &hb_audio_rates[i].item;
            }
            else
            {
                ((hb_rate_internal_t*)hb_audio_rates_last_item)->next =
                    &hb_audio_rates[i].item;
            }
            hb_audio_rates_last_item = &hb_audio_rates[i].item;
        }
    }
    // fallbacks are static for now (no setup required)
    // audio bitrates
    for (i = 0; i < hb_audio_bitrates_count; i++)
    {
        if (hb_audio_bitrates[i].enabled)
        {
            if (hb_audio_bitrates_first_item == NULL)
            {
                hb_audio_bitrates_first_item = &hb_audio_bitrates[i].item;
            }
            else
            {
                ((hb_rate_internal_t*)hb_audio_bitrates_last_item)->next =
                    &hb_audio_bitrates[i].item;
            }
            hb_audio_bitrates_last_item = &hb_audio_bitrates[i].item;
        }
    }
    // fallbacks are static for now (no setup required)
    // audio dithers
    for (i = 0; i < hb_audio_dithers_count; i++)
    {
        if (hb_audio_dithers[i].enabled)
        {
            if (hb_audio_dithers_first_item == NULL)
            {
                hb_audio_dithers_first_item = &hb_audio_dithers[i].item;
            }
            else
            {
                ((hb_dither_internal_t*)hb_audio_dithers_last_item)->next =
                    &hb_audio_dithers[i].item;
            }
            hb_audio_dithers_last_item = &hb_audio_dithers[i].item;
        }
    }
    // fallbacks are static for now (no setup required)
    // audio mixdowns
    for (i = 0; i < hb_audio_mixdowns_count; i++)
    {
        if (hb_audio_mixdowns[i].enabled)
        {
            if (hb_audio_mixdowns_first_item == NULL)
            {
                hb_audio_mixdowns_first_item = &hb_audio_mixdowns[i].item;
            }
            else
            {
                ((hb_mixdown_internal_t*)hb_audio_mixdowns_last_item)->next =
                    &hb_audio_mixdowns[i].item;
            }
            hb_audio_mixdowns_last_item = &hb_audio_mixdowns[i].item;
        }
    }
    // fallbacks are static for now (no setup required)
    // video encoders
    for (i = 0; i < hb_video_encoders_count; i++)
    {
        if (hb_video_encoders[i].enabled)
        {
            // we still need to check availability at runtime
            hb_video_encoders[i].enabled =
                hb_video_encoder_is_enabled(hb_video_encoders[i].item.codec);
        }
        if (hb_video_encoders[i].enabled)
        {
            if (hb_video_encoders_first_item == NULL)
            {
                hb_video_encoders_first_item = &hb_video_encoders[i].item;
            }
            else
            {
                ((hb_encoder_internal_t*)hb_video_encoders_last_item)->next =
                    &hb_video_encoders[i].item;
            }
            hb_video_encoders_last_item = &hb_video_encoders[i].item;
        }
    }
    // setup fallbacks: point each disabled entry's codec at an enabled
    // encoder from the same gid group
    for (i = 0; i < hb_video_encoders_count; i++)
    {
        if (!hb_video_encoders[i].enabled)
        {
            if ((hb_video_encoders[i].item.codec & HB_VCODEC_MASK) &&
                (hb_video_encoder_is_enabled(hb_video_encoders[i].item.codec)))
            {
                // we have a specific fallback and it's enabled
                continue;
            }
            for (j = 0; j < hb_video_encoders_count; j++)
            {
                if (hb_video_encoders[j].enabled &&
                    hb_video_encoders[j].gid == hb_video_encoders[i].gid)
                {
                    hb_video_encoders[i].item.codec = hb_video_encoders[j].item.codec;
                    break;
                }
            }
        }
    }
    // audio encoders
    for (i = 0; i < hb_audio_encoders_count; i++)
    {
        if (hb_audio_encoders[i].enabled)
        {
            // we still need to check availability at runtime
            hb_audio_encoders[i].enabled =
                hb_audio_encoder_is_enabled(hb_audio_encoders[i].item.codec);
        }
        if (hb_audio_encoders[i].enabled)
        {
            if (hb_audio_encoders_first_item == NULL)
            {
                hb_audio_encoders_first_item = &hb_audio_encoders[i].item;
            }
            else
            {
                ((hb_encoder_internal_t*)hb_audio_encoders_last_item)->next =
                    &hb_audio_encoders[i].item;
            }
            hb_audio_encoders_last_item = &hb_audio_encoders[i].item;
        }
    }
    // setup fallbacks (same gid-group scheme as video encoders)
    for (i = 0; i < hb_audio_encoders_count; i++)
    {
        if (!hb_audio_encoders[i].enabled)
        {
            if ((hb_audio_encoders[i].item.codec & HB_ACODEC_MASK) &&
                (hb_audio_encoder_is_enabled(hb_audio_encoders[i].item.codec)))
            {
                // we have a specific fallback and it's enabled
                continue;
            }
            for (j = 0; j < hb_audio_encoders_count; j++)
            {
                if (hb_audio_encoders[j].enabled &&
                    hb_audio_encoders[j].gid == hb_audio_encoders[i].gid)
                {
                    hb_audio_encoders[i].item.codec = hb_audio_encoders[j].item.codec;
                    break;
                }
            }
            if ((hb_audio_encoders[i].item.codec & HB_ACODEC_MASK) == 0 &&
                (hb_audio_encoders[i].gid == HB_GID_ACODEC_AAC_HE))
            {
                // try to find an AAC fallback if no HE-AAC encoder is available
                for (j = 0; j < hb_audio_encoders_count; j++)
                {
                    if (hb_audio_encoders[j].enabled &&
                        hb_audio_encoders[j].gid == HB_GID_ACODEC_AAC)
                    {
                        hb_audio_encoders[i].item.codec = hb_audio_encoders[j].item.codec;
                        break;
                    }
                }
            }
        }
    }
    // video containers
    for (i = 0; i < hb_containers_count; i++)
    {
        if (hb_containers[i].enabled)
        {
            // we still need to check availability at runtime
            hb_containers[i].enabled =
                hb_container_is_enabled(hb_containers[i].item.format);
        }
        if (hb_containers[i].enabled)
        {
            if (hb_containers_first_item == NULL)
            {
                hb_containers_first_item = &hb_containers[i].item;
            }
            else
            {
                ((hb_container_internal_t*)hb_containers_last_item)->next =
                    &hb_containers[i].item;
            }
            hb_containers_last_item = &hb_containers[i].item;
        }
    }
    // setup fallbacks (same gid-group scheme as the encoders)
    for (i = 0; i < hb_containers_count; i++)
    {
        if (!hb_containers[i].enabled)
        {
            if ((hb_containers[i].item.format & HB_MUX_MASK) &&
                (hb_container_is_enabled(hb_containers[i].item.format)))
            {
                // we have a specific fallback and it's enabled
                continue;
            }
            for (j = 0; j < hb_containers_count; j++)
            {
                if (hb_containers[j].enabled &&
                    hb_containers[j].gid == hb_containers[i].gid)
                {
                    hb_containers[i].item.format = hb_containers[j].item.format;
                    break;
                }
            }
        }
    }
    // we're done, yay!
    common_init_done = 1;
}
/*
 * Case-insensitive lookup of a framerate by its table name.
 * Returns the rate value, or -1 when the name is NULL, empty or unknown.
 */
int hb_video_framerate_get_from_name(const char *name)
{
    if (name != NULL && *name != '\0')
    {
        int idx;
        for (idx = 0; idx < hb_video_rates_count; idx++)
        {
            if (strcasecmp(hb_video_rates[idx].item.name, name) == 0)
            {
                return hb_video_rates[idx].item.rate;
            }
        }
    }
    return -1;
}
/*
 * Reverse lookup: framerate value -> table name.
 * The list is ordered so that the first item holds the largest rate value;
 * values outside [last, first] are rejected up front. Returns NULL on miss.
 */
const char* hb_video_framerate_get_name(int framerate)
{
    if (framerate <= hb_video_rates_first_item->rate &&
        framerate >= hb_video_rates_last_item ->rate)
    {
        const hb_rate_t *rate;
        for (rate = hb_video_framerate_get_next(NULL); rate != NULL;
             rate = hb_video_framerate_get_next(rate))
        {
            if (rate->rate == framerate)
            {
                return rate->name;
            }
        }
    }
    return NULL;
}
/* Canonicalize a framerate name by round-tripping it through the rate value. */
const char* hb_video_framerate_sanitize_name(const char *name)
{
    int rate = hb_video_framerate_get_from_name(name);
    return hb_video_framerate_get_name(rate);
}
/* Iterate the enabled framerates; pass NULL to start, returns NULL at the end. */
const hb_rate_t* hb_video_framerate_get_next(const hb_rate_t *last)
{
    return (last == NULL) ? hb_video_rates_first_item
                          : ((hb_rate_internal_t*)last)->next;
}
/*
 * Pick the best supported samplerate for a codec, sanitizing downwards to the
 * nearest allowed value. Optionally reports the resampling shift via sr_shift.
 */
int hb_audio_samplerate_get_best(uint32_t codec, int samplerate, int *sr_shift)
{
    int best;
    if (samplerate < 32000 &&
        (codec == HB_ACODEC_AC3 || codec == HB_ACODEC_CA_HAAC))
    {
        // ca_haac can't do samplerates < 32 kHz
        // AC-3 < 32 kHz suffers from poor hardware compatibility
        best = 32000;
    }
    else if (samplerate < 16000 && codec == HB_ACODEC_FDK_HAAC)
    {
        // fdk_haac can't do samplerates < 16 kHz
        best = 16000;
    }
    else
    {
        const hb_rate_t *rate;
        best = hb_audio_rates_first_item->rate;
        for (rate = hb_audio_samplerate_get_next(NULL); rate != NULL;
             rate = hb_audio_samplerate_get_next(rate))
        {
            if (samplerate >= rate->rate)
            {
                // samplerates are sanitized downwards
                best = rate->rate;
            }
            if (samplerate == rate->rate)
            {
                // exact match, we're done
                break;
            }
        }
    }
    if (sr_shift != NULL)
    {
        /* sr_shift: 0 -> 48000, 44100, 32000 Hz
         *           1 -> 24000, 22050, 16000 Hz
         *           2 -> 12000, 11025,  8000 Hz
         *
         * also, since samplerates are sanitized downwards:
         *
         * (samplerate < 32000) implies (samplerate <= 24000)
         */
        if (best < 16000)
        {
            *sr_shift = 2;
        }
        else if (best < 32000)
        {
            *sr_shift = 1;
        }
        else
        {
            *sr_shift = 0;
        }
    }
    return best;
}
/*
 * Look a samplerate up by its table name (case-insensitive); if that fails,
 * try to parse the string as a raw value in Hz. Returns -1 on failure.
 */
int hb_audio_samplerate_get_from_name(const char *name)
{
    if (name != NULL && *name != '\0')
    {
        int idx;
        for (idx = 0; idx < hb_audio_rates_count; idx++)
        {
            if (strcasecmp(hb_audio_rates[idx].item.name, name) == 0)
            {
                return hb_audio_rates[idx].item.rate;
            }
        }
        // maybe the samplerate was specified in Hz
        idx = atoi(name);
        if (idx >= hb_audio_rates_first_item->rate &&
            idx <= hb_audio_rates_last_item ->rate)
        {
            return hb_audio_samplerate_get_best(0, idx, NULL);
        }
    }
    return -1;
}
/*
 * Reverse lookup: samplerate value -> table name. The list is ascending, so
 * out-of-range values are rejected up front. Returns NULL on miss.
 */
const char* hb_audio_samplerate_get_name(int samplerate)
{
    if (samplerate >= hb_audio_rates_first_item->rate &&
        samplerate <= hb_audio_rates_last_item ->rate)
    {
        const hb_rate_t *rate;
        for (rate = hb_audio_samplerate_get_next(NULL); rate != NULL;
             rate = hb_audio_samplerate_get_next(rate))
        {
            if (rate->rate == samplerate)
            {
                return rate->name;
            }
        }
    }
    return NULL;
}
/* Iterate the enabled samplerates; pass NULL to start, returns NULL at the end. */
const hb_rate_t* hb_audio_samplerate_get_next(const hb_rate_t *last)
{
    return (last == NULL) ? hb_audio_rates_first_item
                          : ((hb_rate_internal_t*)last)->next;
}
// Given an input bitrate, find closest match in the set of allowed bitrates,
// rounding downwards. Non-positive input means "bitrate mode disabled" and is
// returned unchanged.
static int hb_audio_bitrate_find_closest(int bitrate)
{
    // Check if bitrate mode was disabled
    if (bitrate <= 0)
    {
        return bitrate;
    }
    const hb_rate_t *rate;
    int closest = hb_audio_bitrates_first_item->rate;
    for (rate = hb_audio_bitrate_get_next(NULL); rate != NULL;
         rate = hb_audio_bitrate_get_next(rate))
    {
        if (bitrate >= rate->rate)
        {
            // bitrates are sanitized downwards
            closest = rate->rate;
        }
        if (bitrate == rate->rate)
        {
            // exact match, we're done
            break;
        }
    }
    return closest;
}
// Given an input bitrate, sanitize it: clamp into the codec's allowed range
// for the samplerate/mixdown, then snap to an allowed bitrate.
int hb_audio_bitrate_get_best(uint32_t codec, int bitrate, int samplerate,
                              int mixdown)
{
    int low, high;
    hb_audio_bitrate_get_limits(codec, samplerate, mixdown, &low, &high);
    // clamp high first, then low (order matters if the range degenerates)
    if (bitrate > high)
    {
        bitrate = high;
    }
    if (bitrate < low)
    {
        bitrate = low;
    }
    return hb_audio_bitrate_find_closest(bitrate);
}
// Get the default bitrate for a given codec/samplerate/mixdown triplet.
// Returns -1 for passthru, invalid and lossless codecs.
int hb_audio_bitrate_get_default(uint32_t codec, int samplerate, int mixdown)
{
    if ((codec & HB_ACODEC_PASS_FLAG) || !(codec & HB_ACODEC_MASK))
    {
        return -1;
    }
    int sr_shift;
    /* full-bandwidth channels, sr_shift */
    int nchannels = (hb_mixdown_get_discrete_channel_count(mixdown) -
                     hb_mixdown_get_low_freq_channel_count(mixdown));
    hb_audio_samplerate_get_best(codec, samplerate, &sr_shift);
    int bitrate;
    switch (codec)
    {
        case HB_ACODEC_FFFLAC:
        case HB_ACODEC_FFFLAC24:
            // lossless: no meaningful default bitrate
            return -1;
        case HB_ACODEC_AC3:
            // 96, 224, 640 Kbps
            bitrate = (nchannels * 128) - (32 * (nchannels < 5));
            break;
        case HB_ACODEC_CA_HAAC:
        case HB_ACODEC_FDK_HAAC:
            bitrate = nchannels * 32;
            break;
        default:
            bitrate = nchannels * 80;
            break;
    }
    // sample_rate adjustment
    bitrate >>= sr_shift;
    return hb_audio_bitrate_get_best(codec, bitrate, samplerate, mixdown);
}
/* Get the bitrate low and high limits for a codec/samplerate/mixdown triplet.
*
* Encoder 1.0 channel 2.0 channels 5.1 channels 6.1 channels 7.1 channels
* --------------------------------------------------------------------------------------
*
* ffaac
* -----
* supported samplerates: 8 - 48 kHz
* libavcodec/aacenc.c defines a maximum bitrate:
* -> 6144 * samplerate / 1024 bps (per channel, incl. LFE).
* But output bitrates don't go as high as the theoretical maximums:
* 12 kHz 61 (72) 123 (144)
* 24 kHz 121 (144) 242 (288)
* 48 kHz 236 (288) 472 (576)
* Also, ffaac isn't a great encoder, so you don't want to allow too low a bitrate.
* Limits: minimum of 32 Kbps per channel
* maximum of 192 Kbps per channel at 32 kHz, adjusted for sr_shift
* maximum of 256 Kbps per channel at 44.1-48 kHz, adjusted for sr_shift
*
* vorbis
* ------
* supported samplerates: 8 - 48 kHz
* lib/modes/setup_*.h provides a range of allowed bitrates for various configurations.
* for each samplerate, the highest minimums and lowest maximums are:
* 8 kHz Minimum 8 Kbps, maximum 32 Kbps (per channel, incl. LFE).
* 12 kHz Minimum 14 Kbps, maximum 44 Kbps (per channel, incl. LFE).
* 16 kHz Minimum 16 Kbps, maximum 86 Kbps (per channel, incl. LFE).
* 24 kHz Minimum 22 Kbps, maximum 86 Kbps (per channel, incl. LFE).
* 32 kHz Minimum 26 Kbps, maximum 190 Kbps (per channel, incl. LFE).
* 48 kHz Minimum 28 Kbps, maximum 240 Kbps (per channel, incl. LFE).
* Limits: minimum of 14/22/28 Kbps per channel (8-12, 16-24, 32-48 kHz)
* maximum of 32/86/190/240 Kbps per channel (8-12, 16-24, 32, 44.1-48 kHz)
*
* lame
* ----
* supported samplerates: 8 - 48 kHz
* lame_init_params() allows the following bitrates:
* 12 kHz Minimum 8 Kbps, maximum 64 Kbps
* 24 kHz Minimum 8 Kbps, maximum 160 Kbps
* 48 kHz Minimum 32 Kbps, maximum 320 Kbps
* Limits: minimum of 8/8/32 Kbps (8-12, 16-24, 32-48 kHz)
* maximum of 64/160/320 Kbps (8-12, 16-24, 32-48 kHz)
*
* ffac3
* -----
* supported samplerates: 32 - 48 kHz (< 32 kHz disabled for compatibility reasons)
* Dolby's encoder has a min. of 224 Kbps for 5 full-bandwidth channels (5.0, 5.1)
* The maximum AC3 bitrate is 640 Kbps
* Limits: minimum of 224/5 Kbps per full-bandwidth channel, maximum of 640 Kbps
*
* ca_aac
* ------
* supported samplerates: 8 - 48 kHz
* Core Audio API provides a range of allowed bitrates:
* 8 kHz 8 - 24 16 - 48 40 - 112 48 - 144 56 - 160
* 12 kHz 12 - 32 24 - 64 64 - 160 72 - 192 96 - 224
* 16 kHz 12 - 48 24 - 96 64 - 224 72 - 288 96 - 320
* 24 kHz 16 - 64 32 - 128 80 - 320 96 - 384 112 - 448
* 32 kHz 24 - 96 48 - 192 128 - 448 144 - 576 192 - 640
* 48 kHz 32 - 256 64 - 320 160 - 768 192 - 960 224 - 960
* Limits:
* 8 kHz -> minimum of 8 Kbps and maximum of 24 Kbps per full-bandwidth channel
* 12 kHz -> minimum of 12 Kbps and maximum of 32 Kbps per full-bandwidth channel
* 16 kHz -> minimum of 12 Kbps and maximum of 48 Kbps per full-bandwidth channel
* 24 kHz -> minimum of 16 Kbps and maximum of 64 Kbps per full-bandwidth channel
* 32 kHz -> minimum of 24 Kbps and maximum of 96 Kbps per full-bandwidth channel
* 48 kHz -> minimum of 32 Kbps and maximum of 160 Kbps per full-bandwidth channel
* 48 kHz -> maximum of +96 Kbps for Mono
* Note: encCoreAudioInit() will sanitize any mistake made here.
*
* ca_haac
* -------
* supported samplerates: 32 - 48 kHz
* Core Audio API provides a range of allowed bitrates:
* 32 kHz 12 - 40 24 - 80 64 - 192 N/A 96 - 256
* 48 kHz 16 - 40 32 - 80 80 - 192 N/A 112 - 256
* Limits: minimum of 12 Kbps per full-bandwidth channel (<= 32 kHz)
* minimum of 16 Kbps per full-bandwidth channel ( > 32 kHz)
* maximum of 40 Kbps per full-bandwidth channel
* Note: encCoreAudioInit() will sanitize any mistake made here.
*
* fdk_aac
* -------
* supported samplerates: 8 - 48 kHz
* libfdk limits the bitrate to the following values:
* 8 kHz 48 96 240
* 12 kHz 72 144 360
* 16 kHz 96 192 480
* 24 kHz 144 288 720
* 32 kHz 192 384 960
* 48 kHz 288 576 1440
* Limits: minimum of samplerate * 2/3 Kbps per full-bandwidth channel (see ca_aac)
* maximum of samplerate * 6.0 Kbps per full-bandwidth channel
*
* fdk_haac
* --------
* supported samplerates: 16 - 48 kHz
* libfdk limits the bitrate to the following values:
* 16 kHz 8 - 48 16 - 96 45 - 199
* 24 kHz 8 - 63 16 - 127 45 - 266
* 32 kHz 8 - 63 16 - 127 45 - 266
* 48 kHz 12 - 63 16 - 127 50 - 266
* Limits: minimum of 12 Kbps per full-bandwidth channel (<= 32 kHz) (see ca_haac)
* minimum of 16 Kbps per full-bandwidth channel ( > 32 kHz) (see ca_haac)
* maximum of 48, 96 or 192 Kbps (1.0, 2.0, 5.1) (<= 16 kHz)
* maximum of 64, 128 or 256 Kbps (1.0, 2.0, 5.1) ( > 16 kHz)
*/
/*
 * Compute the allowed bitrate range (Kbps) for a codec/samplerate/mixdown
 * triplet; see the large comment block above for the per-encoder rationale.
 * Lossless codecs report -1/-1. The upper bound is clamped into the global
 * allowed-bitrate table at the end.
 */
void hb_audio_bitrate_get_limits(uint32_t codec, int samplerate, int mixdown,
                                 int *low, int *high)
{
    /* samplerate, sr_shift */
    int sr_shift;
    samplerate = hb_audio_samplerate_get_best(codec, samplerate, &sr_shift);
    /* LFE, full-bandwidth channels */
    int lfe_count, nchannels;
    lfe_count = hb_mixdown_get_low_freq_channel_count(mixdown);
    nchannels = hb_mixdown_get_discrete_channel_count(mixdown) - lfe_count;
    switch (codec)
    {
        // Bitrates don't apply to "lossless" audio
        case HB_ACODEC_FFFLAC:
        case HB_ACODEC_FFFLAC24:
            *low = *high = -1;
            return;
        case HB_ACODEC_AC3:
            // 224 Kbps minimum for 5 full-bandwidth channels, scaled down
            *low = 224 * nchannels / 5;
            *high = 640;
            break;
        case HB_ACODEC_CA_AAC:
        {
            // Core Audio limits are per full-bandwidth channel and vary by
            // samplerate (see table above)
            switch (samplerate)
            {
                case 8000:
                    *low = nchannels * 8;
                    *high = nchannels * 24;
                    break;
                case 11025:
                case 12000:
                    *low = nchannels * 12;
                    *high = nchannels * 32;
                    break;
                case 16000:
                    *low = nchannels * 12;
                    *high = nchannels * 48;
                    break;
                case 22050:
                case 24000:
                    *low = nchannels * 16;
                    *high = nchannels * 64;
                    break;
                case 32000:
                    *low = nchannels * 24;
                    *high = nchannels * 96;
                    break;
                case 44100:
                case 48000:
                default:
                    // mono gets an extra +96 Kbps at 44.1-48 kHz
                    *low = nchannels * 32;
                    *high = nchannels * (160 + (96 * (nchannels == 1)));
                    break;
            } break;
        }
        case HB_ACODEC_CA_HAAC:
            *low = nchannels * (12 + (4 * (samplerate >= 44100)));
            *high = nchannels * 40;
            break;
        case HB_ACODEC_FDK_AAC:
            *low = nchannels * samplerate * 2 / 3000;
            *high = nchannels * samplerate * 6 / 1000;
            break;
        case HB_ACODEC_FDK_HAAC:
            *low = (nchannels * (12 + (4 * (samplerate >= 44100))));
            *high = (nchannels - (nchannels > 2)) * (48 +
                                                     (16 *
                                                      (samplerate >= 22050)));
            break;
        case HB_ACODEC_FFAAC:
            *low = ((nchannels + lfe_count) * 32);
            *high = ((nchannels + lfe_count) *
                     ((192 + (64 * ((samplerate << sr_shift) >= 44100)))
                      >> sr_shift));
            break;
        case HB_ACODEC_LAME:
            *low = 8 + (24 * (sr_shift < 1));
            *high = 64 + (96 * (sr_shift < 2)) + (160 * (sr_shift < 1));
            break;
        case HB_ACODEC_VORBIS:
            *low = (nchannels + lfe_count) * (14 +
                                              (8 * (sr_shift < 2)) +
                                              (6 * (sr_shift < 1)));
            *high = (nchannels + lfe_count) * (32 +
                                               ( 54 * (sr_shift < 2)) +
                                               (104 * (sr_shift < 1)) +
                                               ( 50 * (samplerate >= 44100)));
            break;
        // Bitrates don't apply to passthrough audio, but may apply if we
        // fall back to an encoder when the source can't be passed through.
        default:
            *low = hb_audio_bitrates_first_item->rate;
            *high = hb_audio_bitrates_last_item ->rate;
            break;
    }
    // sanitize max. bitrate
    if (*high < hb_audio_bitrates_first_item->rate)
        *high = hb_audio_bitrates_first_item->rate;
    if (*high > hb_audio_bitrates_last_item ->rate)
        *high = hb_audio_bitrates_last_item ->rate;
}
/* Iterate the enabled bitrates; pass NULL to start, returns NULL at the end. */
const hb_rate_t* hb_audio_bitrate_get_next(const hb_rate_t *last)
{
    return (last == NULL) ? hb_audio_bitrates_first_item
                          : ((hb_rate_internal_t*)last)->next;
}
// Get limits and hints for the UIs.
//
// granularity sets the minimum step increments that should be used
// (it's ok to round up to some nice multiple if you like)
//
// direction says whether 'low' limit is highest or lowest
// quality (direction 0 == lowest value is worst quality)
/*
 * Report the quality-slider range for a video encoder.
 *
 * granularity is the minimum step increment UIs should use; direction says
 * whether the 'low' limit is the best quality (direction 1 == lower value is
 * better quality).
 *
 * Fix: the QSV path used "return hb_qsv_video_quality_get_limits(...)" —
 * returning the value of a void function from a void function is a C
 * constraint violation (C11 6.8.6.4p1); call it, then return.
 */
void hb_video_quality_get_limits(uint32_t codec, float *low, float *high,
                                 float *granularity, int *direction)
{
#ifdef USE_QSV
    if (codec & HB_VCODEC_QSV_MASK)
    {
        hb_qsv_video_quality_get_limits(codec, low, high, granularity,
                                        direction);
        return;
    }
#endif
    switch (codec)
    {
        case HB_VCODEC_X264:
#ifdef USE_X265
        case HB_VCODEC_X265:
#endif
            // x264/x265 RF scale: lower is better
            *direction   = 1;
            *granularity = 0.1;
            *low         = 0.;
            *high        = 51.;
            break;
        case HB_VCODEC_THEORA:
            *direction   = 0;
            *granularity = 1.;
            *low         = 0.;
            *high        = 63.;
            break;
        case HB_VCODEC_FFMPEG_VP8:
            // VP8 CQ: lower is better
            *direction   = 1;
            *granularity = 1.;
            *low         = 0.;
            *high        = 63.;
            break;
        case HB_VCODEC_FFMPEG_MPEG2:
        case HB_VCODEC_FFMPEG_MPEG4:
        default:
            // lavc QP: lower is better
            *direction   = 1;
            *granularity = 1.;
            *low         = 1.;
            *high        = 31.;
            break;
    }
}
/* Short label for the encoder's quality scale ("RF", "CQ" or "QP"). */
const char* hb_video_quality_get_name(uint32_t codec)
{
#ifdef USE_QSV
    if (codec & HB_VCODEC_QSV_MASK)
    {
        return hb_qsv_video_quality_get_name(codec);
    }
#endif
    if (codec == HB_VCODEC_X264)
    {
        return "RF";
    }
#ifdef USE_X265
    if (codec == HB_VCODEC_X265)
    {
        return "RF";
    }
#endif
    return (codec == HB_VCODEC_FFMPEG_VP8) ? "CQ" : "QP";
}
/* NULL-terminated preset name list for the encoder, or NULL if it has none. */
const char* const* hb_video_encoder_get_presets(int encoder)
{
#ifdef USE_QSV
    if (encoder & HB_VCODEC_QSV_MASK)
    {
        return hb_qsv_preset_get_names();
    }
#endif
    if (encoder == HB_VCODEC_X264)
    {
        return x264_preset_names;
    }
#ifdef USE_X265
    if (encoder == HB_VCODEC_X265)
    {
        return x265_preset_names;
    }
#endif
    return NULL;
}
/* Tune name list for the encoder, or NULL if it has none. */
const char* const* hb_video_encoder_get_tunes(int encoder)
{
    if (encoder == HB_VCODEC_X264)
    {
        return x264_tune_names;
    }
#ifdef USE_X265
    if (encoder == HB_VCODEC_X265)
    {
        return x265_tune_names;
    }
#endif
    return NULL;
}
/* Profile name list for the encoder, or NULL if it has none. */
const char* const* hb_video_encoder_get_profiles(int encoder)
{
#ifdef USE_QSV
    if (encoder & HB_VCODEC_QSV_MASK)
    {
        return hb_qsv_profile_get_names(encoder);
    }
#endif
    if (encoder == HB_VCODEC_X264)
    {
        return hb_h264_profile_names;
    }
    if (encoder == HB_VCODEC_X265)
    {
        return hb_h265_profile_names;
    }
    return NULL;
}
/* Level name list for the encoder, or NULL if it has none. */
const char* const* hb_video_encoder_get_levels(int encoder)
{
#ifdef USE_QSV
    if (encoder & HB_VCODEC_QSV_MASK)
    {
        return hb_qsv_level_get_names(encoder);
    }
#endif
    return (encoder == HB_VCODEC_X264) ? hb_h264_level_names : NULL;
}
// Get limits and hints for the UIs.
//
// granularity sets the minimum step increments that should be used
// (it's ok to round up to some nice multiple if you like)
//
// direction says whether 'low' limit is highest or lowest
// quality (direction 0 == lowest value is worst quality)
/*
 * Report the quality-slider range for an audio encoder.
 * granularity is the minimum UI step; direction says whether the 'low' limit
 * is the best quality (direction 1 == lower value is better quality).
 * Codecs without a quality mode report HB_INVALID_AUDIO_QUALITY.
 */
void hb_audio_quality_get_limits(uint32_t codec, float *low, float *high,
                                 float *granularity, int *direction)
{
    switch (codec)
    {
        case HB_ACODEC_LAME:
            // LAME VBR: lower is better
            *low         = 0.;
            *high        = 10.;
            *granularity = 0.5;
            *direction   = 1;
            break;
        case HB_ACODEC_VORBIS:
            *low         = -2.;
            *high        = 10.;
            *granularity = 0.5;
            *direction   = 0;
            break;
        case HB_ACODEC_CA_AAC:
            *low         = 1.;
            *high        = 127.;
            *granularity = 9.;
            *direction   = 0;
            break;
        default:
            // no quality mode for this codec
            *low = *high = HB_INVALID_AUDIO_QUALITY;
            *granularity = 1.;
            *direction   = 0;
            break;
    }
}
/* Clamp a quality value into the codec's supported range. */
float hb_audio_quality_get_best(uint32_t codec, float quality)
{
    int   dir;
    float lo, hi, step;
    hb_audio_quality_get_limits(codec, &lo, &hi, &step, &dir);
    // clamp high first, then low (mirrors the bitrate sanitizer)
    if (quality > hi)
    {
        quality = hi;
    }
    if (quality < lo)
    {
        quality = lo;
    }
    return quality;
}
/* Default quality value per codec; HB_INVALID_AUDIO_QUALITY if unsupported. */
float hb_audio_quality_get_default(uint32_t codec)
{
    if (codec == HB_ACODEC_LAME)
    {
        return 2.;
    }
    if (codec == HB_ACODEC_VORBIS)
    {
        return 5.;
    }
    if (codec == HB_ACODEC_CA_AAC)
    {
        return 91.;
    }
    return HB_INVALID_AUDIO_QUALITY;
}
// Get limits and hints for the UIs.
//
// granularity sets the minimum step increments that should be used
// (it's ok to round up to some nice multiple if you like)
//
// direction says whether 'low' limit is highest or lowest
// compression level (direction 0 == lowest value is worst compression level)
/*
 * Report the compression-level range for an audio encoder.
 * granularity is the minimum UI step; direction says whether the 'low' limit
 * is the best compression (direction 1 == lower value is better).
 * Codecs without a compression setting report -1/-1.
 */
void hb_audio_compression_get_limits(uint32_t codec, float *low, float *high,
                                     float *granularity, int *direction)
{
    switch (codec)
    {
        case HB_ACODEC_FFFLAC:
        case HB_ACODEC_FFFLAC24:
            // FLAC compression level 0-12, higher compresses more
            *low         = 0.;
            *high        = 12.;
            *granularity = 1.;
            *direction   = 0;
            break;
        case HB_ACODEC_LAME:
            // LAME -q 0-9, lower is better
            *low         = 0.;
            *high        = 9.;
            *granularity = 1.;
            *direction   = 1;
            break;
        default:
            // no compression setting for this codec
            *low = *high = -1.;
            *granularity = 1.;
            *direction   = 0;
            break;
    }
}
/* Clamp a compression level into the codec's supported range. */
float hb_audio_compression_get_best(uint32_t codec, float compression)
{
    int   dir;
    float lo, hi, step;
    hb_audio_compression_get_limits(codec, &lo, &hi, &step, &dir);
    if (compression > hi)
    {
        compression = hi;
    }
    if (compression < lo)
    {
        compression = lo;
    }
    return compression;
}
/* Default compression level per codec; -1 if the codec has no such setting. */
float hb_audio_compression_get_default(uint32_t codec)
{
    if (codec == HB_ACODEC_FFFLAC || codec == HB_ACODEC_FFFLAC24)
    {
        return 5.;
    }
    if (codec == HB_ACODEC_LAME)
    {
        return 2.;
    }
    return -1.;
}
/*
 * Default dither method: the first list item ("auto").
 * Fix: declare the parameter list as (void) — an empty () declares a function
 * with unspecified parameters, which is an obsolescent feature in C.
 */
int hb_audio_dither_get_default(void)
{
    // "auto"
    return hb_audio_dithers_first_item->method;
}
/*
 * Concrete dither method used when the user picks "auto".
 * Fix: declare the parameter list as (void) — an empty () declares a function
 * with unspecified parameters, which is an obsolescent feature in C.
 */
int hb_audio_dither_get_default_method(void)
{
    /*
     * input could be s16 (possibly already dithered) converted to flt, so
     * let's use a "low-risk" dither algorithm (standard triangular).
     */
    return AV_RESAMPLE_DITHER_TRIANGULAR;
}
/* Dithering applies only to encoders whose input sample format is s16(p). */
int hb_audio_dither_is_supported(uint32_t codec)
{
    return (codec == HB_ACODEC_FFFLAC  ||
            codec == HB_ACODEC_FDK_AAC ||
            codec == HB_ACODEC_FDK_HAAC);
}
/*
 * Case-insensitive lookup of a dither method by short name or description.
 * Unknown/NULL/empty names resolve to the default ("auto") method.
 */
int hb_audio_dither_get_from_name(const char *name)
{
    if (name != NULL && *name != '\0')
    {
        int idx;
        for (idx = 0; idx < hb_audio_dithers_count; idx++)
        {
            if (strcasecmp(name, hb_audio_dithers[idx].item.short_name)  == 0 ||
                strcasecmp(name, hb_audio_dithers[idx].item.description) == 0)
            {
                return hb_audio_dithers[idx].item.method;
            }
        }
    }
    return hb_audio_dither_get_default();
}
/*
 * Reverse lookup: dither method -> human-readable description.
 * Methods outside [first, last] are rejected up front; returns NULL on miss.
 */
const char* hb_audio_dither_get_description(int method)
{
    if (method >= hb_audio_dithers_first_item->method &&
        method <= hb_audio_dithers_last_item ->method)
    {
        const hb_dither_t *dither;
        for (dither = hb_audio_dither_get_next(NULL); dither != NULL;
             dither = hb_audio_dither_get_next(dither))
        {
            if (dither->method == method)
            {
                return dither->description;
            }
        }
    }
    return NULL;
}
// Iterate the dither list; pass NULL to get the first item,
// returns NULL after the last one.
const hb_dither_t* hb_audio_dither_get_next(const hb_dither_t *last)
{
    return (last == NULL) ? hb_audio_dithers_first_item
                          : ((hb_dither_internal_t*)last)->next;
}
// A mixdown is usable only when the encoder can produce it AND the
// source channel layout provides the channels it needs.
int hb_mixdown_is_supported(int mixdown, uint32_t codec, uint64_t layout)
{
    if (!hb_mixdown_has_codec_support(mixdown, codec))
    {
        return 0;
    }
    return hb_mixdown_has_remix_support(mixdown, layout);
}
/*
 * Can the given encoder produce the given mixdown?
 * Passthru encoders accept only HB_AMIXDOWN_NONE; real encoders never
 * accept it. Per-codec ceilings otherwise.
 */
int hb_mixdown_has_codec_support(int mixdown, uint32_t codec)
{
    // Passthru, only "None" mixdown is supported
    if (codec & HB_ACODEC_PASS_FLAG)
        return (mixdown == HB_AMIXDOWN_NONE);
    // Not passthru, "None" mixdown never supported
    if (mixdown == HB_AMIXDOWN_NONE)
        return 0;
    if (codec == HB_ACODEC_VORBIS ||
        codec == HB_ACODEC_FFFLAC ||
        codec == HB_ACODEC_FFFLAC24)
    {
        return (mixdown <= HB_AMIXDOWN_7POINT1);
    }
    if (codec == HB_ACODEC_LAME)
    {
        return (mixdown <= HB_AMIXDOWN_DOLBYPLII);
    }
    if (codec == HB_ACODEC_CA_AAC || codec == HB_ACODEC_CA_HAAC)
    {
        return ((mixdown <= HB_AMIXDOWN_5POINT1) ||
                (mixdown == HB_AMIXDOWN_5_2_LFE));
    }
    return (mixdown <= HB_AMIXDOWN_5POINT1);
}
/*
 * Does the source channel layout (an AV_CH_LAYOUT_* / AV_CH_* bitmask)
 * provide the channels required to produce the given mixdown?
 */
int hb_mixdown_has_remix_support(int mixdown, uint64_t layout)
{
    switch (mixdown)
    {
        // stereo + front left/right of center
        case HB_AMIXDOWN_5_2_LFE:
            return ((layout & AV_CH_FRONT_LEFT_OF_CENTER) &&
                    (layout & AV_CH_FRONT_RIGHT_OF_CENTER) &&
                    (layout & AV_CH_LAYOUT_STEREO) == AV_CH_LAYOUT_STEREO);
        // 7.0 or better
        case HB_AMIXDOWN_7POINT1:
            return ((layout & AV_CH_LAYOUT_7POINT0) == AV_CH_LAYOUT_7POINT0);
        // 6.0 or better
        case HB_AMIXDOWN_6POINT1:
            return ((layout & AV_CH_LAYOUT_7POINT0) == AV_CH_LAYOUT_7POINT0 ||
                    (layout & AV_CH_LAYOUT_6POINT0) == AV_CH_LAYOUT_6POINT0 ||
                    (layout & AV_CH_LAYOUT_HEXAGONAL) == AV_CH_LAYOUT_HEXAGONAL);
        // stereo + either of front center, side or back left/right, back center
        case HB_AMIXDOWN_5POINT1:
            return ((layout & AV_CH_LAYOUT_2_1) == AV_CH_LAYOUT_2_1 ||
                    (layout & AV_CH_LAYOUT_2_2) == AV_CH_LAYOUT_2_2 ||
                    (layout & AV_CH_LAYOUT_QUAD) == AV_CH_LAYOUT_QUAD ||
                    (layout & AV_CH_LAYOUT_SURROUND) == AV_CH_LAYOUT_SURROUND);
        // stereo + either of side or back left/right, back center
        // also, allow Dolby Surround output if the input is already Dolby
        case HB_AMIXDOWN_DOLBY:
        case HB_AMIXDOWN_DOLBYPLII:
            return ((layout & AV_CH_LAYOUT_2_1) == AV_CH_LAYOUT_2_1 ||
                    (layout & AV_CH_LAYOUT_2_2) == AV_CH_LAYOUT_2_2 ||
                    (layout & AV_CH_LAYOUT_QUAD) == AV_CH_LAYOUT_QUAD ||
                    (layout == AV_CH_LAYOUT_STEREO_DOWNMIX &&
                     mixdown == HB_AMIXDOWN_DOLBY));
        // more than 1 channel
        case HB_AMIXDOWN_STEREO:
            return (av_get_channel_layout_nb_channels(layout) > 1);
        // regular stereo (not Dolby)
        case HB_AMIXDOWN_LEFT:
        case HB_AMIXDOWN_RIGHT:
            return (layout == AV_CH_LAYOUT_STEREO);
        // mono remix always supported
        // HB_AMIXDOWN_NONE always supported (for Passthru)
        case HB_AMIXDOWN_MONO:
        case HB_AMIXDOWN_NONE:
            return 1;
        // unknown mixdown, should never happen
        default:
            return 0;
    }
}
/*
 * Total number of discrete output channels (including LFE) for a
 * mixdown; 0 for "None" (passthru), 2 for anything unrecognized.
 */
int hb_mixdown_get_discrete_channel_count(int amixdown)
{
    if (amixdown == HB_AMIXDOWN_5_2_LFE ||
        amixdown == HB_AMIXDOWN_7POINT1)
    {
        return 8;
    }
    if (amixdown == HB_AMIXDOWN_6POINT1)
    {
        return 7;
    }
    if (amixdown == HB_AMIXDOWN_5POINT1)
    {
        return 6;
    }
    if (amixdown == HB_AMIXDOWN_MONO ||
        amixdown == HB_AMIXDOWN_LEFT ||
        amixdown == HB_AMIXDOWN_RIGHT)
    {
        return 1;
    }
    if (amixdown == HB_AMIXDOWN_NONE)
    {
        return 0;
    }
    return 2;
}
// 1 when the mixdown carries an LFE channel, 0 otherwise.
int hb_mixdown_get_low_freq_channel_count(int amixdown)
{
    return (amixdown == HB_AMIXDOWN_5POINT1 ||
            amixdown == HB_AMIXDOWN_6POINT1 ||
            amixdown == HB_AMIXDOWN_7POINT1 ||
            amixdown == HB_AMIXDOWN_5_2_LFE) ? 1 : 0;
}
/*
 * Best supported mixdown for a codec/layout pair that does not exceed
 * the requested mixdown. Pass HB_INVALID_AMIXDOWN as the request to
 * get the highest supported mixdown; returns HB_INVALID_AMIXDOWN when
 * nothing qualifies.
 */
int hb_mixdown_get_best(uint32_t codec, uint64_t layout, int mixdown)
{
    // Passthru, only "None" mixdown is supported
    if (codec & HB_ACODEC_PASS_FLAG)
        return HB_AMIXDOWN_NONE;
    int best_mixdown = HB_INVALID_AMIXDOWN;
    // prime the iterator with the first list item so the loop below
    // starts at the second — this deliberately skips the "None" entry
    const hb_mixdown_t *audio_mixdown = hb_mixdown_get_next(NULL);
    // test all non-None mixdowns while the value is <= the requested mixdown
    // HB_INVALID_AMIXDOWN means the highest supported mixdown was requested
    while ((audio_mixdown = hb_mixdown_get_next(audio_mixdown)) != NULL)
    {
        if ((mixdown == HB_INVALID_AMIXDOWN || audio_mixdown->amixdown <= mixdown) &&
            (hb_mixdown_is_supported(audio_mixdown->amixdown, codec, layout)))
        {
            best_mixdown = audio_mixdown->amixdown;
        }
    }
    return best_mixdown;
}
/*
 * Default mixdown for a codec/layout combination: pick a per-codec
 * ceiling, then return the best supported mixdown at or below it.
 */
int hb_mixdown_get_default(uint32_t codec, uint64_t layout)
{
    int ceiling;
    if (codec == HB_ACODEC_FFFLAC || codec == HB_ACODEC_FFFLAC24)
    {
        // the FLAC encoder defaults to the best mixdown up to 7.1
        ceiling = HB_AMIXDOWN_7POINT1;
    }
    else if (codec == HB_ACODEC_AC3)
    {
        // the AC3 encoder defaults to the best mixdown up to 5.1
        ceiling = HB_AMIXDOWN_5POINT1;
    }
    else
    {
        // other encoders default to the best mixdown up to DPLII
        ceiling = HB_AMIXDOWN_DOLBYPLII;
    }
    // return the best available mixdown up to the selected default
    return hb_mixdown_get_best(codec, layout, ceiling);
}
/*
 * Look up a mixdown by display or short name (case-insensitive);
 * returns HB_INVALID_AMIXDOWN when nothing matches.
 */
int hb_mixdown_get_from_name(const char *name)
{
    if (name != NULL && *name != '\0')
    {
        int i;
        for (i = 0; i < hb_audio_mixdowns_count; i++)
        {
            if (!strcasecmp(hb_audio_mixdowns[i].item.name, name) ||
                !strcasecmp(hb_audio_mixdowns[i].item.short_name, name))
            {
                return hb_audio_mixdowns[i].item.amixdown;
            }
        }
    }
    return HB_INVALID_AMIXDOWN;
}
// Display name for a mixdown value; NULL when out of range or unknown.
const char* hb_mixdown_get_name(int mixdown)
{
    if (mixdown >= hb_audio_mixdowns_first_item->amixdown &&
        mixdown <= hb_audio_mixdowns_last_item ->amixdown)
    {
        const hb_mixdown_t *m;
        for (m = hb_mixdown_get_next(NULL); m != NULL;
             m = hb_mixdown_get_next(m))
        {
            if (m->amixdown == mixdown)
            {
                return m->name;
            }
        }
    }
    return NULL;
}
// Short (machine-friendly) name for a mixdown value; NULL when
// out of range or unknown.
const char* hb_mixdown_get_short_name(int mixdown)
{
    if (mixdown >= hb_audio_mixdowns_first_item->amixdown &&
        mixdown <= hb_audio_mixdowns_last_item ->amixdown)
    {
        const hb_mixdown_t *m;
        for (m = hb_mixdown_get_next(NULL); m != NULL;
             m = hb_mixdown_get_next(m))
        {
            if (m->amixdown == mixdown)
            {
                return m->short_name;
            }
        }
    }
    return NULL;
}
// Canonicalize a user-supplied mixdown name: resolves long or short
// names (case-insensitively) and returns the official display name,
// or NULL when the input matches no known mixdown.
const char* hb_mixdown_sanitize_name(const char *name)
{
    return hb_mixdown_get_name(hb_mixdown_get_from_name(name));
}
// Iterate the mixdown list; pass NULL to get the first item,
// returns NULL after the last one.
const hb_mixdown_t* hb_mixdown_get_next(const hb_mixdown_t *last)
{
    return (last == NULL) ? hb_audio_mixdowns_first_item
                          : ((hb_mixdown_internal_t*)last)->next;
}
/*
 * First video encoder that can be muxed into the given container;
 * 0 when the muxer is invalid or nothing matches.
 */
int hb_video_encoder_get_default(int muxer)
{
    if (muxer & HB_MUX_MASK)
    {
        const hb_encoder_t *enc;
        for (enc = hb_video_encoder_get_next(NULL); enc != NULL;
             enc = hb_video_encoder_get_next(enc))
        {
            if (enc->muxers & muxer)
            {
                return enc->codec;
            }
        }
    }
    return 0;
}
/*
 * Look up a video encoder by long or short name (case-insensitive);
 * returns 0 when nothing matches.
 */
int hb_video_encoder_get_from_name(const char *name)
{
    if (name != NULL && *name != '\0')
    {
        int i;
        for (i = 0; i < hb_video_encoders_count; i++)
        {
            if (!strcasecmp(hb_video_encoders[i].item.name, name) ||
                !strcasecmp(hb_video_encoders[i].item.short_name, name))
            {
                return hb_video_encoders[i].item.codec;
            }
        }
    }
    return 0;
}
// Display name for a video encoder; NULL for invalid/unknown codecs.
const char* hb_video_encoder_get_name(int encoder)
{
    if (encoder & HB_VCODEC_MASK)
    {
        const hb_encoder_t *enc;
        for (enc = hb_video_encoder_get_next(NULL); enc != NULL;
             enc = hb_video_encoder_get_next(enc))
        {
            if (enc->codec == encoder)
            {
                return enc->name;
            }
        }
    }
    return NULL;
}
// Short name for a video encoder; NULL for invalid/unknown codecs.
const char* hb_video_encoder_get_short_name(int encoder)
{
    if (encoder & HB_VCODEC_MASK)
    {
        const hb_encoder_t *enc;
        for (enc = hb_video_encoder_get_next(NULL); enc != NULL;
             enc = hb_video_encoder_get_next(enc))
        {
            if (enc->codec == encoder)
            {
                return enc->short_name;
            }
        }
    }
    return NULL;
}
// Long (descriptive) name for a video encoder; NULL for
// invalid/unknown codecs.
const char* hb_video_encoder_get_long_name(int encoder)
{
    if (encoder & HB_VCODEC_MASK)
    {
        const hb_encoder_t *enc;
        for (enc = hb_video_encoder_get_next(NULL); enc != NULL;
             enc = hb_video_encoder_get_next(enc))
        {
            if (enc->codec == encoder)
            {
                return enc->long_name;
            }
        }
    }
    return NULL;
}
// Canonicalize a user-supplied video encoder name: resolves long or
// short names and returns the official display name, or NULL when the
// input matches no known encoder.
const char* hb_video_encoder_sanitize_name(const char *name)
{
    return hb_video_encoder_get_name(hb_video_encoder_get_from_name(name));
}
// Iterate the video encoder list; pass NULL to get the first item,
// returns NULL after the last one.
const hb_encoder_t* hb_video_encoder_get_next(const hb_encoder_t *last)
{
    return (last == NULL) ? hb_video_encoders_first_item
                          : ((hb_encoder_internal_t*)last)->next;
}
// for a valid passthru, return the matching encoder for that codec
// (if any), else return 0 (i.e. drop the track)
int hb_audio_encoder_get_fallback_for_passthru(int passthru)
{
    int gid;
    const hb_encoder_t *enc;
    // map the passthru codec to the group id of its encoder family
    if (passthru == HB_ACODEC_AAC_PASS)
    {
        gid = HB_GID_ACODEC_AAC;
    }
    else if (passthru == HB_ACODEC_AC3_PASS)
    {
        gid = HB_GID_ACODEC_AC3;
    }
    else if (passthru == HB_ACODEC_MP3_PASS)
    {
        gid = HB_GID_ACODEC_MP3;
    }
    else
    {
        gid = HB_GID_NONE; // will never match an enabled encoder
    }
    for (enc = hb_audio_encoder_get_next(NULL); enc != NULL;
         enc = hb_audio_encoder_get_next(enc))
    {
        if (((hb_encoder_internal_t*)enc)->gid == gid)
        {
            return enc->codec;
        }
    }
    // passthru tracks are often the second audio from the same source track
    // if we don't have an encoder matching the passthru codec, return 0
    // dropping the track, as well as ensuring that there is at least one
    // audio track in the output is then up to the UIs
    return 0;
}
/*
 * Default audio encoder for a container: the first non-passthru
 * encoder supporting the muxer, with one special case — prefer LAME
 * over the low-end AAC encoder when targeting MKV. Returns 0 for an
 * invalid muxer.
 */
int hb_audio_encoder_get_default(int muxer)
{
    int codec = 0;
    if (muxer & HB_MUX_MASK)
    {
        const hb_encoder_t *enc;
        for (enc = hb_audio_encoder_get_next(NULL); enc != NULL;
             enc = hb_audio_encoder_get_next(enc))
        {
            // default encoder should not be passthru
            if ((enc->muxers & muxer) &&
                (enc->codec & HB_ACODEC_PASS_FLAG) == 0)
            {
                codec = enc->codec;
                break;
            }
        }
        // Lame is better than our low-end AAC encoders
        // if the container is MKV, use the former
        // AAC is still used when the container is MP4 (for better compatibility)
        if (codec == HB_ACODEC_FFAAC && (muxer & HB_MUX_MASK_MKV) == muxer)
        {
            codec = HB_ACODEC_LAME;
        }
    }
    return codec;
}
/*
 * Look up an audio encoder by long or short name (case-insensitive);
 * returns 0 when nothing matches.
 */
int hb_audio_encoder_get_from_name(const char *name)
{
    if (name != NULL && *name != '\0')
    {
        int i;
        for (i = 0; i < hb_audio_encoders_count; i++)
        {
            if (!strcasecmp(hb_audio_encoders[i].item.name, name) ||
                !strcasecmp(hb_audio_encoders[i].item.short_name, name))
            {
                return hb_audio_encoders[i].item.codec;
            }
        }
    }
    return 0;
}
// Display name for an audio encoder; NULL for invalid/unknown codecs.
const char* hb_audio_encoder_get_name(int encoder)
{
    if (encoder & HB_ACODEC_ANY)
    {
        const hb_encoder_t *enc;
        for (enc = hb_audio_encoder_get_next(NULL); enc != NULL;
             enc = hb_audio_encoder_get_next(enc))
        {
            if (enc->codec == encoder)
            {
                return enc->name;
            }
        }
    }
    return NULL;
}
// Short name for an audio encoder; NULL for invalid/unknown codecs.
const char* hb_audio_encoder_get_short_name(int encoder)
{
    if (encoder & HB_ACODEC_ANY)
    {
        const hb_encoder_t *enc;
        for (enc = hb_audio_encoder_get_next(NULL); enc != NULL;
             enc = hb_audio_encoder_get_next(enc))
        {
            if (enc->codec == encoder)
            {
                return enc->short_name;
            }
        }
    }
    return NULL;
}
// Long (descriptive) name for an audio encoder; NULL for
// invalid/unknown codecs.
const char* hb_audio_encoder_get_long_name(int encoder)
{
    if (encoder & HB_ACODEC_ANY)
    {
        const hb_encoder_t *enc;
        for (enc = hb_audio_encoder_get_next(NULL); enc != NULL;
             enc = hb_audio_encoder_get_next(enc))
        {
            if (enc->codec == encoder)
            {
                return enc->long_name;
            }
        }
    }
    return NULL;
}
// Canonicalize a user-supplied audio encoder name: resolves long or
// short names and returns the official display name, or NULL when the
// input matches no known encoder.
const char* hb_audio_encoder_sanitize_name(const char *name)
{
    return hb_audio_encoder_get_name(hb_audio_encoder_get_from_name(name));
}
// Iterate the audio encoder list; pass NULL to get the first item,
// returns NULL after the last one.
const hb_encoder_t* hb_audio_encoder_get_next(const hb_encoder_t *last)
{
    return (last == NULL) ? hb_audio_encoders_first_item
                          : ((hb_encoder_internal_t*)last)->next;
}
/*
 * Resolve every HB_ACODEC_AUTO_PASS audio track in the job: pick the
 * actual passthru or fallback encoder, sanitize the fallback's output
 * settings (mixdown, samplerate, bitrate/quality, compression), and
 * drop tracks for which neither passthru nor a valid fallback exists.
 * Also renumbers out.track so output tracks stay sequential.
 */
void hb_autopassthru_apply_settings(hb_job_t *job)
{
    hb_audio_t *audio;
    int i, already_printed;
    // note: i is only advanced at the bottom of the loop; a removed
    // track does `continue` without incrementing it
    for (i = already_printed = 0; i < hb_list_count(job->list_audio);)
    {
        audio = hb_list_item(job->list_audio, i);
        if (audio->config.out.codec == HB_ACODEC_AUTO_PASS)
        {
            // print the passthru settings once, before the first resolution
            if (!already_printed)
                hb_autopassthru_print_settings(job);
            already_printed = 1;
            audio->config.out.codec = hb_autopassthru_get_encoder(audio->config.in.codec,
                                                                  job->acodec_copy_mask,
                                                                  job->acodec_fallback,
                                                                  job->mux);
            // neither a passthru nor a real encoder: drop the track
            if (!(audio->config.out.codec & HB_ACODEC_PASS_FLAG) &&
                !(audio->config.out.codec & HB_ACODEC_MASK))
            {
                hb_log("Auto Passthru: passthru not possible and no valid fallback specified, dropping track %d",
                       audio->config.out.track );
                hb_list_rem(job->list_audio, audio);
                hb_audio_close(&audio);
                continue;
            }
            if (!(audio->config.out.codec & HB_ACODEC_PASS_FLAG))
            {
                if (audio->config.out.codec == job->acodec_fallback)
                {
                    hb_log("Auto Passthru: passthru not possible for track %d, using fallback",
                           audio->config.out.track);
                }
                else
                {
                    hb_log("Auto Passthru: passthru and fallback not possible for track %d, using default encoder",
                           audio->config.out.track);
                }
                // sanitize mixdown: default when unset, clamp otherwise
                if (audio->config.out.mixdown <= 0)
                {
                    audio->config.out.mixdown =
                        hb_mixdown_get_default(audio->config.out.codec,
                                               audio->config.in.channel_layout);
                }
                else
                {
                    audio->config.out.mixdown =
                        hb_mixdown_get_best(audio->config.out.codec,
                                            audio->config.in.channel_layout,
                                            audio->config.out.mixdown);
                }
                // sanitize samplerate: inherit from the source when unset,
                // then clamp to what the encoder supports
                if (audio->config.out.samplerate <= 0)
                    audio->config.out.samplerate = audio->config.in.samplerate;
                audio->config.out.samplerate =
                    hb_audio_samplerate_get_best(audio->config.out.codec,
                                                 audio->config.out.samplerate,
                                                 NULL);
                // does this encoder support quality-based encoding at all?
                int quality_not_allowed =
                    hb_audio_quality_get_default(audio->config.out.codec)
                    == HB_INVALID_AUDIO_QUALITY;
                if (audio->config.out.bitrate > 0)
                {
                    // Use best bitrate
                    audio->config.out.bitrate =
                        hb_audio_bitrate_get_best(audio->config.out.codec,
                                                  audio->config.out.bitrate,
                                                  audio->config.out.samplerate,
                                                  audio->config.out.mixdown);
                }
                else if (quality_not_allowed ||
                         audio->config.out.quality != HB_INVALID_AUDIO_QUALITY)
                {
                    // Use default bitrate
                    audio->config.out.bitrate =
                        hb_audio_bitrate_get_default(audio->config.out.codec,
                                                     audio->config.out.samplerate,
                                                     audio->config.out.mixdown);
                }
                else
                {
                    // Use best quality
                    audio->config.out.quality =
                        hb_audio_quality_get_best(audio->config.out.codec,
                                                  audio->config.out.quality);
                }
                // sanitize compression: default when unset, clamp otherwise
                if (audio->config.out.compression_level < 0)
                {
                    audio->config.out.compression_level =
                        hb_audio_compression_get_default(
                            audio->config.out.codec);
                }
                else
                {
                    audio->config.out.compression_level =
                        hb_audio_compression_get_best(audio->config.out.codec,
                                                      audio->config.out.compression_level);
                }
            }
            else
            {
                // passthru selected: just log which encoder will be used
                const hb_encoder_t *audio_encoder = NULL;
                while ((audio_encoder = hb_audio_encoder_get_next(audio_encoder)) != NULL)
                {
                    if (audio_encoder->codec == audio->config.out.codec)
                    {
                        hb_log("Auto Passthru: using %s for track %d",
                               audio_encoder->name,
                               audio->config.out.track);
                        break;
                    }
                }
            }
        }
        /* Adjust output track number, in case we removed one.
         * Output tracks sadly still need to be in sequential order.
         * Note: out.track starts at 1, i starts at 0 */
        audio->config.out.track = ++i;
    }
}
/*
 * Log the Auto Passthru configuration: the list of codecs allowed for
 * passthru (from job->acodec_copy_mask) and the fallback encoder
 * (job->acodec_fallback), if any.
 */
void hb_autopassthru_print_settings(hb_job_t *job)
{
    char *mask = NULL, *tmp;
    const char *fallback = NULL;
    const hb_encoder_t *audio_encoder = NULL;
    while ((audio_encoder = hb_audio_encoder_get_next(audio_encoder)) != NULL)
    {
        if ((audio_encoder->codec & HB_ACODEC_PASS_FLAG) &&
            (audio_encoder->codec != HB_ACODEC_AUTO_PASS) &&
            (audio_encoder->codec & job->acodec_copy_mask))
        {
            // separator before every entry but the first
            if (mask != NULL)
            {
                tmp = hb_strncat_dup(mask, ", ", 2);
                if (tmp != NULL)
                {
                    free(mask);
                    mask = tmp;
                }
            }
            // passthru name without " Passthru" (9 characters)
            tmp = hb_strncat_dup(mask, audio_encoder->name,
                                 strlen(audio_encoder->name) - 9);
            if (tmp != NULL)
            {
                free(mask);
                mask = tmp;
            }
        }
        else if ((audio_encoder->codec & HB_ACODEC_PASS_FLAG) == 0 &&
                 (audio_encoder->codec == job->acodec_fallback))
        {
            fallback = audio_encoder->name;
        }
    }
    if (mask == NULL)
        hb_log("Auto Passthru: no codecs allowed");
    else
        hb_log("Auto Passthru: allowed codecs are %s", mask);
    if (fallback == NULL)
        hb_log("Auto Passthru: no valid fallback specified");
    else
        hb_log("Auto Passthru: fallback is %s", fallback);
    // fix: mask (allocated by hb_strncat_dup) was previously leaked
    free(mask);
}
/*
 * Given an input codec, the passthru copy mask, a fallback encoder and
 * the output muxer, pick the output codec: the matching passthru when
 * it is allowed by the mask and supported by the muxer, otherwise the
 * (muxer-sanitized) fallback.
 */
int hb_autopassthru_get_encoder(int in_codec, int copy_mask, int fallback,
                                int muxer)
{
    int i = 0;
    const hb_encoder_t *audio_encoder = NULL;
    // candidate passthru: input codec restricted to the copy mask
    int out_codec = (copy_mask & in_codec) | HB_ACODEC_PASS_FLAG;
    // sanitize fallback encoder and selected passthru
    // note: invalid fallbacks are caught in hb_autopassthru_apply_settings
    while ((audio_encoder = hb_audio_encoder_get_next(audio_encoder)) != NULL)
    {
        if (audio_encoder->codec == out_codec)
        {
            i++;
            // passthru not supported by this muxer: invalidate it
            if (!(audio_encoder->muxers & muxer))
                out_codec = 0;
        }
        else if (audio_encoder->codec == fallback)
        {
            i++;
            // fallback not supported by this muxer: use the muxer default
            if (!(audio_encoder->muxers & muxer))
                fallback = hb_audio_encoder_get_default(muxer);
        }
        // both passthru and fallback checked: stop early
        if (i > 1)
        {
            break;
        }
    }
    return (out_codec & HB_ACODEC_PASS_MASK) ? out_codec : fallback;
}
/*
 * Look up a container by long or short name (case-insensitive);
 * returns 0 when nothing matches.
 */
int hb_container_get_from_name(const char *name)
{
    if (name != NULL && *name != '\0')
    {
        int i;
        for (i = 0; i < hb_containers_count; i++)
        {
            if (!strcasecmp(hb_containers[i].item.name, name) ||
                !strcasecmp(hb_containers[i].item.short_name, name))
            {
                return hb_containers[i].item.format;
            }
        }
    }
    return 0;
}
/*
 * Look up a container by its default file extension
 * (case-insensitive); returns 0 when nothing matches.
 */
int hb_container_get_from_extension(const char *extension)
{
    if (extension != NULL && *extension != '\0')
    {
        int i;
        for (i = 0; i < hb_containers_count; i++)
        {
            if (!strcasecmp(hb_containers[i].item.default_extension, extension))
            {
                return hb_containers[i].item.format;
            }
        }
    }
    return 0;
}
// Display name for a container format; NULL for invalid/unknown ones.
const char* hb_container_get_name(int format)
{
    if (format & HB_MUX_MASK)
    {
        const hb_container_t *c;
        for (c = hb_container_get_next(NULL); c != NULL;
             c = hb_container_get_next(c))
        {
            if (c->format == format)
            {
                return c->name;
            }
        }
    }
    return NULL;
}
// Short name for a container format; NULL for invalid/unknown ones.
const char* hb_container_get_short_name(int format)
{
    if (format & HB_MUX_MASK)
    {
        const hb_container_t *c;
        for (c = hb_container_get_next(NULL); c != NULL;
             c = hb_container_get_next(c))
        {
            if (c->format == format)
            {
                return c->short_name;
            }
        }
    }
    return NULL;
}
// Long (descriptive) name for a container format; NULL for
// invalid/unknown ones.
const char* hb_container_get_long_name(int format)
{
    if (format & HB_MUX_MASK)
    {
        const hb_container_t *c;
        for (c = hb_container_get_next(NULL); c != NULL;
             c = hb_container_get_next(c))
        {
            if (c->format == format)
            {
                return c->long_name;
            }
        }
    }
    return NULL;
}
// Default file extension for a container format; NULL for
// invalid/unknown ones.
const char* hb_container_get_default_extension(int format)
{
    if (format & HB_MUX_MASK)
    {
        const hb_container_t *c;
        for (c = hb_container_get_next(NULL); c != NULL;
             c = hb_container_get_next(c))
        {
            if (c->format == format)
            {
                return c->default_extension;
            }
        }
    }
    return NULL;
}
// Canonicalize a user-supplied container name: resolves long or short
// names and returns the official display name, or NULL when the input
// matches no known container.
const char* hb_container_sanitize_name(const char *name)
{
    return hb_container_get_name(hb_container_get_from_name(name));
}
// Iterate the container list; pass NULL to get the first item,
// returns NULL after the last one.
const hb_container_t* hb_container_get_next(const hb_container_t *last)
{
    return (last == NULL) ? hb_containers_first_item
                          : ((hb_container_internal_t*)last)->next;
}
/**********************************************************************
 * hb_reduce
 **********************************************************************
 * Given a numerator (num) and a denominator (den), reduce them to an
 * equivalent fraction and store the result in x and y.
 *********************************************************************/
void hb_reduce( int *x, int *y, int num, int den )
{
    // Euclid's algorithm: n ends up holding gcd(num, den)
    int n, d, r;
    for ( n = num, d = den; d != 0; n = d, d = r )
    {
        r = n % d;
    }
    // divide the gcd out when non-zero; a gcd of 0 (num == den == 0)
    // passes the inputs through unchanged
    if ( n )
    {
        *x = num / n;
        *y = den / n;
    }
    else
    {
        *x = num;
        *y = den;
    }
}
/**********************************************************************
 * hb_reduce64
 **********************************************************************
 * Given a numerator (num) and a denominator (den), reduce them to an
 * equivalent fraction and store the result in x and y.
 *********************************************************************/
void hb_reduce64( int64_t *x, int64_t *y, int64_t num, int64_t den )
{
    // Euclid's algorithm: n ends up holding gcd(num, den)
    int64_t n, d, r;
    for ( n = num, d = den; d != 0; n = d, d = r )
    {
        r = n % d;
    }
    // divide the gcd out when non-zero; a gcd of 0 (num == den == 0)
    // passes the inputs through unchanged
    if ( n )
    {
        num /= n;
        den /= n;
    }
    *x = num;
    *y = den;
}
/*
 * Reduce num/den and, when either term is still >= limit, scale both
 * proportionally so the larger one equals limit (the other is scaled
 * via double arithmetic). Results are stored in *x and *y.
 */
void hb_limit_rational64( int64_t *x, int64_t *y, int64_t num, int64_t den, int64_t limit )
{
    hb_reduce64( &num, &den, num, den );
    if ( num >= limit || den >= limit )
    {
        // scale the larger term down to the limit and the other by
        // the same ratio
        if ( num > den )
        {
            double div = (double)limit / num;
            num = limit;
            den *= div;
        }
        else
        {
            double div = (double)limit / den;
            den = limit;
            num *= div;
        }
    }
    *x = num;
    *y = den;
}
/**********************************************************************
* hb_list implementation
**********************************************************************
* Basic and slow, but enough for what we need
*********************************************************************/
#define HB_LIST_DEFAULT_SIZE 20
// Growable array of opaque pointers; capacity is extended in
// HB_LIST_DEFAULT_SIZE increments by hb_list_add/hb_list_insert.
struct hb_list_s
{
    /* Pointers to items in the list */
    void ** items;
    /* How many (void *) allocated in 'items' */
    int items_alloc;
    /* How many valid pointers in 'items' */
    int items_count;
};
/**********************************************************************
 * hb_list_init
 **********************************************************************
 * Allocates an empty list ready for HB_LIST_DEFAULT_SIZE items
 *********************************************************************/
hb_list_t * hb_list_init()
{
    hb_list_t * l = calloc( 1, sizeof( hb_list_t ) );
    l->items       = calloc( HB_LIST_DEFAULT_SIZE, sizeof( void * ) );
    l->items_alloc = HB_LIST_DEFAULT_SIZE;
    return l;
}
/**********************************************************************
 * hb_list_count
 **********************************************************************
 * Returns the number of items currently in the list.
 * l must not be NULL.
 *********************************************************************/
int hb_list_count( const hb_list_t * l )
{
    return l->items_count;
}
/**********************************************************************
 * hb_list_add
 **********************************************************************
 * Adds an item at the end of the list, making it bigger if necessary.
 * Can safely be called with a NULL pointer to add, it will be ignored.
 *********************************************************************/
void hb_list_add( hb_list_t * l, void * p )
{
    if( !p )
    {
        return;
    }
    if( l->items_count == l->items_alloc )
    {
        /* We need a bigger boat */
        void ** tmp = realloc( l->items,
                               ( l->items_alloc + HB_LIST_DEFAULT_SIZE ) *
                               sizeof( void * ) );
        if( !tmp )
        {
            /* fix: the old code overwrote l->items with realloc's
             * return value, leaking the array and crashing on a NULL
             * deref if the allocation failed; keep the list intact
             * and drop the item instead */
            return;
        }
        l->items        = tmp;
        l->items_alloc += HB_LIST_DEFAULT_SIZE;
    }
    l->items[l->items_count] = p;
    (l->items_count)++;
}
/**********************************************************************
 * hb_list_insert
 **********************************************************************
 * Adds an item at the specified position in the list, making it
 * bigger if necessary.
 * Can safely be called with a NULL pointer to add, it will be ignored.
 *********************************************************************/
void hb_list_insert( hb_list_t * l, int pos, void * p )
{
    if( !p )
    {
        return;
    }
    if( l->items_count == l->items_alloc )
    {
        /* We need a bigger boat */
        void ** tmp = realloc( l->items,
                               ( l->items_alloc + HB_LIST_DEFAULT_SIZE ) *
                               sizeof( void * ) );
        if( !tmp )
        {
            /* fix: the old code overwrote l->items with realloc's
             * return value, leaking the array and crashing on a NULL
             * deref if the allocation failed; keep the list intact
             * and drop the item instead */
            return;
        }
        l->items        = tmp;
        l->items_alloc += HB_LIST_DEFAULT_SIZE;
    }
    if ( l->items_count != pos )
    {
        /* Shift all items after it sizeof( void * ) bytes earlier */
        memmove( &l->items[pos+1], &l->items[pos],
                 ( l->items_count - pos ) * sizeof( void * ) );
    }
    l->items[pos] = p;
    (l->items_count)++;
}
/**********************************************************************
 * hb_list_rem
 **********************************************************************
 * Remove an item from the list. Bad things will happen if called
 * with a NULL pointer or if the item is not in the list.
 *********************************************************************/
void hb_list_rem( hb_list_t * l, void * p )
{
    int i;
    /* Find the item, then close the gap by shifting the tail down */
    for( i = 0; i < l->items_count; i++ )
    {
        if( l->items[i] != p )
        {
            continue;
        }
        memmove( &l->items[i], &l->items[i+1],
                 ( l->items_count - i - 1 ) * sizeof( void * ) );
        l->items_count--;
        break;
    }
}
/**********************************************************************
 * hb_list_item
 **********************************************************************
 * Returns item at position i, or NULL if there are not that many
 * items in the list
 *********************************************************************/
void * hb_list_item( const hb_list_t * l, int i )
{
    return ( i >= 0 && i < l->items_count ) ? l->items[i] : NULL;
}
/**********************************************************************
 * hb_list_bytes
 **********************************************************************
 * Assuming all items are of type hb_buffer_t, returns the total
 * number of unread bytes (size - offset) across the list
 *********************************************************************/
int hb_list_bytes( hb_list_t * l )
{
    int total = 0;
    int i;
    for( i = 0; i < hb_list_count( l ); i++ )
    {
        hb_buffer_t * buf = hb_list_item( l, i );
        total += buf->size - buf->offset;
    }
    return total;
}
/**********************************************************************
 * hb_list_seebytes
 **********************************************************************
 * Assuming all items are of type hb_buffer_t, copy <size> bytes from
 * the list to <dst>, keeping the list unmodified.
 * NOTE(review): assumes the list holds at least <size> unread bytes —
 * runs off the end of the list otherwise (same as the original).
 *********************************************************************/
void hb_list_seebytes( hb_list_t * l, uint8_t * dst, int size )
{
    int copied = 0;
    int i      = 0;
    while( copied < size )
    {
        hb_buffer_t * buf   = hb_list_item( l, i++ );
        int           chunk = MIN( buf->size - buf->offset, size - copied );
        memcpy( &dst[copied], &buf->data[buf->offset], chunk );
        copied += chunk;
    }
}
/**********************************************************************
 * hb_list_getbytes
 **********************************************************************
 * Assuming all items are of type hb_buffer_t, copy <size> bytes from
 * the list to <dst>. What's copied is removed from the list.
 * The variable pointed by <pts> is set to the PTS of the buffer the
 * first byte has been got from.
 * The variable pointed by <pos> is set to the position of that byte
 * in that buffer.
 *********************************************************************/
void hb_list_getbytes( hb_list_t * l, uint8_t * dst, int size,
                       uint64_t * pts, uint64_t * pos )
{
    hb_buffer_t * buf;
    int copied;
    int copying;
    uint8_t has_pts;
    /* So we won't have to deal with NULL pointers */
    uint64_t dummy1, dummy2;
    if( !pts ) pts = &dummy1;
    if( !pos ) pos = &dummy2;
    for( copied = 0, has_pts = 0; copied < size; )
    {
        // always consume from the head buffer; it is removed from the
        // list once fully drained
        buf = hb_list_item( l, 0 );
        copying = MIN( buf->size - buf->offset, size - copied );
        memcpy( &dst[copied], &buf->data[buf->offset], copying );
        if( !has_pts )
        {
            // record where the very first copied byte came from
            *pts = buf->s.start;
            *pos = buf->offset;
            has_pts = 1;
        }
        buf->offset += copying;
        if( buf->offset >= buf->size )
        {
            // head buffer fully consumed: drop and free it
            hb_list_rem( l, buf );
            hb_buffer_close( &buf );
        }
        copied += copying;
    }
}
/**********************************************************************
 * hb_list_empty
 **********************************************************************
 * Assuming all items are of type hb_buffer_t, close them all and
 * close the list.
 *********************************************************************/
void hb_list_empty( hb_list_t ** _l )
{
    hb_buffer_t * b;
    while( ( b = hb_list_item( *_l, 0 ) ) != NULL )
    {
        hb_list_rem( *_l, b );
        hb_buffer_close( &b );
    }
    hb_list_close( _l );
}
/**********************************************************************
 * hb_list_close
 **********************************************************************
 * Free memory allocated by hb_list_init. Does NOT free contents of
 * items still in the list. Safe to call on an already-closed (NULL)
 * list pointer.
 *********************************************************************/
void hb_list_close( hb_list_t ** _l )
{
    hb_list_t * l = *_l;
    // fix: guard against double-close / never-initialized lists
    if( l == NULL )
    {
        return;
    }
    free( l->items );
    free( l );
    *_l = NULL;
}
// Verbosity threshold read by hb_valog(): messages whose level exceeds
// this value are suppressed. Necessary for hb_deep_log
int global_verbosity_level;
/**********************************************************************
 * hb_valog
 **********************************************************************
 * If verbose mode is on (HB_DEBUG set and level <= verbosity), print
 * the message to stderr with a timestamp. Messages longer than 360
 * characters are stripped ;p
 *********************************************************************/
void hb_valog( hb_debug_level_t level, const char * prefix, const char * log, va_list args)
{
    char string[362]; /* 360 chars + \n + \0 */
    time_t _now;
    struct tm * now;
    // logging is enabled only when the HB_DEBUG env var is set
    if( !getenv( "HB_DEBUG" ) )
    {
        /* We don't want to print it */
        return;
    }
    if( global_verbosity_level < level )
    {
        /* Hiding message */
        return;
    }
    /* Get the time */
    _now = time( NULL );
    now  = localtime( &_now );
    if ( prefix && *prefix )
    {
        // limit the prefix length (timestamp + prefix capped at 39 chars)
        snprintf( string, 40, "[%02d:%02d:%02d] %s ",
                  now->tm_hour, now->tm_min, now->tm_sec, prefix );
    }
    else
    {
        sprintf( string, "[%02d:%02d:%02d] ",
                 now->tm_hour, now->tm_min, now->tm_sec );
    }
    int end = strlen( string );
    /* Convert the message to a string, leaving room for the \n */
    vsnprintf( string + end, 361 - end, log, args );
    /* Add the end of line */
    strcat( string, "\n" );
#ifdef SYS_MINGW
    wchar_t wstring[2*362]; /* 360 chars + \n + \0 */
    // Convert internal utf8 to "console output code page".
    //
    // This is just bizarre windows behavior. You would expect that
    // printf would automatically convert a wide character string to
    // the current "console output code page" when using the "%ls" format
    // specifier. But it doesn't... so we must do it.
    if (!MultiByteToWideChar(CP_UTF8, 0, string, -1, wstring, sizeof(wstring)))
        return;
    if (!WideCharToMultiByte(GetConsoleOutputCP(), 0, wstring, -1, string, sizeof(string), NULL, NULL))
        return;
#endif
    /* Print it */
    fprintf( stderr, "%s", string );
}
/**********************************************************************
 * hb_log
 **********************************************************************
 * If verbose mode is on, print message with timestamp. Messages
 * longer than 360 characters are stripped ;p
 *********************************************************************/
void hb_log( char * log, ... )
{
    va_list args;
    va_start( args, log );
    // level 0: printed whenever HB_DEBUG is set (see hb_valog)
    hb_valog( 0, NULL, log, args );
    va_end( args );
}
/**********************************************************************
 * hb_deep_log
 **********************************************************************
 * If verbose mode is >= level, print message with timestamp. Messages
 * longer than 360 characters are stripped ;p
 *********************************************************************/
void hb_deep_log( hb_debug_level_t level, char * log, ... )
{
    va_list args;
    va_start( args, log );
    // suppressed when global_verbosity_level < level (see hb_valog)
    hb_valog( level, NULL, log, args );
    va_end( args );
}
/**********************************************************************
 * hb_error
 **********************************************************************
 * Using whatever output is available display this error.
 * Identical consecutive errors within one second are coalesced into a
 * "Last error repeated N times" message.
 *********************************************************************/
void hb_error( char * log, ... )
{
    char string[181]; /* 180 chars + \0 */
    char rep_string[181];
    // static state for collapsing repeated errors; protected by the
    // lazily-created mutex below
    static char last_string[181];
    static int last_error_count = 0;
    static uint64_t last_series_error_time = 0;
    static hb_lock_t *mutex = 0;
    va_list args;
    uint64_t time_now;
    /* Convert the message to a string */
    va_start( args, log );
    vsnprintf( string, 180, log, args );
    va_end( args );
    // NOTE(review): lazy init is racy if two threads reach the first
    // call concurrently — presumed single-threaded at first use; confirm
    if( !mutex )
    {
        mutex = hb_lock_init();
    }
    hb_lock( mutex );
    time_now = hb_get_date();
    if( strcmp( string, last_string) == 0 )
    {
        /*
         * The last error and this one are the same, don't log it
         * just count it instead, unless it was more than one second
         * ago.
         */
        last_error_count++;
        if( last_series_error_time + ( 1000 * 1 ) > time_now )
        {
            hb_unlock( mutex );
            return;
        }
    }
    /*
     * A new error, or the same one more than 1 second since the last
     * one: did we have any of the same counted up?
     */
    if( last_error_count > 0 )
    {
        /*
         * Print out the last error to ensure context for the last
         * repeated message.
         */
        if( error_handler )
        {
            error_handler( last_string );
        } else {
            hb_log( "%s", last_string );
        }
        if( last_error_count > 1 )
        {
            /*
             * Only print out the repeat message for more than 2 of the
             * same, since we just printed out two of them already.
             */
            snprintf( rep_string, 180, "Last error repeated %d times",
                      last_error_count - 1 );
            if( error_handler )
            {
                error_handler( rep_string );
            } else {
                hb_log( "%s", rep_string );
            }
        }
        last_error_count = 0;
    }
    last_series_error_time = time_now;
    strcpy( last_string, string );
    /*
     * Got the error in a single string, send it off to be dispatched.
     */
    if( error_handler )
    {
        error_handler( string );
    } else {
        hb_log( "%s", string );
    }
    hb_unlock( mutex );
}
// Install a callback that receives all hb_error() messages; passing NULL
// reverts to the default hb_log() output path.
void hb_register_error_handler( hb_error_handler_t * handler )
{
    error_handler = handler;
}
static void hb_update_str( char **dst, const char *src )
{
if ( dst )
{
free( *dst );
*dst = NULL;
if ( src )
{
*dst = strdup( src );
}
}
}
/**********************************************************************
* hb_title_init
**********************************************************************
*
*********************************************************************/
/*
 * Allocate and initialize a title with defaults (MPEG-2 video decode,
 * square pixels, one angle). Caller frees with hb_title_close().
 */
hb_title_t * hb_title_init( char * path, int index )
{
    hb_title_t * t;

    t = calloc( sizeof( hb_title_t ), 1 );

    t->index         = index;
    t->playlist      = -1;
    t->list_audio    = hb_list_init();
    t->list_chapter  = hb_list_init();
    t->list_subtitle = hb_list_init();
    t->list_attachment = hb_list_init();
    t->metadata      = hb_metadata_init();
    /* Bounded copy: the previous strcat() could overflow t->path when
     * given an overly long source path. */
    snprintf( t->path, sizeof( t->path ), "%s", path );
    // default to decoding mpeg2
    t->video_id      = 0xE0;
    t->video_codec   = WORK_DECAVCODECV;
    t->video_codec_param = AV_CODEC_ID_MPEG2VIDEO;
    t->angle_count   = 1;
    t->pixel_aspect_width = 1;
    t->pixel_aspect_height = 1;

    return t;
}
/**********************************************************************
* hb_title_close
**********************************************************************
*
*********************************************************************/
/*
 * Free a title and everything it owns, then clear the caller's pointer.
 * NULL-guarded for consistency with the other *_close() helpers here
 * (hb_job_close, hb_chapter_close, ...), which previously it was not.
 */
void hb_title_close( hb_title_t ** _t )
{
    if ( _t == NULL || *_t == NULL )
        return;

    hb_title_t * t = *_t;
    hb_audio_t * audio;
    hb_chapter_t * chapter;
    hb_subtitle_t * subtitle;
    hb_attachment_t * attachment;

    while( ( chapter = hb_list_item( t->list_chapter, 0 ) ) )
    {
        hb_list_rem( t->list_chapter, chapter );
        hb_chapter_close( &chapter );
    }
    hb_list_close( &t->list_chapter );

    while( ( audio = hb_list_item( t->list_audio, 0 ) ) )
    {
        hb_list_rem( t->list_audio, audio );
        hb_audio_close( &audio );
    }
    hb_list_close( &t->list_audio );

    while( ( subtitle = hb_list_item( t->list_subtitle, 0 ) ) )
    {
        hb_list_rem( t->list_subtitle, subtitle );
        hb_subtitle_close( &subtitle );
    }
    hb_list_close( &t->list_subtitle );

    while( ( attachment = hb_list_item( t->list_attachment, 0 ) ) )
    {
        hb_list_rem( t->list_attachment, attachment );
        hb_attachment_close( &attachment );
    }
    hb_list_close( &t->list_attachment );

    hb_metadata_close( &t->metadata );

    free( t->video_codec_name );
    free( t->container_name );

#if defined(HB_TITLE_JOBS)
    hb_job_close( &t->job );
#endif

    free( t );
    *_t = NULL;
}
// The mac ui expects certain fields of the job struct to be cleaned up
// and others to remain untouched.
// e.g. picture settings like cropping, width, height, should remain untouched.
//
// So only initialize job elements that we know get set up by prepareJob and
// prepareJobForPreview.
//
// This should all get resolved in some future mac ui refactoring.
/*
 * Re-initialize only the job fields that prepareJob/prepareJobForPreview
 * set up (see the comment block above). Picture settings (crop, width,
 * height, anamorphic) are deliberately left untouched for the Mac UI.
 */
static void job_reset_for_mac_ui( hb_job_t * job, hb_title_t * title )
{
    if ( job == NULL || title == NULL )
        return;

    job->title = title;

    /* Set defaults settings */
    job->chapter_start = 1;
    job->chapter_end   = hb_list_count( title->list_chapter );
    job->list_chapter  = hb_chapter_list_copy( title->list_chapter );

    /* Default video encoder settings; mirrors job_setup() below. */
    job->vcodec     = HB_VCODEC_FFMPEG_MPEG4;
    job->vquality   = -1.0;
    job->vbitrate   = 1000;
    job->pass       = 0;
    job->vrate      = title->rate;
    job->vrate_base = title->rate_base;

    /* Fresh (empty) track/filter lists; attachments and metadata are
     * copied from the title. */
    job->list_audio      = hb_list_init();
    job->list_subtitle   = hb_list_init();
    job->list_filter     = hb_list_init();

    job->list_attachment = hb_attachment_list_copy( title->list_attachment );
    job->metadata        = hb_metadata_copy( title->metadata );
}
/*
 * Populate a freshly allocated job with defaults derived from title:
 * chapters, autocrop, anamorphic geometry, MPEG-4/MP4 encode defaults,
 * and copies of the title's attachments and metadata.
 */
static void job_setup( hb_job_t * job, hb_title_t * title )
{
    if ( job == NULL || title == NULL )
        return;

    job->title = title;

    /* Set defaults settings */
    job->chapter_start = 1;
    job->chapter_end   = hb_list_count( title->list_chapter );
    job->list_chapter  = hb_chapter_list_copy( title->list_chapter );

    /* Autocrop by default. Gnark gnark */
    memcpy( job->crop, title->crop, 4 * sizeof( int ) );

    /* Preserve a source's pixel aspect, if it's available. */
    if( title->pixel_aspect_width && title->pixel_aspect_height )
    {
        job->anamorphic.par_width  = title->pixel_aspect_width;
        job->anamorphic.par_height = title->pixel_aspect_height;
    }

    /* Otherwise derive the PAR from the container aspect, skipping
     * square/unknown aspect ratios. */
    if( title->aspect != 0 && title->aspect != 1. &&
        !job->anamorphic.par_width && !job->anamorphic.par_height)
    {
        hb_reduce( &job->anamorphic.par_width, &job->anamorphic.par_height,
                   (int)(title->aspect * title->height + 0.5), title->width );
    }

    /* Cropped dimensions first; hb_set_anamorphic_size() then refines
     * width/height/PAR together (order matters). */
    job->width  = title->width  - job->crop[2] - job->crop[3];
    job->height = title->height - job->crop[0] - job->crop[1];
    job->anamorphic.keep_display_aspect = 1;

    int width, height, par_width, par_height;
    hb_set_anamorphic_size(job, &width, &height, &par_width, &par_height);
    job->width  = width;
    job->height = height;
    job->anamorphic.par_width  = par_width;
    job->anamorphic.par_height = par_height;

    job->vcodec     = HB_VCODEC_FFMPEG_MPEG4;
    job->vquality   = -1.0;
    job->vbitrate   = 1000;
    job->pass       = 0;
    job->vrate      = title->rate;
    job->vrate_base = title->rate_base;

    job->mux = HB_MUX_MP4;

    job->list_audio      = hb_list_init();
    job->list_subtitle   = hb_list_init();
    job->list_filter     = hb_list_init();

    job->list_attachment = hb_attachment_list_copy( title->list_attachment );
    job->metadata        = hb_metadata_copy( title->metadata );

#ifdef USE_QSV
    job->qsv.enc_info.is_init_done = 0;
    job->qsv.async_depth           = AV_QSV_ASYNC_DEPTH_DEFAULT;
    job->qsv.decode                = !!(title->video_decode_support &
                                        HB_DECODE_SUPPORT_QSV);
#endif
}
/*
 * Release everything a job owns (strings, chapter/audio/subtitle/filter/
 * attachment lists, metadata). The job struct itself is NOT freed; that
 * is hb_job_close()'s responsibility.
 */
static void job_clean( hb_job_t * job )
{
    if (job)
    {
        hb_chapter_t *chapter;
        hb_audio_t *audio;
        hb_subtitle_t *subtitle;
        hb_filter_object_t *filter;
        hb_attachment_t *attachment;

        /* Owned encoder option strings; NULLed so job_reset can reuse
         * the struct safely. */
        free(job->encoder_preset);
        job->encoder_preset = NULL;
        free(job->encoder_tune);
        job->encoder_tune = NULL;
        free(job->encoder_options);
        job->encoder_options = NULL;
        free(job->encoder_profile);
        job->encoder_profile = NULL;
        free(job->encoder_level);
        job->encoder_level = NULL;
        free(job->file);
        job->file = NULL;

        // clean up chapter list
        while( ( chapter = hb_list_item( job->list_chapter, 0 ) ) )
        {
            hb_list_rem( job->list_chapter, chapter );
            hb_chapter_close( &chapter );
        }
        hb_list_close( &job->list_chapter );

        // clean up audio list
        while( ( audio = hb_list_item( job->list_audio, 0 ) ) )
        {
            hb_list_rem( job->list_audio, audio );
            hb_audio_close( &audio );
        }
        hb_list_close( &job->list_audio );

        // clean up subtitle list
        while( ( subtitle = hb_list_item( job->list_subtitle, 0 ) ) )
        {
            hb_list_rem( job->list_subtitle, subtitle );
            hb_subtitle_close( &subtitle );
        }
        hb_list_close( &job->list_subtitle );

        // clean up filter list
        while( ( filter = hb_list_item( job->list_filter, 0 ) ) )
        {
            hb_list_rem( job->list_filter, filter );
            hb_filter_close( &filter );
        }
        hb_list_close( &job->list_filter );

        // clean up attachment list
        while( ( attachment = hb_list_item( job->list_attachment, 0 ) ) )
        {
            hb_list_rem( job->list_attachment, attachment );
            hb_attachment_close( &attachment );
        }
        hb_list_close( &job->list_attachment );

        // clean up metadata
        hb_metadata_close( &job->metadata );
    }
}
/*
 * Look up a scanned title by its 1-based title->index.
 * Returns NULL when no title in the set carries that index.
 */
hb_title_t * hb_find_title_by_index( hb_handle_t *h, int title_index )
{
    hb_title_set_t *title_set = hb_get_title_set( h );
    int count = hb_list_count( title_set->list_title );

    for ( int i = 0; i < count; i++ )
    {
        hb_title_t *title = hb_list_item( title_set->list_title, i );
        if ( title->index == title_index )
        {
            return title;
        }
    }
    return NULL;
}
/*
* Create a pristine job structure from a title
* title_index is 1 based
*/
/*
 * Build a pristine job for the title at 1-based position title_index in
 * the scanned title set; NULL when the position is out of range.
 */
hb_job_t * hb_job_init_by_index( hb_handle_t * h, int title_index )
{
    hb_title_set_t *title_set = hb_get_title_set( h );
    return hb_job_init( hb_list_item( title_set->list_title,
                                      title_index - 1 ) );
}
/*
 * Allocate a zeroed job and fill it with defaults from title.
 * Returns NULL when title is NULL. Free with hb_job_close().
 */
hb_job_t * hb_job_init( hb_title_t * title )
{
    if ( title == NULL )
        return NULL;

    hb_job_t *job = calloc( 1, sizeof( hb_job_t ) );
    job_setup( job, title );
    return job;
}
/**
* Clean up the job structure so that is is ready for setting up a new job.
* Should be called by front-ends after hb_add().
*/
/**********************************************************************
* hb_job_reset
**********************************************************************
*
*********************************************************************/
/*
 * Clear a job's owned resources and re-apply the Mac-UI-safe defaults,
 * keeping the same title association. No-op for a NULL job.
 */
void hb_job_reset( hb_job_t * job )
{
    if ( job == NULL )
        return;

    hb_title_t *title = job->title;
    job_clean( job );
    job_reset_for_mac_ui( job, title );
}
/**********************************************************************
* hb_job_close
**********************************************************************
*
*********************************************************************/
/*
 * Free a job (and everything it owns) and clear the caller's pointer.
 * Bug fix: the original assigned `_j = NULL` (the local parameter),
 * leaving the caller's pointer dangling after free; it must be `*_j`.
 */
void hb_job_close( hb_job_t ** _j )
{
    if (_j && *_j)
    {
        job_clean(*_j);
        free( *_j );
        *_j = NULL;
    }
}
// Replace the job's encoder preset string (NULL clears it).
void hb_job_set_encoder_preset(hb_job_t *job, const char *preset)
{
    if (job == NULL)
        return;
    hb_update_str(&job->encoder_preset, preset);
}
// Replace the job's encoder tune string (NULL clears it).
void hb_job_set_encoder_tune(hb_job_t *job, const char *tune)
{
    if (job == NULL)
        return;
    hb_update_str(&job->encoder_tune, tune);
}
// Replace the job's encoder options string (NULL clears it).
void hb_job_set_encoder_options(hb_job_t *job, const char *options)
{
    if (job == NULL)
        return;
    hb_update_str(&job->encoder_options, options);
}
// Replace the job's encoder profile string (NULL clears it).
void hb_job_set_encoder_profile(hb_job_t *job, const char *profile)
{
    if (job == NULL)
        return;
    hb_update_str(&job->encoder_profile, profile);
}
// Replace the job's encoder level string (NULL clears it).
void hb_job_set_encoder_level(hb_job_t *job, const char *level)
{
    if (job == NULL)
        return;
    hb_update_str(&job->encoder_level, level);
}
// Replace the job's output file path (NULL clears it).
void hb_job_set_file(hb_job_t *job, const char *file)
{
    if (job == NULL)
        return;
    hb_update_str(&job->file, file);
}
/*
 * Duplicate a filter object, deep-copying its settings string so the
 * copy owns independent storage. Returns NULL for a NULL input.
 */
hb_filter_object_t * hb_filter_copy( hb_filter_object_t * filter )
{
    if ( filter == NULL )
        return NULL;

    hb_filter_object_t *dup = malloc( sizeof( *dup ) );
    memcpy( dup, filter, sizeof( *dup ) );
    dup->settings = filter->settings ? strdup( filter->settings )
                                     : NULL;
    return dup;
}
/**********************************************************************
* hb_filter_list_copy
**********************************************************************
*
*********************************************************************/
/*
 * Deep-copy a list of filters into a freshly allocated list.
 * A NULL source yields a valid empty list.
 */
hb_list_t *hb_filter_list_copy(const hb_list_t *src)
{
    hb_list_t *dst = hb_list_init();

    if ( src == NULL )
        return dst;

    int count = hb_list_count( src );
    for ( int i = 0; i < count; i++ )
    {
        hb_filter_object_t *filter = hb_list_item( src, i );
        if ( filter != NULL )
        {
            hb_list_add( dst, hb_filter_copy( filter ) );
        }
    }
    return dst;
}
/**
* Gets a filter object with the given type
* @param filter_id The type of filter to get.
* @returns The requested filter object.
*/
hb_filter_object_t * hb_filter_init( int filter_id )
{
    /* Select the statically allocated filter template matching filter_id;
     * a heap copy of it is returned so callers own (and must close) the
     * result. Unknown ids yield NULL (hb_filter_copy passes NULL through). */
    hb_filter_object_t * filter;
    switch( filter_id )
    {
        case HB_FILTER_DETELECINE:
            filter = &hb_filter_detelecine;
            break;

        case HB_FILTER_DECOMB:
            filter = &hb_filter_decomb;
            break;

        case HB_FILTER_DEINTERLACE:
            filter = &hb_filter_deinterlace;
            break;

        case HB_FILTER_VFR:
            filter = &hb_filter_vfr;
            break;

        case HB_FILTER_DEBLOCK:
            filter = &hb_filter_deblock;
            break;

        case HB_FILTER_DENOISE:
            filter = &hb_filter_denoise;
            break;

        case HB_FILTER_NLMEANS:
            filter = &hb_filter_nlmeans;
            break;

        case HB_FILTER_RENDER_SUB:
            filter = &hb_filter_render_sub;
            break;

        case HB_FILTER_CROP_SCALE:
            filter = &hb_filter_crop_scale;
            break;

        case HB_FILTER_ROTATE:
            filter = &hb_filter_rotate;
            break;

#ifdef USE_QSV
        /* QSV filters only exist in Intel QuickSync builds. */
        case HB_FILTER_QSV:
            filter = &hb_filter_qsv;
            break;

        case HB_FILTER_QSV_PRE:
            filter = &hb_filter_qsv_pre;
            break;

        case HB_FILTER_QSV_POST:
            filter = &hb_filter_qsv_post;
            break;
#endif

        default:
            filter = NULL;
            break;
    }
    return hb_filter_copy( filter );
}
/**********************************************************************
* hb_filter_close
**********************************************************************
*
*********************************************************************/
/*
 * Free a filter copy made by hb_filter_copy()/hb_filter_init() and clear
 * the caller's pointer. Now NULL-guarded like the other *_close()
 * helpers in this file; the redundant `if` before free() is gone
 * (free(NULL) is a no-op).
 */
void hb_filter_close( hb_filter_object_t ** _f )
{
    if ( _f == NULL || *_f == NULL )
        return;

    hb_filter_object_t * f = *_f;
    free( f->settings );
    free( f );
    *_f = NULL;
}
/**********************************************************************
* hb_chapter_copy
**********************************************************************
*
*********************************************************************/
/*
 * Duplicate a chapter, deep-copying its title string.
 * Returns NULL for a NULL input.
 */
hb_chapter_t *hb_chapter_copy(const hb_chapter_t *src)
{
    if ( src == NULL )
        return NULL;

    hb_chapter_t *dup = calloc( 1, sizeof( *dup ) );
    memcpy( dup, src, sizeof( *dup ) );
    if ( src->title != NULL )
    {
        dup->title = strdup( src->title );
    }
    return dup;
}
/**********************************************************************
* hb_chapter_list_copy
**********************************************************************
*
*********************************************************************/
/*
 * Deep-copy a chapter list; a NULL source yields a valid empty list.
 */
hb_list_t *hb_chapter_list_copy(const hb_list_t *src)
{
    hb_list_t *dst = hb_list_init();

    if ( src == NULL )
        return dst;

    int count = hb_list_count( src );
    for ( int i = 0; i < count; i++ )
    {
        hb_chapter_t *chapter = hb_list_item( src, i );
        if ( chapter != NULL )
        {
            hb_list_add( dst, hb_chapter_copy( chapter ) );
        }
    }
    return dst;
}
/**********************************************************************
* hb_chapter_close
**********************************************************************
*
*********************************************************************/
/*
 * Free a chapter (including its title string) and clear the caller's
 * pointer. Safe on NULL.
 */
void hb_chapter_close(hb_chapter_t **chap)
{
    if ( chap == NULL || *chap == NULL )
        return;

    free( (*chap)->title );
    free( *chap );
    *chap = NULL;
}
/**********************************************************************
* hb_chapter_set_title
**********************************************************************
*
*********************************************************************/
// Replace a chapter's title string (NULL clears it); no-op on NULL chapter.
void hb_chapter_set_title( hb_chapter_t *chapter, const char *title )
{
    if ( chapter == NULL )
        return;
    hb_update_str( &chapter->title, title );
}
/**********************************************************************
* hb_chapter_set_title_by_index
**********************************************************************
 * Sets the title of one chapter in the job's chapter list.
 * @param job Handle to hb_job_t.
 * @param chapter_index The chapter to apply the name to (1-based).
 * @param title The title to apply.
 *********************************************************************/
void hb_chapter_set_title_by_index( hb_job_t * job, int chapter_index, const char * title )
{
    // chapter_index is 1-based; hb_chapter_set_title tolerates a NULL
    // chapter, so an out-of-range index is a silent no-op.
    hb_chapter_set_title( hb_list_item( job->list_chapter,
                                        chapter_index - 1 ),
                          title );
}
/**********************************************************************
* hb_audio_copy
**********************************************************************
*
*********************************************************************/
/*
 * Duplicate an audio track, deep-copying the output track name.
 * Returns NULL for a NULL input.
 */
hb_audio_t *hb_audio_copy(const hb_audio_t *src)
{
    if ( src == NULL )
        return NULL;

    hb_audio_t *dup = calloc( 1, sizeof( *dup ) );
    memcpy( dup, src, sizeof( *dup ) );
    if ( src->config.out.name != NULL )
    {
        dup->config.out.name = strdup( src->config.out.name );
    }
    return dup;
}
/**********************************************************************
* hb_audio_list_copy
**********************************************************************
*
*********************************************************************/
/*
 * Deep-copy an audio track list; a NULL source yields a valid empty list.
 */
hb_list_t *hb_audio_list_copy(const hb_list_t *src)
{
    hb_list_t *dst = hb_list_init();

    if ( src == NULL )
        return dst;

    int count = hb_list_count( src );
    for ( int i = 0; i < count; i++ )
    {
        hb_audio_t *audio = hb_list_item( src, i );
        if ( audio != NULL )
        {
            hb_list_add( dst, hb_audio_copy( audio ) );
        }
    }
    return dst;
}
/**********************************************************************
* hb_audio_close
**********************************************************************
*
*********************************************************************/
/*
 * Free an audio track (including its output name) and clear the caller's
 * pointer. Safe on NULL.
 */
void hb_audio_close( hb_audio_t **audio )
{
    if ( audio == NULL || *audio == NULL )
        return;

    free( (*audio)->config.out.name );
    free( *audio );
    *audio = NULL;
}
/**********************************************************************
 * hb_audio_config_init
**********************************************************************
*
*********************************************************************/
/*
 * Reset an audio configuration to its "unset" state: input fields get
 * sentinel values (0 / -1) that hb_audio_add() later checks to detect
 * an uninitialized config; output fields get sensible encode defaults.
 */
void hb_audio_config_init(hb_audio_config_t * audiocfg)
{
    /* Set read-only paramaters to invalid values */
    audiocfg->in.codec = 0;
    audiocfg->in.codec_param = 0;
    audiocfg->in.reg_desc = 0;
    audiocfg->in.stream_type = 0;
    audiocfg->in.substream_type = 0;
    audiocfg->in.version = 0;
    audiocfg->in.flags = 0;
    audiocfg->in.mode = 0;
    audiocfg->in.samplerate = -1;
    audiocfg->in.samples_per_frame = -1;
    audiocfg->in.bitrate = -1;
    audiocfg->in.matrix_encoding = AV_MATRIX_ENCODING_NONE;
    audiocfg->in.channel_layout = 0;
    audiocfg->in.channel_map = NULL;
    /* Empty language strings until the scan fills them in. */
    audiocfg->lang.description[0] = 0;
    audiocfg->lang.simple[0] = 0;
    audiocfg->lang.iso639_2[0] = 0;

    /* Initalize some sensible defaults */
    audiocfg->in.track = audiocfg->out.track = 0;
    audiocfg->out.codec = hb_audio_encoder_get_default(HB_MUX_MP4); // default container
    audiocfg->out.samplerate = -1;
    audiocfg->out.samples_per_frame = -1;
    audiocfg->out.bitrate = -1;
    audiocfg->out.quality = HB_INVALID_AUDIO_QUALITY;
    audiocfg->out.compression_level = -1;
    audiocfg->out.mixdown = HB_INVALID_AMIXDOWN;
    audiocfg->out.dynamic_range_compression = 0;
    audiocfg->out.gain = 0;
    audiocfg->out.normalize_mix_level = 0;
    audiocfg->out.dither_method = hb_audio_dither_get_default();
    audiocfg->out.name = NULL;
}
/**********************************************************************
* hb_audio_add
**********************************************************************
*
*********************************************************************/
/*
 * Append a copy of the title audio track selected by audiocfg->in.track
 * to job->list_audio, configured from audiocfg. Passthrough codecs copy
 * the input parameters; encoders take the requested output parameters.
 * Returns 1 on success, 0 on failure.
 */
int hb_audio_add(const hb_job_t * job, const hb_audio_config_t * audiocfg)
{
    hb_title_t *title = job->title;
    hb_audio_t *audio;

    audio = hb_audio_copy( hb_list_item( title->list_audio, audiocfg->in.track ) );
    if( audio == NULL )
    {
        /* We fail! */
        return 0;
    }

    /* Sanity check that hb_audio_config_init() was run on audiocfg: its
     * sentinel values must still be present.
     * NOTE(review): the 0xDEADBEEF comparison looks like a legacy
     * sentinel; hb_audio_config_init() sets in.codec to 0 -- confirm. */
    if( (audiocfg->in.bitrate != -1) && (audiocfg->in.codec != 0xDEADBEEF) )
    {
        /* This most likely means the client didn't call hb_audio_config_init
         * so bail. */
        return 0;
    }

    /* Set the job's "in track" to the value passed in audiocfg.
     * HandBrakeCLI assumes this value is preserved in the jobs
     * audio list, but in.track in the title's audio list is not
     * required to be the same. */
    audio->config.in.track = audiocfg->in.track;

    /* Really shouldn't ignore the passed out track, but there is currently no
     * way to handle duplicates or out-of-order track numbers. */
    audio->config.out.track = hb_list_count(job->list_audio) + 1;
    audio->config.out.codec = audiocfg->out.codec;
    if((audiocfg->out.codec & HB_ACODEC_PASS_FLAG) &&
       ((audiocfg->out.codec == HB_ACODEC_AUTO_PASS) ||
        (audiocfg->out.codec & audio->config.in.codec & HB_ACODEC_PASS_MASK)))
    {
        /* Pass-through, copy from input. */
        audio->config.out.samplerate = audio->config.in.samplerate;
        audio->config.out.bitrate = audio->config.in.bitrate;
        audio->config.out.mixdown = HB_AMIXDOWN_NONE;
        audio->config.out.dynamic_range_compression = 0;
        audio->config.out.gain = 0;
        audio->config.out.normalize_mix_level = 0;
        audio->config.out.compression_level = -1;
        audio->config.out.quality = HB_INVALID_AUDIO_QUALITY;
        audio->config.out.dither_method = hb_audio_dither_get_default();
    }
    else
    {
        /* Non pass-through, use what is given. */
        audio->config.out.codec &= ~HB_ACODEC_PASS_FLAG;
        audio->config.out.samplerate = audiocfg->out.samplerate;
        audio->config.out.bitrate = audiocfg->out.bitrate;
        audio->config.out.compression_level = audiocfg->out.compression_level;
        audio->config.out.quality = audiocfg->out.quality;
        audio->config.out.dynamic_range_compression = audiocfg->out.dynamic_range_compression;
        audio->config.out.mixdown = audiocfg->out.mixdown;
        audio->config.out.gain = audiocfg->out.gain;
        audio->config.out.normalize_mix_level = audiocfg->out.normalize_mix_level;
        audio->config.out.dither_method = audiocfg->out.dither_method;
    }
    /* Only keep a non-empty custom track name. */
    if (audiocfg->out.name && *audiocfg->out.name)
    {
        audio->config.out.name = strdup(audiocfg->out.name);
    }

    hb_list_add(job->list_audio, audio);
    return 1;
}
// Return a pointer to the config of the i-th audio track in list,
// or NULL when the index is out of range.
hb_audio_config_t * hb_list_audio_config_item(hb_list_t * list, int i)
{
    hb_audio_t *audio = hb_list_item( list, i );
    return audio ? &audio->config : NULL;
}
/**********************************************************************
* hb_subtitle_copy
**********************************************************************
*
*********************************************************************/
/*
 * Duplicate a subtitle track, deep-copying its extradata blob.
 * Returns NULL for a NULL input.
 */
hb_subtitle_t *hb_subtitle_copy(const hb_subtitle_t *src)
{
    if ( src == NULL )
        return NULL;

    hb_subtitle_t *dup = calloc( 1, sizeof( *dup ) );
    memcpy( dup, src, sizeof( *dup ) );
    if ( src->extradata != NULL )
    {
        dup->extradata = malloc( src->extradata_size );
        memcpy( dup->extradata, src->extradata, src->extradata_size );
    }
    return dup;
}
/**********************************************************************
* hb_subtitle_list_copy
**********************************************************************
*
*********************************************************************/
/*
 * Deep-copy a subtitle list; a NULL source yields a valid empty list.
 */
hb_list_t *hb_subtitle_list_copy(const hb_list_t *src)
{
    hb_list_t *dst = hb_list_init();

    if ( src == NULL )
        return dst;

    int count = hb_list_count( src );
    for ( int i = 0; i < count; i++ )
    {
        hb_subtitle_t *subtitle = hb_list_item( src, i );
        if ( subtitle != NULL )
        {
            hb_list_add( dst, hb_subtitle_copy( subtitle ) );
        }
    }
    return dst;
}
/**********************************************************************
* hb_subtitle_close
**********************************************************************
*
*********************************************************************/
/*
 * Free a subtitle track (including its extradata) and clear the caller's
 * pointer. Safe on NULL.
 */
void hb_subtitle_close( hb_subtitle_t **sub )
{
    if ( sub == NULL || *sub == NULL )
        return;

    free( (*sub)->extradata );
    free( *sub );
    *sub = NULL;
}
/**********************************************************************
* hb_subtitle_add
**********************************************************************
*
*********************************************************************/
/*
 * Build an SSA script header (SRT subtitles are carried internally as
 * SSA) sized for a w x h video and store it in subtitle->extradata.
 * Returns 1 on success, 0 on allocation failure.
 * Bug fix: on failure the original left extradata_size at its old value
 * while extradata was NULL (or pointed at freed memory) -- both are now
 * reset before the new header is generated.
 */
int hb_subtitle_add_ssa_header(hb_subtitle_t *subtitle, int w, int h)
{
    // Free any pre-existing extradata and keep size consistent with it.
    free(subtitle->extradata);
    subtitle->extradata = NULL;
    subtitle->extradata_size = 0;

    int fs = h * .066;  // default font size ~6.6% of the frame height

    // SRT subtitles are represented internally as SSA
    // Create an SSA header
    const char * ssa_header =
        "[Script Info]\r\n"
        "ScriptType: v4.00+\r\n"
        "Collisions: Normal\r\n"
        "PlayResX: %d\r\n"
        "PlayResY: %d\r\n"
        "Timer: 100.0\r\n"
        "WrapStyle: 0\r\n"
        "\r\n"
        "[V4+ Styles]\r\n"
        "Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding\r\n"
        "Style: Default,Arial,%d,&H00FFFFFF,&H00FFFFFF,&H000F0F0F,&H000F0F0F,0,0,0,0,100,100,0,0.00,1,2,3,2,20,20,20,0\r\n";

    subtitle->extradata = (uint8_t*)hb_strdup_printf(ssa_header, w, h, fs);
    if (subtitle->extradata == NULL)
    {
        hb_error("hb_subtitle_add_ssa_header: malloc failed");
        return 0;
    }
    subtitle->extradata_size = strlen((char*)subtitle->extradata) + 1;

    return 1;
}
/*
 * Append a copy of the title subtitle at the given 0-based track index to
 * job->list_subtitle, configured from subtitlecfg.
 * Returns 1 on success, 0 when the source track does not exist.
 */
int hb_subtitle_add(const hb_job_t * job, const hb_subtitle_config_t * subtitlecfg, int track)
{
    hb_subtitle_t *subtitle =
        hb_subtitle_copy( hb_list_item( job->title->list_subtitle, track ) );

    if ( subtitle == NULL )
    {
        /* We fail! */
        return 0;
    }

    subtitle->config    = *subtitlecfg;
    subtitle->out_track = hb_list_count( job->list_subtitle ) + 1;
    hb_list_add( job->list_subtitle, subtitle );
    return 1;
}
/*
 * Append an external SRT subtitle track to the job. The track gets a
 * synthesized stream id and falls back to the undetermined language
 * ("und") when the supplied ISO-639 code is unknown.
 * Returns 1 on success, 0 on allocation failure.
 */
int hb_srt_add( const hb_job_t * job,
                const hb_subtitle_config_t * subtitlecfg,
                const char *lang )
{
    hb_subtitle_t *subtitle = calloc( 1, sizeof( *subtitle ) );
    if (subtitle == NULL)
    {
        hb_error("hb_srt_add: malloc failed");
        return 0;
    }

    // Synthesize an id unique among the job's subtitle tracks.
    subtitle->id     = (hb_list_count(job->list_subtitle) << 8) | 0xFF;
    subtitle->format = TEXTSUB;
    subtitle->source = SRTSUB;
    subtitle->codec  = WORK_DECSRTSUB;

    iso639_lang_t *language = lang_for_code2( lang );
    if (language == NULL)
    {
        hb_log("hb_srt_add: unknown language code (%s)", lang);
        language = lang_for_code2("und");
    }
    strcpy(subtitle->lang, language->eng_name);
    strcpy(subtitle->iso639_2, language->iso639_2);

    subtitle->config = *subtitlecfg;
    hb_list_add(job->list_subtitle, subtitle);
    return 1;
}
// True only for VOBSUB and PGSSUB subtitle sources.
int hb_subtitle_can_force( int source )
{
    switch (source)
    {
        case VOBSUB:
        case PGSSUB:
            return 1;
        default:
            return 0;
    }
}
// True for the subtitle sources that support burn-in rendering.
int hb_subtitle_can_burn( int source )
{
    switch (source)
    {
        case VOBSUB:
        case PGSSUB:
        case SSASUB:
        case SRTSUB:
        case CC608SUB:
        case UTF8SUB:
        case TX3GSUB:
            return 1;
        default:
            return 0;
    }
}
int hb_subtitle_can_pass( int source, int mux )
{
switch (mux)
{
case HB_MUX_AV_MKV:
case HB_MUX_LIBMKV:
switch( source )
{
case PGSSUB:
case VOBSUB:
case SSASUB:
case SRTSUB:
case UTF8SUB:
case TX3GSUB:
case CC608SUB:
case CC708SUB:
return 1;
default:
return 0;
} break;
case HB_MUX_MP4V2:
if (source == VOBSUB)
{
return 1;
} // fall through to next case...
case HB_MUX_AV_MP4:
switch( source )
{
case VOBSUB:
case SSASUB:
case SRTSUB:
case UTF8SUB:
case TX3GSUB:
case CC608SUB:
case CC708SUB:
return 1;
default:
return 0;
} break;
default:
// Internal error. Should never get here.
hb_error("internel error. Bad mux %d\n", mux);
return 0;
}
}
/*
 * Whether dynamic range compression can be applied for this
 * codec/encoder combination.
 */
int hb_audio_can_apply_drc(uint32_t codec, uint32_t codec_param, int encoder)
{
    if (encoder & HB_ACODEC_PASS_FLAG)
    {
        // can't apply DRC to passthrough audio
        return 0;
    }
    if (codec & HB_ACODEC_FF_MASK)
    {
        return (codec_param == AV_CODEC_ID_AC3 ||
                codec_param == AV_CODEC_ID_EAC3);
    }
    return codec == HB_ACODEC_AC3;
}
/**********************************************************************
* hb_metadata_init
**********************************************************************
*
*********************************************************************/
hb_metadata_t *hb_metadata_init()
{
hb_metadata_t *metadata = calloc( 1, sizeof(*metadata) );
return metadata;
}
/**********************************************************************
* hb_metadata_copy
**********************************************************************
*
*********************************************************************/
/*
 * Deep-copy a metadata object: every string field is duplicated and the
 * cover art list (if any) is rebuilt entry by entry.
 * Returns NULL for a NULL input.
 */
hb_metadata_t *hb_metadata_copy( const hb_metadata_t *src )
{
    if ( src == NULL )
        return NULL;

    hb_metadata_t *md = calloc( 1, sizeof( *md ) );

    md->name             = src->name             ? strdup( src->name )             : NULL;
    md->artist           = src->artist           ? strdup( src->artist )           : NULL;
    md->album_artist     = src->album_artist     ? strdup( src->album_artist )     : NULL;
    md->composer         = src->composer         ? strdup( src->composer )         : NULL;
    md->release_date     = src->release_date     ? strdup( src->release_date )     : NULL;
    md->comment          = src->comment          ? strdup( src->comment )          : NULL;
    md->album            = src->album            ? strdup( src->album )            : NULL;
    md->genre            = src->genre            ? strdup( src->genre )            : NULL;
    md->description      = src->description      ? strdup( src->description )      : NULL;
    md->long_description = src->long_description ? strdup( src->long_description ) : NULL;

    if ( src->list_coverart )
    {
        int count = hb_list_count( src->list_coverart );
        for ( int ii = 0; ii < count; ii++ )
        {
            hb_coverart_t *art = hb_list_item( src->list_coverart, ii );
            hb_metadata_add_coverart( md, art->data, art->size, art->type );
        }
    }
    return md;
}
/**********************************************************************
* hb_metadata_close
**********************************************************************
*
*********************************************************************/
/*
 * Free a metadata object: all string fields, the cover art list and its
 * entries, then the struct itself. Clears the caller's pointer; safe on
 * NULL.
 */
void hb_metadata_close( hb_metadata_t **_m )
{
    if ( _m == NULL || *_m == NULL )
        return;

    hb_metadata_t *md = *_m;

    free( md->name );
    free( md->artist );
    free( md->composer );
    free( md->release_date );
    free( md->comment );
    free( md->album );
    free( md->album_artist );
    free( md->genre );
    free( md->description );
    free( md->long_description );

    if ( md->list_coverart )
    {
        hb_coverart_t *art;
        while ( ( art = hb_list_item( md->list_coverart, 0 ) ) )
        {
            hb_list_rem( md->list_coverart, art );
            free( art->data );
            free( art );
        }
        hb_list_close( &md->list_coverart );
    }

    free( md );
    *_m = NULL;
}
/**********************************************************************
* hb_metadata_set_*
**********************************************************************
*
*********************************************************************/
// Replace the metadata name/title string (NULL clears it).
void hb_metadata_set_name( hb_metadata_t *metadata, const char *name )
{
    if ( metadata == NULL )
        return;
    hb_update_str( &metadata->name, name );
}
// Replace the metadata artist string (NULL clears it).
void hb_metadata_set_artist( hb_metadata_t *metadata, const char *artist )
{
    if ( metadata == NULL )
        return;
    hb_update_str( &metadata->artist, artist );
}
// Replace the metadata composer string (NULL clears it).
void hb_metadata_set_composer( hb_metadata_t *metadata, const char *composer )
{
    if ( metadata == NULL )
        return;
    hb_update_str( &metadata->composer, composer );
}
// Replace the metadata release-date string (NULL clears it).
void hb_metadata_set_release_date( hb_metadata_t *metadata, const char *release_date )
{
    if ( metadata == NULL )
        return;
    hb_update_str( &metadata->release_date, release_date );
}
// Replace the metadata comment string (NULL clears it).
void hb_metadata_set_comment( hb_metadata_t *metadata, const char *comment )
{
    if ( metadata == NULL )
        return;
    hb_update_str( &metadata->comment, comment );
}
// Replace the metadata genre string (NULL clears it).
void hb_metadata_set_genre( hb_metadata_t *metadata, const char *genre )
{
    if ( metadata == NULL )
        return;
    hb_update_str( &metadata->genre, genre );
}
// Replace the metadata album string (NULL clears it).
void hb_metadata_set_album( hb_metadata_t *metadata, const char *album )
{
    if ( metadata == NULL )
        return;
    hb_update_str( &metadata->album, album );
}
// Replace the metadata album-artist string (NULL clears it).
void hb_metadata_set_album_artist( hb_metadata_t *metadata, const char *album_artist )
{
    if ( metadata == NULL )
        return;
    hb_update_str( &metadata->album_artist, album_artist );
}
// Replace the metadata description string (NULL clears it).
void hb_metadata_set_description( hb_metadata_t *metadata, const char *description )
{
    if ( metadata == NULL )
        return;
    hb_update_str( &metadata->description, description );
}
// Replace the metadata long-description string (NULL clears it).
void hb_metadata_set_long_description( hb_metadata_t *metadata, const char *long_description )
{
    if ( metadata == NULL )
        return;
    hb_update_str( &metadata->long_description, long_description );
}
/*
 * Append a copy of a cover art image to the metadata's cover art list,
 * creating the list on first use.
 * Fixes: a NULL data pointer or non-positive size previously reached
 * malloc/memcpy (undefined behavior), and allocation failures were
 * unchecked; such calls are now rejected/unwound cleanly.
 */
void hb_metadata_add_coverart( hb_metadata_t *metadata, const uint8_t *data, int size, int type )
{
    if ( metadata == NULL || data == NULL || size <= 0 )
        return;

    if ( metadata->list_coverart == NULL )
    {
        metadata->list_coverart = hb_list_init();
    }

    hb_coverart_t *art = calloc( 1, sizeof(hb_coverart_t) );
    if ( art == NULL )
        return;

    art->data = malloc( size );
    if ( art->data == NULL )
    {
        free( art );
        return;
    }
    memcpy( art->data, data, size );
    art->size = size;
    art->type = type;
    hb_list_add( metadata->list_coverart, art );
}
/*
 * Remove and free the cover art entry at the given index; a missing
 * index or NULL metadata is a no-op.
 */
void hb_metadata_rem_coverart( hb_metadata_t *metadata, int idx )
{
    if ( metadata == NULL )
        return;

    hb_coverart_t *art = hb_list_item( metadata->list_coverart, idx );
    if ( art == NULL )
        return;

    hb_list_rem( metadata->list_coverart, art );
    free( art->data );
    free( art );
}
/*
 * printf into a freshly malloc'd, exactly-sized string.
 * Returns NULL on formatting or allocation failure; caller frees.
 */
char * hb_strdup_printf( const char * fmt, ... )
{
    va_list ap;

    /* First pass: measure the formatted length (C99 vsnprintf returns
     * the length that would have been written). */
    va_start( ap, fmt );
    int len = vsnprintf( NULL, 0, fmt, ap );
    va_end( ap );
    if ( len < 0 )
        return NULL;

    char *str = malloc( (size_t)len + 1 );
    if ( str == NULL )
        return NULL;

    /* Second pass: actually format into the right-sized buffer. */
    va_start( ap, fmt );
    vsnprintf( str, (size_t)len + 1, fmt, ap );
    va_end( ap );
    return str;
}
/*
 * Return a malloc'd string holding s1 followed by at most n bytes of s2;
 * either input may be NULL (treated as empty). Returns NULL when there
 * is nothing to copy or on allocation failure.
 * Bug fix: the original called strncat(str, NULL, n) when s2 was NULL
 * but s1 was not -- undefined behavior. It also allocated
 * MAX(strlen(s2), n) bytes for the s2 part, over-allocating when n
 * exceeds the string length; the copy is now exactly sized while the
 * NULL-return condition is kept identical.
 */
char * hb_strncat_dup( const char * s1, const char * s2, size_t n )
{
    size_t len1 = s1 ? strlen( s1 ) : 0;
    size_t len2 = 0;      /* bytes of s2 actually copied */
    size_t len  = len1;   /* original's length accounting, for NULL-return parity */

    if ( s2 )
    {
        size_t full = strlen( s2 );
        len2 = ( full < n ) ? full : n;
        len += ( full > n ) ? full : n;   /* matches old MAX() behavior */
    }
    if ( !len )
        return NULL;

    char *str = malloc( len1 + len2 + 1 );
    if ( !str )
        return NULL;

    if ( len1 )
        memcpy( str, s1, len1 );
    if ( len2 )
        memcpy( str + len1, s2, len2 );
    str[len1 + len2] = '\0';
    return str;
}
/**********************************************************************
* hb_attachment_copy
**********************************************************************
*
*********************************************************************/
/*
 * Duplicate an attachment, deep-copying both its name and its data blob.
 * Returns NULL for a NULL input.
 */
hb_attachment_t *hb_attachment_copy(const hb_attachment_t *src)
{
    if ( src == NULL )
        return NULL;

    hb_attachment_t *dup = calloc( 1, sizeof( *dup ) );
    memcpy( dup, src, sizeof( *dup ) );
    if ( src->name != NULL )
    {
        dup->name = strdup( src->name );
    }
    if ( src->data != NULL )
    {
        dup->data = malloc( src->size );
        memcpy( dup->data, src->data, src->size );
    }
    return dup;
}
/**********************************************************************
* hb_attachment_list_copy
**********************************************************************
*
*********************************************************************/
/*
 * Deep-copy an attachment list; a NULL source yields a valid empty list.
 */
hb_list_t *hb_attachment_list_copy(const hb_list_t *src)
{
    hb_list_t *dst = hb_list_init();

    if ( src == NULL )
        return dst;

    int count = hb_list_count( src );
    for ( int i = 0; i < count; i++ )
    {
        hb_attachment_t *attachment = hb_list_item( src, i );
        if ( attachment != NULL )
        {
            hb_list_add( dst, hb_attachment_copy( attachment ) );
        }
    }
    return dst;
}
/**********************************************************************
* hb_attachment_close
**********************************************************************
*
*********************************************************************/
/**********************************************************************
 * hb_attachment_close
 **********************************************************************
 * Frees an attachment and everything it owns, then NULLs the caller's
 * pointer.  Safe to call with NULL or with *attachment == NULL.
 *********************************************************************/
void hb_attachment_close( hb_attachment_t **attachment )
{
    if ( attachment == NULL || *attachment == NULL )
    {
        return;
    }
    hb_attachment_t *a = *attachment;
    free(a->data);
    free(a->name);
    free(a);
    *attachment = NULL;
}
/**********************************************************************
* hb_yuv2rgb
**********************************************************************
* Converts a YCrCb pixel to an RGB pixel.
*
* This conversion is lossy (due to rounding and clamping).
*
* Algorithm:
* http://en.wikipedia.org/w/index.php?title=YCbCr&oldid=361987695#Technical_details
*********************************************************************/
/**********************************************************************
 * hb_yuv2rgb
 **********************************************************************
 * Converts a packed 0x00YYCrCb pixel to a packed 0x00RRGGBB pixel.
 *
 * This conversion is lossy (due to rounding and clamping).
 *
 * Algorithm:
 * http://en.wikipedia.org/w/index.php?title=YCbCr&oldid=361987695#Technical_details
 *********************************************************************/
int hb_yuv2rgb(int yuv)
{
    double y  = (yuv >> 16) & 0xff;
    double Cr = (yuv >>  8) & 0xff;
    double Cb = (yuv      ) & 0xff;

    int rgb[3];
    rgb[0] = 1.164 * (y - 16)                      + 1.596 * (Cr - 128);
    rgb[1] = 1.164 * (y - 16) - 0.392 * (Cb - 128) - 0.813 * (Cr - 128);
    rgb[2] = 1.164 * (y - 16) + 2.017 * (Cb - 128);

    // Clamp each channel into the representable [0, 255] range
    for (int i = 0; i < 3; i++)
    {
        if (rgb[i] < 0)
        {
            rgb[i] = 0;
        }
        else if (rgb[i] > 255)
        {
            rgb[i] = 255;
        }
    }
    return (rgb[0] << 16) | (rgb[1] << 8) | rgb[2];
}
/**********************************************************************
* hb_rgb2yuv
**********************************************************************
* Converts an RGB pixel to a YCrCb pixel.
*
* This conversion is lossy (due to rounding and clamping).
*
* Algorithm:
* http://en.wikipedia.org/w/index.php?title=YCbCr&oldid=361987695#Technical_details
*********************************************************************/
/**********************************************************************
 * hb_rgb2yuv
 **********************************************************************
 * Converts a packed 0x00RRGGBB pixel to a packed 0x00YYCrCb pixel.
 *
 * This conversion is lossy (due to rounding and clamping).
 *
 * Algorithm:
 * http://en.wikipedia.org/w/index.php?title=YCbCr&oldid=361987695#Technical_details
 *********************************************************************/
int hb_rgb2yuv(int rgb)
{
    double r = (rgb >> 16) & 0xff;
    double g = (rgb >>  8) & 0xff;
    double b = (rgb      ) & 0xff;

    int ycc[3];
    ycc[0] =  16. + ( 0.257 * r) + (0.504 * g) + (0.098 * b);   // Y
    ycc[1] = 128. + ( 0.439 * r) - (0.368 * g) - (0.071 * b);   // Cr
    ycc[2] = 128. + (-0.148 * r) - (0.291 * g) + (0.439 * b);   // Cb

    // Clamp each component into the representable [0, 255] range
    for (int i = 0; i < 3; i++)
    {
        if (ycc[i] < 0)
        {
            ycc[i] = 0;
        }
        else if (ycc[i] > 255)
        {
            ycc[i] = 255;
        }
    }
    return (ycc[0] << 16) | (ycc[1] << 8) | ycc[2];
}
/* Maps a subtitle source enum value to a short human-readable name.
 * Both 608 and 708 closed captions report as "CC". */
const char * hb_subsource_name( int source )
{
    switch (source)
    {
        case VOBSUB:
            return "VOBSUB";
        case SRTSUB:
            return "SRT";
        case CC608SUB: /* fall through */
        case CC708SUB:
            return "CC";
        case UTF8SUB:
            return "UTF-8";
        case TX3GSUB:
            return "TX3G";
        case SSASUB:
            return "SSA";
        case PGSSUB:
            return "PGS";
        default:
            return "Unknown";
    }
}
/* Logs a classic hex dump of `data` (len bytes) through hb_deep_log():
 * 16 bytes per row in hex, followed by a |....| ASCII gutter where
 * non-printable bytes render as '.'.  `label` is an optional banner. */
void hb_hexdump( hb_debug_level_t level, const char * label, const uint8_t * data, int len )
{
    int ii;
    char line[80], ascii[19], *p;

    /* ascii holds "|" + 16 char cells + "|" + NUL */
    ascii[18] = 0;
    ascii[0] = '|';
    ascii[17] = '|';
    memset(&ascii[1], '.', 16);
    p = line;
    if( label )
        hb_deep_log(level, "++++ %s ++++", label);
    else
        hb_deep_log(level, "++++++++++++");
    for( ii = 0; ii < len; ii++ )
    {
        if( ( ii & 0x0f ) == 0x0f )
        {
            /* 16th byte of a row: append it, flush the row, reset */
            p += sprintf( p, "%02x", data[ii] );
            hb_deep_log( level, " %-50s%20s", line, ascii );
            memset(&ascii[1], '.', 16);
            p = line;
        }
        else if( ( ii & 0x07 ) == 0x07 )
        {
            /* mid-row (8-byte) boundary */
            p += sprintf( p, "%02x ", data[ii] );
        }
        else
        {
            p += sprintf( p, "%02x ", data[ii] );
        }
        /* mirror the byte into the ASCII gutter */
        if( isgraph( data[ii] ) )
            ascii[(ii & 0x0f) + 1] = data[ii];
        else
            ascii[(ii & 0x0f) + 1] = '.';
    }
    /* flush a final partial row, if any */
    if( p != line )
    {
        hb_deep_log( level, " %-50s%20s", line, ascii );
    }
}
// Set by the GUI frontend to request hardware decoding (see hb_use_dxva).
int hb_gui_use_hwd_flag = 0;

/* Returns nonzero when the title's video codec is one of the codecs
 * eligible for the DXVA hardware decode path AND the title carries
 * libav context data (opaque_priv). */
int hb_use_dxva( hb_title_t * title )
{
    return ( (title->video_codec_param == AV_CODEC_ID_MPEG2VIDEO
           || title->video_codec_param == AV_CODEC_ID_H264
           || title->video_codec_param == AV_CODEC_ID_VC1
           || title->video_codec_param == AV_CODEC_ID_WMV3
           || title->video_codec_param == AV_CODEC_ID_MPEG4 )
          && title->opaque_priv );
}
HandBrake-0.10.2/libhb/audio_resample.c 0000664 0001752 0001752 00000023567 12463330511 020267 0 ustar handbrake handbrake /* audio_resample.c
*
* Copyright (c) 2003-2015 HandBrake Team
* This file is part of the HandBrake source code
* Homepage:
* It may be used under the terms of the GNU General Public License v2.
* For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "common.h"
#include "hbffmpeg.h"
#include "audio_resample.h"
/* Allocates and configures an hb_audio_resample_t for the requested
 * output sample format and mixdown.  The avresample context itself is
 * created lazily by hb_audio_resample_update().  Returns NULL on
 * allocation failure or when a planar output format is requested.
 * The caller frees the result with hb_audio_resample_free(). */
hb_audio_resample_t* hb_audio_resample_init(enum AVSampleFormat sample_fmt,
                                            int hb_amixdown, int normalize_mix)
{
    hb_audio_resample_t *resample = calloc(1, sizeof(hb_audio_resample_t));
    if (resample == NULL)
    {
        hb_error("hb_audio_resample_init: failed to allocate resample");
        return NULL;
    }

    // avresample context, initialized in hb_audio_resample_update()
    resample->avresample = NULL;

    // we don't support planar output yet
    if (av_sample_fmt_is_planar(sample_fmt))
    {
        hb_error("hb_audio_resample_init: planar output not supported ('%s')",
                 av_get_sample_fmt_name(sample_fmt));
        hb_audio_resample_free(resample);
        return NULL;
    }

    // convert mixdown to channel_layout/matrix_encoding combo
    int matrix_encoding;
    uint64_t channel_layout = hb_ff_mixdown_xlat(hb_amixdown, &matrix_encoding);

    /*
     * When downmixing, Dual Mono to Mono is a special case:
     * the audio must remain 2-channel until all conversions are done.
     */
    int dual_mono = (hb_amixdown == HB_AMIXDOWN_LEFT ||
                     hb_amixdown == HB_AMIXDOWN_RIGHT);
    if (dual_mono)
    {
        channel_layout = AV_CH_LAYOUT_STEREO;
    }
    resample->dual_mono_downmix    = dual_mono;
    resample->dual_mono_right_only = (hb_amixdown == HB_AMIXDOWN_RIGHT);

    // requested output channel_layout, sample_fmt
    resample->out.channels           = av_get_channel_layout_nb_channels(channel_layout);
    resample->out.channel_layout     = channel_layout;
    resample->out.matrix_encoding    = matrix_encoding;
    resample->out.normalize_mix_level = normalize_mix;
    resample->out.sample_fmt         = sample_fmt;
    resample->out.sample_size        = av_get_bytes_per_sample(sample_fmt);

    // set default input characteristics
    resample->in.sample_fmt         = resample->out.sample_fmt;
    resample->in.channel_layout     = resample->out.channel_layout;
    resample->in.lfe_mix_level      = HB_MIXLEV_ZERO;
    resample->in.center_mix_level   = HB_MIXLEV_DEFAULT;
    resample->in.surround_mix_level = HB_MIXLEV_DEFAULT;

    // by default, no conversion needed
    resample->resample_needed = 0;
    return resample;
}
/* Records the incoming audio's channel layout.  Dolby Surround input
 * is treated as plain Stereo for remixing purposes. */
void hb_audio_resample_set_channel_layout(hb_audio_resample_t *resample,
                                          uint64_t channel_layout)
{
    if (resample == NULL)
    {
        return;
    }
    if (channel_layout == AV_CH_LAYOUT_STEREO_DOWNMIX)
    {
        // Dolby Surround is Stereo when it comes to remixing
        channel_layout = AV_CH_LAYOUT_STEREO;
    }
    resample->in.channel_layout = channel_layout;
}
/* Records the incoming audio's surround/center/LFE downmix levels;
 * picked up on the next hb_audio_resample_update(). */
void hb_audio_resample_set_mix_levels(hb_audio_resample_t *resample,
                                      double surround_mix_level,
                                      double center_mix_level,
                                      double lfe_mix_level)
{
    if (resample == NULL)
    {
        return;
    }
    resample->in.lfe_mix_level      = lfe_mix_level;
    resample->in.center_mix_level   = center_mix_level;
    resample->in.surround_mix_level = surround_mix_level;
}
/* Records the incoming audio's sample format; picked up on the next
 * hb_audio_resample_update(). */
void hb_audio_resample_set_sample_fmt(hb_audio_resample_t *resample,
                                      enum AVSampleFormat sample_fmt)
{
    if (resample == NULL)
    {
        return;
    }
    resample->in.sample_fmt = sample_fmt;
}
/* Reconciles the avresample context with the current input parameters.
 * Decides whether any conversion is needed at all, (re)creates or
 * reconfigures the libavresample context when the input description
 * changed, and caches the applied input parameters in resample->resample
 * so redundant reconfiguration is skipped.  Returns 0 on success,
 * non-zero on failure. */
int hb_audio_resample_update(hb_audio_resample_t *resample)
{
    if (resample == NULL)
    {
        hb_error("hb_audio_resample_update: resample is NULL");
        return 1;
    }

    int ret, resample_changed;

    /* conversion is needed whenever format or layout differ in/out */
    resample->resample_needed =
        (resample->out.sample_fmt != resample->in.sample_fmt ||
         resample->out.channel_layout != resample->in.channel_layout);

    /* did the input description change since the last (re)open? */
    resample_changed =
        (resample->resample_needed &&
         (resample->resample.sample_fmt != resample->in.sample_fmt ||
          resample->resample.channel_layout != resample->in.channel_layout ||
          resample->resample.lfe_mix_level != resample->in.lfe_mix_level ||
          resample->resample.center_mix_level != resample->in.center_mix_level ||
          resample->resample.surround_mix_level != resample->in.surround_mix_level));

    if (resample_changed || (resample->resample_needed &&
                             resample->avresample == NULL))
    {
        if (resample->avresample == NULL)
        {
            resample->avresample = avresample_alloc_context();
            if (resample->avresample == NULL)
            {
                hb_error("hb_audio_resample_update: avresample_alloc_context() failed");
                return 1;
            }
            /* output-side options only need to be set once, at creation */
            av_opt_set_int(resample->avresample, "out_sample_fmt",
                           resample->out.sample_fmt, 0);
            av_opt_set_int(resample->avresample, "out_channel_layout",
                           resample->out.channel_layout, 0);
            av_opt_set_int(resample->avresample, "matrix_encoding",
                           resample->out.matrix_encoding, 0);
            av_opt_set_int(resample->avresample, "normalize_mix_level",
                           resample->out.normalize_mix_level, 0);
        }
        else if (resample_changed)
        {
            /* must close before changing input options on a live context */
            avresample_close(resample->avresample);
        }

        /* (re)apply the input-side options, then open */
        av_opt_set_int(resample->avresample, "in_sample_fmt",
                       resample->in.sample_fmt, 0);
        av_opt_set_int(resample->avresample, "in_channel_layout",
                       resample->in.channel_layout, 0);
        av_opt_set_double(resample->avresample, "lfe_mix_level",
                          resample->in.lfe_mix_level, 0);
        av_opt_set_double(resample->avresample, "center_mix_level",
                          resample->in.center_mix_level, 0);
        av_opt_set_double(resample->avresample, "surround_mix_level",
                          resample->in.surround_mix_level, 0);
        if ((ret = avresample_open(resample->avresample)))
        {
            char err_desc[64];
            av_strerror(ret, err_desc, 63);
            hb_error("hb_audio_resample_update: avresample_open() failed (%s)",
                     err_desc);
            // avresample won't open, start over
            avresample_free(&resample->avresample);
            return ret;
        }

        /* remember what we just configured */
        resample->resample.sample_fmt     = resample->in.sample_fmt;
        resample->resample.channel_layout = resample->in.channel_layout;
        resample->resample.channels =
            av_get_channel_layout_nb_channels(resample->in.channel_layout);
        resample->resample.lfe_mix_level      = resample->in.lfe_mix_level;
        resample->resample.center_mix_level   = resample->in.center_mix_level;
        resample->resample.surround_mix_level = resample->in.surround_mix_level;
    }
    return 0;
}
/* Releases a resampler and its avresample context.  NULL-safe. */
void hb_audio_resample_free(hb_audio_resample_t *resample)
{
    if (resample == NULL)
    {
        return;
    }
    if (resample->avresample != NULL)
    {
        avresample_free(&resample->avresample);
    }
    free(resample);
}
/* Converts nsamples of input audio (per-plane pointers in `samples`)
 * into a freshly allocated hb_buffer_t in the configured output format.
 * When no conversion is needed the input is simply copied.  Applies the
 * Dual Mono -> Mono selection pass last.  Returns NULL on error or when
 * libavresample produces no output (don't send empty buffers downstream). */
hb_buffer_t* hb_audio_resample(hb_audio_resample_t *resample,
                               uint8_t **samples, int nsamples)
{
    if (resample == NULL)
    {
        hb_error("hb_audio_resample: resample is NULL");
        return NULL;
    }
    if (resample->resample_needed && resample->avresample == NULL)
    {
        hb_error("hb_audio_resample: resample needed but libavresample context "
                 "is NULL");
        return NULL;
    }

    hb_buffer_t *out;
    int out_size, out_samples;

    if (resample->resample_needed)
    {
        int in_linesize, out_linesize;
        // set in/out linesize and out_size
        av_samples_get_buffer_size(&in_linesize,
                                   resample->resample.channels, nsamples,
                                   resample->resample.sample_fmt, 0);
        out_size = av_samples_get_buffer_size(&out_linesize,
                                              resample->out.channels, nsamples,
                                              resample->out.sample_fmt, 0);
        out = hb_buffer_init(out_size);
        out_samples = avresample_convert(resample->avresample,
                                         &out->data, out_linesize, nsamples,
                                         samples, in_linesize, nsamples);

        if (out_samples <= 0)
        {
            if (out_samples < 0)
                hb_log("hb_audio_resample: avresample_convert() failed");
            // don't send empty buffers downstream (EOF)
            hb_buffer_close(&out);
            return NULL;
        }
        /* avresample may emit fewer samples than requested; shrink size */
        out->size = (out_samples *
                     resample->out.sample_size * resample->out.channels);
    }
    else
    {
        /* passthrough: interleaved input, plane 0 holds everything */
        out_samples = nsamples;
        out_size = (out_samples *
                    resample->out.sample_size * resample->out.channels);
        out = hb_buffer_init(out_size);
        memcpy(out->data, samples[0], out_size);
    }

    /*
     * Dual Mono to Mono.
     *
     * Copy all left or right samples to the first half of the buffer and halve
     * the buffer size.
     */
    if (resample->dual_mono_downmix)
    {
        /* jj walks the chosen channel (0 = left, 1 = right) of the
         * interleaved stereo data; ii is the mono write position */
        int ii, jj = !!resample->dual_mono_right_only;
        int sample_size = resample->out.sample_size;
        uint8_t *audio_samples = out->data;
        for (ii = 0; ii < out_samples; ii++)
        {
            memcpy(audio_samples + (ii * sample_size),
                   audio_samples + (jj * sample_size), sample_size);
            jj += 2;
        }
        out->size = out_samples * sample_size;
    }

    return out;
}
HandBrake-0.10.2/libhb/encvorbis.c 0000664 0001752 0001752 00000020364 12463330511 017260 0 ustar handbrake handbrake /* encvorbis.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
#include "audio_remap.h"
#include "vorbis/vorbisenc.h"
/* Samples per channel fed to libvorbis per analysis frame. */
#define OGGVORBIS_FRAME_SIZE 1024

int encvorbisInit( hb_work_object_t *, hb_job_t * );
int encvorbisWork( hb_work_object_t *, hb_buffer_t **, hb_buffer_t ** );
void encvorbisClose( hb_work_object_t * );

/* Work object registered with HandBrake's encoder dispatch. */
hb_work_object_t hb_encvorbis =
{
    WORK_ENCVORBIS,
    "Vorbis encoder (libvorbis)",
    encvorbisInit,
    encvorbisWork,
    encvorbisClose
};
/* Per-instance state for the libvorbis encoder work object. */
struct hb_work_private_s
{
    uint8_t *buf;                  /* staging buffer: one frame of interleaved floats */
    hb_job_t *job;
    hb_list_t *list;               /* queue of not-yet-consumed input buffers */

    vorbis_dsp_state vd;           /* central working state for PCM->packet */
    vorbis_comment vc;             /* user comments (Encoder/LANGUAGE tags) */
    vorbis_block vb;               /* local working space for packet->PCM */
    vorbis_info vi;                /* static codec settings */

    unsigned input_samples;        /* samples (all channels) per frame */
    uint64_t pts;                  /* pts of the samples currently in buf */
    int64_t prev_blocksize;        /* used to derive packet stop times in Flush() */
    int out_discrete_channels;     /* channel count for the output mixdown */
    int remap_table[8];            /* input-order -> vorbis-order channel map */
};
/* Initializes the libvorbis encoder: configures bitrate- or
 * quality-based encoding, writes the three Vorbis header packets into
 * w->config->vorbis.headers, and builds the channel remap table.
 * Returns 0 on success, -1 (with *job->die set) on failure.
 *
 * Fix: the header-copy loop previously wrote through
 * w->config->theora.headers even though this encoder owns (and the
 * zeroing loop below clears) vorbis.headers; use the vorbis member
 * consistently. */
int encvorbisInit(hb_work_object_t *w, hb_job_t *job)
{
    hb_work_private_t *pv = calloc(1, sizeof(hb_work_private_t));
    hb_audio_t *audio = w->audio;
    w->private_data = pv;
    pv->job = job;

    int i;
    ogg_packet header[3];

    hb_log("encvorbis: opening libvorbis");

    /* init */
    for (i = 0; i < 3; i++)
    {
        // Zero vorbis headers so that we don't crash in mk_laceXiph
        // when vorbis_encode_setup_managed fails.
        memset(w->config->vorbis.headers[i], 0, sizeof(ogg_packet));
    }
    vorbis_info_init(&pv->vi);

    pv->out_discrete_channels =
        hb_mixdown_get_discrete_channel_count(audio->config.out.mixdown);

    if (audio->config.out.bitrate > 0)
    {
        if (vorbis_encode_setup_managed(&pv->vi, pv->out_discrete_channels,
                                        audio->config.out.samplerate, -1,
                                        audio->config.out.bitrate * 1000, -1))
        {
            hb_error("encvorbis: vorbis_encode_setup_managed() failed");
            *job->done_error = HB_ERROR_INIT;
            *job->die = 1;
            return -1;
        }
    }
    else if (audio->config.out.quality != HB_INVALID_AUDIO_QUALITY)
    {
        // map VBR quality to Vorbis API (divide by 10)
        if (vorbis_encode_setup_vbr(&pv->vi, pv->out_discrete_channels,
                                    audio->config.out.samplerate,
                                    audio->config.out.quality / 10))
        {
            hb_error("encvorbis: vorbis_encode_setup_vbr() failed");
            *job->done_error = HB_ERROR_INIT;
            *job->die = 1;
            return -1;
        }
    }

    if (vorbis_encode_ctl(&pv->vi, OV_ECTL_RATEMANAGE2_SET, NULL) ||
        vorbis_encode_setup_init(&pv->vi))
    {
        hb_error("encvorbis: vorbis_encode_ctl(ratemanage2_set) OR vorbis_encode_setup_init() failed");
        *job->done_error = HB_ERROR_INIT;
        *job->die = 1;
        return -1;
    }

    /* add a comment */
    vorbis_comment_init(&pv->vc);
    vorbis_comment_add_tag(&pv->vc, "Encoder", "HandBrake");
    vorbis_comment_add_tag(&pv->vc, "LANGUAGE", w->config->vorbis.language);

    /* set up the analysis state and auxiliary encoding storage */
    vorbis_analysis_init(&pv->vd, &pv->vi);
    vorbis_block_init(&pv->vd, &pv->vb);

    /* get the 3 headers */
    vorbis_analysis_headerout(&pv->vd, &pv->vc,
                              &header[0], &header[1], &header[2]);
    ogg_packet *pheader;
    for (i = 0; i < 3; i++)
    {
        /* store the ogg_packet struct followed by its payload in the
         * vorbis config slot (the same member zeroed above) */
        pheader = (ogg_packet*)w->config->vorbis.headers[i];
        memcpy(pheader, &header[i], sizeof(ogg_packet));
        pheader->packet = w->config->vorbis.headers[i] + sizeof(ogg_packet);
        memcpy(pheader->packet, header[i].packet, header[i].bytes );
    }

    pv->input_samples = pv->out_discrete_channels * OGGVORBIS_FRAME_SIZE;
    audio->config.out.samples_per_frame = OGGVORBIS_FRAME_SIZE;
    pv->buf = malloc(pv->input_samples * sizeof(float));
    pv->list = hb_list_init();

    // channel remapping
    uint64_t layout = hb_ff_mixdown_xlat(audio->config.out.mixdown, NULL);
    hb_audio_remap_build_table(&hb_vorbis_chan_map,
                               audio->config.in.channel_map, layout,
                               pv->remap_table);
    return 0;
}
/***********************************************************************
* Close
***********************************************************************
*
**********************************************************************/
void encvorbisClose(hb_work_object_t * w)
{
hb_work_private_t *pv = w->private_data;
vorbis_comment_clear(&pv->vc);
vorbis_block_clear(&pv->vb);
vorbis_info_clear(&pv->vi);
vorbis_dsp_clear(&pv->vd);
if (pv->list)
{
hb_list_empty(&pv->list);
}
free(pv->buf);
free(pv);
w->private_data = NULL;
}
/***********************************************************************
* Flush
***********************************************************************
*
**********************************************************************/
/***********************************************************************
 * Flush
 ***********************************************************************
 * Pulls one finished packet out of libvorbis, if available, and wraps
 * it in an hb_buffer_t with 90KHz timestamps derived from the packet's
 * granulepos.  Returns NULL when no packet is ready.
 **********************************************************************/
static hb_buffer_t * Flush( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;
    hb_buffer_t * buf;
    int64_t blocksize = 0;

    if( vorbis_analysis_blockout( &pv->vd, &pv->vb ) == 1 )
    {
        ogg_packet op;
        vorbis_analysis( &pv->vb, NULL );
        vorbis_bitrate_addblock( &pv->vb );

        if( vorbis_bitrate_flushpacket( &pv->vd, &op ) )
        {
            buf = hb_buffer_init( op.bytes );
            memcpy( buf->data, op.packet, op.bytes );
            blocksize = vorbis_packet_blocksize(&pv->vi, &op);
            buf->s.type = AUDIO_BUF;
            buf->s.frametype = HB_FRAME_AUDIO;
            /* start from this packet's granulepos; stop estimated from the
             * average of the previous and current blocksizes (Vorbis packets
             * overlap) */
            buf->s.start = (int64_t)(vorbis_granule_time(&pv->vd, op.granulepos) * 90000);
            buf->s.stop  = (int64_t)(vorbis_granule_time(&pv->vd, (pv->prev_blocksize + blocksize)/4 + op.granulepos) * 90000);
            buf->s.duration = buf->s.stop - buf->s.start;
            /* The stop time isn't accurate for the first ~3 packets, as the actual blocksize depends on the previous _and_ current packets. */
            pv->prev_blocksize = blocksize;
            return buf;
        }
    }
    return NULL;
}
/***********************************************************************
* Encode
***********************************************************************
*
**********************************************************************/
/***********************************************************************
 * Encode
 ***********************************************************************
 * Emits one encoded packet if libvorbis has one ready; otherwise feeds
 * one frame of buffered samples into the analyzer (de-interleaving and
 * remapping channels) and tries again.  Returns NULL when more input
 * is needed.
 **********************************************************************/
static hb_buffer_t* Encode(hb_work_object_t *w)
{
    hb_work_private_t *pv = w->private_data;
    hb_buffer_t *out;

    /* Drain any packet libvorbis already has ready */
    out = Flush(w);
    if (out != NULL)
    {
        return out;
    }

    /* Not enough buffered audio for a full frame yet */
    if (hb_list_bytes(pv->list) < pv->input_samples * sizeof(float))
    {
        return NULL;
    }

    /* Pull one frame of interleaved float samples */
    hb_list_getbytes(pv->list, pv->buf, pv->input_samples * sizeof(float),
                     &pv->pts, NULL);

    /* De-interleave into libvorbis' per-channel buffers, remapping the
     * channel order as we go */
    float **analysis = vorbis_analysis_buffer(&pv->vd, OGGVORBIS_FRAME_SIZE);
    const float *in = (const float*)pv->buf;
    for (int s = 0; s < OGGVORBIS_FRAME_SIZE; s++)
    {
        for (int ch = 0; ch < pv->out_discrete_channels; ch++)
        {
            analysis[ch][s] =
                in[pv->out_discrete_channels * s + pv->remap_table[ch]];
        }
    }
    vorbis_analysis_wrote(&pv->vd, OGGVORBIS_FRAME_SIZE);

    /* Try to extract again */
    return Flush(w);
}
/***********************************************************************
* Work
***********************************************************************
*
**********************************************************************/
/***********************************************************************
 * Work
 ***********************************************************************
 * Queues the incoming buffer and emits a chain of all packets that can
 * currently be encoded.  An empty input buffer signals EOF and is
 * passed straight through.
 **********************************************************************/
int encvorbisWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                   hb_buffer_t ** buf_out )
{
    hb_work_private_t *pv = w->private_data;
    hb_buffer_t *in = *buf_in;

    if (in->size <= 0)
    {
        /* EOF: hand the flush buffer downstream and finish */
        *buf_out = in;
        *buf_in = NULL;
        return HB_WORK_DONE;
    }

    hb_list_add(pv->list, in);
    *buf_in = NULL;

    /* Build an output chain of every packet that is ready */
    hb_buffer_t *tail = Encode(w);
    *buf_out = tail;
    while (tail != NULL)
    {
        tail->next = Encode(w);
        tail = tail->next;
    }

    return HB_WORK_OK;
}
HandBrake-0.10.2/libhb/nlmeans.c 0000664 0001752 0001752 00000116227 12403636035 016733 0 ustar handbrake handbrake /* nlmeans.c
Copyright (c) 2013 Dirk Farin
Copyright (c) 2003-2014 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
/* Usage
*
* Parameters:
* lumaY_strength : lumaY_origin_tune : lumaY_patch_size : lumaY_range : lumaY_frames : lumaY_prefilter :
* chromaB_strength : chromaB_origin_tune : chromaB_patch_size : chromaB_range : chromaB_frames : chromaB_prefilter :
* chromaR_strength : chromaR_origin_tune : chromaR_patch_size : chromaR_range : chromaR_frames : chromaR_prefilter
*
* Defaults:
* 8:1:7:3:2:0 for each channel (equivalent to 8:1:7:3:2:0:8:1:7:3:2:0:8:1:7:3:2:0)
*
* Parameters cascade, e.g. 6:0.8:7:3:3:0:4:1 sets:
* strength 6, origin tune 0.8 for luma
* patch size 7, range 3, frames 3, prefilter 0 for all channels
* strength 4, origin tune 1 for both chroma channels
*
* Strength is relative and must be adjusted; ALL parameters affect overall strength.
* Lower origin tune improves results for noisier input or animation (film 0.5-1, animation 0.15-0.5).
* Large patch size (>9) may greatly reduce quality by clobbering detail.
* Larger search range increases quality; however, computation time increases exponentially.
* Large number of frames (film >3, animation >6) may cause temporal smearing.
* Prefiltering can potentially improve weight decisions, yielding better results for difficult sources.
*
* Prefilter enum combos:
* 1: Mean 3x3
* 2: Mean 5x5
* 3: Mean 5x5 (overrides Mean 3x3)
* 257: Mean 3x3 reduced by 25%
* 258: Mean 5x5 reduced by 25%
* 513: Mean 3x3 reduced by 50%
* 514: Mean 5x5 reduced by 50%
* 769: Mean 3x3 reduced by 75%
* 770: Mean 5x5 reduced by 75%
* 1025: Mean 3x3 plus edge boost (restores lost edge detail)
* 1026: Mean 5x5 plus edge boost
* 1281: Mean 3x3 reduced by 25% plus edge boost
* etc...
* 2049: Mean 3x3 passthru (NL-means off, prefilter is the output)
* etc...
* 3329: Mean 3x3 reduced by 25% plus edge boost, passthru
* etc...
*/
#include "hb.h"
#include "hbffmpeg.h"
#include "taskset.h"
/* Per-channel parameter defaults (see the usage comment above). */
#define NLMEANS_STRENGTH_LUMA_DEFAULT      8
#define NLMEANS_STRENGTH_CHROMA_DEFAULT    8
#define NLMEANS_ORIGIN_TUNE_LUMA_DEFAULT   1
#define NLMEANS_ORIGIN_TUNE_CHROMA_DEFAULT 1
#define NLMEANS_PATCH_SIZE_LUMA_DEFAULT    7
#define NLMEANS_PATCH_SIZE_CHROMA_DEFAULT  7
#define NLMEANS_RANGE_LUMA_DEFAULT         3
#define NLMEANS_RANGE_CHROMA_DEFAULT       3
#define NLMEANS_FRAMES_LUMA_DEFAULT        2
#define NLMEANS_FRAMES_CHROMA_DEFAULT      2
#define NLMEANS_PREFILTER_LUMA_DEFAULT     0
#define NLMEANS_PREFILTER_CHROMA_DEFAULT   0

/* Prefilter mode bit flags (combinable; see usage comment above). */
#define NLMEANS_PREFILTER_MODE_MEAN3X3       1
#define NLMEANS_PREFILTER_MODE_MEAN5X5       2
#define NLMEANS_PREFILTER_MODE_MEDIAN3X3     4
#define NLMEANS_PREFILTER_MODE_MEDIAN5X5     8
#define NLMEANS_PREFILTER_MODE_RESERVED16   16 // Reserved
#define NLMEANS_PREFILTER_MODE_RESERVED32   32 // Reserved
#define NLMEANS_PREFILTER_MODE_RESERVED64   64 // Reserved
#define NLMEANS_PREFILTER_MODE_RESERVED128 128 // Reserved
#define NLMEANS_PREFILTER_MODE_REDUCE25    256
#define NLMEANS_PREFILTER_MODE_REDUCE50    512
#define NLMEANS_PREFILTER_MODE_EDGEBOOST  1024
#define NLMEANS_PREFILTER_MODE_PASSTHRU   2048

/* Conditional swap used by the median sorting networks.
 * NOTE(review): SWAP is an XOR swap — it zeroes both operands if the
 * two arguments alias the same object.  SORT callers must always pass
 * two distinct array elements. */
#define NLMEANS_SORT(a,b) { if (a > b) NLMEANS_SWAP(a, b); }
#define NLMEANS_SWAP(a,b) { a = (a ^ b); b = (a ^ b); a = (b ^ a); }

#define NLMEANS_FRAMES_MAX 32   /* hard cap on temporal search depth */
#define NLMEANS_EXPSIZE 128
/* One image plane padded with a mirrored border on all four sides.
 * `image` points at the interior (first real pixel) inside `mem`;
 * `image_pre`/`mem_pre` are the prefiltered view (aliases of the raw
 * buffers until a prefilter runs). */
typedef struct
{
    uint8_t *mem;        /* allocation: (w+2*border) x (h+2*border) */
    uint8_t *mem_pre;    /* prefiltered allocation (may alias mem) */
    uint8_t *image;      /* interior pointer into mem */
    uint8_t *image_pre;  /* interior pointer into mem_pre */
    int w;               /* interior width */
    int h;               /* interior height */
    int border;          /* border width in pixels on each side */
    hb_lock_t *mutex;    /* guards lazy prefiltering */
    int prefiltered;     /* nonzero once the prefilter has run */
} BorderedPlane;

/* One buffered video frame: three bordered planes plus the buffer
 * settings needed to reconstruct the output buffer. */
typedef struct
{
    int width;
    int height;
    int fmt;
    BorderedPlane plane[3];
    hb_buffer_settings_t s;
} Frame;

/* Accumulator for one output pixel's weighted average. */
struct PixelSum
{
    float weight_sum;
    float pixel_sum;
};
/* Arguments handed to each worker thread in the task set. */
typedef struct
{
    hb_filter_private_t *pv;
    int segment;          /* which horizontal slice this thread filters */
    hb_buffer_t *out;
} nlmeans_thread_arg_t;

/* Filter instance state; parameter arrays are indexed by plane
 * (0 = luma Y, 1 = chroma Cb, 2 = chroma Cr). */
struct hb_filter_private_s
{
    double strength[3];    // averaging weight decay, larger produces smoother output
    double origin_tune[3]; // weight tuning for origin patch, 0.00..1.00
    int    patch_size[3];  // pixel context region width (must be odd)
    int    range[3];       // spatial search window width (must be odd)
    int    nframes[3];     // temporal search depth in frames
    int    prefilter[3];   // prefilter mode, can improve weight analysis

    Frame *frame;          /* ring of buffered frames for temporal search */
    int next_frame;
    int max_frames;

    taskset_t taskset;     /* worker pool for per-segment filtering */
    int thread_count;
    nlmeans_thread_arg_t **thread_data;
};
static int nlmeans_init(hb_filter_object_t *filter, hb_filter_init_t *init);
static int nlmeans_work(hb_filter_object_t *filter,
                        hb_buffer_t **buf_in,
                        hb_buffer_t **buf_out);
static void nlmeans_close(hb_filter_object_t *filter);
static void nlmeans_filter_thread(void *thread_args_v);

/* Filter object registered with HandBrake's filter dispatch. */
hb_filter_object_t hb_filter_nlmeans =
{
    .id            = HB_FILTER_NLMEANS,
    .enforce_order = 1,
    .name          = "Denoise (nlmeans)",
    .settings      = NULL,
    .init          = nlmeans_init,
    .work          = nlmeans_work,
    .close         = nlmeans_close,
};
/* Fills the border region of a bordered plane by mirroring the interior
 * edge pixels outward.  `src` is the raw allocation (not the interior
 * pointer); w/h are the interior dimensions. */
static void nlmeans_border(uint8_t *src,
                           int w,
                           int h,
                           int border)
{
    int bw = w + 2 * border;
    /* interior pointer: skip `border` columns and `border` rows */
    uint8_t *image = src + border + bw * border;

    // Create faux borders using edge pixels
    /* left/right: per-row pixel mirror around the edge column */
    for (int y = 0; y < h; y++)
    {
        for (int x = 0; x < border; x++)
        {
            *(image + y*bw - x - 1) = *(image + y*bw + x);
            *(image + y*bw + x + w) = *(image + y*bw - x + (w-1));
        }
    }
    /* top/bottom: whole-row mirror (full bordered width, so the corner
     * regions are filled from the already-mirrored side pixels) */
    for (int y = 0; y < border; y++)
    {
        memcpy(image - border - (y+1)*bw, image - border + y*bw, bw);
        memcpy(image - border + (y+h)*bw, image - border + (h-y-1)*bw, bw);
    }
}
/* Copies the interior of a bordered plane into a destination plane of
 * width w, stride s, height h — i.e. strips the faux border.  The copy
 * width is clamped to the bordered plane's real width. */
static void nlmeans_deborder(BorderedPlane *src,
                             uint8_t *dst,
                             int w,
                             int s,
                             int h)
{
    int bw = src->w + 2 * src->border;
    uint8_t *interior = src->mem + src->border + bw * src->border;
    int copy_w = (src->w < w) ? src->w : w;

    for (int row = 0; row < h; row++)
    {
        memcpy(dst + row * s, interior + row * bw, copy_w);
    }
}
/* Builds a bordered working copy of one plane: allocates a
 * (w+2*border) x (h+2*border) buffer, copies the source plane (stride
 * src_s) into the interior, and mirrors edge pixels into the border.
 * The prefiltered view initially aliases the raw buffers. */
static void nlmeans_alloc(uint8_t *src,
                          int src_w,
                          int src_s,
                          int src_h,
                          BorderedPlane *dst,
                          int border)
{
    int bw = src_w + 2 * border;
    int bh = src_h + 2 * border;

    uint8_t *mem      = malloc(bw * bh * sizeof(uint8_t));
    uint8_t *interior = mem + border + bw * border;

    // Copy the source plane into the interior region
    for (int row = 0; row < src_h; row++)
    {
        memcpy(interior + row * bw, src + row * src_s, src_w);
    }

    dst->mem    = mem;
    dst->image  = interior;
    dst->w      = src_w;
    dst->h      = src_h;
    dst->border = border;
    nlmeans_border(dst->mem, dst->w, dst->h, dst->border);

    // No prefilter has run yet: prefiltered view aliases the raw one
    dst->mem_pre   = dst->mem;
    dst->image_pre = dst->image;
}
/* Box (mean) filter: each output pixel is the average of the size x size
 * neighborhood around it.  src/dst are interior pointers into bordered
 * planes (the border supplies out-of-frame samples), so `border` must be
 * at least (size-1)/2. */
static void nlmeans_filter_mean(uint8_t *src,
                                uint8_t *dst,
                                int w,
                                int h,
                                int border,
                                int size)
{
    int bw = w + 2 * border;
    int lo = -((size - 1) /2);
    int hi =  (size + 1) /2;
    double weight = 1.0 / (size * size);

    for (int row = 0; row < h; row++)
    {
        for (int col = 0; col < w; col++)
        {
            /* 25 * 255 < UINT16_MAX, so a 16-bit accumulator suffices
             * for the 3x3 and 5x5 kernels used here */
            uint16_t acc = 0;
            for (int dx = lo; dx < hi; dx++)
            {
                for (int dy = lo; dy < hi; dy++)
                {
                    acc = acc + *(src + bw*(row+dy) + (col+dx));
                }
            }
            *(dst + bw*row + col) = (uint8_t)(acc * weight);
        }
    }
}
/* Returns the median of a size*size pixel window using fixed sorting
 * networks for the 3x3 and 5x5 cases.  The networks partially sort the
 * array in place, leaving the median at the center index.  For any
 * other size there is no network: the function falls back to returning
 * the (unsorted) center element, which is NOT a true median. */
static uint8_t nlmeans_filter_median_opt(uint8_t *pixels, int size)
{
    // Optimized sorting networks
    if (size == 3)
    {
        /* opt_med9() via Nicolas Devillard
         * http://ndevilla.free.fr/median/median.pdf
         */
        NLMEANS_SORT(pixels[1], pixels[2]); NLMEANS_SORT(pixels[4], pixels[5]); NLMEANS_SORT(pixels[7], pixels[8]);
        NLMEANS_SORT(pixels[0], pixels[1]); NLMEANS_SORT(pixels[3], pixels[4]); NLMEANS_SORT(pixels[6], pixels[7]);
        NLMEANS_SORT(pixels[1], pixels[2]); NLMEANS_SORT(pixels[4], pixels[5]); NLMEANS_SORT(pixels[7], pixels[8]);
        NLMEANS_SORT(pixels[0], pixels[3]); NLMEANS_SORT(pixels[5], pixels[8]); NLMEANS_SORT(pixels[4], pixels[7]);
        NLMEANS_SORT(pixels[3], pixels[6]); NLMEANS_SORT(pixels[1], pixels[4]); NLMEANS_SORT(pixels[2], pixels[5]);
        NLMEANS_SORT(pixels[4], pixels[7]); NLMEANS_SORT(pixels[4], pixels[2]); NLMEANS_SORT(pixels[6], pixels[4]);
        NLMEANS_SORT(pixels[4], pixels[2]);
        return pixels[4];
    }
    else if (size == 5)
    {
        /* opt_med25() via Nicolas Devillard
         * http://ndevilla.free.fr/median/median.pdf
         */
        NLMEANS_SORT(pixels[0], pixels[1]); NLMEANS_SORT(pixels[3], pixels[4]); NLMEANS_SORT(pixels[2], pixels[4]);
        NLMEANS_SORT(pixels[2], pixels[3]); NLMEANS_SORT(pixels[6], pixels[7]); NLMEANS_SORT(pixels[5], pixels[7]);
        NLMEANS_SORT(pixels[5], pixels[6]); NLMEANS_SORT(pixels[9], pixels[10]); NLMEANS_SORT(pixels[8], pixels[10]);
        NLMEANS_SORT(pixels[8], pixels[9]); NLMEANS_SORT(pixels[12], pixels[13]); NLMEANS_SORT(pixels[11], pixels[13]);
        NLMEANS_SORT(pixels[11], pixels[12]); NLMEANS_SORT(pixels[15], pixels[16]); NLMEANS_SORT(pixels[14], pixels[16]);
        NLMEANS_SORT(pixels[14], pixels[15]); NLMEANS_SORT(pixels[18], pixels[19]); NLMEANS_SORT(pixels[17], pixels[19]);
        NLMEANS_SORT(pixels[17], pixels[18]); NLMEANS_SORT(pixels[21], pixels[22]); NLMEANS_SORT(pixels[20], pixels[22]);
        NLMEANS_SORT(pixels[20], pixels[21]); NLMEANS_SORT(pixels[23], pixels[24]); NLMEANS_SORT(pixels[2], pixels[5]);
        NLMEANS_SORT(pixels[3], pixels[6]); NLMEANS_SORT(pixels[0], pixels[6]); NLMEANS_SORT(pixels[0], pixels[3]);
        NLMEANS_SORT(pixels[4], pixels[7]); NLMEANS_SORT(pixels[1], pixels[7]); NLMEANS_SORT(pixels[1], pixels[4]);
        NLMEANS_SORT(pixels[11], pixels[14]); NLMEANS_SORT(pixels[8], pixels[14]); NLMEANS_SORT(pixels[8], pixels[11]);
        NLMEANS_SORT(pixels[12], pixels[15]); NLMEANS_SORT(pixels[9], pixels[15]); NLMEANS_SORT(pixels[9], pixels[12]);
        NLMEANS_SORT(pixels[13], pixels[16]); NLMEANS_SORT(pixels[10], pixels[16]); NLMEANS_SORT(pixels[10], pixels[13]);
        NLMEANS_SORT(pixels[20], pixels[23]); NLMEANS_SORT(pixels[17], pixels[23]); NLMEANS_SORT(pixels[17], pixels[20]);
        NLMEANS_SORT(pixels[21], pixels[24]); NLMEANS_SORT(pixels[18], pixels[24]); NLMEANS_SORT(pixels[18], pixels[21]);
        NLMEANS_SORT(pixels[19], pixels[22]); NLMEANS_SORT(pixels[8], pixels[17]); NLMEANS_SORT(pixels[9], pixels[18]);
        NLMEANS_SORT(pixels[0], pixels[18]); NLMEANS_SORT(pixels[0], pixels[9]); NLMEANS_SORT(pixels[10], pixels[19]);
        NLMEANS_SORT(pixels[1], pixels[19]); NLMEANS_SORT(pixels[1], pixels[10]); NLMEANS_SORT(pixels[11], pixels[20]);
        NLMEANS_SORT(pixels[2], pixels[20]); NLMEANS_SORT(pixels[2], pixels[11]); NLMEANS_SORT(pixels[12], pixels[21]);
        NLMEANS_SORT(pixels[3], pixels[21]); NLMEANS_SORT(pixels[3], pixels[12]); NLMEANS_SORT(pixels[13], pixels[22]);
        NLMEANS_SORT(pixels[4], pixels[22]); NLMEANS_SORT(pixels[4], pixels[13]); NLMEANS_SORT(pixels[14], pixels[23]);
        NLMEANS_SORT(pixels[5], pixels[23]); NLMEANS_SORT(pixels[5], pixels[14]); NLMEANS_SORT(pixels[15], pixels[24]);
        NLMEANS_SORT(pixels[6], pixels[24]); NLMEANS_SORT(pixels[6], pixels[15]); NLMEANS_SORT(pixels[7], pixels[16]);
        NLMEANS_SORT(pixels[7], pixels[19]); NLMEANS_SORT(pixels[13], pixels[21]); NLMEANS_SORT(pixels[15], pixels[23]);
        NLMEANS_SORT(pixels[7], pixels[13]); NLMEANS_SORT(pixels[7], pixels[15]); NLMEANS_SORT(pixels[1], pixels[9]);
        NLMEANS_SORT(pixels[3], pixels[11]); NLMEANS_SORT(pixels[5], pixels[17]); NLMEANS_SORT(pixels[11], pixels[17]);
        NLMEANS_SORT(pixels[9], pixels[17]); NLMEANS_SORT(pixels[4], pixels[10]); NLMEANS_SORT(pixels[6], pixels[12]);
        NLMEANS_SORT(pixels[7], pixels[14]); NLMEANS_SORT(pixels[4], pixels[6]); NLMEANS_SORT(pixels[4], pixels[7]);
        NLMEANS_SORT(pixels[12], pixels[14]); NLMEANS_SORT(pixels[10], pixels[14]); NLMEANS_SORT(pixels[6], pixels[7]);
        NLMEANS_SORT(pixels[10], pixels[12]); NLMEANS_SORT(pixels[6], pixels[10]); NLMEANS_SORT(pixels[6], pixels[17]);
        NLMEANS_SORT(pixels[12], pixels[17]); NLMEANS_SORT(pixels[7], pixels[17]); NLMEANS_SORT(pixels[7], pixels[10]);
        NLMEANS_SORT(pixels[12], pixels[18]); NLMEANS_SORT(pixels[7], pixels[12]); NLMEANS_SORT(pixels[10], pixels[18]);
        NLMEANS_SORT(pixels[12], pixels[20]); NLMEANS_SORT(pixels[10], pixels[20]); NLMEANS_SORT(pixels[10], pixels[12]);
        return pixels[12];
    }

    // Network for size not implemented
    return pixels[(int)((size * size)/2)];
}
/* Median filter: replaces each pixel with the median of its size x size
 * neighborhood (via the optimized sorting networks above).  src/dst are
 * interior pointers into bordered planes; `border` must cover the
 * kernel radius. */
static void nlmeans_filter_median(uint8_t *src,
                                  uint8_t *dst,
                                  int w,
                                  int h,
                                  int border,
                                  int size)
{
    int bw = w + 2 * border;
    int lo = -((size - 1) /2);
    int hi =  (size + 1) /2;
    uint8_t window[size * size];   /* VLA; size is 3 or 5 in practice */

    for (int row = 0; row < h; row++)
    {
        for (int col = 0; col < w; col++)
        {
            int count = 0;
            /* gather the neighborhood, column-major to match the
             * original fill order (matters for the non-network fallback) */
            for (int dx = lo; dx < hi; dx++)
            {
                for (int dy = lo; dy < hi; dy++)
                {
                    window[count] = *(src + bw*(row+dy) + (col+dx));
                    count++;
                }
            }
            *(dst + bw*row + col) = nlmeans_filter_median_opt(window, size);
        }
    }
}
/* Edge-boost pass: detects edges in `src` with a pair of transposed
 * gradient kernels, builds a 3-level edge mask (16 / 128 / 235), then
 * blends original pixels back into `dst` where edges were found, to
 * restore detail lost by a preceding mean prefilter.
 *
 * NOTE(review): pixel1/pixel2 are uint16_t, but the kernel products can
 * be negative, so negative sums wrap around modulo 2^16 and the
 * "absolute value" ternaries below are dead code (an unsigned value is
 * never < 0, and only == 0 would take the negation branch).  The final
 * mask value also truncates through (uint8_t).  The filter's thresholds
 * appear tuned to this arithmetic, so it is documented rather than
 * changed — confirm intent before "fixing". */
static void nlmeans_filter_edgeboost(uint8_t *src,
                                     uint8_t *dst,
                                     int w,
                                     int h,
                                     int border)
{
    int bw = w + 2 * border;
    int bh = h + 2 * border;

    // Custom kernel
    int kernel_size = 3;
    int kernel[3][3] = {{-31, 0, 31},
                        {-44, 0, 44},
                        {-31, 0, 31}};
    double kernel_coef = 1.0 / 126.42;

    // Detect edges
    int offset_min = -((kernel_size - 1) /2);
    int offset_max = (kernel_size + 1) /2;
    uint16_t pixel1;
    uint16_t pixel2;
    uint8_t *mask_mem = calloc(bw * bh, sizeof(uint8_t));
    uint8_t *mask = mask_mem + border + bw * border;
    for (int y = 0; y < h; y++)
    {
        for (int x = 0; x < w; x++)
        {
            /* pixel1: horizontal gradient; pixel2: the same kernel
             * transposed (vertical gradient) */
            pixel1 = 0;
            pixel2 = 0;
            for (int k = offset_min; k < offset_max; k++)
            {
                for (int j = offset_min; j < offset_max; j++)
                {
                    pixel1 += kernel[j+1][k+1] * *(src + bw*(y+j) + (x+k));
                    pixel2 += kernel[k+1][j+1] * *(src + bw*(y+j) + (x+k));
                }
            }
            /* see NOTE(review) above: no-ops for unsigned operands */
            pixel1 = pixel1 > 0 ? pixel1 : -pixel1;
            pixel2 = pixel2 > 0 ? pixel2 : -pixel2;
            pixel1 = (uint16_t)(((double)pixel1 * kernel_coef) + 128);
            pixel2 = (uint16_t)(((double)pixel2 * kernel_coef) + 128);
            *(mask + bw*y + x) = (uint8_t)(pixel1 + pixel2);
            /* quantize the combined gradient into 3 mask levels */
            if (*(mask + bw*y + x) > 160)
            {
                *(mask + bw*y + x) = 235;   /* strong edge */
            }
            else if (*(mask + bw*y + x) > 16)
            {
                *(mask + bw*y + x) = 128;   /* weak edge */
            }
            else
            {
                *(mask + bw*y + x) = 16;    /* not an edge */
            }
        }
    }

    // Post-process and output
    int pixels;
    for (int y = 0; y < h; y++)
    {
        for (int x = 0; x < w; x++)
        {
            if (*(mask + bw*y + x) > 16)
            {
                // Count nearby edge pixels
                pixels = 0;
                for (int k = offset_min; k < offset_max; k++)
                {
                    for (int j = offset_min; j < offset_max; j++)
                    {
                        if (*(mask + bw*(y+j) + (x+k)) > 16)
                        {
                            pixels++;
                        }
                    }
                }
                // Remove false positive
                if (pixels < 3)
                {
                    *(mask + bw*y + x) = 16;
                }
                // Filter output
                if (*(mask + bw*y + x) > 16)
                {
                    /* blend the unfiltered source back in: 3:1 on strong
                     * edges, 2:3 on weak edges */
                    if (*(mask + bw*y + x) == 235)
                    {
                        *(dst + bw*y + x) = (3 * *(src + bw*y + x) + 1 * *(dst + bw*y + x)) /4;
                    }
                    else
                    {
                        *(dst + bw*y + x) = (2 * *(src + bw*y + x) + 3 * *(dst + bw*y + x)) /5;
                    }
                    //*(dst + bw*y + x) = *(mask + bw*y + x); // Overlay mask
                }
            }
            //*(dst + bw*y + x) = *(mask + bw*y + x); // Full mask
        }
    }
    free(mask_mem);
}
/*
 * Optionally prefilter one bordered plane before the NLMeans pass.
 *
 * Builds a filtered copy (mem_pre/image_pre) of the plane using the mean or
 * median filter selected in filter_type, optionally restores edges and/or
 * blends the result back toward the original (REDUCE25/REDUCE50).
 *
 * Thread-safe and idempotent: the plane's mutex plus the `prefiltered` flag
 * ensure the work happens at most once even when several worker threads race
 * on the same shared frame.
 */
static void nlmeans_prefilter(BorderedPlane *src,
                              int filter_type)
{
    hb_lock(src->mutex);

    // Another thread may already have prefiltered this shared plane.
    if (src->prefiltered)
    {
        hb_unlock(src->mutex);
        return;
    }

    if (filter_type & NLMEANS_PREFILTER_MODE_MEAN3X3 ||
        filter_type & NLMEANS_PREFILTER_MODE_MEAN5X5 ||
        filter_type & NLMEANS_PREFILTER_MODE_MEDIAN3X3 ||
        filter_type & NLMEANS_PREFILTER_MODE_MEDIAN5X5)
    {
        // Source image
        uint8_t *mem   = src->mem;
        uint8_t *image = src->image;
        int border     = src->border;
        int w  = src->w;
        int h  = src->h;
        int bw = w + 2 * border;
        int bh = h + 2 * border;

        // Duplicate plane
        uint8_t *mem_pre   = malloc(bw * bh * sizeof(uint8_t));
        uint8_t *image_pre = mem_pre + border + bw * border;
        // Copy the full bordered plane (bh rows, not just the h interior
        // rows) so the REDUCE blend below never reads uninitialized bytes.
        for (int y = 0; y < bh; y++)
        {
            memcpy(mem_pre + y * bw, mem + y * bw, bw);
        }

        // Filter plane; should already have at least 2px extra border on each side
        if (filter_type & NLMEANS_PREFILTER_MODE_MEDIAN5X5)
        {
            // Median 5x5
            nlmeans_filter_median(image, image_pre, w, h, border, 5);
        }
        else if (filter_type & NLMEANS_PREFILTER_MODE_MEDIAN3X3)
        {
            // Median 3x3
            nlmeans_filter_median(image, image_pre, w, h, border, 3);
        }
        else if (filter_type & NLMEANS_PREFILTER_MODE_MEAN5X5)
        {
            // Mean 5x5
            nlmeans_filter_mean(image, image_pre, w, h, border, 5);
        }
        else if (filter_type & NLMEANS_PREFILTER_MODE_MEAN3X3)
        {
            // Mean 3x3
            nlmeans_filter_mean(image, image_pre, w, h, border, 3);
        }

        // Restore edges
        if (filter_type & NLMEANS_PREFILTER_MODE_EDGEBOOST)
        {
            nlmeans_filter_edgeboost(image, image_pre, w, h, border);
        }

        // Blend source and destination for lesser effect
        int wet = 1;
        int dry = 0;
        if (filter_type & NLMEANS_PREFILTER_MODE_REDUCE50 &&
            filter_type & NLMEANS_PREFILTER_MODE_REDUCE25)
        {
            wet = 1;
            dry = 3;
        }
        else if (filter_type & NLMEANS_PREFILTER_MODE_REDUCE50)
        {
            wet = 1;
            dry = 1;
        }
        else if (filter_type & NLMEANS_PREFILTER_MODE_REDUCE25)
        {
            wet = 3;
            dry = 1;
        }
        if (dry > 0)
        {
            for (int y = 0; y < bh; y++)
            {
                for (int x = 0; x < bw; x++)
                {
                    *(mem_pre + bw*y + x) = (uint8_t)((wet * *(mem_pre + bw*y + x) + dry * *(mem + bw*y + x)) / (wet + dry));
                }
            }
        }

        // Assign result
        src->mem_pre   = mem_pre;
        src->image_pre = image_pre;

        // Fix: in passthru mode the prefiltered plane *is* the final output,
        // so the plane's primary pointers must alias it; the deborder path
        // reads mem/image, and nlmeans_close()/nlmeans_filter() already
        // guard for mem_pre == mem when freeing.  Without this, PASSTHRU
        // emitted the unfiltered plane.
        if (filter_type & NLMEANS_PREFILTER_MODE_PASSTHRU)
        {
            src->mem   = src->mem_pre;
            src->image = src->image_pre;
        }

        // Recreate borders
        nlmeans_border(mem_pre, w, h, border);
    }

    src->prefiltered = 1;
    hb_unlock(src->mutex);
}
// Core NLMeans pass for one plane.
//
// frame      - array of nframes buffered frames; frame[0] is the one being
//              denoised, later entries are temporal neighbors
// prefilter  - prefilter mode flags forwarded to nlmeans_prefilter()
// plane      - plane index (0 = Y, 1 = Cb, 2 = Cr)
// dst/w/s/h  - destination plane data, width, stride, height
// h_param    - filter strength (the "h" of the NL-means weight formula)
// origin_tune- weight bias applied to the zero-displacement patch
// n          - patch size (odd); r - search range (odd)
//
// For every displacement (dx,dy) within the search window and every frame,
// an integral image of squared pixel differences is built so each NxN patch
// distance is obtained in O(1); a precomputed exponential table converts the
// distance into a weight accumulated per pixel.
static void nlmeans_plane(Frame *frame,
int prefilter,
int plane,
int nframes,
uint8_t *dst,
int w,
int s,
int h,
double h_param,
double origin_tune,
int n,
int r)
{
int n_half = (n-1) /2;
int r_half = (r-1) /2;
// Source image
uint8_t *src = frame[0].plane[plane].image;
uint8_t *src_pre = frame[0].plane[plane].image_pre;
int border = frame[0].plane[plane].border;
int src_w = frame[0].plane[plane].w + 2 * border;
// Allocate temporary pixel sums
// NOTE(review): calloc/malloc results below are not checked before use.
struct PixelSum *tmp_data = calloc(w * h, sizeof(struct PixelSum));
// Allocate integral image
// Extra 16 columns each side plus one leading row allow the -1 row/column
// accesses below without bounds checks.
int integral_stride = w + 2 * 16;
uint32_t *integral_mem = malloc(integral_stride * (h+1) * sizeof(uint32_t));
uint32_t *integral = integral_mem + integral_stride + 16;
// Precompute exponential table
// exptable[i] approximates exp(-diff * weight_factor); `stretch` maps the
// useful diff range onto NLMEANS_EXPSIZE table slots, and diff_max is the
// diff beyond which the weight would round to ~0.
float exptable[NLMEANS_EXPSIZE];
const float weight_factor = 1.0/n/n / (h_param * h_param);
const float min_weight_in_table = 0.0005;
const float stretch = NLMEANS_EXPSIZE / (-log(min_weight_in_table));
const float weight_fact_table = weight_factor * stretch;
const int diff_max = NLMEANS_EXPSIZE / weight_fact_table;
for (int i = 0; i < NLMEANS_EXPSIZE; i++)
{
exptable[i] = exp(-i/stretch);
}
exptable[NLMEANS_EXPSIZE-1] = 0;
// Iterate through available frames
for (int f = 0; f < nframes; f++)
{
nlmeans_prefilter(&frame[f].plane[plane], prefilter);
// Compare image
uint8_t *compare = frame[f].plane[plane].image;
uint8_t *compare_pre = frame[f].plane[plane].image_pre;
// NOTE: this `border` intentionally shadows the outer one; both describe
// planes of identical geometry.
int border = frame[f].plane[plane].border;
int compare_w = frame[f].plane[plane].w + 2 * border;
// Iterate through all displacements
for (int dy = -r_half; dy <= r_half; dy++)
{
for (int dx = -r_half; dx <= r_half; dx++)
{
// Apply special weight tuning to origin patch
if (dx == 0 && dy == 0 && f == 0)
{
// TODO: Parallelize this
for (int y = n_half; y < h-n + n_half; y++)
{
for (int x = n_half; x < w-n + n_half; x++)
{
tmp_data[y*w + x].weight_sum += origin_tune;
tmp_data[y*w + x].pixel_sum += origin_tune * src[y*src_w + x];
}
}
continue;
}
// Build integral
// Zero the virtual row above row 0 so the y-1 accesses below are valid.
memset(integral-1 - integral_stride, 0, (w+1) * sizeof(uint32_t));
for (int y = 0; y < h; y++)
{
const uint8_t *p1 = src_pre + y*src_w;
const uint8_t *p2 = compare_pre + (y+dy)*compare_w + dx;
uint32_t *out = integral + (y*integral_stride) - 1;
// Row-wise running sum of squared differences...
*out++ = 0;
for (int x = 0; x < w; x++)
{
int diff = *p1++ - *p2++;
*out = *(out-1) + diff * diff;
out++;
}
// ...then add the row above to complete the 2-D integral.
if (y > 0)
{
out = integral + y*integral_stride;
for (int x = 0; x < w; x++)
{
*out += *(out - integral_stride);
out++;
}
}
}
// Average displacement
// TODO: Parallelize this
for (int y = 0; y <= h-n; y++)
{
const uint32_t *integral_ptr1 = integral + (y -1)*integral_stride - 1;
const uint32_t *integral_ptr2 = integral + (y+n-1)*integral_stride - 1;
for (int x = 0; x <= w-n; x++)
{
// (xc,yc) is the center pixel of the NxN patch at (x,y).
int xc = x + n_half;
int yc = y + n_half;
// Difference between patches
// Standard 4-corner integral-image lookup of the patch SSD.
int diff = (uint32_t)(integral_ptr2[n] - integral_ptr2[0] - integral_ptr1[n] + integral_ptr1[0]);
// Sum pixel with weight
if (diff < diff_max)
{
int diffidx = diff * weight_fact_table;
//float weight = exp(-diff*weightFact);
float weight = exptable[diffidx];
tmp_data[yc*w + xc].weight_sum += weight;
tmp_data[yc*w + xc].pixel_sum += weight * compare[(yc+dy)*compare_w + xc + dx];
}
integral_ptr1++;
integral_ptr2++;
}
}
}
}
}
// Copy edges
// The outer n_half-wide frame of the plane gets no accumulated weights;
// mirror/copy it from the bordered source instead.
for (int y = 0; y < h; y++)
{
for (int x = 0; x < n_half; x++)
{
*(dst + y * s + x) = *(src + y * src_w - x - 1);
*(dst + y * s - x + (w - 1)) = *(src + y * src_w + x + w);
}
}
for (int y = 0; y < n_half; y++)
{
memcpy(dst + y*s, src - (y+1)*src_w, w);
memcpy(dst + (h-y-1)*s, src + (y+h)*src_w, w);
}
// Copy main image
uint8_t result;
for (int y = n_half; y < h-n_half; y++)
{
for (int x = n_half; x < w-n_half; x++)
{
result = (uint8_t)(tmp_data[y*w + x].pixel_sum / tmp_data[y*w + x].weight_sum);
// A result of 0 falls back to the source pixel (also sidesteps the
// degenerate all-zero-weight case).
*(dst + y*s + x) = result ? result : *(src + y*src_w + x);
}
}
free(tmp_data);
free(integral_mem);
}
/*
 * Initialize the NLMeans filter.
 *
 * Parses the per-plane settings string ("strength:origin_tune:patch_size:
 * range:nframes:prefilter" repeated for Y, Cb, Cr), cascades unset values
 * (Cr inherits Cb, Cb inherits Y), applies defaults, sanitizes ranges, and
 * spawns one worker thread per CPU via the taskset.
 *
 * Returns 0 on success, -1 on taskset/thread setup failure.
 */
static int nlmeans_init(hb_filter_object_t *filter,
                        hb_filter_init_t *init)
{
    filter->private_data = calloc(sizeof(struct hb_filter_private_s), 1);
    hb_filter_private_t *pv = filter->private_data;

    // Mark parameters unset (-1) so we can tell user-supplied values from
    // ones that must be cascaded or defaulted below.
    for (int c = 0; c < 3; c++)
    {
        pv->strength[c]    = -1;
        pv->origin_tune[c] = -1;
        pv->patch_size[c]  = -1;
        pv->range[c]       = -1;
        pv->nframes[c]     = -1;
        pv->prefilter[c]   = -1;
    }

    // Read user parameters (six settings per plane, Y:Cb:Cr order).
    if (filter->settings != NULL)
    {
        sscanf(filter->settings, "%lf:%lf:%d:%d:%d:%d:%lf:%lf:%d:%d:%d:%d:%lf:%lf:%d:%d:%d:%d",
               &pv->strength[0], &pv->origin_tune[0], &pv->patch_size[0], &pv->range[0], &pv->nframes[0], &pv->prefilter[0],
               &pv->strength[1], &pv->origin_tune[1], &pv->patch_size[1], &pv->range[1], &pv->nframes[1], &pv->prefilter[1],
               &pv->strength[2], &pv->origin_tune[2], &pv->patch_size[2], &pv->range[2], &pv->nframes[2], &pv->prefilter[2]);
    }

    // Cascade values
    // Cr not set; inherit Cb. Cb not set; inherit Y. Y not set; defaults.
    for (int c = 1; c < 3; c++)
    {
        if (pv->strength[c]    == -1) { pv->strength[c]    = pv->strength[c-1]; }
        if (pv->origin_tune[c] == -1) { pv->origin_tune[c] = pv->origin_tune[c-1]; }
        if (pv->patch_size[c]  == -1) { pv->patch_size[c]  = pv->patch_size[c-1]; }
        if (pv->range[c]       == -1) { pv->range[c]       = pv->range[c-1]; }
        if (pv->nframes[c]     == -1) { pv->nframes[c]     = pv->nframes[c-1]; }
        if (pv->prefilter[c]   == -1) { pv->prefilter[c]   = pv->prefilter[c-1]; }
    }

    for (int c = 0; c < 3; c++)
    {
        // Replace unset values with defaults.  Plane 0 is luma (Y), planes
        // 1 and 2 are chroma (Cb/Cr), so a non-zero index selects the
        // chroma default.  (Fix: the luma/chroma operands were previously
        // swapped, giving Y the chroma defaults and vice versa.)
        if (pv->strength[c]    == -1) { pv->strength[c]    = c ? NLMEANS_STRENGTH_CHROMA_DEFAULT    : NLMEANS_STRENGTH_LUMA_DEFAULT; }
        if (pv->origin_tune[c] == -1) { pv->origin_tune[c] = c ? NLMEANS_ORIGIN_TUNE_CHROMA_DEFAULT : NLMEANS_ORIGIN_TUNE_LUMA_DEFAULT; }
        if (pv->patch_size[c]  == -1) { pv->patch_size[c]  = c ? NLMEANS_PATCH_SIZE_CHROMA_DEFAULT  : NLMEANS_PATCH_SIZE_LUMA_DEFAULT; }
        if (pv->range[c]       == -1) { pv->range[c]       = c ? NLMEANS_RANGE_CHROMA_DEFAULT       : NLMEANS_RANGE_LUMA_DEFAULT; }
        if (pv->nframes[c]     == -1) { pv->nframes[c]     = c ? NLMEANS_FRAMES_CHROMA_DEFAULT      : NLMEANS_FRAMES_LUMA_DEFAULT; }
        if (pv->prefilter[c]   == -1) { pv->prefilter[c]   = c ? NLMEANS_PREFILTER_CHROMA_DEFAULT   : NLMEANS_PREFILTER_LUMA_DEFAULT; }

        // Sanitize: patch size and search range must be odd and >= 1;
        // frame count is clamped to [1, NLMEANS_FRAMES_MAX].
        if (pv->strength[c] < 0)        { pv->strength[c] = 0; }
        if (pv->origin_tune[c] < 0.01)  { pv->origin_tune[c] = 0.01; } // avoid black artifacts
        if (pv->origin_tune[c] > 1)     { pv->origin_tune[c] = 1; }
        if (pv->patch_size[c] % 2 == 0) { pv->patch_size[c]--; }
        if (pv->patch_size[c] < 1)      { pv->patch_size[c] = 1; }
        if (pv->range[c] % 2 == 0)      { pv->range[c]--; }
        if (pv->range[c] < 1)           { pv->range[c] = 1; }
        if (pv->nframes[c] < 1)         { pv->nframes[c] = 1; }
        if (pv->nframes[c] > NLMEANS_FRAMES_MAX) { pv->nframes[c] = NLMEANS_FRAMES_MAX; }
        if (pv->prefilter[c] < 0)       { pv->prefilter[c] = 0; }

        // max_frames is the longest temporal window over all planes.
        if (pv->max_frames < pv->nframes[c]) pv->max_frames = pv->nframes[c];
    }

    // One Frame slot per worker thread plus the lookahead window; every
    // plane gets a mutex so shared frames can be prefiltered exactly once.
    pv->thread_count = hb_get_cpu_count();
    pv->frame = calloc(pv->thread_count + pv->max_frames, sizeof(Frame));
    for (int ii = 0; ii < pv->thread_count + pv->max_frames; ii++)
    {
        for (int c = 0; c < 3; c++)
        {
            pv->frame[ii].plane[c].mutex = hb_lock_init();
        }
    }

    pv->thread_data = malloc(pv->thread_count * sizeof(nlmeans_thread_arg_t*));
    if (taskset_init(&pv->taskset, pv->thread_count,
                     sizeof(nlmeans_thread_arg_t)) == 0)
    {
        hb_error("nlmeans could not initialize taskset");
        goto fail;
    }

    for (int ii = 0; ii < pv->thread_count; ii++)
    {
        pv->thread_data[ii] = taskset_thread_args(&pv->taskset, ii);
        if (pv->thread_data[ii] == NULL)
        {
            hb_error("nlmeans could not create thread args");
            goto fail;
        }
        pv->thread_data[ii]->pv = pv;
        pv->thread_data[ii]->segment = ii;
        if (taskset_thread_spawn(&pv->taskset, ii, "nlmeans_filter",
                                 nlmeans_filter_thread, HB_NORMAL_PRIORITY) == 0)
        {
            hb_error("nlmeans could not spawn thread");
            goto fail;
        }
    }

    return 0;

fail:
    taskset_fini(&pv->taskset);
    free(pv->thread_data);
    free(pv);
    // Don't leave a dangling pointer to the freed private data.
    filter->private_data = NULL;
    return -1;
}
/*
 * Tear down the NLMeans filter: stop the worker taskset, release any plane
 * buffers still held by buffered frames, then the mutexes and private data.
 */
static void nlmeans_close(hb_filter_object_t *filter)
{
    hb_filter_private_t *pv = filter->private_data;
    if (pv == NULL)
    {
        return;
    }

    taskset_fini(&pv->taskset);

    // Free plane buffers of frames still sitting in the temporal window.
    // mem_pre may alias mem (prefilter passthru), hence the extra guard
    // against a double free.
    for (int plane = 0; plane < 3; plane++)
    {
        for (int f = 0; f < pv->nframes[plane]; f++)
        {
            BorderedPlane *bp = &pv->frame[f].plane[plane];
            if (bp->mem_pre != NULL && bp->mem_pre != bp->mem)
            {
                free(bp->mem_pre);
                bp->mem_pre = NULL;
            }
            if (bp->mem != NULL)
            {
                free(bp->mem);
                bp->mem = NULL;
            }
        }
    }

    // Every allocated Frame slot owns one mutex per plane.
    for (int slot = 0; slot < pv->thread_count + pv->max_frames; slot++)
    {
        for (int plane = 0; plane < 3; plane++)
        {
            hb_lock_close(&pv->frame[slot].plane[plane].mutex);
        }
    }

    free(pv->frame);
    free(pv->thread_data);
    free(pv);
    filter->private_data = NULL;
}
/*
 * Worker thread main loop.  Each thread owns one segment (one Frame slot in
 * pv->frame) and produces one filtered output buffer per taskset cycle;
 * results are handed back through thread_data->out.
 */
static void nlmeans_filter_thread(void *thread_args_v)
{
    nlmeans_thread_arg_t *thread_data = thread_args_v;
    hb_filter_private_t *pv = thread_data->pv;
    int segment = thread_data->segment;
    hb_log("NLMeans Denoise thread started for segment %d", segment);

    while (1)
    {
        // Wait until there is work to do.
        taskset_thread_wait4start(&pv->taskset, segment);
        if (taskset_thread_stop(&pv->taskset, segment))
        {
            break;
        }

        Frame *frame = &pv->frame[segment];
        hb_buffer_t *buf;
        buf = hb_frame_buffer_init(frame->fmt, frame->width, frame->height);

        for (int c = 0; c < 3; c++)
        {
            // Strength 0: copy the plane through untouched.
            if (pv->strength[c] == 0)
            {
                nlmeans_deborder(&frame->plane[c], buf->plane[c].data,
                                 buf->plane[c].width, buf->plane[c].stride,
                                 buf->plane[c].height);
                continue;
            }

            // Prefilter passthru: output the prefiltered plane directly,
            // skipping the NLMeans pass.  Fix: prefilter this segment's
            // frame (was &pv->frame->plane[c], i.e. always frame 0, which
            // prefiltered the wrong frame for segments > 0 and disagreed
            // with nlmeans_filter_flush()).
            if (pv->prefilter[c] & NLMEANS_PREFILTER_MODE_PASSTHRU)
            {
                nlmeans_prefilter(&frame->plane[c], pv->prefilter[c]);
                nlmeans_deborder(&frame->plane[c], buf->plane[c].data,
                                 buf->plane[c].width, buf->plane[c].stride,
                                 buf->plane[c].height);
                continue;
            }

            // Process current plane
            nlmeans_plane(frame,
                          pv->prefilter[c],
                          c,
                          pv->nframes[c],
                          buf->plane[c].data,
                          buf->plane[c].width,
                          buf->plane[c].stride,
                          buf->plane[c].height,
                          pv->strength[c],
                          pv->origin_tune[c],
                          pv->patch_size[c],
                          pv->range[c]);
        }

        // Carry the input frame's timing/flags onto the output buffer.
        buf->s = pv->frame[segment].s;
        thread_data->out = buf;

        // Finished this segment, notify.
        taskset_thread_complete(&pv->taskset, segment);
    }

    // Also signal completion on shutdown so taskset_fini() can join us.
    taskset_thread_complete(&pv->taskset, segment);
}
/*
 * Buffer an incoming frame: copy each plane into the next free Frame slot
 * with an extra border (sized to cover the search range, rounded up to a
 * multiple of 16), and record the frame's metadata.
 */
static void nlmeans_add_frame(hb_filter_private_t *pv, hb_buffer_t *buf)
{
    Frame *frame = &pv->frame[pv->next_frame];

    for (int c = 0; c < 3; c++)
    {
        // Extend copy of plane with extra border and place in buffer
        int border = ((pv->range[c] + 2) / 2 + 15) / 16 * 16;
        nlmeans_alloc(buf->plane[c].data,
                      buf->plane[c].width,
                      buf->plane[c].stride,
                      buf->plane[c].height,
                      &frame->plane[c],
                      border);
    }

    // Frame-level metadata is plane-independent; set it once instead of
    // redundantly inside the per-plane loop.
    frame->s      = buf->s;
    frame->width  = buf->f.width;
    frame->height = buf->f.height;
    frame->fmt    = buf->f.fmt;

    pv->next_frame++;
}
// Run one taskset cycle and collect its output.
//
// Returns NULL until the buffer holds max_frames (temporal lookahead) plus
// thread_count frames; then all worker threads filter their segments, the
// consumed frames' plane buffers are released, the remaining frames are
// shifted down, and the thread outputs are returned as a linked buffer list
// in segment (presentation) order.
static hb_buffer_t * nlmeans_filter(hb_filter_private_t *pv)
{
if (pv->next_frame < pv->max_frames + pv->thread_count)
return NULL;
// Run every worker over its segment; blocks until all are complete.
taskset_cycle(&pv->taskset);
// Free buffers that are not needed for next taskset cycle
for (int c = 0; c < 3; c++)
{
for (int t = 0; t < pv->thread_count; t++)
{
// Release last frame in buffer
// (mem_pre may alias mem after a passthru prefilter, hence the
// inequality guard against a double free.)
if (pv->frame[t].plane[c].mem_pre != NULL &&
pv->frame[t].plane[c].mem_pre != pv->frame[t].plane[c].mem)
{
free(pv->frame[t].plane[c].mem_pre);
pv->frame[t].plane[c].mem_pre = NULL;
}
if (pv->frame[t].plane[c].mem != NULL)
{
free(pv->frame[t].plane[c].mem);
pv->frame[t].plane[c].mem = NULL;
}
}
}
// Shift frames in buffer down
for (int f = 0; f < pv->max_frames; f++)
{
// Don't move the mutex!
// Each Frame slot permanently owns its mutexes (created in init, closed
// in close), so restore them after the struct copy, and null out the
// moved-from slot's buffer pointers so they can't be double-freed.
Frame frame = pv->frame[f];
pv->frame[f] = pv->frame[f+pv->thread_count];
for (int c = 0; c < 3; c++)
{
pv->frame[f].plane[c].mutex = frame.plane[c].mutex;
pv->frame[f+pv->thread_count].plane[c].mem_pre = NULL;
pv->frame[f+pv->thread_count].plane[c].mem = NULL;
}
}
pv->next_frame -= pv->thread_count;
// Collect results from taskset
// Chain each thread's output buffer in segment order.
hb_buffer_t *last = NULL, *out = NULL;
for (int t = 0; t < pv->thread_count; t++)
{
if (out == NULL)
{
out = last = pv->thread_data[t]->out;
}
else
{
last->next = pv->thread_data[t]->out;
last = pv->thread_data[t]->out;
}
}
return out;
}
// Flush at end of stream: filter every frame still buffered, single-threaded,
// and return the results as a linked buffer list.  Mirrors the per-plane
// logic of nlmeans_filter_thread(), except the temporal window shrinks as we
// near the end of the buffer (nframes is clamped to the frames remaining).
static hb_buffer_t * nlmeans_filter_flush(hb_filter_private_t *pv)
{
hb_buffer_t *out = NULL, *last = NULL;
for (int f = 0; f < pv->next_frame; f++)
{
Frame *frame = &pv->frame[f];
hb_buffer_t *buf;
buf = hb_frame_buffer_init(frame->fmt, frame->width, frame->height);
for (int c = 0; c < 3; c++)
{
// Strength 0: copy the plane through untouched.
if (pv->strength[c] == 0)
{
nlmeans_deborder(&frame->plane[c], buf->plane[c].data,
buf->plane[c].width, buf->plane[c].stride,
buf->plane[c].height);
continue;
}
// Prefilter passthru: emit the prefiltered plane, skip NLMeans.
if (pv->prefilter[c] & NLMEANS_PREFILTER_MODE_PASSTHRU)
{
nlmeans_prefilter(&pv->frame[f].plane[c], pv->prefilter[c]);
nlmeans_deborder(&frame->plane[c], buf->plane[c].data,
buf->plane[c].width, buf->plane[c].stride,
buf->plane[c].height);
continue;
}
// Clamp the temporal window to the frames actually left in the buffer.
int nframes = pv->next_frame - f;
if (pv->nframes[c] < nframes)
nframes = pv->nframes[c];
// Process current plane
nlmeans_plane(frame,
pv->prefilter[c],
c,
nframes,
buf->plane[c].data,
buf->plane[c].width,
buf->plane[c].stride,
buf->plane[c].height,
pv->strength[c],
pv->origin_tune[c],
pv->patch_size[c],
pv->range[c]);
}
// Carry the input frame's timing/flags and append to the output list.
buf->s = frame->s;
if (out == NULL)
{
out = last = buf;
}
else
{
last->next = buf;
last = buf;
}
}
return out;
}
/*
 * Filter work entry point.
 *
 * Normal frames are buffered and, once enough have accumulated, a batch of
 * filtered output is returned.  A zero-size buffer signals EOF: all buffered
 * frames are flushed and the EOF buffer is appended to the tail of the
 * output list so downstream sees it last.
 */
static int nlmeans_work(hb_filter_object_t *filter,
                        hb_buffer_t **buf_in,
                        hb_buffer_t **buf_out )
{
    hb_filter_private_t *pv = filter->private_data;
    hb_buffer_t *in = *buf_in;

    if (in->size > 0)
    {
        // Normal frame: buffer it and emit whatever output is ready.
        nlmeans_add_frame(pv, in);
        *buf_out = nlmeans_filter(pv);
        return HB_FILTER_OK;
    }

    // EOF: flush buffered frames, then terminate the list with the EOF buffer.
    hb_buffer_t *flushed = nlmeans_filter_flush(pv);
    if (flushed == NULL)
    {
        *buf_out = in;
    }
    else
    {
        hb_buffer_t *tail = flushed;
        while (tail->next != NULL)
        {
            tail = tail->next;
        }
        tail->next = in;
        *buf_out = flushed;
    }
    *buf_in = NULL;
    return HB_FILTER_DONE;
}
HandBrake-0.10.2/libhb/enctheora.c 0000664 0001752 0001752 00000027404 12531124076 017243 0 ustar handbrake handbrake /* enctheora.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
#include "theora/codec.h"
#include "theora/theoraenc.h"
int enctheoraInit( hb_work_object_t *, hb_job_t * );
int enctheoraWork( hb_work_object_t *, hb_buffer_t **, hb_buffer_t ** );
void enctheoraClose( hb_work_object_t * );
// Work object registration: binds the Theora encoder entry points to the
// WORK_ENCTHEORA stage id used by the encoding pipeline.
hb_work_object_t hb_enctheora =
{
WORK_ENCTHEORA,
"Theora encoder (libtheora)",
enctheoraInit,
enctheoraWork,
enctheoraClose
};
// Private state for the Theora encoder work object.
struct hb_work_private_s
{
hb_job_t * job; /* encoding job parameters (dimensions, rate, passes) */
th_enc_ctx * ctx; /* libtheora encoder context */
FILE * file; /* 2-pass stats file: written in pass 1, read in pass 2 */
unsigned char stat_buf[80]; /* staging buffer for feeding pass-2 stats to the encoder */
int stat_read; /* consumed offset within stat_buf */
int stat_fill; /* number of valid bytes in stat_buf */
};
// Initialize the Theora encoder for a job: open the 2-pass stats file if
// needed, configure th_info (geometry, rate, aspect, quality/bitrate,
// keyframe interval), allocate the encoder, set rate-control options, and
// emit the three Theora stream headers into w->config.
// Returns 0 on success, 1 on 2-pass setup failure.
int enctheoraInit( hb_work_object_t * w, hb_job_t * job )
{
int keyframe_frequency, log_keyframe, ret;
hb_work_private_t * pv = calloc( 1, sizeof( hb_work_private_t ) );
w->private_data = pv;
pv->job = job;
// Multi-pass encode (pass 1 writes stats, pass 2 reads them back).
if( job->pass != 0 && job->pass != -1 )
{
char filename[1024];
memset( filename, 0, 1024 );
// ("theroa.log" is a long-standing typo, but both passes use the same
// name so they find the same file; do not change one without the other.)
hb_get_tempory_filename( job->h, filename, "theroa.log" );
if ( job->pass == 1 )
{
pv->file = hb_fopen(filename, "wb");
}
else
{
pv->file = hb_fopen(filename, "rb");
}
}
th_info ti;
th_comment tc;
ogg_packet op;
th_info_init( &ti );
/* Frame width and height need to be multiples of 16 */
ti.pic_width = job->width;
ti.pic_height = job->height;
ti.frame_width = (job->width + 0xf) & ~0xf;
ti.frame_height = (job->height + 0xf) & ~0xf;
ti.pic_x = ti.pic_y = 0;
// In pass 2 use the frame rate measured during pass 1 (stored in the
// inter-job data) so both passes agree.
if( job->pass == 2 )
{
hb_interjob_t * interjob = hb_interjob_get( job->h );
ti.fps_numerator = interjob->vrate;
ti.fps_denominator = interjob->vrate_base;
}
else
{
ti.fps_numerator = job->vrate;
ti.fps_denominator = job->vrate_base;
}
if( job->anamorphic.mode )
{
ti.aspect_numerator = job->anamorphic.par_width;
ti.aspect_denominator = job->anamorphic.par_height;
}
else
{
ti.aspect_numerator = ti.aspect_denominator = 1;
}
ti.colorspace = TH_CS_UNSPECIFIED;
ti.pixel_fmt = TH_PF_420;
// Negative vquality means bitrate-targeted encoding; otherwise constant
// quality (and a zero target bitrate tells libtheora to use quality mode).
if (job->vquality < 0.0)
{
ti.target_bitrate = job->vbitrate * 1000;
ti.quality = 0;
}
else
{
ti.target_bitrate = 0;
ti.quality = job->vquality;
}
// Keyframe every ~10 seconds of video (rate rounded to nearest integer).
keyframe_frequency = 10 * (int)( (double)job->vrate / (double)job->vrate_base + 0.5 );
hb_log("theora: keyint: %i", keyframe_frequency);
// keyframe_granule_shift must be ceil(log2(keyframe interval)).
int tmp = keyframe_frequency - 1;
for (log_keyframe = 0; tmp; log_keyframe++)
tmp >>= 1;
ti.keyframe_granule_shift = log_keyframe;
pv->ctx = th_encode_alloc( &ti );
th_info_clear( &ti );
ret = th_encode_ctl(pv->ctx, TH_ENCCTL_SET_KEYFRAME_FREQUENCY_FORCE,
&keyframe_frequency, sizeof(keyframe_frequency));
if( ret < 0 )
{
hb_log("theora: Could not set keyframe interval to %d", keyframe_frequency);
}
/* Set "soft target" rate control which improves quality at the
* expense of solid bitrate caps */
int arg = TH_RATECTL_CAP_UNDERFLOW;
ret = th_encode_ctl(pv->ctx, TH_ENCCTL_SET_RATE_FLAGS, &arg, sizeof(arg));
if( ret < 0 )
{
hb_log("theora: Could not set soft ratecontrol");
}
// 2-pass: give the rate controller a buffer of 3.5 keyframe intervals.
if( job->pass != 0 && job->pass != -1 )
{
arg = keyframe_frequency * 7 >> 1;
ret = th_encode_ctl(pv->ctx, TH_ENCCTL_SET_RATE_BUFFER, &arg, sizeof(arg));
if( ret < 0 )
{
hb_log("theora: Could not set rate control buffer");
}
}
// Pass 1: grab the initial stats block from the encoder and write it to
// the stats file so pass 2 can rewrite it with final data later.
if( job->pass == 1 )
{
unsigned char *buffer;
int bytes;
bytes = th_encode_ctl(pv->ctx, TH_ENCCTL_2PASS_OUT, &buffer, sizeof(buffer));
if( bytes < 0 )
{
hb_error("Could not set up the first pass of two-pass mode.\n");
hb_error("Did you remember to specify an estimated bitrate?\n");
return 1;
}
// NOTE(review): signed/unsigned comparison -- bytes is non-negative
// here, so the comparison against fwrite's size_t result is safe.
if( fwrite( buffer, 1, bytes, pv->file ) < bytes )
{
hb_error("Unable to write to two-pass data file.\n");
return 1;
}
fflush( pv->file );
}
if( job->pass == 2 )
{
/* Enable the second pass here.
* We make this call just to set the encoder into 2-pass mode, because
* by default enabling two-pass sets the buffer delay to the whole file
* (because there's no way to explicitly request that behavior).
* If we waited until we were actually encoding, it would overwite our
* settings.*/
hb_log("enctheora: init 2nd pass");
if( th_encode_ctl( pv->ctx, TH_ENCCTL_2PASS_IN, NULL, 0) < 0)
{
hb_log("theora: Could not set up the second pass of two-pass mode.");
return 1;
}
}
// Emit the three mandatory Theora headers (info, comment, setup) into the
// muxer config area: each slot stores the ogg_packet struct followed
// immediately by its payload, with the packet pointer fixed up to point at
// that in-place copy.
th_comment_init( &tc );
ogg_packet *header;
int ii;
for (ii = 0; ii < 3; ii++)
{
th_encode_flushheader( pv->ctx, &tc, &op );
header = (ogg_packet*)w->config->theora.headers[ii];
memcpy(header, &op, sizeof(op));
header->packet = w->config->theora.headers[ii] + sizeof(ogg_packet);
memcpy(header->packet, op.packet, op.bytes );
}
th_comment_clear( &tc );
return 0;
}
/***********************************************************************
* Close
***********************************************************************
*
**********************************************************************/
/*
 * Release the Theora encoder: free the libtheora context, close the 2-pass
 * stats file if one was opened, and drop the private data.
 */
void enctheoraClose( hb_work_object_t * w )
{
    hb_work_private_t *priv = w->private_data;

    th_encode_free( priv->ctx );
    if( priv->file != NULL )
    {
        fclose( priv->file );
    }
    free( priv );
    w->private_data = NULL;
}
/***********************************************************************
* Work
***********************************************************************
*
**********************************************************************/
// Encode one frame: feed the input YCbCr planes to libtheora, shuttle 2-pass
// stats to/from the stats file, and emit the resulting packet as an output
// buffer.  A zero-size input buffer signals EOF and is passed through after
// finalizing the encoder (and, in pass 1, rewriting the final stats block).
int enctheoraWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
hb_buffer_t ** buf_out )
{
hb_work_private_t * pv = w->private_data;
hb_job_t * job = pv->job;
hb_buffer_t * in = *buf_in, * buf;
th_ycbcr_buffer ycbcr;
ogg_packet op;
int frame_width, frame_height;
if ( in->size <= 0 )
{
// EOF on input - send it downstream & say we're done.
// XXX may need to flush packets via a call to
// th_encode_packetout( pv->ctx, 1, &op );
// but we don't have a timestamp to put on those packets so we
// drop them for now.
*buf_out = in;
*buf_in = NULL;
th_encode_packetout( pv->ctx, 1, &op );
// Pass 1: the encoder's final stats summary must overwrite the
// placeholder block written at init time (hence the rewind).
if( job->pass == 1 )
{
unsigned char *buffer;
int bytes;
bytes = th_encode_ctl(pv->ctx, TH_ENCCTL_2PASS_OUT,
&buffer, sizeof(buffer));
if( bytes < 0 )
{
fprintf(stderr,"Could not read two-pass data from encoder.\n");
return HB_WORK_DONE;
}
fseek( pv->file, 0, SEEK_SET );
if( fwrite( buffer, 1, bytes, pv->file ) < bytes)
{
fprintf(stderr,"Unable to write to two-pass data file.\n");
return HB_WORK_DONE;
}
fflush( pv->file );
}
return HB_WORK_DONE;
}
// Pass 2: before submitting a frame, feed the encoder however many stats
// bytes it asks for, staging them through the 80-byte stat_buf
// (stat_read..stat_fill delimit the unconsumed region).
if( job->pass == 2 )
{
for(;;)
{
int bytes, size, ret;
/*Ask the encoder how many bytes it would like.*/
bytes = th_encode_ctl( pv->ctx, TH_ENCCTL_2PASS_IN, NULL, 0 );
if( bytes < 0 )
{
hb_error("Error requesting stats size in second pass.");
*job->done_error = HB_ERROR_UNKNOWN;
*job->die = 1;
return HB_WORK_DONE;
}
/*If it's got enough, stop.*/
if( bytes == 0 ) break;
/*Read in some more bytes, if necessary.*/
if( bytes > pv->stat_fill - pv->stat_read )
size = bytes - (pv->stat_fill - pv->stat_read);
else
size = 0;
if( size > 80 - pv->stat_fill )
size = 80 - pv->stat_fill;
if( size > 0 &&
fread( pv->stat_buf+pv->stat_fill, 1, size, pv->file ) < size )
{
hb_error("Could not read frame data from two-pass data file!");
*job->done_error = HB_ERROR_UNKNOWN;
*job->die = 1;
return HB_WORK_DONE;
}
pv->stat_fill += size;
/*And pass them off.*/
if( bytes > pv->stat_fill - pv->stat_read )
bytes = pv->stat_fill - pv->stat_read;
ret = th_encode_ctl( pv->ctx, TH_ENCCTL_2PASS_IN,
pv->stat_buf+pv->stat_read, bytes);
if( ret < 0 )
{
hb_error("Error submitting pass data in second pass.");
*job->done_error = HB_ERROR_UNKNOWN;
*job->die = 1;
return HB_WORK_DONE;
}
/*If the encoder consumed the whole buffer, reset it.*/
if( ret >= pv->stat_fill - pv->stat_read )
pv->stat_read = pv->stat_fill = 0;
/*Otherwise remember how much it used.*/
else
pv->stat_read += ret;
}
}
memset(&op, 0, sizeof(op));
memset(&ycbcr, 0, sizeof(ycbcr));
// The encoder was configured with 16-aligned frame dimensions; describe
// the input planes accordingly (4:2:0 chroma is half size each way).
frame_width = (job->width + 0xf) & ~0xf;
frame_height = (job->height + 0xf) & ~0xf;
// Y
ycbcr[0].width = frame_width;
ycbcr[0].height = frame_height;
// CbCr decimated by factor of 2 in both width and height
ycbcr[1].width = ycbcr[2].width = (frame_width + 1) / 2;
ycbcr[1].height = ycbcr[2].height = (frame_height + 1) / 2;
ycbcr[0].stride = in->plane[0].stride;
ycbcr[1].stride = in->plane[1].stride;
ycbcr[2].stride = in->plane[2].stride;
ycbcr[0].data = in->plane[0].data;
ycbcr[1].data = in->plane[1].data;
ycbcr[2].data = in->plane[2].data;
th_encode_ycbcr_in( pv->ctx, ycbcr );
// Pass 1: append this frame's stats to the stats file.
if( job->pass == 1 )
{
unsigned char *buffer;
int bytes;
bytes = th_encode_ctl(pv->ctx, TH_ENCCTL_2PASS_OUT,
&buffer, sizeof(buffer));
if( bytes < 0 )
{
fprintf(stderr,"Could not read two-pass data from encoder.\n");
*job->done_error = HB_ERROR_UNKNOWN;
*job->die = 1;
return HB_WORK_DONE;
}
if( fwrite( buffer, 1, bytes, pv->file ) < bytes)
{
fprintf(stderr,"Unable to write to two-pass data file.\n");
*job->done_error = HB_ERROR_UNKNOWN;
*job->die = 1;
return HB_WORK_DONE;
}
fflush( pv->file );
}
th_encode_packetout( pv->ctx, 0, &op );
// Theora can generate 0 length output for duplicate frames.
// Since we use 0 length buffers to indicate end of stream, we
// can't allow 0 lenth buffers.
//
// As a work-around, always allocate an extra byte for theora buffers.
//
// This is fixed correctly in svn trunk by using a end of stream flag
// instead of 0 length buffer.
buf = hb_buffer_init(op.bytes + 1);
memcpy(buf->data, op.packet, op.bytes);
buf->f.fmt = AV_PIX_FMT_YUV420P;
buf->f.width = frame_width;
buf->f.height = frame_height;
buf->s.frametype = ( th_packet_iskeyframe(&op) ) ? HB_FRAME_KEY : HB_FRAME_REF;
buf->s.start = in->s.start;
buf->s.stop = in->s.stop;
buf->s.duration = in->s.stop - in->s.start;
*buf_out = buf;
return HB_WORK_OK;
}
HandBrake-0.10.2/libhb/opencl.h 0000664 0001752 0001752 00000064723 12463330511 016562 0 ustar handbrake handbrake /* opencl.h
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#ifndef HB_OPENCL_H
#define HB_OPENCL_H
#include "extras/cl.h"
#include "openclwrapper.h"
// we only support OpenCL 1.1 or later
#define HB_OCL_MINVERSION_MAJOR 1
#define HB_OCL_MINVERSION_MINOR 1
#define HB_OCL_FUNC_TYPE(name) hb_opencl_##name##_func
#define HB_OCL_FUNC_DECL(name) HB_OCL_FUNC_TYPE(name) name
#define HB_OCL_API(ret, attr, name) typedef ret (attr* HB_OCL_FUNC_TYPE(name))
#ifdef __APPLE__
#pragma mark -
#pragma mark OpenCL API
#endif // __APPLE__
/* Platform API */
HB_OCL_API(cl_int, CL_API_CALL, clGetPlatformIDs)
(cl_uint /* num_entries */,
cl_platform_id * /* platforms */,
cl_uint * /* num_platforms */);
HB_OCL_API(cl_int, CL_API_CALL, clGetPlatformInfo)
(cl_platform_id /* platform */,
cl_platform_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */);
/* Device APIs */
HB_OCL_API(cl_int, CL_API_CALL, clGetDeviceIDs)
(cl_platform_id /* platform */,
cl_device_type /* device_type */,
cl_uint /* num_entries */,
cl_device_id * /* devices */,
cl_uint * /* num_devices */);
HB_OCL_API(cl_int, CL_API_CALL, clGetDeviceInfo)
(cl_device_id /* device */,
cl_device_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */);
HB_OCL_API(cl_int, CL_API_CALL, clCreateSubDevices)
(cl_device_id /* in_device */,
const cl_device_partition_property * /* properties */,
cl_uint /* num_devices */,
cl_device_id * /* out_devices */,
cl_uint * /* num_devices_ret */);
HB_OCL_API(cl_int, CL_API_CALL, clRetainDevice)
(cl_device_id /* device */);
HB_OCL_API(cl_int, CL_API_CALL, clReleaseDevice)
(cl_device_id /* device */);
/* Context APIs */
HB_OCL_API(cl_context, CL_API_CALL, clCreateContext)
(const cl_context_properties * /* properties */,
cl_uint /* num_devices */,
const cl_device_id * /* devices */,
void (CL_CALLBACK * /* pfn_notify */)(const char *, const void *, size_t, void *),
void * /* user_data */,
cl_int * /* errcode_ret */);
HB_OCL_API(cl_context, CL_API_CALL, clCreateContextFromType)
(const cl_context_properties * /* properties */,
cl_device_type /* device_type */,
void (CL_CALLBACK * /* pfn_notify*/ )(const char *, const void *, size_t, void *),
void * /* user_data */,
cl_int * /* errcode_ret */);
HB_OCL_API(cl_int, CL_API_CALL, clRetainContext)
(cl_context /* context */);
HB_OCL_API(cl_int, CL_API_CALL, clReleaseContext)
(cl_context /* context */);
HB_OCL_API(cl_int, CL_API_CALL, clGetContextInfo)
(cl_context /* context */,
cl_context_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */);
/* Command Queue APIs */
HB_OCL_API(cl_command_queue, CL_API_CALL, clCreateCommandQueue)
(cl_context /* context */,
cl_device_id /* device */,
cl_command_queue_properties /* properties */,
cl_int * /* errcode_ret */);
HB_OCL_API(cl_int, CL_API_CALL, clRetainCommandQueue)
(cl_command_queue /* command_queue */);
HB_OCL_API(cl_int, CL_API_CALL, clReleaseCommandQueue)
(cl_command_queue /* command_queue */);
HB_OCL_API(cl_int, CL_API_CALL, clGetCommandQueueInfo)
(cl_command_queue /* command_queue */,
cl_command_queue_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */);
/* Memory Object APIs */
HB_OCL_API(cl_mem, CL_API_CALL, clCreateBuffer)
(cl_context /* context */,
cl_mem_flags /* flags */,
size_t /* size */,
void * /* host_ptr */,
cl_int * /* errcode_ret */);
HB_OCL_API(cl_mem, CL_API_CALL, clCreateSubBuffer)
(cl_mem /* buffer */,
cl_mem_flags /* flags */,
cl_buffer_create_type /* buffer_create_type */,
const void * /* buffer_create_info */,
cl_int * /* errcode_ret */);
HB_OCL_API(cl_mem, CL_API_CALL, clCreateImage)
(cl_context /* context */,
cl_mem_flags /* flags */,
const cl_image_format * /* image_format */,
const cl_image_desc * /* image_desc */,
void * /* host_ptr */,
cl_int * /* errcode_ret */);
HB_OCL_API(cl_int, CL_API_CALL, clRetainMemObject)
(cl_mem /* memobj */);
HB_OCL_API(cl_int, CL_API_CALL, clReleaseMemObject)
(cl_mem /* memobj */);
HB_OCL_API(cl_int, CL_API_CALL, clGetSupportedImageFormats)
(cl_context /* context */,
cl_mem_flags /* flags */,
cl_mem_object_type /* image_type */,
cl_uint /* num_entries */,
cl_image_format * /* image_formats */,
cl_uint * /* num_image_formats */);
HB_OCL_API(cl_int, CL_API_CALL, clGetMemObjectInfo)
(cl_mem /* memobj */,
cl_mem_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */);
HB_OCL_API(cl_int, CL_API_CALL, clGetImageInfo)
(cl_mem /* image */,
cl_image_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */);
HB_OCL_API(cl_int, CL_API_CALL, clSetMemObjectDestructorCallback)
(cl_mem /* memobj */,
void (CL_CALLBACK * /*pfn_notify*/)( cl_mem /* memobj */, void* /*user_data*/),
void * /*user_data */ );
/* Sampler APIs */
HB_OCL_API(cl_sampler, CL_API_CALL, clCreateSampler)
(cl_context /* context */,
cl_bool /* normalized_coords */,
cl_addressing_mode /* addressing_mode */,
cl_filter_mode /* filter_mode */,
cl_int * /* errcode_ret */);
HB_OCL_API(cl_int, CL_API_CALL, clRetainSampler)
(cl_sampler /* sampler */);
HB_OCL_API(cl_int, CL_API_CALL, clReleaseSampler)
(cl_sampler /* sampler */);
HB_OCL_API(cl_int, CL_API_CALL, clGetSamplerInfo)
(cl_sampler /* sampler */,
cl_sampler_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */);
/* Program Object APIs */
HB_OCL_API(cl_program, CL_API_CALL, clCreateProgramWithSource)
(cl_context /* context */,
cl_uint /* count */,
const char ** /* strings */,
const size_t * /* lengths */,
cl_int * /* errcode_ret */);
HB_OCL_API(cl_program, CL_API_CALL, clCreateProgramWithBinary)
(cl_context /* context */,
cl_uint /* num_devices */,
const cl_device_id * /* device_list */,
const size_t * /* lengths */,
const unsigned char ** /* binaries */,
cl_int * /* binary_status */,
cl_int * /* errcode_ret */);
HB_OCL_API(cl_program, CL_API_CALL, clCreateProgramWithBuiltInKernels)
(cl_context /* context */,
cl_uint /* num_devices */,
const cl_device_id * /* device_list */,
const char * /* kernel_names */,
cl_int * /* errcode_ret */);
HB_OCL_API(cl_int, CL_API_CALL, clRetainProgram)
(cl_program /* program */);
HB_OCL_API(cl_int, CL_API_CALL, clReleaseProgram)
(cl_program /* program */);
HB_OCL_API(cl_int, CL_API_CALL, clBuildProgram)
(cl_program /* program */,
cl_uint /* num_devices */,
const cl_device_id * /* device_list */,
const char * /* options */,
void (CL_CALLBACK * /* pfn_notify */)(cl_program /* program */, void * /* user_data */),
void * /* user_data */);
HB_OCL_API(cl_int, CL_API_CALL, clCompileProgram)
(cl_program /* program */,
cl_uint /* num_devices */,
const cl_device_id * /* device_list */,
const char * /* options */,
cl_uint /* num_input_headers */,
const cl_program * /* input_headers */,
const char ** /* header_include_names */,
void (CL_CALLBACK * /* pfn_notify */)(cl_program /* program */, void * /* user_data */),
void * /* user_data */);
HB_OCL_API(cl_program, CL_API_CALL, clLinkProgram)
(cl_context /* context */,
cl_uint /* num_devices */,
const cl_device_id * /* device_list */,
const char * /* options */,
cl_uint /* num_input_programs */,
const cl_program * /* input_programs */,
void (CL_CALLBACK * /* pfn_notify */)(cl_program /* program */, void * /* user_data */),
void * /* user_data */,
cl_int * /* errcode_ret */ );
HB_OCL_API(cl_int, CL_API_CALL, clUnloadPlatformCompiler)
(cl_platform_id /* platform */);
HB_OCL_API(cl_int, CL_API_CALL, clGetProgramInfo)
(cl_program /* program */,
cl_program_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */);
HB_OCL_API(cl_int, CL_API_CALL, clGetProgramBuildInfo)
(cl_program /* program */,
cl_device_id /* device */,
cl_program_build_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */);
/* Kernel Object APIs */
HB_OCL_API(cl_kernel, CL_API_CALL, clCreateKernel)
(cl_program /* program */,
const char * /* kernel_name */,
cl_int * /* errcode_ret */);
HB_OCL_API(cl_int, CL_API_CALL, clCreateKernelsInProgram)
(cl_program /* program */,
cl_uint /* num_kernels */,
cl_kernel * /* kernels */,
cl_uint * /* num_kernels_ret */);
HB_OCL_API(cl_int, CL_API_CALL, clRetainKernel)
(cl_kernel /* kernel */);
HB_OCL_API(cl_int, CL_API_CALL, clReleaseKernel)
(cl_kernel /* kernel */);
HB_OCL_API(cl_int, CL_API_CALL, clSetKernelArg)
(cl_kernel /* kernel */,
cl_uint /* arg_index */,
size_t /* arg_size */,
const void * /* arg_value */);
HB_OCL_API(cl_int, CL_API_CALL, clGetKernelInfo)
(cl_kernel /* kernel */,
cl_kernel_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */);
HB_OCL_API(cl_int, CL_API_CALL, clGetKernelArgInfo)
(cl_kernel /* kernel */,
cl_uint /* arg_indx */,
cl_kernel_arg_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */);
HB_OCL_API(cl_int, CL_API_CALL, clGetKernelWorkGroupInfo)
(cl_kernel /* kernel */,
cl_device_id /* device */,
cl_kernel_work_group_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */);
/* Event Object APIs */
HB_OCL_API(cl_int, CL_API_CALL, clWaitForEvents)
(cl_uint /* num_events */,
const cl_event * /* event_list */);
HB_OCL_API(cl_int, CL_API_CALL, clGetEventInfo)
(cl_event /* event */,
cl_event_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */);
HB_OCL_API(cl_event, CL_API_CALL, clCreateUserEvent)
(cl_context /* context */,
cl_int * /* errcode_ret */);
HB_OCL_API(cl_int, CL_API_CALL, clRetainEvent)
(cl_event /* event */);
HB_OCL_API(cl_int, CL_API_CALL, clReleaseEvent)
(cl_event /* event */);
HB_OCL_API(cl_int, CL_API_CALL, clSetUserEventStatus)
(cl_event /* event */,
cl_int /* execution_status */);
HB_OCL_API(cl_int, CL_API_CALL, clSetEventCallback)
(cl_event /* event */,
cl_int /* command_exec_callback_type */,
void (CL_CALLBACK * /* pfn_notify */)(cl_event, cl_int, void *),
void * /* user_data */);
/* Profiling APIs */
HB_OCL_API(cl_int, CL_API_CALL, clGetEventProfilingInfo)
(cl_event /* event */,
cl_profiling_info /* param_name */,
size_t /* param_value_size */,
void * /* param_value */,
size_t * /* param_value_size_ret */);
/* Flush and Finish APIs */
HB_OCL_API(cl_int, CL_API_CALL, clFlush)
(cl_command_queue /* command_queue */);
HB_OCL_API(cl_int, CL_API_CALL, clFinish)
(cl_command_queue /* command_queue */);
/* Enqueued Commands APIs */
HB_OCL_API(cl_int, CL_API_CALL, clEnqueueReadBuffer)
(cl_command_queue /* command_queue */,
cl_mem /* buffer */,
cl_bool /* blocking_read */,
size_t /* offset */,
size_t /* size */,
void * /* ptr */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */);
HB_OCL_API(cl_int, CL_API_CALL, clEnqueueReadBufferRect)
(cl_command_queue /* command_queue */,
cl_mem /* buffer */,
cl_bool /* blocking_read */,
const size_t * /* buffer_offset */,
const size_t * /* host_offset */,
const size_t * /* region */,
size_t /* buffer_row_pitch */,
size_t /* buffer_slice_pitch */,
size_t /* host_row_pitch */,
size_t /* host_slice_pitch */,
void * /* ptr */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */);
HB_OCL_API(cl_int, CL_API_CALL, clEnqueueWriteBuffer)
(cl_command_queue /* command_queue */,
cl_mem /* buffer */,
cl_bool /* blocking_write */,
size_t /* offset */,
size_t /* size */,
const void * /* ptr */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */);
HB_OCL_API(cl_int, CL_API_CALL, clEnqueueWriteBufferRect)
(cl_command_queue /* command_queue */,
cl_mem /* buffer */,
cl_bool /* blocking_write */,
const size_t * /* buffer_offset */,
const size_t * /* host_offset */,
const size_t * /* region */,
size_t /* buffer_row_pitch */,
size_t /* buffer_slice_pitch */,
size_t /* host_row_pitch */,
size_t /* host_slice_pitch */,
const void * /* ptr */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */);
HB_OCL_API(cl_int, CL_API_CALL, clEnqueueFillBuffer)
(cl_command_queue /* command_queue */,
cl_mem /* buffer */,
const void * /* pattern */,
size_t /* pattern_size */,
size_t /* offset */,
size_t /* size */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */);
HB_OCL_API(cl_int, CL_API_CALL, clEnqueueCopyBuffer)
(cl_command_queue /* command_queue */,
cl_mem /* src_buffer */,
cl_mem /* dst_buffer */,
size_t /* src_offset */,
size_t /* dst_offset */,
size_t /* size */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */);
HB_OCL_API(cl_int, CL_API_CALL, clEnqueueCopyBufferRect)
(cl_command_queue /* command_queue */,
cl_mem /* src_buffer */,
cl_mem /* dst_buffer */,
const size_t * /* src_origin */,
const size_t * /* dst_origin */,
const size_t * /* region */,
size_t /* src_row_pitch */,
size_t /* src_slice_pitch */,
size_t /* dst_row_pitch */,
size_t /* dst_slice_pitch */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */);
HB_OCL_API(cl_int, CL_API_CALL, clEnqueueReadImage)
(cl_command_queue /* command_queue */,
cl_mem /* image */,
cl_bool /* blocking_read */,
const size_t * /* origin[3] */,
const size_t * /* region[3] */,
size_t /* row_pitch */,
size_t /* slice_pitch */,
void * /* ptr */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */);
HB_OCL_API(cl_int, CL_API_CALL, clEnqueueWriteImage)
(cl_command_queue /* command_queue */,
cl_mem /* image */,
cl_bool /* blocking_write */,
const size_t * /* origin[3] */,
const size_t * /* region[3] */,
size_t /* input_row_pitch */,
size_t /* input_slice_pitch */,
const void * /* ptr */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */);
HB_OCL_API(cl_int, CL_API_CALL, clEnqueueFillImage)
(cl_command_queue /* command_queue */,
cl_mem /* image */,
const void * /* fill_color */,
const size_t * /* origin[3] */,
const size_t * /* region[3] */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */);
HB_OCL_API(cl_int, CL_API_CALL, clEnqueueCopyImage)
(cl_command_queue /* command_queue */,
cl_mem /* src_image */,
cl_mem /* dst_image */,
const size_t * /* src_origin[3] */,
const size_t * /* dst_origin[3] */,
const size_t * /* region[3] */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */);
HB_OCL_API(cl_int, CL_API_CALL, clEnqueueCopyImageToBuffer)
(cl_command_queue /* command_queue */,
cl_mem /* src_image */,
cl_mem /* dst_buffer */,
const size_t * /* src_origin[3] */,
const size_t * /* region[3] */,
size_t /* dst_offset */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */);
HB_OCL_API(cl_int, CL_API_CALL, clEnqueueCopyBufferToImage)
(cl_command_queue /* command_queue */,
cl_mem /* src_buffer */,
cl_mem /* dst_image */,
size_t /* src_offset */,
const size_t * /* dst_origin[3] */,
const size_t * /* region[3] */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */);
HB_OCL_API(void *, CL_API_CALL, clEnqueueMapBuffer)
(cl_command_queue /* command_queue */,
cl_mem /* buffer */,
cl_bool /* blocking_map */,
cl_map_flags /* map_flags */,
size_t /* offset */,
size_t /* size */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */,
cl_int * /* errcode_ret */);
HB_OCL_API(void *, CL_API_CALL, clEnqueueMapImage)
(cl_command_queue /* command_queue */,
cl_mem /* image */,
cl_bool /* blocking_map */,
cl_map_flags /* map_flags */,
const size_t * /* origin[3] */,
const size_t * /* region[3] */,
size_t * /* image_row_pitch */,
size_t * /* image_slice_pitch */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */,
cl_int * /* errcode_ret */);
HB_OCL_API(cl_int, CL_API_CALL, clEnqueueUnmapMemObject)
(cl_command_queue /* command_queue */,
cl_mem /* memobj */,
void * /* mapped_ptr */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */);
HB_OCL_API(cl_int, CL_API_CALL, clEnqueueMigrateMemObjects)
(cl_command_queue /* command_queue */,
cl_uint /* num_mem_objects */,
const cl_mem * /* mem_objects */,
cl_mem_migration_flags /* flags */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */);
HB_OCL_API(cl_int, CL_API_CALL, clEnqueueNDRangeKernel)
(cl_command_queue /* command_queue */,
cl_kernel /* kernel */,
cl_uint /* work_dim */,
const size_t * /* global_work_offset */,
const size_t * /* global_work_size */,
const size_t * /* local_work_size */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */);
HB_OCL_API(cl_int, CL_API_CALL, clEnqueueTask)
(cl_command_queue /* command_queue */,
cl_kernel /* kernel */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */);
HB_OCL_API(cl_int, CL_API_CALL, clEnqueueNativeKernel)
(cl_command_queue /* command_queue */,
void (CL_CALLBACK * /*user_func*/)(void *),
void * /* args */,
size_t /* cb_args */,
cl_uint /* num_mem_objects */,
const cl_mem * /* mem_list */,
const void ** /* args_mem_loc */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */);
HB_OCL_API(cl_int, CL_API_CALL, clEnqueueMarkerWithWaitList)
(cl_command_queue /* command_queue */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */);
HB_OCL_API(cl_int, CL_API_CALL, clEnqueueBarrierWithWaitList)
(cl_command_queue /* command_queue */,
cl_uint /* num_events_in_wait_list */,
const cl_event * /* event_wait_list */,
cl_event * /* event */);
/* Extension function access
*
* Returns the extension function address for the given function name,
* or NULL if a valid function can not be found. The client must
* check to make sure the address is not NULL, before using or
* calling the returned function address.
*/
HB_OCL_API(void *, CL_API_CALL, clGetExtensionFunctionAddressForPlatform)
(cl_platform_id /* platform */,
const char * /* func_name */);
#ifdef __APPLE__
#pragma mark -
#endif // __APPLE__
/*
 * hb_opencl_library_t
 *
 * Wrapper around a dynamically-loaded OpenCL runtime: the loader handle
 * plus function pointers (declared via HB_OCL_FUNC_DECL) for the subset
 * of the OpenCL API that HandBrake actually calls.
 * Created by hb_opencl_library_init(), freed by hb_opencl_library_close().
 */
typedef struct hb_opencl_library_s
{
    // Handle for the loaded OpenCL runtime — presumably from
    // dlopen()/LoadLibrary(); confirm in hb_opencl_library_init().
    void *library;

    /* Pointers to select OpenCL API functions */
    HB_OCL_FUNC_DECL(clBuildProgram);
    HB_OCL_FUNC_DECL(clCreateBuffer);
    HB_OCL_FUNC_DECL(clCreateCommandQueue);
    HB_OCL_FUNC_DECL(clCreateContextFromType);
    HB_OCL_FUNC_DECL(clCreateKernel);
    HB_OCL_FUNC_DECL(clCreateProgramWithBinary);
    HB_OCL_FUNC_DECL(clCreateProgramWithSource);
    HB_OCL_FUNC_DECL(clEnqueueCopyBuffer);
    HB_OCL_FUNC_DECL(clEnqueueMapBuffer);
    HB_OCL_FUNC_DECL(clEnqueueNDRangeKernel);
    HB_OCL_FUNC_DECL(clEnqueueReadBuffer);
    HB_OCL_FUNC_DECL(clEnqueueUnmapMemObject);
    HB_OCL_FUNC_DECL(clEnqueueWriteBuffer);
    HB_OCL_FUNC_DECL(clFlush);
    HB_OCL_FUNC_DECL(clGetCommandQueueInfo);
    HB_OCL_FUNC_DECL(clGetContextInfo);
    HB_OCL_FUNC_DECL(clGetDeviceIDs);
    HB_OCL_FUNC_DECL(clGetDeviceInfo);
    HB_OCL_FUNC_DECL(clGetPlatformIDs);
    HB_OCL_FUNC_DECL(clGetPlatformInfo);
    HB_OCL_FUNC_DECL(clGetProgramBuildInfo);
    HB_OCL_FUNC_DECL(clGetProgramInfo);
    HB_OCL_FUNC_DECL(clReleaseCommandQueue);
    HB_OCL_FUNC_DECL(clReleaseContext);
    HB_OCL_FUNC_DECL(clReleaseEvent);
    HB_OCL_FUNC_DECL(clReleaseKernel);
    HB_OCL_FUNC_DECL(clReleaseMemObject);
    HB_OCL_FUNC_DECL(clReleaseProgram);
    HB_OCL_FUNC_DECL(clSetKernelArg);
    HB_OCL_FUNC_DECL(clWaitForEvents);
} hb_opencl_library_t;
hb_opencl_library_t* hb_opencl_library_init();
void hb_opencl_library_close(hb_opencl_library_t **_opencl);
/*
* Convenience pointer to a single shared OpenCL library wrapper.
*
* It can be initialized and closed via hb_ocl_init/close().
*/
extern hb_opencl_library_t *hb_ocl;
int hb_ocl_init();
void hb_ocl_close();
/*
 * hb_opencl_device_t
 *
 * Description of a single OpenCL device: the platform/device handles,
 * identification strings as reported by the driver, and a coarse vendor
 * classification for vendor-specific handling.
 */
typedef struct hb_opencl_device_s
{
    cl_platform_id platform;
    cl_device_type type;
    cl_device_id   id;
    // Fixed-size copies of the driver-reported identification strings.
    char           version[128];
    char           driver[128];
    char           vendor[128];
    char           name[128];
    enum
    {
        HB_OCL_VENDOR_AMD,
        HB_OCL_VENDOR_NVIDIA,
        HB_OCL_VENDOR_INTEL,
        HB_OCL_VENDOR_OTHER,
    } ocl_vendor;
} hb_opencl_device_t;
int hb_opencl_available();
void hb_opencl_info_print();
/* OpenCL scaling */
/*
 * Persistent state for the OpenCL bicubic scaler: cached weight buffers,
 * the scale factors and dimensions they were computed for, and the
 * kernel handle reused across frames.
 */
typedef struct hb_oclscale_s
{
    // Non-zero once the weights/kernel below have been set up.
    int initialized;
    // bicubic scale weights
    cl_mem bicubic_x_weights;
    cl_mem bicubic_y_weights;
    // Scale factors/dimensions the cached weights correspond to.
    cl_float xscale;
    cl_float yscale;
    int width;
    int height;
    // horizontal scaling and vertical scaling kernel handle
    cl_kernel m_kernel;
    int use_ocl_mem; // 0 use host memory. 1 use gpu oclmem
} hb_oclscale_t;
int hb_ocl_scale(hb_buffer_t *in, hb_buffer_t *out, int *crop,
hb_oclscale_t *os);
/* Utilities */
/*
 * Allocate an OpenCL buffer, or bail out of the calling function.
 *
 * NOTE(review): relies on identifiers from the caller's scope that are
 * not macro parameters: `kenv` (supplies the cl_context) and a cl_int
 * `status`.  On failure it makes the *enclosing function* return -1.
 */
#define HB_OCL_BUF_CREATE(ocl_lib, out, flags, size)                          \
{                                                                             \
    out = ocl_lib->clCreateBuffer(kenv->context, flags, size, NULL, &status); \
    if (CL_SUCCESS != status)                                                 \
    {                                                                         \
        return -1;                                                            \
    }                                                                         \
}
/*
 * Release an OpenCL buffer if non-NULL, and reset the pointer so the
 * macro is safe to invoke again on the same variable.
 */
#define HB_OCL_BUF_FREE(ocl_lib, buf)       \
{                                           \
    if (buf != NULL)                        \
    {                                       \
        ocl_lib->clReleaseMemObject(buf);   \
        buf = NULL;                         \
    }                                       \
}
/*
 * Invoke an OpenCL function and, on failure, log the call site and make
 * the *enclosing function* return the OpenCL error code.
 *
 * NOTE(review): expects a cl_int `status` variable in the caller's scope.
 */
#define HB_OCL_CHECK(method, ...)                                             \
{                                                                             \
    status = method(__VA_ARGS__);                                             \
    if (status != CL_SUCCESS)                                                 \
    {                                                                         \
        hb_error("%s:%d (%s) error: %d\n",__FUNCTION__,__LINE__,#method,status);\
        return status;                                                        \
    }                                                                         \
}
#endif//HB_OPENCL_H
HandBrake-0.10.2/libhb/deccc608sub.c 0000664 0001752 0001752 00000162340 12463330511 017300 0 ustar handbrake handbrake /* deccc608sub.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: <http://handbrake.fr/>.
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
/*
* From ccextractor, leave this file as intact and close to the original as possible so that
* it is easy to patch in fixes - even though this file contains code that we don't need.
*
* Note that the SRT sub generation from CC could be useful for mkv subs.
*/
#include "hb.h"
#include "deccc608sub.h"
#define SSA_PREAMBLE_LEN 24
/*
* ccextractor static configuration variables.
*/
static int debug_608 = 0;  // non-zero: log 608 decoding details via hb_log()
static int cc_channel = 1; // caption channel to decode; other channels' data is dropped
static int subs_delay = 0; // fixed delay applied to subtitle timestamps (units not shown here -- verify)
/*
* Get the time of the last buffer that we have received.
*/
/*
 * Return the PTS of the most recent buffer handed to this decoder.
 */
static int64_t get_last_pts(struct s_write *wb)
{
    int64_t pts = wb->last_pts;
    return pts;
}
// ccextractor's fatal() is stubbed out in HandBrake: the error is ignored.
#define fatal(N, ...) // N
// PAC byte -> caption row lookup (see the note below); -1 marks an
// invalid code.
int rowdata[] = {11,-1,1,2,3,4,12,13,14,15,5,6,7,8,9,10};
// Relationship between the first PAC byte and the row number
// The following enc_buffer is not used at the moment, if it does get used
// we need to bring it into the swrite struct. Same for "str".
// Initial size, in bytes, of the per-decoder encoding scratch buffer
// allocated by general_608_init().
#define INITIAL_ENC_BUFFER_CAPACITY 2048
// Color / font / indent attributes keyed by the second PAC byte,
// indexed as (c2 - 0x20) -- see handle_text_attr().
static const unsigned char pac2_attribs[][3]= // Color, font, ident
{
    {COL_WHITE, FONT_REGULAR, 0}, // 0x40 || 0x60
    {COL_WHITE, FONT_UNDERLINED, 0}, // 0x41 || 0x61
    {COL_GREEN, FONT_REGULAR, 0}, // 0x42 || 0x62
    {COL_GREEN, FONT_UNDERLINED, 0}, // 0x43 || 0x63
    {COL_BLUE, FONT_REGULAR, 0}, // 0x44 || 0x64
    {COL_BLUE, FONT_UNDERLINED, 0}, // 0x45 || 0x65
    {COL_CYAN, FONT_REGULAR, 0}, // 0x46 || 0x66
    {COL_CYAN, FONT_UNDERLINED, 0}, // 0x47 || 0x67
    {COL_RED, FONT_REGULAR, 0}, // 0x48 || 0x68
    {COL_RED, FONT_UNDERLINED, 0}, // 0x49 || 0x69
    {COL_YELLOW, FONT_REGULAR, 0}, // 0x4a || 0x6a
    {COL_YELLOW, FONT_UNDERLINED, 0}, // 0x4b || 0x6b
    {COL_MAGENTA, FONT_REGULAR, 0}, // 0x4c || 0x6c
    {COL_MAGENTA, FONT_UNDERLINED, 0}, // 0x4d || 0x6d
    {COL_WHITE, FONT_ITALICS, 0}, // 0x4e || 0x6e
    {COL_WHITE, FONT_UNDERLINED_ITALICS, 0}, // 0x4f || 0x6f
    {COL_WHITE, FONT_REGULAR, 0}, // 0x50 || 0x70
    {COL_WHITE, FONT_UNDERLINED, 0}, // 0x51 || 0x71
    {COL_WHITE, FONT_REGULAR, 4}, // 0x52 || 0x72
    {COL_WHITE, FONT_UNDERLINED, 4}, // 0x53 || 0x73
    {COL_WHITE, FONT_REGULAR, 8}, // 0x54 || 0x74
    {COL_WHITE, FONT_UNDERLINED, 8}, // 0x55 || 0x75
    {COL_WHITE, FONT_REGULAR, 12}, // 0x56 || 0x76
    {COL_WHITE, FONT_UNDERLINED, 12}, // 0x57 || 0x77
    {COL_WHITE, FONT_REGULAR, 16}, // 0x58 || 0x78
    {COL_WHITE, FONT_UNDERLINED, 16}, // 0x59 || 0x79
    {COL_WHITE, FONT_REGULAR, 20}, // 0x5a || 0x7a
    {COL_WHITE, FONT_UNDERLINED, 20}, // 0x5b || 0x7b
    {COL_WHITE, FONT_REGULAR, 24}, // 0x5c || 0x7c
    {COL_WHITE, FONT_UNDERLINED, 24}, // 0x5d || 0x7d
    {COL_WHITE, FONT_REGULAR, 28}, // 0x5e || 0x7e
    {COL_WHITE, FONT_UNDERLINED, 28} // 0x5f || 0x7f
};
// Default color
static enum color_code default_color=COL_WHITE;
// Human-readable names of the CC608 control commands (presumably for
// debug output; usage is not in this part of the file).
static const char *command_type[] =
{
    "Unknown",
    "EDM - EraseDisplayedMemory",
    "RCL - ResumeCaptionLoading",
    "EOC - End Of Caption",
    "TO1 - Tab Offset, 1 column",
    "TO2 - Tab Offset, 2 column",
    "TO3 - Tab Offset, 3 column",
    "RU2 - Roll up 2 rows",
    "RU3 - Roll up 3 rows",
    "RU4 - Roll up 4 rows",
    "CR - Carriage Return",
    "ENM - Erase non-displayed memory",
    "BS - Backspace",
    "RTD - Resume Text Display"
};
// Font style names, indexed by the FONT_* values.
static const char *font_text[]=
{
    "regular",
    "italics",
    "underlined",
    "underlined italics"
};
// Caption colors indexed by COL_*: [0] is the name, [1] the ASS
// &HBBGGRR& color string emitted via the \1c override tag in
// get_decoder_line_encoded().
static const char *color_text[][2]=
{
    {"white", "&HFFFFFF&"},
    {"green", "&H00FF00&"},
    {"blue", "&HFF0000&"},
    {"cyan", "&HFFFF00&"},
    {"red", "&H0000FF&"},
    {"yellow", "&H00FFFF&"},
    {"magenta", "&HFF00FF&"},
    {"userdefined", "&HFFFFFF&"}
};
/*
 * Lazily allocate the decoder's scratch buffers and reset the
 * per-stream state.  Returns 0 on success, -1 if an allocation fails
 * (buffers that were already allocated are kept).
 */
static int general_608_init (struct s_write *wb)
{
    if (wb->enc_buffer == NULL)
    {
        wb->enc_buffer = malloc(INITIAL_ENC_BUFFER_CAPACITY);
        if (wb->enc_buffer == NULL)
        {
            return -1;
        }
        wb->enc_buffer_capacity = INITIAL_ENC_BUFFER_CAPACITY;
    }

    if (wb->subline == NULL)
    {
        wb->subline = malloc(2048);
        if (wb->subline == NULL)
        {
            return -1;
        }
    }

    wb->new_sentence   = 1;
    wb->new_channel    = 1;
    wb->in_xds_mode    = 0;
    wb->hb_buffer      = NULL;
    wb->hb_last_buffer = NULL;
    wb->last_pts       = 0;
    return 0;
}
/*
 * Free up CC memory - don't call this from HB just yet since it will cause
 * parallel encodes to fail - to be honest they will be stuffed anyway since
 * the CC's may be overwriting the buffers.
 */
static void general_608_close (struct s_write *wb)
{
    if (wb->enc_buffer != NULL)
    {
        free(wb->enc_buffer);
        // Reset the pointer: general_608_init() tests it to decide
        // whether to allocate, so leaving it dangling would lead to a
        // use-after-free on a later close/init cycle.
        wb->enc_buffer = NULL;
        wb->enc_buffer_capacity = 0;
        wb->enc_buffer_used = 0;
    }
    if (wb->subline != NULL)
    {
        free(wb->subline);
        wb->subline = NULL; // same reasoning as enc_buffer above
    }
    if (wb->hb_buffer != NULL)
    {
        // Takes &ptr -- presumably resets wb->hb_buffer itself; verify
        // against hb_buffer_close().
        hb_buffer_close(&wb->hb_buffer);
    }
}
#include
// Returns number of bytes used
/*
 * Convert one Line 21 (CEA-608) character code to UTF-8.
 *
 * buffer: receives 1-3 UTF-8 bytes (not NUL-terminated).
 * c:      decoder character code: mostly ASCII below 0x80 (with a few
 *         accented-letter substitutions), with the extended two-byte
 *         Line 21 characters remapped into 0x80-0xcf by the decoder.
 *
 * Returns the number of bytes written; 0 for the NUL code.
 *
 * Bug fix vs. the original: the 0xbe (broken bar) entry wrote the
 * two-byte UTF-8 sequence C2 A6 but reported only 1 byte used, leaving
 * a stray continuation byte to be overwritten/orphaned downstream.
 */
static int get_char_in_utf8(unsigned char *buffer, unsigned char c)
{
    // UTF-8 expansions for the extended codes, indexed by (c - 0x80).
    static const char * const utf8_map[0x50] =
    {
        // THIS BLOCK INCLUDES THE 16 EXTENDED (TWO-BYTE) LINE 21 CHARACTERS
        // THAT COME FROM HI BYTE = 0x11 AND LOW BETWEEN 0x30 AND 0x3F
        "\xc2\xae",     // 0x80 Registered symbol (R)
        "\xc2\xb0",     // 0x81 degree sign
        "\xc2\xbd",     // 0x82 1/2 symbol
        "\xc2\xbf",     // 0x83 inverted (open) question mark
        "\xe2\x84\xa2", // 0x84 trademark symbol (TM)
        "\xc2\xa2",     // 0x85 cents symbol
        "\xc2\xa3",     // 0x86 pounds sterling
        "\xe2\x99\xaa", // 0x87 music note
        "\xc3\xa0",     // 0x88 lowercase a, grave accent
        " ",            // 0x89 transparent space, we make it regular
        "\xc3\xa8",     // 0x8a lowercase e, grave accent
        "\xc3\xa2",     // 0x8b lowercase a, circumflex accent
        "\xc3\xaa",     // 0x8c lowercase e, circumflex accent
        "\xc3\xae",     // 0x8d lowercase i, circumflex accent
        "\xc3\xb4",     // 0x8e lowercase o, circumflex accent
        "\xc3\xbb",     // 0x8f lowercase u, circumflex accent
        // THIS BLOCK INCLUDES THE 32 EXTENDED (TWO-BYTE) LINE 21 CHARACTERS
        // THAT COME FROM HI BYTE = 0x12 AND LOW BETWEEN 0x20 AND 0x3F
        "\xc3\x81",     // 0x90 capital letter A with acute
        "\xc3\x89",     // 0x91 capital letter E with acute
        "\xc3\x93",     // 0x92 capital letter O with acute
        "\xc3\x9a",     // 0x93 capital letter U with acute
        "\xc3\x9c",     // 0x94 capital letter U with diaeresis
        "\xc3\xbc",     // 0x95 lowercase letter u with diaeresis
        "'",            // 0x96 apostrophe
        "\xc2\xa1",     // 0x97 inverted exclamation mark
        "*",            // 0x98 asterisk
        "'",            // 0x99 apostrophe (yes, duped). See CCADI source code.
        "-",            // 0x9a hyphen-minus
        "\xc2\xa9",     // 0x9b copyright sign
        "\xe2\x84\xa0", // 0x9c service mark
        ".",            // 0x9d full stop (.)
        "\"",           // 0x9e quotation mark
        "\"",           // 0x9f quotation mark
        "\xc3\x80",     // 0xa0 uppercase A, grave accent
        "\xc3\x82",     // 0xa1 uppercase A, circumflex
        "\xc3\x87",     // 0xa2 uppercase C with cedilla
        "\xc3\x88",     // 0xa3 uppercase E, grave accent
        "\xc3\x8a",     // 0xa4 uppercase E, circumflex
        "\xc3\x8b",     // 0xa5 capital letter E with diaeresis
        "\xc3\xab",     // 0xa6 lowercase letter e with diaeresis
        "\xc3\x8e",     // 0xa7 uppercase I, circumflex
        "\xc3\x8f",     // 0xa8 uppercase I, with diaeresis
        "\xc3\xaf",     // 0xa9 lowercase i, with diaeresis
        "\xc3\x94",     // 0xaa uppercase O, circumflex
        "\xc3\x99",     // 0xab uppercase U, grave accent
        "\xc3\xb9",     // 0xac lowercase u, grave accent
        "\xc3\x9b",     // 0xad uppercase U, circumflex
        "\xc2\xab",     // 0xae left-pointing double angle quotation mark
        "\xc2\xbb",     // 0xaf right-pointing double angle quotation mark
        // THIS BLOCK INCLUDES THE 32 EXTENDED (TWO-BYTE) LINE 21 CHARACTERS
        // THAT COME FROM HI BYTE = 0x13 AND LOW BETWEEN 0x20 AND 0x3F
        "\xc3\x83",     // 0xb0 uppercase A, tilde
        "\xc3\xa3",     // 0xb1 lowercase a, tilde
        "\xc3\x8d",     // 0xb2 uppercase I, acute accent
        "\xc3\x8c",     // 0xb3 uppercase I, grave accent
        "\xc3\xac",     // 0xb4 lowercase i, grave accent
        "\xc3\x92",     // 0xb5 uppercase O, grave accent
        "\xc3\xb2",     // 0xb6 lowercase o, grave accent
        "\xc3\x95",     // 0xb7 uppercase O, tilde
        "\xc3\xb5",     // 0xb8 lowercase o, tilde
        "{",            // 0xb9 open curly brace
        "}",            // 0xba closing curly brace
        "\\",           // 0xbb backslash
        "^",            // 0xbc caret
        "_",            // 0xbd underscore
        "\xc2\xa6",     // 0xbe pipe (broken bar) -- length was misreported as 1
        "~",            // 0xbf tilde (not sure)
        "\xc3\x84",     // 0xc0 uppercase A, umlaut
        "\xc3\xa4",     // 0xc1 lowercase a, umlaut
        "\xc3\x96",     // 0xc2 uppercase O, umlaut
        "\xc3\xb6",     // 0xc3 lowercase o, umlaut
        "\xc3\x9f",     // 0xc4 esszett (sharp S)
        "\xc2\xa5",     // 0xc5 yen symbol
        "\xc2\xa4",     // 0xc6 currency symbol
        "|",            // 0xc7 vertical bar
        "\xc3\x85",     // 0xc8 uppercase A, ring
        "\xc3\xa5",     // 0xc9 lowercase a, ring
        "\xc3\x98",     // 0xca uppercase O, slash
        "\xc3\xb8",     // 0xcb lowercase o, slash
        "\xe2\x8c\x9c", // 0xcc upper left corner
        "\xe2\x8c\x9d", // 0xcd upper right corner
        "\xe2\x8c\x9e", // 0xce lower left corner
        "\xe2\x8c\x9f", // 0xcf lower right corner
    };
    const char *s;
    int n;

    if (c == 0x00)
        return 0;

    if (c < 0x80)
    {
        // Regular line-21 character set, mostly ASCII except these exceptions
        switch (c)
        {
            case 0x2a: s = "\xc3\xa1"; break; // lowercase a, acute accent
            case 0x5c: s = "\xc3\xa9"; break; // lowercase e, acute accent
            case 0x5e: s = "\xc3\xad"; break; // lowercase i, acute accent
            case 0x5f: s = "\xc3\xb3"; break; // lowercase o, acute accent
            case 0x60: s = "\xc3\xba"; break; // lowercase u, acute accent
            case 0x7b: s = "\xc3\xa7"; break; // lowercase c with cedilla
            case 0x7c: s = "\xc3\xb7"; break; // division symbol
            case 0x7d: s = "\xc3\x91"; break; // uppercase N tilde
            case 0x7e: s = "\xc3\xb1"; break; // lowercase n tilde
            default:
                *buffer = c;
                return 1;
        }
    }
    else if (c <= 0xcf)
    {
        s = utf8_map[c - 0x80];
    }
    else
    {
        *buffer = '?'; // These are weird chars anyway
        return 1;
    }

    // Copy the UTF-8 expansion (1-3 bytes) without the NUL terminator.
    for (n = 0; s[n] != '\0'; n++)
    {
        buffer[n] = (unsigned char)s[n];
    }
    return n;
}
// Encodes a generic string. Note that since we use the encoders for closed
// caption data, text would have to be encoded as CCs... so using special
// characters here it's a bad idea.
//
// Copies the NUL-terminated `text` into `buffer` (terminator excluded,
// output not terminated) and returns the number of bytes copied.
static unsigned encode_line(unsigned char *buffer, unsigned char *text)
{
    unsigned count;

    for (count = 0; text[count] != 0; count++)
    {
        buffer[count] = text[count];
    }
    return count;
}
/*
 * Scan one caption row and report the first and last columns holding a
 * visible character.  Both outputs are left at -1 when the row is
 * entirely blank.  0x89 (the Line 21 "transparent space") counts as
 * blank, like a regular space.
 */
static void find_limit_characters(unsigned char *line, int *first_non_blank,
                                  int *last_non_blank)
{
    int col;

    *first_non_blank = -1;
    *last_non_blank  = -1;
    for (col = 0; col < CC608_SCREEN_WIDTH; col++)
    {
        if (line[col] != ' ' && line[col] != 0x89)
        {
            if (*first_non_blank < 0)
            {
                *first_non_blank = col;
            }
            *last_non_blank = col;
        }
    }
}
/*
 * Render one caption row into `buffer` as UTF-8 text decorated with
 * ASS/SSA override tags ({\r}, \u0/1, \i0/1, \1c&HBBGGRR&) reflecting
 * per-character color and font attributes.  Tags are only emitted when
 * the attributes differ from wb->prev_font_style/prev_font_color.
 * Returns the number of bytes written; the output is NUL-terminated.
 *
 * NOTE(review): for an all-blank row, find_limit_characters() yields
 * first = last = -1 and the loop below would index line[-1]; callers
 * presumably only pass rows flagged in row_used -- verify.
 */
static unsigned get_decoder_line_encoded(struct s_write *wb,
                                         unsigned char *buffer, int line_num,
                                         struct eia608_screen *data)
{
    uint8_t font_style;
    uint8_t font_color;
    int i;
    unsigned char *line = data->characters[line_num];
    unsigned char *orig = buffer; // Keep for debugging
    int first = 0, last = 31;
    find_limit_characters(line, &first, &last);
    for (i = first; i <= last; i++)
    {
        // Handle color
        font_color = data->colors[line_num][i];
        font_style = data->fonts[line_num][i];

        // Handle reset to defaults
        if ((font_style & FONT_STYLE_MASK) == 0 && font_color == COL_WHITE)
        {
            // Plain white text: a single {\r} reset replaces individual
            // closing tags, emitted only if something was open.
            if (((font_style ^ wb->prev_font_style) & FONT_STYLE_MASK) ||
                (font_color != wb->prev_font_color))
            {
                buffer += encode_line(buffer, (uint8_t*)"{\\r}");
            }
        }
        else
        {
            // Open markup
            if (((font_style ^ wb->prev_font_style) & FONT_STYLE_MASK) ||
                (font_color != wb->prev_font_color))
            {
                // style changed
                buffer += encode_line(buffer, (uint8_t*)"{");
            }

            // Handle underlined
            if ((font_style ^ wb->prev_font_style) & FONT_UNDERLINED)
            {
                int enable = !!(font_style & FONT_UNDERLINED);
                buffer += encode_line(buffer, (uint8_t*)"\\u");
                *buffer++ = enable + 0x30; // ASCII '0' or '1'
            }
            // Handle italics
            if ((font_style ^ wb->prev_font_style) & FONT_ITALICS)
            {
                int enable = !!(font_style & FONT_ITALICS);
                buffer += encode_line(buffer, (uint8_t*)"\\i");
                *buffer++ = enable + 0x30; // ASCII '0' or '1'
            }

            // Handle color
            if (font_color != wb->prev_font_color)
            {
                // \1c sets the ASS primary color; values come from
                // color_text[][1] (&HBBGGRR& strings).
                buffer += encode_line(buffer, (uint8_t*)"\\1c");
                buffer += encode_line(buffer,
                                      (uint8_t*)color_text[font_color][1]);
            }

            // Close markup
            if (((font_style ^ wb->prev_font_style) & FONT_STYLE_MASK) ||
                (font_color != wb->prev_font_color))
            {
                // style changed
                buffer += encode_line(buffer, (uint8_t*)"}");
            }
        }
        // Remember the attributes so the next character only emits tags
        // when something actually changes.
        wb->prev_font_style = font_style;
        wb->prev_font_color = font_color;

        int bytes = 0;
        bytes = get_char_in_utf8(buffer, line[i]);
        buffer += bytes;
    }
    *buffer = 0;
    return (unsigned) (buffer - orig); // Return length
}
/*
 * Reset a caption screen: every row becomes spaces in the default color
 * and regular font, all rows are marked unused, and the screen is
 * flagged empty.
 */
static void clear_eia608_cc_buffer (struct eia608_screen *data)
{
    int row;

    for (row = 0; row < 15; row++)
    {
        memset(data->characters[row], ' ', CC608_SCREEN_WIDTH);
        data->characters[row][CC608_SCREEN_WIDTH] = 0;
        memset(data->colors[row], default_color, CC608_SCREEN_WIDTH + 1);
        memset(data->fonts[row], FONT_REGULAR, CC608_SCREEN_WIDTH + 1);
        data->row_used[row] = 0;
    }
    data->empty = 1;
}
/*
 * Reset an eia608 decoder context to its initial state: both screen
 * buffers cleared, cursor at top-left, pop-on mode, default styling.
 */
static void init_eia608 (struct eia608 *data)
{
    data->cursor_column = 0;
    data->cursor_row = 0;
    clear_eia608_cc_buffer (&data->buffer1);
    clear_eia608_cc_buffer (&data->buffer2);
    data->visible_buffer = 1;
    data->last_c1 = 0;
    data->last_c2 = 0;
    data->mode = MODE_POPUP;
    data->current_visible_start_ms = 0;
    data->ssa_counter = 0;
    data->screenfuls_counter = 0;
    data->channel = 1;
    data->color = default_color;
    data->font = FONT_REGULAR;
    // Bottom screen row (rows are 0-14) used as the roll-up anchor.
    data->rollup_base_row = 14;
}
/*
 * Return the screen buffer that is currently NOT displayed (the one
 * pop-on captions are composed into off-screen).
 */
static struct eia608_screen *get_current_hidden_buffer(struct s_write *wb)
{
    return (wb->data608->visible_buffer == 1) ? &wb->data608->buffer2
                                              : &wb->data608->buffer1;
}
/*
 * Return the screen buffer that is currently being displayed.
 */
static struct eia608_screen *get_current_visible_buffer(struct s_write *wb)
{
    return (wb->data608->visible_buffer == 1) ? &wb->data608->buffer1
                                              : &wb->data608->buffer2;
}
/* Flip which of the two screen buffers is on display (1 <-> 2). */
static void swap_visible_buffer(struct s_write *wb)
{
    if (wb->data608->visible_buffer == 1)
        wb->data608->visible_buffer = 2;
    else
        wb->data608->visible_buffer = 1;
}
/* The buffer incoming characters should be written to: the hidden one in
 * pop-up mode (captions are composed off screen), the visible one in the
 * roll-up modes (captions draw directly on screen).  Any other mode is a
 * programming error and aborts. */
static struct eia608_screen *get_writing_buffer(struct s_write *wb)
{
    struct eia608_screen *target = NULL;

    if (wb->data608->mode == MODE_POPUP)
    {
        target = get_current_hidden_buffer(wb);
    }
    else if (wb->data608->mode == MODE_ROLLUP_2 ||
             wb->data608->mode == MODE_ROLLUP_3 ||
             wb->data608->mode == MODE_ROLLUP_4)
    {
        target = get_current_visible_buffer(wb);
    }
    else
    {
        fatal (EXIT_BUG_BUG, "Caption mode has an illegal value at get_writing_buffer(), this is a bug.\n");
    }
    return target;
}
static void write_char(const unsigned char c, struct s_write *wb)
{
if (wb->data608->mode != MODE_TEXT)
{
struct eia608_screen * use_buffer = get_writing_buffer(wb);
/* hb_log ("\rWriting char [%c] at %s:%d:%d\n",c,
use_buffer == &wb->data608->buffer1?"B1":"B2",
wb->data608->cursor_row,wb->data608->cursor_column); */
use_buffer->characters[wb->data608->cursor_row][wb->data608->cursor_column] = c;
use_buffer->colors[wb->data608->cursor_row][wb->data608->cursor_column] = wb->data608->color;
use_buffer->fonts[wb->data608->cursor_row][wb->data608->cursor_column] = wb->data608->font;
use_buffer->row_used[wb->data608->cursor_row] = 1;
use_buffer->empty = 0;
if (wb->data608->cursor_column < 31)
wb->data608->cursor_column++;
use_buffer->dirty = 1;
}
}
/* Handle MID-ROW CODES: a (c1,c2) pair that changes color/font at the
 * cursor position.  Valid pairs are c1 in {0x11,0x19} with c2 in
 * 0x20..0x2f; the attribute is looked up in pac2_attribs[c2-0x20]. */
static void handle_text_attr(const unsigned char c1, const unsigned char c2,
                             struct s_write *wb)
{
    // Handle channel change; ignore data for the non-selected channel
    wb->data608->channel=wb->new_channel;
    if (wb->data608->channel!=cc_channel)
        return;
    if (debug_608)
        hb_log ("\r608: text_attr: %02X %02X",c1,c2);
    // NOTE(review): the validity test is ANDed with debug_608, so with
    // debugging off an out-of-range (c1,c2) still falls through to the
    // else branch and indexes pac2_attribs — confirm against upstream.
    if ( ((c1!=0x11 && c1!=0x19) ||
          (c2<0x20 || c2>0x2f)) && debug_608)
    {
        hb_log ("\rThis is not a text attribute!\n");
    }
    else
    {
        int i = c2-0x20;
        wb->data608->color=pac2_attribs[i][0];
        wb->data608->font=pac2_attribs[i][1];
        if (debug_608)
            hb_log(" -- Color: %s, font: %s\n",
                   color_text[wb->data608->color][0],
                   font_text[wb->data608->font]);
        // A mid-row code also advances the cursor one column (clamped)
        if (wb->data608->cursor_column<31)
            wb->data608->cursor_column++;
    }
}
/*
 * Render the given screen buffer as one SSA subtitle event and queue it on
 * the wb->hb_buffer / wb->hb_last_buffer output chain.  When the screen is
 * empty but a caption is still showing, emits a zero-length "clear" event
 * instead.  Returns 1 if any buffer was queued, 0 otherwise.
 */
static int write_cc_buffer_as_ssa(struct eia608_screen *data,
                                  struct s_write *wb)
{
    int wrote_something = 0;
    int i;
    int64_t ms_start = wb->data608->current_visible_start_ms;
    //int64_t ms_end = get_last_pts(wb) + subs_delay;
    int row = -1, col = -1;

    ms_start += subs_delay;
    if (ms_start<0) // Drop screens that because of subs_delay start too early
        return 0;

    if (debug_608)
    {
        char timeline[128];
        wb->data608->ssa_counter++;
        sprintf (timeline,"%u\r\n",wb->data608->ssa_counter);
        hb_log ("\n- - - SSA caption - - -\n");
        hb_log ("%s", timeline);
    }

    /*
     * Write all the lines into enc_buffer, and then write that out at the end
     * ensure that we only have two lines, insert a newline after the first one,
     * and have a big bottom line (strip spaces from any joined lines).
     */
    // First pass: count used rows and find the widest used character span;
    // both feed the position calculation below.
    int rows = 0, columns = 0;
    for (i = 0; i < 15; i++)
    {
        if (data->row_used[i])
        {
            int first, last;
            rows++;
            find_limit_characters(data->characters[i], &first, &last);
            if (last - first + 1 > columns)
                columns = last - first + 1;
        }
    }
    // Reset the markup state tracked by get_decoder_line_encoded()
    wb->prev_font_style = FONT_REGULAR;
    wb->prev_font_color = COL_WHITE;
    wb->enc_buffer_used = 0;

    int line = 1;
    for (i = 0; i < 15; i++)
    {
        if (data->row_used[i])
        {
            // Get position for this CC (only once, from the first used row)
            if (row == -1)
            {
                int last, x, y, top, safe_zone, cell_width, cell_height;
                int cropped_width, cropped_height, font_size;
                char *pos;

                row = i;
                find_limit_characters(data->characters[i], &col, &last);
                // CC grid is 16 rows by 62 colums
                // Our SSA resolution is the title resolution
                // Tranlate CC grid to SSA coordinates
                // The numbers are tweaked to keep things off the very
                // edges of the screen and in the "safe" zone
                cropped_height = wb->height - wb->crop[0] - wb->crop[1];
                cropped_width = wb->width - wb->crop[2] - wb->crop[3];
                font_size = cropped_height * .066;

                safe_zone = cropped_height * 0.025;
                cell_height = (wb->height - 2 * safe_zone) / 16;
                cell_width = (wb->width - 2 * safe_zone) / 32;

                // Calculate position assuming the position defines
                // the baseline of the text which is lower left corner
                // of bottom row of characters
                y = cell_height * (row + 1 + rows) + safe_zone - wb->crop[0];
                top = y - rows * font_size;
                x = cell_width * col + safe_zone - wb->crop[2];
                // Clamp the box into the safe zone on all four sides
                if (top < safe_zone)
                    y = (rows * font_size) + safe_zone;
                if (y > cropped_height - safe_zone)
                    y = cropped_height - safe_zone;
                if (x + columns * cell_width > cropped_width - safe_zone)
                    x = cropped_width - columns * cell_width - safe_zone;
                if (x < safe_zone)
                    x = safe_zone;
                // \a1 = bottom-left alignment, \pos pins the event position
                pos = hb_strdup_printf("{\\a1\\pos(%d,%d)}", x, y);
                wb->enc_buffer_used += encode_line(
                        wb->enc_buffer + wb->enc_buffer_used, (uint8_t*)pos);
                free(pos);
            }
            /*
             * The intention was to use a newline but QT doesn't like it,
             * old code still here just in case..
             */
            if (line == 1) {
                wb->enc_buffer_used += get_decoder_line_encoded(wb,
                        wb->enc_buffer + wb->enc_buffer_used, i, data);
                line = 2;
            } else {
                // Subsequent rows are joined with SSA hard line breaks
                wb->enc_buffer_used += encode_line(
                        wb->enc_buffer + wb->enc_buffer_used, (uint8_t*)"\\N");
                wb->enc_buffer_used += get_decoder_line_encoded(wb,
                        wb->enc_buffer + wb->enc_buffer_used, i, data);
            }
        }
    }
    if (wb->enc_buffer_used && wb->enc_buffer[0] != 0 && data->dirty)
    {
        hb_buffer_t *buffer;
        int len;

        // bump past null terminator
        wb->enc_buffer_used++;
        buffer = hb_buffer_init(wb->enc_buffer_used + SSA_PREAMBLE_LEN);
        buffer->s.frametype = HB_FRAME_SUBTITLE;
        buffer->s.start = ms_start;
        buffer->s.stop = AV_NOPTS_VALUE; // stop is set by a later clear event
        // Event preamble: line number, default style, zero margins/effect
        sprintf((char*)buffer->data, "%d,,Default,,0,0,0,,", ++wb->line);
        len = strlen((char*)buffer->data);
        memcpy(buffer->data + len, wb->enc_buffer, wb->enc_buffer_used);
        // Append to the pending output chain
        if (wb->hb_last_buffer)
        {
            wb->hb_last_buffer->next = buffer;
        }
        else
        {
            wb->hb_buffer = buffer;
        }
        wb->hb_last_buffer = buffer;
        wrote_something=1;
        wb->clear_sub_needed = 1;
    }
    else if (wb->clear_sub_needed)
    {
        // Write a "clear" subtitle (empty payload, zero duration) to end
        // the caption that is currently showing
        hb_buffer_t *buffer = hb_buffer_init(1);
        buffer->s.frametype = HB_FRAME_SUBTITLE;
        buffer->s.start = ms_start;
        buffer->s.stop = ms_start;
        buffer->data[0] = 0;
        if (wb->hb_last_buffer != NULL)
        {
            wb->hb_last_buffer->next = buffer;
        }
        else
        {
            wb->hb_buffer = buffer;
        }
        wb->hb_last_buffer = buffer;
        wb->clear_sub_needed = 0;
    }
    if (debug_608)
    {
        hb_log ("- - - - - - - - - - - -\r\n");
    }
    return wrote_something;
}
/* Emit the visible screen as a subtitle if it changed since last time.
 * Returns 1 if something was written, 0 otherwise. */
static int write_cc_buffer(struct s_write *wb)
{
    struct eia608_screen *screen = get_current_visible_buffer(wb);

    if (!screen->dirty)
        return 0;
    wb->new_sentence = 1;
    int wrote = write_cc_buffer_as_ssa(screen, wb);
    screen->dirty = 0;
    return wrote;
}
/*
 * Move the roll-up window so its base (bottom) row becomes 'row'.  The
 * keep_lines rows ending at the current base row are copied to the new
 * position and the vacated rows are blanked.  No-op outside the roll-up
 * modes or when the base row is unchanged.
 */
static void move_roll_up(struct s_write *wb, int row)
{
    struct eia608_screen *use_buffer;
    int ii, src, dst, keep_lines;

    // Window height for the current roll-up mode
    switch (wb->data608->mode)
    {
        case MODE_ROLLUP_2:
            keep_lines = 2;
            break;
        case MODE_ROLLUP_3:
            keep_lines = 3;
            break;
        case MODE_ROLLUP_4:
            keep_lines = 4;
            break;
        default:
            // Not rollup mode, nothing to do
            return;
    }
    if (row == wb->data608->rollup_base_row)
    {
        // base row didn't change, nothing to do
        return;
    }
    use_buffer = get_current_visible_buffer(wb);
    if (row < wb->data608->rollup_base_row)
    {
        // Moving up: copy top row first so the source is never clobbered
        // before it is read
        src = wb->data608->rollup_base_row - keep_lines + 1;
        dst = row - keep_lines + 1;
        for (ii = 0; ii < keep_lines; ii++)
        {
            memcpy(use_buffer->characters[dst], use_buffer->characters[src], CC608_SCREEN_WIDTH+1);
            memcpy(use_buffer->colors[dst], use_buffer->colors[src], CC608_SCREEN_WIDTH+1);
            memcpy(use_buffer->fonts[dst], use_buffer->fonts[src], CC608_SCREEN_WIDTH+1);
            use_buffer->row_used[dst] = use_buffer->row_used[src];
            // Blank the vacated source row
            memset(use_buffer->characters[src], ' ', CC608_SCREEN_WIDTH);
            memset(use_buffer->colors[src], COL_WHITE, CC608_SCREEN_WIDTH);
            memset(use_buffer->fonts[src], FONT_REGULAR, CC608_SCREEN_WIDTH);
            use_buffer->characters[src][CC608_SCREEN_WIDTH] = 0;
            use_buffer->row_used[src] = 0;
            src++;
            dst++;
        }
    }
    else
    {
        // Moving down: copy bottom row first, for the same reason
        src = wb->data608->rollup_base_row;
        dst = row;
        for (ii = 0; ii < keep_lines; ii++)
        {
            memcpy(use_buffer->characters[dst], use_buffer->characters[src], CC608_SCREEN_WIDTH+1);
            memcpy(use_buffer->colors[dst], use_buffer->colors[src], CC608_SCREEN_WIDTH+1);
            memcpy(use_buffer->fonts[dst], use_buffer->fonts[src], CC608_SCREEN_WIDTH+1);
            use_buffer->row_used[dst] = use_buffer->row_used[src];
            memset(use_buffer->characters[src], ' ', CC608_SCREEN_WIDTH);
            memset(use_buffer->colors[src], COL_WHITE, CC608_SCREEN_WIDTH);
            memset(use_buffer->fonts[src], FONT_REGULAR, CC608_SCREEN_WIDTH);
            use_buffer->characters[src][CC608_SCREEN_WIDTH] = 0;
            use_buffer->row_used[src] = 0;
            src--;
            dst--;
        }
    }
    use_buffer->dirty = 1;
}
/*
 * Scroll the roll-up window up one line on the visible buffer: rows inside
 * the window shift up by one, the bottom row is blanked for new text, and
 * anything above the window is cleared.
 */
static void roll_up(struct s_write *wb)
{
    struct eia608_screen *use_buffer;
    int i, j;

    use_buffer = get_current_visible_buffer(wb);
    // Window height for the current roll-up mode
    int keep_lines;
    switch (wb->data608->mode)
    {
        case MODE_ROLLUP_2:
            keep_lines = 2;
            break;
        case MODE_ROLLUP_3:
            keep_lines = 3;
            break;
        case MODE_ROLLUP_4:
            keep_lines = 4;
            break;
        default: // Shouldn't happen
            keep_lines = 0;
            break;
    }
    int firstrow = -1, lastrow = -1;
    // Look for the last line used
    int rows_now = 0; // Number of rows in use right now
    for (i = 0; i < 15; i++)
    {
        if (use_buffer->row_used[i])
        {
            rows_now++;
            if (firstrow == -1)
                firstrow = i;
            lastrow = i;
        }
    }
    if (debug_608)
        hb_log ("\rIn roll-up: %d lines used, first: %d, last: %d\n", rows_now, firstrow, lastrow);
    if (lastrow==-1) // Empty screen, nothing to rollup
        return;

    // Shift each window row up by one (copy row j+1 into row j)
    for (j = lastrow - keep_lines + 1; j < lastrow; j++)
    {
        if (j >= 0)
        {
            memcpy(use_buffer->characters[j], use_buffer->characters[j+1], CC608_SCREEN_WIDTH+1);
            memcpy(use_buffer->colors[j], use_buffer->colors[j+1], CC608_SCREEN_WIDTH+1);
            memcpy(use_buffer->fonts[j], use_buffer->fonts[j+1], CC608_SCREEN_WIDTH+1);
            use_buffer->row_used[j] = use_buffer->row_used[j+1];
        }
    }
    // Clear every row above the window
    for (j = 0; j < (1 + wb->data608->cursor_row - keep_lines); j++)
    {
        memset(use_buffer->characters[j], ' ', CC608_SCREEN_WIDTH);
        memset(use_buffer->colors[j], COL_WHITE, CC608_SCREEN_WIDTH);
        memset(use_buffer->fonts[j], FONT_REGULAR, CC608_SCREEN_WIDTH);
        use_buffer->characters[j][CC608_SCREEN_WIDTH] = 0;
        use_buffer->row_used[j] = 0;
    }
    // Blank the bottom row so new text can roll in
    memset(use_buffer->characters[lastrow], ' ', CC608_SCREEN_WIDTH);
    memset(use_buffer->colors[lastrow], COL_WHITE, CC608_SCREEN_WIDTH);
    memset(use_buffer->fonts[lastrow], FONT_REGULAR, CC608_SCREEN_WIDTH);
    use_buffer->characters[lastrow][CC608_SCREEN_WIDTH] = 0;
    use_buffer->row_used[lastrow] = 0;
    // Sanity check
    rows_now = 0;
    for (i = 0; i < 15; i++)
        if (use_buffer->row_used[i])
            rows_now++;
    if (rows_now > keep_lines)
        hb_log ("Bug in roll_up, should have %d lines but I have %d.\n",
                keep_lines, rows_now);
    use_buffer->dirty = 1;
}
/* Clear an entire screen buffer: the visible one when displayed != 0,
 * the hidden one otherwise.  (Non-static: part of the module interface.) */
void erase_memory (struct s_write *wb, int displayed)
{
    struct eia608_screen *buf = displayed ? get_current_visible_buffer(wb)
                                          : get_current_hidden_buffer(wb);
    clear_eia608_cc_buffer (buf);
}
static int is_current_row_empty (struct s_write *wb)
{
struct eia608_screen *use_buffer;
int i;
use_buffer = get_current_visible_buffer(wb);
for (i=0;icharacters[wb->data608->rollup_base_row][i]!=' ')
return 0;
}
return 1;
}
/* Process GLOBAL CODES: decode a control (c1,c2) byte pair into a command
 * and run it against the decoder state. */
static void handle_command(unsigned char c1, const unsigned char c2,
                           struct s_write *wb)
{
    // Handle channel change; ignore commands for the non-selected channel
    wb->data608->channel=wb->new_channel;
    if (wb->data608->channel!=cc_channel)
        return;

    // Map the byte pair to a command code (0x15 is folded onto 0x14 so the
    // comparisons below also match that alias)
    enum command_code command = COM_UNKNOWN;
    if (c1==0x15)
        c1=0x14;
    if ((c1==0x14 || c1==0x1C) && c2==0x2C)
        command = COM_ERASEDISPLAYEDMEMORY;
    if ((c1==0x14 || c1==0x1C) && c2==0x20)
        command = COM_RESUMECAPTIONLOADING;
    if ((c1==0x14 || c1==0x1C) && c2==0x2F)
        command = COM_ENDOFCAPTION;
    if ((c1==0x17 || c1==0x1F) && c2==0x21)
        command = COM_TABOFFSET1;
    if ((c1==0x17 || c1==0x1F) && c2==0x22)
        command = COM_TABOFFSET2;
    if ((c1==0x17 || c1==0x1F) && c2==0x23)
        command = COM_TABOFFSET3;
    if ((c1==0x14 || c1==0x1C) && c2==0x25)
        command = COM_ROLLUP2;
    if ((c1==0x14 || c1==0x1C) && c2==0x26)
        command = COM_ROLLUP3;
    if ((c1==0x14 || c1==0x1C) && c2==0x27)
        command = COM_ROLLUP4;
    if ((c1==0x14 || c1==0x1C) && c2==0x2D)
        command = COM_CARRIAGERETURN;
    if ((c1==0x14 || c1==0x1C) && c2==0x2E)
        command = COM_ERASENONDISPLAYEDMEMORY;
    if ((c1==0x14 || c1==0x1C) && c2==0x21)
        command = COM_BACKSPACE;
    if ((c1==0x14 || c1==0x1C) && c2==0x2b)
        command = COM_RESUMETEXTDISPLAY;
    if (debug_608)
    {
        hb_log ("\rCommand: %02X %02X (%s)\n",c1,c2,command_type[command]);
    }
    switch (command)
    {
        case COM_BACKSPACE:
            // Erase the character immediately before the cursor
            if (wb->data608->cursor_column>0)
            {
                struct eia608_screen *data;
                data = get_writing_buffer(wb);
                wb->data608->cursor_column--;
                data->characters[wb->data608->cursor_row][wb->data608->cursor_column] = ' ';
                data->dirty = 1;
            }
            break;
        case COM_TABOFFSET1:
            // Advance the cursor 1/2/3 columns, clamped to column 31
            if (wb->data608->cursor_column<31)
                wb->data608->cursor_column++;
            break;
        case COM_TABOFFSET2:
            wb->data608->cursor_column+=2;
            if (wb->data608->cursor_column>31)
                wb->data608->cursor_column=31;
            break;
        case COM_TABOFFSET3:
            wb->data608->cursor_column+=3;
            if (wb->data608->cursor_column>31)
                wb->data608->cursor_column=31;
            break;
        case COM_RESUMECAPTIONLOADING:
            // Enter pop-up caption mode
            wb->data608->mode=MODE_POPUP;
            wb->data608->current_visible_start_ms = get_last_pts(wb);
            break;
        case COM_RESUMETEXTDISPLAY:
            wb->data608->mode=MODE_TEXT;
            wb->data608->current_visible_start_ms = get_last_pts(wb);
            break;
        case COM_ROLLUP2:
            // Make sure a 2-row window fits above the base row
            if (wb->data608->rollup_base_row + 1 < 2)
            {
                move_roll_up(wb, 1);
                wb->data608->rollup_base_row = 1;
            }
            // Leaving pop-up mode: flush whatever was on screen first
            if (wb->data608->mode==MODE_POPUP)
            {
                swap_visible_buffer(wb);
                if (write_cc_buffer(wb))
                    wb->data608->screenfuls_counter++;
                erase_memory (wb, 1);
            }
            wb->data608->color=default_color;
            wb->data608->font=FONT_REGULAR;
            // A repeated RU2 on a non-empty row acts as a carriage return
            if (wb->data608->mode==MODE_ROLLUP_2 && !is_current_row_empty(wb))
            {
                if (debug_608)
                    hb_log ("Two RU2, current line not empty. Simulating a CR\n");
                handle_command(0x14, 0x2D, wb);
                wb->rollup_cr = 1;
            }
            wb->data608->current_visible_start_ms = get_last_pts(wb);
            wb->data608->mode=MODE_ROLLUP_2;
            erase_memory (wb, 0);
            wb->data608->cursor_column = 0;
            wb->data608->cursor_row = wb->data608->rollup_base_row;
            break;
        case COM_ROLLUP3:
            // Same sequence as RU2 with a 3-row window.
            // NOTE(review): unlike ROLLUP2, the pop-up path here does not
            // call swap_visible_buffer() — confirm this asymmetry is
            // intentional against upstream.
            if (wb->data608->rollup_base_row + 1 < 3)
            {
                move_roll_up(wb, 2);
                wb->data608->rollup_base_row = 2;
            }
            if (wb->data608->mode==MODE_POPUP)
            {
                if (write_cc_buffer(wb))
                    wb->data608->screenfuls_counter++;
                erase_memory (wb, 1);
            }
            wb->data608->color=default_color;
            wb->data608->font=FONT_REGULAR;
            if (wb->data608->mode==MODE_ROLLUP_3 && !is_current_row_empty(wb))
            {
                if (debug_608)
                    hb_log ("Two RU3, current line not empty. Simulating a CR\n");
                handle_command(0x14, 0x2D, wb);
                wb->rollup_cr = 1;
            }
            wb->data608->current_visible_start_ms = get_last_pts(wb);
            wb->data608->mode=MODE_ROLLUP_3;
            erase_memory (wb, 0);
            wb->data608->cursor_column = 0;
            wb->data608->cursor_row = wb->data608->rollup_base_row;
            break;
        case COM_ROLLUP4:
            // Same sequence with a 4-row window
            if (wb->data608->rollup_base_row + 1 < 4)
            {
                move_roll_up(wb, 3);
                wb->data608->rollup_base_row = 3;
            }
            if (wb->data608->mode==MODE_POPUP)
            {
                if (write_cc_buffer(wb))
                    wb->data608->screenfuls_counter++;
                erase_memory (wb, 1);
            }
            wb->data608->color=default_color;
            wb->data608->font=FONT_REGULAR;
            if (wb->data608->mode==MODE_ROLLUP_4 && !is_current_row_empty(wb))
            {
                if (debug_608)
                    hb_log ("Two RU4, current line not empty. Simulating a CR\n");
                handle_command(0x14, 0x2D, wb);
                wb->rollup_cr = 1;
            }
            wb->data608->current_visible_start_ms = get_last_pts(wb);
            wb->data608->mode = MODE_ROLLUP_4;
            wb->data608->cursor_column = 0;
            wb->data608->cursor_row = wb->data608->rollup_base_row;
            erase_memory (wb, 0);
            break;
        case COM_CARRIAGERETURN:
            // In transcript mode, CR doesn't write the whole screen, to avoid
            // repeated lines.
            // Skip initial CR if rollup has already done it
            if (wb->rollup_cr && is_current_row_empty(wb))
            {
                wb->rollup_cr = 0;
                wb->data608->current_visible_start_ms = get_last_pts(wb);
                break;
            }
            // Emit the current screen, then scroll the roll-up window
            if (write_cc_buffer(wb))
                wb->data608->screenfuls_counter++;
            roll_up(wb);
            wb->data608->cursor_column = 0;
            wb->data608->current_visible_start_ms = get_last_pts(wb);
            break;
        case COM_ERASENONDISPLAYEDMEMORY:
            erase_memory (wb,0);
            break;
        case COM_ERASEDISPLAYEDMEMORY:
            // There may be "displayed" rollup data that has not been
            // written to a buffer yet.
            if (wb->data608->mode == MODE_ROLLUP_2 ||
                wb->data608->mode == MODE_ROLLUP_3 ||
                wb->data608->mode == MODE_ROLLUP_4)
            {
                write_cc_buffer(wb);
            }
            erase_memory (wb,1);
            // the last pts is the time to remove the previously
            // displayed CC from the display
            wb->data608->current_visible_start_ms = get_last_pts(wb);
            // Write "clear" subtitle if necessary
            struct eia608_screen *data;
            data = get_current_visible_buffer(wb);
            data->dirty = 1;
            write_cc_buffer(wb);
            break;
        case COM_ENDOFCAPTION: // Switch buffers
            // The currently *visible* buffer is leaving, so now we know it's ending
            // time. Time to actually write it to file.
            if (wb->data608->mode == MODE_POPUP)
            {
                swap_visible_buffer(wb);
                wb->data608->current_visible_start_ms = get_last_pts(wb);
            }
            if (write_cc_buffer(wb))
                wb->data608->screenfuls_counter++;
            if (wb->data608->mode != MODE_POPUP)
                swap_visible_buffer(wb);
            wb->data608->cursor_column = 0;
            wb->data608->cursor_row = 0;
            wb->data608->color=default_color;
            wb->data608->font=FONT_REGULAR;
            break;
        default:
            if (debug_608)
            {
                hb_log ("\rNot yet implemented.\n");
            }
            break;
    }
}
/* End of stream: synthesize an Erase Displayed Memory command so that any
 * caption still pending gets flushed to the output chain. */
static void handle_end_of_data(struct s_write *wb)
{
    handle_command (0x14, 0x2c, wb); // EDM
}
/* SPECIAL CHARACTERS: map codes with c2 in 0x30..0x3f into the decoder's
 * private 0x80..0x8f range and emit the resulting character. */
static void handle_double(const unsigned char c1, const unsigned char c2,
                          struct s_write *wb)
{
    if (wb->data608->channel != cc_channel)
        return;
    if (c2 < 0x30 || c2 > 0x3f)
        return;

    unsigned char mapped = c2 + 0x50; // So if c>=0x80 && c<=0x8f, it comes from here
    if (debug_608)
        hb_log ("\rDouble: %02X %02X --> %c\n", c1, c2, mapped);
    write_char(mapped, wb);
}
/* Process EXTENDED CHARACTERS (hi 0x12/0x13, lo 0x20..0x3f): map the code
 * into the decoder's private ranges and emit it, replacing the fallback
 * basic character sent just before it.  Returns 1 unless the data is for
 * another channel (then 0). */
static unsigned char handle_extended(unsigned char hi, unsigned char lo,
                                     struct s_write *wb)
{
    // Handle channel change: these codes only switch between 1&3 / 2&4
    if (wb->new_channel > 2)
    {
        wb->new_channel -= 2;
        if (debug_608)
            hb_log ("\nChannel correction, now %d\n", wb->new_channel);
    }
    wb->data608->channel = wb->new_channel;
    if (wb->data608->channel != cc_channel)
        return 0;

    unsigned char mapped = 0;
    if (debug_608)
        hb_log ("\rExtended: %02X %02X\n", hi, lo);
    if (lo >= 0x20 && lo <= 0x3f && (hi == 0x12 || hi == 0x13))
    {
        if (hi == 0x12)
            mapped = lo + 0x70; // So if c>=0x90 && c<=0xaf it comes from here
        else
            mapped = lo + 0x90; // So if c>=0xb0 && c<=0xcf it comes from here
        // This column change is because extended characters replace
        // the previous character (which is sent for basic decoders
        // to show something similar to the real char)
        if (wb->data608->cursor_column > 0)
            wb->data608->cursor_column--;
        write_char (mapped, wb);
    }
    return 1;
}
/* Process PREAMBLE ACCESS CODES (PAC): set the cursor row/column and the
 * current color/font from a positioning code pair. */
static void handle_pac(unsigned char c1, unsigned char c2, struct s_write *wb)
{
    // Handle channel change: PACs only switch between 1&3 / 2&4
    if (wb->new_channel > 2)
    {
        wb->new_channel -= 2;
        if (debug_608)
            hb_log ("\nChannel correction, now %d\n", wb->new_channel);
    }
    wb->data608->channel = wb->new_channel;
    if (wb->data608->channel != cc_channel)
        return;

    // Row is encoded across bits of both bytes
    int row = rowdata[((c1 << 1) & 14) | ((c2 >> 5) & 1)];

    if (debug_608)
        hb_log ("\rPAC: %02X %02X", c1, c2);

    // Normalize c2 into a pac2_attribs index
    if (c2 >= 0x40 && c2 <= 0x5f)
        c2 = c2 - 0x40;
    else if (c2 >= 0x60 && c2 <= 0x7f)
        c2 = c2 - 0x60;
    else
    {
        if (debug_608)
            hb_log ("\rThis is not a PAC!!!!!\n");
        return;
    }

    int color = pac2_attribs[c2][0];
    int font = pac2_attribs[c2][1];
    int indent = pac2_attribs[c2][2];
    if (debug_608)
        hb_log (" -- Position: %d:%d, color: %s, font: %s\n", row,
                indent, color_text[color][0], font_text[font]);

    // CC spec says to the preferred method to handle a roll-up base row
    // that causes the display to scroll off the top of the screen is to
    // adjust the base row down.
    int keep_lines;
    switch (wb->data608->mode)
    {
        case MODE_ROLLUP_2:
            keep_lines = 2;
            break;
        case MODE_ROLLUP_3:
            keep_lines = 3;
            break;
        case MODE_ROLLUP_4:
            keep_lines = 4;
            break;
        default:
            // Not rollup mode, all rows ok
            keep_lines = 0;
            break;
    }
    if (row < keep_lines)
        row = keep_lines;

    if (wb->data608->mode != MODE_TEXT)
    {
        // According to Robson, row info is discarded in text mode
        // but column is accepted
        //
        // CC-608 spec says current rollup display must move to the
        // new position when the cursor row changes
        move_roll_up(wb, row - 1);
        wb->data608->cursor_row = row - 1; // Since the array is 0 based
    }
    wb->data608->rollup_base_row = row - 1;
    wb->data608->cursor_column = indent;
}
/* Emit one printable character; control codes (< 0x20) and data for other
 * channels are ignored. */
static void handle_single(const unsigned char c1, struct s_write *wb)
{
    if (c1 < 0x20 || wb->data608->channel != cc_channel)
        return; // We don't allow special stuff here
    if (debug_608)
        hb_log ("%c", c1);
    write_char (c1, wb);
}
/* Decode a channel switch from the first control byte.  Bytes 0x14, 0x1c,
 * 0x15 and 0x1d select channels 1-4; any other byte keeps the current
 * channel. */
static int check_channel(unsigned char c1, struct s_write *wb)
{
    int channel;

    switch (c1)
    {
        case 0x14:
            channel = 1;
            break;
        case 0x1c:
            channel = 2;
            break;
        case 0x15:
            channel = 3;
            break;
        case 0x1d:
            channel = 4;
            break;
        default:
            // Otherwise keep the current channel
            return wb->data608->channel;
    }
    if (debug_608 && wb->data608->channel != channel)
        hb_log ("\nChannel change, now %d\n", channel);
    return channel;
}
/* Handle Command, special char or attribute and also check for
 * channel changes.
 * Returns 1 if something was written to screen, 0 otherwise */
static int disCommand(unsigned char hi, unsigned char lo, struct s_write *wb)
{
    int wrote_to_screen=0;

    /* Full channel changes are only allowed for "GLOBAL CODES",
     * "OTHER POSITIONING CODES", "BACKGROUND COLOR CODES",
     * "MID-ROW CODES".
     * "PREAMBLE ACCESS CODES", "BACKGROUND COLOR CODES" and
     * SPECIAL/SPECIAL CHARACTERS allow only switching
     * between 1&3 or 2&4. */
    wb->new_channel = check_channel (hi,wb);
    //if (wb->data608->channel!=cc_channel)
    //    continue;

    // 0x18..0x1f are the second-channel counterparts of 0x10..0x17
    if (hi>=0x18 && hi<=0x1f)
        hi=hi-8;

    // Route the pair by code range: PAC, mid-row attribute, special or
    // extended character, or control command
    switch (hi)
    {
        case 0x10:
            if (lo>=0x40 && lo<=0x5f)
                handle_pac (hi,lo,wb);
            break;
        case 0x11:
            if (lo>=0x20 && lo<=0x2f)
                handle_text_attr (hi,lo,wb);
            if (lo>=0x30 && lo<=0x3f)
            {
                wrote_to_screen=1;
                handle_double (hi,lo,wb);
            }
            if (lo>=0x40 && lo<=0x7f)
                handle_pac (hi,lo,wb);
            break;
        case 0x12:
        case 0x13:
            if (lo>=0x20 && lo<=0x3f)
            {
                wrote_to_screen=handle_extended (hi,lo,wb);
            }
            if (lo>=0x40 && lo<=0x7f)
                handle_pac (hi,lo,wb);
            break;
        case 0x14:
        case 0x15:
            if (lo>=0x20 && lo<=0x2f)
                handle_command (hi,lo,wb);
            if (lo>=0x40 && lo<=0x7f)
                handle_pac (hi,lo,wb);
            break;
        case 0x16:
            if (lo>=0x40 && lo<=0x7f)
                handle_pac (hi,lo,wb);
            break;
        case 0x17:
            // NOTE(review): handle_command() also decodes TABOFFSET3
            // (lo==0x23) for this hi byte, but it is not routed here —
            // verify against upstream whether 0x23 should be included.
            if (lo>=0x21 && lo<=0x22)
                handle_command (hi,lo,wb);
            if (lo>=0x2e && lo<=0x2f)
                handle_text_attr (hi,lo,wb);
            if (lo>=0x40 && lo<=0x7f)
                handle_pac (hi,lo,wb);
            break;
    }
    return wrote_to_screen;
}
/*
 * Feed raw 608 byte pairs to the decoder state machine.  Each pair is
 * parity-stripped and dispatched: XDS start/end markers, control codes
 * (via disCommand, with dual-transmission de-duplication), or plain
 * character pairs (via handle_single).  In direct-rollup mode a subtitle
 * buffer is pushed out after every pair while in a roll-up mode.
 *
 * NOTE(review): the archived text of the loop header, the hi/lo parity
 * stripping and the padding check was corrupted (an HTML-tag-like "<...>"
 * span was stripped); those lines are reconstructed from the upstream
 * HandBrake 608 decoder — verify against the pristine file.
 */
static void process608(const unsigned char *data, int length,
                       struct s_write *wb)
{
    static int textprinted = 0;
    int i;

    if (data != NULL)
    {
        for (i = 0; i < length; i = i + 2)
        {
            unsigned char hi, lo;
            hi = data[i] & 0x7F;   // Get rid of parity bit
            lo = data[i+1] & 0x7F; // Get rid of parity bit

            if (hi == 0 && lo == 0) // Just padding
                continue;

            if (hi >= 0x01 && hi <= 0x0E)
            {
                // XDS crap - mode. Would be nice to support it eventually
                // wb->data608->last_c1=0;
                // wb->data608->last_c2=0;
                wb->data608->channel = 3; // Always channel 3
                wb->in_xds_mode = 1;
            }
            if (hi == 0x0F) // End of XDS block
            {
                wb->in_xds_mode = 0;
                continue;
            }
            if (hi >= 0x10 && hi < 0x1F) // Non-character code or special/extended char
                // http://www.geocities.com/mcpoodle43/SCC_TOOLS/DOCS/CC_CODES.HTML
                // http://www.geocities.com/mcpoodle43/SCC_TOOLS/DOCS/CC_CHARS.HTML
            {
                // We were writing characters before, start a new line for
                // diagnostic output from disCommand()
                if (debug_608 && textprinted == 1)
                {
                    hb_log("\n");
                    textprinted = 0;
                }
                wb->in_xds_mode = 0; // Back to normal
                if (wb->data608->last_c1 == hi && wb->data608->last_c2 == lo)
                {
                    // Duplicate dual code, discard
                    continue;
                }
                wb->data608->last_c1 = hi;
                wb->data608->last_c2 = lo;
                disCommand (hi, lo, wb);
            }
            if (hi >= 0x20) // Standard characters (always in pairs)
            {
                // Only print if the channel is active
                if (wb->data608->channel != cc_channel)
                    continue;
                if (debug_608)
                {
                    if (textprinted == 0)
                    {
                        hb_log("\n");
                        textprinted = 1;
                    }
                }
                handle_single(hi, wb);
                handle_single(lo, wb);
                wb->data608->last_c1 = 0;
                wb->data608->last_c2 = 0;
            }
            if (debug_608 && !textprinted && wb->data608->channel == cc_channel)
            {   // Current FTS information after the characters are shown
                //hb_log("Current FTS: %s\n", print_mstime(get_last_pts()));
            }
            if ((wb->data608->mode == MODE_ROLLUP_2 ||
                 wb->data608->mode == MODE_ROLLUP_3 ||
                 wb->data608->mode == MODE_ROLLUP_4) &&
                wb->direct_rollup)
            {
                // If we are showing rollup on the fly (direct_rollup)
                // write a buffer now
                write_cc_buffer(wb);
                wb->data608->current_visible_start_ms = get_last_pts(wb);
            }
        }
    }
}
// Private state of the 608 decoder work object
struct hb_work_private_s
{
    hb_job_t       * job;   // transcode job this worker belongs to
    struct s_write * cc608; // 608 decoder context (owned; freed in decccClose)
};
/*
 * Allocate and initialize the 608 decoder private state and emit the
 * generic SSA Script Info header for the title geometry.
 * Returns 0 on success, non-zero on failure (allocation failure or
 * general_608_init() failure).
 */
static int decccInit( hb_work_object_t * w, hb_job_t * job )
{
    int retval = 1;
    hb_work_private_t * pv;

    pv = calloc( 1, sizeof( hb_work_private_t ) );
    if( pv )
    {
        w->private_data = pv;
        pv->job = job;
        pv->cc608 = calloc(1, sizeof(struct s_write));
        if( pv->cc608 )
        {
            // Title geometry is needed to position SSA captions
            pv->cc608->width = job->title->width;
            pv->cc608->height = job->title->height;
            memcpy(pv->cc608->crop, job->crop, sizeof(int[4]));
            retval = general_608_init(pv->cc608);
            if( !retval )
            {
                pv->cc608->data608 = calloc(1, sizeof(struct eia608));
                if( pv->cc608->data608 )
                {
                    init_eia608(pv->cc608->data608);
                }
                else
                {
                    // Was: retval set but init_eia608(NULL) still called,
                    // a NULL dereference.  Report failure instead.
                    retval = 1;
                }
            }
        }
    }
    if (!retval)
    {
        // Generate generic SSA Script Info.
        int height = job->title->height - job->crop[0] - job->crop[1];
        int width = job->title->width - job->crop[2] - job->crop[3];
        hb_subtitle_add_ssa_header(w->subtitle, width, height);

        // When rendering subs, we need to push rollup subtitles out
        // asap (instead of waiting for a completed line) so that we
        // do not miss the frame that they should be rendered over.
        // Guarded by success: previously this dereferenced pv->cc608
        // unconditionally, crashing when any allocation above failed.
        pv->cc608->direct_rollup = w->subtitle->config.dest == RENDERSUB;
    }
    return retval;
}
/* Work loop entry: feed one input buffer of raw 608 data through the
 * decoder and hand any completed subtitle buffers downstream.  An empty
 * input buffer signals EOF: pending captions are flushed and the EOF
 * buffer is chained after them. */
static int decccWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                      hb_buffer_t ** buf_out )
{
    hb_work_private_t *pv = w->private_data;
    hb_buffer_t *in = *buf_in;

    if (in->size <= 0)
    {
        // EOF on input stream: flush whatever is still on screen
        handle_end_of_data(pv->cc608);
        if (pv->cc608->hb_last_buffer)
        {
            // Chain the EOF buffer after the pending subtitles
            pv->cc608->hb_last_buffer->next = in;
            *buf_out = pv->cc608->hb_buffer;
            *buf_in = NULL;
            pv->cc608->hb_buffer = NULL;
            pv->cc608->hb_last_buffer = NULL;
        }
        else
        {
            // Nothing pending; just forward the EOF buffer
            *buf_out = in;
            *buf_in = NULL;
        }
        return HB_WORK_DONE;
    }

    // Feed the raw byte pairs to the 608 state machine
    pv->cc608->last_pts = in->s.start;
    process608(in->data, in->size, pv->cc608);

    // Pass on any subtitle buffers the decoder completed
    *buf_out = pv->cc608->hb_buffer;
    pv->cc608->hb_buffer = NULL;
    pv->cc608->hb_last_buffer = NULL;
    return HB_WORK_OK;
}
/* Release everything decccInit allocated, innermost first. */
static void decccClose( hb_work_object_t * w )
{
    hb_work_private_t *pv = w->private_data;

    general_608_close( pv->cc608 );
    free( pv->cc608->data608 );
    free( pv->cc608 );
    free( pv );
}
// Work object registration for the EIA-608 closed caption decoder:
// id, display name, then the init/work/close entry points above.
hb_work_object_t hb_deccc608 =
{
    WORK_DECCC608,
    "Closed Caption (608) decoder",
    decccInit,
    decccWork,
    decccClose
};
HandBrake-0.10.2/libhb/colormap.c 0000664 0001752 0001752 00000074172 12463330511 017110 0 ustar handbrake handbrake /* colormap.c
*
* Copyright (c) 2003-2015 HandBrake Team
* This file is part of the HandBrake source code
* Homepage: .
* It may be used under the terms of the GNU General Public License v2.
* For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include
#include
#include
#include "colormap.h"
// One named color: a color name and its packed 24-bit RGB value
typedef struct
{
    char *name;   // color name, e.g. "aliceblue"
    uint32_t rgb; // 0xRRGGBB
} hb_colormap_t;
// Color-name -> 0xRRGGBB lookup table, terminated by a NULL name.
// The names mirror the X11/ImageMagick color list (hence numbered shades
// like "gray0".."gray100").  The table contains duplicate names (e.g.
// "black", "gray", "green", "maroon", "none", "purple"); lookup returns
// the FIRST match, so the later duplicate entries are dead and are kept
// only to mirror the upstream color list.
static hb_colormap_t colormap[] =
{
    { "none", 0xFFFFFF },       // first match wins; the later "none" entry is dead
    { "black", 0x000000 },
    { "white", 0xFFFFFF },
    { "red", 0xFF0000 },
    { "green", 0x00FF00 },      // X11 green; the later CSS "green" (0x008000) is dead
    { "blue", 0x0000FF },
    { "aliceblue", 0xF0F8FF },
    { "antiquewhite", 0xFAEBD7 },
    { "antiquewhite1", 0xFFEFDB },
    { "antiquewhite2", 0xEEDFCC },
    { "antiquewhite3", 0xCDC0B0 },
    { "antiquewhite4", 0x8B8378 },
    { "aqua", 0x00FFFF },
    { "aquamarine", 0x7FFFD4 },
    { "aquamarine1", 0x7FFFD4 },
    { "aquamarine2", 0x76EEC6 },
    { "aquamarine3", 0x66CDAA },
    { "aquamarine4", 0x458B74 },
    { "azure", 0xF0FFFF },
    { "azure1", 0xF0FFFF },
    { "azure2", 0xE0EEEE },
    { "azure3", 0xC1CDCD },
    { "azure4", 0x838B8B },
    { "beige", 0xF5F5DC },
    { "bisque", 0xFFE4C4 },
    { "bisque1", 0xFFE4C4 },
    { "bisque2", 0xEED5B7 },
    { "bisque3", 0xCDB79E },
    { "bisque4", 0x8B7D6B },
    { "black", 0x000000 },      // duplicate (dead)
    { "blanchedalmond", 0xFFEBCD },
    { "blue", 0x0000FF },       // duplicate (dead)
    { "blue1", 0x0000FF },
    { "blue2", 0x0000EE },
    { "blue3", 0x0000CD },
    { "blue4", 0x00008B },
    { "blueviolet", 0x8A2BE2 },
    { "brown", 0xA52A2A },
    { "brown1", 0xFF4040 },
    { "brown2", 0xEE3B3B },
    { "brown3", 0xCD3333 },
    { "brown4", 0x8B2323 },
    { "burlywood", 0xDEB887 },
    { "burlywood1", 0xFFD39B },
    { "burlywood2", 0xEEC591 },
    { "burlywood3", 0xCDAA7D },
    { "burlywood4", 0x8B7355 },
    { "cadetblue", 0x5F9EA0 },
    { "cadetblue", 0x5F9EA0 },  // duplicate (dead)
    { "cadetblue1", 0x98F5FF },
    { "cadetblue2", 0x8EE5EE },
    { "cadetblue3", 0x7AC5CD },
    { "cadetblue4", 0x53868B },
    { "chartreuse", 0x7FFF00 },
    { "chartreuse1", 0x7FFF00 },
    { "chartreuse2", 0x76EE00 },
    { "chartreuse3", 0x66CD00 },
    { "chartreuse4", 0x458B00 },
    { "chocolate", 0xD2691E },
    { "chocolate1", 0xFF7F24 },
    { "chocolate2", 0xEE7621 },
    { "chocolate3", 0xCD661D },
    { "chocolate4", 0x8B4513 },
    { "coral", 0xFF7F50 },
    { "coral1", 0xFF7256 },
    { "coral2", 0xEE6A50 },
    { "coral3", 0xCD5B45 },
    { "coral4", 0x8B3E2F },
    { "cornflowerblue", 0x6495ED },
    { "cornsilk", 0xFFF8DC },
    { "cornsilk1", 0xFFF8DC },
    { "cornsilk2", 0xEEE8CD },
    { "cornsilk3", 0xCDC8B1 },
    { "cornsilk4", 0x8B8878 },
    { "crimson", 0xDC143C },
    { "cyan", 0x00FFFF },
    { "cyan1", 0x00FFFF },
    { "cyan2", 0x00EEEE },
    { "cyan3", 0x00CDCD },
    { "cyan4", 0x008B8B },
    { "darkblue", 0x00008B },
    { "darkcyan", 0x008B8B },
    { "darkgoldenrod", 0xB8860B },
    { "darkgoldenrod1", 0xFFB90F },
    { "darkgoldenrod2", 0xEEAD0E },
    { "darkgoldenrod3", 0xCD950C },
    { "darkgoldenrod4", 0x8B6508 },
    { "darkgray", 0xA9A9A9 },
    { "darkgreen", 0x006400 },
    { "darkgrey", 0xA9A9A9 },
    { "darkkhaki", 0xBDB76B },
    { "darkmagenta", 0x8B008B },
    { "darkolivegreen", 0x556B2F },
    { "darkolivegreen1", 0xCAFF70 },
    { "darkolivegreen2", 0xBCEE68 },
    { "darkolivegreen3", 0xA2CD5A },
    { "darkolivegreen4", 0x6E8B3D },
    { "darkorange", 0xFF8C00 },
    { "darkorange1", 0xFF7F00 },
    { "darkorange2", 0xEE7600 },
    { "darkorange3", 0xCD6600 },
    { "darkorange4", 0x8B4500 },
    { "darkorchid", 0x9932CC },
    { "darkorchid1", 0xBF3EFF },
    { "darkorchid2", 0xB23AEE },
    { "darkorchid3", 0x9A32CD },
    { "darkorchid4", 0x68228B },
    { "darkred", 0x8B0000 },
    { "darksalmon", 0xE9967A },
    { "darkseagreen", 0x8FBC8F },
    { "darkseagreen1", 0xC1FFC1 },
    { "darkseagreen2", 0xB4EEB4 },
    { "darkseagreen3", 0x9BCD9B },
    { "darkseagreen4", 0x698B69 },
    { "darkslateblue", 0x483D8B },
    { "darkslategray", 0x2F4F4F },
    { "darkslategray1", 0x97FFFF },
    { "darkslategray2", 0x8DEEEE },
    { "darkslategray3", 0x79CDCD },
    { "darkslategray4", 0x528B8B },
    { "darkslategrey", 0x2F4F4F },
    { "darkturquoise", 0x00CED1 },
    { "darkviolet", 0x9400D3 },
    { "darkviolet", 0x9400D3 }, // duplicate (dead)
    { "deeppink", 0xFF1493 },
    { "deeppink1", 0xFF1493 },
    { "deeppink2", 0xEE1289 },
    { "deeppink3", 0xCD1076 },
    { "deeppink4", 0x8B0A50 },
    { "deepskyblue", 0x00BFFF },
    { "deepskyblue1", 0x00BFFF },
    { "deepskyblue2", 0x00B2EE },
    { "deepskyblue3", 0x009ACD },
    { "deepskyblue4", 0x00688B },
    { "dimgray", 0x696969 },
    { "dimgrey", 0x696969 },
    { "dodgerblue", 0x1E90FF },
    { "dodgerblue1", 0x1E90FF },
    { "dodgerblue2", 0x1C86EE },
    { "dodgerblue3", 0x1874CD },
    { "dodgerblue4", 0x104E8B },
    { "firebrick", 0xB22222 },
    { "firebrick1", 0xFF3030 },
    { "firebrick2", 0xEE2C2C },
    { "firebrick3", 0xCD2626 },
    { "firebrick4", 0x8B1A1A },
    { "floralwhite", 0xFFFAF0 },
    { "forestgreen", 0x228B22 },
    { "fractal", 0x808080 },
    { "fuchsia", 0xFF00FF },
    { "gainsboro", 0xDCDCDC },
    { "ghostwhite", 0xF8F8FF },
    { "gold", 0xFFD700 },
    { "gold1", 0xFFD700 },
    { "gold2", 0xEEC900 },
    { "gold3", 0xCDAD00 },
    { "gold4", 0x8B7500 },
    { "goldenrod", 0xDAA520 },
    { "goldenrod1", 0xFFC125 },
    { "goldenrod2", 0xEEB422 },
    { "goldenrod3", 0xCD9B1D },
    { "goldenrod4", 0x8B6914 },
    // NOTE(review): "gray" resolves to 0x7E7E7E (ImageMagick) while "grey"
    // resolves to 0xBEBEBE (X11) — intentional upstream quirk, the second
    // "gray" entry below is dead.
    { "gray", 0x7E7E7E },
    { "gray", 0xBEBEBE },       // duplicate (dead)
    { "gray0", 0x000000 },
    { "gray1", 0x030303 },
    { "gray10", 0x1A1A1A },
    { "gray100", 0xFFFFFF },
    { "gray11", 0x1C1C1C },
    { "gray12", 0x1F1F1F },
    { "gray13", 0x212121 },
    { "gray14", 0x242424 },
    { "gray15", 0x262626 },
    { "gray16", 0x292929 },
    { "gray17", 0x2B2B2B },
    { "gray18", 0x2E2E2E },
    { "gray19", 0x303030 },
    { "gray2", 0x050505 },
    { "gray20", 0x333333 },
    { "gray21", 0x363636 },
    { "gray22", 0x383838 },
    { "gray23", 0x3B3B3B },
    { "gray24", 0x3D3D3D },
    { "gray25", 0x404040 },
    { "gray26", 0x424242 },
    { "gray27", 0x454545 },
    { "gray28", 0x474747 },
    { "gray29", 0x4A4A4A },
    { "gray3", 0x080808 },
    { "gray30", 0x4D4D4D },
    { "gray31", 0x4F4F4F },
    { "gray32", 0x525252 },
    { "gray33", 0x545454 },
    { "gray34", 0x575757 },
    { "gray35", 0x595959 },
    { "gray36", 0x5C5C5C },
    { "gray37", 0x5E5E5E },
    { "gray38", 0x616161 },
    { "gray39", 0x636363 },
    { "gray4", 0x0A0A0A },
    { "gray40", 0x666666 },
    { "gray41", 0x696969 },
    { "gray42", 0x6B6B6B },
    { "gray43", 0x6E6E6E },
    { "gray44", 0x707070 },
    { "gray45", 0x737373 },
    { "gray46", 0x757575 },
    { "gray47", 0x787878 },
    { "gray48", 0x7A7A7A },
    { "gray49", 0x7D7D7D },
    { "gray5", 0x0D0D0D },
    { "gray50", 0x7F7F7F },
    { "gray51", 0x828282 },
    { "gray52", 0x858585 },
    { "gray53", 0x878787 },
    { "gray54", 0x8A8A8A },
    { "gray55", 0x8C8C8C },
    { "gray56", 0x8F8F8F },
    { "gray57", 0x919191 },
    { "gray58", 0x949494 },
    { "gray59", 0x969696 },
    { "gray6", 0x0F0F0F },
    { "gray60", 0x999999 },
    { "gray61", 0x9C9C9C },
    { "gray62", 0x9E9E9E },
    { "gray63", 0xA1A1A1 },
    { "gray64", 0xA3A3A3 },
    { "gray65", 0xA6A6A6 },
    { "gray66", 0xA8A8A8 },
    { "gray67", 0xABABAB },
    { "gray68", 0xADADAD },
    { "gray69", 0xB0B0B0 },
    { "gray7", 0x121212 },
    { "gray70", 0xB3B3B3 },
    { "gray71", 0xB5B5B5 },
    { "gray72", 0xB8B8B8 },
    { "gray73", 0xBABABA },
    { "gray74", 0xBDBDBD },
    { "gray75", 0xBFBFBF },
    { "gray76", 0xC2C2C2 },
    { "gray77", 0xC4C4C4 },
    { "gray78", 0xC7C7C7 },
    { "gray79", 0xC9C9C9 },
    { "gray8", 0x141414 },
    { "gray80", 0xCCCCCC },
    { "gray81", 0xCFCFCF },
    { "gray82", 0xD1D1D1 },
    { "gray83", 0xD4D4D4 },
    { "gray84", 0xD6D6D6 },
    { "gray85", 0xD9D9D9 },
    { "gray86", 0xDBDBDB },
    { "gray87", 0xDEDEDE },
    { "gray88", 0xE0E0E0 },
    { "gray89", 0xE3E3E3 },
    { "gray9", 0x171717 },
    { "gray90", 0xE5E5E5 },
    { "gray91", 0xE8E8E8 },
    { "gray92", 0xEBEBEB },
    { "gray93", 0xEDEDED },
    { "gray94", 0xF0F0F0 },
    { "gray95", 0xF2F2F2 },
    { "gray96", 0xF5F5F5 },
    { "gray97", 0xF7F7F7 },
    { "gray98", 0xFAFAFA },
    { "gray99", 0xFCFCFC },
    { "green", 0x008000 },      // dead: earlier "green" (0x00FF00) wins
    { "green", 0x00FF00 },      // duplicate (dead)
    { "green1", 0x00FF00 },
    { "green2", 0x00EE00 },
    { "green3", 0x00CD00 },
    { "green4", 0x008B00 },
    { "greenyellow", 0xADFF2F },
    { "grey", 0xBEBEBE },
    { "grey0", 0x000000 },
    { "grey1", 0x030303 },
    { "grey10", 0x1A1A1A },
    { "grey100", 0xFFFFFF },
    { "grey11", 0x1C1C1C },
    { "grey12", 0x1F1F1F },
    { "grey13", 0x212121 },
    { "grey14", 0x242424 },
    { "grey15", 0x262626 },
    { "grey16", 0x292929 },
    { "grey17", 0x2B2B2B },
    { "grey18", 0x2E2E2E },
    { "grey19", 0x303030 },
    { "grey2", 0x050505 },
    { "grey20", 0x333333 },
    { "grey21", 0x363636 },
    { "grey22", 0x383838 },
    { "grey23", 0x3B3B3B },
    { "grey24", 0x3D3D3D },
    { "grey25", 0x404040 },
    { "grey26", 0x424242 },
    { "grey27", 0x454545 },
    { "grey28", 0x474747 },
    { "grey29", 0x4A4A4A },
    { "grey3", 0x080808 },
    { "grey30", 0x4D4D4D },
    { "grey31", 0x4F4F4F },
    { "grey32", 0x525252 },
    { "grey33", 0x545454 },
    { "grey34", 0x575757 },
    { "grey35", 0x595959 },
    { "grey36", 0x5C5C5C },
    { "grey37", 0x5E5E5E },
    { "grey38", 0x616161 },
    { "grey39", 0x636363 },
    { "grey4", 0x0A0A0A },
    { "grey40", 0x666666 },
    { "grey41", 0x696969 },
    { "grey42", 0x6B6B6B },
    { "grey43", 0x6E6E6E },
    { "grey44", 0x707070 },
    { "grey45", 0x737373 },
    { "grey46", 0x757575 },
    { "grey47", 0x787878 },
    { "grey48", 0x7A7A7A },
    { "grey49", 0x7D7D7D },
    { "grey5", 0x0D0D0D },
    { "grey50", 0x7F7F7F },
    { "grey51", 0x828282 },
    { "grey52", 0x858585 },
    { "grey53", 0x878787 },
    { "grey54", 0x8A8A8A },
    { "grey55", 0x8C8C8C },
    { "grey56", 0x8F8F8F },
    { "grey57", 0x919191 },
    { "grey58", 0x949494 },
    { "grey59", 0x969696 },
    { "grey6", 0x0F0F0F },
    { "grey60", 0x999999 },
    { "grey61", 0x9C9C9C },
    { "grey62", 0x9E9E9E },
    { "grey63", 0xA1A1A1 },
    { "grey64", 0xA3A3A3 },
    { "grey65", 0xA6A6A6 },
    { "grey66", 0xA8A8A8 },
    { "grey67", 0xABABAB },
    { "grey68", 0xADADAD },
    { "grey69", 0xB0B0B0 },
    { "grey7", 0x121212 },
    { "grey70", 0xB3B3B3 },
    { "grey71", 0xB5B5B5 },
    { "grey72", 0xB8B8B8 },
    { "grey73", 0xBABABA },
    { "grey74", 0xBDBDBD },
    { "grey75", 0xBFBFBF },
    { "grey76", 0xC2C2C2 },
    { "grey77", 0xC4C4C4 },
    { "grey78", 0xC7C7C7 },
    { "grey79", 0xC9C9C9 },
    { "grey8", 0x141414 },
    { "grey80", 0xCCCCCC },
    { "grey81", 0xCFCFCF },
    { "grey82", 0xD1D1D1 },
    { "grey83", 0xD4D4D4 },
    { "grey84", 0xD6D6D6 },
    { "grey85", 0xD9D9D9 },
    { "grey86", 0xDBDBDB },
    { "grey87", 0xDEDEDE },
    { "grey88", 0xE0E0E0 },
    { "grey89", 0xE3E3E3 },
    { "grey9", 0x171717 },
    { "grey90", 0xE5E5E5 },
    { "grey91", 0xE8E8E8 },
    { "grey92", 0xEBEBEB },
    { "grey93", 0xEDEDED },
    { "grey94", 0xF0F0F0 },
    { "grey95", 0xF2F2F2 },
    { "grey96", 0xF5F5F5 },
    { "grey97", 0xF7F7F7 },
    { "grey98", 0xFAFAFA },
    { "grey99", 0xFCFCFC },
    { "honeydew", 0xF0FFF0 },
    { "honeydew1", 0xF0FFF0 },
    { "honeydew2", 0xE0EEE0 },
    { "honeydew3", 0xC1CDC1 },
    { "honeydew4", 0x838B83 },
    { "hotpink", 0xFF69B4 },
    { "hotpink1", 0xFF6EB4 },
    { "hotpink2", 0xEE6AA7 },
    { "hotpink3", 0xCD6090 },
    { "hotpink4", 0x8B3A62 },
    { "indianred", 0xCD5C5C },
    { "indianred1", 0xFF6A6A },
    { "indianred2", 0xEE6363 },
    { "indianred3", 0xCD5555 },
    { "indianred4", 0x8B3A3A },
    { "indigo", 0x4B0082 },
    { "ivory", 0xFFFFF0 },
    { "ivory1", 0xFFFFF0 },
    { "ivory2", 0xEEEEE0 },
    { "ivory3", 0xCDCDC1 },
    { "ivory4", 0x8B8B83 },
    { "khaki", 0xF0E68C },
    { "khaki1", 0xFFF68F },
    { "khaki2", 0xEEE685 },
    { "khaki3", 0xCDC673 },
    { "khaki4", 0x8B864E },
    { "lavender", 0xE6E6FA },
    { "lavenderblush", 0xFFF0F5 },
    { "lavenderblush1", 0xFFF0F5 },
    { "lavenderblush2", 0xEEE0E5 },
    { "lavenderblush3", 0xCDC1C5 },
    { "lavenderblush4", 0x8B8386 },
    { "lawngreen", 0x7CFC00 },
    { "lemonchiffon", 0xFFFACD },
    { "lemonchiffon1", 0xFFFACD },
    { "lemonchiffon2", 0xEEE9BF },
    { "lemonchiffon3", 0xCDC9A5 },
    { "lemonchiffon4", 0x8B8970 },
    { "lightblue", 0xADD8E6 },
    { "lightblue1", 0xBFEFFF },
    { "lightblue2", 0xB2DFEE },
    { "lightblue3", 0x9AC0CD },
    { "lightblue4", 0x68838B },
    { "lightcoral", 0xF08080 },
    { "lightcyan", 0xE0FFFF },
    { "lightcyan1", 0xE0FFFF },
    { "lightcyan2", 0xD1EEEE },
    { "lightcyan3", 0xB4CDCD },
    { "lightcyan4", 0x7A8B8B },
    { "lightgoldenrod", 0xEEDD82 },
    { "lightgoldenrod1", 0xFFEC8B },
    { "lightgoldenrod2", 0xEEDC82 },
    { "lightgoldenrod3", 0xCDBE70 },
    { "lightgoldenrod4", 0x8B814C },
    { "lightgoldenrodyellow", 0xFAFAD2 },
    { "lightgray", 0xD3D3D3 },
    { "lightgreen", 0x90EE90 },
    { "lightgrey", 0xD3D3D3 },
    { "lightpink", 0xFFB6C1 },
    { "lightpink1", 0xFFAEB9 },
    { "lightpink2", 0xEEA2AD },
    { "lightpink3", 0xCD8C95 },
    { "lightpink4", 0x8B5F65 },
    { "lightsalmon", 0xFFA07A },
    { "lightsalmon1", 0xFFA07A },
    { "lightsalmon2", 0xEE9572 },
    { "lightsalmon3", 0xCD8162 },
    { "lightsalmon4", 0x8B5742 },
    { "lightseagreen", 0x20B2AA },
    { "lightskyblue", 0x87CEFA },
    { "lightskyblue1", 0xB0E2FF },
    { "lightskyblue2", 0xA4D3EE },
    { "lightskyblue3", 0x8DB6CD },
    { "lightskyblue4", 0x607B8B },
    { "lightslateblue", 0x8470FF },
    { "lightslategray", 0x778899 },
    { "lightslategrey", 0x778899 },
    { "lightsteelblue", 0xB0C4DE },
    { "lightsteelblue1", 0xCAE1FF },
    { "lightsteelblue2", 0xBCD2EE },
    { "lightsteelblue3", 0xA2B5CD },
    { "lightsteelblue4", 0x6E7B8B },
    { "lightyellow", 0xFFFFE0 },
    { "lightyellow1", 0xFFFFE0 },
    { "lightyellow2", 0xEEEED1 },
    { "lightyellow3", 0xCDCDB4 },
    { "lightyellow4", 0x8B8B7A },
    { "lime", 0x00FF00 },
    { "limegreen", 0x32CD32 },
    { "linen", 0xFAF0E6 },
    { "magenta", 0xFF00FF },
    { "magenta1", 0xFF00FF },
    { "magenta2", 0xEE00EE },
    { "magenta3", 0xCD00CD },
    { "magenta4", 0x8B008B },
    { "maroon", 0x800000 },
    { "maroon", 0xB03060 },     // duplicate (dead)
    { "maroon1", 0xFF34B3 },
    { "maroon2", 0xEE30A7 },
    { "maroon3", 0xCD2990 },
    { "maroon4", 0x8B1C62 },
    { "mediumaquamarine", 0x66CDAA },
    { "mediumblue", 0x0000CD },
    { "mediumforestgreen", 0x32814B },
    { "mediumgoldenrod", 0xD1C166 },
    { "mediumorchid", 0xBA55D3 },
    { "mediumorchid1", 0xE066FF },
    { "mediumorchid2", 0xD15FEE },
    { "mediumorchid3", 0xB452CD },
    { "mediumorchid4", 0x7A378B },
    { "mediumpurple", 0x9370DB },
    { "mediumpurple1", 0xAB82FF },
    { "mediumpurple2", 0x9F79EE },
    { "mediumpurple3", 0x8968CD },
    { "mediumpurple4", 0x5D478B },
    { "mediumseagreen", 0x3CB371 },
    { "mediumslateblue", 0x7B68EE },
    { "mediumspringgreen", 0x00FA9A },
    { "mediumturquoise", 0x48D1CC },
    { "mediumvioletred", 0xC71585 },
    { "midnightblue", 0x191970 },
    { "mintcream", 0xF5FFFA },
    { "mistyrose", 0xFFE4E1 },
    { "mistyrose1", 0xFFE4E1 },
    { "mistyrose2", 0xEED5D2 },
    { "mistyrose3", 0xCDB7B5 },
    { "mistyrose4", 0x8B7D7B },
    { "moccasin", 0xFFE4B5 },
    { "navajowhite", 0xFFDEAD },
    { "navajowhite1", 0xFFDEAD },
    { "navajowhite2", 0xEECFA1 },
    { "navajowhite3", 0xCDB38B },
    { "navajowhite4", 0x8B795E },
    { "navy", 0x000080 },
    { "navyblue", 0x000080 },
    { "none", 0x0000FF },       // dead: earlier "none" (0xFFFFFF) wins
    { "oldlace", 0xFDF5E6 },
    { "olive", 0x808000 },
    { "olivedrab", 0x6B8E23 },
    { "olivedrab1", 0xC0FF3E },
    { "olivedrab2", 0xB3EE3A },
    { "olivedrab3", 0x9ACD32 },
    { "olivedrab4", 0x698B22 },
    { "opaque", 0x000000 },
    { "orange", 0xFFA500 },
    { "orange1", 0xFFA500 },
    { "orange2", 0xEE9A00 },
    { "orange3", 0xCD8500 },
    { "orange4", 0x8B5A00 },
    { "orangered", 0xFF4500 },
    { "orangered1", 0xFF4500 },
    { "orangered2", 0xEE4000 },
    { "orangered3", 0xCD3700 },
    { "orangered4", 0x8B2500 },
    { "orchid", 0xDA70D6 },
    { "orchid1", 0xFF83FA },
    { "orchid2", 0xEE7AE9 },
    { "orchid3", 0xCD69C9 },
    { "orchid4", 0x8B4789 },
    { "palegoldenrod", 0xEEE8AA },
    { "palegreen", 0x98FB98 },
    { "palegreen1", 0x9AFF9A },
    { "palegreen2", 0x90EE90 },
    { "palegreen3", 0x7CCD7C },
    { "palegreen4", 0x548B54 },
    { "paleturquoise", 0xAFEEEE },
    { "paleturquoise1", 0xBBFFFF },
    { "paleturquoise2", 0xAEEEEE },
    { "paleturquoise3", 0x96CDCD },
    { "paleturquoise4", 0x668B8B },
    { "palevioletred", 0xDB7093 },
    { "palevioletred1", 0xFF82AB },
    { "palevioletred2", 0xEE799F },
    { "palevioletred3", 0xCD6889 },
    { "palevioletred4", 0x8B475D },
    { "papayawhip", 0xFFEFD5 },
    { "peachpuff", 0xFFDAB9 },
    { "peachpuff1", 0xFFDAB9 },
    { "peachpuff2", 0xEECBAD },
    { "peachpuff3", 0xCDAF95 },
    { "peachpuff4", 0x8B7765 },
    { "peru", 0xCD853F },
    { "pink", 0xFFC0CB },
    { "pink1", 0xFFB5C5 },
    { "pink2", 0xEEA9B8 },
    { "pink3", 0xCD919E },
    { "pink4", 0x8B636C },
    { "plum", 0xDDA0DD },
    { "plum1", 0xFFBBFF },
    { "plum2", 0xEEAEEE },
    { "plum3", 0xCD96CD },
    { "plum4", 0x8B668B },
    { "powderblue", 0xB0E0E6 },
    { "purple", 0x800080 },
    { "purple", 0xA020F0 },     // duplicate (dead)
    { "purple1", 0x9B30FF },
    { "purple2", 0x912CEE },
    { "purple3", 0x7D26CD },
    { "purple4", 0x551A8B },
    { "red", 0xFF0000 },
    { "red1", 0xFF0000 },
    { "red2", 0xEE0000 },
    { "red3", 0xCD0000 },
    { "red4", 0x8B0000 },
    { "rosybrown", 0xBC8F8F },
    { "rosybrown1", 0xFFC1C1 },
    { "rosybrown2", 0xEEB4B4 },
    { "rosybrown3", 0xCD9B9B },
    { "rosybrown4", 0x8B6969 },
    { "royalblue", 0x4169E1 },
    { "royalblue1", 0x4876FF },
    { "royalblue2", 0x436EEE },
    { "royalblue3", 0x3A5FCD },
    { "royalblue4", 0x27408B },
    { "saddlebrown", 0x8B4513 },
    { "salmon", 0xFA8072 },
    { "salmon1", 0xFF8C69 },
    { "salmon2", 0xEE8262 },
    { "salmon3", 0xCD7054 },
    { "salmon4", 0x8B4C39 },
    { "sandybrown", 0xF4A460 },
    { "seagreen", 0x2E8B57 },
    { "seagreen1", 0x54FF9F },
    { "seagreen2", 0x4EEE94 },
    { "seagreen3", 0x43CD80 },
    { "seagreen4", 0x2E8B57 },
    { "seashell", 0xFFF5EE },
    { "seashell1", 0xFFF5EE },
    { "seashell2", 0xEEE5DE },
    { "seashell3", 0xCDC5BF },
    { "seashell4", 0x8B8682 },
    { "sienna", 0xA0522D },
    { "sienna1", 0xFF8247 },
    { "sienna2", 0xEE7942 },
    { "sienna3", 0xCD6839 },
    { "sienna4", 0x8B4726 },
    { "silver", 0xC0C0C0 },
    { "skyblue", 0x87CEEB },
    { "skyblue1", 0x87CEFF },
    { "skyblue2", 0x7EC0EE },
    { "skyblue3", 0x6CA6CD },
    { "skyblue4", 0x4A708B },
    { "slateblue", 0x6A5ACD },
    { "slateblue1", 0x836FFF },
    { "slateblue2", 0x7A67EE },
    { "slateblue3", 0x6959CD },
    { "slateblue4", 0x473C8B },
    { "slategray", 0x708090 },
    { "slategray1", 0xC6E2FF },
    { "slategray2", 0xB9D3EE },
    { "slategray3", 0x9FB6CD },
    { "slategray4", 0x6C7B8B },
    { "slategrey", 0x708090 },
    { "snow", 0xFFFAFA },
    { "snow1", 0xFFFAFA },
    { "snow2", 0xEEE9E9 },
    { "snow3", 0xCDC9C9 },
    { "snow4", 0x8B8989 },
    { "springgreen", 0x00FF7F },
    { "springgreen1", 0x00FF7F },
    { "springgreen2", 0x00EE76 },
    { "springgreen3", 0x00CD66 },
    { "springgreen4", 0x008B45 },
    { "steelblue", 0x4682B4 },
    { "steelblue1", 0x63B8FF },
    { "steelblue2", 0x5CACEE },
    { "steelblue3", 0x4F94CD },
    { "steelblue4", 0x36648B },
    { "tan", 0xD2B48C },
    { "tan1", 0xFFA54F },
    { "tan2", 0xEE9A49 },
    { "tan3", 0xCD853F },
    { "tan4", 0x8B5A2B },
    { "teal", 0x008080 },
    { "thistle", 0xD8BFD8 },
    { "thistle1", 0xFFE1FF },
    { "thistle2", 0xEED2EE },
    { "thistle3", 0xCDB5CD },
    { "thistle4", 0x8B7B8B },
    { "tomato", 0xFF6347 },
    { "tomato1", 0xFF6347 },
    { "tomato2", 0xEE5C42 },
    { "tomato3", 0xCD4F39 },
    { "tomato4", 0x8B3626 },
    { "transparent", 0x0000FF },
    { "turquoise", 0x40E0D0 },
    { "turquoise1", 0x00F5FF },
    { "turquoise2", 0x00E5EE },
    { "turquoise3", 0x00C5CD },
    { "turquoise4", 0x00868B },
    { "violet", 0xEE82EE },
    { "violetred", 0xD02090 },
    { "violetred1", 0xFF3E96 },
    { "violetred2", 0xEE3A8C },
    { "violetred3", 0xCD3278 },
    { "violetred4", 0x8B2252 },
    { "wheat", 0xF5DEB3 },
    { "wheat1", 0xFFE7BA },
    { "wheat2", 0xEED8AE },
    { "wheat3", 0xCDBA96 },
    { "wheat4", 0x8B7E66 },
    { "white", 0xFFFFFF },      // duplicate (dead)
    { "whitesmoke", 0xF5F5F5 },
    { "yellow", 0xFFFF00 },
    { "yellow1", 0xFFFF00 },
    { "yellow2", 0xEEEE00 },
    { "yellow3", 0xCDCD00 },
    { "yellow4", 0x8B8B00 },
    { "yellowgreen", 0x9ACD32 },
    { NULL, 0x000000 }          // terminator
};
uint32_t hb_rgb_lookup_by_name(const char *color)
{
int ii = 0;
while (colormap[ii].name != NULL)
{
if (!strcasecmp(color, colormap[ii].name))
return colormap[ii].rgb;
ii++;
}
return 0;
}
HandBrake-0.10.2/libhb/decssasub.c 0000664 0001752 0001752 00000033330 12463330511 017237 0 ustar handbrake handbrake /* decssasub.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: <http://handbrake.fr/>.
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
/*
* Converts SSA subtitles to either:
* (1) TEXTSUB format: UTF-8 subtitles with limited HTML-style markup (<b>, <i>, <u>), or
* (2) PICTURESUB format, using libass.
*
* SSA format references:
* http://www.matroska.org/technical/specs/subtitles/ssa.html
* http://moodub.free.fr/video/ass-specs.doc
* vlc-1.0.4/modules/codec/subtitles/subsass.c:ParseSSAString
*
* libass references:
* libass-0.9.9/ass.h
* vlc-1.0.4/modules/codec/libass.c
*
* @author David Foster (davidfstr)
*/
#include
#include
#include
#include "hb.h"
#include
#include "decssasub.h"
#include "colormap.h"
// Private state for the SSA subtitle decoder work object.
struct hb_work_private_s
{
    // If decoding to PICTURESUB format:
    int readOrder;      // monotonically increasing ReadOrder written into each MKV-SSA packet
    hb_job_t *job;      // owning job, set in decssaInit
};
// Convert an SSA H:MM:SS.CC timestamp into 90 kHz clock ticks
// (1 centisecond = 10 ms = 900 ticks).
#define SSA_2_HB_TIME(hr,min,sec,centi) \
( 90L * ( hr * 1000L * 60 * 60 +\
min * 1000L * 60 +\
sec * 1000L +\
centi * 10L ) )

// Set to 1 to printf every SSA packet as it arrives (see decssaWork).
#define SSA_VERBOSE_PACKETS 0
/*
 * Parse one SSA override block ("{...}") at the start of 'ssa' and apply
 * any recognized override tags to 'style'.
 *
 * Recognized tags: \i \b \u (integer flags), \c&H<bbggrr>& and
 * \<n>c&H<bbggrr>& (colors, BGR order), \a&H<aa>& and \alpha&H<aa>&
 * (alpha, inverted: 0 = opaque).  Unrecognized tags are skipped.
 *
 * Returns the number of bytes consumed, or 0 if 'ssa' does not start
 * with '{' (i.e. there is no override block here).
 */
static int ssa_update_style(char *ssa, hb_subtitle_style_t *style)
{
    int pos, end, index;

    if (ssa[0] != '{')
        return 0;

    pos = 1;
    while (ssa[pos] != '}' && ssa[pos] != '\0')
    {
        index = -1;

        // Skip any malformed markup junk
        while (strchr("\\}", ssa[pos]) == NULL) pos++;
        pos++;
        // Check for an index that is in some markup (e.g. font color)
        if (isdigit(ssa[pos]))
        {
            index = ssa[pos++] - 0x30;
        }
        // Find the end of this markup clause
        end = pos;
        while (strchr("\\}", ssa[end]) == NULL) end++;
        // Handle simple integer valued attributes
        if (strchr("ibu", ssa[pos]) != NULL && isdigit(ssa[pos+1]))
        {
            int val = strtol(ssa + pos + 1, NULL, 0);
            switch (ssa[pos])
            {
                case 'i':
                    style->flags = (style->flags & ~HB_STYLE_FLAG_ITALIC) |
                                   !!val * HB_STYLE_FLAG_ITALIC;
                    break;
                case 'b':
                    style->flags = (style->flags & ~HB_STYLE_FLAG_BOLD) |
                                   !!val * HB_STYLE_FLAG_BOLD;
                    break;
                case 'u':
                    style->flags = (style->flags & ~HB_STYLE_FLAG_UNDERLINE) |
                                   !!val * HB_STYLE_FLAG_UNDERLINE;
                    break;
            }
        }
        if (ssa[pos] == 'c' && ssa[pos+1] == '&' && ssa[pos+2] == 'H')
        {
            // Font color markup (SSA stores colors as &H<BBGGRR>&)
            char *endptr;
            uint32_t bgr;

            bgr = strtol(ssa + pos + 3, &endptr, 16);
            if (*endptr == '&')
            {
                switch (index)
                {
                    case -1:
                    case 1:
                        style->fg_rgb  = HB_BGR_TO_RGB(bgr);
                        break;
                    case 2:
                        style->alt_rgb = HB_BGR_TO_RGB(bgr);
                        break;
                    case 3:
                        style->ol_rgb  = HB_BGR_TO_RGB(bgr);
                        break;
                    case 4:
                        style->bg_rgb  = HB_BGR_TO_RGB(bgr);
                        break;
                    default:
                        // Unknown color index, ignore
                        break;
                }
            }
        }
        // BUGFIX: was "!strcmp(ssa+pos, \"alpha\")", which could never
        // match because the tag text continues past "alpha", so
        // \alpha&H..& tags were silently ignored.  Use strncmp.
        if ((ssa[pos] == 'a' && ssa[pos+1] == '&' && ssa[pos+2] == 'H') ||
            (!strncmp(ssa+pos, "alpha", 5) && ssa[pos+5] == '&' && ssa[pos+6] == 'H'))
        {
            // Font alpha markup
            char *endptr;
            uint8_t alpha;
            int alpha_pos = 3;

            // BUGFIX: was "ssa[1] == 'l'", which tested a fixed offset in
            // the whole string instead of the current tag; distinguish
            // "\alpha..." from "\a..." at the tag position.
            if (ssa[pos+1] == 'l')
                alpha_pos = 7;
            alpha = strtol(ssa + pos + alpha_pos, &endptr, 16);
            if (*endptr == '&')
            {
                // SSA alpha is inverted 0 is opaque
                alpha = 255 - alpha;
                switch (index)
                {
                    case -1:
                    case 1:
                        style->fg_alpha  = alpha;
                        break;
                    case 2:
                        style->alt_alpha = alpha;
                        break;
                    case 3:
                        style->ol_alpha  = alpha;
                        break;
                    case 4:
                        style->bg_alpha  = alpha;
                        break;
                    default:
                        // Unknown alpha index, ignore
                        break;
                }
            }
        }
        pos = end;
    }
    if (ssa[pos] == '}')
        pos++;
    return pos;
}
/*
 * Convert SSA dialogue text to plain text, decoding escape sequences and
 * stopping at (and applying) the first "{...}" style override block.
 *
 * On return *consumed is the number of input bytes processed; 'style' is
 * updated when a markup block ended the scan.  The returned string is
 * malloc'd and owned by the caller.
 */
char * hb_ssa_to_text(char *in, int *consumed, hb_subtitle_style_t *style)
{
    // Plain text is never longer than its SSA source.
    char *out = malloc(strlen(in) + 1);
    int src = 0;
    int dst = 0;

    while (in[src] != '\0')
    {
        // A style override block terminates this text run.
        int markup_len = ssa_update_style(in + src, style);
        if (markup_len)
        {
            *consumed = src + markup_len;
            out[dst++] = '\0';
            return out;
        }

        if (in[src] != '\\')
        {
            out[dst++] = in[src++];
            continue;
        }

        // Escape sequence: '\N'/'\n' -> newline, '\h' -> hard space,
        // anything else passes through literally.
        src++;
        switch (in[src])
        {
            case '\0':
                // Lone trailing backslash: drop it; loop exits next check.
                break;
            case 'N':
            case 'n':
                out[dst++] = '\n';
                src++;
                break;
            case 'h':
                out[dst++] = ' ';
                src++;
                break;
            default:
                out[dst++] = in[src];
                src++;
                break;
        }
    }
    *consumed = src;
    out[dst++] = '\0';
    return out;
}
/*
 * Reset a subtitle style to its defaults: no flag attributes, fully
 * opaque white text/secondary color over a near-black outline and
 * background.
 */
void hb_ssa_style_init(hb_subtitle_style_t *style)
{
    style->flags = 0;

    style->fg_rgb  = 0x00FFFFFF;   // primary text: white
    style->alt_rgb = 0x00FFFFFF;   // secondary text: white
    style->ol_rgb  = 0x000F0F0F;   // outline: near black
    style->bg_rgb  = 0x000F0F0F;   // background: near black

    // All channels fully opaque.
    style->fg_alpha  = 0xFF;
    style->alt_alpha = 0xFF;
    style->ol_alpha  = 0xFF;
    style->bg_alpha  = 0xFF;
}
static hb_buffer_t *ssa_decode_line_to_mkv_ssa( hb_work_object_t * w, uint8_t *in_data, int in_size, int in_sequence );
/*
* Decodes a single SSA packet to one or more TEXTSUB or PICTURESUB subtitle packets.
*
* SSA packet format:
* ( Dialogue: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text CR LF ) +
* 1 2 3 4 5 6 7 8 9 10
*/
/*
 * Split one SSA packet into individual lines and decode each to an
 * MKV-SSA output buffer; returns a linked list of outputs (NULL when
 * nothing was decodable).  NOTE: mutates in->data in place (strtok_r)
 * and grows the buffer by one byte for NUL termination.
 */
static hb_buffer_t *ssa_decode_packet( hb_work_object_t * w, hb_buffer_t *in )
{
    // Store NULL after the end of the buffer to make using string processing safe
    hb_buffer_realloc(in, ++in->size);
    in->data[in->size - 1] = '\0';

    hb_buffer_t *out_list = NULL;
    hb_buffer_t **nextPtr = &out_list;   // tail pointer for O(1) append

    const char *EOL = "\r\n";
    char *curLine, *curLine_parserData;
    for ( curLine = strtok_r( (char *) in->data, EOL, &curLine_parserData );
          curLine;
          curLine = strtok_r( NULL, EOL, &curLine_parserData ) )
    {
        // Skip empty lines and spaces between adjacent CR and LF
        if (curLine[0] == '\0')
            continue;

        // Decode an individual SSA line
        hb_buffer_t *out;
        out = ssa_decode_line_to_mkv_ssa(w, (uint8_t *)curLine, strlen(curLine), in->sequence);
        if ( out == NULL )
            continue;

        // Append 'out' to 'out_list'
        *nextPtr = out;
        nextPtr = &out->next;
    }

    // For point-to-point encoding, when the start time of the stream
    // may be offset, the timestamps of the subtitles must be offset as well.
    //
    // HACK: Here we are making the assumption that, under normal circumstances,
    // the output display time of the first output packet is equal to the
    // display time of the input packet.
    //
    // During point-to-point encoding, the display time of the input
    // packet will be offset to compensate.
    //
    // Therefore we offset all of the output packets by a slip amount
    // such that first output packet's display time aligns with the
    // input packet's display time. This should give the correct time
    // when point-to-point encoding is in effect.
    if (out_list && out_list->s.start > in->s.start)
    {
        int64_t slip = out_list->s.start - in->s.start;
        hb_buffer_t *out;

        out = out_list;
        while (out)
        {
            // Shift every output so the first aligns with the input pts.
            out->s.start -= slip;
            out->s.stop -= slip;
            out = out->next;
        }
    }
    return out_list;
}
/*
* Parses the start and stop time from the specified SSA packet.
*
* Returns true if parsing failed; false otherwise.
*/
/*
 * Extract the Start and End timestamps of a "Dialogue:" line into 90 kHz
 * ticks.  Returns 1 on malformed input, 0 on success.
 */
static int parse_timing_from_ssa_packet( char *in_data, int64_t *in_start, int64_t *in_stop )
{
    int start_hr, start_min, start_sec, start_centi;   // Start field H:MM:SS.CC
    int end_hr, end_min, end_sec, end_centi;           // End field H:MM:SS.CC

    // SSA subtitles have an empty layer field (bare ','). The scanf
    // format specifier "%*128[^,]" will not match on a bare ','. There
    // must be at least one non ',' character in the match. So the format
    // specifier is placed directly next to the ':' so that the next
    // expected ' ' after the ':' will be the character it matches on
    // when there is no layer field.
    if ( sscanf( (char *) in_data, "Dialogue:%*128[^,],"
                 "%d:%d:%d.%d," // Start
                 "%d:%d:%d.%d,", // End
                 &start_hr, &start_min, &start_sec, &start_centi,
                 &end_hr, &end_min, &end_sec, &end_centi ) != 8 )
    {
        return 1;
    }

    *in_start = SSA_2_HB_TIME(start_hr, start_min, start_sec, start_centi);
    *in_stop  = SSA_2_HB_TIME( end_hr, end_min, end_sec, end_centi);
    return 0;
}
/*
 * Return a pointer to the start of the fieldNum'th (1-based)
 * comma-separated field within [pos, end), or NULL if the buffer has
 * fewer fields.
 *
 * Generalized: fieldNum <= 1 now returns the start of the buffer
 * (previously unreachable — the scan only returned after a comma).
 * Existing callers (fieldNum == 4) are unaffected.
 */
static uint8_t *find_field( uint8_t *pos, uint8_t *end, int fieldNum )
{
    int curFieldID = 1;

    // Field 1 begins at the start of the buffer.
    if ( fieldNum <= 1 )
        return pos;

    while (pos < end)
    {
        if ( *pos++ == ',' )
        {
            curFieldID++;
            if ( curFieldID == fieldNum )
                return pos;   // first byte after the separating comma
        }
    }
    return NULL;
}
/*
* SSA line format:
* Dialogue: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text '\0'
* 1 2 3 4 5 6 7 8 9 10
*
* MKV-SSA packet format:
* ReadOrder,Marked, Style,Name,MarginL,MarginR,MarginV,Effect,Text '\0'
* 1 2 3 4 5 6 7 8 9
*/
/*
 * Convert one SSA "Dialogue:" line into an MKV-SSA packet, which is what
 * libass expects: the Layer field is kept, timing is moved into the
 * buffer's pts fields, and a synthetic ReadOrder is prepended.
 *
 * Returns a new hb_buffer_t on success, or NULL (with a log message) on
 * malformed input or allocation failure.
 */
static hb_buffer_t *ssa_decode_line_to_mkv_ssa( hb_work_object_t * w, uint8_t *in_data, int in_size, int in_sequence )
{
    hb_work_private_t * pv = w->private_data;
    hb_buffer_t * out;
    char *layerField = NULL;   // freed on every exit path (free(NULL) is a no-op)

    // Parse values for in->s.start and in->s.stop
    int64_t in_start, in_stop;
    if ( parse_timing_from_ssa_packet( (char *) in_data, &in_start, &in_stop ) )
        goto fail;

    // Convert the SSA packet to MKV-SSA format, which is what libass expects
    char *mkvIn;
    char *styleToTextFields;

    layerField = malloc( in_size );
    if ( layerField == NULL )
        goto fail;

    // SSA subtitles have an empty layer field (bare ','). The scanf
    // format specifier "%*128[^,]" will not match on a bare ','. There
    // must be at least one non ',' character in the match. So the format
    // specifier is placed directly next to the ':' so that the next
    // expected ' ' after the ':' will be the character it matches on
    // when there is no layer field.
    // (%128[^,] writes at most 129 bytes; the matched field is shorter
    // than in_size because "Dialogue:" precedes it, so layerField fits.)
    if ( sscanf( (char *)in_data, "Dialogue:%128[^,],", layerField ) != 1 )
        goto fail;   // BUGFIX: this path used to leak layerField

    styleToTextFields = (char *)find_field( in_data, in_data + in_size, 4 );
    if ( styleToTextFields == NULL )
        goto fail;

    // The sscanf conversion above will result in an extra space
    // before the layerField. Strip the space.
    char *stripLayerField = layerField;
    for (; *stripLayerField == ' '; stripLayerField++);

    out = hb_buffer_init( in_size + 1 );
    mkvIn = (char*)out->data;

    // "ReadOrder,Layer,Style,Name,MarginL,MarginR,MarginV,Effect,Text"
    mkvIn[0] = '\0';
    sprintf(mkvIn, "%d", pv->readOrder++); // ReadOrder: make this up
    strcat( mkvIn, "," );
    strcat( mkvIn, stripLayerField );
    strcat( mkvIn, "," );
    strcat( mkvIn, (char *)styleToTextFields );

    out->size = strlen(mkvIn) + 1;
    out->s.frametype = HB_FRAME_SUBTITLE;
    out->s.start = in_start;
    out->s.stop = in_stop;
    out->sequence = in_sequence;

    if( out->size == 0 )
    {
        // Defensive only: size is always >= 1 here (strlen + 1).
        hb_buffer_close(&out);
    }

    free( layerField );
    return out;

fail:
    free( layerField );
    hb_log( "decssasub: malformed SSA subtitle packet: %.*s\n", in_size, in_data );
    return NULL;
}
/*
 * Allocate zeroed private state for the SSA decoder and remember the
 * owning job.  Always returns 0.
 */
static int decssaInit( hb_work_object_t * w, hb_job_t * job )
{
    hb_work_private_t * pv = calloc( 1, sizeof( hb_work_private_t ) );

    w->private_data = pv;
    pv->job         = job;
    return 0;
}
/*
 * Per-buffer work function for the SSA decoder: decode each non-empty
 * input packet into a list of output subtitle buffers; pass the empty
 * EOF buffer straight through and finish.
 */
static int decssaWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                       hb_buffer_t ** buf_out )
{
    hb_buffer_t * in = *buf_in;

#if SSA_VERBOSE_PACKETS
    printf("\nPACKET(%"PRId64",%"PRId64"): %.*s\n", in->s.start/90, in->s.stop/90, in->size, in->data);
#endif

    if ( in->size > 0 )
    {
        *buf_out = ssa_decode_packet( w, in );
        return HB_WORK_OK;
    }

    // EOF: hand the empty buffer downstream and signal completion.
    *buf_out = in;
    *buf_in  = NULL;
    return HB_WORK_DONE;
}
/*
 * Release the SSA decoder's private state (all of it lives in the one
 * calloc'd struct).
 */
static void decssaClose( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;
    free( pv );
}
// Work-object descriptor for the SSA subtitle decoder.
// Positional initializer, matching the other work objects in libhb.
hb_work_object_t hb_decssasub =
{
    WORK_DECSSASUB,           // work object id
    "SSA Subtitle Decoder",   // human-readable name
    decssaInit,               // setup
    decssaWork,               // per-buffer processing
    decssaClose               // teardown
};
HandBrake-0.10.2/libhb/encx265.c 0000664 0001752 0001752 00000041240 12535357536 016474 0 ustar handbrake handbrake /* encx265.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#ifdef USE_X265
#include "hb.h"
#include "hb_dict.h"
#include "h265_common.h"
#include "x265.h"
int encx265Init (hb_work_object_t*, hb_job_t*);
int encx265Work (hb_work_object_t*, hb_buffer_t**, hb_buffer_t**);
void encx265Close(hb_work_object_t*);
// Work-object descriptor for the libx265 HEVC encoder.
// Positional initializer, matching the other work objects in libhb.
hb_work_object_t hb_encx265 =
{
    WORK_ENCX265,                      // work object id
    "H.265/HEVC encoder (libx265)",    // human-readable name
    encx265Init,                       // setup
    encx265Work,                       // per-frame encoding
    encx265Close,                      // teardown
};
// Sizing for the frame_info ring buffer of per-frame durations.
// NOTE(review): presumably indexed by the frame's pts shifted by
// FRAME_INFO_MAX2 and masked with FRAME_INFO_MASK (the save/get helpers
// are not in view here) — TODO confirm.
#define FRAME_INFO_MAX2 (8)  // 2^8  = 256;  90000/256    = 352 frames/sec
#define FRAME_INFO_MIN2 (17) // 2^17 = 128K; 90000/131072 = 1.4 frames/sec
#define FRAME_INFO_SIZE (1 << (FRAME_INFO_MIN2 - FRAME_INFO_MAX2 + 1))
#define FRAME_INFO_MASK (FRAME_INFO_SIZE - 1)

// Option-name aliases accepted in the encoder options string
// (user-facing name -> x265 name); NULL pair terminates the list.
static const char * const hb_x265_encopt_synonyms[][2] =
{
    { "me", "motion", },
    { NULL, NULL, },
};
// Private state for the x265 encoder work object.
struct hb_work_private_s
{
    hb_job_t     *job;              // owning job
    x265_encoder *x265;             // encoder instance
    x265_param   *param;            // encoder settings (allocated in encx265Init)
    int64_t       last_stop;        // presumably the stop time of the previous frame — users not in view, TODO confirm
    uint32_t      frames_in;        // frames received for encoding (presumed counter; increments not in view)
    hb_list_t    *delayed_chapters; // queued chapter marks (struct chapter_s) awaiting their frames
    int64_t       next_chapter_pts; // pts of the next chapter mark; AV_NOPTS_VALUE when none pending
    struct
    {
        int64_t duration;           // frame duration in 90 kHz ticks
    }
    frame_info[FRAME_INFO_SIZE];    // ring buffer of per-frame durations (see FRAME_INFO_* above)
    char csvfn[1024];               // x265 stats/CSV file name buffer (usage not in view)
};
// used in delayed_chapters list
// One queued chapter marker: its index plus the pts at which it starts.
struct chapter_s
{
    int index;      // chapter number
    int64_t start;  // chapter start time (pts)
};
/*
 * Thin wrapper over x265_param_parse that logs unknown option names and
 * bad values; returns x265's result code unchanged so callers can still
 * decide whether the failure is fatal.
 */
static int param_parse(x265_param *param, const char *key, const char *value)
{
    const int ret = x265_param_parse(param, key, value);

    // let x265 sanity check the options for us
    if (ret == X265_PARAM_BAD_NAME)
    {
        hb_log("encx265: unknown option '%s'", key);
    }
    else if (ret == X265_PARAM_BAD_VALUE)
    {
        hb_log("encx265: bad argument '%s=%s'", key, value ? value : "(null)");
    }
    return ret;
}
/***********************************************************************
* hb_work_encx265_init
***********************************************************************
*
**********************************************************************/
/*
 * Allocate and configure the x265 encoder for this job.
 *
 * Builds an x265_param from the job's preset/tune, applies
 * HandBrake-specific defaults, parses user encoder options, configures
 * rate control and 2-pass stats, opens the encoder and stashes the
 * bitstream headers in w->config.  Returns 0 on success, 1 on failure
 * (with w->private_data reset to NULL and all partial state released).
 *
 * Fixes vs. the original:
 *  - tune tokenizer no longer dereferences a NULL token when the tune
 *    string consists solely of delimiters;
 *  - vquality comparison is now >= 0 (RF 0 is a valid constant-quality
 *    setting, and hb_display_job_info() already treats >= 0 as CQ);
 *  - csvfn copy is guaranteed NUL-terminated;
 *  - the fail path releases the param struct, the delayed-chapters
 *    list and (if already opened) the encoder instead of leaking them.
 */
int encx265Init(hb_work_object_t *w, hb_job_t *job)
{
    hb_work_private_t *pv = calloc(1, sizeof(hb_work_private_t));
    pv->next_chapter_pts  = AV_NOPTS_VALUE;
    pv->delayed_chapters  = hb_list_init();
    pv->job               = job;
    w->private_data       = pv;
    int ret, vrate, vrate_base;
    x265_nal *nal;
    uint32_t nnal;

    x265_param *param = pv->param = x265_param_alloc();
    if (param == NULL)
    {
        // allocation failure; nothing x265-related to clean up yet
        hb_error("encx265: x265_param_alloc failed.");
        goto fail;
    }

    if (x265_param_default_preset(param,
                                  job->encoder_preset, job->encoder_tune) < 0)
    {
        goto fail;
    }

    /* If the PSNR or SSIM tunes are in use, enable the relevant metric */
    param->bEnablePsnr = param->bEnableSsim = 0;
    if (job->encoder_tune != NULL && *job->encoder_tune)
    {
        char *tmp = strdup(job->encoder_tune);
        char *tok = strtok(tmp, ",./-+");
        // guard against a delimiter-only tune string (strtok returns NULL)
        while (tok != NULL)
        {
            if (!strncasecmp(tok, "psnr", 4))
            {
                param->bEnablePsnr = 1;
                break;
            }
            if (!strncasecmp(tok, "ssim", 4))
            {
                param->bEnableSsim = 1;
                break;
            }
            tok = strtok(NULL, ",./-+");
        }
        free(tmp);
    }

    /*
     * Some HandBrake-specific defaults; users can override them
     * using the encoder_options string.
     */
    hb_reduce(&vrate, &vrate_base, job->vrate, job->vrate_base);
    param->fpsNum      = vrate;
    param->fpsDenom    = vrate_base;
    // one keyframe per second minimum, ten seconds maximum
    param->keyframeMin = (int)((double)vrate / (double)vrate_base + 0.5);
    param->keyframeMax = param->keyframeMin * 10;

    /*
     * Video Signal Type (color description only).
     *
     * Use x265_param_parse (let x265 determine which bEnable
     * flags, if any, should be set in the x265_param struct).
     */
    char colorprim[11], transfer[11], colormatrix[11];
    switch (job->color_matrix_code)
    {
        case 1: // ITU BT.601 DVD or SD TV content (NTSC)
            strcpy(colorprim, "smpte170m");
            strcpy(transfer, "bt709");
            strcpy(colormatrix, "smpte170m");
            break;
        case 2: // ITU BT.601 DVD or SD TV content (PAL)
            strcpy(colorprim, "bt470bg");
            strcpy(transfer, "bt709");
            strcpy(colormatrix, "smpte170m");
            break;
        case 3: // ITU BT.709 HD content
            strcpy(colorprim, "bt709");
            strcpy(transfer, "bt709");
            strcpy(colormatrix, "bt709");
            break;
        case 4: // custom
            snprintf(colorprim, sizeof(colorprim), "%d", job->color_prim);
            snprintf(transfer, sizeof(transfer), "%d", job->color_transfer);
            snprintf(colormatrix, sizeof(colormatrix), "%d", job->color_matrix);
            break;
        default: // detected during scan
            snprintf(colorprim, sizeof(colorprim), "%d", job->title->color_prim);
            snprintf(transfer, sizeof(transfer), "%d", job->title->color_transfer);
            snprintf(colormatrix, sizeof(colormatrix), "%d", job->title->color_matrix);
            break;
    }
    if (param_parse(param, "colorprim", colorprim) ||
        param_parse(param, "transfer", transfer) ||
        param_parse(param, "colormatrix", colormatrix))
    {
        goto fail;
    }

    /* iterate through x265_opts and parse the options */
    hb_dict_entry_t *entry = NULL;
    hb_dict_t *x265_opts = hb_encopts_to_dict(job->encoder_options, job->vcodec);
    while ((entry = hb_dict_next(x265_opts, entry)) != NULL)
    {
        // here's where the strings are passed to libx265 for parsing
        // unknown options or bad values are non-fatal, see encx264.c
        param_parse(param, entry->key, entry->value);
    }
    hb_dict_free(&x265_opts);

    /*
     * Reload colorimetry settings in case custom
     * values were set in the encoder_options string.
     */
    job->color_matrix_code = 4;
    job->color_prim        = param->vui.colorPrimaries;
    job->color_transfer    = param->vui.transferCharacteristics;
    job->color_matrix      = param->vui.matrixCoeffs;

    /*
     * Settings which can't be overriden in the encodeer_options string
     * (muxer-specific settings, resolution, ratecontrol, etc.).
     */
    param->bRepeatHeaders = 0;
    param->sourceWidth    = job->width;
    param->sourceHeight   = job->height;
    if (job->anamorphic.mode)
    {
        /*
         * Let x265 determnine whether to use an aspect ratio
         * index vs. the extended SAR index + SAR width/height.
         */
        char sar[22];
        snprintf(sar, sizeof(sar), "%d:%d",
                 job->anamorphic.par_width, job->anamorphic.par_height);
        if (param_parse(param, "sar", sar))
        {
            goto fail;
        }
    }

    // >= 0: RF 0 is a valid constant-quality value
    // (hb_display_job_info() uses the same >= 0 test)
    if (job->vquality >= 0)
    {
        param->rc.rateControlMode = X265_RC_CRF;
        param->rc.rfConstant      = job->vquality;
    }
    else
    {
        param->rc.rateControlMode = X265_RC_ABR;
        param->rc.bitrate         = job->vbitrate;
        if (job->pass > 0 && job->pass < 3)
        {
            char stats_file[1024] = "";
            char pass[2];
            snprintf(pass, sizeof(pass), "%d", job->pass);
            hb_get_tempory_filename(job->h, stats_file, "x265.log");
            if (param_parse(param, "stats", stats_file) ||
                param_parse(param, "pass", pass))
            {
                goto fail;
            }
            if (job->pass == 1 && job->fastfirstpass == 0 &&
                param_parse(param, "slow-firstpass", "1"))
            {
                goto fail;
            }
        }
    }

    /* statsfile (but not 2-pass) */
    memset(pv->csvfn, 0, sizeof(pv->csvfn));
    if (param->logLevel >= X265_LOG_DEBUG)
    {
        if (param->csvfn == NULL)
        {
            hb_get_tempory_filename(job->h, pv->csvfn, "x265.csv");
            param->csvfn = pv->csvfn;
        }
        else
        {
            // sizeof - 1: the memset above keeps the final byte NUL,
            // guaranteeing termination even when csvfn is oversized
            strncpy(pv->csvfn, param->csvfn, sizeof(pv->csvfn) - 1);
        }
    }

    /* Apply profile and level settings last. */
    if (job->encoder_profile != NULL &&
        strcasecmp(job->encoder_profile, hb_h265_profile_names[0]) != 0 &&
        x265_param_apply_profile(param, job->encoder_profile) < 0)
    {
        goto fail;
    }

    /* we should now know whether B-frames are enabled */
    job->areBframes = (param->bframes > 0) + (param->bframes   > 0 &&
                                              param->bBPyramid > 0);

    pv->x265 = x265_encoder_open(param);
    if (pv->x265 == NULL)
    {
        hb_error("encx265: x265_encoder_open failed.");
        goto fail;
    }

    /*
     * x265's output (headers and bitstream) are in Annex B format.
     *
     * Write the header as is, and let the muxer reformat
     * the extradata and output bitstream properly for us.
     */
    ret = x265_encoder_headers(pv->x265, &nal, &nnal);
    if (ret < 0)
    {
        hb_error("encx265: x265_encoder_headers failed (%d)", ret);
        goto fail;
    }
    if (ret > sizeof(w->config->h265.headers))
    {
        hb_error("encx265: bitstream headers too large (%d)", ret);
        goto fail;
    }
    memcpy(w->config->h265.headers, nal->payload, ret);
    w->config->h265.headers_length = ret;
    return 0;

fail:
    // release everything acquired so far so a failed init doesn't leak
    if (pv->x265 != NULL)
    {
        x265_encoder_close(pv->x265);
    }
    if (pv->param != NULL)
    {
        x265_param_free(pv->param);
    }
    hb_list_close(&pv->delayed_chapters);
    w->private_data = NULL;
    free(pv);
    return 1;
}
/*
 * Tear down the encoder: drain and free any still-pending chapter
 * markers, then release the x265 param and encoder instances.
 *
 * Fix: guard against pv == NULL.  encx265Init() sets w->private_data
 * to NULL on its fail path, so close must tolerate a failed init
 * instead of dereferencing a NULL pointer.
 */
void encx265Close(hb_work_object_t *w)
{
    hb_work_private_t *pv = w->private_data;
    if (pv == NULL)
    {
        // encx265Init() failed; nothing to clean up
        return;
    }
    if (pv->delayed_chapters != NULL)
    {
        struct chapter_s *item;
        while ((item = hb_list_item(pv->delayed_chapters, 0)) != NULL)
        {
            hb_list_rem(pv->delayed_chapters, item);
            free(item);
        }
        hb_list_close(&pv->delayed_chapters);
    }
    x265_param_free(pv->param);
    x265_encoder_close(pv->x265);
    free(pv);
    w->private_data = NULL;
}
/*
* see comments in definition of 'frame_info' in pv struct for description
* of what these routines are doing.
*/
/*
 * Record the input frame's duration, keyed by its start time, so
 * nal_encode() can recover it after the frame emerges (reordered)
 * from the encoder.  See the 'frame_info' comments in the pv struct.
 */
static void save_frame_info(hb_work_private_t *pv, hb_buffer_t *in)
{
    // hash the start time into a small ring-buffer slot
    int slot = (in->s.start >> FRAME_INFO_MAX2) & FRAME_INFO_MASK;
    pv->frame_info[slot].duration = in->s.stop - in->s.start;
}
/*
 * Look up the duration stored by save_frame_info() for the frame
 * whose original start time is 'pts'.
 */
static int64_t get_frame_duration(hb_work_private_t * pv, int64_t pts)
{
    // same hash as save_frame_info(): pts -> ring-buffer slot
    int slot = (pts >> FRAME_INFO_MAX2) & FRAME_INFO_MASK;
    return pv->frame_info[slot].duration;
}
/*
 * Package one encoded picture's NAL units into an hb_buffer_t for the
 * downstream muxer.  Fills in timestamps from the x265 output picture,
 * tags the frame type, and attaches a pending chapter marker when this
 * IDR frame is the one a delayed chapter was waiting for.
 *
 * Returns the buffer, or NULL when there is nothing to emit (no NALs,
 * allocation failure, or an empty payload).
 */
static hb_buffer_t* nal_encode(hb_work_object_t *w,
                               x265_picture *pic_out,
                               x265_nal *nal, uint32_t nnal)
{
    hb_work_private_t *pv = w->private_data;
    hb_job_t *job         = pv->job;
    hb_buffer_t *buf      = NULL;
    int i;

    // nnal is unsigned, so this is effectively a zero check
    if (nnal <= 0)
    {
        return NULL;
    }

    buf = hb_video_buffer_init(job->width, job->height);
    if (buf == NULL)
    {
        return NULL;
    }

    buf->size = 0;
    // copy the bitstream data
    for (i = 0; i < nnal; i++)
    {
        memcpy(buf->data + buf->size, nal[i].payload, nal[i].sizeBytes);
        buf->size += nal[i].sizeBytes;
    }

    // use the pts to get the original frame's duration.
    buf->s.duration     = get_frame_duration(pv, pic_out->pts);
    buf->s.stop         = pic_out->pts + buf->s.duration;
    buf->s.start        = pic_out->pts;
    buf->s.renderOffset = pic_out->dts;
    // NOTE(review): this writes the *h264* config field even though we
    // encode HEVC — presumably the muxer reads init_delay from there
    // regardless of codec; confirm against the mux code before changing.
    if (w->config->h264.init_delay == 0 && pic_out->dts < 0)
    {
        w->config->h264.init_delay -= pic_out->dts;
    }

    // translate x265's slice type into HandBrake's frame type
    switch (pic_out->sliceType)
    {
        case X265_TYPE_IDR:
            buf->s.frametype = HB_FRAME_IDR;
            break;
        case X265_TYPE_I:
            buf->s.frametype = HB_FRAME_I;
            break;
        case X265_TYPE_P:
            buf->s.frametype = HB_FRAME_P;
            break;
        case X265_TYPE_B:
            buf->s.frametype = HB_FRAME_B;
            break;
        case X265_TYPE_BREF:
            buf->s.frametype = HB_FRAME_BREF;
            break;
        default:
            buf->s.frametype = 0;
            break;
    }

    // if this IDR is at/past the chapter point we've been waiting for,
    // attach the oldest delayed chapter marker to this buffer
    if (pv->next_chapter_pts != AV_NOPTS_VALUE &&
        pv->next_chapter_pts <= pic_out->pts &&
        pic_out->sliceType == X265_TYPE_IDR)
    {
        // we're no longer looking for this chapter
        pv->next_chapter_pts = AV_NOPTS_VALUE;

        // get the chapter index from the list
        struct chapter_s *item = hb_list_item(pv->delayed_chapters, 0);
        if (item != NULL)
        {
            // we're done with this chapter
            hb_list_rem(pv->delayed_chapters, item);
            buf->s.new_chap = item->index;
            free(item);

            // we may still have another pending chapter
            item = hb_list_item(pv->delayed_chapters, 0);
            if (item != NULL)
            {
                // we're looking for this one now
                // we still need it, don't remove it
                pv->next_chapter_pts = item->start;
            }
        }
    }

    // discard empty buffers (no video)
    if (buf->size <= 0)
    {
        hb_buffer_close(&buf);
    }
    return buf;
}
/*
 * Feed one raw video frame to x265 and, when the encoder produces an
 * output picture, wrap it via nal_encode().  Returns NULL while the
 * encoder is still buffering (no output yet).
 *
 * Chapter markers are queued (not dropped) because the marked frame may
 * sit in the encoder's lookahead for a while; the frame carrying a
 * marker is forced to IDR so the chapter starts on a keyframe.
 */
static hb_buffer_t* x265_encode(hb_work_object_t *w, hb_buffer_t *in)
{
    hb_work_private_t *pv = w->private_data;
    hb_job_t *job         = pv->job;
    x265_picture pic_in, pic_out;
    x265_nal *nal;
    uint32_t nnal;

    x265_picture_init(pv->param, &pic_in);

    // hand the three planes (Y, U, V) to the encoder by reference
    for (int plane = 0; plane < 3; plane++)
    {
        pic_in.stride[plane] = in->plane[plane].stride;
        pic_in.planes[plane] = in->plane[plane].data;
    }
    pic_in.poc       = pv->frames_in++;
    pic_in.pts       = in->s.start;
    pic_in.bitDepth  = 8;
    pic_in.sliceType = X265_TYPE_AUTO;

    if (in->s.new_chap && job->chapter_markers)
    {
        if (pv->next_chapter_pts == AV_NOPTS_VALUE)
        {
            pv->next_chapter_pts = in->s.start;
        }
        /*
         * Chapter markers are sometimes so close we can get a new one before
         * the previous marker has been through the encoding queue.
         *
         * Dropping markers can cause weird side-effects downstream, including
         * but not limited to missing chapters in the output, so we need to save
         * it somehow.
         */
        struct chapter_s *chap = malloc(sizeof(*chap));
        if (chap != NULL)
        {
            chap->start = in->s.start;
            chap->index = in->s.new_chap;
            hb_list_add(pv->delayed_chapters, chap);
        }
        /* don't let 'work_loop' put a chapter mark on the wrong buffer */
        in->s.new_chap = 0;
        /*
         * Chapters have to start with an IDR frame, so request one here.
         * Since up to 16 frames may be buffered in the encoder, the saved
         * timestamp lets nal_encode() mark the right output buffer as the
         * start of the chapter when this frame finally pops out.
         */
        pic_in.sliceType = X265_TYPE_IDR;
    }

    // warn (but continue) on timestamp discontinuities in the input
    if (pv->last_stop != in->s.start)
    {
        hb_log("encx265 input continuity err: last stop %"PRId64" start %"PRId64,
               pv->last_stop, in->s.start);
    }
    pv->last_stop = in->s.stop;
    save_frame_info(pv, in);

    if (x265_encoder_encode(pv->x265, &nal, &nnal, &pic_in, &pic_out) > 0)
    {
        return nal_encode(w, &pic_out, nal, nnal);
    }
    return NULL;
}
/*
 * Work-loop entry point: encode one input buffer, or — on the EOF
 * sentinel (size <= 0) — flush every frame still buffered inside the
 * encoder, chain the results, and append the EOF buffer at the end.
 *
 * Returns HB_WORK_OK while encoding, HB_WORK_DONE after the flush.
 */
int encx265Work(hb_work_object_t *w, hb_buffer_t **buf_in, hb_buffer_t **buf_out)
{
    hb_work_private_t *pv = w->private_data;
    hb_buffer_t *in       = *buf_in;

    if (in->size <= 0)
    {
        x265_picture pic_out;
        x265_nal *nal;
        uint32_t nnal;
        // build the output chain through a tail pointer so the first
        // buffer lands in *buf_out and the rest are linked behind it
        hb_buffer_t **tail = buf_out;

        // flush delayed frames
        while (x265_encoder_encode(pv->x265, &nal, &nnal, NULL, &pic_out) > 0)
        {
            hb_buffer_t *buf = nal_encode(w, &pic_out, nal, nnal);
            if (buf != NULL)
            {
                *tail = buf;
                tail  = &buf->next;
            }
        }

        // add the EOF to the end of the chain
        *tail   = in;
        *buf_in = NULL;
        return HB_WORK_DONE;
    }

    *buf_out = x265_encode(w, in);
    return HB_WORK_OK;
}
/*
 * Map an x265 option-name synonym back to its canonical spelling.
 * Scans hb_x265_encopt_synonyms ([i][1] = synonym, [i][0] = canonical)
 * and returns the input unchanged when no synonym matches.
 */
const char* hb_x265_encopt_name(const char *name)
{
    for (int i = 0; hb_x265_encopt_synonyms[i][0] != NULL; i++)
    {
        if (strcmp(name, hb_x265_encopt_synonyms[i][1]) == 0)
        {
            return hb_x265_encopt_synonyms[i][0];
        }
    }
    return name;
}
#endif
HandBrake-0.10.2/libhb/work.c 0000664 0001752 0001752 00000201176 12463330511 016252 0 ustar handbrake handbrake /* work.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
#include "libavformat/avformat.h"
#include "openclwrapper.h"
#include "opencl.h"
#ifdef USE_QSV
#include "qsv_common.h"
#include "qsv_filter_pp.h"
#endif
/* Context handed to the work thread (work_func). */
typedef struct
{
    hb_list_t * jobs;           /* queue of jobs to process, drained in order */
    hb_job_t ** current_job;    /* shared slot: job currently being encoded (NULL between jobs) */
    hb_error_code * error;      /* where each job reports its completion status */
    volatile int * die;         /* user-initiated cancellation flag, polled between jobs */
} hb_work_t;
/* Forward declarations for the thread entry points defined below.
 * work_func's declaration now carries a real prototype matching its
 * definition (void *), instead of the old-style empty parameter list. */
static void work_func( void * );
static void do_job( hb_job_t *);
static void work_loop( void * );
static void filter_loop( void * );
/*
 * Fifo capacities and wake thresholds (in buffers) used when wiring the
 * work objects together.  NOTE(review): presumably a producer blocks
 * when its fifo reaches the size limit and is woken once the level
 * drops to the matching *_WAKE value — confirm against the fifo code.
 */
#define FIFO_UNBOUNDED 65536
#define FIFO_UNBOUNDED_WAKE 65535
#define FIFO_LARGE 32
#define FIFO_LARGE_WAKE 16
#define FIFO_SMALL 16
#define FIFO_SMALL_WAKE 15
#define FIFO_MINI 4
#define FIFO_MINI_WAKE 3
/**
* Allocates work object and launches work thread with work_func.
* @param jobs Handle to hb_list_t.
* @param die Handle to user inititated exit indicator.
* @param error Handle to error indicator.
*/
hb_thread_t * hb_work_init( hb_list_t * jobs, volatile int * die, hb_error_code * error, hb_job_t ** job )
{
    // zero-initialized context owned (and eventually freed) by work_func
    hb_work_t * work = calloc( 1, sizeof( hb_work_t ) );

    work->jobs        = jobs;
    work->die         = die;
    work->error       = error;
    work->current_job = job;

    return hb_thread_init( "work", work_func, work, HB_LOW_PRIORITY );
}
/*
 * Publish an initial "working, no progress yet" state for this handle
 * (progress/rates zeroed, time estimates marked unknown with -1).
 */
static void InitWorkState( hb_handle_t * h )
{
    hb_state_t state;

    state.state = HB_STATE_WORKING;
    state.param.working.progress = 0.0;
    state.param.working.rate_cur = 0.0;
    state.param.working.rate_avg = 0.0;
    state.param.working.hours    = -1;
    state.param.working.minutes  = -1;
    state.param.working.seconds  = -1;

    hb_set_state( h, &state );
}
/**
* Iterates through job list and calls do_job for each job.
* @param _work Handle work object.
*/
/**
 * Thread body: pops jobs off the queue and runs do_job() on each until
 * the queue is empty or the user asks to die.  Frees the hb_work_t
 * context on exit.
 * @param _work Handle to the hb_work_t allocated by hb_work_init().
 */
static void work_func( void * _work )
{
    hb_work_t * work = _work;
    hb_job_t  * job;

    hb_log( "%d job(s) to process", hb_list_count( work->jobs ) );

    while( !*work->die && ( job = hb_list_item( work->jobs, 0 ) ) != NULL )
    {
        hb_list_rem( work->jobs, job );
        job->die        = work->die;
        job->done_error = work->error;

        // expose the in-flight job to the rest of the library
        *(work->current_job) = job;
        InitWorkState( job->h );
        do_job( job );
        *(work->current_job) = NULL;
    }

    free( work );
}
/*
 * Return a heap-allocated copy of the registered work object with the
 * given id, or NULL when the id is unknown.  The caller owns the copy.
 *
 * Fix: the malloc result is now checked before being dereferenced;
 * on allocation failure we return NULL, which callers already handle
 * (it is the same result as an unknown id).
 */
hb_work_object_t * hb_get_work( int id )
{
    hb_work_object_t * w;
    for( w = hb_objects; w; w = w->next )
    {
        if( w->id == id )
        {
            hb_work_object_t *wc = malloc( sizeof(*w) );
            if( wc == NULL )
            {
                return NULL;
            }
            *wc = *w;
            return wc;
        }
    }
    return NULL;
}
/*
 * Pick the audio decoder work object for a codec id.
 * Returns NULL for codecs with no decoder here.
 */
hb_work_object_t* hb_codec_decoder(int codec)
{
    // every libavcodec-backed codec shares one decoder work object
    if (codec & HB_ACODEC_FF_MASK)
    {
        return hb_get_work(WORK_DECAVCODEC);
    }
    if (codec == HB_ACODEC_LPCM)
    {
        return hb_get_work(WORK_DECLPCM);
    }
    return NULL;
}
/*
 * Pick the audio encoder work object for a codec id.
 * Returns NULL for codecs with no encoder here.
 */
hb_work_object_t* hb_codec_encoder(int codec)
{
    // libavcodec-backed encoders and AC3 share the same work object
    if ((codec & HB_ACODEC_FF_MASK) || codec == HB_ACODEC_AC3)
    {
        return hb_get_work(WORK_ENCAVCODEC_AUDIO);
    }
    if (codec == HB_ACODEC_LAME)
    {
        return hb_get_work(WORK_ENCLAME);
    }
    if (codec == HB_ACODEC_VORBIS)
    {
        return hb_get_work(WORK_ENCVORBIS);
    }
    if (codec == HB_ACODEC_CA_AAC)
    {
        return hb_get_work(WORK_ENC_CA_AAC);
    }
    if (codec == HB_ACODEC_CA_HAAC)
    {
        return hb_get_work(WORK_ENC_CA_HAAC);
    }
    return NULL;
}
/**
* Displays job parameters in the debug log.
* @param job Handle work hb_job_t.
*/
void hb_display_job_info(hb_job_t *job)
{
    int i;
    hb_title_t *title = job->title;
    hb_audio_t *audio;
    hb_subtitle_t *subtitle;

    /* --- source --- */
    hb_log("job configuration:");
    hb_log( " * source");
    hb_log( " + %s", title->path );

    // range: pts, frames, or chapters (mutually exclusive)
    if( job->pts_to_start || job->pts_to_stop )
    {
        int64_t stop;
        int hr_start, min_start, hr_stop, min_stop;
        float sec_start, sec_stop;
        stop = job->pts_to_start + job->pts_to_stop;

        // convert 90KHz pts values to h:m:s for display
        hr_start = job->pts_to_start / (90000 * 60 * 60);
        min_start = job->pts_to_start / (90000 * 60);
        sec_start = (float)job->pts_to_start / 90000.0 - min_start * 60;
        min_start %= 60;

        hr_stop = stop / (90000 * 60 * 60);
        min_stop = stop / (90000 * 60);
        sec_stop = (float)stop / 90000.0 - min_stop * 60;
        min_stop %= 60;

        hb_log(" + title %d, start %02d:%02d:%02.2f stop %02d:%02d:%02.2f",
               title->index,
               hr_start, min_start, sec_start,
               hr_stop, min_stop, sec_stop);
    }
    else if( job->frame_to_start || job->frame_to_stop )
    {
        hb_log( " + title %d, frames %d to %d", title->index,
                job->frame_to_start, job->frame_to_start + job->frame_to_stop );
    }
    else
    {
        hb_log( " + title %d, chapter(s) %d to %d", title->index,
                job->chapter_start, job->chapter_end );
    }

    if( title->container_name != NULL )
        hb_log( " + container: %s", title->container_name);

    if( title->data_rate )
    {
        hb_log( " + data rate: %d kbps", title->data_rate / 1000 );
    }

    /* --- destination --- */
    hb_log( " * destination");
    hb_log( " + %s", job->file );
    hb_log(" + container: %s", hb_container_get_long_name(job->mux));
    switch (job->mux)
    {
        case HB_MUX_MP4V2:
            if (job->largeFileSize)
                hb_log(" + 64-bit chunk offsets");
            /* fallthrough: both MP4 muxers share the options below */
        case HB_MUX_AV_MP4:
            if (job->mp4_optimize)
                hb_log(" + optimized for HTTP streaming (fast start)");
            if (job->ipod_atom)
                hb_log(" + compatibility atom for iPod 5G");
            break;
        default:
            break;
    }

    if( job->chapter_markers )
    {
        hb_log( " + chapter markers" );
    }

    /* --- video track --- */
    hb_log(" * video track");
#ifdef USE_QSV
    if (hb_qsv_decode_is_enabled(job))
    {
        hb_log(" + decoder: %s",
               hb_qsv_decode_get_codec_name(title->video_codec_param));
    }
    else
#endif
    {
        hb_log(" + decoder: %s", title->video_codec_name);
    }

    if( title->video_bitrate )
    {
        hb_log( " + bitrate %d kbps", title->video_bitrate / 1000 );
    }

    // Filters can modify dimensions. So show them first.
    if( hb_list_count( job->list_filter ) )
    {
        hb_log(" + %s", hb_list_count( job->list_filter) > 1 ? "filters" : "filter" );
        for( i = 0; i < hb_list_count( job->list_filter ); i++ )
        {
            hb_filter_object_t * filter = hb_list_item( job->list_filter, i );
            if( filter->settings )
                hb_log(" + %s (%s)", filter->name, filter->settings);
            else
                hb_log(" + %s (default settings)", filter->name);
            if( filter->info )
            {
                hb_filter_info_t info;
                filter->info( filter, &info );
                if( info.human_readable_desc[0] )
                {
                    hb_log(" + %s", info.human_readable_desc);
                }
            }
        }
    }

    if( job->anamorphic.mode )
    {
        hb_log( " + %s anamorphic", job->anamorphic.mode == 1 ? "strict" : job->anamorphic.mode == 2? "loose" : "custom" );
        if( job->anamorphic.mode == 3 && job->anamorphic.keep_display_aspect )
        {
            hb_log( " + keeping source display aspect ratio");
        }
        hb_log( " + storage dimensions: %d * %d, mod %i",
                job->width, job->height, job->modulus );
        if( job->anamorphic.itu_par )
        {
            hb_log( " + using ITU pixel aspect ratio values");
        }
        hb_log( " + pixel aspect ratio: %i / %i", job->anamorphic.par_width, job->anamorphic.par_height );
        hb_log( " + display dimensions: %.0f * %i",
                (float)( job->width * job->anamorphic.par_width / job->anamorphic.par_height ), job->height );
    }
    else
    {
        hb_log( " + dimensions: %d * %d, mod %i",
                job->width, job->height, job->modulus );
    }

    if ( job->grayscale )
        hb_log( " + grayscale mode" );

    /* --- video encoder (skipped during a subtitle-only scan pass) --- */
    if( !job->indepth_scan )
    {
        /* Video encoder */
        hb_log(" + encoder: %s",
               hb_video_encoder_get_long_name(job->vcodec));

        if (job->encoder_preset && *job->encoder_preset)
        {
            switch (job->vcodec)
            {
                // only these encoders support presets
                case HB_VCODEC_X264:
                case HB_VCODEC_X265:
                case HB_VCODEC_QSV_H264:
                    hb_log(" + preset: %s", job->encoder_preset);
                    /* fallthrough */
                default:
                    break;
            }
        }
        if (job->encoder_tune && *job->encoder_tune)
        {
            switch (job->vcodec)
            {
                case HB_VCODEC_X264:
                case HB_VCODEC_X265:
                    hb_log(" + tune: %s", job->encoder_tune);
                    /* fallthrough */
                default:
                    break;
            }
        }
        if (job->encoder_options != NULL && *job->encoder_options &&
            job->vcodec != HB_VCODEC_THEORA)
        {
            hb_log(" + options: %s", job->encoder_options);
        }
        if (job->encoder_profile && *job->encoder_profile)
        {
            switch (job->vcodec)
            {
                case HB_VCODEC_X264:
                case HB_VCODEC_X265:
                case HB_VCODEC_QSV_H264:
                    hb_log(" + profile: %s", job->encoder_profile);
                    /* fallthrough */
                default:
                    break;
            }
        }
        if (job->encoder_level && *job->encoder_level)
        {
            switch (job->vcodec)
            {
                case HB_VCODEC_X264:
                case HB_VCODEC_QSV_H264:
                    hb_log(" + level: %s", job->encoder_level);
                    /* fallthrough */
                default:
                    break;
            }
        }

        // vquality >= 0 means constant quality, else ABR bitrate
        if (job->vquality >= 0)
        {
            hb_log(" + quality: %.2f (%s)", job->vquality,
                   hb_video_quality_get_name(job->vcodec));
        }
        else
        {
            hb_log( " + bitrate: %d kbps, pass: %d", job->vbitrate, job->pass );
            if(job->pass == 1 && job->fastfirstpass == 1 &&
               (job->vcodec == HB_VCODEC_X264 || job->vcodec == HB_VCODEC_X265))
            {
                hb_log( " + fast first pass" );
                if (job->vcodec == HB_VCODEC_X264)
                {
                    hb_log( " + options: ref=1:8x8dct=0:me=dia:trellis=0" );
                    hb_log( " analyse=i4x4 (if originally enabled, else analyse=none)" );
                    hb_log( " subq=2 (if originally greater than 2, else subq unchanged)" );
                }
            }
        }

        if (job->color_matrix_code && (job->vcodec == HB_VCODEC_X264 ||
                                       job->mux == HB_MUX_MP4V2))
        {
            // color matrix is set:
            // 1) at the stream level (x264 only),
            // 2) at the container level (mp4v2 only)
            hb_log(" + custom color matrix: %s",
                   job->color_matrix_code == 1 ? "ITU Bt.601 (NTSC)" :
                   job->color_matrix_code == 2 ? "ITU Bt.601 (PAL)" :
                   job->color_matrix_code == 3 ? "ITU Bt.709 (HD)" : "Custom");
        }
    }

    /* --- subtitles --- */
    if( job->indepth_scan )
    {
        hb_log( " * Foreign Audio Search: %s%s%s",
                job->select_subtitle_config.dest == RENDERSUB ? "Render/Burn-in" : "Passthrough",
                job->select_subtitle_config.force ? ", Forced Only" : "",
                job->select_subtitle_config.default_track ? ", Default" : "" );
    }

    for( i = 0; i < hb_list_count( job->list_subtitle ); i++ )
    {
        subtitle = hb_list_item( job->list_subtitle, i );
        if( subtitle )
        {
            if( job->indepth_scan )
            {
                hb_log( " + subtitle, %s (track %d, id 0x%x) %s [%s]",
                        subtitle->lang, subtitle->track, subtitle->id,
                        subtitle->format == PICTURESUB ? "Picture" : "Text",
                        hb_subsource_name( subtitle->source ) );
            }
            else if( subtitle->source == SRTSUB )
            {
                /* For SRT, print offset and charset too */
                hb_log( " * subtitle track %d, %s (track %d, id 0x%x) Text [SRT] -> %s%s, offset: %"PRId64", charset: %s",
                        subtitle->out_track, subtitle->lang, subtitle->track, subtitle->id,
                        subtitle->config.dest == RENDERSUB ? "Render/Burn-in" : "Passthrough",
                        subtitle->config.default_track ? ", Default" : "",
                        subtitle->config.offset, subtitle->config.src_codeset );
            }
            else
            {
                hb_log( " * subtitle track %d, %s (track %d, id 0x%x) %s [%s] -> %s%s%s",
                        subtitle->out_track, subtitle->lang, subtitle->track, subtitle->id,
                        subtitle->format == PICTURESUB ? "Picture" : "Text",
                        hb_subsource_name( subtitle->source ),
                        subtitle->config.dest == RENDERSUB ? "Render/Burn-in" : "Passthrough",
                        subtitle->config.force ? ", Forced Only" : "",
                        subtitle->config.default_track ? ", Default" : "" );
            }
        }
    }

    /* --- audio tracks (skipped during a subtitle-only scan pass) --- */
    if( !job->indepth_scan )
    {
        for( i = 0; i < hb_list_count( job->list_audio ); i++ )
        {
            audio = hb_list_item( job->list_audio, i );
            hb_log( " * audio track %d", audio->config.out.track );
            if( audio->config.out.name )
                hb_log( " + name: %s", audio->config.out.name );
            hb_log( " + decoder: %s (track %d, id 0x%x)", audio->config.lang.description, audio->config.in.track + 1, audio->id );
            if (audio->config.in.bitrate >= 1000)
                hb_log(" + bitrate: %d kbps, samplerate: %d Hz",
                       audio->config.in.bitrate / 1000,
                       audio->config.in.samplerate);
            else
                hb_log(" + samplerate: %d Hz",
                       audio->config.in.samplerate);
            if( audio->config.out.codec & HB_ACODEC_PASS_FLAG )
            {
                // passthrough: no mixdown/encoder details to report
                hb_log(" + %s",
                       hb_audio_encoder_get_name(audio->config.out.codec));
            }
            else
            {
                hb_log(" + mixdown: %s",
                       hb_mixdown_get_name(audio->config.out.mixdown));
                if( audio->config.out.normalize_mix_level != 0 )
                {
                    hb_log( " + normalized mixing levels" );
                }
                if( audio->config.out.gain != 0.0 )
                {
                    hb_log( " + gain: %.fdB", audio->config.out.gain );
                }
                if (audio->config.out.dynamic_range_compression > 0.0f &&
                    hb_audio_can_apply_drc(audio->config.in.codec,
                                           audio->config.in.codec_param,
                                           audio->config.out.codec))
                {
                    hb_log( " + dynamic range compression: %f", audio->config.out.dynamic_range_compression );
                }
                if (hb_audio_dither_is_supported(audio->config.out.codec))
                {
                    hb_log(" + dither: %s",
                           hb_audio_dither_get_description(audio->config.out.dither_method));
                }
                hb_log(" + encoder: %s",
                       hb_audio_encoder_get_long_name(audio->config.out.codec));
                if (audio->config.out.bitrate > 0)
                {
                    hb_log(" + bitrate: %d kbps, samplerate: %d Hz",
                           audio->config.out.bitrate, audio->config.out.samplerate);
                }
                else if (audio->config.out.quality != HB_INVALID_AUDIO_QUALITY)
                {
                    hb_log(" + quality: %.2f, samplerate: %d Hz",
                           audio->config.out.quality, audio->config.out.samplerate);
                }
                else if (audio->config.out.samplerate > 0)
                {
                    hb_log(" + samplerate: %d Hz",
                           audio->config.out.samplerate);
                }
                if (audio->config.out.compression_level >= 0)
                {
                    hb_log(" + compression level: %.2f",
                           audio->config.out.compression_level);
                }
            }
        }
    }
}
/* Corrects framerates when actual duration and frame count numbers are known. */
/* Corrects framerates when actual duration and frame count numbers are known. */
void correct_framerate( hb_job_t * job )
{
    hb_interjob_t * interjob = hb_interjob_get( job->h );

    // ignore interjob data that came from a different encode
    if( ( job->sequence_id & 0xFFFFFF ) != ( interjob->last_job & 0xFFFFFF) )
    {
        return;
    }

    // compute actual output vrate from first pass
    double measured_fps = (double)interjob->out_frame_count * 90000 / interjob->total_time;
    interjob->vrate      = job->vrate_base * measured_fps;
    interjob->vrate_base = job->vrate_base;
}
/**
* Job initialization rountine.
* Initializes fifos.
* Creates work objects for synchronizer, video decoder, video renderer, video decoder, audio decoder, audio encoder, reader, muxer.
* Launches thread for each work object with work_loop.
* Loops while monitoring status of work threads and fifos.
* Exits loop when conversion is done and fifos are empty.
* Closes threads and frees fifos.
* @param job Handle work hb_job_t.
*/
static void do_job(hb_job_t *job)
{
int i;
hb_title_t *title;
hb_interjob_t *interjob;
hb_work_object_t *w;
hb_work_object_t *sync;
hb_work_object_t *muxer;
hb_work_object_t *reader = hb_get_work(WORK_READER);
hb_audio_t *audio;
hb_subtitle_t *subtitle;
unsigned int subtitle_highest = 0;
unsigned int subtitle_lowest = 0;
unsigned int subtitle_lowest_id = 0;
unsigned int subtitle_forced_id = 0;
unsigned int subtitle_forced_hits = 0;
unsigned int subtitle_hit = 0;
title = job->title;
interjob = hb_interjob_get( job->h );
if( job->pass == 2 )
{
correct_framerate( job );
}
job->list_work = hb_list_init();
/* OpenCL */
if (job->use_opencl && (hb_ocl_init() || hb_init_opencl_run_env(0, NULL, "-I.")))
{
hb_log("work: failed to initialize OpenCL environment, using fallback");
job->use_opencl = 0;
hb_ocl_close();
}
hb_log( "starting job" );
/* Look for the scanned subtitle in the existing subtitle list
* select_subtitle implies that we did a scan. */
if( !job->indepth_scan && interjob->select_subtitle )
{
/* Disable forced subtitles if we didn't find any in the scan, so that
* we display normal subtitles instead. */
if( interjob->select_subtitle->config.force &&
interjob->select_subtitle->forced_hits == 0 )
{
interjob->select_subtitle->config.force = 0;
}
for( i = 0; i < hb_list_count( job->list_subtitle ); )
{
subtitle = hb_list_item( job->list_subtitle, i );
if( subtitle )
{
/* Remove the scanned subtitle from the list if
* it would result in:
* - an emty track (forced and no forced hits)
* - an identical, duplicate subtitle track:
* -> both (or neither) are forced
* -> subtitle is not forced but all its hits are forced */
if( ( interjob->select_subtitle->id == subtitle->id ) &&
( ( subtitle->config.force &&
interjob->select_subtitle->forced_hits == 0 ) ||
( subtitle->config.force == interjob->select_subtitle->config.force ) ||
( !subtitle->config.force &&
interjob->select_subtitle->hits == interjob->select_subtitle->forced_hits ) ) )
{
hb_list_rem( job->list_subtitle, subtitle );
free( subtitle );
continue;
}
/* Adjust output track number, in case we removed one.
* Output tracks sadly still need to be in sequential order.
* Note: out.track starts at 1, i starts at 0, and track 1 is interjob->select_subtitle */
subtitle->out_track = ++i + 1;
}
else
{
// avoid infinite loop is subtitle == NULL
i++;
}
}
/* Add the subtitle that we found on the subtitle scan pass.
*
* Make sure it's the first subtitle in the list so that it becomes the
* first burned subtitle (explicitly or after sanitizing) - which should
* ensure that it doesn't get dropped. */
interjob->select_subtitle->out_track = 1;
if (job->pass == 0 || job->pass == 2)
{
// final pass, interjob->select_subtitle is no longer needed
hb_list_insert(job->list_subtitle, 0, interjob->select_subtitle);
interjob->select_subtitle = NULL;
}
else
{
// this is not the final pass, so we need to copy it instead
hb_list_insert(job->list_subtitle, 0, hb_subtitle_copy(interjob->select_subtitle));
}
}
if ( !job->indepth_scan )
{
// Sanitize subtitles
uint8_t one_burned = 0;
for( i = 0; i < hb_list_count( job->list_subtitle ); )
{
subtitle = hb_list_item( job->list_subtitle, i );
if ( subtitle->config.dest == RENDERSUB )
{
if ( one_burned )
{
if ( !hb_subtitle_can_pass(subtitle->source, job->mux) )
{
hb_log( "More than one subtitle burn-in requested, dropping track %d.", i );
hb_list_rem( job->list_subtitle, subtitle );
free( subtitle );
continue;
}
else
{
hb_log( "More than one subtitle burn-in requested. Changing track %d to soft subtitle.", i );
subtitle->config.dest = PASSTHRUSUB;
}
}
else if ( !hb_subtitle_can_burn(subtitle->source) )
{
hb_log( "Subtitle burn-in requested and input track can not be rendered. Changing track %d to soft subtitle.", i );
subtitle->config.dest = PASSTHRUSUB;
}
else
{
one_burned = 1;
}
}
if ( subtitle->config.dest == PASSTHRUSUB &&
!hb_subtitle_can_pass(subtitle->source, job->mux) )
{
if ( !one_burned )
{
hb_log( "Subtitle pass-thru requested and input track is not compatible with container. Changing track %d to burned-in subtitle.", i );
subtitle->config.dest = RENDERSUB;
subtitle->config.default_track = 0;
one_burned = 1;
}
else
{
hb_log( "Subtitle pass-thru requested and input track is not compatible with container. One track already burned, dropping track %d.", i );
hb_list_rem( job->list_subtitle, subtitle );
free( subtitle );
continue;
}
}
/* Adjust output track number, in case we removed one.
* Output tracks sadly still need to be in sequential order.
* Note: out.track starts at 1, i starts at 0 */
subtitle->out_track = ++i;
}
if (one_burned)
{
// Add subtitle rendering filter
// Note that if the filter is already in the filter chain, this
// has no effect. Note also that this means the front-end is
// not required to add the subtitle rendering filter since
// we will always try to do it here.
hb_filter_object_t *filter = hb_filter_init(HB_FILTER_RENDER_SUB);
char *filter_settings = hb_strdup_printf("%d:%d:%d:%d",
job->crop[0],
job->crop[1],
job->crop[2],
job->crop[3]);
hb_add_filter(job, filter, filter_settings);
free(filter_settings);
}
}
#ifdef USE_QSV
/*
* XXX: mfxCoreInterface's CopyFrame doesn't work in old drivers, and our
* workaround is really slow. If we have validated CPU-based filters in
* the list and we can't use CopyFrame, disable QSV decoding until a
* better solution is implemented.
*/
if (hb_qsv_copyframe_is_slow(job->vcodec))
{
if (job->list_filter != NULL)
{
int encode_only = 0;
for (i = 0; i < hb_list_count(job->list_filter) && !encode_only; i++)
{
hb_filter_object_t *filter = hb_list_item(job->list_filter, i);
switch (filter->id)
{
// validated, CPU-based filters
case HB_FILTER_ROTATE:
case HB_FILTER_RENDER_SUB:
encode_only = 1;
break;
// CPU-based deinterlace (validated)
case HB_FILTER_DEINTERLACE:
if (filter->settings != NULL &&
strcasecmp(filter->settings, "qsv") != 0)
{
encode_only = 1;
}
break;
// other filters will be removed
default:
break;
}
}
if (encode_only)
{
hb_log("do_job: QSV: possible CopyFrame bug, using encode-only path");
if (hb_get_cpu_platform() >= HB_CPU_PLATFORM_INTEL_IVB)
{
hb_log("do_job: QSV: please update your Intel graphics driver to version 9.18.10.3257 or later");
}
job->qsv.decode = 0;
}
}
}
/*
* When QSV is used for decoding, not all CPU-based filters are supported,
* so we need to do a little extra setup here.
*/
if (hb_qsv_decode_is_enabled(job))
{
int vpp_settings[7];
int num_cpu_filters = 0;
hb_filter_object_t *filter;
// default values for VPP filter
vpp_settings[0] = job->title->width;
vpp_settings[1] = job->title->height;
vpp_settings[2] = job->title->crop[0];
vpp_settings[3] = job->title->crop[1];
vpp_settings[4] = job->title->crop[2];
vpp_settings[5] = job->title->crop[3];
vpp_settings[6] = 0; // deinterlace: off
if (job->list_filter != NULL && hb_list_count(job->list_filter) > 0)
{
while (hb_list_count(job->list_filter) > num_cpu_filters)
{
filter = hb_list_item(job->list_filter, num_cpu_filters);
switch (filter->id)
{
// cropping and scaling always done via VPP filter
case HB_FILTER_CROP_SCALE:
if (filter->settings == NULL || *filter->settings == '\0')
{
// VPP defaults were set above, so not a problem
// however, this should never happen, print an error
hb_error("do_job: '%s': no settings!", filter->name);
}
else
{
sscanf(filter->settings, "%d:%d:%d:%d:%d:%d",
&vpp_settings[0], &vpp_settings[1],
&vpp_settings[2], &vpp_settings[3],
&vpp_settings[4], &vpp_settings[5]);
}
hb_list_rem(job->list_filter, filter);
hb_filter_close(&filter);
break;
// pick VPP or CPU deinterlace depending on settings
case HB_FILTER_DEINTERLACE:
if (filter->settings == NULL ||
strcasecmp(filter->settings, "qsv") == 0)
{
// deinterlacing via VPP filter
vpp_settings[6] = 1;
hb_list_rem(job->list_filter, filter);
hb_filter_close(&filter);
}
else
{
// validated
num_cpu_filters++;
}
break;
// then, validated filters
case HB_FILTER_ROTATE: // TODO: use Media SDK for this
case HB_FILTER_RENDER_SUB:
num_cpu_filters++;
break;
// finally, drop all unsupported filters
default:
hb_log("do_job: QSV: full path, removing unsupported filter '%s'",
filter->name);
hb_list_rem(job->list_filter, filter);
hb_filter_close(&filter);
break;
}
}
if (num_cpu_filters > 0)
{
// we need filters to copy to system memory and back
filter = hb_filter_init(HB_FILTER_QSV_PRE);
hb_add_filter(job, filter, NULL);
filter = hb_filter_init(HB_FILTER_QSV_POST);
hb_add_filter(job, filter, NULL);
}
if (vpp_settings[0] != job->title->width ||
vpp_settings[1] != job->title->height ||
vpp_settings[2] >= 1 /* crop */ ||
vpp_settings[3] >= 1 /* crop */ ||
vpp_settings[4] >= 1 /* crop */ ||
vpp_settings[5] >= 1 /* crop */ ||
vpp_settings[6] >= 1 /* deinterlace */)
{
// we need the VPP filter
char *settings = hb_strdup_printf("%d:%d:%d:%d:%d:%d_dei:%d",
vpp_settings[0],
vpp_settings[1],
vpp_settings[2],
vpp_settings[3],
vpp_settings[4],
vpp_settings[5],
vpp_settings[6]);
filter = hb_filter_init(HB_FILTER_QSV);
hb_add_filter(job, filter, settings);
free(settings);
}
}
}
#endif
// Filters have an effect on settings.
// So initialize the filters and update the job.
if( job->list_filter && hb_list_count( job->list_filter ) )
{
hb_filter_init_t init;
init.job = job;
init.pix_fmt = AV_PIX_FMT_YUV420P;
init.width = title->width;
init.height = title->height;
/* DXVA2 */
init.use_dxva = hb_use_dxva(title);
init.par_width = job->anamorphic.par_width;
init.par_height = job->anamorphic.par_height;
memcpy(init.crop, title->crop, sizeof(int[4]));
init.vrate_base = title->rate_base;
init.vrate = title->rate;
init.cfr = 0;
for( i = 0; i < hb_list_count( job->list_filter ); )
{
hb_filter_object_t * filter = hb_list_item( job->list_filter, i );
if( filter->init( filter, &init ) )
{
hb_log( "Failure to initialise filter '%s', disabling",
filter->name );
hb_list_rem( job->list_filter, filter );
hb_filter_close( &filter );
continue;
}
i++;
}
job->width = init.width;
job->height = init.height;
job->anamorphic.par_width = init.par_width;
job->anamorphic.par_height = init.par_height;
memcpy(job->crop, init.crop, sizeof(int[4]));
job->vrate_base = init.vrate_base;
job->vrate = init.vrate;
job->cfr = init.cfr;
}
if( job->anamorphic.mode )
{
/* While x264 is smart enough to reduce fractions on its own, libavcodec and
* the MacGUI need some help with the math, so lose superfluous factors. */
hb_reduce( &job->anamorphic.par_width, &job->anamorphic.par_height,
job->anamorphic.par_width, job->anamorphic.par_height );
if( job->vcodec & HB_VCODEC_FFMPEG_MASK )
{
/* Just to make working with ffmpeg even more fun,
* lavc's MPEG-4 encoder can't handle PAR values >= 255,
* even though AVRational does. Adjusting downwards
* distorts the display aspect slightly, but such is life. */
while( ( job->anamorphic.par_width & ~0xFF ) ||
( job->anamorphic.par_height & ~0xFF ) )
{
job->anamorphic.par_width >>= 1;
job->anamorphic.par_height >>= 1;
hb_reduce( &job->anamorphic.par_width, &job->anamorphic.par_height,
job->anamorphic.par_width, job->anamorphic.par_height );
}
}
}
#ifdef USE_QSV
if (hb_qsv_decode_is_enabled(job))
{
job->fifo_mpeg2 = hb_fifo_init( FIFO_MINI, FIFO_MINI_WAKE );
job->fifo_raw = hb_fifo_init( FIFO_MINI, FIFO_MINI_WAKE );
job->fifo_sync = hb_fifo_init( FIFO_MINI, FIFO_MINI_WAKE );
job->fifo_render = hb_fifo_init( FIFO_MINI, FIFO_MINI_WAKE );
job->fifo_mpeg4 = hb_fifo_init( FIFO_MINI, FIFO_MINI_WAKE );
}
else
#endif
{
job->fifo_mpeg2 = hb_fifo_init( FIFO_LARGE, FIFO_LARGE_WAKE );
job->fifo_raw = hb_fifo_init( FIFO_SMALL, FIFO_SMALL_WAKE );
job->fifo_sync = hb_fifo_init( FIFO_SMALL, FIFO_SMALL_WAKE );
job->fifo_mpeg4 = hb_fifo_init( FIFO_LARGE, FIFO_LARGE_WAKE );
job->fifo_render = NULL; // Attached to filter chain
}
/* Audio fifos must be initialized before sync */
if (!job->indepth_scan)
{
// apply Auto Passthru settings
hb_autopassthru_apply_settings(job);
// sanitize audio settings
for (i = 0; i < hb_list_count(job->list_audio);)
{
audio = hb_list_item(job->list_audio, i);
if (audio->config.out.codec == HB_ACODEC_AUTO_PASS)
{
// Auto Passthru should have been handled above
// remove track to avoid a crash
hb_log("Auto Passthru error, dropping track %d",
audio->config.out.track);
hb_list_rem(job->list_audio, audio);
free(audio);
continue;
}
if ((audio->config.out.codec & HB_ACODEC_PASS_FLAG) &&
!(audio->config.in.codec &
audio->config.out.codec & HB_ACODEC_PASS_MASK))
{
hb_log("Passthru requested and input codec is not the same as output codec for track %d, dropping track",
audio->config.out.track);
hb_list_rem(job->list_audio, audio);
free(audio);
continue;
}
/* Adjust output track number, in case we removed one.
* Output tracks sadly still need to be in sequential order.
* Note: out.track starts at 1, i starts at 0 */
audio->config.out.track = ++i;
}
int best_mixdown = 0;
int best_bitrate = 0;
int best_samplerate = 0;
for (i = 0; i < hb_list_count(job->list_audio); i++)
{
audio = hb_list_item(job->list_audio, i);
/* set up the audio work structures */
audio->priv.fifo_raw = hb_fifo_init(FIFO_SMALL, FIFO_SMALL_WAKE);
audio->priv.fifo_sync = hb_fifo_init(FIFO_SMALL, FIFO_SMALL_WAKE);
audio->priv.fifo_out = hb_fifo_init(FIFO_LARGE, FIFO_LARGE_WAKE);
audio->priv.fifo_in = hb_fifo_init(FIFO_LARGE, FIFO_LARGE_WAKE);
/* Passthru audio */
if (audio->config.out.codec & HB_ACODEC_PASS_FLAG)
{
// Muxer needs these to be set correctly in order to
// set audio track MP4 time base.
audio->config.out.samples_per_frame =
audio->config.in.samples_per_frame;
audio->config.out.samplerate = audio->config.in.samplerate;
continue;
}
/* Vorbis language information */
if (audio->config.out.codec == HB_ACODEC_VORBIS)
audio->priv.config.vorbis.language = audio->config.lang.simple;
/* sense-check the requested samplerate */
if (audio->config.out.samplerate <= 0)
{
// if not specified, set to same as input
audio->config.out.samplerate = audio->config.in.samplerate;
}
best_samplerate =
hb_audio_samplerate_get_best(audio->config.out.codec,
audio->config.out.samplerate,
NULL);
if (best_samplerate != audio->config.out.samplerate)
{
hb_log("work: sanitizing track %d unsupported samplerate %d Hz to %s kHz",
audio->config.out.track, audio->config.out.samplerate,
hb_audio_samplerate_get_name(best_samplerate));
audio->config.out.samplerate = best_samplerate;
}
/* sense-check the requested mixdown */
if (audio->config.out.mixdown <= HB_AMIXDOWN_NONE)
{
/* Mixdown not specified, set the default mixdown */
audio->config.out.mixdown =
hb_mixdown_get_default(audio->config.out.codec,
audio->config.in.channel_layout);
hb_log("work: mixdown not specified, track %d setting mixdown %s",
audio->config.out.track,
hb_mixdown_get_name(audio->config.out.mixdown));
}
else
{
best_mixdown =
hb_mixdown_get_best(audio->config.out.codec,
audio->config.in.channel_layout,
audio->config.out.mixdown);
if (audio->config.out.mixdown != best_mixdown)
{
/* log the output mixdown */
hb_log("work: sanitizing track %d mixdown %s to %s",
audio->config.out.track,
hb_mixdown_get_name(audio->config.out.mixdown),
hb_mixdown_get_name(best_mixdown));
audio->config.out.mixdown = best_mixdown;
}
}
/* sense-check the requested compression level */
if (audio->config.out.compression_level < 0)
{
audio->config.out.compression_level =
hb_audio_compression_get_default(audio->config.out.codec);
if (audio->config.out.compression_level >= 0)
{
hb_log("work: compression level not specified, track %d setting compression level %.2f",
audio->config.out.track,
audio->config.out.compression_level);
}
}
else
{
float best_compression =
hb_audio_compression_get_best(audio->config.out.codec,
audio->config.out.compression_level);
if (best_compression != audio->config.out.compression_level)
{
if (best_compression == -1)
{
hb_log("work: track %d, compression level not supported by codec",
audio->config.out.track);
}
else
{
hb_log("work: sanitizing track %d compression level %.2f to %.2f",
audio->config.out.track,
audio->config.out.compression_level,
best_compression);
}
audio->config.out.compression_level = best_compression;
}
}
/* sense-check the requested quality */
if (audio->config.out.quality != HB_INVALID_AUDIO_QUALITY)
{
float best_quality =
hb_audio_quality_get_best(audio->config.out.codec,
audio->config.out.quality);
if (best_quality != audio->config.out.quality)
{
if (best_quality == HB_INVALID_AUDIO_QUALITY)
{
hb_log("work: track %d, quality mode not supported by codec",
audio->config.out.track);
}
else
{
hb_log("work: sanitizing track %d quality %.2f to %.2f",
audio->config.out.track,
audio->config.out.quality, best_quality);
}
audio->config.out.quality = best_quality;
}
}
/* sense-check the requested bitrate */
if (audio->config.out.quality == HB_INVALID_AUDIO_QUALITY)
{
if (audio->config.out.bitrate <= 0)
{
/* Bitrate not specified, set the default bitrate */
audio->config.out.bitrate =
hb_audio_bitrate_get_default(audio->config.out.codec,
audio->config.out.samplerate,
audio->config.out.mixdown);
if (audio->config.out.bitrate > 0)
{
hb_log("work: bitrate not specified, track %d setting bitrate %d Kbps",
audio->config.out.track,
audio->config.out.bitrate);
}
}
else
{
best_bitrate =
hb_audio_bitrate_get_best(audio->config.out.codec,
audio->config.out.bitrate,
audio->config.out.samplerate,
audio->config.out.mixdown);
if (best_bitrate > 0 &&
best_bitrate != audio->config.out.bitrate)
{
/* log the output bitrate */
hb_log("work: sanitizing track %d bitrate %d to %d Kbps",
audio->config.out.track,
audio->config.out.bitrate, best_bitrate);
}
audio->config.out.bitrate = best_bitrate;
}
}
/* sense-check the requested dither */
if (hb_audio_dither_is_supported(audio->config.out.codec))
{
if (audio->config.out.dither_method ==
hb_audio_dither_get_default())
{
/* "auto", enable with default settings */
audio->config.out.dither_method =
hb_audio_dither_get_default_method();
}
}
else if (audio->config.out.dither_method !=
hb_audio_dither_get_default())
{
/* specific dither requested but dithering not supported */
hb_log("work: track %d, dithering not supported by codec",
audio->config.out.track);
}
}
}
/* Synchronization */
sync = hb_sync_init( job );
/* Video decoder */
if (title->video_codec == WORK_NONE)
{
hb_error("No video decoder set!");
goto cleanup;
}
hb_list_add(job->list_work, (w = hb_get_work(title->video_codec)));
w->codec_param = title->video_codec_param;
w->fifo_in = job->fifo_mpeg2;
w->fifo_out = job->fifo_raw;
for( i = 0; i < hb_list_count( job->list_subtitle ); i++ )
{
subtitle = hb_list_item( job->list_subtitle, i );
if( subtitle )
{
subtitle->fifo_in = hb_fifo_init( FIFO_SMALL, FIFO_SMALL_WAKE );
// Must set capacity of the raw-FIFO to be set >= the maximum number of subtitle
// lines that could be decoded prior to a video frame in order to prevent the following
// deadlock condition:
// 1. Subtitle decoder blocks trying to generate more subtitle lines than will fit in the FIFO.
// 2. Blocks the processing of further subtitle packets read from the input stream.
// 3. And that blocks the processing of any further video packets read from the input stream.
// 4. And that blocks the sync work-object from running, which is needed to consume the subtitle lines in the raw-FIFO.
// Since that number is unbounded, the FIFO must be made (effectively) unbounded in capacity.
subtitle->fifo_raw = hb_fifo_init( FIFO_UNBOUNDED, FIFO_UNBOUNDED_WAKE );
subtitle->fifo_sync = hb_fifo_init( FIFO_SMALL, FIFO_SMALL_WAKE );
subtitle->fifo_out = hb_fifo_init( FIFO_SMALL, FIFO_SMALL_WAKE );
w = hb_get_work( subtitle->codec );
w->fifo_in = subtitle->fifo_in;
w->fifo_out = subtitle->fifo_raw;
w->subtitle = subtitle;
hb_list_add( job->list_work, w );
}
}
/* Set up the video filter fifo pipeline */
if( !job->indepth_scan )
{
if( job->list_filter )
{
int filter_count = hb_list_count( job->list_filter );
int i;
hb_fifo_t * fifo_in = job->fifo_sync;
for( i = 0; i < filter_count; i++ )
{
hb_filter_object_t * filter = hb_list_item( job->list_filter, i );
filter->fifo_in = fifo_in;
filter->fifo_out = hb_fifo_init( FIFO_MINI, FIFO_MINI_WAKE );
fifo_in = filter->fifo_out;
}
job->fifo_render = fifo_in;
}
else if ( !job->list_filter )
{
hb_log("work: Internal Error: no filters");
job->fifo_render = NULL;
}
/* Video encoder */
switch( job->vcodec )
{
case HB_VCODEC_FFMPEG_MPEG4:
w = hb_get_work( WORK_ENCAVCODEC );
w->codec_param = AV_CODEC_ID_MPEG4;
break;
case HB_VCODEC_FFMPEG_MPEG2:
w = hb_get_work( WORK_ENCAVCODEC );
w->codec_param = AV_CODEC_ID_MPEG2VIDEO;
break;
case HB_VCODEC_FFMPEG_VP8:
w = hb_get_work( WORK_ENCAVCODEC );
w->codec_param = AV_CODEC_ID_VP8;
break;
case HB_VCODEC_X264:
w = hb_get_work( WORK_ENCX264 );
break;
case HB_VCODEC_QSV_H264:
w = hb_get_work( WORK_ENCQSV );
break;
case HB_VCODEC_THEORA:
w = hb_get_work( WORK_ENCTHEORA );
break;
#ifdef USE_X265
case HB_VCODEC_X265:
w = hb_get_work( WORK_ENCX265 );
break;
#endif
}
// Handle case where there are no filters.
// This really should never happen.
if ( job->fifo_render )
w->fifo_in = job->fifo_render;
else
w->fifo_in = job->fifo_sync;
w->fifo_out = job->fifo_mpeg4;
w->config = &job->config;
hb_list_add( job->list_work, w );
for( i = 0; i < hb_list_count( job->list_audio ); i++ )
{
audio = hb_list_item( job->list_audio, i );
/*
* Audio Decoder Thread
*/
if ( audio->priv.fifo_in )
{
if ( ( w = hb_codec_decoder( audio->config.in.codec ) ) == NULL )
{
hb_error("Invalid input codec: %d", audio->config.in.codec);
*job->done_error = HB_ERROR_WRONG_INPUT;
*job->die = 1;
goto cleanup;
}
w->fifo_in = audio->priv.fifo_in;
w->fifo_out = audio->priv.fifo_raw;
w->config = &audio->priv.config;
w->audio = audio;
w->codec_param = audio->config.in.codec_param;
hb_list_add( job->list_work, w );
}
/*
* Audio Encoder Thread
*/
if ( !(audio->config.out.codec & HB_ACODEC_PASS_FLAG ) )
{
/*
* Add the encoder thread if not doing AC-3 pass through
*/
if ( ( w = hb_codec_encoder( audio->config.out.codec ) ) == NULL )
{
hb_error("Invalid audio codec: %#x", audio->config.out.codec);
w = NULL;
*job->done_error = HB_ERROR_WRONG_INPUT;
*job->die = 1;
goto cleanup;
}
w->fifo_in = audio->priv.fifo_sync;
w->fifo_out = audio->priv.fifo_out;
w->config = &audio->priv.config;
w->audio = audio;
hb_list_add( job->list_work, w );
}
}
}
if( job->chapter_markers && job->chapter_start == job->chapter_end )
{
job->chapter_markers = 0;
hb_log("work: only 1 chapter, disabling chapter markers");
}
/* Display settings */
hb_display_job_info( job );
/* Init read & write threads */
if ( reader->init( reader, job ) )
{
hb_error( "Failure to initialise thread '%s'", reader->name );
*job->done_error = HB_ERROR_INIT;
*job->die = 1;
goto cleanup;
}
reader->done = &job->done;
reader->thread = hb_thread_init( reader->name, ReadLoop, reader, HB_NORMAL_PRIORITY );
job->done = 0;
if( job->list_filter && !job->indepth_scan )
{
int filter_count = hb_list_count( job->list_filter );
int i;
for( i = 0; i < filter_count; i++ )
{
hb_filter_object_t * filter = hb_list_item( job->list_filter, i );
if( !filter ) continue;
// Filters were initialized earlier, so we just need
// to start the filter's thread
filter->done = &job->done;
filter->thread = hb_thread_init( filter->name, filter_loop, filter,
HB_LOW_PRIORITY );
}
}
/* Launch processing threads */
for( i = 0; i < hb_list_count( job->list_work ); i++ )
{
w = hb_list_item( job->list_work, i );
w->done = &job->done;
w->thread_sleep_interval = 10;
if( w->init( w, job ) )
{
hb_error( "Failure to initialise thread '%s'", w->name );
*job->done_error = HB_ERROR_INIT;
*job->die = 1;
goto cleanup;
}
w->thread = hb_thread_init( w->name, work_loop, w,
HB_LOW_PRIORITY );
}
if ( job->indepth_scan )
{
muxer = NULL;
w = sync;
sync->done = &job->done;
}
else
{
sync->done = &job->done;
sync->thread_sleep_interval = 10;
if( sync->init( w, job ) )
{
hb_error( "Failure to initialise thread '%s'", w->name );
*job->done_error = HB_ERROR_INIT;
*job->die = 1;
goto cleanup;
}
sync->thread = hb_thread_init( sync->name, work_loop, sync,
HB_LOW_PRIORITY );
// The muxer requires track information that's set up by the encoder
// init routines so we have to init the muxer last.
muxer = hb_muxer_init( job );
w = muxer;
}
hb_buffer_t * buf_in, * buf_out = NULL;
while ( !*job->die && !*w->done && w->status != HB_WORK_DONE )
{
buf_in = hb_fifo_get_wait( w->fifo_in );
if ( buf_in == NULL )
continue;
if ( *job->die )
{
if( buf_in )
{
hb_buffer_close( &buf_in );
}
break;
}
buf_out = NULL;
w->status = w->work( w, &buf_in, &buf_out );
if( buf_in )
{
hb_buffer_close( &buf_in );
}
if ( buf_out && w->fifo_out == NULL )
{
hb_buffer_close( &buf_out );
}
if( buf_out )
{
while ( !*job->die )
{
if ( hb_fifo_full_wait( w->fifo_out ) )
{
hb_fifo_push( w->fifo_out, buf_out );
buf_out = NULL;
break;
}
}
}
}
if ( buf_out )
{
hb_buffer_close( &buf_out );
}
hb_handle_t * h = job->h;
hb_state_t state;
hb_get_state( h, &state );
hb_log("work: average encoding speed for job is %f fps", state.param.working.rate_avg);
job->done = 1;
if( muxer != NULL )
{
muxer->close( muxer );
free( muxer );
if( sync->thread != NULL )
{
hb_thread_close( &sync->thread );
sync->close( sync );
}
free( sync );
}
cleanup:
/* Stop the write thread (thread_close will block until the muxer finishes) */
job->done = 1;
// Close render filter pipeline
if( job->list_filter )
{
int filter_count = hb_list_count( job->list_filter );
int i;
for( i = 0; i < filter_count; i++ )
{
hb_filter_object_t * filter = hb_list_item( job->list_filter, i );
if( !filter ) continue;
if( filter->thread != NULL )
{
hb_thread_close( &filter->thread );
}
filter->close( filter );
}
}
/* Close work objects */
while( ( w = hb_list_item( job->list_work, 0 ) ) )
{
hb_list_rem( job->list_work, w );
if( w->thread != NULL )
{
hb_thread_close( &w->thread );
w->close( w );
}
free( w );
}
hb_list_close( &job->list_work );
/* Stop the read thread */
if( reader->thread != NULL )
{
hb_thread_close( &reader->thread );
reader->close( reader );
}
free( reader );
/* Close fifos */
hb_fifo_close( &job->fifo_mpeg2 );
hb_fifo_close( &job->fifo_raw );
hb_fifo_close( &job->fifo_sync );
hb_fifo_close( &job->fifo_mpeg4 );
for( i = 0; i < hb_list_count( job->list_subtitle ); i++ )
{
subtitle = hb_list_item( job->list_subtitle, i );
if( subtitle )
{
hb_fifo_close( &subtitle->fifo_in );
hb_fifo_close( &subtitle->fifo_raw );
hb_fifo_close( &subtitle->fifo_sync );
hb_fifo_close( &subtitle->fifo_out );
}
}
for( i = 0; i < hb_list_count( job->list_audio ); i++ )
{
audio = hb_list_item( job->list_audio, i );
if( audio->priv.fifo_in != NULL )
hb_fifo_close( &audio->priv.fifo_in );
if( audio->priv.fifo_raw != NULL )
hb_fifo_close( &audio->priv.fifo_raw );
if( audio->priv.fifo_sync != NULL )
hb_fifo_close( &audio->priv.fifo_sync );
if( audio->priv.fifo_out != NULL )
hb_fifo_close( &audio->priv.fifo_out );
}
if( job->list_filter )
{
for( i = 0; i < hb_list_count( job->list_filter ); i++ )
{
hb_filter_object_t * filter = hb_list_item( job->list_filter, i );
hb_fifo_close( &filter->fifo_out );
}
}
if( job->indepth_scan )
{
/* Before closing the title print out our subtitle stats if we need to
* find the highest and lowest. */
for( i = 0; i < hb_list_count( job->list_subtitle ); i++ )
{
subtitle = hb_list_item( job->list_subtitle, i );
hb_log( "Subtitle track %d (id 0x%x) '%s': %d hits (%d forced)",
subtitle->track, subtitle->id, subtitle->lang,
subtitle->hits, subtitle->forced_hits );
if( subtitle->hits == 0 )
continue;
if( subtitle_highest < subtitle->hits )
{
subtitle_highest = subtitle->hits;
}
if( subtitle_lowest == 0 ||
subtitle_lowest > subtitle->hits )
{
subtitle_lowest = subtitle->hits;
subtitle_lowest_id = subtitle->id;
}
// pick the track with fewest forced hits
if( subtitle->forced_hits > 0 &&
( subtitle_forced_hits == 0 ||
subtitle_forced_hits > subtitle->forced_hits ) )
{
subtitle_forced_id = subtitle->id;
subtitle_forced_hits = subtitle->forced_hits;
}
}
if( subtitle_forced_id && job->select_subtitle_config.force )
{
/* If there is a subtitle stream with forced subtitles and forced-only
* is set, then select it in preference to the lowest. */
subtitle_hit = subtitle_forced_id;
hb_log( "Found a subtitle candidate with id 0x%x (contains forced subs)",
subtitle_hit );
}
else if( subtitle_lowest > 0 &&
subtitle_lowest < ( subtitle_highest * 0.1 ) )
{
/* OK we have more than one, and the lowest is lower,
* but how much lower to qualify for turning it on by
* default?
*
* Let's say 10% as a default. */
subtitle_hit = subtitle_lowest_id;
hb_log( "Found a subtitle candidate with id 0x%x", subtitle_hit );
}
else
{
hb_log( "No candidate detected during subtitle scan" );
}
for( i = 0; i < hb_list_count( job->list_subtitle ); i++ )
{
subtitle = hb_list_item( job->list_subtitle, i );
if( subtitle->id == subtitle_hit )
{
subtitle->config = job->select_subtitle_config;
// Remove from list since we are taking ownership
// of the subtitle.
hb_list_rem( job->list_subtitle, subtitle );
interjob->select_subtitle = subtitle;
break;
}
}
}
hb_buffer_pool_free();
/* OpenCL: must be closed *after* freeing the buffer pool */
if (job->use_opencl)
{
hb_ocl_close();
}
hb_job_close( &job );
}
/**
 * Forward a chapter-break mark from an input buffer to an output buffer.
 *
 * The mark is propagated only when both buffers exist and carry the same
 * start time stamp. A worker that delays frames must propagate chapter
 * marks itself, and a worker that moves marks to a different time should
 * pass NULL for 'src' so this helper cannot create spurious duplicates.
 */
static inline void copy_chapter( hb_buffer_t * dst, hb_buffer_t * src )
{
if( src == NULL || dst == NULL )
{
return;
}
if( src->s.start != dst->s.start )
{
return;
}
// restore logging here to debug chapter mark propagation problems
dst->s.new_chap = src->s.new_chap;
}
/**
 * Generic processing loop for a work object (decoder/encoder/etc. thread).
 *
 * Repeatedly pulls a buffer from the object's input fifo, runs the
 * object's work function on it, and pushes any resulting buffer to the
 * output fifo (blocking while the output fifo is full). Exits when the
 * shared 'done' indicator is set or the work function reports
 * HB_WORK_DONE, then drains the input fifo so residual upstream data
 * cannot stall the rest of the pipeline.
 *
 * @param _w Handle to the work object (hb_work_object_t *).
 */
static void work_loop( void * _w )
{
hb_work_object_t * w = _w;
hb_buffer_t * buf_in = NULL, * buf_out = NULL;
while( !*w->done && w->status != HB_WORK_DONE )
{
// Blocking wait; may return NULL (e.g. on wakeup with no data).
buf_in = hb_fifo_get_wait( w->fifo_in );
if ( buf_in == NULL )
continue;
// Re-check 'done' after the blocking wait: the job may have been
// cancelled while we were asleep, in which case the buffer we just
// pulled must be released rather than processed.
if ( *w->done )
{
if( buf_in )
{
hb_buffer_close( &buf_in );
}
break;
}
// Invalidate buf_out so that if there is no output
// we don't try to pass along junk.
buf_out = NULL;
w->status = w->work( w, &buf_in, &buf_out );
// Propagate chapter marks (only when output has the same timestamp).
copy_chapter( buf_out, buf_in );
// The work function may have consumed buf_in (setting it NULL);
// close it here if it did not.
if( buf_in )
{
hb_buffer_close( &buf_in );
}
// No downstream consumer: discard the output instead of pushing it.
if ( buf_out && w->fifo_out == NULL )
{
hb_buffer_close( &buf_out );
}
if( buf_out )
{
// Block until there is room in the output fifo, but keep
// checking 'done' so a cancelled job can't deadlock here.
while ( !*w->done )
{
if ( hb_fifo_full_wait( w->fifo_out ) )
{
hb_fifo_push( w->fifo_out, buf_out );
buf_out = NULL;
break;
}
}
}
}
// If we exited with an unpushed output buffer (job died), release it.
if ( buf_out )
{
hb_buffer_close( &buf_out );
}
// Consume data in incoming fifo till job complete so that
// residual data does not stall the pipeline
while( !*w->done )
{
buf_in = hb_fifo_get_wait( w->fifo_in );
if ( buf_in != NULL )
hb_buffer_close( &buf_in );
}
}
/**
 * Generic processing loop for a video filter thread.
 *
 * Like work_loop(), but with two filter-specific twists: chapter marks
 * are remembered when a filter drops or delays buffers (so the mark can
 * be re-attached to a later output buffer), and under USE_QSV a filter
 * reporting HB_FILTER_DELAY must still wake any thread waiting on the
 * frame's completion condition. Exits when the shared 'done' indicator
 * is set or the filter reports HB_FILTER_DONE, then drains the input
 * fifo so residual data cannot stall the pipeline.
 *
 * @param _f Handle to the filter object (hb_filter_object_t *).
 */
static void filter_loop( void * _f )
{
hb_filter_object_t * f = _f;
hb_buffer_t * buf_in, * buf_out = NULL;
while( !*f->done && f->status != HB_FILTER_DONE )
{
// Blocking wait; may return NULL (e.g. on wakeup with no data).
buf_in = hb_fifo_get_wait( f->fifo_in );
if ( buf_in == NULL )
continue;
// Filters can drop buffers. Remember chapter information
// so that it can be propagated to the next buffer
if ( buf_in->s.new_chap )
{
f->chapter_time = buf_in->s.start;
f->chapter_val = buf_in->s.new_chap;
// don't let 'filter_loop' put a chapter mark on the wrong buffer
buf_in->s.new_chap = 0;
}
// Re-check 'done' after the blocking wait: release the buffer if
// the job was cancelled while we were asleep.
if ( *f->done )
{
if( buf_in )
{
hb_buffer_close( &buf_in );
}
break;
}
buf_out = NULL;
#ifdef USE_QSV
// Keep a handle to the input so its QSV filter details can be
// reached even if the filter's work function consumes buf_in.
hb_buffer_t *last_buf_in = buf_in;
#endif
f->status = f->work( f, &buf_in, &buf_out );
#ifdef USE_QSV
// A delayed frame produced no output; signal the post-filter side
// waiting on this frame so it does not block forever.
// NOTE(review): if the work function closed buf_in (setting it
// NULL), last_buf_in may point at freed memory here — confirm the
// QSV filters keep the buffer alive when returning HB_FILTER_DELAY.
if (f->status == HB_FILTER_DELAY &&
last_buf_in->qsv_details.filter_details != NULL && buf_out == NULL)
{
hb_filter_private_t_qsv *qsv_user = buf_in ? buf_in->qsv_details.filter_details : last_buf_in->qsv_details.filter_details ;
qsv_user->post.status = f->status;
hb_lock(qsv_user->post.frame_completed_lock);
qsv_user->post.frame_go = 1;
hb_cond_broadcast(qsv_user->post.frame_completed);
hb_unlock(qsv_user->post.frame_completed_lock);
}
#endif
// Re-attach a pending chapter mark to the first output buffer whose
// timestamp is at or past the remembered chapter time.
if ( buf_out && f->chapter_val && f->chapter_time <= buf_out->s.start )
{
buf_out->s.new_chap = f->chapter_val;
f->chapter_val = 0;
}
// The filter may have consumed buf_in (setting it NULL);
// close it here if it did not.
if( buf_in )
{
hb_buffer_close( &buf_in );
}
// No downstream consumer: discard the output instead of pushing it.
if ( buf_out && f->fifo_out == NULL )
{
hb_buffer_close( &buf_out );
}
if( buf_out )
{
// Block until there is room in the output fifo, but keep
// checking 'done' so a cancelled job can't deadlock here.
while ( !*f->done )
{
if ( hb_fifo_full_wait( f->fifo_out ) )
{
hb_fifo_push( f->fifo_out, buf_out );
buf_out = NULL;
break;
}
}
}
}
// If we exited with an unpushed output buffer (job died), release it.
if ( buf_out )
{
hb_buffer_close( &buf_out );
}
// Consume data in incoming fifo till job complete so that
// residual data does not stall the pipeline
while( !*f->done )
{
buf_in = hb_fifo_get_wait( f->fifo_in );
if ( buf_in != NULL )
hb_buffer_close( &buf_in );
}
}
HandBrake-0.10.2/libhb/module.rules 0000664 0001752 0001752 00000002455 11171166721 017471 0 ustar handbrake handbrake $(eval $(call import.MODULE.rules,LIBHB))
# Build rules for libhb: static archive from C and yasm objects.
libhb.build: $(LIBHB.a)
# Order-only prerequisite: make sure the output directory exists first.
$(LIBHB.a): | $(dir $(LIBHB.a))
$(LIBHB.a): $(LIBHB.c.o) $(LIBHB.yasm.o)
	$(AR.exe) rsu $@ $^
# C objects: depend on generated headers/dependencies and the build dir.
$(LIBHB.c.o): $(LIBHB.d)
$(LIBHB.c.o): | $(dir $(LIBHB.c.o))
$(LIBHB.c.o): $(BUILD/)%.o: $(SRC/)%.c
	$(call LIBHB.GCC.C_O,$@,$<)
# m4 expansion: generate sources from .m4 templates using project macros.
$(LIBHB.m4.out): $(BUILD/)project/handbrake.m4
$(LIBHB.m4.out): | $(dir $(LIBHB.m4.out))
$(LIBHB.m4.out): $(LIBHB.build/)%: $(LIBHB.src/)%.m4
	$(M4.exe) -Iproject $< > $@
# Copy public headers into the build tree.
$(LIBHB.h.out): | $(dir $(LIBHB.h.out))
$(LIBHB.h.out): $(BUILD/)%: $(SRC/)%
	$(CP.exe) $< $@
# Remove all libhb build products.
libhb.clean:
	$(RM.exe) -f $(LIBHB.out)
###############################################################################
# yasm assembly objects, only when assembly optimizations are enabled.
ifneq (disabled,$(FEATURE.asm))
$(LIBHB.yasm.o): $(LIBHB.yasm.d)
$(LIBHB.yasm.o): | $(dir $(LIBHB.yasm.o))
$(LIBHB.yasm.o): $(LIBHB.yasm.build/)%.o: $(LIBHB.yasm.src/)%.asm
	$(call LIBHB.YASM.ASM_O,$@,$<)
endif
###############################################################################
# Extra DLL target when cross-compiling for Windows (MinGW).
ifeq (1-mingw,$(BUILD.cross)-$(BUILD.system))
libhb.build: $(LIBHB.dll)
$(LIBHB.dll): | $(dirname $(LIBHB.dll) $(LIBHB.lib))
$(LIBHB.dll): $(LIBHB.c.o) $(LIBHB.yasm.o)
	$(call LIBHB.GCC.DYLIB++,$@,$^ $(LIBHB.dll.libs))
endif
###############################################################################
# Hook libhb into the top-level aggregate targets.
clean: libhb.clean
build: libhb.build
HandBrake-0.10.2/libhb/qsv_filter_pp.c 0000664 0001752 0001752 00000101625 12220306351 020137 0 ustar handbrake handbrake /* ********************************************************************* *\
Copyright (C) 2013 Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\* ********************************************************************* */
#ifdef USE_QSV
#include "hb.h"
#include "hbffmpeg.h"
#include "libavcodec/qsv.h"
#include "qsv_filter_pp.h"
#include "qsv_filter.h"
#include "qsv_memory.h"
// Forward declarations for the QSV "pre" filter (lifecycle + work + info).
static int hb_qsv_filter_pre_init( hb_filter_object_t * filter,
hb_filter_init_t * init );
static int hb_qsv_filter_pre_work( hb_filter_object_t * filter,
hb_buffer_t ** buf_in,
hb_buffer_t ** buf_out );
static int hb_qsv_filter_pre_info( hb_filter_object_t * filter,
hb_filter_info_t * info );
static void hb_qsv_filter_pre_close( hb_filter_object_t * filter );
// Forward declarations for the QSV "post" filter (lifecycle + work + info).
static int hb_qsv_filter_post_init( hb_filter_object_t * filter,
hb_filter_init_t * init );
static int hb_qsv_filter_post_work( hb_filter_object_t * filter,
hb_buffer_t ** buf_in,
hb_buffer_t ** buf_out );
static int hb_qsv_filter_post_info( hb_filter_object_t * filter,
hb_filter_info_t * info );
static void hb_qsv_filter_post_close( hb_filter_object_t * filter );
// Filter object inserted before CPU filters in a QSV full-path pipeline
// (HB_FILTER_QSV_PRE); its callbacks are the hb_qsv_filter_pre_* functions
// declared above. Settings are supplied at runtime (NULL by default).
hb_filter_object_t hb_filter_qsv_pre =
{
.id = HB_FILTER_QSV_PRE,
.enforce_order = 1,
.name = "Quick Sync Video user filter (pre)",
.settings = NULL,
.init = hb_qsv_filter_pre_init,
.work = hb_qsv_filter_pre_work,
.close = hb_qsv_filter_pre_close,
.info = hb_qsv_filter_pre_info,
};
// Filter object inserted after CPU filters in a QSV full-path pipeline
// (HB_FILTER_QSV_POST); its callbacks are the hb_qsv_filter_post_* functions
// declared above. Settings are supplied at runtime (NULL by default).
hb_filter_object_t hb_filter_qsv_post =
{
.id = HB_FILTER_QSV_POST,
.enforce_order = 1,
.name = "Quick Sync Video user filter (post)",
.settings = NULL,
.init = hb_qsv_filter_post_init,
.work = hb_qsv_filter_post_work,
.close = hb_qsv_filter_post_close,
.info = hb_qsv_filter_post_info,
};
/*
 * Lazily create and initialize the QSV VPP "user plugin" space.
 *
 * Returns:
 *   0 - initialized successfully on this call
 *   1 - already initialized
 *   2 - decoder space not ready yet (caller should retry)
 *   3 - no QSV context available
 */
static int filter_pre_init( av_qsv_context* qsv, hb_filter_private_t * pv ){
    mfxStatus sts = MFX_ERR_NONE;
    int i = 0;

    if (!qsv) return 3;

    av_qsv_space *prev_vpp = 0;

    if (!qsv->vpp_space){
        qsv->vpp_space = av_qsv_list_init(HAVE_THREADS);
        // note some change as : when no size changes -> no VPP used
        // impact on : prev_vpp
    }

    if (!pv->vpp_space){
        // Look for an existing user VPP space; remember the default VPP
        // (if any) so our input can be chained after its output.
        // NOTE(review): this loop header was garbled in this copy of the
        // file; restored per the upstream HandBrake sources.
        for (i = 0; i < av_qsv_list_count(qsv->vpp_space); i++){
            av_qsv_space *qsv_vpp = av_qsv_list_item( qsv->vpp_space, i );
            if (qsv_vpp->type == AV_QSV_VPP_USER){
                pv->vpp_space = qsv_vpp;
                break;
            }
            else if (qsv_vpp->type == AV_QSV_VPP_DEFAULT){
                prev_vpp = qsv_vpp;
            }
        }
    }

    if (!pv->vpp_space){
        pv->vpp_space = calloc( 1, sizeof( av_qsv_space ));
        pv->vpp_space->type = AV_QSV_VPP_USER;

        av_qsv_list_add( qsv->vpp_space, pv->vpp_space );
        av_qsv_add_context_usage(qsv, HAVE_THREADS);
    }
    else if (pv->vpp_space->is_init_done) return 1;

    // the decoder's parameters seed ours, so it must be fully up first
    if (!qsv->dec_space || !qsv->dec_space->is_init_done) return 2;

    av_qsv_space *qsv_vpp = pv->vpp_space;

    AV_QSV_ZERO_MEMORY(qsv_vpp->m_mfxVideoParam);

    if (prev_vpp)
    {
        // chained after the default VPP: inherit its VPP parameters
        memcpy( &qsv_vpp->m_mfxVideoParam.vpp, &prev_vpp->m_mfxVideoParam.vpp, sizeof(prev_vpp->m_mfxVideoParam.vpp));
    }
    else
    {
        AV_QSV_ZERO_MEMORY(qsv_vpp->m_mfxVideoParam);

        // FrameRate is important for VPP to start with
        if( qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FrameRateExtN == 0 &&
            qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FrameRateExtD == 0 ){
            qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FrameRateExtN = pv->job->title->rate;
            qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FrameRateExtD = pv->job->title->rate_base;
        }

        // input side mirrors the decoder's output frame info
        qsv_vpp->m_mfxVideoParam.vpp.In.FourCC        = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FourCC;
        qsv_vpp->m_mfxVideoParam.vpp.In.ChromaFormat  = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.ChromaFormat;
        qsv_vpp->m_mfxVideoParam.vpp.In.CropX         = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.CropX;
        qsv_vpp->m_mfxVideoParam.vpp.In.CropY         = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.CropY;
        qsv_vpp->m_mfxVideoParam.vpp.In.CropW         = pv->job->title->width;
        qsv_vpp->m_mfxVideoParam.vpp.In.CropH         = pv->job->title->height;
        qsv_vpp->m_mfxVideoParam.vpp.In.PicStruct     = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.PicStruct;
        qsv_vpp->m_mfxVideoParam.vpp.In.FrameRateExtN = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FrameRateExtN;
        qsv_vpp->m_mfxVideoParam.vpp.In.FrameRateExtD = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FrameRateExtD;
        qsv_vpp->m_mfxVideoParam.vpp.In.AspectRatioW  = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.AspectRatioW;
        qsv_vpp->m_mfxVideoParam.vpp.In.AspectRatioH  = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.AspectRatioH;
        // Width/Height must be 16-aligned (32 for interlaced content)
        qsv_vpp->m_mfxVideoParam.vpp.In.Width         = AV_QSV_ALIGN16(pv->job->title->width);
        qsv_vpp->m_mfxVideoParam.vpp.In.Height        = (MFX_PICSTRUCT_PROGRESSIVE == qsv_vpp->m_mfxVideoParam.vpp.In.PicStruct)?
                                                        AV_QSV_ALIGN16(pv->job->title->height) : AV_QSV_ALIGN32(pv->job->title->height);

        // output side: same format/geometry (this VPP stage only copies)
        qsv_vpp->m_mfxVideoParam.vpp.Out.FourCC        = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FourCC;
        qsv_vpp->m_mfxVideoParam.vpp.Out.ChromaFormat  = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.ChromaFormat;
        qsv_vpp->m_mfxVideoParam.vpp.Out.CropX         = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.CropX;
        qsv_vpp->m_mfxVideoParam.vpp.Out.CropY         = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.CropY;
        qsv_vpp->m_mfxVideoParam.vpp.Out.CropW         = pv->job->title->width;
        qsv_vpp->m_mfxVideoParam.vpp.Out.CropH         = pv->job->title->height;
        qsv_vpp->m_mfxVideoParam.vpp.Out.PicStruct     = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.PicStruct;
        qsv_vpp->m_mfxVideoParam.vpp.Out.FrameRateExtN = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FrameRateExtN;
        qsv_vpp->m_mfxVideoParam.vpp.Out.FrameRateExtD = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FrameRateExtD;
        qsv_vpp->m_mfxVideoParam.vpp.Out.AspectRatioW  = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.AspectRatioW;
        qsv_vpp->m_mfxVideoParam.vpp.Out.AspectRatioH  = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.AspectRatioH;
        qsv_vpp->m_mfxVideoParam.vpp.Out.Width         = AV_QSV_ALIGN16(pv->job->title->width);
        qsv_vpp->m_mfxVideoParam.vpp.Out.Height        = (MFX_PICSTRUCT_PROGRESSIVE == qsv_vpp->m_mfxVideoParam.vpp.In.PicStruct)?
                                                         AV_QSV_ALIGN16(pv->job->title->height) : AV_QSV_ALIGN32(pv->job->title->height);

        memset(&qsv_vpp->request, 0, sizeof(mfxFrameAllocRequest)*2);
    }

    // opaque (video) memory on both sides
    qsv_vpp->m_mfxVideoParam.IOPattern = MFX_IOPATTERN_IN_OPAQUE_MEMORY | MFX_IOPATTERN_OUT_OPAQUE_MEMORY;

    // allocate surfaces and sync points, bounded by the global limits
    qsv_vpp->surface_num = FFMIN(prev_vpp ? prev_vpp->surface_num : qsv->dec_space->surface_num/2, AV_QSV_SURFACE_NUM);

    for (i = 0; i < qsv_vpp->surface_num; i++){
        qsv_vpp->p_surfaces[i] = av_mallocz( sizeof(mfxFrameSurface1) );
        AV_QSV_CHECK_POINTER(qsv_vpp->p_surfaces[i], MFX_ERR_MEMORY_ALLOC);
        memcpy(&(qsv_vpp->p_surfaces[i]->Info), &(qsv_vpp->m_mfxVideoParam.vpp.Out), sizeof(mfxFrameInfo));
    }

    qsv_vpp->sync_num = FFMIN(prev_vpp ? prev_vpp->sync_num : qsv->dec_space->sync_num, AV_QSV_SYNC_NUM);
    for (i = 0; i < qsv_vpp->sync_num; i++){
        qsv_vpp->p_syncp[i] = av_mallocz(sizeof(av_qsv_sync));
        AV_QSV_CHECK_POINTER(qsv_vpp->p_syncp[i], MFX_ERR_MEMORY_ALLOC);
        qsv_vpp->p_syncp[i]->p_sync = av_mallocz(sizeof(mfxSyncPoint));
        AV_QSV_CHECK_POINTER(qsv_vpp->p_syncp[i]->p_sync, MFX_ERR_MEMORY_ALLOC);
    }

    // describe the opaque surface chains to MediaSDK via an ext buffer
    memset(&qsv_vpp->ext_opaque_alloc, 0, sizeof(mfxExtOpaqueSurfaceAlloc));
    qsv_vpp->m_mfxVideoParam.NumExtParam = qsv_vpp->p_ext_param_num = 1;

    qsv_vpp->p_ext_params = av_mallocz(sizeof(mfxExtBuffer *)*qsv_vpp->p_ext_param_num);
    AV_QSV_CHECK_POINTER(qsv_vpp->p_ext_params, MFX_ERR_MEMORY_ALLOC);

    qsv_vpp->m_mfxVideoParam.ExtParam = qsv_vpp->p_ext_params;

    qsv_vpp->ext_opaque_alloc.Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION;
    qsv_vpp->ext_opaque_alloc.Header.BufferSz = sizeof(mfxExtOpaqueSurfaceAlloc);
    qsv_vpp->p_ext_params[0] = (mfxExtBuffer*)&qsv_vpp->ext_opaque_alloc;

    // input surfaces come from the previous VPP (if chained) or decoder
    if (prev_vpp){
        qsv_vpp->ext_opaque_alloc.In.Surfaces   = prev_vpp->p_surfaces;
        qsv_vpp->ext_opaque_alloc.In.NumSurface = prev_vpp->surface_num;
    }
    else{
        qsv_vpp->ext_opaque_alloc.In.Surfaces   = qsv->dec_space->p_surfaces;
        qsv_vpp->ext_opaque_alloc.In.NumSurface = qsv->dec_space->surface_num;
    }
    qsv_vpp->ext_opaque_alloc.In.Type  = qsv->dec_space->request[0].Type;

    qsv_vpp->ext_opaque_alloc.Out.Surfaces   = qsv_vpp->p_surfaces;
    qsv_vpp->ext_opaque_alloc.Out.NumSurface = qsv_vpp->surface_num;
    qsv_vpp->ext_opaque_alloc.Out.Type       = qsv->dec_space->request[0].Type;

    // register our user plugin with the MediaSDK session
    pv->qsv_user = hb_list_init();

    qsv_filter_t *plugin = av_mallocz( sizeof(qsv_filter_t) );
    plugin->pv                  = pv;
    plugin->plug.pthis          = plugin;
    plugin->plug.PluginInit     = qsv_PluginInit;
    plugin->plug.PluginClose    = qsv_PluginClose;
    plugin->plug.GetPluginParam = qsv_GetPluginParam;
    plugin->plug.Submit         = qsv_Submit;
    plugin->plug.Execute        = qsv_Execute;
    plugin->plug.FreeResources  = qsv_FreeResources;

    hb_list_add(pv->qsv_user, plugin);

    sts = MFXVideoUSER_Register(qsv->mfx_session, 0, &plugin->plug);
    AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    plugin_init(plugin, &qsv_vpp->m_mfxVideoParam);

    qsv_vpp->is_init_done = 1;

    return 0;
}
/* Human-readable description of the pre filter. Always returns 0. */
static int hb_qsv_filter_pre_info( hb_filter_object_t * filter,
                                   hb_filter_info_t * info ){
    hb_filter_private_t * pv = filter->private_data;
    if (pv != NULL)
    {
        sprintf(info->human_readable_desc, "copy data to system memory");
    }
    return 0;
}
/*
 * Allocate the private data shared by the QSV pre/post filter pair:
 * the four condition/lock handshake pairs used with the plugin threads,
 * the output buffer list, and the NV12 <-> YUV420P scaler contexts.
 * Always returns 0.
 */
static int hb_qsv_filter_pre_init( hb_filter_object_t * filter,
                                   hb_filter_init_t * init ){
    filter->private_data = calloc( 1, sizeof(struct hb_filter_private_s) );
    hb_filter_private_t * pv = filter->private_data;
    pv->job = init->job;

    // pre: plugin -> filter, "input frame copied into system memory"
    pv->pre.frame_go = 0;
    pv->pre.frame_completed = hb_cond_init();
    pv->pre.frame_completed_lock = hb_lock_init();

    // post: filter -> plugin, "filtered frame is ready to copy back"
    pv->post.frame_go = 0;
    pv->post.frame_completed = hb_cond_init();
    pv->post.frame_completed_lock = hb_lock_init();

    // pre_busy: wait for a free plugin task slot (see qsv_FreeResources)
    pv->pre_busy.frame_go = 0;
    pv->pre_busy.frame_completed = hb_cond_init();
    pv->pre_busy.frame_completed_lock = hb_lock_init();

    // post_busy: wait for the plugin to finish the copy back
    pv->post_busy.frame_go = 0;
    pv->post_busy.frame_completed = hb_cond_init();
    pv->post_busy.frame_completed_lock = hb_lock_init();

    pv->list = hb_list_init();

    // just to remind:
    // PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples) , 3 planes: Y, U, V
    // PIX_FMT_NV12,    ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
    pv->sws_context_from_nv12 = hb_sws_get_context(
                                    pv->job->title->width, pv->job->title->height, AV_PIX_FMT_NV12,
                                    pv->job->title->width, pv->job->title->height, AV_PIX_FMT_YUV420P,
                                    SWS_LANCZOS|SWS_ACCURATE_RND);
    pv->sws_context_to_nv12 = hb_sws_get_context(
                                    pv->job->title->width, pv->job->title->height, AV_PIX_FMT_YUV420P,
                                    pv->job->title->width, pv->job->title->height, AV_PIX_FMT_NV12,
                                    SWS_LANCZOS|SWS_ACCURATE_RND);
    return 0;
}
/*
 * Submit one decoded frame to the user VPP plugin and wait for it to be
 * copied into system memory. Handles MediaSDK's retry conditions
 * (MORE_SURFACE, DEVICE_BUSY) and resource bookkeeping for frames the
 * plugin buffers internally (MORE_DATA).
 * Returns 1 if a result is available, 0 otherwise.
 */
int pre_process_frame(hb_buffer_t *in, av_qsv_context* qsv, hb_filter_private_t * pv ){

    // 1 if have results , 0 otherwise
    int ret = 1;

    av_qsv_list* received_item = in->qsv_details.qsv_atom;

    mfxStatus sts = MFX_ERR_NONE;
    mfxFrameSurface1 *work_surface = NULL;
    av_qsv_stage* stage = 0;

    av_qsv_space *qsv_vpp = pv->vpp_space;

    if (received_item)
    {
        // input surface comes from the last pipeline stage of this atom
        stage = av_qsv_get_last_stage( received_item );
        work_surface = stage->out.p_surface;
    }

    int sync_idx = av_qsv_get_free_sync(qsv_vpp, qsv);
    int surface_idx = -1;

    for (;;)
    {
        if (sync_idx == -1)
        {
            hb_error("qsv: Not enough resources allocated for the preprocessing filter");
            ret = 0;
            break;
        }

        // pick a fresh output surface only when the previous call consumed one
        if (sts == MFX_ERR_MORE_SURFACE || sts == MFX_ERR_NONE)
            surface_idx = av_qsv_get_free_surface(qsv_vpp, qsv, &(qsv_vpp->m_mfxVideoParam.vpp.Out), QSV_PART_ANY);
        if (surface_idx == -1) {
            hb_error("qsv: Not enough resources allocated for the preprocessing filter");
            ret = 0;
            break;
        }

        sts = MFXVideoUSER_ProcessFrameAsync(qsv->mfx_session, &work_surface, 1, &qsv_vpp->p_surfaces[surface_idx] , 1, qsv_vpp->p_syncp[sync_idx]->p_sync);

        if (MFX_ERR_MORE_DATA == sts)
        {
            // frame was buffered inside the plugin: park its resources
            if (!qsv_vpp->pending)
            {
                qsv_vpp->pending = av_qsv_list_init(0);
            }

            // if we have no results, we should not miss resource(s)
            av_qsv_list_add( qsv_vpp->pending, received_item);

            ff_qsv_atomic_dec(&qsv_vpp->p_syncp[sync_idx]->in_use);

            ret = 0;
            break;
        }

        if ( MFX_ERR_MORE_SURFACE == sts || MFX_ERR_NONE <= sts){
            if ( MFX_ERR_MORE_SURFACE == sts )
                continue;

            // mark the output surface as in-use by the pipeline
            if (qsv_vpp->p_surfaces[surface_idx] && MFX_WRN_DEVICE_BUSY != sts )
                ff_qsv_atomic_inc(&qsv_vpp->p_surfaces[surface_idx]->Data.Locked);
        }

        AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

        if (MFX_ERR_NONE <= sts ) // repeat the call if warning and no output
        {
            if (MFX_WRN_DEVICE_BUSY == sts){
                // no free task slot: wait for qsv_FreeResources to signal
                hb_lock(pv->pre_busy.frame_completed_lock);
                while (!pv->pre_busy.frame_go){
                    hb_cond_timedwait(pv->pre_busy.frame_completed, pv->pre_busy.frame_completed_lock, 1000);
                    if (*pv->job->die)
                        break;
                }
                pv->pre_busy.frame_go = 0;
                hb_unlock(pv->pre_busy.frame_completed_lock);

                continue;
            }

            // wait for process_filter to finish the NV12 -> YUV420 copy
            hb_lock(pv->pre.frame_completed_lock);
            while (!pv->pre.frame_go){
                hb_cond_timedwait(pv->pre.frame_completed, pv->pre.frame_completed_lock, 1000);
                if (*pv->job->die)
                    break;
            }
            pv->pre.frame_go = 0;
            hb_unlock(pv->pre.frame_completed_lock);

            in = pv->pre.out;

            // release the input surface; it is no longer needed
            if (work_surface){
                ff_qsv_atomic_dec(&work_surface->Data.Locked);
            }

            // inserting for the future, will be locked until very ready
            if (stage){
                av_qsv_stage* new_stage = av_qsv_stage_init();

                new_stage->type = AV_QSV_VPP_USER;
                new_stage->in.p_surface = work_surface;
                new_stage->out.p_surface = qsv_vpp->p_surfaces[surface_idx];
                new_stage->out.sync = qsv_vpp->p_syncp[sync_idx];
                av_qsv_add_stagee( &received_item, new_stage, HAVE_THREADS );

                // add pending resources for the proper reclaim later
                if ( qsv_vpp->pending ){
                    if ( av_qsv_list_count(qsv_vpp->pending) > 0 ){
                        new_stage->pending = qsv_vpp->pending;
                    }
                    qsv_vpp->pending = 0;

                    // making free via decrement for all pending
                    int i = 0;
                    for (i = av_qsv_list_count(new_stage->pending); i > 0; i--){
                        av_qsv_list *atom_list = av_qsv_list_item(new_stage->pending, i-1);
                        av_qsv_stage *stage = av_qsv_get_last_stage( atom_list );
                        mfxFrameSurface1 *work_surface = stage->out.p_surface;
                        if (work_surface)
                            ff_qsv_atomic_dec(&work_surface->Data.Locked);
                    }
                }
            }
            break;
        }

        ff_qsv_atomic_dec(&qsv_vpp->p_syncp[sync_idx]->in_use);

        if (MFX_ERR_NOT_ENOUGH_BUFFER == sts)
            HB_DEBUG_ASSERT(1, "The bitstream buffer size is insufficient.");

        break;
    }

    return ret;
}
/*
 * Pre filter entry point: ensure the VPP plugin space is initialized,
 * submit the incoming frame for the GPU -> system-memory copy, and hand
 * out whatever frame the internal list currently holds.
 */
static int hb_qsv_filter_pre_work( hb_filter_object_t * filter,
                                   hb_buffer_t ** buf_in,
                                   hb_buffer_t ** buf_out ){
    hb_filter_private_t * pv = filter->private_data;
    hb_buffer_t * in = *buf_in;
    hb_buffer_t * out = *buf_out;
    int sts = 0;

    av_qsv_context* qsv = pv->job->qsv.ctx;

    // share our private data with the post filter via the buffer
    if (!in->qsv_details.filter_details)
        in->qsv_details.filter_details = pv;

    if ( in->size <= 0 )
    {
        // end-of-stream: pass the EOF buffer straight through
        *buf_out = in;
        *buf_in = NULL;
        return HB_FILTER_DONE;
    }

    // retry until the decoder space is ready (filter_pre_init returns 2/3)
    while (1){
        int ret = filter_pre_init(qsv, pv);
        if (ret >= 2)
            av_qsv_sleep(1);
        else
            break;
    }

    pv->pre.in  = in;
    pv->pre.out = in;

    sts = pre_process_frame(in, qsv, pv);

    if (sts){
        hb_list_add(pv->list, out);
    }

    if ( hb_list_count(pv->list) ){
        *buf_out = hb_list_item(pv->list, 0);
        hb_list_rem(pv->list, *buf_out);
        *buf_in = NULL;
    }
    else{
        *buf_in = NULL;
        *buf_out = in;
    }

    return HB_FILTER_OK;
}
/*
 * Tear down the pre filter: free the scaler contexts, unregister and
 * close the user plugin and its VPP space, drop our QSV context usage,
 * and destroy all synchronization primitives.
 */
static void hb_qsv_filter_pre_close( hb_filter_object_t * filter ){
    int i = 0;
    mfxStatus sts = MFX_ERR_NONE;

    hb_filter_private_t * pv = filter->private_data;
    if ( !pv )
    {
        return;
    }

    sws_freeContext(pv->sws_context_to_nv12);
    sws_freeContext(pv->sws_context_from_nv12);

    av_qsv_context* qsv = pv->job->qsv.ctx;
    if (qsv && qsv->vpp_space && av_qsv_list_count(qsv->vpp_space) > 0 ){
        if (pv->qsv_user && qsv->mfx_session){
            // unregister the plugin before closing its tasks
            sts = MFXVideoUSER_Unregister(qsv->mfx_session, 0);
            AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

            for (i = hb_list_count(pv->qsv_user); i > 0; i--){
                qsv_filter_t *plugin = hb_list_item(pv->qsv_user, i-1);
                hb_list_rem(pv->qsv_user, plugin);
                plugin_close(plugin);
            }
            hb_list_close(&pv->qsv_user);
        }

        // closing local stuff
        qsv_filter_close(qsv, AV_QSV_VPP_USER);

        // closing the common stuff
        av_qsv_context_clean(qsv);
    }

    hb_cond_close(&pv->pre.frame_completed);
    hb_lock_close(&pv->pre.frame_completed_lock);

    hb_cond_close(&pv->post.frame_completed);
    hb_lock_close(&pv->post.frame_completed_lock);

    hb_cond_close(&pv->pre_busy.frame_completed);
    hb_lock_close(&pv->pre_busy.frame_completed_lock);

    hb_cond_close(&pv->post_busy.frame_completed);
    hb_lock_close(&pv->post_busy.frame_completed_lock);

    hb_list_close( &pv->list );

    free( pv );
    filter->private_data = NULL;
}
/* Human-readable description of the post filter. Always returns 0. */
static int hb_qsv_filter_post_info( hb_filter_object_t * filter,
                                    hb_filter_info_t * info ){
    hb_filter_private_t * pv = filter->private_data;
    if (pv != NULL)
    {
        sprintf(info->human_readable_desc, "copy data to opaque memory");
    }
    return 0;
}
/* Allocate the post filter's private data and remember the job.
 * (Most state is shared with the pre filter via the buffer.) */
static int hb_qsv_filter_post_init( hb_filter_object_t * filter,
                                    hb_filter_init_t * init ){
    hb_filter_private_t *pv = calloc( 1, sizeof(struct hb_filter_private_s) );
    pv->job = init->job;
    filter->private_data = pv;
    return 0;
}
/*
 * Post filter entry point: signal the plugin thread that the filtered
 * frame is ready to be copied back into opaque memory, then wait for
 * the copy to complete before passing the buffer downstream.
 */
static int hb_qsv_filter_post_work( hb_filter_object_t * filter,
                                    hb_buffer_t ** buf_in,
                                    hb_buffer_t ** buf_out ){
    hb_filter_private_t * pv = filter->private_data;
    hb_buffer_t * in = *buf_in;
    hb_buffer_t * out = *buf_out;

    if ( in->size <= 0 )
    {
        // end-of-stream: pass the EOF buffer straight through
        *buf_out = in;
        *buf_in = NULL;
        return HB_FILTER_DONE;
    }

    av_qsv_context* qsv = pv->job->qsv.ctx;
    // switch to the pre filter's private data, stashed on the buffer
    pv = in->qsv_details.filter_details;
    if (!pv)
    {
        *buf_out = NULL;
        *buf_in = NULL;
        return HB_FILTER_OK;
    }

    // retry until the shared VPP space is ready
    while (1){
        int ret = filter_pre_init(qsv, pv);
        if (ret >= 2)
            av_qsv_sleep(1);
        else
            break;
    }

    pv->post.in = in;
    pv->post.out = out;

    // signal: input is prepared, can start inserting data back into pipeline
    hb_lock(pv->post.frame_completed_lock);
    pv->post.frame_go = 1;
    hb_cond_broadcast(pv->post.frame_completed);
    hb_unlock(pv->post.frame_completed_lock);

    // wait: on signal that data is ready
    hb_lock(pv->post_busy.frame_completed_lock);
    while (!pv->post_busy.frame_go){
        hb_cond_timedwait(pv->post_busy.frame_completed, pv->post_busy.frame_completed_lock, 1000);
        if (*pv->job->die)
            break;
    }
    pv->post_busy.frame_go = 0;
    hb_unlock(pv->post_busy.frame_completed_lock);

    if (pv->post.status == HB_FILTER_OK || pv->post.status == HB_FILTER_DONE)
    {
        *buf_out = in;
    }
    else
    {
        *buf_out = NULL;
        pv->post.status = HB_FILTER_OK;
    }

    *buf_in = NULL;

    return HB_FILTER_OK;
}
/* Release the post filter's private data; the heavyweight resources
 * are owned and freed by the pre filter's close. */
static void hb_qsv_filter_post_close( hb_filter_object_t * filter ){
    hb_filter_private_t *pv = filter->private_data;
    if (pv == NULL)
        return;
    free(pv);
    filter->private_data = NULL;
}
/* MediaSDK plugin callback: store the core interface and advertise a
 * single-threaded, serial task policy. */
mfxStatus MFX_CDECL qsv_PluginInit(mfxHDL pthis, mfxCoreInterface *core){
    // both handles are required
    if (core == NULL || pthis == NULL)
        return MFX_ERR_NULL_PTR;

    qsv_filter_t *plugin = pthis;
    plugin->core = core;
    plugin->pluginparam.MaxThreadNum = 1;
    plugin->pluginparam.ThreadPolicy = MFX_THREADPOLICY_SERIAL;
    return MFX_ERR_NONE;
}
/* MediaSDK plugin callback: nothing is allocated at plugin-close scope,
 * so there is nothing to release here. */
mfxStatus MFX_CDECL qsv_PluginClose (mfxHDL pthis){
    return MFX_ERR_NONE;
}
/* MediaSDK plugin callback: report the parameters set in qsv_PluginInit. */
mfxStatus MFX_CDECL qsv_GetPluginParam(mfxHDL pthis, mfxPluginParam *par){
    if (pthis == NULL)
        return MFX_ERR_NULL_PTR;

    qsv_filter_t *plugin = pthis;
    *par = plugin->pluginparam;
    return MFX_ERR_NONE;
}
/*
 * MediaSDK plugin callback: accept one input/output surface pair and
 * package it as a task for qsv_Execute. Returns MFX_WRN_DEVICE_BUSY when
 * no task slot is free (MediaSDK retries; see pre_process_frame's wait).
 */
mfxStatus MFX_CDECL qsv_Submit(mfxHDL pthis, const mfxHDL *in, mfxU32 in_num, const mfxHDL *out, mfxU32 out_num, mfxThreadTask *task){
    mfxStatus sts = MFX_ERR_NONE;
    qsv_filter_t *plugin = pthis;

    mfxFrameSurface1 *surface_in = (mfxFrameSurface1 *)in[0];
    mfxFrameSurface1 *surface_out = (mfxFrameSurface1 *)out[0];
    mfxFrameSurface1 *real_surface_in = surface_in;
    mfxFrameSurface1 *real_surface_out = surface_out;

    // resolve opaque surfaces to the real allocator-backed surfaces
    sts = plugin->core->GetRealSurface(plugin->core->pthis, surface_in, &real_surface_in);
    AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, MFX_ERR_MEMORY_ALLOC);

    sts = plugin->core->GetRealSurface(plugin->core->pthis, surface_out, &real_surface_out);
    AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, MFX_ERR_MEMORY_ALLOC);

    int task_idx = get_free_task(plugin->tasks);

    if (task_idx == -1)
    {
        return MFX_WRN_DEVICE_BUSY;
    }

    // keep both surfaces alive while the task is in flight
    // (released in qsv_FreeResources)
    plugin->core->IncreaseReference(plugin->core->pthis, &(real_surface_in->Data));
    plugin->core->IncreaseReference(plugin->core->pthis, &(real_surface_out->Data));

    // to preserve timing if other filters are used in-between
    surface_out->Data.TimeStamp = surface_in->Data.TimeStamp;
    surface_out->Data.FrameOrder = surface_in->Data.FrameOrder;

    qsv_filter_task_t *current_task = hb_list_item(plugin->tasks, task_idx);
    current_task->in = real_surface_in;
    current_task->out = real_surface_out;
    current_task->busy = 1;
    current_task->pv = plugin->pv;

    *task = (mfxThreadTask)current_task;

    return sts;
}
/*
 * MediaSDK plugin callback: run one scheduled task by invoking its
 * processor (process_filter). Returns MFX_TASK_DONE on success, or the
 * processor's error status. (The unused local alias of pthis was removed.)
 */
mfxStatus MFX_CDECL qsv_Execute(mfxHDL pthis, mfxThreadTask task, mfxU32 uid_p, mfxU32 uid_a){
    mfxStatus sts = MFX_ERR_NONE;
    qsv_filter_task_t *current_task = (qsv_filter_task_t *)task;

    sts = (current_task->processor.process)(current_task, 0);
    AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    return MFX_TASK_DONE;
}
/*
 * MediaSDK plugin callback: release per-task resources after execution.
 * Drops the surface references taken in qsv_Submit, frees the task slot,
 * and wakes any thread blocked in pre_process_frame waiting for a slot.
 */
mfxStatus MFX_CDECL qsv_FreeResources(mfxHDL pthis, mfxThreadTask task, mfxStatus sts){
    qsv_filter_t *plugin = pthis;
    qsv_filter_task_t *current_task = (qsv_filter_task_t *)task;

    plugin->core->DecreaseReference(plugin->core->pthis, &(current_task->in->Data));
    plugin->core->DecreaseReference(plugin->core->pthis, &(current_task->out->Data));

    current_task->busy = 0;

    // signal "a task slot is free again" to a waiting pre_process_frame()
    hb_lock(plugin->pv->pre_busy.frame_completed_lock);
    plugin->pv->pre_busy.frame_go = 1;
    hb_cond_broadcast(plugin->pv->pre_busy.frame_completed);
    hb_unlock(plugin->pv->pre_busy.frame_completed_lock);

    return MFX_ERR_NONE;
}
/*
 * One-time plugin setup: map the opaque surface chains described in
 * *param to real memory and create the (single) task slot.
 * Returns MFX_ERR_NONE, MFX_ERR_INVALID_VIDEO_PARAM when the opaque-alloc
 * ext buffer is missing, or a mapping error status.
 */
mfxStatus plugin_init(qsv_filter_t* plugin, mfxVideoParam *param){
    mfxStatus sts = MFX_ERR_NONE;

    if (plugin->is_init_done) return sts;

    plugin->videoparam = param;

    mfxExtOpaqueSurfaceAlloc* plugin_opaque_alloc = NULL;
    plugin_opaque_alloc = (mfxExtOpaqueSurfaceAlloc*) get_ext_buffer(plugin->videoparam->ExtParam,
                                                                     plugin->videoparam->NumExtParam, MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION);

    if (!plugin_opaque_alloc || !plugin_opaque_alloc->In.Surfaces || !plugin_opaque_alloc->Out.Surfaces)
        return MFX_ERR_INVALID_VIDEO_PARAM;

    // map the opaque input and output surface chains to real memory
    sts = plugin->core->MapOpaqueSurface(plugin->core->pthis, plugin_opaque_alloc->In.NumSurface,
                                         plugin_opaque_alloc->In.Type, plugin_opaque_alloc->In.Surfaces);
    AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    sts = plugin->core->MapOpaqueSurface(plugin->core->pthis, plugin_opaque_alloc->Out.NumSurface,
                                         plugin_opaque_alloc->Out.Type, plugin_opaque_alloc->Out.Surfaces);
    AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    // a single task slot: processed by process_filter
    plugin->tasks = hb_list_init();
    qsv_filter_task_t *task = calloc( 1, sizeof( qsv_filter_task_t ));

    task->processor.process = process_filter;
    task->processor.alloc = &plugin->core->FrameAllocator;
    task->processor.core = plugin->core;

    hb_list_add(plugin->tasks, task);

    plugin->is_init_done = 1;

    return sts;
}
/*
 * Undo plugin_init: unmap the opaque surface chains and free the task
 * list. Returns MFX_ERR_NONE, MFX_ERR_INVALID_VIDEO_PARAM when the
 * opaque-alloc ext buffer is missing, or an unmapping error status.
 */
mfxStatus plugin_close(qsv_filter_t* plugin){
    int i = 0;
    mfxStatus sts = MFX_ERR_NONE;

    if (!plugin->is_init_done) return sts;

    mfxExtOpaqueSurfaceAlloc* plugin_opaque_alloc = NULL;
    plugin_opaque_alloc = (mfxExtOpaqueSurfaceAlloc*) get_ext_buffer(plugin->videoparam->ExtParam,
                                                                     plugin->videoparam->NumExtParam, MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION);

    if (!plugin_opaque_alloc || !plugin_opaque_alloc->In.Surfaces || !plugin_opaque_alloc->Out.Surfaces)
        return MFX_ERR_INVALID_VIDEO_PARAM;

    sts = plugin->core->UnmapOpaqueSurface(plugin->core->pthis, plugin_opaque_alloc->In.NumSurface,
                                           plugin_opaque_alloc->In.Type, plugin_opaque_alloc->In.Surfaces);
    AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    sts = plugin->core->UnmapOpaqueSurface(plugin->core->pthis, plugin_opaque_alloc->Out.NumSurface,
                                           plugin_opaque_alloc->Out.Type, plugin_opaque_alloc->Out.Surfaces);
    AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    if (plugin->tasks){
        for (i = hb_list_count(plugin->tasks); i > 0; i--){
            qsv_filter_task_t *task = hb_list_item(plugin->tasks, i-1);
            hb_list_rem(plugin->tasks, task);
            free(task);
        }
        hb_list_close(&plugin->tasks);
    }

    plugin->is_init_done = 0;

    return sts;
}
/*
 * Find the extended buffer with the given BufferId in an mfxExtBuffer
 * array; returns 0 (NULL) if the array is missing or no entry matches.
 *
 * NOTE(review): the loop header was garbled in this copy of the source
 * (lost '<' characters); restored per the upstream HandBrake sources.
 */
mfxExtBuffer* get_ext_buffer(mfxExtBuffer** buffers, mfxU32 buffers_num, mfxU32 buffer_id){
    int i = 0;
    if (!buffers) return 0;
    for (i = 0; i < buffers_num; i++){
        if (!buffers[i])
            continue;
        if (buffers[i]->BufferId == buffer_id)
            return buffers[i];
    }
    return 0;
}
/*
 * Return the index of the first non-busy task slot in the list, or -1
 * if all slots are in flight.
 *
 * NOTE(review): the loop header was garbled in this copy of the source
 * (lost '<' characters); restored per the upstream HandBrake sources.
 */
int get_free_task(hb_list_t* tasks){
    int ret = -1;
    int i = 0;
    for (i = 0; i < hb_list_count(tasks); i++){
        qsv_filter_task_t* task = hb_list_item(tasks, i);
        if (!task->busy){
            ret = i;
            break;
        }
    }
    return ret;
}
/*
 * Map a surface so its pixel data is CPU-addressable.
 * Data.Y != 0 is used as the "already mapped" marker.
 */
mfxStatus lock_frame(mfxFrameAllocator *alloc,mfxFrameSurface1 *surface){
    mfxStatus sts = MFX_ERR_NONE;
    // prevent double lock
    if (surface->Data.Y != 0 && surface->Data.MemId !=0){
        return MFX_ERR_UNSUPPORTED;
    }
    // not allocated, therefore no lock
    // (reached only with MemId == 0: data already lives in system memory)
    if (surface->Data.Y != 0){
        return MFX_ERR_NONE;
    }
    sts = alloc->Lock(alloc->pthis,surface->Data.MemId,&surface->Data);
    return sts;
}
/*
 * Unmap a surface previously mapped with lock_frame(); no-op for
 * system-memory surfaces and for surfaces that were never mapped.
 */
mfxStatus unlock_frame(mfxFrameAllocator *alloc,mfxFrameSurface1 *surface){
    mfxStatus sts = MFX_ERR_NONE;
    // not allocated
    if (surface->Data.Y != 0 && surface->Data.MemId == 0){
        return MFX_ERR_NONE;
    }
    // not locked
    if (surface->Data.Y == 0){
        return MFX_ERR_NONE;
    }
    sts = alloc->Unlock(alloc->pthis,surface->Data.MemId,&surface->Data);
    return sts;
}
/*
 * Plugin task body (runs on a MediaSDK thread via qsv_Execute):
 *   1. lock both surfaces;
 *   2. copy the NV12 input into the CPU pipeline buffer (pre.out);
 *   3. signal the pre filter, then wait until the post filter hands the
 *      (possibly CPU-filtered) frame back;
 *   4. copy it into the NV12 output surface and signal post-work.
 */
int process_filter(qsv_filter_task_t* task, void* params){
    mfxStatus sts = MFX_ERR_NONE;

    if (MFX_ERR_NONE != (sts = lock_frame(task->processor.alloc, task->in))) return sts;
    if (MFX_ERR_NONE != (sts = lock_frame(task->processor.alloc, task->out)))
    {
        unlock_frame(task->processor.alloc, task->in);
        return sts;
    }

    qsv_nv12_to_yuv420(task->pv->sws_context_from_nv12, task->pv->pre.out, task->in, task->processor.core);

    // signal: input is prepared, converted from pipeline into internal buffer
    hb_lock(task->pv->pre.frame_completed_lock);
    task->pv->pre.frame_go = 1;
    hb_cond_broadcast(task->pv->pre.frame_completed);
    hb_unlock(task->pv->pre.frame_completed_lock);

    // wait: input is prepared, converted from pipeline into internal buffer
    hb_lock(task->pv->post.frame_completed_lock);
    while (!task->pv->post.frame_go){
        hb_cond_timedwait(task->pv->post.frame_completed, task->pv->post.frame_completed_lock, 1000);
        if (*task->pv->job->die)
            break;
    }
    task->pv->post.frame_go = 0;
    hb_unlock(task->pv->post.frame_completed_lock);

// this is just a simple fun/test case
#if 0
    {
        int i = 0;
        char *cur_line;
        char* luma = task->pv->post.in->plane[0].data;
        int pitch  = task->pv->post.in->plane[0].stride;
        int h      = task->pv->post.in->plane[0].height;
        int w      = task->pv->post.in->plane[0].width;
        for (i = 0; i < h; i++){
            cur_line = luma + i * pitch;
            if (i > h/4 && i < 3*h/4 && i % 5 == 0 )
                memset(cur_line, 0 , w );
        }
    }
#endif

    if (task->pv->post.in)
    {
        qsv_yuv420_to_nv12(task->pv->sws_context_to_nv12, task->out, task->pv->post.in);
    }

    // signal: output is prepared, converted from internal buffer into pipeline
    hb_lock(task->pv->post_busy.frame_completed_lock);
    task->pv->post_busy.frame_go = 1;
    hb_cond_broadcast(task->pv->post_busy.frame_completed);
    hb_unlock(task->pv->post_busy.frame_completed_lock);

    unlock_frame(task->processor.alloc, task->in);
    unlock_frame(task->processor.alloc, task->out);

    return sts;
}
#endif // USE_QSV
HandBrake-0.10.2/libhb/platform/ 0000775 0001752 0001752 00000000000 12535641635 016755 5 ustar handbrake handbrake HandBrake-0.10.2/libhb/platform/macosx/ 0000775 0001752 0001752 00000000000 12535641635 020247 5 ustar handbrake handbrake HandBrake-0.10.2/libhb/platform/macosx/encca_aac.c 0000664 0001752 0001752 00000037705 12463330511 022270 0 ustar handbrake handbrake /* encca_aac.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: <http://handbrake.fr/>.
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
#include "audio_remap.h"
#include <AudioToolbox/AudioToolbox.h>
#include <CoreAudio/CoreAudio.h>
enum AAC_MODE { AAC_MODE_LC, AAC_MODE_HE };
int encCoreAudioInitLC(hb_work_object_t*, hb_job_t*);
int encCoreAudioInitHE(hb_work_object_t*, hb_job_t*);
int encCoreAudioInit(hb_work_object_t*, hb_job_t*, enum AAC_MODE mode);
int encCoreAudioWork(hb_work_object_t*, hb_buffer_t**, hb_buffer_t**);
void encCoreAudioClose(hb_work_object_t*);
/* AAC-LC audio encoder work object (Apple Core Audio / AudioToolbox) */
hb_work_object_t hb_encca_aac =
{
    WORK_ENC_CA_AAC,
    "AAC encoder (Apple)",
    encCoreAudioInitLC,   // init
    encCoreAudioWork,     // work
    encCoreAudioClose     // close
};
/* HE-AAC audio encoder work object (Apple Core Audio / AudioToolbox) */
hb_work_object_t hb_encca_haac =
{
    WORK_ENC_CA_HAAC,
    "HE-AAC encoder (Apple)",
    encCoreAudioInitHE,   // init
    encCoreAudioWork,     // work
    encCoreAudioClose     // close
};
/* Private Core Audio encoder state shared between init/work/close. */
struct hb_work_private_s
{
    uint8_t *buf;                // bounce buffer handed to the input callback
    hb_job_t *job;
    hb_list_t *list;             // fifo of raw float PCM awaiting encoding
    AudioConverterRef converter; // the Core Audio AAC encoder instance
    // isamples:   frames per encoded packet (from output.mFramesPerPacket)
    // isamplesiz: bytes per input packet (one frame, all channels)
    // omaxpacket: maximum encoded output packet size, in bytes
    // nchannels:  discrete output channel count
    unsigned long isamples, isamplesiz, omaxpacket, nchannels;
    // samples: running sample counter (presumably used by Encode — confirm);
    // ibytes:  input bytes still pending for the current conversion
    uint64_t samples, ibytes;
    Float64 osamplerate;         // actual output rate chosen by Core Audio
    hb_audio_remap_t *remap;     // channel-order remap to the AAC layout
};
// MPEG-4 Systems (ISO/IEC 14496-1) descriptor tags found in an ESDS blob
#define MP4ESDescrTag 0x03
#define MP4DecConfigDescrTag 0x04
#define MP4DecSpecificDescrTag 0x05
// based off of mov_mp4_read_descr_len from mov.c in ffmpeg's libavformat
// based off of mov_mp4_read_descr_len from mov.c in ffmpeg's libavformat
// Decodes a variable-length MP4 descriptor length: up to four bytes of
// seven payload bits each; a set MSB means "another byte follows".
// Advances *buffer past the consumed bytes.
static int readDescrLen(UInt8 **buffer)
{
    int len = 0;
    int i;
    for (i = 0; i < 4; i++)
    {
        int c = *(*buffer)++;
        len = (len << 7) | (c & 0x7f);
        if (!(c & 0x80))
            break;
    }
    return len;
}
// based off of mov_mp4_read_descr from mov.c in ffmpeg's libavformat
// based off of mov_mp4_read_descr from mov.c in ffmpeg's libavformat
// Reads a descriptor header: one tag byte (stored in *tag) followed by
// its variable-length size, which is returned. Advances *buffer.
static int readDescr(UInt8 **buffer, int *tag)
{
    UInt8 *p = *buffer;
    *tag = *p++;
    *buffer = p;
    return readDescrLen(buffer);
}
// based off of mov_read_esds from mov.c in ffmpeg's libavformat
// based off of mov_read_esds from mov.c in ffmpeg's libavformat
// Walks an ESDS blob and copies out the DecoderSpecific info
// (AudioSpecificConfig) into a freshly allocated *buffer (caller frees),
// storing its length in *size. *size stays 0 when nothing was found.
// Always returns noErr.
static long ReadESDSDescExt(void* descExt, UInt8 **buffer, UInt32 *size, int versionFlags)
{
    UInt8 *esds = (UInt8*)descExt;
    int tag, len;
    *size = 0;

    if (versionFlags)
        esds += 4; // version + flags

    readDescr(&esds, &tag);
    esds += 2;     // ID
    if (tag == MP4ESDescrTag)
        esds++;    // priority

    readDescr(&esds, &tag);
    if (tag == MP4DecConfigDescrTag)
    {
        esds++;    // object type id
        esds++;    // stream type
        esds += 3; // buffer size db
        esds += 4; // max bitrate
        esds += 4; // average bitrate

        len = readDescr(&esds, &tag);
        if (tag == MP4DecSpecificDescrTag)
        {
            *buffer = calloc(1, len + 8);
            if (*buffer)
            {
                memcpy(*buffer, esds, len);
                *size = len;
            }
        }
    }

    return noErr;
}
/***********************************************************************
* hb_work_encCoreAudio_init switches
***********************************************************************
*
**********************************************************************/
/* Thin wrapper: initialize the Core Audio encoder in AAC-LC mode. */
int encCoreAudioInitLC(hb_work_object_t *w, hb_job_t *job)
{
    return encCoreAudioInit(w, job, AAC_MODE_LC);
}
/* Thin wrapper: initialize the Core Audio encoder in HE-AAC mode. */
int encCoreAudioInitHE(hb_work_object_t *w, hb_job_t *job)
{
    return encCoreAudioInit(w, job, AAC_MODE_HE);
}
/***********************************************************************
* hb_work_encCoreAudio_init
***********************************************************************
*
**********************************************************************/
/*
 * Create and configure the Core Audio AAC encoder.
 *
 * Sets up an AudioConverter from interleaved native-endian float PCM to
 * AAC-LC or HE-AAC, configures bitrate (CBR-ish constrained VBR) or true
 * VBR quality, reads back the negotiated stream descriptions, prepares
 * channel remapping, and extracts the DecoderSpecific info from the
 * magic cookie into w->config->extradata.
 *
 * Returns 0 on success, -1 on fatal converter-creation failure (also
 * sets *job->die), 1 on internal configuration errors.
 */
int encCoreAudioInit(hb_work_object_t *w, hb_job_t *job, enum AAC_MODE mode)
{
    hb_work_private_t *pv = calloc(1, sizeof(hb_work_private_t));
    hb_audio_t *audio = w->audio;
    AudioStreamBasicDescription input, output;
    UInt32 tmp, tmpsiz = sizeof(tmp);
    OSStatus err;

    w->private_data = pv;
    pv->job = job;

    // pass the number of channels used into the private work data
    pv->nchannels =
        hb_mixdown_get_discrete_channel_count(audio->config.out.mixdown);

    // input: interleaved 32-bit native-endian float PCM
    bzero(&input, sizeof(AudioStreamBasicDescription));
    input.mSampleRate = (Float64)audio->config.out.samplerate;
    input.mFormatID = kAudioFormatLinearPCM;
    input.mFormatFlags = (kLinearPCMFormatFlagIsFloat|kAudioFormatFlagsNativeEndian);
    input.mBytesPerPacket = 4 * pv->nchannels;
    input.mFramesPerPacket = 1;
    input.mBytesPerFrame = input.mBytesPerPacket * input.mFramesPerPacket;
    input.mChannelsPerFrame = pv->nchannels;
    input.mBitsPerChannel = 32;

    bzero(&output, sizeof(AudioStreamBasicDescription));
    switch (mode)
    {
        case AAC_MODE_HE:
            output.mFormatID = kAudioFormatMPEG4AAC_HE;
            break;
        case AAC_MODE_LC:
        default:
            output.mFormatID = kAudioFormatMPEG4AAC;
            break;
    }
    output.mSampleRate = (Float64)audio->config.out.samplerate;
    output.mChannelsPerFrame = pv->nchannels;
    // let CoreAudio decide the rest

    // initialise encoder
    err = AudioConverterNew(&input, &output, &pv->converter);
    if (err != noErr)
    {
        // Retry without the samplerate
        bzero(&output, sizeof(AudioStreamBasicDescription));
        switch (mode)
        {
            case AAC_MODE_HE:
                output.mFormatID = kAudioFormatMPEG4AAC_HE;
                break;
            case AAC_MODE_LC:
            default:
                output.mFormatID = kAudioFormatMPEG4AAC;
                break;
        }
        output.mChannelsPerFrame = pv->nchannels;

        err = AudioConverterNew(&input, &output, &pv->converter);

        if (err != noErr)
        {
            hb_log("Error creating an AudioConverter err=%"PRId64" output.mBytesPerFrame=%"PRIu64"",
                   (int64_t)err, (uint64_t)output.mBytesPerFrame);
            *job->done_error = HB_ERROR_UNKNOWN;
            *job->die = 1;
            return -1;
        }
    }

    // set encoder quality to maximum
    tmp = kAudioConverterQuality_Max;
    AudioConverterSetProperty(pv->converter, kAudioConverterCodecQuality,
                              sizeof(tmp), &tmp);

    if (audio->config.out.bitrate > 0)
    {
        // set encoder bitrate control mode to constrained variable
        tmp = kAudioCodecBitRateControlMode_VariableConstrained;
        AudioConverterSetProperty(pv->converter,
                                  kAudioCodecPropertyBitRateControlMode,
                                  sizeof(tmp), &tmp);

        // get available bitrates
        AudioValueRange *bitrates;
        ssize_t bitrateCounts;
        err = AudioConverterGetPropertyInfo(pv->converter,
                                            kAudioConverterApplicableEncodeBitRates,
                                            &tmpsiz, NULL);
        bitrates = malloc(tmpsiz);
        err = AudioConverterGetProperty(pv->converter,
                                        kAudioConverterApplicableEncodeBitRates,
                                        &tmpsiz, bitrates);
        bitrateCounts = tmpsiz / sizeof(AudioValueRange);

        // set bitrate: clamp to the converter's supported range
        tmp = audio->config.out.bitrate * 1000;
        if (tmp < bitrates[0].mMinimum)
            tmp = bitrates[0].mMinimum;
        if (tmp > bitrates[bitrateCounts-1].mMinimum)
            tmp = bitrates[bitrateCounts-1].mMinimum;
        free(bitrates);
        if (tmp != audio->config.out.bitrate * 1000)
        {
            hb_log("encCoreAudioInit: sanitizing track %d audio bitrate %d to %"PRIu32"",
                   audio->config.out.track, audio->config.out.bitrate, tmp / 1000);
        }
        AudioConverterSetProperty(pv->converter,
                                  kAudioConverterEncodeBitRate,
                                  sizeof(tmp), &tmp);
    }
    else if (audio->config.out.quality >= 0)
    {
        // true VBR is only supported in AAC-LC mode
        if (mode != AAC_MODE_LC)
        {
            hb_error("encCoreAudioInit: internal error, VBR set but not applicable");
            return 1;
        }
        // set encoder bitrate control mode to variable
        tmp = kAudioCodecBitRateControlMode_Variable;
        AudioConverterSetProperty(pv->converter,
                                  kAudioCodecPropertyBitRateControlMode,
                                  sizeof(tmp), &tmp);

        // set quality
        tmp = audio->config.out.quality;
        AudioConverterSetProperty(pv->converter,
                                  kAudioCodecPropertySoundQualityForVBR,
                                  sizeof(tmp), &tmp);
    }
    else
    {
        hb_error("encCoreAudioInit: internal error, bitrate/quality not set");
        return 1;
    }

    // get real input
    tmpsiz = sizeof(input);
    AudioConverterGetProperty(pv->converter,
                              kAudioConverterCurrentInputStreamDescription,
                              &tmpsiz, &input);
    // get real output
    tmpsiz = sizeof(output);
    AudioConverterGetProperty(pv->converter,
                              kAudioConverterCurrentOutputStreamDescription,
                              &tmpsiz, &output);

    // set sizes
    pv->isamplesiz  = input.mBytesPerPacket;
    pv->isamples    = output.mFramesPerPacket;
    pv->osamplerate = output.mSampleRate;
    audio->config.out.samples_per_frame = pv->isamples;

    // channel remapping
    pv->remap = hb_audio_remap_init(AV_SAMPLE_FMT_FLT, &hb_aac_chan_map,
                                    audio->config.in.channel_map);
    if (pv->remap == NULL)
    {
        hb_error("encCoreAudioInit: hb_audio_remap_init() failed");
    }
    uint64_t layout = hb_ff_mixdown_xlat(audio->config.out.mixdown, NULL);
    hb_audio_remap_set_channel_layout(pv->remap, layout);

    // get maximum output size
    AudioConverterGetProperty(pv->converter,
                              kAudioConverterPropertyMaximumOutputPacketSize,
                              &tmpsiz, &tmp);
    pv->omaxpacket = tmp;

    // get magic cookie (elementary stream descriptor)
    tmp = HB_CONFIG_MAX_SIZE;
    AudioConverterGetProperty(pv->converter,
                              kAudioConverterCompressionMagicCookie,
                              &tmp, w->config->extradata.bytes);
    // CoreAudio returns a complete ESDS, but we only need
    // the DecoderSpecific info.
    UInt8* buffer = NULL;
    ReadESDSDescExt(w->config->extradata.bytes, &buffer, &tmpsiz, 0);
    w->config->extradata.length = tmpsiz;
    memmove(w->config->extradata.bytes, buffer, w->config->extradata.length);
    free(buffer);

    pv->list = hb_list_init();
    pv->buf = NULL;

    return 0;
}
/***********************************************************************
 * Close
 ***********************************************************************
 * Tear down the CoreAudio encoder: dispose of the converter, release
 * the scratch buffer and channel remapper, and drain the input list.
 **********************************************************************/
void encCoreAudioClose(hb_work_object_t *w)
{
    hb_work_private_t *pv = w->private_data;

    if (pv == NULL)
    {
        return; // never initialized; nothing to release
    }
    if (pv->converter)
    {
        AudioConverterDispose(pv->converter);
    }
    // free(NULL) is a no-op, so no guard is needed here
    free(pv->buf);
    if (pv->remap != NULL)
    {
        hb_audio_remap_free(pv->remap);
    }
    hb_list_empty(&pv->list);
    free(pv);
    w->private_data = NULL;
}
/*
 * Input callback invoked by AudioConverterFillComplexBuffer whenever the
 * converter needs more raw samples.
 *
 * Hands the converter up to *npackets packets (one packet here is one
 * sample frame of pv->isamplesiz bytes) copied out of pv->list, limited
 * by the pv->ibytes budget Encode() set for this call.  Returns a
 * non-zero status (1) with *npackets == 0 to signal "no more input for
 * now"; Encode() treats that status as expected, not as an error.
 */
static OSStatus inInputDataProc(AudioConverterRef converter, UInt32 *npackets,
                                AudioBufferList *buffers,
                                AudioStreamPacketDescription **ignored,
                                void *userdata)
{
    hb_work_private_t *pv = userdata;

    if (!pv->ibytes)
    {
        // This call's input budget is exhausted.
        *npackets = 0;
        return 1;
    }

    // The buffer handed out on the previous callback is no longer needed.
    if (pv->buf != NULL)
    {
        free(pv->buf);
    }

    // Give the converter the smaller of what it asked for and what
    // remains of the input budget.
    buffers->mBuffers[0].mDataByteSize = MIN(pv->ibytes,
                                             pv->isamplesiz * *npackets);
    pv->buf = calloc(1, buffers->mBuffers[0].mDataByteSize);
    buffers->mBuffers[0].mData = pv->buf;

    if (hb_list_bytes(pv->list) >= buffers->mBuffers[0].mDataByteSize)
    {
        hb_list_getbytes(pv->list, buffers->mBuffers[0].mData,
                         buffers->mBuffers[0].mDataByteSize, NULL, NULL);
    }
    else
    {
        // Not enough buffered input to satisfy the request; stop here.
        *npackets = 0;
        return 1;
    }

    *npackets = buffers->mBuffers[0].mDataByteSize / pv->isamplesiz;
    pv->ibytes -= buffers->mBuffers[0].mDataByteSize;

    // Reorder channels from the source layout into the layout set up by
    // hb_audio_remap_init/hb_audio_remap_set_channel_layout at init.
    hb_audio_remap(pv->remap, (uint8_t**)(&buffers->mBuffers[0].mData),
                   (int)(*npackets));

    return noErr;
}
/***********************************************************************
 * Encode
 ***********************************************************************
 * Pull one encoded packet out of the converter.  Returns NULL when a
 * full input frame is not yet buffered, or when the converter produced
 * no output; otherwise returns a buffer stamped with 90kHz timestamps
 * derived from the running output sample count.
 **********************************************************************/
static hb_buffer_t* Encode(hb_work_object_t *w)
{
    hb_work_private_t *pv = w->private_data;
    UInt32 npackets = 1;

    /* check if we need more data */
    if ((pv->ibytes = hb_list_bytes(pv->list)) < pv->isamples * pv->isamplesiz)
    {
        return NULL;
    }

    hb_buffer_t *obuf;
    AudioStreamPacketDescription odesc = { 0 };
    AudioBufferList obuflist =
    {
        .mNumberBuffers = 1,
        .mBuffers = { { .mNumberChannels = pv->nchannels } },
    };

    obuf = hb_buffer_init(pv->omaxpacket);
    obuflist.mBuffers[0].mDataByteSize = obuf->size;
    obuflist.mBuffers[0].mData = obuf->data;

    // Pulls input via inInputDataProc; err == 1 is the callback's own
    // "out of input" sentinel and is not a real failure.
    OSStatus err = AudioConverterFillComplexBuffer(pv->converter,
                                                   inInputDataProc, pv,
                                                   &npackets, &obuflist, &odesc);

    if (err != noErr && err != 1)
    {
        hb_log("encCoreAudio: unexpected error in AudioConverterFillComplexBuffer()");
    }
    // only drop the output buffer if it's actually empty
    if (!npackets || odesc.mDataByteSize <= 0)
    {
        hb_log("encCoreAudio: 0 packets returned");
        // BUGFIX: release the output buffer allocated above; the
        // original returned without freeing it, leaking one buffer
        // per empty converter pass.
        hb_buffer_close(&obuf);
        return NULL;
    }

    obuf->size = odesc.mDataByteSize;
    // Timestamps in 90kHz ticks from the cumulative output sample count.
    obuf->s.start = 90000LL * pv->samples / pv->osamplerate;
    pv->samples += pv->isamples;
    obuf->s.stop = 90000LL * pv->samples / pv->osamplerate;
    obuf->s.duration = (double)90000 * pv->isamples / pv->osamplerate;
    obuf->s.type = AUDIO_BUF;
    obuf->s.frametype = HB_FRAME_AUDIO;

    return obuf;
}
/*
 * Drain the encoder at end of stream: zero-pad any leftover samples out
 * to one full input frame, encode every complete frame remaining in the
 * list, and append the EOF marker buffer 'bufin' to the end of the
 * resulting chain.  Returns the head of that chain (just 'bufin' when
 * nothing was left to encode).
 */
static hb_buffer_t* Flush(hb_work_object_t *w, hb_buffer_t *bufin)
{
    hb_work_private_t *pv = w->private_data;

    // Pad the buffered data out to a whole input frame so the final
    // partial frame is not dropped.  (pad is one frame's worth of bytes
    // minus what is already buffered.)
    int nbytes = hb_list_bytes(pv->list);
    int pad = pv->isamples * pv->isamplesiz - nbytes;
    if (pad > 0)
    {
        hb_buffer_t *tmp = hb_buffer_init(pad);
        memset(tmp->data, 0, pad);
        hb_list_add(pv->list, tmp);
    }

    hb_buffer_t *bufout = NULL, *buf = NULL;
    while (hb_list_bytes(pv->list) >= pv->isamples * pv->isamplesiz)
    {
        hb_buffer_t *b = Encode(w);
        if (b != NULL)
        {
            if (bufout == NULL)
            {
                bufout = b;     // first packet becomes the chain head
            }
            else
            {
                buf->next = b;
            }
            buf = b;            // 'buf' tracks the chain tail
        }
    }
    // add the eof marker to the end of our buf chain
    if (buf != NULL)
    {
        buf->next = bufin;
    }
    else
    {
        bufout = bufin;
    }
    return bufout;
}
/***********************************************************************
 * Work
 ***********************************************************************
 * Queue incoming PCM on the input list and emit every AAC packet the
 * converter can currently produce.  A zero-size input buffer is EOF:
 * the remaining samples are flushed and the EOS buffer forwarded.
 **********************************************************************/
int encCoreAudioWork(hb_work_object_t *w, hb_buffer_t **buf_in,
                     hb_buffer_t **buf_out)
{
    hb_work_private_t *pv = w->private_data;
    hb_buffer_t *in = *buf_in;

    *buf_in = NULL;
    if (in->size <= 0)
    {
        // EOF on input: encode what is buffered, chain the EOS marker.
        *buf_out = Flush(w, in);
        return HB_WORK_DONE;
    }

    hb_list_add(pv->list, in);

    // Drain the converter: keep appending packets until it runs dry.
    hb_buffer_t *tail = Encode(w);
    *buf_out = tail;
    while (tail != NULL)
    {
        tail->next = Encode(w);
        tail = tail->next;
    }
    return HB_WORK_OK;
}
HandBrake-0.10.2/libhb/dectx3gsub.c 0000664 0001752 0001752 00000020172 12463330511 017336 0 ustar handbrake handbrake /* dectx3gsub.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
/*
* Converts TX3G subtitles to UTF-8 subtitles with limited HTML-style markup (, , ).
*
* TX3G == MPEG 4, Part 17 (ISO/IEC 14496-17) == 3GPP Timed Text (26.245)
* A full reference to the format can be found here:
* http://www.3gpp.org/ftp/Specs/html-info/26245.htm
*
* @author David Foster (davidfstr)
*/
#include
#include
#include "hb.h"
#include "colormap.h"
// Per-stream decoder state.
struct hb_work_private_s
{
    int line;   // SSA line number, incremented per emitted subtitle event
};

// Bit flags from the TX3G StyleRecord face-style field.
typedef enum {
    BOLD        = 0x1,
    ITALIC      = 0x2,
    UNDERLINE   = 0x4
} FaceStyleFlag;

// Worst-case bytes one write_ssa_markup() call can emit.
#define MAX_MARKUP_LEN 40
// Byte budget for the "%d,,Default,,0,0,0,," SSA event prefix.
#define SSA_PREAMBLE_LEN 24

// One TX3G StyleRecord as read from a 'styl' TextSampleModifierBox.
typedef struct {
    uint16_t startChar; // NOTE: indices in terms of *character* (not: byte) positions
    uint16_t endChar;
    uint16_t fontID;
    uint8_t faceStyleFlags;  // FaceStyleFlag
    uint8_t fontSize;
    uint32_t textColorRGBA;
} StyleRecord;

// Big-endian field readers advancing the 'pos' cursor.
// NOTE: None of these macros check for buffer overflow
#define READ_U8()   *pos;                                                   pos += 1;
#define READ_U16()  (pos[0] << 8) | pos[1];                                 pos += 2;
#define READ_U32()  (pos[0] << 24) | (pos[1] << 16) | (pos[2] << 8) | pos[3]; pos += 4;
#define READ_ARRAY(n)   pos;                                                pos += n;
#define SKIP_ARRAY(n)   pos += n;

// Append one byte at the 'dst' output cursor.
#define WRITE_CHAR(c)   {dst[0]=c; dst += 1;}

// Pack a 4-character box type into a big-endian 32-bit tag.
#define FOURCC(str) ((((uint32_t) str[0]) << 24) | \
                     (((uint32_t) str[1]) << 16) | \
                     (((uint32_t) str[2]) << 8) |  \
                     (((uint32_t) str[3]) << 0))

// True for UTF-8 continuation bytes (10xxxxxx).
#define IS_10xxxxxx(c)  ((c & 0xC0) == 0x80)
/*
 * Write an SSA inline-style override block for 'style' into dst; a NULL
 * style writes the reset tag "{\r}".  Returns the number of bytes
 * written (excluding the terminating NUL).  dst must have room for at
 * least MAX_MARKUP_LEN bytes.
 */
static int write_ssa_markup(char *dst, StyleRecord *style)
{
    if (style == NULL)
    {
        // Revert all inline styling back to the event's default style.
        return sprintf(dst, "{\\r}");
    }
    int italic    = !!(style->faceStyleFlags & ITALIC);
    int bold      = !!(style->faceStyleFlags & BOLD);
    int underline = !!(style->faceStyleFlags & UNDERLINE);
    // SSA alpha is inverted 0==opaque
    return sprintf(dst, "{\\i%d\\b%d\\u%d\\1c&H%X&\\1a&H%02X&}",
                   italic, bold, underline,
                   HB_RGB_TO_BGR(style->textColorRGBA >> 8),
                   255 - (style->textColorRGBA & 0xFF));
}
/*
 * Convert one TX3G TextSample packet into a single SSA subtitle event.
 *
 * Packet layout: 16-bit text length, the UTF-8 text, then a sequence of
 * TextSampleModifierBoxes.  Only one StyleBox ('styl') is honored; all
 * other box types are skipped.  Returns a new SSA buffer, or NULL when
 * the sample contains no visible text or the output buffer cannot be
 * allocated.
 */
static hb_buffer_t *tx3g_decode_to_ssa(hb_work_private_t *pv, hb_buffer_t *in)
{
    uint8_t *pos = in->data;
    uint8_t *end = in->data + in->size;

    uint16_t numStyleRecords = 0;
    StyleRecord *styleRecords = NULL;

    /*
     * Parse the packet as a TX3G TextSample.
     *
     * Look for a single StyleBox ('styl') and read all contained StyleRecords.
     * Ignore all other box types.
     *
     * NOTE: Buffer overflows on read are not checked.
     */
    uint16_t textLength = READ_U16();
    uint8_t *text = READ_ARRAY(textLength);
    while ( pos < end )
    {
        /*
         * Read TextSampleModifierBox
         */
        uint32_t size = READ_U32();
        if ( size == 0 )
        {
            // BUGFIX: a zero box size means "extends to the end of the
            // packet".  The remaining byte count is end - pos; the
            // original 'pos - end' underflowed to a huge unsigned value.
            size = end - pos;
        }
        if ( size == 1 )
        {
            hb_log( "dectx3gsub: TextSampleModifierBox has unsupported large size" );
            break;
        }
        uint32_t type = READ_U32();
        if (type == FOURCC("uuid"))
        {
            hb_log( "dectx3gsub: TextSampleModifierBox has unsupported extended type" );
            break;
        }

        if (type == FOURCC("styl"))
        {
            // Found a StyleBox. Parse the contained StyleRecords
            if ( numStyleRecords != 0 )
            {
                hb_log( "dectx3gsub: found additional StyleBoxes on subtitle; skipping" );
                SKIP_ARRAY(size);
                continue;
            }

            numStyleRecords = READ_U16();
            if (numStyleRecords > 0)
            {
                styleRecords = calloc(numStyleRecords, sizeof(StyleRecord));
                if (styleRecords == NULL)
                {
                    // BUGFIX: the original dereferenced NULL on allocation
                    // failure; render the text unstyled instead.
                    numStyleRecords = 0;
                    break;
                }
            }
            int i;
            for (i = 0; i < numStyleRecords; i++)
            {
                styleRecords[i].startChar      = READ_U16();
                styleRecords[i].endChar        = READ_U16();
                styleRecords[i].fontID         = READ_U16();
                styleRecords[i].faceStyleFlags = READ_U8();
                styleRecords[i].fontSize       = READ_U8();
                styleRecords[i].textColorRGBA  = READ_U32();
            }
        }
        else
        {
            // Found some other kind of TextSampleModifierBox. Skip it.
            SKIP_ARRAY(size);
        }
    }

    /*
     * Copy text to output buffer, and add SSA markup for the style records
     */
    int maxOutputSize = textLength + SSA_PREAMBLE_LEN + (numStyleRecords * MAX_MARKUP_LEN);
    hb_buffer_t *out = hb_buffer_init( maxOutputSize );
    if ( out == NULL )
        goto fail;

    uint8_t *dst = out->data;
    uint8_t *start;
    int charIndex = 0;
    int styleIndex = 0;

    // SSA event prefix carrying the running line number.
    sprintf((char*)dst, "%d,,Default,,0,0,0,,", pv->line);
    dst += strlen((char*)dst);
    start = dst;
    for (pos = text, end = text + textLength; pos < end; pos++)
    {
        if (IS_10xxxxxx(*pos))
        {
            // Is a non-first byte of a multi-byte UTF-8 character
            WRITE_CHAR(*pos);
            continue; // ...without incrementing 'charIndex'
        }
        if (styleIndex < numStyleRecords)
        {
            if (styleRecords[styleIndex].endChar == charIndex)
            {
                if (styleIndex + 1 >= numStyleRecords ||
                    styleRecords[styleIndex+1].startChar > charIndex)
                {
                    // No adjacent style starts here; reset to defaults.
                    dst += write_ssa_markup((char*)dst, NULL);
                }
                styleIndex++;
            }
            // BUGFIX: re-check the bound after the increment above; the
            // original read one element past the end of styleRecords.
            if (styleIndex < numStyleRecords &&
                styleRecords[styleIndex].startChar == charIndex)
            {
                dst += write_ssa_markup((char*)dst, &styleRecords[styleIndex]);
            }
        }
        if (*pos == '\n')
        {
            // SSA uses "\N" for a hard line break.
            WRITE_CHAR('\\');
            WRITE_CHAR('N');
        }
        else
        {
            WRITE_CHAR(*pos);
        }
        charIndex++;
    }
    if (start == dst)
    {
        // No text in the subtitle. This sub is just filler, drop it.
        free(styleRecords);
        hb_buffer_close(&out);
        return NULL;
    }
    *dst = '\0';
    dst++;

    // Trim output buffer to the actual amount of data written
    out->size = dst - out->data;

    // Copy metadata from the input packet to the output packet
    out->s.frametype = HB_FRAME_SUBTITLE;
    out->s.start = in->s.start;
    out->s.stop = in->s.stop;

fail:
    free(styleRecords);
    return out;
}
#undef READ_U8
#undef READ_U16
#undef READ_U32
#undef READ_ARRAY
#undef SKIP_ARRAY
#undef WRITE_CHAR
#undef WRITE_START_TAG
#undef WRITE_END_TAG
/*
 * Allocate decoder state and install a generic SSA Script Info header
 * sized to the cropped title dimensions.  Returns 1 on allocation
 * failure, 0 on success.
 */
static int dectx3gInit( hb_work_object_t * w, hb_job_t * job )
{
    hb_work_private_t *pv = calloc(1, sizeof(*pv));

    if (pv == NULL)
    {
        return 1;
    }
    w->private_data = pv;

    // TODO:
    // parse w->subtitle->extradata txg3 sample description into
    // SSA format and replace extradata.
    // For now we just create a generic SSA Script Info.

    // crop[0]/crop[1] trim the height, crop[2]/crop[3] the width.
    int ssa_height = job->title->height - job->crop[0] - job->crop[1];
    int ssa_width  = job->title->width  - job->crop[2] - job->crop[3];
    hb_subtitle_add_ssa_header(w->subtitle, ssa_width, ssa_height);
    return 0;
}
/*
 * Decode one TX3G packet to SSA.  A zero-size input is the EOS marker
 * and is passed straight through; tx3g_decode_to_ssa() may legitimately
 * return NULL for empty "filler" subtitles.
 */
static int dectx3gWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                        hb_buffer_t ** buf_out )
{
    hb_work_private_t *pv = w->private_data;
    hb_buffer_t *in = *buf_in;

    if (in->s.stop == 0)
    {
        hb_log( "dectx3gsub: subtitle packet lacks duration" );
    }

    if (in->size == 0)
    {
        // EOS: forward the empty buffer and take ownership of the input.
        *buf_in  = NULL;
        *buf_out = in;
        return HB_WORK_DONE;
    }

    *buf_out = tx3g_decode_to_ssa(pv, in);
    return HB_WORK_OK;
}
/*
 * Release the decoder's private state.  Also clears the pointer so a
 * stray double-close cannot free it twice, matching the pattern used by
 * other work objects in this source (e.g. encCoreAudioClose).
 */
static void dectx3gClose( hb_work_object_t * w )
{
    free(w->private_data);
    w->private_data = NULL;
}
// Work-object descriptor registering the TX3G subtitle decoder.
hb_work_object_t hb_dectx3gsub =
{
    WORK_DECTX3GSUB,
    "TX3G Subtitle Decoder",
    dectx3gInit,
    dectx3gWork,
    dectx3gClose
};
HandBrake-0.10.2/libhb/deinterlace.c 0000664 0001752 0001752 00000046670 12265031673 017564 0 ustar handbrake handbrake /*
Copyright (C) 2006 Michael Niedermayer
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "hb.h"
#include "hbffmpeg.h"
#include "taskset.h"
// yadif_mode is a bit vector with the following flags
#define MODE_YADIF_ENABLE 1
#define MODE_YADIF_SPATIAL 2
#define MODE_YADIF_2PASS 4
#define MODE_YADIF_BOB 8
#define YADIF_MODE_DEFAULT 0
#define YADIF_PARITY_DEFAULT -1
#define ABS(a) ((a) > 0 ? (a) : (-(a)))
#define MIN3(a,b,c) MIN(MIN(a,b),c)
#define MAX3(a,b,c) MAX(MAX(a,b),c)
// Per-segment work for the yadif taskset: one shared output frame; each
// worker thread filters its own horizontal band of rows.
typedef struct yadif_arguments_s {
    hb_buffer_t * dst;
    int parity;
    int tff;
} yadif_arguments_t;

// Per-frame work for the fast-deint taskset: one whole frame per thread.
typedef struct deint_arguments_s {
    hb_buffer_t * src;
    hb_buffer_t * dst;
} deint_arguments_t;

// Startup argument handed to each fast-deint worker thread.
typedef struct deint_thread_arg_s {
    hb_filter_private_t *pv;
    int segment;
} deint_thread_arg_t;

// Filter-wide private state.
struct hb_filter_private_s
{
    int              width;
    int              height;

    int              yadif_mode;        // bit vector of MODE_YADIF_* flags
    int              yadif_parity;      // forced field parity, or -1 for auto
    int              yadif_ready;       // set once the 3-frame ref cache is primed

    hb_buffer_t    * yadif_ref[3];      // prev / cur / next reference frames

    int              cpu_count;
    int              segments;          // worker threads per taskset
    int              deint_nsegs;       // frames queued for the fast-deint batch

    taskset_t        deint_taskset;     // Threads for fast deint
    taskset_t        yadif_taskset;     // Threads for Yadif

    deint_arguments_t *deint_arguments; // Arguments to thread for work
    yadif_arguments_t *yadif_arguments; // Arguments to thread for work
};
static int hb_deinterlace_init( hb_filter_object_t * filter,
                                hb_filter_init_t * init );

static int hb_deinterlace_work( hb_filter_object_t * filter,
                                hb_buffer_t ** buf_in,
                                hb_buffer_t ** buf_out );

static void hb_deinterlace_close( hb_filter_object_t * filter );

// Filter descriptor registering deinterlace with the filter pipeline.
hb_filter_object_t hb_filter_deinterlace =
{
    .id            = HB_FILTER_DEINTERLACE,
    .enforce_order = 1,
    .name          = "Deinterlace",
    .settings      = NULL,
    .init          = hb_deinterlace_init,
    .work          = hb_deinterlace_work,
    .close         = hb_deinterlace_close,
};
/*
 * Push buffer 'b' onto the 3-deep yadif reference queue
 * (prev / cur / next).  The oldest entry is closed and dropped.
 */
static void yadif_store_ref(hb_filter_private_t *pv, hb_buffer_t *b)
{
    hb_buffer_close(&pv->yadif_ref[0]);
    pv->yadif_ref[0] = pv->yadif_ref[1];
    pv->yadif_ref[1] = pv->yadif_ref[2];
    pv->yadif_ref[2] = b;
}
/*
 * Core yadif kernel: compute one deinterlaced output line of one plane
 * from the previous/current/next field lines.  'parity' selects whether
 * prev2/next2 alias the previous or the next frame.  The caller must
 * position the pointers so cur[-2*stride] .. cur[+2*stride] are valid
 * (frame edges are mirrored by the caller).
 */
static void yadif_filter_line(
    hb_filter_private_t * pv,
    uint8_t * dst,
    uint8_t * prev,
    uint8_t * cur,
    uint8_t * next,
    int width,
    int stride,
    int parity)
{
    // prev2/next2 pick which neighbor frame shares the field being built.
    uint8_t *prev2 = parity ? prev : cur ;
    uint8_t *next2 = parity ? cur  : next;
    int x;

    for( x = 0; x < width; x++)
    {
        // c/e: pixels above/below in the current field; d: temporal average.
        int c = cur[-stride];
        int d = (prev2[0] + next2[0])>>1;
        int e = cur[+stride];
        int temporal_diff0 = ABS(prev2[0] - next2[0]);
        int temporal_diff1 = ( ABS(prev[-stride] - c) + ABS(prev[+stride] - e) ) >> 1;
        int temporal_diff2 = ( ABS(next[-stride] - c) + ABS(next[+stride] - e) ) >> 1;
        int diff = MAX3(temporal_diff0>>1, temporal_diff1, temporal_diff2);
        int spatial_pred = (c+e)>>1;
        int spatial_score = ABS(cur[-stride-1] - cur[+stride-1]) + ABS(c-e) +
                            ABS(cur[-stride+1] - cur[+stride+1]) - 1;

        // Edge-directed interpolation: probe diagonals at offsets +/-1 and
        // +/-2 and keep the direction with the lowest score.  The macro
        // deliberately leaves its braces open; they are closed by the
        // "}} }}" tokens after each invocation chain below.
#define YADIF_CHECK(j)\
        {   int score = ABS(cur[-stride-1+j] - cur[+stride-1-j])\
                      + ABS(cur[-stride  +j] - cur[+stride  -j])\
                      + ABS(cur[-stride+1+j] - cur[+stride+1-j]);\
            if( score < spatial_score ){\
                spatial_score = score;\
                spatial_pred = (cur[-stride  +j] + cur[+stride  -j])>>1;\

        YADIF_CHECK(-1) YADIF_CHECK(-2) }} }}
        YADIF_CHECK( 1) YADIF_CHECK( 2) }} }}

        if( pv->yadif_mode & MODE_YADIF_SPATIAL )
        {
            // Spatial interlacing check: widen 'diff' using the lines two
            // rows away so genuinely static detail is preserved.
            int b = (prev2[-2*stride] + next2[-2*stride])>>1;
            int f = (prev2[+2*stride] + next2[+2*stride])>>1;
            int max = MAX3(d-e, d-c, MIN(b-c, f-e));
            int min = MIN3(d-e, d-c, MAX(b-c, f-e));
            diff = MAX3( diff, min, -max );
        }

        // Clamp the spatial prediction to within 'diff' of the temporal one.
        if( spatial_pred > d + diff )
        {
            spatial_pred = d + diff;
        }
        else if( spatial_pred < d - diff )
        {
            spatial_pred = d - diff;
        }

        dst[0] = spatial_pred;

        dst++;
        cur++;
        prev++;
        next++;
        prev2++;
        next2++;
    }
}
// Startup argument handed to each yadif worker thread.
typedef struct yadif_thread_arg_s {
    hb_filter_private_t *pv;
    int segment;
} yadif_thread_arg_t;
/*
 * deinterlace this segment of all three planes in a single thread.
 *
 * Worker loop for one yadif taskset thread: sleeps until yadif_filter()
 * cycles the taskset, filters its band of rows in each plane, reports
 * completion, and repeats until the taskset is stopped.
 */
void yadif_filter_thread( void *thread_args_v )
{
    yadif_arguments_t *yadif_work = NULL;
    hb_filter_private_t * pv;
    int run = 1;
    int segment, segment_start, segment_stop;
    yadif_thread_arg_t *thread_args = thread_args_v;

    pv = thread_args->pv;
    segment = thread_args->segment;

    hb_log("Yadif Deinterlace thread started for segment %d", segment);

    while( run )
    {
        /*
         * Wait here until there is work to do.
         */
        taskset_thread_wait4start( &pv->yadif_taskset, segment );

        if( taskset_thread_stop( &pv->yadif_taskset, segment ) )
        {
            /*
             * No more work to do, exit this thread.
             */
            run = 0;
            goto report_completion;
        }

        yadif_work = &pv->yadif_arguments[segment];

        if( yadif_work->dst == NULL )
        {
            hb_error( "Thread started when no work available" );
            goto report_completion;
        }

        /*
         * Process all three planes, but only this segment of it.
         */
        int pp;
        for(pp = 0; pp < 3; pp++)
        {
            hb_buffer_t *dst = yadif_work->dst;
            int w = dst->plane[pp].width;
            int s = dst->plane[pp].stride;
            int h = dst->plane[pp].height;
            int yy;
            int parity = yadif_work->parity;
            int tff = yadif_work->tff;
            int penultimate = h - 2;

            // Carve the plane into equal bands of even height; the last
            // segment absorbs any leftover rows.
            int segment_height = (h / pv->segments) & ~1;
            segment_start = segment_height * segment;
            if( segment == pv->segments - 1 )
            {
                /*
                 * Final segment
                 */
                segment_stop = h;
            } else {
                segment_stop = segment_height * ( segment + 1 );
            }

            // Walk prev/cur/next reference frames in lockstep with dst.
            uint8_t *dst2 = &dst->plane[pp].data[segment_start * s];
            uint8_t *prev = &pv->yadif_ref[0]->plane[pp].data[segment_start * s];
            uint8_t *cur  = &pv->yadif_ref[1]->plane[pp].data[segment_start * s];
            uint8_t *next = &pv->yadif_ref[2]->plane[pp].data[segment_start * s];

            for( yy = segment_start; yy < segment_stop; yy++ )
            {
                if(((yy ^ parity) & 1))
                {
                    /* This is the bottom field when TFF and vice-versa.
                       It's the field that gets filtered. Because yadif
                       needs 2 lines above and below the one being filtered,
                       we need to mirror the edges. When TFF, this means
                       replacing the 2nd line with a copy of the 1st,
                       and the last with the second-to-last. */
                    if( yy > 1 && yy < penultimate )
                    {
                        /* This isn't the top or bottom,
                         * proceed as normal to yadif. */
                        yadif_filter_line(pv, dst2, prev, cur, next, w, s,
                                          parity ^ tff);
                    }
                    else
                    {
                        // parity == 0 (TFF), y1 = y0
                        // parity == 1 (BFF), y0 = y1
                        // parity == 0 (TFF), yu = yp
                        // parity == 1 (BFF), yp = yu
                        uint8_t *src = &pv->yadif_ref[1]->plane[pp].data[(yy^parity)*s];
                        memcpy(dst2, src, w);
                    }
                }
                else
                {
                    /* Preserve this field unfiltered */
                    memcpy(dst2, cur, w);
                }
                dst2 += s;
                prev += s;
                cur  += s;
                next += s;
            }
        }

report_completion:
        /*
         * Finished this segment, let everyone know.
         */
        taskset_thread_complete( &pv->yadif_taskset, segment );
    }
}
/*
 * Threaded yadif - each thread deinterlaces a single segment of all
 * three planes, where a segment is the frame divided by the number of
 * CPUs.  Publishes the same destination frame and field parameters to
 * every segment, then cycles the taskset.  Blocks until the entire
 * frame has been deinterlaced.
 */
static void yadif_filter( hb_filter_private_t * pv,
                          hb_buffer_t * dst, int parity, int tff)
{
    int ii;

    // Hand identical work descriptors to each worker segment.
    for (ii = 0; ii < pv->segments; ii++)
    {
        yadif_arguments_t *args = &pv->yadif_arguments[ii];
        args->dst    = dst;
        args->parity = parity;
        args->tff    = tff;
    }

    /* Allow the taskset threads to make one pass over the data. */
    taskset_cycle( &pv->yadif_taskset );
    /*
     * Entire frame is now deinterlaced.
     */
}
/*
 * deinterlace a frame in a single thread.
 *
 * Worker loop for one fast-deint taskset thread: sleeps until
 * deint_fast() cycles the taskset, deinterlaces the whole src frame
 * assigned to this segment into dst, reports completion, and repeats
 * until the taskset is stopped.
 */
void deint_filter_thread( void *thread_args_v )
{
    deint_arguments_t *args = NULL;
    hb_filter_private_t * pv;
    int run = 1;
    int segment;
    deint_thread_arg_t *thread_args = thread_args_v;

    pv = thread_args->pv;
    segment = thread_args->segment;

    hb_log("Fast Deinterlace thread started for segment %d", segment);

    while( run )
    {
        /*
         * Wait here until there is work to do.
         */
        taskset_thread_wait4start( &pv->deint_taskset, segment );

        if( taskset_thread_stop( &pv->deint_taskset, segment ) )
        {
            /*
             * No more work to do, exit this thread.
             */
            run = 0;
            goto report_completion;
        }

        args = &pv->deint_arguments[segment];

        if( args->dst == NULL )
        {
            // This can happen when flushing final buffers.
            goto report_completion;
        }

        /*
         * Deinterlace the whole frame assigned to this segment.
         */
        hb_deinterlace(args->dst, args->src);

report_completion:
        /*
         * Finished this segment, let everyone know.
         */
        taskset_thread_complete( &pv->deint_taskset, segment );
    }
}
/*
 * threaded fast deint - each thread deinterlaces a single frame.
 *
 * Frames are queued until a full batch of pv->segments frames has been
 * collected, then the taskset processes the batch in parallel.  Returns
 * the finished batch as a linked buffer chain, or NULL while the batch
 * is still filling.  Call with in == NULL to flush: queued frames are
 * processed and an empty EOS buffer is appended to the returned chain.
 *
 * This function blocks until all frames in the batch are deinterlaced.
 */
static hb_buffer_t * deint_fast(hb_filter_private_t * pv, hb_buffer_t * in)
{
    int ii;
    hb_buffer_t *dst, *src;

    if (in != NULL)
    {
        // Queue this frame with a freshly allocated destination buffer.
        dst = hb_frame_buffer_init(in->f.fmt, in->f.width, in->f.height);
        pv->deint_arguments[pv->deint_nsegs].src = in;
        pv->deint_arguments[pv->deint_nsegs].dst = dst;
        pv->deint_nsegs++;
    }
    if (in != NULL && pv->deint_nsegs < pv->segments)
    {
        // Batch not full yet; wait for more frames.
        return NULL;
    }

    if (pv->deint_nsegs > 0)
    {
        /* Allow the taskset threads to make one pass over the data. */
        taskset_cycle( &pv->deint_taskset );
    }

    // Collect the finished frames into a chain, carrying over the
    // buffer settings and subtitles from each source frame.
    hb_buffer_t *first = NULL, *last = NULL;
    for (ii = 0; ii < pv->deint_nsegs; ii++)
    {
        src = pv->deint_arguments[ii].src;
        dst = pv->deint_arguments[ii].dst;
        pv->deint_arguments[ii].src = NULL;
        pv->deint_arguments[ii].dst = NULL;
        if (first == NULL)
        {
            first = dst;
        }
        if (last != NULL)
        {
            last->next = dst;
        }
        last = dst;

        dst->s = src->s;
        hb_buffer_move_subs(dst, src);
        hb_buffer_close(&src);
    }

    if (in == NULL)
    {
        // Flushing final buffers. Append EOS marker buffer.
        dst = hb_buffer_init(0);
        if (first == NULL)
        {
            first = dst;
        }
        else
        {
            last->next = dst;
        }
    }
    pv->deint_nsegs = 0;

    return first;
}
/*
 * Parse the "mode:parity" settings string and spawn one worker thread
 * per CPU for either the yadif taskset (MODE_YADIF_ENABLE set) or the
 * fast-deint taskset.  Always returns 0; taskset failures are only
 * logged via hb_error.
 */
static int hb_deinterlace_init( hb_filter_object_t * filter,
                                hb_filter_init_t * init )
{
    filter->private_data = calloc( 1, sizeof(struct hb_filter_private_s) );
    hb_filter_private_t * pv = filter->private_data;

    pv->width = init->width;
    pv->height = init->height;

    pv->yadif_ready  = 0;
    pv->yadif_mode   = YADIF_MODE_DEFAULT;
    pv->yadif_parity = YADIF_PARITY_DEFAULT;

    if( filter->settings )
    {
        // Settings format: "<yadif_mode>:<yadif_parity>"
        sscanf( filter->settings, "%d:%d",
                &pv->yadif_mode,
                &pv->yadif_parity);
    }

    pv->cpu_count = hb_get_cpu_count();

    /* Allocate yadif specific buffers */
    if( pv->yadif_mode & MODE_YADIF_ENABLE )
    {
        /*
         * Setup yadif taskset.
         */
        pv->segments = pv->cpu_count;
        pv->yadif_arguments = malloc( sizeof( yadif_arguments_t ) * pv->segments );
        if( pv->yadif_arguments == NULL ||
            taskset_init( &pv->yadif_taskset, /*thread_count*/pv->segments,
                          sizeof( yadif_thread_arg_t ) ) == 0 )
        {
            hb_error( "yadif could not initialize taskset" );
        }

        int ii;
        for( ii = 0; ii < pv->segments; ii++ )
        {
            yadif_thread_arg_t *thread_args;

            thread_args = taskset_thread_args( &pv->yadif_taskset, ii );
            thread_args->pv = pv;
            thread_args->segment = ii;
            // dst == NULL marks "no work available" for the worker.
            pv->yadif_arguments[ii].dst = NULL;

            if( taskset_thread_spawn( &pv->yadif_taskset, ii,
                                      "yadif_filter_segment",
                                      yadif_filter_thread,
                                      HB_NORMAL_PRIORITY ) == 0 )
            {
                hb_error( "yadif could not spawn thread" );
            }
        }
    }
    else
    {
        /*
         * Setup fast deint taskset.
         */
        pv->segments = pv->cpu_count;
        pv->deint_arguments = malloc( sizeof( deint_arguments_t ) * pv->segments );
        if( pv->deint_arguments == NULL ||
            taskset_init( &pv->deint_taskset, pv->segments,
                          sizeof( deint_thread_arg_t ) ) == 0 )
        {
            hb_error( "deint could not initialize taskset" );
        }

        int ii;
        for( ii = 0; ii < pv->segments; ii++ )
        {
            deint_thread_arg_t *thread_args;

            thread_args = taskset_thread_args( &pv->deint_taskset, ii );
            thread_args->pv = pv;
            thread_args->segment = ii;
            // dst == NULL marks "no work available" for the worker.
            pv->deint_arguments[ii].dst = NULL;

            if( taskset_thread_spawn( &pv->deint_taskset, ii,
                                      "deint_filter_segment",
                                      deint_filter_thread,
                                      HB_NORMAL_PRIORITY ) == 0 )
            {
                hb_error( "deint could not spawn thread" );
            }
        }
    }

    return 0;
}
/*
 * Tear down whichever taskset hb_deinterlace_init created (yadif or
 * fast deint), release the per-segment argument arrays and the cached
 * reference frames, then free the private state.
 */
static void hb_deinterlace_close( hb_filter_object_t * filter )
{
    hb_filter_private_t *pv = filter->private_data;

    if (pv == NULL)
    {
        return;
    }

    /* Cleanup yadif specific buffers */
    if (pv->yadif_mode & MODE_YADIF_ENABLE)
    {
        // Stop the yadif workers and drop the 3-frame reference cache.
        taskset_fini( &pv->yadif_taskset );
        int frame;
        for (frame = 0; frame < 3; frame++)
        {
            hb_buffer_close(&pv->yadif_ref[frame]);
        }
        free( pv->yadif_arguments );
    }
    else
    {
        // Fast-deint path owns only its taskset and argument array.
        taskset_fini( &pv->deint_taskset );
        free( pv->deint_arguments );
    }

    free( pv );
    filter->private_data = NULL;
}
/*
 * Filter entry point.  Fast-deint path: frames are batched through
 * deint_fast().  Yadif path: maintains the prev/cur/next reference
 * cache and emits one frame per input frame (or two in 2-pass/bob
 * modes).  Returns HB_FILTER_DELAY while priming the yadif cache and
 * HB_FILTER_DONE on the EOS buffer.
 */
static int hb_deinterlace_work( hb_filter_object_t * filter,
                                hb_buffer_t ** buf_in,
                                hb_buffer_t ** buf_out )
{
    hb_filter_private_t * pv = filter->private_data;
    hb_buffer_t * in = *buf_in;
    hb_buffer_t * last = NULL, * out = NULL;

    if ( in->size <= 0 )
    {
        *buf_out = in;
        *buf_in = NULL;
        if( !( pv->yadif_mode & MODE_YADIF_ENABLE ) )
        {
            // Flush final frames
            *buf_out = deint_fast(pv, NULL);
        }
        return HB_FILTER_DONE;
    }

    /* Use the fast deint path when yadif is not enabled */
    if( !( pv->yadif_mode & MODE_YADIF_ENABLE ) )
    {
        *buf_in = NULL;
        *buf_out = deint_fast(pv, in);
        return HB_FILTER_OK;
    }

    /* Store current frame in yadif cache */
    *buf_in = NULL;
    yadif_store_ref(pv, in);

    // yadif requires 3 buffers, prev, cur, and next. For the first
    // frame, there can be no prev, so we duplicate the first frame.
    if (!pv->yadif_ready)
    {
        // If yadif is not ready, store another ref and return HB_FILTER_DELAY
        yadif_store_ref(pv, hb_buffer_dup(in));
        pv->yadif_ready = 1;
        // Wait for next
        return HB_FILTER_DELAY;
    }

    /* Determine if top-field first layout */
    int tff;
    if( pv->yadif_parity < 0 )
    {
        tff = !!(in->s.flags & PIC_FLAG_TOP_FIELD_FIRST);
    }
    else
    {
        tff = (pv->yadif_parity & 1) ^ 1;
    }

    /* deinterlace both fields if yadif 2 pass or bob */
    int frame, num_frames = 1;
    if ((pv->yadif_mode & MODE_YADIF_2PASS) ||
        (pv->yadif_mode & MODE_YADIF_BOB))
    {
        num_frames = 2;
    }

    // Will need up to 2 buffers simultaneously
    int idx = 0;
    hb_buffer_t * o_buf[2] = {NULL,};

    /* Perform yadif filtering */
    for( frame = 0; frame < num_frames; frame++ )
    {
        int parity = frame ^ tff ^ 1;

        if (o_buf[idx] == NULL)
        {
            o_buf[idx] = hb_frame_buffer_init(in->f.fmt, in->f.width, in->f.height);
        }
        yadif_filter(pv, o_buf[idx], parity, tff);

        // If bob, add both frames
        // else, add only final frame
        if (( pv->yadif_mode & MODE_YADIF_BOB ) || frame == num_frames - 1)
        {
            if ( out == NULL )
            {
                last = out = o_buf[idx];
            }
            else
            {
                last->next = o_buf[idx];
                last = last->next;
            }
            last->next = NULL;

            // Indicate that buffer was consumed
            o_buf[idx] = NULL;

            /* Copy buffered settings to output buffer settings */
            last->s = pv->yadif_ref[1]->s;
            idx ^= 1;
        }
    }

    // Copy subs only to first output buffer
    hb_buffer_move_subs( out, pv->yadif_ref[1] );
    hb_buffer_close(&o_buf[0]);
    hb_buffer_close(&o_buf[1]);

    /* if bob mode is engaged, halve the duration of the
     * timestamps: the first output frame covers the first half of the
     * input frame's interval, the second covers the rest. */
    if (pv->yadif_mode & MODE_YADIF_BOB)
    {
        out->s.stop -= (out->s.stop - out->s.start) / 2LL;
        last->s.start = out->s.stop;
        last->s.new_chap = 0;
    }

    *buf_out = out;
    return HB_FILTER_OK;
}
HandBrake-0.10.2/libhb/dvd.c 0000664 0001752 0001752 00000126565 12463330511 016055 0 ustar handbrake handbrake /* dvd.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
#include "lang.h"
#include "dvd.h"
#include "dvdread/ifo_read.h"
#include "dvdread/ifo_print.h"
#include "dvdread/nav_read.h"
/*
 * Forward declarations for the dvdread-backed implementation of the
 * hb_dvd_func_t access-method interface.
 */
static hb_dvd_t    * hb_dvdread_init( char * path );
static void          hb_dvdread_close( hb_dvd_t ** _d );
static char        * hb_dvdread_name( char * path );
static int           hb_dvdread_title_count( hb_dvd_t * d );
static hb_title_t  * hb_dvdread_title_scan( hb_dvd_t * d, int t, uint64_t min_duration );
static int           hb_dvdread_start( hb_dvd_t * d, hb_title_t *title, int chapter );
static void          hb_dvdread_stop( hb_dvd_t * d );
static int           hb_dvdread_seek( hb_dvd_t * d, float f );
static hb_buffer_t * hb_dvdread_read( hb_dvd_t * d );
static int           hb_dvdread_chapter( hb_dvd_t * d );
static int           hb_dvdread_angle_count( hb_dvd_t * d );
static void          hb_dvdread_set_angle( hb_dvd_t * d, int angle );
static int           hb_dvdread_main_feature( hb_dvd_t * d, hb_list_t * list_title );

// Method table exposed via hb_dvdread_methods().
hb_dvd_func_t hb_dvdread_func =
{
    hb_dvdread_init,
    hb_dvdread_close,
    hb_dvdread_name,
    hb_dvdread_title_count,
    hb_dvdread_title_scan,
    hb_dvdread_start,
    hb_dvdread_stop,
    hb_dvdread_seek,
    hb_dvdread_read,
    hb_dvdread_chapter,
    hb_dvdread_angle_count,
    hb_dvdread_set_angle,
    hb_dvdread_main_feature
};

// Currently selected access method (dvdread is the only one here).
static hb_dvd_func_t *dvd_methods = &hb_dvdread_func;

/***********************************************************************
 * Local prototypes
 **********************************************************************/
static void FindNextCell( hb_dvdread_t * );
static int  dvdtime2msec( dvd_time_t * );
static int  hb_dvdread_is_break( hb_dvdread_t * d );
/* Accessor returning the dvdread implementation's method table. */
hb_dvd_func_t * hb_dvdread_methods( void )
{
    return &hb_dvdread_func;
}
/*
 * Pick the "main feature": return the index of the title with the
 * longest duration in 'list_title', or -1 when the list is empty.
 * The hb_dvd_t parameter is unused but required by the method table.
 */
static int hb_dvdread_main_feature( hb_dvd_t * e, hb_list_t * list_title )
{
    int idx, count = hb_list_count( list_title );
    int best = -1;
    uint64_t best_duration = 0;

    for (idx = 0; idx < count; idx++)
    {
        hb_title_t *title = hb_list_item( list_title, idx );
        if (title->duration > best_duration)
        {
            best_duration = title->duration;
            best = title->index;
        }
    }
    return best;
}
/*
 * Return the UDF volume name of the DVD at 'path', or NULL when the
 * path cannot be opened as a DVD or has no volume info.
 *
 * NOTE(review): the result points into a static buffer, so the function
 * is not thread-safe and the value is overwritten by the next call.
 */
static char * hb_dvdread_name( char * path )
{
    static char name[1024];
    unsigned char unused[1024];
    dvd_reader_t * reader;

    reader = DVDOpen( path );
    if( !reader )
    {
        return NULL;
    }

    if( DVDUDFVolumeInfo( reader, name, sizeof( name ),
                          unused, sizeof( unused ) ) )
    {
        DVDClose( reader );
        return NULL;
    }

    DVDClose( reader );
    return name;
}
/***********************************************************************
 * hb_dvdread_init
 ***********************************************************************
 * Open 'path' as a DVD: log the drive's region mask, open the
 * libdvdread handle and the main VMG IFO.  Returns a new hb_dvd_t on
 * success, NULL when the path is not a DVD (the caller then retries it
 * as a plain stream/file).
 **********************************************************************/
hb_dvd_t * hb_dvdread_init( char * path )
{
    hb_dvd_t * e;
    hb_dvdread_t * d;
    int region_mask;
    char * path_ccp;

    e = calloc( sizeof( hb_dvd_t ), 1 );
    d = &(e->dvdread);

    /*
     * Convert UTF-8 path to current code page on Windows
     * hb_utf8_to_cp() is the same as strdup on non-Windows,
     * so no #ifdef required here
     */
    path_ccp = hb_utf8_to_cp( path );

    /* Log DVD drive region code */
    if ( hb_dvd_region( path_ccp, &region_mask ) == 0 )
    {
        hb_log( "dvd: Region mask 0x%02x", region_mask );
        if ( region_mask == 0xFF )
        {
            hb_log( "dvd: Warning, DVD device has no region set" );
        }
    }

    /* Open device */
    if( !( d->reader = DVDOpen( path_ccp ) ) )
    {
        /*
         * Not an error, may be a stream - which we'll try in a moment.
         */
        hb_log( "dvd: not a dvd - trying as a stream/file instead" );
        goto fail;
    }

    /* Open main IFO */
    if( !( d->vmg = ifoOpen( d->reader, 0 ) ) )
    {
        hb_error( "dvd: ifoOpen failed" );
        goto fail;
    }

    d->path = strdup( path ); /* hb_dvdread_title_scan assumes UTF-8 path, so not path_ccp here */
    free( path_ccp );

    return e;

fail:
    // goto-based cleanup: close whatever was opened, in reverse order.
    if( d->vmg )    ifoClose( d->vmg );
    if( d->reader ) DVDClose( d->reader );
    free( e );
    free( path_ccp );
    return NULL;
}
/***********************************************************************
 * hb_dvdread_title_count
 ***********************************************************************
 * Number of titles listed in the VMG title search pointer table.
 **********************************************************************/
static int hb_dvdread_title_count( hb_dvd_t * e )
{
    hb_dvdread_t *d = &(e->dvdread);

    return d->vmg->tt_srpt->nr_of_srpts;
}
/***********************************************************************
 * hb_dvdread_title_scan
 ***********************************************************************
 * Builds an hb_title_t for title number t (1-based): probes the title's
 * VTS IFO for duration, audio tracks, subtitles and chapters.
 * Returns NULL (after freeing the partial title) when the title is
 * invalid, damaged, or shorter than min_duration (90kHz ticks).
 *
 * Fix: the unknown-audio-codec path previously leaked the freshly
 * allocated hb_audio_t (continue without free).
 **********************************************************************/
static hb_title_t * hb_dvdread_title_scan( hb_dvd_t * e, int t, uint64_t min_duration )
{
    hb_dvdread_t *d = &(e->dvdread);
    hb_title_t * title;
    ifo_handle_t * vts = NULL;
    int pgc_id, pgn, i;
    hb_chapter_t * chapter;
    unsigned char unused[1024];
    const char * codec_name;

    hb_log( "scan: scanning title %d", t );

    title = hb_title_init( d->path, t );
    title->type = HB_DVD_TYPE;

    /* Prefer the UDF volume label for the title name; fall back to the
     * last path component with its extension stripped. */
    if( DVDUDFVolumeInfo( d->reader, title->name, sizeof( title->name ),
                          unused, sizeof( unused ) ) )
    {
        char * p_cur, * p_last = d->path;
        for( p_cur = d->path; *p_cur; p_cur++ )
        {
            if( IS_DIR_SEP(p_cur[0]) && p_cur[1] )
            {
                p_last = &p_cur[1];
            }
        }
        snprintf( title->name, sizeof( title->name ), "%s", p_last );
        char *dot_term = strrchr(title->name, '.');
        if (dot_term)
            *dot_term = '\0';
    }

    /* VTS which our title is in */
    title->vts = d->vmg->tt_srpt->title[t-1].title_set_nr;

    if ( !title->vts )
    {
        /* A VTS of 0 means the title wasn't found in the title set */
        hb_log("Invalid VTS (title set) number: %i", title->vts);
        goto fail;
    }

    hb_log( "scan: opening IFO for VTS %d", title->vts );
    if( !( vts = ifoOpen( d->reader, title->vts ) ) )
    {
        hb_log( "scan: ifoOpen failed" );
        goto fail;
    }

    /* ignore titles with bogus cell addresses so we don't abort later
     * in libdvdread. */
    for ( i = 0; i < vts->vts_c_adt->nr_of_vobs; ++i)
    {
        if( (vts->vts_c_adt->cell_adr_table[i].start_sector & 0xffffff ) ==
            0xffffff )
        {
            hb_log( "scan: cell_adr_table[%d].start_sector invalid (0x%x) "
                    "- skipping title", i,
                    vts->vts_c_adt->cell_adr_table[i].start_sector );
            goto fail;
        }
        if( (vts->vts_c_adt->cell_adr_table[i].last_sector & 0xffffff ) ==
            0xffffff )
        {
            hb_log( "scan: cell_adr_table[%d].last_sector invalid (0x%x) "
                    "- skipping title", i,
                    vts->vts_c_adt->cell_adr_table[i].last_sector );
            goto fail;
        }
        if( vts->vts_c_adt->cell_adr_table[i].start_sector >=
            vts->vts_c_adt->cell_adr_table[i].last_sector )
        {
            hb_log( "scan: cell_adr_table[%d].start_sector (0x%x) "
                    "is not before last_sector (0x%x) - skipping title", i,
                    vts->vts_c_adt->cell_adr_table[i].start_sector,
                    vts->vts_c_adt->cell_adr_table[i].last_sector );
            goto fail;
        }
    }

    if( global_verbosity_level == 3 )
    {
        ifo_print( d->reader, title->vts );
    }

    /* Position of the title in the VTS */
    title->ttn = d->vmg->tt_srpt->title[t-1].vts_ttn;
    if ( title->ttn < 1 || title->ttn > vts->vts_ptt_srpt->nr_of_srpts )
    {
        hb_log( "invalid VTS PTT offset %d for title %d, skipping", title->ttn, t );
        goto fail;
    }

    /* Get pgc */
    pgc_id = vts->vts_ptt_srpt->title[title->ttn-1].ptt[0].pgcn;
    if ( pgc_id < 1 || pgc_id > vts->vts_pgcit->nr_of_pgci_srp )
    {
        hb_log( "invalid PGC ID %d for title %d, skipping", pgc_id, t );
        goto fail;
    }
    pgn = vts->vts_ptt_srpt->title[title->ttn-1].ptt[0].pgn;
    d->pgc = vts->vts_pgcit->pgci_srp[pgc_id-1].pgc;

    hb_log("pgc_id: %d, pgn: %d: pgc: %p", pgc_id, pgn, d->pgc);

    if( !d->pgc || !d->pgc->program_map )
    {
        hb_log( "scan: pgc not valid, skipping" );
        goto fail;
    }
    if (d->pgc->cell_playback == NULL)
    {
        hb_log( "invalid PGC cell_playback table for title %d, skipping", t );
        goto fail;
    }

    if( pgn <= 0 || pgn > 99 )
    {
        hb_log( "scan: pgn %d not valid, skipping", pgn );
        goto fail;
    }

    /* Start cell */
    title->cell_start  = d->pgc->program_map[pgn-1] - 1;
    title->block_start = d->pgc->cell_playback[title->cell_start].first_sector;

    /* End cell */
    title->cell_end  = d->pgc->nr_of_cells - 1;
    title->block_end = d->pgc->cell_playback[title->cell_end].last_sector;

    /* Block count */
    title->block_count = 0;
    d->cell_cur = title->cell_start;
    while( d->cell_cur <= title->cell_end )
    {
#define cp d->pgc->cell_playback[d->cell_cur]
        title->block_count += cp.last_sector + 1 - cp.first_sector;
#undef cp
        FindNextCell( d );
        d->cell_cur = d->cell_next;
    }

    hb_log( "scan: vts=%d, ttn=%d, cells=%d->%d, blocks=%"PRIu64"->%"PRIu64", "
            "%"PRIu64" blocks", title->vts, title->ttn, title->cell_start,
            title->cell_end, title->block_start, title->block_end,
            title->block_count );

    /* Get duration */
    title->duration = 90LL * dvdtime2msec( &d->pgc->playback_time );
    title->hours    = title->duration / 90000 / 3600;
    title->minutes  = ( ( title->duration / 90000 ) % 3600 ) / 60;
    title->seconds  = ( title->duration / 90000 ) % 60;

    hb_log( "scan: duration is %02d:%02d:%02d (%"PRId64" ms)",
            title->hours, title->minutes, title->seconds,
            title->duration / 90 );

    /* ignore titles under 10 seconds because they're often stills or
     * clips with no audio & our preview code doesn't currently handle
     * either of these. */
    if( title->duration < min_duration )
    {
        hb_log( "scan: ignoring title (too short)" );
        goto fail;
    }

    /* Detect languages */
    for( i = 0; i < vts->vtsi_mat->nr_of_vts_audio_streams; i++ )
    {
        int audio_format, lang_code, lang_extension, audio_control, position, j;
        hb_audio_t * audio, * audio_tmp;
        iso639_lang_t * lang;

        hb_log( "scan: checking audio %d", i + 1 );

        audio = calloc( sizeof( hb_audio_t ), 1 );

        audio_format   = vts->vtsi_mat->vts_audio_attr[i].audio_format;
        lang_code      = vts->vtsi_mat->vts_audio_attr[i].lang_code;
        lang_extension = vts->vtsi_mat->vts_audio_attr[i].code_extension;
        audio_control  =
            vts->vts_pgcit->pgci_srp[pgc_id-1].pgc->audio_control[i];

        if( !( audio_control & 0x8000 ) )
        {
            hb_log( "scan: audio channel is not active" );
            free( audio );
            continue;
        }

        position = ( audio_control & 0x7F00 ) >> 8;

        switch( audio_format )
        {
            case 0x00:
                audio->id = ( ( 0x80 + position ) << 8 ) | 0xbd;
                audio->config.in.codec = HB_ACODEC_AC3;
                audio->config.in.codec_param = AV_CODEC_ID_AC3;
                codec_name = "AC3";
                break;

            case 0x02:
            case 0x03:
                audio->id = 0xc0 + position;
                audio->config.in.codec = HB_ACODEC_FFMPEG;
                audio->config.in.codec_param = AV_CODEC_ID_MP2;
                codec_name = "MPEG";
                break;

            case 0x04:
                audio->id = ( ( 0xa0 + position ) << 8 ) | 0xbd;
                audio->config.in.codec = HB_ACODEC_LPCM;
                codec_name = "LPCM";
                break;

            case 0x06:
                audio->id = ( ( 0x88 + position ) << 8 ) | 0xbd;
                audio->config.in.codec = HB_ACODEC_DCA;
                audio->config.in.codec_param = AV_CODEC_ID_DTS;
                codec_name = "DTS";
                break;

            default:
                audio->id = 0;
                audio->config.in.codec = 0;
                codec_name = "Unknown";
                hb_log( "scan: unknown audio codec (%x)",
                        audio_format );
                break;
        }
        if( !audio->id )
        {
            /* Unknown codec: drop the track. This used to leak the
             * freshly allocated audio struct. */
            free( audio );
            continue;
        }

        /* Check for duplicate tracks */
        audio_tmp = NULL;
        for( j = 0; j < hb_list_count( title->list_audio ); j++ )
        {
            audio_tmp = hb_list_item( title->list_audio, j );
            if( audio->id == audio_tmp->id )
            {
                break;
            }
            audio_tmp = NULL;
        }
        if( audio_tmp )
        {
            hb_log( "scan: duplicate audio track" );
            free( audio );
            continue;
        }

        lang = lang_for_code( lang_code );

        audio->config.lang.type = lang_extension;

        snprintf( audio->config.lang.simple,
                  sizeof( audio->config.lang.simple ), "%s",
                  strlen( lang->native_name ) ? lang->native_name : lang->eng_name );
        snprintf( audio->config.lang.iso639_2,
                  sizeof( audio->config.lang.iso639_2 ), "%s", lang->iso639_2 );

        hb_log("scan: id=0x%x, lang=%s (%s), 3cc=%s ext=%i", audio->id,
               audio->config.lang.simple, codec_name,
               audio->config.lang.iso639_2, lang_extension);

        audio->config.in.track = i;

        hb_list_add( title->list_audio, audio );
    }

    /* Check for subtitles */
    for( i = 0; i < vts->vtsi_mat->nr_of_vts_subp_streams; i++ )
    {
        hb_subtitle_t * subtitle;
        int spu_control;
        int position;
        iso639_lang_t * lang;
        int lang_extension = 0;

        hb_log( "scan: checking subtitle %d", i + 1 );

        spu_control =
            vts->vts_pgcit->pgci_srp[pgc_id-1].pgc->subp_control[i];

        if( !( spu_control & 0x80000000 ) )
        {
            hb_log( "scan: subtitle channel is not active" );
            continue;
        }

        if( vts->vtsi_mat->vts_video_attr.display_aspect_ratio )
        {
            switch( vts->vtsi_mat->vts_video_attr.permitted_df )
            {
                case 1:
                    position = spu_control & 0xFF;
                    break;
                case 2:
                    position = ( spu_control >> 8 ) & 0xFF;
                    break;
                default:
                    position = ( spu_control >> 16 ) & 0xFF;
            }
        }
        else
        {
            position = ( spu_control >> 24 ) & 0x7F;
        }

        lang_extension = vts->vtsi_mat->vts_subp_attr[i].code_extension;

        lang = lang_for_code( vts->vtsi_mat->vts_subp_attr[i].lang_code );

        subtitle = calloc( sizeof( hb_subtitle_t ), 1 );
        subtitle->track = i+1;
        subtitle->id = ( ( 0x20 + position ) << 8 ) | 0xbd;
        snprintf( subtitle->lang, sizeof( subtitle->lang ), "%s",
                  strlen(lang->native_name) ? lang->native_name : lang->eng_name);
        snprintf( subtitle->iso639_2, sizeof( subtitle->iso639_2 ), "%s",
                  lang->iso639_2);
        subtitle->format = PICTURESUB;
        subtitle->source = VOBSUB;
        subtitle->config.dest = RENDERSUB;  // By default render (burn-in) the VOBSUB.
        subtitle->stream_type = 0xbd;
        subtitle->substream_type = 0x20 + position;
        subtitle->codec = WORK_DECVOBSUB;

        subtitle->type = lang_extension;

        memcpy( subtitle->palette,
                vts->vts_pgcit->pgci_srp[pgc_id-1].pgc->palette,
                16 * sizeof( uint32_t ) );
        subtitle->palette_set = 1;

        /* NOTE(review): the strcat calls below assume subtitle->lang has
         * room for the longest suffix - confirm against the field size. */
        switch( lang_extension )
        {
            case 2:
                strcat( subtitle->lang, " (Caption with bigger size character)" );
                break;
            case 3:
                strcat( subtitle->lang, " (Caption for Children)" );
                break;
            case 5:
                strcat( subtitle->lang, " (Closed Caption)" );
                break;
            case 6:
                strcat( subtitle->lang, " (Closed Caption with bigger size character)" );
                break;
            case 7:
                strcat( subtitle->lang, " (Closed Caption for Children)" );
                break;
            case 9:
                strcat( subtitle->lang, " (Forced Caption)" );
                break;
            case 13:
                strcat( subtitle->lang, " (Director's Commentary)" );
                break;
            case 14:
                strcat( subtitle->lang, " (Director's Commentary with bigger size character)" );
                break;
            case 15:
                strcat( subtitle->lang, " (Director's Commentary for Children)" );
                break;
            default:
                break;
        }

        hb_log( "scan: id=0x%x, lang=%s, 3cc=%s ext=%i", subtitle->id,
                subtitle->lang, subtitle->iso639_2, lang_extension );

        hb_list_add( title->list_subtitle, subtitle );
    }

    /* Chapters */
    hb_log( "scan: title %d has %d chapters", t,
            vts->vts_ptt_srpt->title[title->ttn-1].nr_of_ptts );
    for( i = 0;
         i < vts->vts_ptt_srpt->title[title->ttn-1].nr_of_ptts; i++ )
    {
        char chapter_title[80];
        chapter = calloc( sizeof( hb_chapter_t ), 1 );

        /* remember the on-disc chapter number */
        chapter->index = i + 1;
        sprintf( chapter_title, "Chapter %d", chapter->index );
        hb_chapter_set_title( chapter, chapter_title );

        pgc_id = vts->vts_ptt_srpt->title[title->ttn-1].ptt[i].pgcn;
        pgn    = vts->vts_ptt_srpt->title[title->ttn-1].ptt[i].pgn;
        d->pgc = vts->vts_pgcit->pgci_srp[pgc_id-1].pgc;

        /* Start cell */
        chapter->cell_start  = d->pgc->program_map[pgn-1] - 1;
        chapter->block_start =
            d->pgc->cell_playback[chapter->cell_start].first_sector;

        // if there are no more programs in this pgc, the end cell is the
        // last cell. Otherwise it's the cell before the start cell of the
        // next program.
        if ( pgn == d->pgc->nr_of_programs )
        {
            chapter->cell_end = d->pgc->nr_of_cells - 1;
        }
        else
        {
            chapter->cell_end = d->pgc->program_map[pgn] - 2;
        }
        chapter->block_end = d->pgc->cell_playback[chapter->cell_end].last_sector;

        /* Block count, duration */
        chapter->block_count = 0;
        chapter->duration = 0;

        d->cell_cur = chapter->cell_start;
        while( d->cell_cur <= chapter->cell_end )
        {
#define cp d->pgc->cell_playback[d->cell_cur]
            chapter->block_count += cp.last_sector + 1 - cp.first_sector;
            chapter->duration += 90LL * dvdtime2msec( &cp.playback_time );
#undef cp
            FindNextCell( d );
            d->cell_cur = d->cell_next;
        }
        hb_list_add( title->list_chapter, chapter );
    }

    for( i = 0; i < hb_list_count( title->list_chapter ); i++ )
    {
        chapter = hb_list_item( title->list_chapter, i );

        int seconds      = ( chapter->duration + 45000 ) / 90000;
        chapter->hours   = ( seconds / 3600 );
        chapter->minutes = ( seconds % 3600 ) / 60;
        chapter->seconds = ( seconds % 60 );

        hb_log( "scan: chap %d c=%d->%d, b=%"PRIu64"->%"PRIu64" (%"PRIu64"), %"PRId64" ms",
                chapter->index, chapter->cell_start, chapter->cell_end,
                chapter->block_start, chapter->block_end,
                chapter->block_count, chapter->duration / 90 );
    }

    /* Get aspect. We don't get width/height/rate infos here as
       they tend to be wrong */
    switch( vts->vtsi_mat->vts_video_attr.display_aspect_ratio )
    {
        case 0:
            title->container_aspect = 4. / 3.;
            break;
        case 3:
            title->container_aspect = 16. / 9.;
            break;
        default:
            hb_log( "scan: unknown aspect" );
            goto fail;
    }

    hb_log( "scan: aspect = %g", title->container_aspect );

    /* This title is ok so far */
    goto cleanup;

fail:
    hb_title_close( &title );

cleanup:
    if( vts ) ifoClose( vts );

    return title;
}
/***********************************************************************
 * hb_dvdread_start
 ***********************************************************************
 * Title and chapter start at 1
 **********************************************************************/
static int hb_dvdread_start( hb_dvd_t * e, hb_title_t *title, int chapter )
{
    hb_dvdread_t * d = &(e->dvdread);
    int t = title->index;
    int pgc_id, pgn;

    /* Open the IFO and the VOBs for this title */
    d->vts = d->vmg->tt_srpt->title[t-1].title_set_nr;
    d->ttn = d->vmg->tt_srpt->title[t-1].vts_ttn;

    d->ifo = ifoOpen( d->reader, d->vts );
    if( d->ifo == NULL )
    {
        hb_error( "dvd: ifoOpen failed for VTS %d", d->vts );
        return 0;
    }
    d->file = DVDOpenFile( d->reader, d->vts, DVD_READ_TITLE_VOBS );
    if( d->file == NULL )
    {
        hb_error( "dvd: DVDOpenFile failed for VTS %d", d->vts );
        return 0;
    }

    /* First and last blocks of the title as a whole */
    pgc_id = d->ifo->vts_ptt_srpt->title[d->ttn-1].ptt[0].pgcn;
    pgn    = d->ifo->vts_ptt_srpt->title[d->ttn-1].ptt[0].pgn;
    d->pgc = d->ifo->vts_pgcit->pgci_srp[pgc_id-1].pgc;

    d->cell_start  = d->pgc->program_map[pgn - 1] - 1;
    d->cell_end    = d->pgc->nr_of_cells - 1;
    d->title_start = d->pgc->cell_playback[d->cell_start].first_sector;
    d->title_end   = d->pgc->cell_playback[d->cell_end].last_sector;

    /* Sum the block counts of every cell in the title */
    d->title_block_count = 0;
    for( int cell = d->cell_start; cell <= d->cell_end; cell++ )
    {
        d->title_block_count += d->pgc->cell_playback[cell].last_sector + 1
                              - d->pgc->cell_playback[cell].first_sector;
    }

    /* Switch to the pgc that contains the requested chapter */
    pgc_id = d->ifo->vts_ptt_srpt->title[d->ttn-1].ptt[chapter-1].pgcn;
    pgn    = d->ifo->vts_ptt_srpt->title[d->ttn-1].ptt[chapter-1].pgn;
    d->pgc = d->ifo->vts_pgcit->pgci_srp[pgc_id-1].pgc;

    /* Position on the chapter's first cell and look up the one after it */
    d->cell_cur = d->pgc->program_map[pgn-1] - 1;
    FindNextCell( d );
    d->block        = d->pgc->cell_playback[d->cell_cur].first_sector;
    d->next_vobu    = d->block;
    d->pack_len     = 0;
    d->cell_overlap = 0;
    d->in_cell      = 0;
    d->in_sync      = 2;

    return 1;
}
/***********************************************************************
 * hb_dvdread_stop
 ***********************************************************************
 * Releases the per-title IFO handle and VOB file opened by
 * hb_dvdread_start; safe to call when neither is open.
 **********************************************************************/
static void hb_dvdread_stop( hb_dvd_t * e )
{
    hb_dvdread_t * dvd = &(e->dvdread);

    if( dvd->ifo != NULL )
    {
        ifoClose( dvd->ifo );
        dvd->ifo = NULL;
    }
    if( dvd->file != NULL )
    {
        DVDCloseFile( dvd->file );
        dvd->file = NULL;
    }
}
/***********************************************************************
 * hb_dvdread_seek
 ***********************************************************************
 * Seeks to fraction f (0.0 - 1.0) of the title, measured in blocks.
 * Returns 1 on success, 0 if the target falls past the last cell.
 **********************************************************************/
static int hb_dvdread_seek( hb_dvd_t * e, float f )
{
    hb_dvdread_t * d = &(e->dvdread);
    int remaining = f * d->title_block_count;
    int cell;

    /* Walk the cells, subtracting each cell's size until the target
     * block lands inside one. */
    for( cell = d->cell_start; cell <= d->cell_end; cell++ )
    {
        int cell_size = d->pgc->cell_playback[cell].last_sector + 1 -
                        d->pgc->cell_playback[cell].first_sector;
        if( remaining < cell_size )
        {
            d->cell_cur    = cell;
            d->cur_cell_id = 0;
            FindNextCell( d );

            /* Now let hb_dvdread_read find the next VOBU */
            d->next_vobu = d->pgc->cell_playback[cell].first_sector + remaining;
            d->pack_len  = 0;
            break;
        }
        remaining -= cell_size;
    }
    if( cell > d->cell_end )
    {
        return 0;
    }

    /*
     * Assume that we are in sync, even if we are not given that it is obvious
     * that we are out of sync if we have just done a seek.
     */
    d->in_sync = 2;

    return 1;
}
/***********************************************************************
 * is_nav_pack
 ***********************************************************************
 * A NAV pack carries a PCI packet and a DSI packet, each a private
 * stream 1 PES packet (start code 00 00 01 bf) followed by a 2-byte
 * length and a substream id (0x00 for PCI, 0x01 for DSI). The PCI
 * packet starts at offset 0x26 of the sector, the DSI at 0x400.
 *
 * Layout reference: http://dvd.sourceforge.net/dvdinfo/
 **********************************************************************/
int is_nav_pack( unsigned char *buf )
{
    /* PCI: private-stream-1 header at 0x26, substream id 0x00 at 0x2c */
    int has_pci = buf[0x26] == 0x00 &&
                  buf[0x27] == 0x00 &&
                  buf[0x28] == 0x01 &&
                  buf[0x29] == 0xbf &&
                  buf[0x2c] == 0x00;

    /* DSI: private-stream-1 header at 0x400, substream id 0x01 at 0x406 */
    int has_dsi = buf[0x400] == 0x00 &&
                  buf[0x401] == 0x00 &&
                  buf[0x402] == 0x01 &&
                  buf[0x403] == 0xbf &&
                  buf[0x406] == 0x01;

    return has_pci && has_dsi;
}
/***********************************************************************
 * hb_dvdread_read
 ***********************************************************************
 * Reads and returns the next 2048-byte block of the current title, or
 * NULL at end-of-title / unrecoverable error. Maintains a small state
 * machine: when pack_len is 0 it hunts for the next NAV pack (with bad
 * block skipping and resync logic), otherwise it reads the next data
 * block of the current pack. Chapter breaks are flagged via
 * b->s.new_chap on the first block of a new cell.
 **********************************************************************/
static hb_buffer_t * hb_dvdread_read( hb_dvd_t * e )
{
    hb_dvdread_t *d = &(e->dvdread);
    hb_buffer_t *b = hb_buffer_init( HB_DVD_READ_BUFFER_SIZE );
 top:
    if( !d->pack_len )
    {
        /* New pack */
        dsi_t dsi_pack;
        int error = 0;

        // if we've just finished the last cell of the title we don't
        // want to read another block because our next_vobu pointer
        // is probably invalid. Just return 'no data' & our caller
        // should check and discover we're at eof.
        if ( d->cell_cur > d->cell_end )
        {
            hb_buffer_close( &b );
            return NULL;
        }

        for( ;; )
        {
            int block, pack_len, next_vobu, read_retry;

            /* Inner loop: read one block, retrying/skipping over bad
             * sectors until we land on a NAV pack or give up. */
            for( read_retry = 1; read_retry < 1024; read_retry++ )
            {
                if( DVDReadBlocks( d->file, d->next_vobu, 1, b->data ) == 1 )
                {
                    /*
                     * Successful read.
                     */
                    if( read_retry > 1 && !is_nav_pack( b->data) )
                    {
                        // But wasn't a nav pack, so carry on looking
                        read_retry = 1;
                        d->next_vobu++;
                        continue;
                    }
                    break;
                } else {
                    // First retry the same block, then try the next one,
                    // adjust the skip increment upwards so that we can skip
                    // large sections of bad blocks more efficiently (at the
                    // cost of some missed good blocks at the end).
                    hb_log( "dvd: vobu read error blk %d - skipping to next blk incr %d",
                            d->next_vobu, (read_retry * 10));
                    d->next_vobu += (read_retry * 10);
                }
            }

            if( read_retry == 1024 )
            {
                // That's too many bad blocks, jump to the start of the
                // next cell.
                hb_log( "dvd: vobu read error blk %d - skipping to cell %d",
                        d->next_vobu, d->cell_next );
                d->cell_cur = d->cell_next;
                if ( d->cell_cur > d->cell_end )
                {
                    hb_buffer_close( &b );
                    return NULL;
                }
                d->in_cell = 0;
                d->next_vobu = d->pgc->cell_playback[d->cell_cur].first_sector;
                FindNextCell( d );
                d->cell_overlap = 1;
                continue;
            }

            if ( !is_nav_pack( b->data ) ) {
                (d->next_vobu)++;
                if( d->in_sync == 1 ) {
                    hb_log("dvd: Lost sync, searching for NAV pack at blk %d",
                           d->next_vobu);
                    d->in_sync = 0;
                }
                continue;
            }

            navRead_DSI( &dsi_pack, &b->data[DSI_START_BYTE] );

            if ( d->in_sync == 0 && d->cur_cell_id &&
                 (d->cur_vob_id != dsi_pack.dsi_gi.vobu_vob_idn ||
                  d->cur_cell_id != dsi_pack.dsi_gi.vobu_c_idn ) )
            {
                // We walked out of the cell we're supposed to be in.
                // If we're not at the start of our next cell go there.
                hb_log("dvd: left cell %d (%u,%u) for (%u,%u) at block %u",
                       d->cell_cur, d->cur_vob_id, d->cur_cell_id,
                       dsi_pack.dsi_gi.vobu_vob_idn, dsi_pack.dsi_gi.vobu_c_idn,
                       d->next_vobu );
                if ( d->next_vobu != d->pgc->cell_playback[d->cell_next].first_sector )
                {
                    d->next_vobu = d->pgc->cell_playback[d->cell_next].first_sector;
                    d->cur_cell_id = 0;
                    continue;
                }
            }

            block    = dsi_pack.dsi_gi.nv_pck_lbn;
            pack_len = dsi_pack.dsi_gi.vobu_ea;

            // There are a total of 21 next vobu offsets (and 21 prev_vobu
            // offsets) in the navpack SRI structure. The primary one is
            // 'next_vobu' which is the offset (in dvd blocks) from the current
            // block to the start of the next vobu. If the block at 'next_vobu'
            // can't be read, 'next_video' is the offset to the vobu closest to it.
            // The other 19 offsets are vobus at successively longer distances from
            // the current block (this is so that a hardware player can do
            // adaptive error recovery to skip over a bad spot on the disk). In all
            // these offsets the high bit is set to indicate when it contains a
            // valid offset. The next bit (2^30) is set to indicate that there's
            // another valid offset in the SRI that's closer to the current block.
            // A hardware player uses these bits to chose the closest valid offset
            // and uses that as its next vobu. (Some mastering schemes appear to
            // put a bogus offset in next_vobu with the 2^30 bit set & the
            // correct offset in next_video. This works fine in hardware players
            // but will mess up software that doesn't implement the full next
            // vobu decision logic.) In addition to the flag bits there's a
            // reserved value of the offset that indicates 'no next vobu' (which
            // indicates the end of a cell). But having no next vobu pointer with a
            // 'valid' bit set also indicates end of cell. Different mastering
            // schemes seem to use all possible combinations of the flag bits
            // and reserved values to indicate end of cell so we have to check
            // them all or we'll get a disk read error from following an
            // invalid offset.
            uint32_t next_ptr = dsi_pack.vobu_sri.next_vobu;
            if ( ( next_ptr & ( 1 << 31 ) ) == 0  ||
                 ( next_ptr & ( 1 << 30 ) ) != 0 ||
                 ( next_ptr & 0x3fffffff ) == 0x3fffffff )
            {
                next_ptr = dsi_pack.vobu_sri.next_video;
                if ( ( next_ptr & ( 1 << 31 ) ) == 0 ||
                     ( next_ptr & 0x3fffffff ) == 0x3fffffff )
                {
                    // don't have a valid forward pointer - assume end-of-cell
                    d->block = block;
                    d->pack_len = pack_len;
                    break;
                }
            }
            next_vobu = block + ( next_ptr & 0x3fffffff );

            /* Sanity-check the pack before committing to it: plausible
             * length, monotonic position, and inside the title's range. */
            if( pack_len >  0    &&
                pack_len <  1024 &&
                block    >= d->next_vobu &&
                ( block <= d->title_start + d->title_block_count ||
                  block <= d->title_end ) )
            {
                d->block     = block;
                d->pack_len  = pack_len;
                d->next_vobu = next_vobu;
                break;
            }

            /* Wasn't a valid VOBU, try next block */
            if( ++error > 1024 )
            {
                hb_log( "dvd: couldn't find a VOBU after 1024 blocks" );
                hb_buffer_close( &b );
                return NULL;
            }

            (d->next_vobu)++;
        }

        if( d->in_sync == 0 || d->in_sync == 2 )
        {
            if( d->in_sync == 0 )
            {
                hb_log( "dvd: In sync with DVD at block %d", d->block );
            }
            d->in_sync = 1;
        }

        // Revert the cell overlap, and check for a chapter break
        // If this cell is zero length (prev_vobu & next_vobu both
        // set to END_OF_CELL) we need to check for beginning of
        // cell before checking for end or we'll advance to the next
        // cell too early and fail to generate a chapter mark when this
        // cell starts a chapter.
        if( ( dsi_pack.vobu_sri.prev_vobu & (1 << 31 ) ) == 0 ||
            ( dsi_pack.vobu_sri.prev_vobu & 0x3fffffff ) == 0x3fffffff )
        {
            // A vobu that's not at the start of a cell can have an
            // EOC prev pointer (this seems to be associated with some
            // sort of drm). The rest of the content in the cell may be
            // booby-trapped so treat this like an end-of-cell rather
            // than a beginning of cell.
            if ( d->pgc->cell_playback[d->cell_cur].first_sector < dsi_pack.dsi_gi.nv_pck_lbn &&
                 d->pgc->cell_playback[d->cell_cur].last_sector >= dsi_pack.dsi_gi.nv_pck_lbn )
            {
                hb_log( "dvd: null prev_vobu in cell %d at block %d", d->cell_cur,
                        d->block );
                // treat like end-of-cell then go directly to start of next cell.
                d->cell_cur = d->cell_next;
                d->in_cell = 0;
                d->next_vobu = d->pgc->cell_playback[d->cell_cur].first_sector;
                FindNextCell( d );
                d->cell_overlap = 1;
                goto top;
            }
            else
            {
                if ( d->block != d->pgc->cell_playback[d->cell_cur].first_sector )
                {
                    hb_log( "dvd: beginning of cell %d at block %d", d->cell_cur,
                            d->block );
                }
                if( d->in_cell )
                {
                    hb_error( "dvd: assuming missed end of cell %d at block %d", d->cell_cur, d->block );
                    d->cell_cur = d->cell_next;
                    d->next_vobu = d->pgc->cell_playback[d->cell_cur].first_sector;
                    FindNextCell( d );
                    d->cell_overlap = 1;
                    d->in_cell = 0;
                } else {
                    d->in_cell = 1;
                }
                d->cur_vob_id = dsi_pack.dsi_gi.vobu_vob_idn;
                d->cur_cell_id = dsi_pack.dsi_gi.vobu_c_idn;

                if( d->cell_overlap )
                {
                    b->s.new_chap = hb_dvdread_is_break( d );
                    d->cell_overlap = 0;
                }
            }
        }

        if( ( dsi_pack.vobu_sri.next_vobu & (1 << 31 ) ) == 0 ||
            ( dsi_pack.vobu_sri.next_vobu & 0x3fffffff ) == 0x3fffffff )
        {
            if ( d->block <= d->pgc->cell_playback[d->cell_cur].first_sector ||
                 d->block > d->pgc->cell_playback[d->cell_cur].last_sector )
            {
                hb_log( "dvd: end of cell %d at block %d", d->cell_cur,
                        d->block );
            }
            d->cell_cur = d->cell_next;
            d->in_cell = 0;
            d->next_vobu = d->pgc->cell_playback[d->cell_cur].first_sector;
            FindNextCell( d );
            d->cell_overlap = 1;
        }
    }
    else
    {
        if( DVDReadBlocks( d->file, d->block, 1, b->data ) != 1 )
        {
            // this may be a real DVD error or may be DRM. Either way
            // we don't want to quit because of one bad block so set
            // things up so we'll advance to the next vobu and recurse.
            hb_error( "dvd: DVDReadBlocks failed (%d), skipping to vobu %u",
                      d->block, d->next_vobu );
            d->pack_len = 0;
            goto top;  /* XXX need to restructure this routine & avoid goto */
        }
        d->pack_len--;
    }

    d->block++;

    return b;
}
/***********************************************************************
 * hb_dvdread_chapter
 ***********************************************************************
 * Returns in which chapter the next block to be read is.
 * Chapter numbers start at 1.
 **********************************************************************/
static int hb_dvdread_chapter( hb_dvd_t * e )
{
    hb_dvdread_t * d = &(e->dvdread);
    int nr_of_ptts = d->ifo->vts_ptt_srpt->title[d->ttn-1].nr_of_ptts;
    int i;

    /* Walk the chapters from last to first; the first one whose cell
     * range contains the current cell is the chapter we're in. */
    for( i = nr_of_ptts - 1; i >= 0; i-- )
    {
        /* Get pgc for chapter (i+1) */
        int pgc_id  = d->ifo->vts_ptt_srpt->title[d->ttn-1].ptt[i].pgcn;
        int pgn     = d->ifo->vts_ptt_srpt->title[d->ttn-1].ptt[i].pgn;
        pgc_t * pgc = d->ifo->vts_pgcit->pgci_srp[pgc_id-1].pgc;
        int cell    = d->cell_cur - d->cell_overlap;

        if( cell >= pgc->program_map[pgn-1] - 1 &&
            cell <= pgc->nr_of_cells - 1 )
        {
            /* We are in this chapter */
            return i + 1;
        }
    }

    /* End of title */
    return -1;
}
/***********************************************************************
 * hb_dvdread_is_break
 ***********************************************************************
 * Returns chapter number if the current block is a new chapter start
 **********************************************************************/
static int hb_dvdread_is_break( hb_dvdread_t * d )
{
    int nr_of_ptts = d->ifo->vts_ptt_srpt->title[d->ttn-1].nr_of_ptts;
    int i;

    /* Walk chapters from last to first, stopping once we reach cells at
     * or before the title's start cell. */
    for( i = nr_of_ptts - 1; i > 0; i-- )
    {
        /* Get pgc for chapter (i+1) */
        int pgc_id  = d->ifo->vts_ptt_srpt->title[d->ttn-1].ptt[i].pgcn;
        int pgn     = d->ifo->vts_ptt_srpt->title[d->ttn-1].ptt[i].pgn;
        pgc_t * pgc = d->ifo->vts_pgcit->pgci_srp[pgc_id-1].pgc;
        int cell    = pgc->program_map[pgn-1] - 1;

        if( cell <= d->cell_start )
            break;

        // This must not match against the start cell.
        if( cell != d->cell_start &&
            pgc->cell_playback[cell].first_sector == d->block )
        {
            return i + 1;
        }
    }

    return 0;
}
/***********************************************************************
 * hb_dvdread_close
 ***********************************************************************
 * Closes and frees everything
 **********************************************************************/
static void hb_dvdread_close( hb_dvd_t ** _d )
{
    hb_dvdread_t * d = &((*_d)->dvdread);

    if( d->vmg )
    {
        ifoClose( d->vmg );
    }
    if( d->reader )
    {
        DVDClose( d->reader );
    }

    /* d->path was strdup'd in hb_dvdread_init and was never released,
     * leaking it on every close; free(NULL) is harmless if the init
     * failed before the path was set. */
    free( d->path );

    free( d );
    *_d = NULL;
}
/***********************************************************************
 * hb_dvdread_angle_count
 ***********************************************************************
 * Returns the number of angles supported. We do not support angles
 * with dvdread
 **********************************************************************/
static int hb_dvdread_angle_count( hb_dvd_t * d )
{
    /* The dvdread backend always reports a single angle. */
    return 1;
}
/***********************************************************************
 * hb_dvdread_set_angle
 ***********************************************************************
 * Sets the angle to read. Not supported with dvdread
 **********************************************************************/
static void hb_dvdread_set_angle( hb_dvd_t * d, int angle )
{
    /* Intentionally a no-op: angle selection is only available through
     * the dvdnav backend. */
}
/***********************************************************************
 * FindNextCell
 ***********************************************************************
 * Assumes pgc and cell_cur are correctly set, and sets cell_next to the
 * cell to be read when we will be done with cell_cur.
 **********************************************************************/
static void FindNextCell( hb_dvdread_t * d )
{
    if( d->pgc->cell_playback[d->cell_cur].block_type ==
            BLOCK_TYPE_ANGLE_BLOCK )
    {
        /* Skip the whole multi-angle block: advance past the cell that
         * is flagged as the last one of the block. */
        int last = d->cell_cur;
        while( d->pgc->cell_playback[last].block_mode !=
               BLOCK_MODE_LAST_CELL )
        {
            last++;
        }
        d->cell_next = last + 1;
        hb_log( "dvd: Skipping multi-angle cells %d-%d",
                d->cell_cur,
                d->cell_next - 1 );
    }
    else
    {
        d->cell_next = d->cell_cur + 1;
    }
}
/***********************************************************************
 * dvdtime2msec
 ***********************************************************************
 * From lsdvd
 **********************************************************************/
/* Decode one BCD-coded byte (tens digit in the high nibble). */
static int dvd_bcd2int( int b )
{
    return ( ( b >> 4 ) & 0x0f ) * 10 + ( b & 0x0f );
}

static int dvdtime2msec(dvd_time_t * dt)
{
    /* The frame-rate code is in the top two bits of frame_u:
     * 1 = 25fps, 3 = 29.97fps, anything else is invalid (-1). */
    double frames_per_s[4] = {-1.0, 25.00, -1.0, 29.97};
    double fps = frames_per_s[(dt->frame_u & 0xc0) >> 6];
    long ms;

    ms  = dvd_bcd2int( dt->hour )   * 3600000;
    ms += dvd_bcd2int( dt->minute ) * 60000;
    ms += dvd_bcd2int( dt->second ) * 1000;

    if( fps > 0 )
    {
        /* Frame count is BCD in the low six bits of frame_u. */
        ms += dvd_bcd2int( dt->frame_u & 0x3f ) * 1000.0 / fps;
    }

    return ms;
}
/* Thin dispatch wrappers: each public hb_dvd_* entry point forwards to
 * the active backend (dvdread or dvdnav) selected by
 * hb_dvd_set_dvdnav(). */

/* Returns the volume name of the disc at path. */
char * hb_dvd_name( char * path )
{
    return dvd_methods->name(path);
}

/* Opens the disc at path; returns NULL on failure. */
hb_dvd_t * hb_dvd_init( char * path )
{
    return dvd_methods->init(path);
}

/* Number of titles on the open disc. */
int hb_dvd_title_count( hb_dvd_t * d )
{
    return dvd_methods->title_count(d);
}

/* Scans title t; returns NULL if the title is invalid or too short. */
hb_title_t * hb_dvd_title_scan( hb_dvd_t * d, int t, uint64_t min_duration )
{
    return dvd_methods->title_scan(d, t, min_duration);
}

/* Positions the reader at the given title/chapter (both 1-based). */
int hb_dvd_start( hb_dvd_t * d, hb_title_t *title, int chapter )
{
    return dvd_methods->start(d, title, chapter);
}

/* Releases per-title state opened by hb_dvd_start. */
void hb_dvd_stop( hb_dvd_t * d )
{
    dvd_methods->stop(d);
}

/* Seeks to fraction f (0.0 - 1.0) of the current title. */
int hb_dvd_seek( hb_dvd_t * d, float f )
{
    return dvd_methods->seek(d, f);
}

/* Reads the next block; NULL at end-of-title or on fatal error. */
hb_buffer_t * hb_dvd_read( hb_dvd_t * d )
{
    return dvd_methods->read(d);
}

/* Chapter containing the next block to read (1-based), -1 at EOT. */
int hb_dvd_chapter( hb_dvd_t * d )
{
    return dvd_methods->chapter(d);
}

/* Closes the disc and frees *_d, setting it to NULL. */
void hb_dvd_close( hb_dvd_t ** _d )
{
    dvd_methods->close(_d);
}

/* Number of angles available (always 1 with dvdread). */
int hb_dvd_angle_count( hb_dvd_t * d )
{
    return dvd_methods->angle_count(d);
}

/* Selects the angle to read (no-op with dvdread). */
void hb_dvd_set_angle( hb_dvd_t * d, int angle )
{
    dvd_methods->set_angle(d, angle);
}

/* Picks the most likely main feature from list_title. */
int hb_dvd_main_feature( hb_dvd_t * d, hb_list_t * list_title )
{
    return dvd_methods->main_feature(d, list_title);
}

// hb_dvd_set_dvdnav must only be called when no dvd source is open
// it rips the rug out from under things so be careful
void hb_dvd_set_dvdnav( int enable )
{
    if (enable)
        dvd_methods = hb_dvdnav_methods();
    else
        dvd_methods = hb_dvdread_methods();
}
HandBrake-0.10.2/libhb/enc_qsv.c 0000664 0001752 0001752 00000202734 12321011646 016725 0 ustar handbrake handbrake /* ********************************************************************* *\
Copyright (C) 2013 Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\* ********************************************************************* */
#ifdef USE_QSV
#include "hb.h"
#include "nal_units.h"
#include "qsv_common.h"
#include "qsv_memory.h"
#include "h264_common.h"
/*
 * The frame info struct remembers information about each frame across calls to
 * the encoder. Since frames are uniquely identified by their timestamp, we use
 * some bits of the timestamp as an index. The LSB is chosen so that two
 * successive frames will have different values in the bits over any plausible
 * range of frame rates (starting with bit 8 allows any frame rate slower than
 * 352fps). The MSB determines the size of the array. It is chosen so that two
 * frames can't use the same slot during the encoder's max frame delay so that,
 * up to some minimum frame rate, frames are guaranteed to map to different
 * slots (an MSB of 17 which is 2^(17-8+1) = 1024 slots guarantees no collisions
 * down to a rate of 0.7 fps).
 */
#define FRAME_INFO_MAX2 (8)  // 2^8 = 256;  90000/256    = 352 frames/sec
#define FRAME_INFO_MIN2 (17) // 2^17 = 128K; 90000/131072 = 0.7 frames/sec
#define FRAME_INFO_SIZE (1 << (FRAME_INFO_MIN2 - FRAME_INFO_MAX2 + 1))
#define FRAME_INFO_MASK (FRAME_INFO_SIZE - 1)

// libhb work object entry points (init/work/close), see hb_encqsv below
int  encqsvInit (hb_work_object_t*, hb_job_t*);
int  encqsvWork (hb_work_object_t*, hb_buffer_t**, hb_buffer_t**);
void encqsvClose(hb_work_object_t*);

// work object registered with libhb for the Intel QSV H.264 encoder
hb_work_object_t hb_encqsv =
{
    WORK_ENCQSV,
    "H.264/AVC encoder (Intel QSV)",
    encqsvInit,
    encqsvWork,
    encqsvClose
};
// Per-instance encoder state, owned by the work object (w->private_data).
struct hb_work_private_s
{
    hb_job_t           *job;
    uint32_t            frames_in;          // presumably input frame counter — confirm in encqsvWork
    uint32_t            frames_out;         // presumably output frame counter — confirm in encqsvWork
    int64_t             last_start;         // initialized to INT64_MIN in encqsvInit
    hb_qsv_param_t      param;              // encoder parameters (wraps the mfxVideoParam used for init)
    av_qsv_space        enc_space;          // encoder resources (tasks, surfaces, sync points)
    hb_qsv_info_t      *qsv_info;           // QSV implementation capabilities for job->vcodec
    hb_list_t          *delayed_chapters;   // list of struct chapter_s (see below)
    int64_t             next_chapter_pts;   // AV_NOPTS_VALUE when no chapter is pending
#define BFRM_DELAY_MAX 16
    uint32_t           *init_delay;         // points at w->config->h264.init_delay when B-frames are on
    int                 bfrm_delay;
    int64_t             init_pts[BFRM_DELAY_MAX + 1];
    hb_list_t          *list_dts;           // PTS in display order, used to generate DTS (only when B-frames are on)
    int64_t             frame_duration[FRAME_INFO_SIZE]; // per-frame durations, indexed by PTS bits (FRAME_INFO_*)
    int                 async_depth;        // reset to 0 at the end of encqsvInit
    int                 max_async_depth;    // final AsyncDepth as sanitized by Media SDK
    // if encode-only, system memory used
    int is_sys_mem;
    mfxSession mfx_session;                 // dummy/encode session kept alive on the encode-only path
    struct SwsContext *sws_context_to_nv12; // YUV420P -> NV12 conversion (encode-only path)
    // whether to expect input from VPP or from QSV decode
    int is_vpp_present;
    // whether the encoder is initialized
    int init_done;
    hb_list_t *delayed_processing;
    hb_list_t *encoded_frames;
};
// used in delayed_chapters list
struct chapter_s
{
    int     index;  // chapter index
    int64_t start;  // chapter start time (90kHz pts) — assumed from next_chapter_pts usage; confirm in encqsvWork
};
/*
 * Append a timestamp to the DTS list.
 * Silently does nothing when the list is NULL or allocation fails.
 */
static void hb_qsv_add_new_dts(hb_list_t *list, int64_t new_dts)
{
    int64_t *entry;

    if (list == NULL)
    {
        return;
    }

    entry = malloc(sizeof(int64_t));
    if (entry == NULL)
    {
        return;
    }

    *entry = new_dts;
    hb_list_add(list, entry);
}
/*
 * Remove and return the oldest timestamp from the DTS list.
 * Returns INT64_MIN when the list is NULL, empty, or holds a NULL item.
 */
static int64_t hb_qsv_pop_next_dts(hb_list_t *list)
{
    if (list == NULL || hb_list_count(list) <= 0)
    {
        return INT64_MIN;
    }

    int64_t *head = hb_list_item(list, 0);
    if (head == NULL)
    {
        return INT64_MIN;
    }

    int64_t dts = *head;
    hb_list_rem(list, head);
    free(head);

    return dts;
}
/*
 * Remember this frame's duration, keyed by bits of its start time
 * (see the FRAME_INFO_* constants above for how the slot is derived).
 */
static void save_frame_duration(hb_work_private_t *pv, hb_buffer_t *buf)
{
    int slot = (buf->s.start >> FRAME_INFO_MAX2) & FRAME_INFO_MASK;

    pv->frame_duration[slot] = buf->s.stop - buf->s.start;
}
/*
 * Look up the duration previously saved for this frame's start time
 * (same slot derivation as save_frame_duration).
 */
static int64_t get_frame_duration(hb_work_private_t *pv, hb_buffer_t *buf)
{
    int slot = (buf->s.start >> FRAME_INFO_MAX2) & FRAME_INFO_MASK;

    return pv->frame_duration[slot];
}
/*
 * Translate an MFX_PROFILE_AVC_* identifier into a human-readable name.
 * Returns NULL for MFX_PROFILE_UNKNOWN and any unrecognized value.
 */
static const char* qsv_h264_profile_xlat(int profile)
{
    static const struct
    {
        int         id;
        const char *name;
    } profile_names[] =
    {
        { MFX_PROFILE_AVC_CONSTRAINED_BASELINE, "Constrained Baseline" },
        { MFX_PROFILE_AVC_BASELINE,             "Baseline"             },
        { MFX_PROFILE_AVC_EXTENDED,             "Extended"             },
        { MFX_PROFILE_AVC_MAIN,                 "Main"                 },
        { MFX_PROFILE_AVC_CONSTRAINED_HIGH,     "Constrained High"     },
        { MFX_PROFILE_AVC_PROGRESSIVE_HIGH,     "Progressive High"     },
        { MFX_PROFILE_AVC_HIGH,                 "High"                 },
    };
    size_t i;

    for (i = 0; i < sizeof(profile_names) / sizeof(profile_names[0]); i++)
    {
        if (profile_names[i].id == profile)
        {
            return profile_names[i].name;
        }
    }

    return NULL;
}
/*
 * Translate an MFX_LEVEL_AVC_* value into its name by scanning the
 * parallel hb_h264_level_values/hb_h264_level_names tables.
 * Returns NULL when the level is not found.
 */
static const char* qsv_h264_level_xlat(int level)
{
    const char *name = NULL;
    int idx = 0;

    while (hb_h264_level_names[idx] != NULL)
    {
        if (hb_h264_level_values[idx] == level)
        {
            name = hb_h264_level_names[idx];
            break;
        }
        idx++;
    }

    return name;
}
/*
 * Perform the deferred, "real" initialization of the encoding session:
 * allocate bitstream tasks, input surfaces and sync points, then call
 * MFXVideoENCODE_Init on the session.
 *
 * Returns:
 *   0  success (or already initialized)
 *   2  upstream decode/VPP space not initialized yet — presumably the
 *      caller retries later; confirm against encqsvWork
 *   3  decode path enabled but no av_qsv_context available (fatal misuse)
 *  -1  Media SDK failure; also sets *job->done_error and *job->die
 */
int qsv_enc_init(hb_work_private_t *pv)
{
    av_qsv_context *qsv = pv->job->qsv.ctx;
    hb_job_t *job = pv->job;
    mfxStatus sts;
    int i;

    if (pv->init_done)
    {
        return 0;
    }

    if (qsv == NULL)
    {
        if (!pv->is_sys_mem)
        {
            hb_error("qsv_enc_init: decode enabled but no context!");
            return 3;
        }
        // encode-only path: create the shared context ourselves
        job->qsv.ctx = qsv = av_mallocz(sizeof(av_qsv_context));
    }

    av_qsv_space *qsv_encode = qsv->enc_space;
    if (qsv_encode == NULL)
    {
        // if only for encode
        if (pv->is_sys_mem)
        {
            // no need to use additional sync as encode only -> single thread
            av_qsv_add_context_usage(qsv, 0);
            // re-use the session from encqsvInit
            qsv->mfx_session = pv->mfx_session;
        }
        qsv->enc_space = qsv_encode = &pv->enc_space;
    }

    if (!pv->is_sys_mem)
    {
        // full QSV path: detect whether a QSV filter (VPP) feeds the encoder
        if (!pv->is_vpp_present && job->list_filter != NULL)
        {
            for (i = 0; i < hb_list_count(job->list_filter); i++)
            {
                hb_filter_object_t *filter = hb_list_item(job->list_filter, i);
                if (filter->id == HB_FILTER_QSV_PRE  ||
                    filter->id == HB_FILTER_QSV_POST ||
                    filter->id == HB_FILTER_QSV)
                {
                    pv->is_vpp_present = 1;
                    break;
                }
            }
        }

        if (pv->is_vpp_present)
        {
            // wait until every VPP stage is initialized
            if (qsv->vpp_space == NULL)
            {
                return 2;
            }
            for (i = 0; i < av_qsv_list_count(qsv->vpp_space); i++)
            {
                av_qsv_space *vpp = av_qsv_list_item(qsv->vpp_space, i);
                if (!vpp->is_init_done)
                {
                    return 2;
                }
            }
        }

        // the decoder must be initialized before we can share its surfaces
        av_qsv_space *dec_space = qsv->dec_space;
        if (dec_space == NULL || !dec_space->is_init_done)
        {
            return 2;
        }
    }
    else
    {
        // encode-only: input buffers are YUV420P, encoder wants NV12
        pv->sws_context_to_nv12 = hb_sws_get_context(job->width, job->height,
                                                     AV_PIX_FMT_YUV420P,
                                                     job->width, job->height,
                                                     AV_PIX_FMT_NV12,
                                                     SWS_LANCZOS|SWS_ACCURATE_RND);
    }

    // allocate tasks (one output bitstream buffer per in-flight frame)
    qsv_encode->p_buf_max_size = AV_QSV_BUF_SIZE_DEFAULT;
    qsv_encode->tasks = av_qsv_list_init(HAVE_THREADS);
    for (i = 0; i < pv->max_async_depth; i++)
    {
        av_qsv_task *task = av_mallocz(sizeof(av_qsv_task));
        task->bs = av_mallocz(sizeof(mfxBitstream));
        task->bs->Data = av_mallocz(sizeof(uint8_t) * qsv_encode->p_buf_max_size);
        task->bs->MaxLength = qsv_encode->p_buf_max_size;
        task->bs->DataLength = 0;
        task->bs->DataOffset = 0;
        av_qsv_list_add(qsv_encode->tasks, task);
    }

    // setup surface allocation
    pv->param.videoParam->IOPattern = (pv->is_sys_mem ?
                                       MFX_IOPATTERN_IN_SYSTEM_MEMORY :
                                       MFX_IOPATTERN_IN_OPAQUE_MEMORY);
    memset(&qsv_encode->request, 0, sizeof(mfxFrameAllocRequest) * 2);
    sts = MFXVideoENCODE_QueryIOSurf(qsv->mfx_session,
                                     pv->param.videoParam,
                                     &qsv_encode->request[0]);
    if (sts < MFX_ERR_NONE) // ignore warnings
    {
        hb_error("qsv_enc_init: MFXVideoENCODE_QueryIOSurf failed (%d)", sts);
        *job->done_error = HB_ERROR_INIT;
        *job->die = 1;
        return -1;
    }

    // allocate surfaces
    if (pv->is_sys_mem)
    {
        // system memory: allocate NV12 surfaces ourselves
        qsv_encode->surface_num = FFMIN(qsv_encode->request[0].NumFrameSuggested +
                                        pv->max_async_depth, AV_QSV_SURFACE_NUM);
        if (qsv_encode->surface_num <= 0)
        {
            qsv_encode->surface_num = AV_QSV_SURFACE_NUM;
        }
        for (i = 0; i < qsv_encode->surface_num; i++)
        {
            mfxFrameSurface1 *surface = av_mallocz(sizeof(mfxFrameSurface1));
            mfxFrameInfo info = pv->param.videoParam->mfx.FrameInfo;
            surface->Info = info;
            surface->Data.Pitch = info.Width;
            // NV12: full-size luma plane + half-size interleaved chroma plane
            surface->Data.Y = av_mallocz(info.Width * info.Height);
            surface->Data.VU = av_mallocz(info.Width * info.Height / 2);
            qsv_encode->p_surfaces[i] = surface;
        }
    }
    else
    {
        // opaque memory: share surfaces with the upstream decode/VPP stage
        av_qsv_space *in_space = qsv->dec_space;
        if (pv->is_vpp_present)
        {
            // we get our input from VPP instead
            in_space = av_qsv_list_item(qsv->vpp_space,
                                        av_qsv_list_count(qsv->vpp_space) - 1);
        }
        // introduced in API 1.3
        memset(&qsv_encode->ext_opaque_alloc, 0, sizeof(mfxExtOpaqueSurfaceAlloc));
        qsv_encode->ext_opaque_alloc.Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION;
        qsv_encode->ext_opaque_alloc.Header.BufferSz = sizeof(mfxExtOpaqueSurfaceAlloc);
        qsv_encode->ext_opaque_alloc.In.Surfaces = in_space->p_surfaces;
        qsv_encode->ext_opaque_alloc.In.NumSurface = in_space->surface_num;
        qsv_encode->ext_opaque_alloc.In.Type = qsv_encode->request[0].Type;
        pv->param.videoParam->ExtParam[pv->param.videoParam->NumExtParam++] = (mfxExtBuffer*)&qsv_encode->ext_opaque_alloc;
    }

    // allocate sync points
    qsv_encode->sync_num = (qsv_encode->surface_num ?
                            FFMIN(qsv_encode->surface_num, AV_QSV_SYNC_NUM) :
                            AV_QSV_SYNC_NUM);
    for (i = 0; i < qsv_encode->sync_num; i++)
    {
        qsv_encode->p_syncp[i] = av_mallocz(sizeof(av_qsv_sync));
        AV_QSV_CHECK_POINTER(qsv_encode->p_syncp[i], MFX_ERR_MEMORY_ALLOC);
        qsv_encode->p_syncp[i]->p_sync = av_mallocz(sizeof(mfxSyncPoint));
        AV_QSV_CHECK_POINTER(qsv_encode->p_syncp[i]->p_sync, MFX_ERR_MEMORY_ALLOC);
    }

    // initialize the encoder
    sts = MFXVideoENCODE_Init(qsv->mfx_session, pv->param.videoParam);
    if (sts < MFX_ERR_NONE) // ignore warnings
    {
        hb_error("qsv_enc_init: MFXVideoENCODE_Init failed (%d)", sts);
        *job->done_error = HB_ERROR_INIT;
        *job->die = 1;
        return -1;
    }
    qsv_encode->is_init_done = 1;

    mfxIMPL impl;
    mfxVersion version;
    // log actual implementation details now that we know them
    if ((MFXQueryIMPL   (qsv->mfx_session, &impl)    == MFX_ERR_NONE) &&
        (MFXQueryVersion(qsv->mfx_session, &version) == MFX_ERR_NONE))
    {
        hb_log("qsv_enc_init: using '%s' implementation, API: %"PRIu16".%"PRIu16"",
               hb_qsv_impl_get_name(impl), version.Major, version.Minor);
    }
    else
    {
        hb_log("qsv_enc_init: MFXQueryIMPL/MFXQueryVersion failure");
    }

    pv->init_done = 1;
    return 0;
}
/***********************************************************************
* encqsvInit
***********************************************************************
*
**********************************************************************/
/*
 * Set up private state and build the full set of encoding parameters:
 * presets, user options, colorimetry, profile/level, rate control, GOP
 * structure and B-pyramid. A throwaway encode session is then created to
 * let Media SDK sanitize the final parameters and hand back the SPS/PPS
 * for the muxer. The real session is initialized later in qsv_enc_init().
 *
 * Returns 0 on success, -1 on any configuration or Media SDK failure.
 */
int encqsvInit(hb_work_object_t *w, hb_job_t *job)
{
    hb_work_private_t *pv = calloc(1, sizeof(hb_work_private_t));
    w->private_data = pv;

    pv->job = job;
    // encode-only (system memory) unless QSV decode feeds us directly
    pv->is_sys_mem = hb_qsv_decode_is_enabled(job) == 0;
    pv->qsv_info = hb_qsv_info_get(job->vcodec);
    pv->delayed_processing = hb_list_init();
    pv->encoded_frames = hb_list_init();
    pv->last_start = INT64_MIN;
    pv->next_chapter_pts = AV_NOPTS_VALUE;
    pv->delayed_chapters = hb_list_init();

    // default encoding parameters
    if (hb_qsv_param_default_preset(&pv->param, &pv->enc_space.m_mfxVideoParam,
                                    pv->qsv_info, job->encoder_preset))
    {
        hb_error("encqsvInit: hb_qsv_param_default_preset failed");
        return -1;
    }

    // set AsyncDepth to match that of decode and VPP
    pv->param.videoParam->AsyncDepth = job->qsv.async_depth;

    // enable and set colorimetry (video signal information)
    pv->param.videoSignalInfo.ColourDescriptionPresent = 1;
    switch (job->color_matrix_code)
    {
        case 4:
            // custom
            pv->param.videoSignalInfo.ColourPrimaries = job->color_prim;
            pv->param.videoSignalInfo.TransferCharacteristics = job->color_transfer;
            pv->param.videoSignalInfo.MatrixCoefficients = job->color_matrix;
            break;
        case 3:
            // ITU BT.709 HD content
            pv->param.videoSignalInfo.ColourPrimaries = HB_COLR_PRI_BT709;
            pv->param.videoSignalInfo.TransferCharacteristics = HB_COLR_TRA_BT709;
            pv->param.videoSignalInfo.MatrixCoefficients = HB_COLR_MAT_BT709;
            break;
        case 2:
            // ITU BT.601 DVD or SD TV content (PAL)
            pv->param.videoSignalInfo.ColourPrimaries = HB_COLR_PRI_EBUTECH;
            pv->param.videoSignalInfo.TransferCharacteristics = HB_COLR_TRA_BT709;
            pv->param.videoSignalInfo.MatrixCoefficients = HB_COLR_MAT_SMPTE170M;
            break;
        case 1:
            // ITU BT.601 DVD or SD TV content (NTSC)
            pv->param.videoSignalInfo.ColourPrimaries = HB_COLR_PRI_SMPTEC;
            pv->param.videoSignalInfo.TransferCharacteristics = HB_COLR_TRA_BT709;
            pv->param.videoSignalInfo.MatrixCoefficients = HB_COLR_MAT_SMPTE170M;
            break;
        default:
            // detected during scan
            pv->param.videoSignalInfo.ColourPrimaries = job->title->color_prim;
            pv->param.videoSignalInfo.TransferCharacteristics = job->title->color_transfer;
            pv->param.videoSignalInfo.MatrixCoefficients = job->title->color_matrix;
            break;
    }

    // parse user-specified encoder options, if present
    if (job->encoder_options != NULL && *job->encoder_options)
    {
        hb_dict_t *options_list;
        hb_dict_entry_t *option = NULL;
        options_list = hb_encopts_to_dict(job->encoder_options, job->vcodec);
        while ((option = hb_dict_next(options_list, option)) != NULL)
        {
            // bad options are logged but don't abort the job
            switch (hb_qsv_param_parse(&pv->param, pv->qsv_info,
                                       option->key, option->value))
            {
                case HB_QSV_PARAM_OK:
                    break;
                case HB_QSV_PARAM_BAD_NAME:
                    hb_log("encqsvInit: hb_qsv_param_parse: bad key %s",
                           option->key);
                    break;
                case HB_QSV_PARAM_BAD_VALUE:
                    hb_log("encqsvInit: hb_qsv_param_parse: bad value %s for key %s",
                           option->value, option->key);
                    break;
                case HB_QSV_PARAM_UNSUPPORTED:
                    hb_log("encqsvInit: hb_qsv_param_parse: unsupported option %s",
                           option->key);
                    break;
                case HB_QSV_PARAM_ERROR:
                default:
                    hb_log("encqsvInit: hb_qsv_param_parse: unknown error");
                    break;
            }
        }
        hb_dict_free(&options_list);
    }

    // reload colorimetry in case values were set in encoder_options
    if (pv->param.videoSignalInfo.ColourDescriptionPresent)
    {
        job->color_matrix_code = 4;
        job->color_prim = pv->param.videoSignalInfo.ColourPrimaries;
        job->color_transfer = pv->param.videoSignalInfo.TransferCharacteristics;
        job->color_matrix = pv->param.videoSignalInfo.MatrixCoefficients;
    }
    else
    {
        job->color_matrix_code = 0;
        job->color_prim = HB_COLR_PRI_UNDEF;
        job->color_transfer = HB_COLR_TRA_UNDEF;
        job->color_matrix = HB_COLR_MAT_UNDEF;
    }

    // sanitize values that may exceed the Media SDK variable size
    int64_t vrate, vrate_base;
    int64_t par_width, par_height;
    hb_limit_rational64(&vrate, &vrate_base,
                        job->vrate, job->vrate_base, UINT32_MAX);
    hb_limit_rational64(&par_width, &par_height,
                        job->anamorphic.par_width,
                        job->anamorphic.par_height, UINT16_MAX);

    // some encoding parameters are used by filters to configure their output
    if (pv->param.videoParam->mfx.FrameInfo.PicStruct != MFX_PICSTRUCT_PROGRESSIVE)
    {
        // interlaced content needs a taller alignment (32 vs. 16)
        job->qsv.enc_info.align_height = AV_QSV_ALIGN32(job->height);
    }
    else
    {
        job->qsv.enc_info.align_height = AV_QSV_ALIGN16(job->height);
    }
    job->qsv.enc_info.align_width = AV_QSV_ALIGN16(job->width);
    job->qsv.enc_info.pic_struct = pv->param.videoParam->mfx.FrameInfo.PicStruct;
    job->qsv.enc_info.is_init_done = 1;

    // encode to H.264 and set FrameInfo
    pv->param.videoParam->mfx.CodecId = MFX_CODEC_AVC;
    pv->param.videoParam->mfx.CodecLevel = MFX_LEVEL_UNKNOWN;
    pv->param.videoParam->mfx.CodecProfile = MFX_PROFILE_UNKNOWN;
    pv->param.videoParam->mfx.FrameInfo.FourCC = MFX_FOURCC_NV12;
    pv->param.videoParam->mfx.FrameInfo.ChromaFormat = MFX_CHROMAFORMAT_YUV420;
    pv->param.videoParam->mfx.FrameInfo.FrameRateExtN = vrate;
    pv->param.videoParam->mfx.FrameInfo.FrameRateExtD = vrate_base;
    pv->param.videoParam->mfx.FrameInfo.AspectRatioW = par_width;
    pv->param.videoParam->mfx.FrameInfo.AspectRatioH = par_height;
    pv->param.videoParam->mfx.FrameInfo.CropX = 0;
    pv->param.videoParam->mfx.FrameInfo.CropY = 0;
    pv->param.videoParam->mfx.FrameInfo.CropW = job->width;
    pv->param.videoParam->mfx.FrameInfo.CropH = job->height;
    pv->param.videoParam->mfx.FrameInfo.PicStruct = job->qsv.enc_info.pic_struct;
    pv->param.videoParam->mfx.FrameInfo.Width = job->qsv.enc_info.align_width;
    pv->param.videoParam->mfx.FrameInfo.Height = job->qsv.enc_info.align_height;

    // set H.264 profile and level
    if (job->encoder_profile != NULL && *job->encoder_profile &&
        strcasecmp(job->encoder_profile, "auto"))
    {
        if (!strcasecmp(job->encoder_profile, "baseline"))
        {
            pv->param.videoParam->mfx.CodecProfile = MFX_PROFILE_AVC_BASELINE;
        }
        else if (!strcasecmp(job->encoder_profile, "main"))
        {
            pv->param.videoParam->mfx.CodecProfile = MFX_PROFILE_AVC_MAIN;
        }
        else if (!strcasecmp(job->encoder_profile, "high"))
        {
            pv->param.videoParam->mfx.CodecProfile = MFX_PROFILE_AVC_HIGH;
        }
        else
        {
            hb_error("encqsvInit: bad profile %s", job->encoder_profile);
            return -1;
        }
    }
    if (job->encoder_level != NULL && *job->encoder_level &&
        strcasecmp(job->encoder_level, "auto"))
    {
        int err;
        int i = hb_qsv_atoindex(hb_h264_level_names, job->encoder_level, &err);
        if (err || i >= (sizeof(hb_h264_level_values) /
                         sizeof(hb_h264_level_values[0])))
        {
            hb_error("encqsvInit: bad level %s", job->encoder_level);
            return -1;
        }
        else if (pv->qsv_info->capabilities & HB_QSV_CAP_MSDK_API_1_6)
        {
            pv->param.videoParam->mfx.CodecLevel = HB_QSV_CLIP3(MFX_LEVEL_AVC_1,
                                                                MFX_LEVEL_AVC_52,
                                                                hb_h264_level_values[i]);
        }
        else
        {
            // Media SDK API < 1.6, MFX_LEVEL_AVC_52 unsupported
            pv->param.videoParam->mfx.CodecLevel = HB_QSV_CLIP3(MFX_LEVEL_AVC_1,
                                                                MFX_LEVEL_AVC_51,
                                                                hb_h264_level_values[i]);
        }
    }

    // interlaced encoding is not always possible
    if (pv->param.videoParam->mfx.FrameInfo.PicStruct != MFX_PICSTRUCT_PROGRESSIVE)
    {
        if (pv->param.videoParam->mfx.CodecProfile == MFX_PROFILE_AVC_CONSTRAINED_BASELINE ||
            pv->param.videoParam->mfx.CodecProfile == MFX_PROFILE_AVC_BASELINE ||
            pv->param.videoParam->mfx.CodecProfile == MFX_PROFILE_AVC_PROGRESSIVE_HIGH)
        {
            hb_error("encqsvInit: profile %s doesn't support interlaced encoding",
                     qsv_h264_profile_xlat(pv->param.videoParam->mfx.CodecProfile));
            return -1;
        }
        if ((pv->param.videoParam->mfx.CodecLevel >= MFX_LEVEL_AVC_1b &&
             pv->param.videoParam->mfx.CodecLevel <= MFX_LEVEL_AVC_2) ||
            (pv->param.videoParam->mfx.CodecLevel >= MFX_LEVEL_AVC_42))
        {
            hb_error("encqsvInit: level %s doesn't support interlaced encoding",
                     qsv_h264_level_xlat(pv->param.videoParam->mfx.CodecLevel));
            return -1;
        }
    }

    // sanitize ICQ
    if (!(pv->qsv_info->capabilities & HB_QSV_CAP_RATECONTROL_ICQ))
    {
        // ICQ not supported
        pv->param.rc.icq = 0;
    }
    else
    {
        pv->param.rc.icq = !!pv->param.rc.icq;
    }

    // sanitize lookahead
    if (!(pv->qsv_info->capabilities & HB_QSV_CAP_RATECONTROL_LA))
    {
        // lookahead not supported
        pv->param.rc.lookahead = 0;
    }
    else if ((pv->param.rc.lookahead) &&
             (pv->qsv_info->capabilities & HB_QSV_CAP_RATECONTROL_LAi) == 0 &&
             (pv->param.videoParam->mfx.FrameInfo.PicStruct != MFX_PICSTRUCT_PROGRESSIVE))
    {
        // lookahead enabled but we can't use it
        hb_log("encqsvInit: LookAhead not used (LookAhead is progressive-only)");
        pv->param.rc.lookahead = 0;
    }
    else
    {
        pv->param.rc.lookahead = !!pv->param.rc.lookahead;
    }

    // set VBV here (this will be overridden for CQP and ignored for LA)
    // only set BufferSizeInKB, InitialDelayInKB and MaxKbps if we have
    // them - otheriwse Media SDK will pick values for us automatically
    if (pv->param.rc.vbv_buffer_size > 0)
    {
        if (pv->param.rc.vbv_buffer_init > 1.0)
        {
            // vbv_buffer_init > 1.0 is an absolute size (in kbit)
            pv->param.videoParam->mfx.InitialDelayInKB = (pv->param.rc.vbv_buffer_init / 8);
        }
        else if (pv->param.rc.vbv_buffer_init > 0.0)
        {
            // otherwise it's a fraction of the buffer size
            pv->param.videoParam->mfx.InitialDelayInKB = (pv->param.rc.vbv_buffer_size *
                                                          pv->param.rc.vbv_buffer_init / 8);
        }
        pv->param.videoParam->mfx.BufferSizeInKB = (pv->param.rc.vbv_buffer_size / 8);
    }
    if (pv->param.rc.vbv_max_bitrate > 0)
    {
        pv->param.videoParam->mfx.MaxKbps = pv->param.rc.vbv_max_bitrate;
    }

    // set rate control paremeters
    if (job->vquality >= 0)
    {
        // constant-quality encode
        if (pv->param.rc.icq)
        {
            // introduced in API 1.8
            if (pv->param.rc.lookahead)
            {
                pv->param.videoParam->mfx.RateControlMethod = MFX_RATECONTROL_LA_ICQ;
            }
            else
            {
                pv->param.videoParam->mfx.RateControlMethod = MFX_RATECONTROL_ICQ;
            }
            pv->param.videoParam->mfx.ICQQuality = HB_QSV_CLIP3(1, 51, job->vquality);
        }
        else
        {
            // introduced in API 1.1
            pv->param.videoParam->mfx.RateControlMethod = MFX_RATECONTROL_CQP;
            pv->param.videoParam->mfx.QPI = HB_QSV_CLIP3(0, 51, job->vquality + pv->param.rc.cqp_offsets[0]);
            pv->param.videoParam->mfx.QPP = HB_QSV_CLIP3(0, 51, job->vquality + pv->param.rc.cqp_offsets[1]);
            pv->param.videoParam->mfx.QPB = HB_QSV_CLIP3(0, 51, job->vquality + pv->param.rc.cqp_offsets[2]);
            // CQP + ExtBRC can cause bad output
            pv->param.codingOption2.ExtBRC = MFX_CODINGOPTION_OFF;
        }
    }
    else if (job->vbitrate > 0)
    {
        // bitrate-targeted encode
        if (pv->param.rc.lookahead)
        {
            // introduced in API 1.7
            pv->param.videoParam->mfx.RateControlMethod = MFX_RATECONTROL_LA;
            pv->param.videoParam->mfx.TargetKbps = job->vbitrate;
            // ignored, but some drivers will change AsyncDepth because of it
            pv->param.codingOption2.ExtBRC = MFX_CODINGOPTION_OFF;
        }
        else
        {
            // introduced in API 1.0
            if (job->vbitrate == pv->param.rc.vbv_max_bitrate)
            {
                pv->param.videoParam->mfx.RateControlMethod = MFX_RATECONTROL_CBR;
            }
            else
            {
                pv->param.videoParam->mfx.RateControlMethod = MFX_RATECONTROL_VBR;
            }
            pv->param.videoParam->mfx.TargetKbps = job->vbitrate;
        }
    }
    else
    {
        hb_error("encqsvInit: invalid rate control (%d, %d)",
                 job->vquality, job->vbitrate);
        return -1;
    }

    // if VBV is enabled but ignored, log it
    if (pv->param.rc.vbv_max_bitrate > 0 || pv->param.rc.vbv_buffer_size > 0)
    {
        switch (pv->param.videoParam->mfx.RateControlMethod)
        {
            case MFX_RATECONTROL_LA:
            case MFX_RATECONTROL_LA_ICQ:
                hb_log("encqsvInit: LookAhead enabled, ignoring VBV");
                break;
            case MFX_RATECONTROL_ICQ:
                hb_log("encqsvInit: ICQ rate control, ignoring VBV");
                break;
            default:
                break;
        }
    }

    // set B-pyramid
    if (pv->param.gop.b_pyramid < 0)
    {
        // negative means "auto": default on for CQP, off otherwise
        if (pv->param.videoParam->mfx.RateControlMethod == MFX_RATECONTROL_CQP)
        {
            pv->param.gop.b_pyramid = 1;
        }
        else
        {
            pv->param.gop.b_pyramid = 0;
        }
    }
    pv->param.gop.b_pyramid = !!pv->param.gop.b_pyramid;

    // set the GOP structure
    if (pv->param.gop.gop_ref_dist < 0)
    {
        if (pv->param.videoParam->mfx.RateControlMethod == MFX_RATECONTROL_CQP)
        {
            pv->param.gop.gop_ref_dist = 4;
        }
        else
        {
            pv->param.gop.gop_ref_dist = 3;
        }
    }
    pv->param.videoParam->mfx.GopRefDist = pv->param.gop.gop_ref_dist;

    // set the keyframe interval
    if (pv->param.gop.gop_pic_size < 0)
    {
        int rate = (int)((double)job->vrate / (double)job->vrate_base + 0.5);
        if (pv->param.videoParam->mfx.RateControlMethod == MFX_RATECONTROL_CQP)
        {
            // ensure B-pyramid is enabled for CQP on Haswell
            pv->param.gop.gop_pic_size = 32;
        }
        else
        {
            // set the keyframe interval based on the framerate
            pv->param.gop.gop_pic_size = rate;
        }
    }
    pv->param.videoParam->mfx.GopPicSize = pv->param.gop.gop_pic_size;

    // sanitize some settings that affect memory consumption
    if (!pv->is_sys_mem)
    {
        // limit these to avoid running out of resources (causes hang)
        pv->param.videoParam->mfx.GopRefDist = FFMIN(pv->param.videoParam->mfx.GopRefDist,
                                                     pv->param.rc.lookahead ? 8 : 16);
        pv->param.codingOption2.LookAheadDepth = FFMIN(pv->param.codingOption2.LookAheadDepth,
                                                       pv->param.rc.lookahead ? (48 - pv->param.videoParam->mfx.GopRefDist -
                                                                                 3 * !pv->param.videoParam->mfx.GopRefDist) : 0);
    }
    else
    {
        // encode-only is a bit less sensitive to memory issues
        pv->param.videoParam->mfx.GopRefDist = FFMIN(pv->param.videoParam->mfx.GopRefDist, 16);
        pv->param.codingOption2.LookAheadDepth = FFMIN(pv->param.codingOption2.LookAheadDepth,
                                                       pv->param.rc.lookahead ? 60 : 0);
    }

    if ((pv->qsv_info->capabilities & HB_QSV_CAP_B_REF_PYRAMID) &&
        (pv->param.videoParam->mfx.CodecProfile != MFX_PROFILE_AVC_BASELINE &&
         pv->param.videoParam->mfx.CodecProfile != MFX_PROFILE_AVC_CONSTRAINED_HIGH &&
         pv->param.videoParam->mfx.CodecProfile != MFX_PROFILE_AVC_CONSTRAINED_BASELINE))
    {
        int gop_ref_dist = 4;
        /*
         * B-pyramid is supported.
         *
         * Set gop_ref_dist to a power of two, >= 4 and <= GopRefDist to ensure
         * Media SDK will not disable B-pyramid if we end up using it below.
         */
        while (pv->param.videoParam->mfx.GopRefDist >= gop_ref_dist * 2)
        {
            gop_ref_dist *= 2;
        }
        if ((pv->param.gop.b_pyramid) &&
            (pv->param.videoParam->mfx.GopPicSize == 0 ||
             pv->param.videoParam->mfx.GopPicSize > gop_ref_dist))
        {
            /*
             * B-pyramid enabled and GopPicSize is long enough for gop_ref_dist.
             *
             * Use gop_ref_dist. GopPicSize must be a multiple of GopRefDist.
             * NumRefFrame should be >= (GopRefDist / 2) and >= 3, otherwise
             * Media SDK may sometimes decide to disable B-pyramid too (whereas
             * sometimes it will just sanitize NumrefFrame instead).
             *
             * Notes: Media SDK handles the NumRefFrame == 0 case for us.
             *        Also, GopPicSize == 0 should always result in a value that
             *        does NOT cause Media SDK to disable B-pyramid, so it's OK.
             */
            pv->param.videoParam->mfx.GopRefDist = gop_ref_dist;
            pv->param.videoParam->mfx.GopPicSize = (pv->param.videoParam->mfx.GopPicSize /
                                                    pv->param.videoParam->mfx.GopRefDist *
                                                    pv->param.videoParam->mfx.GopRefDist);
            if (pv->param.videoParam->mfx.NumRefFrame)
            {
                pv->param.videoParam->mfx.NumRefFrame = FFMAX(pv->param.videoParam->mfx.NumRefFrame,
                                                              pv->param.videoParam->mfx.GopRefDist / 2);
                pv->param.videoParam->mfx.NumRefFrame = FFMAX(pv->param.videoParam->mfx.NumRefFrame, 3);
            }
        }
        else
        {
            /*
             * B-pyramid disabled or not possible (GopPicSize too short).
             * Sanitize gop.b_pyramid to 0 (off/disabled).
             */
            pv->param.gop.b_pyramid = 0;
            /* Then, adjust settings to actually disable it. */
            if (pv->param.videoParam->mfx.GopRefDist == 0)
            {
                /*
                 * GopRefDist == 0 means the value will be set by Media SDK.
                 * Since we can't be sure what the actual value would be, we
                 * have to make sure that GopRefDist is set explicitly.
                 */
                pv->param.videoParam->mfx.GopRefDist = gop_ref_dist - 1;
            }
            else if (pv->param.videoParam->mfx.GopRefDist == gop_ref_dist)
            {
                /* GopRefDist is compatible with Media SDK's B-pyramid. */
                if (pv->param.videoParam->mfx.GopPicSize == 0)
                {
                    /*
                     * GopPicSize is unknown and could be a multiple of
                     * GopRefDist. Decrement the latter to disable B-pyramid.
                     */
                    pv->param.videoParam->mfx.GopRefDist--;
                }
                else if (pv->param.videoParam->mfx.GopPicSize %
                         pv->param.videoParam->mfx.GopRefDist == 0)
                {
                    /*
                     * GopPicSize is a multiple of GopRefDist.
                     * Increment the former to disable B-pyramid.
                     */
                    pv->param.videoParam->mfx.GopPicSize++;
                }
            }
        }
    }
    else
    {
        /* B-pyramid not supported. */
        pv->param.gop.b_pyramid = 0;
    }

    /*
     * init a dummy encode-only session to get the SPS/PPS
     * and the final output settings sanitized by Media SDK
     * this is fine since the actual encode will use the same
     * values for all parameters relevant to the H.264 bitstream
     */
    mfxStatus err;
    mfxVersion version;
    mfxVideoParam videoParam;
    mfxExtBuffer *extParamArray[3];
    mfxSession session = (mfxSession)0;
    mfxExtCodingOption option1_buf, *option1 = &option1_buf;
    mfxExtCodingOption2 option2_buf, *option2 = &option2_buf;
    mfxExtCodingOptionSPSPPS sps_pps_buf, *sps_pps = &sps_pps_buf;
    version.Major = HB_QSV_MINVERSION_MAJOR;
    version.Minor = HB_QSV_MINVERSION_MINOR;
    err = MFXInit(pv->qsv_info->implementation, &version, &session);
    if (err != MFX_ERR_NONE)
    {
        hb_error("encqsvInit: MFXInit failed (%d)", err);
        return -1;
    }
    err = MFXVideoENCODE_Init(session, pv->param.videoParam);
// workaround for the early 15.33.x driver, should be removed later
#define HB_DRIVER_FIX_33
#ifdef HB_DRIVER_FIX_33
    int la_workaround = 0;
    if (err < MFX_ERR_NONE &&
        pv->param.videoParam->mfx.RateControlMethod == MFX_RATECONTROL_LA)
    {
        // buggy drivers reject LA here; init with CBR and restore LA below
        pv->param.videoParam->mfx.RateControlMethod = MFX_RATECONTROL_CBR;
        err = MFXVideoENCODE_Init(session, pv->param.videoParam);
        la_workaround = 1;
    }
#endif
    if (err < MFX_ERR_NONE) // ignore warnings
    {
        hb_error("encqsvInit: MFXVideoENCODE_Init failed (%d)", err);
        MFXClose(session);
        return -1;
    }

    // query the final (sanitized) parameters plus SPS/PPS from Media SDK
    memset(&videoParam, 0, sizeof(mfxVideoParam));
    videoParam.ExtParam = extParamArray;
    videoParam.NumExtParam = 0;
    // introduced in API 1.3
    memset(sps_pps, 0, sizeof(mfxExtCodingOptionSPSPPS));
    sps_pps->Header.BufferId = MFX_EXTBUFF_CODING_OPTION_SPSPPS;
    sps_pps->Header.BufferSz = sizeof(mfxExtCodingOptionSPSPPS);
    sps_pps->SPSId = 0;
    sps_pps->SPSBuffer = w->config->h264.sps;
    sps_pps->SPSBufSize = sizeof(w->config->h264.sps);
    sps_pps->PPSId = 0;
    sps_pps->PPSBuffer = w->config->h264.pps;
    sps_pps->PPSBufSize = sizeof(w->config->h264.pps);
    videoParam.ExtParam[videoParam.NumExtParam++] = (mfxExtBuffer*)sps_pps;
    // introduced in API 1.0
    memset(option1, 0, sizeof(mfxExtCodingOption));
    option1->Header.BufferId = MFX_EXTBUFF_CODING_OPTION;
    option1->Header.BufferSz = sizeof(mfxExtCodingOption);
    videoParam.ExtParam[videoParam.NumExtParam++] = (mfxExtBuffer*)option1;
    // introduced in API 1.6
    memset(option2, 0, sizeof(mfxExtCodingOption2));
    option2->Header.BufferId = MFX_EXTBUFF_CODING_OPTION2;
    option2->Header.BufferSz = sizeof(mfxExtCodingOption2);
    if (pv->qsv_info->capabilities & HB_QSV_CAP_MSDK_API_1_6)
    {
        // attach to get the final output mfxExtCodingOption2 settings
        videoParam.ExtParam[videoParam.NumExtParam++] = (mfxExtBuffer*)option2;
    }
    err = MFXVideoENCODE_GetVideoParam(session, &videoParam);
    MFXVideoENCODE_Close(session);
    if (err == MFX_ERR_NONE)
    {
        // remove 32-bit NAL prefix (0x00 0x00 0x00 0x01)
        w->config->h264.sps_length = sps_pps->SPSBufSize - 4;
        memmove(w->config->h264.sps, w->config->h264.sps + 4,
                w->config->h264.sps_length);
        w->config->h264.pps_length = sps_pps->PPSBufSize - 4;
        memmove(w->config->h264.pps, w->config->h264.pps + 4,
                w->config->h264.pps_length);
    }
    else
    {
        hb_error("encqsvInit: MFXVideoENCODE_GetVideoParam failed (%d)", err);
        MFXClose(session);
        return -1;
    }

#ifdef HB_DRIVER_FIX_33
    if (la_workaround)
    {
        // restore the LookAhead settings swapped out above
        videoParam.mfx.RateControlMethod =
        pv->param.videoParam->mfx.RateControlMethod = MFX_RATECONTROL_LA;
        option2->LookAheadDepth = pv->param.codingOption2.LookAheadDepth;
        hb_log("encqsvInit: using LookAhead workaround (\"early 33 fix\")");
    }
#endif

    // when using system memory, we re-use this same session
    if (pv->is_sys_mem)
    {
        pv->mfx_session = session;
    }
    else
    {
        MFXClose(session);
    }

    /* check whether B-frames are used */
    int bframes = videoParam.mfx.GopRefDist > 1 && videoParam.mfx.GopPicSize > 2;
    if (bframes)
    {
        /* the muxer needs to know to the init_delay */
        switch (pv->qsv_info->codec_id)
        {
            case MFX_CODEC_AVC:
                pv->init_delay = &w->config->h264.init_delay;
                break;
            default: // unreachable
                break;
        }
        /* let the muxer know that it should expect B-frames */
        job->areBframes = 1;
        /* holds the PTS sequence in display order, used to generate DTS */
        pv->list_dts = hb_list_init();
    }

    // log code path and main output settings
    hb_log("encqsvInit: using %s path",
           pv->is_sys_mem ? "encode-only" : "full QSV");
    hb_log("encqsvInit: TargetUsage %"PRIu16" AsyncDepth %"PRIu16"",
           videoParam.mfx.TargetUsage, videoParam.AsyncDepth);
    hb_log("encqsvInit: GopRefDist %"PRIu16" GopPicSize %"PRIu16" NumRefFrame %"PRIu16"",
           videoParam.mfx.GopRefDist, videoParam.mfx.GopPicSize, videoParam.mfx.NumRefFrame);
    if (pv->qsv_info->capabilities & HB_QSV_CAP_B_REF_PYRAMID)
    {
        hb_log("encqsvInit: BFrames %s BPyramid %s",
               bframes ? "on" : "off",
               bframes && pv->param.gop.b_pyramid ? "on" : "off");
    }
    else
    {
        hb_log("encqsvInit: BFrames %s", bframes ? "on" : "off");
    }
    if (pv->qsv_info->capabilities & HB_QSV_CAP_OPTION2_IB_ADAPT)
    {
        if (bframes)
        {
            hb_log("encqsvInit: AdaptiveI %s AdaptiveB %s",
                   hb_qsv_codingoption_get_name(option2->AdaptiveI),
                   hb_qsv_codingoption_get_name(option2->AdaptiveB));
        }
        else
        {
            hb_log("encqsvInit: AdaptiveI %s",
                   hb_qsv_codingoption_get_name(option2->AdaptiveI));
        }
    }
    if (videoParam.mfx.RateControlMethod == MFX_RATECONTROL_CQP)
    {
        char qpi[7], qpp[9], qpb[9];
        snprintf(qpi, sizeof(qpi), "QPI %"PRIu16"", videoParam.mfx.QPI);
        snprintf(qpp, sizeof(qpp), " QPP %"PRIu16"", videoParam.mfx.QPP);
        snprintf(qpb, sizeof(qpb), " QPB %"PRIu16"", videoParam.mfx.QPB);
        // only print QPP/QPB when P-/B-frames can actually occur
        hb_log("encqsvInit: RateControlMethod CQP with %s%s%s", qpi,
               videoParam.mfx.GopPicSize > 1 ? qpp : "",
               videoParam.mfx.GopRefDist > 1 ? qpb : "");
    }
    else
    {
        switch (videoParam.mfx.RateControlMethod)
        {
            case MFX_RATECONTROL_LA:
                hb_log("encqsvInit: RateControlMethod LA TargetKbps %"PRIu16" LookAheadDepth %"PRIu16"",
                       videoParam.mfx.TargetKbps, option2->LookAheadDepth);
                break;
            case MFX_RATECONTROL_LA_ICQ:
                hb_log("encqsvInit: RateControlMethod LA_ICQ ICQQuality %"PRIu16" LookAheadDepth %"PRIu16"",
                       videoParam.mfx.ICQQuality, option2->LookAheadDepth);
                break;
            case MFX_RATECONTROL_ICQ:
                hb_log("encqsvInit: RateControlMethod ICQ ICQQuality %"PRIu16"",
                       videoParam.mfx.ICQQuality);
                break;
            case MFX_RATECONTROL_CBR:
            case MFX_RATECONTROL_VBR:
                hb_log("encqsvInit: RateControlMethod %s TargetKbps %"PRIu16" MaxKbps %"PRIu16" BufferSizeInKB %"PRIu16" InitialDelayInKB %"PRIu16"",
                       videoParam.mfx.RateControlMethod == MFX_RATECONTROL_CBR ? "CBR" : "VBR",
                       videoParam.mfx.TargetKbps, videoParam.mfx.MaxKbps,
                       videoParam.mfx.BufferSizeInKB, videoParam.mfx.InitialDelayInKB);
                break;
            default:
                hb_log("encqsvInit: invalid rate control method %"PRIu16"",
                       videoParam.mfx.RateControlMethod);
                return -1;
        }
    }
    if ((pv->qsv_info->capabilities & HB_QSV_CAP_OPTION2_LA_DOWNS) &&
        (videoParam.mfx.RateControlMethod == MFX_RATECONTROL_LA ||
         videoParam.mfx.RateControlMethod == MFX_RATECONTROL_LA_ICQ))
    {
        switch (option2->LookAheadDS)
        {
            case MFX_LOOKAHEAD_DS_UNKNOWN:
                hb_log("encqsvInit: LookAheadDS unknown (auto)");
                break;
            case MFX_LOOKAHEAD_DS_OFF:
                hb_log("encqsvInit: LookAheadDS off");
                break;
            case MFX_LOOKAHEAD_DS_2x:
                hb_log("encqsvInit: LookAheadDS 2x");
                break;
            case MFX_LOOKAHEAD_DS_4x:
                hb_log("encqsvInit: LookAheadDS 4x");
                break;
            default:
                hb_log("encqsvInit: invalid LookAheadDS value 0x%"PRIx16"",
                       option2->LookAheadDS);
                break;
        }
    }
    switch (videoParam.mfx.FrameInfo.PicStruct)
    {
        // quiet, most people don't care
        case MFX_PICSTRUCT_PROGRESSIVE:
            break;
        // interlaced encoding is intended for advanced users only, who do care
        case MFX_PICSTRUCT_FIELD_TFF:
            hb_log("encqsvInit: PicStruct top field first");
            break;
        case MFX_PICSTRUCT_FIELD_BFF:
            hb_log("encqsvInit: PicStruct bottom field first");
            break;
        default:
            hb_error("encqsvInit: invalid PicStruct value 0x%"PRIx16"",
                     videoParam.mfx.FrameInfo.PicStruct);
            return -1;
    }
    hb_log("encqsvInit: CAVLC %s",
           hb_qsv_codingoption_get_name(option1->CAVLC));
    if (pv->param.rc.lookahead == 0 &&
        videoParam.mfx.RateControlMethod != MFX_RATECONTROL_CQP)
    {
        // LA/CQP and ExtBRC/MBBRC are mutually exclusive
        if (pv->qsv_info->capabilities & HB_QSV_CAP_OPTION2_EXTBRC)
        {
            hb_log("encqsvInit: ExtBRC %s",
                   hb_qsv_codingoption_get_name(option2->ExtBRC));
        }
        if (pv->qsv_info->capabilities & HB_QSV_CAP_OPTION2_MBBRC)
        {
            hb_log("encqsvInit: MBBRC %s",
                   hb_qsv_codingoption_get_name(option2->MBBRC));
        }
    }
    if (pv->qsv_info->capabilities & HB_QSV_CAP_OPTION2_TRELLIS)
    {
        switch (option2->Trellis)
        {
            case MFX_TRELLIS_OFF:
                hb_log("encqsvInit: Trellis off");
                break;
            case MFX_TRELLIS_UNKNOWN:
                hb_log("encqsvInit: Trellis unknown (auto)");
                break;
            default:
                hb_log("encqsvInit: Trellis on (%s%s%s)",
                       (option2->Trellis & MFX_TRELLIS_I) ? "I" : "",
                       (option2->Trellis & MFX_TRELLIS_P) &&
                       (videoParam.mfx.GopPicSize > 1)    ? "P" : "",
                       (option2->Trellis & MFX_TRELLIS_B) &&
                       (videoParam.mfx.GopRefDist > 1)    ? "B" : "");
                break;
        }
    }
    hb_log("encqsvInit: H.264 profile %s @ level %s",
           qsv_h264_profile_xlat(videoParam.mfx.CodecProfile),
           qsv_h264_level_xlat  (videoParam.mfx.CodecLevel));

    // AsyncDepth has now been set and/or modified by Media SDK
    pv->max_async_depth = videoParam.AsyncDepth;
    pv->async_depth = 0;

    return 0;
}
/*
 * encqsvClose: free every resource the QSV encoder allocated.
 *
 * Tears down the Media SDK encoder space (tasks and their bitstream
 * buffers, frame surfaces, sync points), closes the QSV session, then
 * frees the private data and every list it owns.
 */
void encqsvClose(hb_work_object_t *w)
{
    hb_work_private_t *pv = w->private_data;
    int i;

    // only tear down Media SDK state if the shared context was brought up
    if (pv != NULL && pv->job != NULL && pv->job->qsv.ctx != NULL &&
        pv->job->qsv.ctx->is_context_active)
    {
        av_qsv_context *qsv_ctx = pv->job->qsv.ctx;
        av_qsv_space *qsv_enc_space = pv->job->qsv.ctx->enc_space;

        if (qsv_enc_space != NULL)
        {
            if (qsv_enc_space->is_init_done)
            {
                // free the encoding tasks and their bitstream buffers
                // NOTE(review): the loop stops at i > 1, so list item 0 is
                // not freed here — presumably reclaimed elsewhere; confirm.
                for (i = av_qsv_list_count(qsv_enc_space->tasks); i > 1; i--)
                {
                    av_qsv_task *task = av_qsv_list_item(qsv_enc_space->tasks,
                                                         i - 1);
                    if (task != NULL)
                    {
                        if (task->bs != NULL)
                        {
                            av_freep(&task->bs->Data);
                        }
                        av_qsv_list_rem(qsv_enc_space->tasks, task);
                        av_freep(&task->bs);
                        av_freep(&task);
                    }
                }
                av_qsv_list_close(&qsv_enc_space->tasks);

                // free the frame surfaces; the plane data (Y/VU) is only
                // owned by us when encoding from system memory
                for (i = 0; i < qsv_enc_space->surface_num; i++)
                {
                    if (pv->is_sys_mem)
                    {
                        av_freep(&qsv_enc_space->p_surfaces[i]->Data.VU);
                        av_freep(&qsv_enc_space->p_surfaces[i]->Data.Y);
                    }
                    av_freep(&qsv_enc_space->p_surfaces[i]);
                }
                qsv_enc_space->surface_num = 0;

                // free the sync points
                for (i = 0; i < qsv_enc_space->sync_num; i++)
                {
                    av_freep(&qsv_enc_space->p_syncp[i]->p_sync);
                    av_freep(&qsv_enc_space->p_syncp[i]);
                }
                qsv_enc_space->sync_num = 0;
            }
            qsv_enc_space->is_init_done = 0;
        }
        if (qsv_ctx != NULL)
        {
            /* QSV context cleanup and MFXClose */
            av_qsv_context_clean(qsv_ctx);

            // the context itself is only ours in system-memory mode
            if (pv->is_sys_mem)
            {
                av_freep(&qsv_ctx);
            }
        }
    }

    if (pv != NULL)
    {
        if (pv->delayed_processing != NULL)
        {
            /* the list is already empty */
            hb_list_close(&pv->delayed_processing);
        }
        if (pv->sws_context_to_nv12 != NULL)
        {
            sws_freeContext(pv->sws_context_to_nv12);
        }
        // drain and close the pending-DTS list
        if (pv->list_dts != NULL)
        {
            int64_t *item;

            while ((item = hb_list_item(pv->list_dts, 0)) != NULL)
            {
                hb_list_rem(pv->list_dts, item);
                free(item);
            }
            hb_list_close(&pv->list_dts);
        }
        // drain and close any chapter markers that never got attached
        if (pv->delayed_chapters != NULL)
        {
            struct chapter_s *item;

            while ((item = hb_list_item(pv->delayed_chapters, 0)) != NULL)
            {
                hb_list_rem(pv->delayed_chapters, item);
                free(item);
            }
            hb_list_close(&pv->delayed_chapters);
        }
        // drain and close any encoded frames not yet sent downstream
        if (pv->encoded_frames != NULL)
        {
            hb_buffer_t *item;

            while ((item = hb_list_item(pv->encoded_frames, 0)) != NULL)
            {
                hb_list_rem(pv->encoded_frames, item);
                hb_buffer_close(&item);
            }
            hb_list_close(&pv->encoded_frames);
        }
    }

    free(pv);
    w->private_data = NULL;
}
/*
 * save_chapter: queue a chapter marker carried by an input frame.
 *
 * Frames can sit in the encoder's queue for a while; remember the
 * marker's timestamp now so the matching output frame can be flagged
 * as a chapter start when it finally comes out of the encoder.
 */
static void save_chapter(hb_work_private_t *pv, hb_buffer_t *buf)
{
    if (pv->next_chapter_pts == AV_NOPTS_VALUE)
    {
        pv->next_chapter_pts = buf->s.start;
    }

    /*
     * Markers can arrive faster than frames leave the encoder queue,
     * and dropping one causes weird side effects downstream (including
     * missing chapters in the output), so queue every marker instead
     * of keeping only the latest one.
     */
    struct chapter_s *entry = malloc(sizeof(*entry));
    if (entry != NULL)
    {
        entry->start = buf->s.start;
        entry->index = buf->s.new_chap;
        hb_list_add(pv->delayed_chapters, entry);
    }

    /* clear the flag so 'work_loop' won't mark the wrong buffer */
    buf->s.new_chap = 0;
}
/*
 * restore_chapter: attach the oldest queued chapter marker to an
 * output buffer, and start watching for the next queued marker (if
 * any) by updating pv->next_chapter_pts.
 */
static void restore_chapter(hb_work_private_t *pv, hb_buffer_t *buf)
{
    struct chapter_s *entry;

    /* this chapter is being consumed now */
    pv->next_chapter_pts = AV_NOPTS_VALUE;

    entry = hb_list_item(pv->delayed_chapters, 0);
    if (entry == NULL)
    {
        return;
    }

    /* tag the buffer and drop the entry from the queue */
    hb_list_rem(pv->delayed_chapters, entry);
    buf->s.new_chap = entry->index;
    free(entry);

    /* if another marker is pending, start looking for it */
    entry = hb_list_item(pv->delayed_chapters, 0);
    if (entry != NULL)
    {
        pv->next_chapter_pts = entry->start;
    }
}
/*
 * compute_init_delay: derive the container's initial video delay
 * (PTS(0) - DTS(0), in 90 kHz ticks) and the encoder's B-frame delay
 * (in frames) from the first encoded bitstream.
 *
 * Runs only once: pv->init_delay is set to NULL at the end, and the
 * function returns immediately on subsequent calls.
 */
static void compute_init_delay(hb_work_private_t *pv, mfxBitstream *bs)
{
    if (pv->init_delay == NULL)
    {
        return; // not needed or already set
    }

    /*
     * In the MP4 container, DT(0) = STTS(0) = 0.
     *
     * Which gives us:
     * CT(0) = CTTS(0) + STTS(0) = CTTS(0) = PTS(0) - DTS(0)
     * When DTS(0) < PTS(0), we then have:
     * CT(0) > 0 for video, but not audio (breaks A/V sync).
     *
     * This is typically solved by writing an edit list shifting
     * video samples by the initial delay, PTS(0) - DTS(0).
     *
     * See:
     * ISO/IEC 14496-12:2008(E), ISO base media file format
     * - 8.6.1.2 Decoding Time to Sample Box
     */
    if (pv->qsv_info->capabilities & HB_QSV_CAP_MSDK_API_1_6)
    {
        /* compute init_delay (in ticks) based on the DTS provided by MSDK. */
        int64_t init_delay = bs->TimeStamp - bs->DecodeTimeStamp;

        /*
         * we also need to know the delay in frames to generate DTS.
         *
         * compute it based on the init_delay and average frame duration,
         * and account for potential rounding errors due to the timebase.
         */
        double avg_frame_dur = ((double)pv->job->vrate_base /
                                (double)pv->job->vrate * 90000.);

        // round to the nearest whole number of frames
        pv->bfrm_delay = (init_delay + (avg_frame_dur / 2)) / avg_frame_dur;

        if (pv->bfrm_delay < 1 || pv->bfrm_delay > BFRM_DELAY_MAX)
        {
            hb_log("compute_init_delay: "
                   "invalid delay %d (PTS: %"PRIu64", DTS: %"PRId64")",
                   pv->bfrm_delay, bs->TimeStamp, bs->DecodeTimeStamp);

            /* we have B-frames, the frame delay should be at least 1 */
            if (pv->bfrm_delay < 1)
            {
                mfxStatus sts;
                mfxVideoParam videoParam;
                mfxSession session = pv->job->qsv.ctx->mfx_session;

                memset(&videoParam, 0, sizeof(mfxVideoParam));

                // ask the encoder for its actual GOP settings as a fallback
                sts = MFXVideoENCODE_GetVideoParam(session, &videoParam);
                if (sts != MFX_ERR_NONE)
                {
                    hb_log("compute_init_delay: "
                           "MFXVideoENCODE_GetVideoParam failed (%d)", sts);
                    pv->bfrm_delay = 1;
                }
                else
                {
                    /* usually too large, but should cover all cases */
                    pv->bfrm_delay = FFMIN(pv->frames_in - 1,
                                           videoParam.mfx.GopRefDist - 1);
                }
            }

            pv->bfrm_delay = FFMIN(BFRM_DELAY_MAX, pv->bfrm_delay);
        }

        pv->init_delay[0] = pv->init_pts[pv->bfrm_delay] - pv->init_pts[0];
    }
    else
    {
        /*
         * we can't get the DTS from MSDK, so we need to generate our own.
         *
         * B-pyramid not possible here, so the delay in frames is always 1.
         */
        pv->bfrm_delay = 1;
        pv->init_delay[0] = pv->init_pts[1] - pv->init_pts[0];
    }

    /* The delay only needs to be set once. */
    pv->init_delay = NULL;
}
/*
 * qsv_frame_is_key: returns nonzero when a frame can start a chapter —
 * either an IDR frame, or a frame of unknown type (treated as key).
 */
static int qsv_frame_is_key(mfxU16 FrameType)
{
    if (FrameType == MFX_FRAMETYPE_UNKNOWN)
    {
        return 1;
    }
    return (FrameType & MFX_FRAMETYPE_IDR) != 0;
}
/*
 * qsv_bitstream_slurp: turn one encoded frame from the Media SDK
 * bitstream into an hb_buffer_t and append it to pv->encoded_frames.
 *
 * Converts the Annex B payload to MP4 format, resets the bitstream for
 * reuse, fills in PTS/DTS/duration and frame-type flags, and attaches a
 * pending chapter marker when appropriate. On failure, flags the job as
 * dead via done_error/die.
 */
static void qsv_bitstream_slurp(hb_work_private_t *pv, mfxBitstream *bs)
{
    /*
     * we need to convert the encoder's Annex B output
     * to an MP4-compatible format (ISO/IEC 14496-15).
     */
    hb_buffer_t *buf = hb_nal_bitstream_annexb_to_mp4(bs->Data + bs->DataOffset,
                                                      bs->DataLength);
    if (buf == NULL)
    {
        hb_error("encqsv: hb_nal_bitstream_annexb_to_mp4 failed");
        goto fail;
    }

    // reset the bitstream so the encoder can reuse it
    bs->DataLength = bs->DataOffset = 0;
    bs->MaxLength = pv->job->qsv.ctx->enc_space->p_buf_max_size;

    buf->s.frametype = hb_qsv_frametype_xlat(bs->FrameType, &buf->s.flags);
    buf->s.start = buf->s.renderOffset = bs->TimeStamp;
    buf->s.stop = buf->s.start + get_frame_duration(pv, buf);
    buf->s.duration = buf->s.stop - buf->s.start;

    /* compute the init_delay before setting the DTS */
    compute_init_delay(pv, bs);

    /*
     * Generate VFR-compatible output DTS based on input PTS.
     *
     * Depends on the B-frame delay:
     *
     * 0: ipts0, ipts1, ipts2...
     * 1: ipts0 - ipts1, ipts1 - ipts1, ipts1, ipts2...
     * 2: ipts0 - ipts2, ipts1 - ipts2, ipts2 - ipts2, ipts1...
     * ...and so on.
     */
    if (pv->bfrm_delay)
    {
        // the first bfrm_delay+1 frames get shifted DTS values computed
        // from the initial PTS buffer; later frames pop from the queue
        if (pv->frames_out <= pv->bfrm_delay)
        {
            buf->s.renderOffset = (pv->init_pts[pv->frames_out] -
                                   pv->init_pts[pv->bfrm_delay]);
        }
        else
        {
            buf->s.renderOffset = hb_qsv_pop_next_dts(pv->list_dts);
        }
    }

    /* check if B-pyramid is used even though it's disabled */
    if ((pv->param.gop.b_pyramid == 0) &&
        (bs->FrameType & MFX_FRAMETYPE_B) &&
        (bs->FrameType & MFX_FRAMETYPE_REF))
    {
        hb_log("encqsv: BPyramid off not respected (delay: %d)", pv->bfrm_delay);

        /* don't pollute the log unnecessarily */
        pv->param.gop.b_pyramid = 1;
    }

    /* check for PTS < DTS */
    if (buf->s.start < buf->s.renderOffset)
    {
        hb_log("encqsv: PTS %"PRId64" < DTS %"PRId64" for frame %d with type '%s'",
               buf->s.start, buf->s.renderOffset, pv->frames_out + 1,
               hb_qsv_frametype_name(bs->FrameType));
    }

    /*
     * If we have a chapter marker pending and this frame's PTS
     * is at or after the marker's PTS, use it as the chapter start.
     */
    if (pv->next_chapter_pts != AV_NOPTS_VALUE &&
        pv->next_chapter_pts <= buf->s.start &&
        qsv_frame_is_key(bs->FrameType))
    {
        restore_chapter(pv, buf);
    }

    hb_list_add(pv->encoded_frames, buf);
    pv->frames_out++;
    return;

fail:
    *pv->job->done_error = HB_ERROR_UNKNOWN;
    *pv->job->die = 1;
}
/*
 * qsv_enc_work: submit one frame surface to the Media SDK encoder, then
 * retrieve any output bitstreams that are ready.
 *
 * surface may be NULL to flush the encoder; in that case we keep
 * looping until the encoder reports MFX_ERR_MORE_DATA (nothing left).
 * qsv_atom is the decode/filter pipeline item associated with the
 * surface, or NULL when encoding from system memory or flushing.
 *
 * Returns 0 on success, -1 on a fatal encoder error.
 */
static int qsv_enc_work(hb_work_private_t *pv,
                        av_qsv_list *qsv_atom,
                        mfxFrameSurface1 *surface)
{
    mfxStatus sts;
    av_qsv_context *qsv_ctx = pv->job->qsv.ctx;
    av_qsv_space *qsv_enc_space = pv->job->qsv.ctx->enc_space;

    do
    {
        // reserve a sync point and the task slot for this submission
        int sync_idx = av_qsv_get_free_sync(qsv_enc_space, qsv_ctx);
        if (sync_idx == -1)
        {
            hb_error("encqsv: av_qsv_get_free_sync failed");
            return -1;
        }
        av_qsv_task *task = av_qsv_list_item(qsv_enc_space->tasks,
                                             pv->async_depth);

        do
        {
            sts = MFXVideoENCODE_EncodeFrameAsync(qsv_ctx->mfx_session,
                                                  NULL, surface, task->bs,
                                                  qsv_enc_space->p_syncp[sync_idx]->p_sync);

            // the encoder has consumed the surface (buffered or accepted);
            // release our lock on it (only meaningful for video memory)
            if (sts == MFX_ERR_MORE_DATA || (sts >= MFX_ERR_NONE &&
                                             sts != MFX_WRN_DEVICE_BUSY))
            {
                if (surface != NULL && !pv->is_sys_mem)
                {
                    ff_qsv_atomic_dec(&surface->Data.Locked);
                }
            }

            if (sts == MFX_ERR_MORE_DATA)
            {
                // frame buffered, no output yet: defer the pipeline item
                // until an output frame lets us flush its stages
                if (qsv_atom != NULL)
                {
                    hb_list_add(pv->delayed_processing, qsv_atom);
                }
                ff_qsv_atomic_dec(&qsv_enc_space->p_syncp[sync_idx]->in_use);
                break;
            }
            else if (sts < MFX_ERR_NONE)
            {
                hb_error("encqsv: MFXVideoENCODE_EncodeFrameAsync failed (%d)", sts);
                return -1;
            }
            else if (sts == MFX_WRN_DEVICE_BUSY)
            {
                av_qsv_sleep(10); // device is busy, wait then repeat the call
                continue;
            }
            else
            {
                // submission accepted: record the in-flight operation
                av_qsv_stage *new_stage = av_qsv_stage_init();
                new_stage->type = AV_QSV_ENCODE;
                new_stage->in.p_surface = surface;
                new_stage->out.sync = qsv_enc_space->p_syncp[sync_idx];
                new_stage->out.p_bs = task->bs;
                task->stage = new_stage;
                pv->async_depth++;

                if (qsv_atom != NULL)
                {
                    av_qsv_add_stagee(&qsv_atom, new_stage, HAVE_THREADS);
                }
                else
                {
                    /* encode-only or flushing */
                    av_qsv_list *new_qsv_atom = av_qsv_list_init(HAVE_THREADS);
                    av_qsv_add_stagee(&new_qsv_atom, new_stage, HAVE_THREADS);
                    av_qsv_list_add (qsv_ctx->pipes, new_qsv_atom);
                }

                // flush any pipeline items deferred by earlier
                // MFX_ERR_MORE_DATA submissions
                int i = hb_list_count(pv->delayed_processing);
                while (--i >= 0)
                {
                    av_qsv_list *item = hb_list_item(pv->delayed_processing, i);
                    if (item != NULL)
                    {
                        hb_list_rem(pv->delayed_processing, item);
                        av_qsv_flush_stages(qsv_ctx->pipes, &item);
                    }
                }
                break;
            }

            ff_qsv_atomic_dec(&qsv_enc_space->p_syncp[sync_idx]->in_use);
            break;
        }
        while (sts >= MFX_ERR_NONE);

        // retrieve output: always when the async queue is full, and keep
        // draining (surface == NULL) while flushing
        do
        {
            if (pv->async_depth == 0) break;

            /* we've done enough asynchronous operations or we're flushing */
            if (pv->async_depth >= pv->max_async_depth || surface == NULL)
            {
                av_qsv_task *task = av_qsv_list_item(qsv_enc_space->tasks, 0);
                pv->async_depth--;

                /* perform a sync operation to get the output bitstream */
                av_qsv_wait_on_sync(qsv_ctx, task->stage);

                if (task->bs->DataLength > 0)
                {
                    av_qsv_list *pipe = av_qsv_pipe_by_stage(qsv_ctx->pipes,
                                                             task->stage);
                    av_qsv_flush_stages(qsv_ctx->pipes, &pipe);

                    /* get the encoded frame from the bitstream */
                    qsv_bitstream_slurp(pv, task->bs);

                    /* shift for fifo */
                    if (pv->async_depth)
                    {
                        av_qsv_list_rem(qsv_enc_space->tasks, task);
                        av_qsv_list_add(qsv_enc_space->tasks, task);
                    }
                    task->stage = NULL;
                }
            }
        }
        while (surface == NULL);
    }
    while (surface == NULL && sts != MFX_ERR_MORE_DATA);
    return 0;
}
/*
 * link_buffer_list: drain an hb_list_t of buffers into a singly-linked
 * chain (via hb_buffer_t.next), preserving order. Returns the head of
 * the chain, or NULL when the list is empty.
 */
static hb_buffer_t* link_buffer_list(hb_list_t *list)
{
    hb_buffer_t *head = NULL;
    hb_buffer_t **tail = &head;
    hb_buffer_t *buf;

    while ((buf = hb_list_item(list, 0)) != NULL)
    {
        hb_list_rem(list, buf);
        *tail = buf;
        tail  = &buf->next;
    }
    return head;
}
/*
 * encqsvWork: main work-loop entry point for the QSV encoder.
 *
 * Consumes one input buffer (raw frame or EOF), feeds it to the Media
 * SDK encoder, and emits any encoded frames that are ready. Returns
 * HB_WORK_OK, HB_WORK_DONE on EOF, or HB_WORK_ERROR on fatal failure.
 */
int encqsvWork(hb_work_object_t *w, hb_buffer_t **buf_in, hb_buffer_t **buf_out)
{
    hb_work_private_t *pv = w->private_data;
    hb_buffer_t *in = *buf_in;
    hb_job_t *job = pv->job;

    while (qsv_enc_init(pv) >= 2)
    {
        av_qsv_sleep(1); // encoding not initialized, wait and repeat the call
    }

    if (*job->die)
    {
        goto fail; // unrecoverable error, don't attempt to encode
    }

    /*
     * EOF on input. Flush the decoder, then send the
     * EOF downstream to let the muxer know we're done.
     */
    if (in->size <= 0)
    {
        qsv_enc_work(pv, NULL, NULL);
        hb_list_add(pv->encoded_frames, in);
        *buf_out = link_buffer_list(pv->encoded_frames);
        *buf_in = NULL; // don't let 'work_loop' close this buffer
        return HB_WORK_DONE;
    }

    mfxFrameSurface1 *surface = NULL;
    av_qsv_list *qsv_atom = NULL;
    av_qsv_context *qsv_ctx = job->qsv.ctx;
    av_qsv_space *qsv_enc_space = job->qsv.ctx->enc_space;

    if (pv->is_sys_mem)
    {
        // system memory: grab a free surface and convert YUV420 -> NV12
        mfxFrameInfo *info = &pv->param.videoParam->mfx.FrameInfo;
        int surface_index = av_qsv_get_free_surface(qsv_enc_space, qsv_ctx, info,
                                                    QSV_PART_ANY);
        if (surface_index == -1)
        {
            hb_error("encqsv: av_qsv_get_free_surface failed");
            goto fail;
        }
        surface = qsv_enc_space->p_surfaces[surface_index];
        qsv_yuv420_to_nv12(pv->sws_context_to_nv12, surface, in);
    }
    else
    {
        // video memory: reuse the surface produced by the QSV decoder
        qsv_atom = in->qsv_details.qsv_atom;
        surface = av_qsv_get_last_stage(qsv_atom)->out.p_surface;

        /*
         * QSV decoding fills the QSV context's dts_seq list, we need to
         * pop this surface's DTS so dts_seq doesn't grow unnecessarily.
         */
        av_qsv_dts_pop(qsv_ctx);
    }

    /*
     * Debugging code to check that the upstream modules have generated
     * a continuous, self-consistent frame stream.
     */
    if (pv->last_start > in->s.start)
    {
        hb_log("encqsv: input continuity error, "
               "last start %"PRId64" start %"PRId64"",
               pv->last_start, in->s.start);
    }
    pv->last_start = in->s.start;

    /* for DTS generation */
    if (pv->frames_in <= BFRM_DELAY_MAX)
    {
        pv->init_pts[pv->frames_in] = in->s.start;
    }
    if (pv->frames_in)
    {
        hb_qsv_add_new_dts(pv->list_dts, in->s.start);
    }
    pv->frames_in++;

    /*
     * Chapters have to start with a keyframe, so request one here.
     *
     * Using an mfxEncodeCtrl structure to force key frame generation is not
     * possible when using a lookahead and frame reordering, so instead do
     * the following before encoding the frame attached to the chapter:
     *
     * - flush the encoder to encode and retrieve any buffered frames
     *
     * - do a hard reset (MFXVideoENCODE_Close, then Init) of
     *   the encoder to make sure the next frame is a keyframe
     *
     * The hard reset ensures encoding resumes with a clean state, avoiding
     * miscellaneous hard-to-diagnose issues that may occur when resuming
     * an encode after flushing the encoder or using MFXVideoENCODE_Reset.
     */
    if (in->s.new_chap > 0 && job->chapter_markers)
    {
        mfxStatus sts;

        if (qsv_enc_work(pv, NULL, NULL) < 0)
        {
            goto fail;
        }

        sts = MFXVideoENCODE_Close(qsv_ctx->mfx_session);
        if (sts != MFX_ERR_NONE)
        {
            hb_error("encqsv: MFXVideoENCODE_Close failed (%d)", sts);
            goto fail;
        }

        sts = MFXVideoENCODE_Init(qsv_ctx->mfx_session, pv->param.videoParam);
        if (sts < MFX_ERR_NONE)
        {
            hb_error("encqsv: MFXVideoENCODE_Init failed (%d)", sts);
            goto fail;
        }

        save_chapter(pv, in);
    }

    /*
     * If interlaced encoding is requested during encoder initialization,
     * but the input mfxFrameSurface1 is flagged as progressive here,
     * the output bitstream will be progressive (according to MediaInfo).
     *
     * Assume the user knows what he's doing (say he is e.g. encoding a
     * progressive-flagged source using interlaced compression - he may
     * well have a good reason to do so; mis-flagged sources do exist).
     */
    surface->Info.PicStruct = pv->param.videoParam->mfx.FrameInfo.PicStruct;
    surface->Data.TimeStamp = in->s.start;
    save_frame_duration(pv, in);

    /*
     * Now that the input surface is setup, we can encode it.
     */
    if (qsv_enc_work(pv, qsv_atom, surface) < 0)
    {
        goto fail;
    }

    *buf_out = link_buffer_list(pv->encoded_frames);
    return HB_WORK_OK;

fail:
    if (*job->done_error == HB_ERROR_NONE)
    {
        *job->done_error = HB_ERROR_UNKNOWN;
    }
    *job->die = 1;
    *buf_out = NULL;
    return HB_WORK_ERROR;
}
#endif // USE_QSV
HandBrake-0.10.2/libhb/audio_remap.c 0000664 0001752 0001752 00000023021 12463330511 017544 0 ustar handbrake handbrake /* audio_remap.c
*
* Copyright (c) 2003-2015 HandBrake Team
* This file is part of the HandBrake source code
* Homepage:
* It may be used under the terms of the GNU General Public License v2.
* For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "common.h"
#include "hbffmpeg.h"
#include "audio_remap.h"
// Predefined channel-order tables: each is a zero-terminated list of
// AV_CH_* flags in the order the corresponding consumer expects
// channels to appear (see hb_audio_remap_build_table).

// source: libavutil/channel_layout.h
hb_chan_map_t hb_libav_chan_map =
{
    {
        AV_CH_FRONT_LEFT,
        AV_CH_FRONT_RIGHT,
        AV_CH_FRONT_CENTER,
        AV_CH_LOW_FREQUENCY,
        AV_CH_BACK_LEFT,
        AV_CH_BACK_RIGHT,
        AV_CH_FRONT_LEFT_OF_CENTER,
        AV_CH_FRONT_RIGHT_OF_CENTER,
        AV_CH_BACK_CENTER,
        AV_CH_SIDE_LEFT,
        AV_CH_SIDE_RIGHT,
        0
    }
};

// source: liba52 documentation
hb_chan_map_t hb_liba52_chan_map =
{
    {
        AV_CH_LOW_FREQUENCY,
        AV_CH_FRONT_LEFT,
        AV_CH_FRONT_CENTER,
        AV_CH_FRONT_RIGHT,
        AV_CH_BACK_CENTER,
        AV_CH_SIDE_LEFT,
        AV_CH_SIDE_RIGHT,
        0
    }
};

// source: http://xiph.org/vorbis/doc/Vorbis_I_spec.html#x1-800004.3.9
hb_chan_map_t hb_vorbis_chan_map =
{
    {
        AV_CH_FRONT_LEFT,
        AV_CH_FRONT_CENTER,
        AV_CH_FRONT_RIGHT,
        AV_CH_SIDE_LEFT,
        AV_CH_SIDE_RIGHT,
        AV_CH_BACK_LEFT,
        AV_CH_BACK_CENTER,
        AV_CH_BACK_RIGHT,
        AV_CH_LOW_FREQUENCY,
        0
    }
};

// source: https://developer.apple.com/library/mac/#documentation/musicaudio/reference/CoreAudioDataTypesRef/Reference/reference.html
hb_chan_map_t hb_aac_chan_map =
{
    {
        AV_CH_FRONT_CENTER,
        AV_CH_FRONT_LEFT_OF_CENTER,
        AV_CH_FRONT_RIGHT_OF_CENTER,
        AV_CH_FRONT_LEFT,
        AV_CH_FRONT_RIGHT,
        AV_CH_SIDE_LEFT,
        AV_CH_SIDE_RIGHT,
        AV_CH_BACK_LEFT,
        AV_CH_BACK_RIGHT,
        AV_CH_BACK_CENTER,
        AV_CH_LOW_FREQUENCY,
        0
    }
};
/*
 * remap_planar: reorder planar audio. With one plane per channel,
 * remapping is just a permutation of the plane pointers; nsamples is
 * unused (kept for the common remap function-pointer signature).
 */
static void remap_planar(uint8_t **samples, int nsamples,
                         int nchannels, int *remap_table)
{
    uint8_t *orig[HB_AUDIO_REMAP_MAX_CHANNELS];
    int ch;

    memcpy(orig, samples, nchannels * sizeof(uint8_t*));
    for (ch = 0; ch < nchannels; ch++)
    {
        samples[ch] = orig[remap_table[ch]];
    }
}
/*
 * remap_u8_interleaved: permute the channels of each interleaved
 * 8-bit sample frame in place.
 */
static void remap_u8_interleaved(uint8_t **samples, int nsamples,
                                 int nchannels, int *remap_table)
{
    int frame, ch;
    uint8_t scratch[HB_AUDIO_REMAP_MAX_CHANNELS];
    uint8_t *cur = (*samples);

    for (frame = 0; frame < nsamples; frame++)
    {
        memcpy(scratch, cur, nchannels * sizeof(uint8_t));
        for (ch = 0; ch < nchannels; ch++)
        {
            cur[ch] = scratch[remap_table[ch]];
        }
        cur += nchannels;
    }
}
/*
 * remap_s16_interleaved: permute the channels of each interleaved
 * 16-bit sample frame in place.
 */
static void remap_s16_interleaved(uint8_t **samples, int nsamples,
                                  int nchannels, int *remap_table)
{
    int frame, ch;
    int16_t scratch[HB_AUDIO_REMAP_MAX_CHANNELS];
    int16_t *cur = (int16_t*)(*samples);

    for (frame = 0; frame < nsamples; frame++)
    {
        memcpy(scratch, cur, nchannels * sizeof(int16_t));
        for (ch = 0; ch < nchannels; ch++)
        {
            cur[ch] = scratch[remap_table[ch]];
        }
        cur += nchannels;
    }
}
/*
 * remap_s32_interleaved: permute the channels of each interleaved
 * 32-bit sample frame in place.
 */
static void remap_s32_interleaved(uint8_t **samples, int nsamples,
                                  int nchannels, int *remap_table)
{
    int frame, ch;
    int32_t scratch[HB_AUDIO_REMAP_MAX_CHANNELS];
    int32_t *cur = (int32_t*)(*samples);

    for (frame = 0; frame < nsamples; frame++)
    {
        memcpy(scratch, cur, nchannels * sizeof(int32_t));
        for (ch = 0; ch < nchannels; ch++)
        {
            cur[ch] = scratch[remap_table[ch]];
        }
        cur += nchannels;
    }
}
/*
 * remap_flt_interleaved: permute the channels of each interleaved
 * single-precision float sample frame in place.
 */
static void remap_flt_interleaved(uint8_t **samples, int nsamples,
                                  int nchannels, int *remap_table)
{
    int frame, ch;
    float scratch[HB_AUDIO_REMAP_MAX_CHANNELS];
    float *cur = (float*)(*samples);

    for (frame = 0; frame < nsamples; frame++)
    {
        memcpy(scratch, cur, nchannels * sizeof(float));
        for (ch = 0; ch < nchannels; ch++)
        {
            cur[ch] = scratch[remap_table[ch]];
        }
        cur += nchannels;
    }
}
/*
 * remap_dbl_interleaved: permute the channels of each interleaved
 * double-precision float sample frame in place.
 */
static void remap_dbl_interleaved(uint8_t **samples, int nsamples,
                                  int nchannels, int *remap_table)
{
    int frame, ch;
    double scratch[HB_AUDIO_REMAP_MAX_CHANNELS];
    double *cur = (double*)(*samples);

    for (frame = 0; frame < nsamples; frame++)
    {
        memcpy(scratch, cur, nchannels * sizeof(double));
        for (ch = 0; ch < nchannels; ch++)
        {
            cur[ch] = scratch[remap_table[ch]];
        }
        cur += nchannels;
    }
}
/*
 * hb_audio_remap_init: allocate a remap context for the given sample
 * format and input/output channel orders.
 *
 * Returns NULL on allocation failure, unsupported sample format, or
 * missing channel map. Remapping stays disabled (remap_needed == 0)
 * until hb_audio_remap_set_channel_layout() is called.
 */
hb_audio_remap_t* hb_audio_remap_init(enum AVSampleFormat sample_fmt,
                                      hb_chan_map_t *channel_map_out,
                                      hb_chan_map_t *channel_map_in)
{
    hb_audio_remap_t *remap = calloc(1, sizeof(hb_audio_remap_t));
    if (remap == NULL)
    {
        hb_error("hb_audio_remap_init: failed to allocate remap");
        goto fail;
    }

    // sample format: pick the worker matching the memory layout
    switch (sample_fmt)
    {
        // planar formats all permute plane pointers, so the element
        // size doesn't matter and they share one worker
        case AV_SAMPLE_FMT_U8P:
        case AV_SAMPLE_FMT_S16P:
        case AV_SAMPLE_FMT_S32P:
        case AV_SAMPLE_FMT_FLTP:
        case AV_SAMPLE_FMT_DBLP:
            remap->remap = &remap_planar;
            break;

        case AV_SAMPLE_FMT_U8:
            remap->remap = &remap_u8_interleaved;
            break;

        case AV_SAMPLE_FMT_S16:
            remap->remap = &remap_s16_interleaved;
            break;

        case AV_SAMPLE_FMT_S32:
            remap->remap = &remap_s32_interleaved;
            break;

        case AV_SAMPLE_FMT_FLT:
            remap->remap = &remap_flt_interleaved;
            break;

        case AV_SAMPLE_FMT_DBL:
            remap->remap = &remap_dbl_interleaved;
            break;

        default:
            hb_error("hb_audio_remap_init: unsupported sample format '%s'",
                     av_get_sample_fmt_name(sample_fmt));
            goto fail;
    }

    // input/output channel order
    if (channel_map_in == NULL || channel_map_out == NULL)
    {
        hb_error("hb_audio_remap_init: invalid channel map(s)");
        goto fail;
    }
    remap->channel_map_in = channel_map_in;
    remap->channel_map_out = channel_map_out;

    // remap can't be done until the channel layout has been set
    remap->remap_needed = 0;

    return remap;

fail:
    hb_audio_remap_free(remap);
    return NULL;
}
/*
 * hb_audio_remap_set_channel_layout: update the remap context for a
 * new channel layout.
 *
 * Recomputes the channel count and remap table, and decides whether
 * remapping is actually needed — it isn't when the layout has more
 * channels than we support, when input and output orders are the same
 * map, or when the resulting table is an identity mapping.
 */
void hb_audio_remap_set_channel_layout(hb_audio_remap_t *remap,
                                       uint64_t channel_layout)
{
    if (remap != NULL)
    {
        int ii;
        remap->remap_needed = 0;

        // sanitize the layout
        if (channel_layout == AV_CH_LAYOUT_STEREO_DOWNMIX)
        {
            channel_layout = AV_CH_LAYOUT_STEREO;
        }
        remap->nchannels = av_get_channel_layout_nb_channels(channel_layout);

        // in some cases, remapping is not necessary and/or supported
        if (remap->nchannels > HB_AUDIO_REMAP_MAX_CHANNELS)
        {
            hb_log("hb_audio_remap_set_channel_layout: too many channels (%d)",
                   remap->nchannels);
            return;
        }
        if (remap->channel_map_in == remap->channel_map_out)
        {
            return;
        }

        // build the table and check whether remapping is necessary
        hb_audio_remap_build_table(remap->channel_map_out,
                                   remap->channel_map_in, channel_layout,
                                   remap->table);
        for (ii = 0; ii < remap->nchannels; ii++)
        {
            if (remap->table[ii] != ii)
            {
                remap->remap_needed = 1;
                break;
            }
        }
    }
}
/*
 * hb_audio_remap_free: release a remap context created by
 * hb_audio_remap_init(). Safe to call with NULL.
 *
 * The previous `if (remap != NULL)` guard was redundant: free(NULL)
 * is a no-op per C11 7.22.3.3.
 */
void hb_audio_remap_free(hb_audio_remap_t *remap)
{
    free(remap);
}
/*
 * hb_audio_remap: reorder the channels of nsamples sample frames in
 * place, using the worker selected at init time. No-op until a channel
 * layout has been set and found to require remapping.
 */
void hb_audio_remap(hb_audio_remap_t *remap, uint8_t **samples, int nsamples)
{
    if (remap == NULL || !remap->remap_needed)
    {
        return;
    }
    remap->remap(samples, nsamples, remap->nchannels, remap->table);
}
/*
 * hb_audio_remap_build_table: fill remap_table so that, for each
 * channel position in the output order, remap_table[out_pos] is the
 * position of that same channel in the input order (considering only
 * channels present in channel_layout). remap_table must have room for
 * the layout's channel count.
 */
void hb_audio_remap_build_table(hb_chan_map_t *channel_map_out,
                                hb_chan_map_t *channel_map_in,
                                uint64_t channel_layout,
                                int *remap_table)
{
    int ii, jj, nchannels, out_chan_idx, remap_idx;
    uint64_t *channels_in, *channels_out;

    if (channel_layout == AV_CH_LAYOUT_STEREO_DOWNMIX)
    {
        // Dolby Surround is Stereo when it comes to remapping
        channel_layout = AV_CH_LAYOUT_STEREO;
    }
    nchannels = av_get_channel_layout_nb_channels(channel_layout);

    // clear remap table before (re-)building it
    memset(remap_table, 0, nchannels * sizeof(int));

    out_chan_idx = 0;
    channels_in  = channel_map_in ->channel_order_map;
    channels_out = channel_map_out->channel_order_map;
    // walk the output order; for each channel present in the layout,
    // find its index among the layout's channels in the input order
    for (ii = 0; channels_out[ii] && out_chan_idx < nchannels; ii++)
    {
        if (channel_layout & channels_out[ii])
        {
            remap_idx = 0;
            for (jj = 0; channels_in[jj] && remap_idx < nchannels; jj++)
            {
                if (channels_out[ii] == channels_in[jj])
                {
                    remap_table[out_chan_idx++] = remap_idx++;
                    break;
                }
                else if (channel_layout & channels_in[jj])
                {
                    // a layout channel other than the one we want:
                    // it occupies an input slot, skip past it
                    remap_idx++;
                }
            }
        }
    }
}
HandBrake-0.10.2/libhb/param.c 0000664 0001752 0001752 00000022031 12515471026 016364 0 ustar handbrake handbrake /* param.c
*
* Copyright (c) 2003-2015 HandBrake Team
* This file is part of the HandBrake source code
* Homepage: .
* It may be used under the terms of the GNU General Public License v2.
* For full terms see the file COPYING file or visit
* http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
#include
/* NL-means presets and tunes
*
* Presets adjust strength:
* ultralight - visually transparent
* light
* medium
* strong
*
* Tunes adjust settings to the specified content type:
* none
* film - most content, live action
* grain - like film but preserves luma grain
* highmotion - like film but avoids color smearing with stronger settings
* animation - cel animation such as cartoons, anime
*/
/*
 * generate_nlmeans_settings: translate an NL-means preset/tune pair
 * into the filter's colon-separated settings string.
 *
 * Known presets select strength; tunes adjust the other parameters for
 * the content type (see the table comment above). An unknown preset is
 * passed through verbatim as a custom settings string (tune ignored,
 * with a warning). Returns a malloc'd string the caller must free, or
 * NULL on NULL preset / unknown tune.
 *
 * Each parameter is a pair: index 0 and index 1 — presumably luma then
 * chroma planes, matching the 12-field output; confirm against the
 * nlmeans filter's settings parser.
 */
static char * generate_nlmeans_settings(const char *preset, const char *tune)
{
    char *opt = NULL;

    if (preset == NULL)
        return NULL;

    if (!strcasecmp(preset, "ultralight") ||
        !strcasecmp(preset, "light") ||
        !strcasecmp(preset, "medium") ||
        !strcasecmp(preset, "strong"))
    {
        double strength[2],
               origin_tune[2];
        int    patch_size[2],
               range[2],
               frames[2],
               prefilter[2];

        if (tune == NULL || !strcasecmp(tune, "none"))
        {
            strength[0]    = strength[1]    = 6;
            origin_tune[0] = origin_tune[1] = 1;
            patch_size[0]  = patch_size[1]  = 7;
            range[0]       = range[1]       = 3;
            frames[0]      = frames[1]      = 2;
            prefilter[0]   = prefilter[1]   = 0;
            if (!strcasecmp(preset, "ultralight"))
            {
                strength[0] = strength[1] = 1.5;
            }
            else if (!strcasecmp(preset, "light"))
            {
                strength[0] = strength[1] = 3;
            }
            else if (!strcasecmp(preset, "strong"))
            {
                strength[0] = strength[1] = 10;
            }
        }
        else if (!strcasecmp(tune, "film"))
        {
            strength[0] = 6; strength[1] = 8;
            origin_tune[0] = origin_tune[1] = 0.8;
            patch_size[0]  = patch_size[1]  = 7;
            range[0]       = range[1]       = 3;
            frames[0]      = frames[1]      = 2;
            prefilter[0]   = prefilter[1]   = 0;
            if (!strcasecmp(preset, "ultralight"))
            {
                strength[0]    = 1.5; strength[1]    = 2.4;
                origin_tune[0] = 0.9; origin_tune[1] = 0.9;
            }
            else if (!strcasecmp(preset, "light"))
            {
                strength[0]    = 3;   strength[1]    = 4;
                origin_tune[0] = 0.9; origin_tune[1] = 0.9;
            }
            else if (!strcasecmp(preset, "strong"))
            {
                strength[0]    = 8;   strength[1]    = 10;
                origin_tune[0] = 0.6; origin_tune[1] = 0.6;
            }
        }
        else if (!strcasecmp(tune, "grain"))
        {
            // luma strength 0: preserve luma grain, denoise chroma only
            strength[0] = 0; strength[1] = 6;
            origin_tune[0] = origin_tune[1] = 0.8;
            patch_size[0]  = patch_size[1]  = 7;
            range[0]       = range[1]       = 3;
            frames[0]      = frames[1]      = 2;
            prefilter[0]   = prefilter[1]   = 0;
            if (!strcasecmp(preset, "ultralight"))
            {
                strength[0]    = 0;   strength[1]    = 2.4;
                origin_tune[0] = 0.9; origin_tune[1] = 0.9;
            }
            else if (!strcasecmp(preset, "light"))
            {
                strength[0]    = 0;   strength[1]    = 3.5;
                origin_tune[0] = 0.9; origin_tune[1] = 0.9;
            }
            else if (!strcasecmp(preset, "strong"))
            {
                strength[0]    = 0;   strength[1]    = 8;
                origin_tune[0] = 0.6; origin_tune[1] = 0.6;
            }
        }
        else if (!strcasecmp(tune, "highmotion"))
        {
            strength[0]    = 6;   strength[1]    = 6;
            origin_tune[0] = 0.8; origin_tune[1] = 0.7;
            patch_size[0]  = 7;   patch_size[1]  = 7;
            range[0]       = 3;   range[1]       = 5;
            frames[0]      = 2;   frames[1]      = 1;
            prefilter[0]   = 0;   prefilter[1]   = 0;
            if (!strcasecmp(preset, "ultralight"))
            {
                strength[0]    = 1.5; strength[1]    = 2.4;
                origin_tune[0] = 0.9; origin_tune[1] = 0.9;
            }
            else if (!strcasecmp(preset, "light"))
            {
                strength[0]    = 3;   strength[1]    = 3.25;
                origin_tune[0] = 0.9; origin_tune[1] = 0.8;
            }
            else if (!strcasecmp(preset, "strong"))
            {
                strength[0]    = 8;   strength[1]    = 6.75;
                origin_tune[0] = 0.6; origin_tune[1] = 0.5;
            }
        }
        else if (!strcasecmp(tune, "animation"))
        {
            strength[0] = 5; strength[1] = 4;
            origin_tune[0] = origin_tune[1] = 0.15;
            patch_size[0]  = patch_size[1]  = 5;
            range[0]       = range[1]       = 7;
            frames[0]      = frames[1]      = 4;
            prefilter[0]   = prefilter[1]   = 0;
            if (!strcasecmp(preset, "ultralight"))
            {
                strength[0] = 2.5; strength[1] = 2;
                frames[0]   = 2;   frames[1]   = 2;
            }
            else if (!strcasecmp(preset, "light"))
            {
                strength[0] = 3; strength[1] = 2.25;
                frames[0]   = 3; frames[1]   = 3;
            }
            else if (!strcasecmp(preset, "strong"))
            {
                strength[0] = 10; strength[1] = 8;
            }
        }
        else
        {
            fprintf(stderr, "Unrecognized nlmeans tune (%s).\n", tune);
            return NULL;
        }

        opt = hb_strdup_printf("%lf:%lf:%d:%d:%d:%d:%lf:%lf:%d:%d:%d:%d",
                               strength[0], origin_tune[0], patch_size[0],
                               range[0], frames[0], prefilter[0],
                               strength[1], origin_tune[1], patch_size[1],
                               range[1], frames[1], prefilter[1]);
    }
    else
    {
        // not a known preset: treat it as a custom settings string
        opt = strdup(preset);
        if (tune != NULL)
        {
            fprintf(stderr, "Custom nlmeans parameters specified; ignoring nlmeans tune (%s).\n", tune);
        }
    }

    return opt;
}
/* HQDN3D presets
*
* Presets adjust strength:
* ultralight - visually transparent
* light
* medium
* strong
*/
/*
 * generate_hqdn3d_settings: translate an HQDN3D preset name into the
 * filter's colon-separated settings string. The tune argument is
 * accepted for signature parity with the other generators but hqdn3d
 * has no tunes. An unknown preset is passed through verbatim as a
 * custom settings string. Returns a malloc'd string (caller frees),
 * or NULL when preset is NULL.
 */
static char * generate_hqdn3d_settings(const char *preset, const char *tune)
{
    if (preset == NULL)
    {
        return NULL;
    }
    if (!strcasecmp(preset, "ultralight"))
    {
        return strdup("1:0.7:0.7:1:2:2");
    }
    if (!strcasecmp(preset, "light") || !strcasecmp(preset, "weak"))
    {
        return strdup("2:1:1:2:3:3");
    }
    if (!strcasecmp(preset, "medium"))
    {
        return strdup("3:2:2:2:3:3");
    }
    if (!strcasecmp(preset, "strong"))
    {
        return strdup("7:7:7:5:5:5");
    }
    /* anything else is treated as a custom settings string */
    return strdup(preset);
}
/*
 * hb_validate_param_string: check param_string against an extended
 * POSIX regular expression.
 *
 * Returns 0 when the string matches, 1 on mismatch or when the
 * pattern itself fails to compile.
 *
 * Fixes vs. the previous version:
 * - the compile-error message printed param_string where the pattern
 *   was meant ("Error compiling regex for pattern (%s)")
 * - regfree() was called on a regex_t whose regcomp() had failed,
 *   which is undefined behavior per POSIX
 */
int hb_validate_param_string(const char *regex_pattern, const char *param_string)
{
    regex_t regex_temp;

    if (regcomp(&regex_temp, regex_pattern, REG_EXTENDED) != 0)
    {
        fprintf(stderr, "hb_validate_param_string: Error compiling regex for pattern (%s).\n", regex_pattern);
        return 1;
    }

    int ret = regexec(&regex_temp, param_string, 0, NULL, 0);
    regfree(&regex_temp);
    return ret == 0 ? 0 : 1;
}
/*
 * hb_validate_filter_settings: validate a filter's settings string.
 *
 * Returns 0 when the settings are valid for the given filter (a NULL
 * settings string means "use defaults" and is always valid), 1 for an
 * invalid string or an unrecognized filter.
 *
 * Cleanup vs. the previous version: removed the unreachable `break`
 * after `return 1` and simplified the validation tail.
 */
int hb_validate_filter_settings(int filter_id, const char *filter_param)
{
    // Regex matches "number" followed by one or more ":number", where number is uint or ufloat
    const char *hb_colon_separated_params_regex = "^((([0-9]+([.,][0-9]+)?)|([.,][0-9]+))((:(([0-9]+([.,][0-9]+)?)|([.,][0-9]+)))+)?)$";

    const char *regex_pattern = NULL;

    switch (filter_id)
    {
        case HB_FILTER_NLMEANS:
        case HB_FILTER_HQDN3D:
            if (filter_param == NULL)
            {
                // no settings: the default preset is used, always valid
                return 0;
            }
            regex_pattern = hb_colon_separated_params_regex;
            break;

        default:
            fprintf(stderr, "hb_validate_filter_settings: Unrecognized filter (%d).\n",
                    filter_id);
            return 1;
    }

    return hb_validate_param_string(regex_pattern, filter_param) == 0 ? 0 : 1;
}
/*
 * hb_generate_filter_settings: build and validate a settings string
 * for the given filter from a preset/tune pair.
 *
 * Returns a malloc'd string the caller must free, or NULL for an
 * unrecognized filter or invalid resulting settings.
 *
 * Fix vs. the previous version: the generated string was leaked when
 * validation failed; it is now freed before returning NULL.
 */
char * hb_generate_filter_settings(int filter_id, const char *preset, const char *tune)
{
    char *filter_param = NULL;

    switch (filter_id)
    {
        case HB_FILTER_NLMEANS:
            filter_param = generate_nlmeans_settings(preset, tune);
            break;
        case HB_FILTER_HQDN3D:
            filter_param = generate_hqdn3d_settings(preset, tune);
            break;
        default:
            fprintf(stderr, "hb_generate_filter_settings: Unrecognized filter (%d).\n",
                    filter_id);
            break;
    }

    if (hb_validate_filter_settings(filter_id, filter_param) == 0)
    {
        return filter_param;
    }

    // don't leak the generated string when validation fails
    free(filter_param);
    return NULL;
}
HandBrake-0.10.2/libhb/project.h.m4 0000664 0001752 0001752 00000003354 11200722756 017264 0 ustar handbrake handbrake changequote(<<, >>)dnl
include(<>)dnl
dnl
dnl
dnl
#ifndef HB_PROJECT_H
#define HB_PROJECT_H
<<#>>define HB_PROJECT_TITLE "__HB_title"
<<#>>define HB_PROJECT_NAME "__HB_name"
<<#>>define HB_PROJECT_NAME_LOWER "__HB_name_lower"
<<#>>define HB_PROJECT_NAME_UPPER "__HB_name_upper"
<<#>>define HB_PROJECT_URL_WEBSITE "__HB_url_website"
<<#>>define HB_PROJECT_URL_COMMUNITY "__HB_url_community"
<<#>>define HB_PROJECT_URL_IRC "__HB_url_irc"
<<#>>define HB_PROJECT_URL_APPCAST "__HB_url_appcast"
<<#>>define HB_PROJECT_VERSION_MAJOR __HB_version_major
<<#>>define HB_PROJECT_VERSION_MINOR __HB_version_minor
<<#>>define HB_PROJECT_VERSION_POINT __HB_version_point
<<#>>define HB_PROJECT_VERSION "__HB_version"
<<#>>define HB_PROJECT_VERSION_HEX 0x<<>>__HB_version_hex<<>>LL
<<#>>define HB_PROJECT_BUILD __HB_build
<<#>>define HB_PROJECT_REPO_URL "__HB_repo_url"
<<#>>define HB_PROJECT_REPO_ROOT "__HB_repo_root"
<<#>>define HB_PROJECT_REPO_UUID "__HB_repo_uuid"
<<#>>define HB_PROJECT_REPO_REV __HB_repo_rev
<<#>>define HB_PROJECT_REPO_DATE "__HB_repo_date"
<<#>>define HB_PROJECT_REPO_OFFICIAL __HB_repo_official
<<#>>define HB_PROJECT_REPO_TYPE "__HB_repo_type"
<<#>>define HB_PROJECT_BUILD_SPEC "__BUILD_spec"
<<#>>define HB_PROJECT_BUILD_MACHINE "__BUILD_machine"
<<#>>define HB_PROJECT_BUILD_VENDOR "__BUILD_vendor"
<<#>>define HB_PROJECT_BUILD_SYSTEM "__BUILD_system"
<<#>>define HB_PROJECT_BUILD_SYSTEMF "__BUILD_systemf"
<<#>>define HB_PROJECT_BUILD_RELEASE "__BUILD_release"
<<#>>define HB_PROJECT_BUILD_TITLE "__BUILD_title"
<<#>>define HB_PROJECT_BUILD_ARCH "__BUILD_arch"
#endif /* HB_PROJECT_H */
HandBrake-0.10.2/libhb/audio_remap.h 0000664 0001752 0001752 00000006355 12463330511 017564 0 ustar handbrake handbrake /* audio_remap.h
*
* Copyright (c) 2003-2015 HandBrake Team
* This file is part of the HandBrake source code
* Homepage:
* It may be used under the terms of the GNU General Public License v2.
* For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
/* This file handles the following two scenarios:
*
* 1) remapping audio from decoder order to libav order (for downmixing)
*
* 2) remapping audio from libav order to encoder order (for encoding)
*
* We only need to support:
*
* a) channels found in our non-libavcodec audio decoders' layouts
* b) channels found in HB_AMIXDOWN_* layouts
*
* We consider that:
*
* Left/Right Surround == Side Left/Right
* Left/Right Rear Surround == Back Left/Right */
#ifndef AUDIO_REMAP_H
#define AUDIO_REMAP_H
#include <stdint.h>
#include "libavutil/samplefmt.h"
/* we only need to support the 11 "most common" channels */
#define HB_AUDIO_REMAP_MAX_CHANNELS 11
/* A channel ordering, expressed as an array of uint64_t channel masks,
 * one entry per channel position. The extra slot (+1) presumably holds a
 * zero terminator — NOTE(review): confirm against audio_remap.c. */
typedef struct
{
uint64_t channel_order_map[HB_AUDIO_REMAP_MAX_CHANNELS + 1];
} hb_chan_map_t;
/* State for remapping audio samples between two channel orders.
 * Created by hb_audio_remap_init(); the remap table and channel count are
 * filled in by hb_audio_remap_set_channel_layout(). */
typedef struct
{
int nchannels;                 /* number of channels in the current layout */
int remap_needed;              /* nonzero when input and output orders differ */
hb_chan_map_t *channel_map_in; /* channel order of the incoming samples */
hb_chan_map_t *channel_map_out;/* channel order wanted by the consumer */
int table[HB_AUDIO_REMAP_MAX_CHANNELS]; /* output_ch -> input_ch index map */
/* sample-format-specific worker that performs the actual reordering;
 * presumably selected from sample_fmt in hb_audio_remap_init() —
 * NOTE(review): confirm against audio_remap.c */
void (*remap)(uint8_t **samples, int nsamples,
int nchannels, int *remap_table);
} hb_audio_remap_t;
/*
* Predefined channel maps for common channel orders.
*/
extern hb_chan_map_t hb_libav_chan_map;
extern hb_chan_map_t hb_liba52_chan_map;
extern hb_chan_map_t hb_vorbis_chan_map;
extern hb_chan_map_t hb_aac_chan_map;
/*
* Initialize an hb_audio_remap_t to remap audio with the specified sample
* format, from the input to the output channel order (indicated by
* channel_map_in and channel_map_out, respectively).
*/
hb_audio_remap_t* hb_audio_remap_init(enum AVSampleFormat sample_fmt,
hb_chan_map_t *channel_map_out,
hb_chan_map_t *channel_map_in);
/*
* Updates an hb_audio_remap_t's number of channels and remap table to work with
* the specified channel layout.
*
* Must be called at least once before remapping.
*/
void hb_audio_remap_set_channel_layout(hb_audio_remap_t *remap,
uint64_t channel_layout);
/*
* Free an hb_audio_remap_t.
*/
void hb_audio_remap_free(hb_audio_remap_t *remap);
/*
* Remap audio between 2 different channel orders, using the settings specified
* in the remap paremeter. Remapping is only done when necessary.
*
* The remap parameter can be NULL (no remapping).
*/
void hb_audio_remap(hb_audio_remap_t *remap, uint8_t **samples,
int nsamples);
/*
* Generate a table used to remap audio between 2 different channel orders.
*
* Usage: output_sample[channel_idx] = input_sample[remap_table[channel_idx]]
*
* remap_table is allocated by the caller.
*/
void hb_audio_remap_build_table(hb_chan_map_t *channel_map_out,
hb_chan_map_t *channel_map_in,
uint64_t channel_layout,
int *remap_table);
#endif /* AUDIO_REMAP_H */
HandBrake-0.10.2/libhb/dxva2api.h 0000664 0001752 0001752 00000102750 12463330511 017011 0 ustar handbrake handbrake /* dxva2api.h
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
Authors: Peng Gao
Li Cao
*/
#ifndef _DXVA2API_H
#define _DXVA2API_H
#ifdef USE_HWD
#define MINGW_DXVA2API_H_VERSION (2)
#if __GNUC__ >= 3
#pragma GCC system_header
#endif
#include <objbase.h>
#include <d3d9.h>
/* Define it to allow using nameless struct/union (non C99 compliant) to match
* the official documentation. */
//#define DXVA2API_USE_BITFIELDS
/****************STRUCTURES******************/
#pragma pack(push, 1)
#define DXVA2API_USE_BITFIELDS
/* Extended color/format description of a video sample. The seven bitfields
 * total exactly 32 bits (8+4+3+3+4+5+5) and are overlaid on the single
 * UINT `value` via the anonymous union, so the struct is one UINT either
 * way. The bitfield view is only available when DXVA2API_USE_BITFIELDS is
 * defined (anonymous struct/union is non-C99; see note above). */
typedef struct _DXVA2_ExtendedFormat {
#ifdef DXVA2API_USE_BITFIELDS
union {
struct {
UINT SampleFormat : 8;
UINT VideoChromaSubsampling : 4;
UINT NominalRange : 3;
UINT VideoTransferMatrix : 3;
UINT VideoLighting : 4;
UINT VideoPrimaries : 5;
UINT VideoTransferFunction : 5;
};
UINT value;
};
#else
UINT value;
#endif
} DXVA2_ExtendedFormat;
typedef struct _DXVA2_Frequency {
UINT Numerator;
UINT Denominator;
} DXVA2_Frequency;
typedef struct _DXVA2_VideoDesc {
UINT SampleWidth;
UINT SampleHeight;
DXVA2_ExtendedFormat SampleFormat;
D3DFORMAT Format;
DXVA2_Frequency InputSampleFreq;
DXVA2_Frequency OutputFrameFreq;
UINT UABProtectionLevel;
UINT Reserved;
} DXVA2_VideoDesc;
typedef struct _DXVA2_ConfigPictureDecode {
GUID guidConfigBitstreamEncryption;
GUID guidConfigMBcontrolEncryption;
GUID guidConfigResidDiffEncryption;
UINT ConfigBitstreamRaw;
UINT ConfigMBcontrolRasterOrder;
UINT ConfigResidDiffHost;
UINT ConfigSpatialResid8;
UINT ConfigResid8Subtraction;
UINT ConfigSpatialHost8or9Clipping;
UINT ConfigSpatialResidInterleaved;
UINT ConfigIntraResidUnsigned;
UINT ConfigResidDiffAccelerator;
UINT ConfigHostInverseScan;
UINT ConfigSpecificIDCT;
UINT Config4GroupedCoefs;
USHORT ConfigMinRenderTargetBuffCount;
USHORT ConfigDecoderSpecific;
} DXVA2_ConfigPictureDecode;
typedef struct _DXVA2_DecodeBufferDesc {
DWORD CompressedBufferType;
UINT BufferIndex;
UINT DataOffset;
UINT DataSize;
UINT FirstMBaddress;
UINT NumMBsInBuffer;
UINT Width;
UINT Height;
UINT Stride;
UINT ReservedBits;
PVOID pvPVPState;
} DXVA2_DecodeBufferDesc;
typedef struct _DXVA2_DecodeExtensionData {
UINT Function;
PVOID pPrivateInputData;
UINT PrivateInputDataSize;
PVOID pPrivateOutputData;
UINT PrivateOutputDataSize;
} DXVA2_DecodeExtensionData;
typedef struct _DXVA2_DecodeExecuteParams {
UINT NumCompBuffers;
DXVA2_DecodeBufferDesc *pCompressedBuffers;
DXVA2_DecodeExtensionData *pExtensionData;
} DXVA2_DecodeExecuteParams;
enum {
DXVA2_VideoDecoderRenderTarget = 0,
DXVA2_VideoProcessorRenderTarget= 1,
DXVA2_VideoSoftwareRenderTarget = 2
};
/* Compressed buffer types, used in DXVA2_DecodeBufferDesc.CompressedBufferType.
 * NOTE: "DXVA2_BitStreamDateBufferType" ("Date" rather than "Data") matches
 * the spelling in the official Microsoft dxva2api.h — do not "fix" it. */
enum {
DXVA2_PictureParametersBufferType = 0,
DXVA2_MacroBlockControlBufferType = 1,
DXVA2_ResidualDifferenceBufferType = 2,
DXVA2_DeblockingControlBufferType = 3,
DXVA2_InverseQuantizationMatrixBufferType = 4,
DXVA2_SliceControlBufferType = 5,
DXVA2_BitStreamDateBufferType = 6,
DXVA2_MotionVectorBuffer = 7,
DXVA2_FilmGrainBuffer = 8
};
/* DXVA MPEG-I/II and VC-1 */
typedef struct _DXVA_PictureParameters {
USHORT wDecodedPictureIndex;
USHORT wDeblockedPictureIndex;
USHORT wForwardRefPictureIndex;
USHORT wBackwardRefPictureIndex;
USHORT wPicWidthInMBminus1;
USHORT wPicHeightInMBminus1;
UCHAR bMacroblockWidthMinus1;
UCHAR bMacroblockHeightMinus1;
UCHAR bBlockWidthMinus1;
UCHAR bBlockHeightMinus1;
UCHAR bBPPminus1;
UCHAR bPicStructure;
UCHAR bSecondField;
UCHAR bPicIntra;
UCHAR bPicBackwardPrediction;
UCHAR bBidirectionalAveragingMode;
UCHAR bMVprecisionAndChromaRelation;
UCHAR bChromaFormat;
UCHAR bPicScanFixed;
UCHAR bPicScanMethod;
UCHAR bPicReadbackRequests;
UCHAR bRcontrol;
UCHAR bPicSpatialResid8;
UCHAR bPicOverflowBlocks;
UCHAR bPicExtrapolation;
UCHAR bPicDeblocked;
UCHAR bPicDeblockConfined;
UCHAR bPic4MVallowed;
UCHAR bPicOBMC;
UCHAR bPicBinPB;
UCHAR bMV_RPS;
UCHAR bReservedBits;
USHORT wBitstreamFcodes;
USHORT wBitstreamPCEelements;
UCHAR bBitstreamConcealmentNeed;
UCHAR bBitstreamConcealmentMethod;
} DXVA_PictureParameters, *LPDXVA_PictureParameters;
typedef struct _DXVA_QmatrixData {
BYTE bNewQmatrix[4];
WORD Qmatrix[4][8 * 8];
} DXVA_QmatrixData, *LPDXVA_QmatrixData;
typedef struct _DXVA_SliceInfo {
USHORT wHorizontalPosition;
USHORT wVerticalPosition;
UINT dwSliceBitsInBuffer;
UINT dwSliceDataLocation;
UCHAR bStartCodeBitOffset;
UCHAR bReservedBits;
USHORT wMBbitOffset;
USHORT wNumberMBsInSlice;
USHORT wQuantizerScaleCode;
USHORT wBadSliceChopping;
} DXVA_SliceInfo, *LPDXVA_SliceInfo;
/* DXVA H264 */
/* One H.264 reference picture entry: a 7-bit surface index plus a 1-bit
 * flag, packed into the single byte `bPicEntry` via the anonymous union
 * (bitfield view gated on DXVA2API_USE_BITFIELDS, as above). */
typedef struct {
#ifdef DXVA2API_USE_BITFIELDS
union {
struct {
UCHAR Index7Bits : 7;
UCHAR AssociatedFlag : 1;
};
UCHAR bPicEntry;
};
#else
UCHAR bPicEntry;
#endif
} DXVA_PicEntry_H264;
typedef struct {
USHORT wFrameWidthInMbsMinus1;
USHORT wFrameHeightInMbsMinus1;
DXVA_PicEntry_H264 CurrPic;
UCHAR num_ref_frames;
#ifdef DXVA2API_USE_BITFIELDS
union {
struct {
USHORT field_pic_flag : 1;
USHORT MbaffFrameFlag : 1;
USHORT residual_colour_transform_flag : 1;
USHORT sp_for_switch_flag : 1;
USHORT chroma_format_idc : 2;
USHORT RefPicFlag : 1;
USHORT constrained_intra_pred_flag : 1;
USHORT weighted_pred_flag : 1;
USHORT weighted_bipred_idc : 2;
USHORT MbsConsecutiveFlag : 1;
USHORT frame_mbs_only_flag : 1;
USHORT transform_8x8_mode_flag : 1;
USHORT MinLumaBipredSize8x8Flag : 1;
USHORT IntraPicFlag : 1;
};
USHORT wBitFields;
};
#else
USHORT wBitFields;
#endif
UCHAR bit_depth_luma_minus8;
UCHAR bit_depth_chroma_minus8;
USHORT Reserved16Bits;
UINT StatusReportFeedbackNumber;
DXVA_PicEntry_H264 RefFrameList[16];
INT CurrFieldOrderCnt[2];
INT FieldOrderCntList[16][2];
CHAR pic_init_qs_minus26;
CHAR chroma_qp_index_offset;
CHAR second_chroma_qp_index_offset;
UCHAR ContinuationFlag;
CHAR pic_init_qp_minus26;
UCHAR num_ref_idx_l0_active_minus1;
UCHAR num_ref_idx_l1_active_minus1;
UCHAR Reserved8BitsA;
USHORT FrameNumList[16];
UINT UsedForReferenceFlags;
USHORT NonExistingFrameFlags;
USHORT frame_num;
UCHAR log2_max_frame_num_minus4;
UCHAR pic_order_cnt_type;
UCHAR log2_max_pic_order_cnt_lsb_minus4;
UCHAR delta_pic_order_always_zero_flag;
UCHAR direct_8x8_inference_flag;
UCHAR entropy_coding_mode_flag;
UCHAR pic_order_present_flag;
UCHAR num_slice_groups_minus1;
UCHAR slice_group_map_type;
UCHAR deblocking_filter_control_present_flag;
UCHAR redundant_pic_cnt_present_flag;
UCHAR Reserved8BitsB;
USHORT slice_group_change_rate_minus1;
UCHAR SliceGroupMap[810];
} DXVA_PicParams_H264;
typedef struct {
UCHAR bScalingLists4x4[6][16];
UCHAR bScalingLists8x8[2][64];
} DXVA_Qmatrix_H264;
typedef struct {
UINT BSNALunitDataLocation;
UINT SliceBytesInBuffer;
USHORT wBadSliceChopping;
USHORT first_mb_in_slice;
USHORT NumMbsForSlice;
USHORT BitOffsetToSliceData;
UCHAR slice_type;
UCHAR luma_log2_weight_denom;
UCHAR chroma_log2_weight_denom;
UCHAR num_ref_idx_l0_active_minus1;
UCHAR num_ref_idx_l1_active_minus1;
CHAR slice_alpha_c0_offset_div2;
CHAR slice_beta_offset_div2;
UCHAR Reserved8Bits;
DXVA_PicEntry_H264 RefPicList[2][32];
SHORT Weights[2][32][3][2];
CHAR slice_qs_delta;
CHAR slice_qp_delta;
UCHAR redundant_pic_cnt;
UCHAR direct_spatial_mv_pred_flag;
UCHAR cabac_init_idc;
UCHAR disable_deblocking_filter_idc;
USHORT slice_id;
} DXVA_Slice_H264_Long;
typedef struct {
UINT BSNALunitDataLocation;
UINT SliceBytesInBuffer;
USHORT wBadSliceChopping;
} DXVA_Slice_H264_Short;
typedef struct {
USHORT wFrameWidthInMbsMinus1;
USHORT wFrameHeightInMbsMinus1;
DXVA_PicEntry_H264 InPic;
DXVA_PicEntry_H264 OutPic;
USHORT PicOrderCnt_offset;
INT CurrPicOrderCnt;
UINT StatusReportFeedbackNumber;
UCHAR model_id;
UCHAR separate_colour_description_present_flag;
UCHAR film_grain_bit_depth_luma_minus8;
UCHAR film_grain_bit_depth_chroma_minus8;
UCHAR film_grain_full_range_flag;
UCHAR film_grain_colour_primaries;
UCHAR film_grain_transfer_characteristics;
UCHAR film_grain_matrix_coefficients;
UCHAR blending_mode_id;
UCHAR log2_scale_factor;
UCHAR comp_model_present_flag[4];
UCHAR num_intensity_intervals_minus1[4];
UCHAR num_model_values_minus1[4];
UCHAR intensity_interval_lower_bound[3][16];
UCHAR intensity_interval_upper_bound[3][16];
SHORT comp_model_value[3][16][8];
} DXVA_FilmGrainChar_H264;
typedef struct {
union {
struct {
USHORT Fraction;
SHORT Value;
};
LONG ll;
};
}DXVA2_Fixed32;
typedef struct {
UCHAR Cr;
UCHAR Cb;
UCHAR Y;
UCHAR Alpha;
}DXVA2_AYUVSample8;
typedef struct {
USHORT Cr;
USHORT Cb;
USHORT Y;
USHORT Alpha;
}DXVA2_AYUVSample16;
typedef struct {
DXVA2_Fixed32 MinValue;
DXVA2_Fixed32 MaxValue;
DXVA2_Fixed32 DefaultValue;
DXVA2_Fixed32 StepSize;
}DXVA2_ValueRange;
typedef struct {
DXVA2_Fixed32 Brightness;
DXVA2_Fixed32 Contrast;
DXVA2_Fixed32 Hue;
DXVA2_Fixed32 Saturation;
}DXVA2_ProcAmpValues;
typedef struct {
DXVA2_Fixed32 Level;
DXVA2_Fixed32 Threshold;
DXVA2_Fixed32 Radius;
}DXVA2_FilterValues;
typedef struct {
UINT DeviceCaps;
D3DPOOL InputPool;
UINT NumForwardRefSamples;
UINT NumBackwardRefSamples;
UINT Reserved;
UINT DeinterlaceTechnology;
UINT ProcAmpControlCaps;
UINT VideoProcessorOperations;
UINT NoiseFilterTechnology;
UINT DetailFilterTechnology;
}DXVA2_VideoProcessorCaps;
/* Fallback definition of REFERENCE_TIME (100-ns units in DirectShow —
 * NOTE(review): unit not shown here, confirm) for toolchains whose headers
 * don't provide it. NOTE(review): `typedef long long int64_t;` will clash
 * with <stdint.h> if both are visible and only the _REFERENCE_TIME_ guard
 * protects against redefinition — verify against the MinGW headers this
 * builds with before touching it. */
#ifndef _REFERENCE_TIME_
#define _REFERENCE_TIME_
typedef long long int64_t;
typedef int64_t REFERENCE_TIME;
#endif
typedef struct {
REFERENCE_TIME Start;
REFERENCE_TIME End;
DXVA2_ExtendedFormat SampleFormat;
IDirect3DSurface9 *SrcSurface;
RECT SrcRect;
RECT DstRect;
DXVA2_AYUVSample8 Pal[16];
DXVA2_Fixed32 PlanarAlpha;
DWORD SampleData;
}DXVA2_VideoSample;
typedef struct {
REFERENCE_TIME TargetFrame;
RECT TargetRect;
SIZE ConstrictionSize;
UINT StreamingFlags;
DXVA2_AYUVSample16 BackgroundColor;
DXVA2_ExtendedFormat DestFormat;
DXVA2_ProcAmpValues ProcAmpValues;
DXVA2_Fixed32 Alpha;
DXVA2_FilterValues NoiseFilterLuma;
DXVA2_FilterValues NoiseFilterChroma;
DXVA2_FilterValues DetailFilterLuma;
DXVA2_FilterValues DetailFilterChroma;
DWORD DestData;
} DXVA2_VideoProcessBltParams;
#pragma pack(pop)
/*************INTERFACES************/
#ifdef __cplusplus
extern "C" {
#endif
#define _COM_interface struct
typedef _COM_interface IDirectXVideoDecoderService IDirectXVideoDecoderService;
typedef _COM_interface IDirectXVideoDecoder IDirectXVideoDecoder;
#undef INTERFACE
#define INTERFACE IDirectXVideoDecoder
DECLARE_INTERFACE_( IDirectXVideoDecoder, IUnknown )
{
STDMETHOD( QueryInterface ) ( THIS_ REFIID, PVOID* ) PURE;
STDMETHOD_( ULONG, AddRef ) ( THIS ) PURE;
STDMETHOD_( ULONG, Release ) ( THIS ) PURE;
STDMETHOD( GetVideoDecoderService ) ( THIS_ IDirectXVideoDecoderService** ) PURE;
STDMETHOD( GetCreationParameters ) ( THIS_ GUID*, DXVA2_VideoDesc*, DXVA2_ConfigPictureDecode*, IDirect3DSurface9***, UINT* ) PURE;
STDMETHOD( GetBuffer ) ( THIS_ UINT, void**, UINT* ) PURE;
STDMETHOD( ReleaseBuffer ) ( THIS_ UINT ) PURE;
STDMETHOD( BeginFrame ) ( THIS_ IDirect3DSurface9 *, void* ) PURE;
STDMETHOD( EndFrame ) ( THIS_ HANDLE * ) PURE;
STDMETHOD( Execute ) ( THIS_ const DXVA2_DecodeExecuteParams* ) PURE;
};
#if !defined(__cplusplus) || defined(CINTERFACE)
#define IDirectXVideoDecoder_QueryInterface( p, a, b ) (p)->lpVtbl->QueryInterface( p, a, b )
#define IDirectXVideoDecoder_AddRef( p ) (p)->lpVtbl->AddRef( p )
#define IDirectXVideoDecoder_Release( p ) (p)->lpVtbl->Release( p )
#define IDirectXVideoDecoder_BeginFrame( p, a, b ) (p)->lpVtbl->BeginFrame( p, a, b )
#define IDirectXVideoDecoder_EndFrame( p, a ) (p)->lpVtbl->EndFrame( p, a )
#define IDirectXVideoDecoder_Execute( p, a ) (p)->lpVtbl->Execute( p, a )
#define IDirectXVideoDecoder_GetBuffer( p, a, b, c ) (p)->lpVtbl->GetBuffer( p, a, b, c )
#define IDirectXVideoDecoder_GetCreationParameters( p, a, b, c, d, e ) (p)->lpVtbl->GetCreationParameters( p, a, b, c, d, e )
#define IDirectXVideoDecoder_GetVideoDecoderService( p, a ) (p)->lpVtbl->GetVideoDecoderService( p, a )
#define IDirectXVideoDecoder_ReleaseBuffer( p, a ) (p)->lpVtbl->ReleaseBuffer( p, a )
#else
#define IDirectXVideoDecoder_QueryInterface( p, a, b ) (p)->QueryInterface( a, b )
#define IDirectXVideoDecoder_AddRef( p ) (p)->AddRef()
#define IDirectXVideoDecoder_Release( p ) (p)->Release()
#define IDirectXVideoDecoder_BeginFrame( p, a, b ) (p)->BeginFrame( a, b )
#define IDirectXVideoDecoder_EndFrame( p, a ) (p)->EndFrame( a )
#define IDirectXVideoDecoder_Execute( p, a ) (p)->Execute( a )
#define IDirectXVideoDecoder_GetBuffer( p, a, b, c ) (p)->GetBuffer( a, b, c )
#define IDirectXVideoDecoder_GetCreationParameters( p, a, b, c, d, e ) (p)->GetCreationParameters( a, b, c, d, e )
#define IDirectXVideoDecoder_GetVideoDecoderService( p, a ) (p)->GetVideoDecoderService( a )
#define IDirectXVideoDecoder_ReleaseBuffer( p, a ) (p)->ReleaseBuffer( a )
#endif
#undef INTERFACE
#define INTERFACE IDirectXVideoAccelerationService
DECLARE_INTERFACE_( IDirectXVideoAccelerationService, IUnknown )
{
STDMETHOD( QueryInterface ) ( THIS_ REFIID, PVOID* ) PURE;
STDMETHOD_( ULONG, AddRef ) ( THIS ) PURE;
STDMETHOD_( ULONG, Release ) ( THIS ) PURE;
STDMETHOD( CreateSurface ) ( THIS_ UINT, UINT, UINT, D3DFORMAT, D3DPOOL, DWORD, DWORD, IDirect3DSurface9**, HANDLE* ) PURE;
};
#if !defined(__cplusplus) || defined(CINTERFACE)
#define IDirectXVideoAccelerationService_QueryInterface( p, a, b ) (p)->lpVtbl->QueryInterface( p, a, b )
#define IDirectXVideoAccelerationService_AddRef( p ) (p)->lpVtbl->AddRef( p )
#define IDirectXVideoAccelerationService_Release( p ) (p)->lpVtbl->Release( p )
#define IDirectXVideoAccelerationService_CreateSurface( p, a, b, c, d, e, f, g, h, i ) (p)->lpVtbl->CreateSurface( p, a, b, c, d, e, f, g, h, i )
#else
#define IDirectXVideoAccelerationService_QueryInterface( p, a, b ) (p)->QueryInterface( a, b )
#define IDirectXVideoAccelerationService_AddRef( p ) (p)->AddRef()
#define IDirectXVideoAccelerationService_Release( p ) (p)->Release()
#define IDirectXVideoAccelerationService_CreateSurface( p, a, b, c, d, e, f, g, h, i ) (p)->CreateSurface( a, b, c, d, e, f, g, h, i )
#endif
#undef INTERFACE
#define INTERFACE IDirectXVideoDecoderService
DECLARE_INTERFACE_( IDirectXVideoDecoderService, IDirectXVideoAccelerationService )
{
STDMETHOD( QueryInterface ) ( THIS_ REFIID, PVOID* ) PURE;
STDMETHOD_( ULONG, AddRef ) ( THIS ) PURE;
STDMETHOD_( ULONG, Release ) ( THIS ) PURE;
STDMETHOD( CreateSurface ) ( THIS_ UINT, UINT, UINT, D3DFORMAT, D3DPOOL, DWORD, DWORD, IDirect3DSurface9**, HANDLE* ) PURE;
STDMETHOD( GetDecoderDeviceGuids ) ( THIS_ UINT*, GUID ** ) PURE;
STDMETHOD( GetDecoderRenderTargets ) ( THIS_ REFGUID, UINT*, D3DFORMAT** ) PURE;
STDMETHOD( GetDecoderConfigurations ) ( THIS_ REFGUID, const DXVA2_VideoDesc*, IUnknown*, UINT*, DXVA2_ConfigPictureDecode** ) PURE;
STDMETHOD( CreateVideoDecoder ) ( THIS_ REFGUID, const DXVA2_VideoDesc*, DXVA2_ConfigPictureDecode*, IDirect3DSurface9**, UINT, IDirectXVideoDecoder** ) PURE;
};
#if !defined(__cplusplus) || defined(CINTERFACE)
#define IDirectXVideoDecoderService_QueryInterface( p, a, b ) (p)->lpVtbl->QueryInterface( p, a, b )
#define IDirectXVideoDecoderService_AddRef( p ) (p)->lpVtbl->AddRef( p )
#define IDirectXVideoDecoderService_Release( p ) (p)->lpVtbl->Release( p )
#define IDirectXVideoDecoderService_CreateSurface( p, a, b, c, d, e, f, g, h, i ) (p)->lpVtbl->CreateSurface( p, a, b, c, d, e, f, g, h, i )
#define IDirectXVideoDecoderService_CreateVideoDecoder( p, a, b, c, d, e, f ) (p)->lpVtbl->CreateVideoDecoder( p, a, b, c, d, e, f )
#define IDirectXVideoDecoderService_GetDecoderConfigurations( p, a, b, c, d, e ) (p)->lpVtbl->GetDecoderConfigurations( p, a, b, c, d, e )
#define IDirectXVideoDecoderService_GetDecoderDeviceGuids( p, a, b ) (p)->lpVtbl->GetDecoderDeviceGuids( p, a, b )
#define IDirectXVideoDecoderService_GetDecoderRenderTargets( p, a, b, c ) (p)->lpVtbl->GetDecoderRenderTargets( p, a, b, c )
#else
#define IDirectXVideoDecoderService_QueryInterface( p, a, b ) (p)->QueryInterface( a, b )
#define IDirectXVideoDecoderService_AddRef( p ) (p)->AddRef()
#define IDirectXVideoDecoderService_Release( p ) (p)->Release()
#define IDirectXVideoDecoderService_CreateSurface( p, a, b, c, d, e, f, g, h, i ) (p)->CreateSurface( a, b, c, d, e, f, g, h, i )
#define IDirectXVideoDecoderService_CreateVideoDecoder( p, a, b, c, d, e, f ) (p)->CreateVideoDecoder( a, b, c, d, e, f )
#define IDirectXVideoDecoderService_GetDecoderConfigurations( p, a, b, c, d, e ) (p)->GetDecoderConfigurations( a, b, c, d, e )
#define IDirectXVideoDecoderService_GetDecoderDeviceGuids( p, a, b ) (p)->GetDecoderDeviceGuids( a, b )
#define IDirectXVideoDecoderService_GetDecoderRenderTargets( p, a, b, c ) (p)->GetDecoderRenderTargets( a, b, c )
#endif
#undef INTERFACE
#define INTERFACE IDirect3DDeviceManager9
DECLARE_INTERFACE_( IDirect3DDeviceManager9, IUnknown )
{
STDMETHOD( QueryInterface ) ( THIS_ REFIID, PVOID* ) PURE;
STDMETHOD_( ULONG, AddRef ) ( THIS ) PURE;
STDMETHOD_( ULONG, Release ) ( THIS ) PURE;
STDMETHOD( ResetDevice ) ( THIS_ IDirect3DDevice9*, UINT ) PURE;
STDMETHOD( OpenDeviceHandle ) ( THIS_ HANDLE* ) PURE;
STDMETHOD( CloseDeviceHandle ) ( THIS_ HANDLE ) PURE;
STDMETHOD( TestDevice ) ( THIS_ HANDLE ) PURE;
STDMETHOD( LockDevice ) ( THIS_ HANDLE, IDirect3DDevice9**, BOOL ) PURE;
STDMETHOD( UnlockDevice ) ( THIS_ HANDLE, BOOL ) PURE;
STDMETHOD( GetVideoService ) ( THIS_ HANDLE, REFIID, void** ) PURE;
};
#if !defined(__cplusplus) || defined(CINTERFACE)
#define IDirect3DDeviceManager9_QueryInterface( p, a, b ) (p)->lpVtbl->QueryInterface( p, a, b )
#define IDirect3DDeviceManager9_AddRef( p ) (p)->lpVtbl->AddRef( p )
#define IDirect3DDeviceManager9_Release( p ) (p)->lpVtbl->Release( p )
#define IDirect3DDeviceManager9_ResetDevice( p, a, b ) (p)->lpVtbl->ResetDevice( p, a, b )
#define IDirect3DDeviceManager9_OpenDeviceHandle( p, a ) (p)->lpVtbl->OpenDeviceHandle( p, a )
#define IDirect3DDeviceManager9_CloseDeviceHandle( p, a ) (p)->lpVtbl->CloseDeviceHandle( p, a )
#define IDirect3DDeviceManager9_TestDevice( p, a ) (p)->lpVtbl->TestDevice( p, a )
#define IDirect3DDeviceManager9_LockDevice( p, a, b, c ) (p)->lpVtbl->LockDevice( p, a, b, c )
#define IDirect3DDeviceManager9_UnlockDevice( p, a, b ) (p)->lpVtbl->UnlockDevice( p, a, b )
#define IDirect3DDeviceManager9_GetVideoService( p, a, b, c ) (p)->lpVtbl->GetVideoService( p, a, b, c )
#else
#define IDirect3DDeviceManager9_QueryInterface( p, a, b ) (p)->QueryInterface( a, b )
#define IDirect3DDeviceManager9_AddRef( p ) (p)->AddRef()
#define IDirect3DDeviceManager9_Release( p ) (p)->Release()
#define IDirect3DDeviceManager9_ResetDevice( p, a, b ) (p)->ResetDevice( a, b )
#define IDirect3DDeviceManager9_OpenDeviceHandle( p, a ) (p)->OpenDeviceHandle( a )
#define IDirect3DDeviceManager9_CloseDeviceHandle( p, a ) (p)->CloseDeviceHandle( a )
#define IDirect3DDeviceManager9_TestDevice( p, a ) (p)->TestDevice( a )
#define IDirect3DDeviceManager9_LockDevice( p, a, b, c ) (p)->LockDevice( a, b, c )
#define IDirect3DDeviceManager9_UnlockDevice( p, a, b ) (p)->UnlockDevice( a, b )
#define IDirect3DDeviceManager9_GetVideoService( p, a, b, c ) (p)->GetVideoService( a, b, c )
#endif
typedef _COM_interface IDirectXVideoProcessorService IDirectXVideoProcessorService;
typedef _COM_interface IDirectXVideoProcessor IDirectXVideoProcessor;
#undef INTERFACE
#define INTERFACE IDirectXVideoProcessor
DECLARE_INTERFACE_( IDirectXVideoProcessor, IUnknown )
{
STDMETHOD( QueryInterface ) ( THIS_ REFIID, PVOID* ) PURE;
STDMETHOD_( ULONG, AddRef ) ( THIS ) PURE;
STDMETHOD_( ULONG, Release ) ( THIS ) PURE;
STDMETHOD( GetVideoProcessorService ) ( THIS_ IDirectXVideoProcessorService** ) PURE;
STDMETHOD( GetCreationParameters ) ( THIS_ GUID*, DXVA2_VideoDesc*, D3DFORMAT*, UINT* ) PURE;
STDMETHOD( GetVideoProcessorCaps ) ( THIS_ DXVA2_VideoProcessorCaps* ) PURE;
STDMETHOD( GetProcAmpRange ) ( THIS_ UINT, DXVA2_ValueRange* ) PURE;
STDMETHOD( GetFilterPropertyRange ) ( THIS_ UINT, DXVA2_ValueRange* ) PURE;
STDMETHOD( VideoProcessBlt ) ( THIS_ IDirect3DSurface9*, DXVA2_VideoProcessBltParams*, DXVA2_VideoSample*, UINT, HANDLE* ) PURE;
};
#if !defined(__cplusplus) || defined(CINTERFACE)
#define IDirectXVideoProcessor_QueryInterface( p, a, b ) (p)->lpVtbl->QueryInterface( p, a, b )
#define IDirectXVideoProcessor_AddRef( p ) (p)->lpVtbl->AddRef( p )
#define IDirectXVideoProcessor_Release( p ) (p)->lpVtbl->Release( p )
#define IDirectXVideoProcessor_GetVideoProcessorService( p, a ) (p)->lpVtbl->GetVideoProcessorService( p, a )
#define IDirectXVideoProcessor_GetCreationParameters( p, a, b, c, d ) (p)->lpVtbl->GetCreationParameters( p, a, b, c, d )
#define IDirectXVideoProcessor_GetVideoProcessorCaps( p, a ) (p)->lpVtbl->GetVideoProcessorCaps( p, a )
#define IDirectXVideoProcessor_GetProcAmpRange( p, a, b ) (p)->lpVtbl->GetProcAmpRange( p, a, b )
#define IDirectXVideoProcessor_GetFilterPropertyRange( p, a, b ) (p)->lpVtbl->GetFilterPropertyRange( p, a, b )
#define IDirectXVideoProcessor_VideoProcessBlt( p, a, b, c, d, e ) (p)->lpVtbl->VideoProcessBlt( p, a, b, c, d, e )
#else
#define IDirectXVideoProcessor_QueryInterface( p, a, b ) (p)->QueryInterface( a, b )
#define IDirectXVideoProcessor_AddRef( p ) (p)->AddRef()
#define IDirectXVideoProcessor_Release( p ) (p)->Release()
#define IDirectXVideoProcessor_GetVideoProcessorService( p, a ) (p)->GetVideoProcessorService( a )
#define IDirectXVideoProcessor_GetCreationParameters( p, a, b, c, d ) (p)->GetCreationParameters( a, b, c, d )
#define IDirectXVideoProcessor_GetVideoProcessorCaps( p, a ) (p)->GetVideoProcessorCaps( a )
#define IDirectXVideoProcessor_GetProcAmpRange( p, a, b ) (p)->GetProcAmpRange( a, b )
#define IDirectXVideoProcessor_GetFilterPropertyRange( p, a, b ) (p)->GetFilterPropertyRange( a, b )
#define IDirectXVideoProcessor_VideoProcessBlt( p, a, b, c, d, e ) (p)->VideoProcessBlt( a, b, c, d, e )
#endif
#undef INTERFACE
#define INTERFACE IDirectXVideoProcessorService
DECLARE_INTERFACE_( IDirectXVideoProcessorService, IDirectXVideoAccelerationService )
{
STDMETHOD( QueryInterface ) ( THIS_ REFIID, PVOID* ) PURE;
STDMETHOD_( ULONG, AddRef ) ( THIS ) PURE;
STDMETHOD_( ULONG, Release ) ( THIS ) PURE;
STDMETHOD( CreateSurface ) ( THIS_ UINT, UINT, UINT, D3DFORMAT, D3DPOOL, DWORD, DWORD, IDirect3DSurface9**, HANDLE* ) PURE;
STDMETHOD( RegisterVideoProcessorSoftwareDevice ) ( THIS_ void* ) PURE;
STDMETHOD( GetVideoProcessorDeviceGuids ) ( THIS_ DXVA2_VideoDesc*, UINT, GUID** ) PURE;
STDMETHOD( GetVideoProcessorRenderTargets ) ( THIS_ REFGUID, DXVA2_VideoDesc*, UINT*, D3DFORMAT** ) PURE;
STDMETHOD( GetVideoProcessorSubStreamFormats ) ( THIS_ REFGUID, DXVA2_VideoDesc*, D3DFORMAT, UINT*, D3DFORMAT** ) PURE;
STDMETHOD( GetVideoProcessorCaps ) ( THIS_ REFGUID, DXVA2_VideoDesc*, D3DFORMAT, DXVA2_VideoProcessorCaps* ) PURE;
STDMETHOD( GetProcAmpRange ) ( THIS_ REFGUID, DXVA2_VideoDesc*, D3DFORMAT, UINT, DXVA2_ValueRange* ) PURE;
STDMETHOD( GetFilterPropertyRange ) ( THIS_ REFGUID, DXVA2_VideoDesc*, D3DFORMAT, UINT, DXVA2_ValueRange* ) PURE;
STDMETHOD( CreateVideoProcessor ) ( THIS_ REFGUID, DXVA2_VideoDesc*, D3DFORMAT, UINT, IDirectXVideoProcessor** ) PURE;
};
#if !defined(__cplusplus) || defined(CINTERFACE)
#define IDirectXVideoProcessorService_QueryInterface( p, a, b ) (p)->lpVtbl->QueryInterface( p, a, b )
#define IDirectXVideoProcessorService_AddRef( p ) (p)->lpVtbl->AddRef( p )
#define IDirectXVideoProcessorService_Release( p ) (p)->lpVtbl->Release( p )
#define IDirectXVideoProcessorService_CreateSurface( p, a, b, c, d, e, f, g, h, i ) (p)->lpVtbl->CreateSurface( p, a, b, c, d, e, f, g, h, i )
#define IDirectXVideoProcessorService_RegisterVideoProcessorSoftwareDevice( p, a ) (p)->lpVtbl->RegisterVideoProcessorSoftwareDevice( p, a )
#define IDirectXVideoProcessorService_GetVideoProcessorDeviceGuids( p, a, b, c ) (p)->lpVtbl->GetVideoProcessorDeviceGuids( p, a, b, c )
#define IDirectXVideoProcessorService_GetVideoProcessorRenderTargets( p, a, b, c, d ) (p)->lpVtbl->GetVideoProcessorRenderTargets( p, a, b, c, d )
#define IDirectXVideoProcessorService_GetVideoProcessorSubStreamFormats( p, a, b, c, d, e ) (p)->lpVtbl->GetVideoProcessorSubStreamFormats( p, a, b, c, d, e )
#define IDirectXVideoProcessorService_GetVideoProcessorCaps( p, a, b, c, d ) (p)->lpVtbl->GetVideoProcessorCaps( p, a, b, c, d )
#define IDirectXVideoProcessorService_GetProcAmpRange( p, a, b, c, d, e ) (p)->lpVtbl->GetProcAmpRange( p, a, b, c, d, e )
#define IDirectXVideoProcessorService_GetFilterPropertyRange( p, a, b, c, d, e ) (p)->lpVtbl->GetFilterPropertyRange( p, a, b, c, d, e )
#define IDirectXVideoProcessorService_CreateVideoProcessor( p, a, b, c, d, e ) (p)->lpVtbl->CreateVideoProcessor( p, a, b, c, d, e )
#else
#define IDirectXVideoProcessorService_QueryInterface( p, a, b ) (p)->QueryInterface( a, b )
#define IDirectXVideoProcessorService_AddRef( p ) (p)->AddRef()
#define IDirectXVideoProcessorService_Release( p ) (p)->Release()
#define IDirectXVideoProcessorService_CreateSurface( p, a, b, c, d, e, f, g, h, i ) (p)->CreateSurface( a, b, c, d, e, f, g, h, i )
#define IDirectXVideoProcessorService_RegisterVideoProcessorSoftwareDevice( p, a ) (p)->RegisterVideoProcessorSoftwareDevice( a )
#define IDirectXVideoProcessorService_GetVideoProcessorDeviceGuids( p, a, b, c ) (p)->GetVideoProcessorDeviceGuids( a, b, c )
#define IDirectXVideoProcessorService_GetVideoProcessorRenderTargets( p, a, b, c, d ) (p)->GetVideoProcessorRenderTargets( a, b, c, d )
#define IDirectXVideoProcessorService_GetVideoProcessorSubStreamFormats( p, a, b, c, d, e ) (p)->GetVideoProcessorSubStreamFormats( a, b, c, d, e )
#define IDirectXVideoProcessorService_GetVideoProcessorCaps( p, a, b, c, d ) (p)->GetVideoProcessorCaps( a, b, c, d )
#define IDirectXVideoProcessorService_GetProcAmpRange( p, a, b, c, d, e ) (p)->GetProcAmpRange( a, b, c, d, e )
#define IDirectXVideoProcessorService_GetFilterPropertyRange( p, a, b, c, d, e ) (p)->GetFilterPropertyRange( a, b, c, d, e )
#define IDirectXVideoProcessorService_CreateVideoProcessor( p, a, b, c, d, e ) (p)->CreateVideoProcessor( a, b, c, d, e )
#endif
/*****************************************************************************************************
************************DXVA Video Processor********************************************************
*******************************************************************************************************/
/*#undef INTERFACE
#define INTERFACE IDirectXVideoService
DECLARE_INTERFACE_(IDirectXVideoService,IUnknown)
{
STDMETHOD(DXVA2CreateVideoService)(IDirect3DDevice9*, REFIID, void**) PURE;
};
#if !defined(__cplusplus) || defined(CINTERFACE)
#define IDirectXVideoService_DXVA2CreateVideoService(a,b,c) DXVA2CreateVideoService(a,b,c)
#else
#define IDirectXVideoService_DXVA2CreateVideoService(a,b,c) DXVA2CreateVideoService(a,b,c)
#endif*/
#ifdef __cplusplus
};
#endif
#ifdef __cplusplus
extern "C" HRESULT WINAPI DXVA2CreateVideoService( IDirect3DDevice9 *,
REFIID riid,
void **ppService );
#else
extern HRESULT WINAPI DXVA2CreateVideoService( IDirect3DDevice9 *,
REFIID riid,
void **ppService );
#endif
/* Local mirrors of the DXVA2 video-description enums from the Windows SDK
 * header dxva2api.h (MIDL-generated), for builds where that header is
 * unavailable. Names and values must stay bit-identical to the platform
 * definitions; do not rename or renumber. */

/* Chroma subsampling / siting description (bitfield). */
typedef
enum _DXVA2_VideoChromaSubSampling
    { DXVA2_VideoChromaSubsamplingMask = 0xf,
      DXVA2_VideoChromaSubsampling_Unknown = 0,
      DXVA2_VideoChromaSubsampling_ProgressiveChroma = 0x8,
      DXVA2_VideoChromaSubsampling_Horizontally_Cosited = 0x4,
      DXVA2_VideoChromaSubsampling_Vertically_Cosited = 0x2,
      DXVA2_VideoChromaSubsampling_Vertically_AlignedChromaPlanes = 0x1,
      DXVA2_VideoChromaSubsampling_MPEG2 = ( DXVA2_VideoChromaSubsampling_Horizontally_Cosited | DXVA2_VideoChromaSubsampling_Vertically_AlignedChromaPlanes ),
      DXVA2_VideoChromaSubsampling_MPEG1 = DXVA2_VideoChromaSubsampling_Vertically_AlignedChromaPlanes,
      DXVA2_VideoChromaSubsampling_DV_PAL = ( DXVA2_VideoChromaSubsampling_Horizontally_Cosited | DXVA2_VideoChromaSubsampling_Vertically_Cosited ),
      DXVA2_VideoChromaSubsampling_Cosited = ( ( DXVA2_VideoChromaSubsampling_Horizontally_Cosited | DXVA2_VideoChromaSubsampling_Vertically_Cosited ) | DXVA2_VideoChromaSubsampling_Vertically_AlignedChromaPlanes )} DXVA2_VideoChromaSubSampling;

/* Nominal luma range (studio 16-235 vs. full 0-255 swing). */
typedef
enum _DXVA2_NominalRange
    { DXVA2_NominalRangeMask = 0x7,
      DXVA2_NominalRange_Unknown = 0,
      DXVA2_NominalRange_Normal = 1,
      DXVA2_NominalRange_Wide = 2,
      DXVA2_NominalRange_0_255 = 1,
      DXVA2_NominalRange_16_235 = 2,
      DXVA2_NominalRange_48_208 = 3} DXVA2_NominalRange;

/* Intended viewing-environment lighting. */
typedef
enum _DXVA2_VideoLighting
    { DXVA2_VideoLightingMask = 0xf,
      DXVA2_VideoLighting_Unknown = 0,
      DXVA2_VideoLighting_bright = 1,
      DXVA2_VideoLighting_office = 2,
      DXVA2_VideoLighting_dim = 3,
      DXVA2_VideoLighting_dark = 4} DXVA2_VideoLighting;

/* Color primaries of the source content. */
typedef
enum _DXVA2_VideoPrimaries
    { DXVA2_VideoPrimariesMask = 0x1f,
      DXVA2_VideoPrimaries_Unknown = 0,
      DXVA2_VideoPrimaries_reserved = 1,
      DXVA2_VideoPrimaries_BT709 = 2,
      DXVA2_VideoPrimaries_BT470_2_SysM = 3,
      DXVA2_VideoPrimaries_BT470_2_SysBG = 4,
      DXVA2_VideoPrimaries_SMPTE170M = 5,
      DXVA2_VideoPrimaries_SMPTE240M = 6,
      DXVA2_VideoPrimaries_EBU3213 = 7,
      DXVA2_VideoPrimaries_SMPTE_C = 8} DXVA2_VideoPrimaries;

/* Opto-electronic transfer function (gamma) of the source content. */
typedef
enum _DXVA2_VideoTransferFunction
    { DXVA2_VideoTransFuncMask = 0x1f,
      DXVA2_VideoTransFunc_Unknown = 0,
      DXVA2_VideoTransFunc_10 = 1,
      DXVA2_VideoTransFunc_18 = 2,
      DXVA2_VideoTransFunc_20 = 3,
      DXVA2_VideoTransFunc_22 = 4,
      DXVA2_VideoTransFunc_709 = 5,
      DXVA2_VideoTransFunc_240M = 6,
      DXVA2_VideoTransFunc_sRGB = 7,
      DXVA2_VideoTransFunc_28 = 8} DXVA2_VideoTransferFunction;

/* Scan layout of a video sample (progressive vs. field interleave). */
typedef
enum _DXVA2_SampleFormat
    { DXVA2_SampleFormatMask = 0xff,
      DXVA2_SampleUnknown = 0,
      DXVA2_SampleProgressiveFrame = 2,
      DXVA2_SampleFieldInterleavedEvenFirst = 3,
      DXVA2_SampleFieldInterleavedOddFirst = 4,
      DXVA2_SampleFieldSingleEven = 5,
      DXVA2_SampleFieldSingleOdd = 6,
      DXVA2_SampleSubStream = 7} DXVA2_SampleFormat;

/* YCbCr <-> RGB conversion matrix. */
typedef
enum _DXVA2_VideoTransferMatrix
    { DXVA2_VideoTransferMatrixMask = 0x7,
      DXVA2_VideoTransferMatrix_Unknown = 0,
      DXVA2_VideoTransferMatrix_BT709 = 1,
      DXVA2_VideoTransferMatrix_BT601 = 2,
      DXVA2_VideoTransferMatrix_SMPTE240M = 3} DXVA2_VideoTransferMatrix;

/* Video-processor noise/detail filter setting identifiers. */
enum __MIDL___MIDL_itf_dxva2api_0000_0000_0004
    { DXVA2_NoiseFilterLumaLevel = 1,
      DXVA2_NoiseFilterLumaThreshold = 2,
      DXVA2_NoiseFilterLumaRadius = 3,
      DXVA2_NoiseFilterChromaLevel = 4,
      DXVA2_NoiseFilterChromaThreshold = 5,
      DXVA2_NoiseFilterChromaRadius = 6,
      DXVA2_DetailFilterLumaLevel = 7,
      DXVA2_DetailFilterLumaThreshold = 8,
      DXVA2_DetailFilterLumaRadius = 9,
      DXVA2_DetailFilterChromaLevel = 10,
      DXVA2_DetailFilterChromaThreshold = 11,
      DXVA2_DetailFilterChromaRadius = 12};

/* Video-processor capability flags (bitfield). */
enum __MIDL___MIDL_itf_dxva2api_0000_0000_0008
    { DXVA2_VideoProcess_None = 0,
      DXVA2_VideoProcess_YUV2RGB = 0x1,
      DXVA2_VideoProcess_StretchX = 0x2,
      DXVA2_VideoProcess_StretchY = 0x4,
      DXVA2_VideoProcess_AlphaBlend = 0x8,
      DXVA2_VideoProcess_SubRects = 0x10,
      DXVA2_VideoProcess_SubStreams = 0x20,
      DXVA2_VideoProcess_SubStreamsExtended = 0x40,
      DXVA2_VideoProcess_YUV2RGBExtended = 0x80,
      DXVA2_VideoProcess_AlphaBlendExtended = 0x100,
      DXVA2_VideoProcess_Constriction = 0x200,
      DXVA2_VideoProcess_NoiseFilter = 0x400,
      DXVA2_VideoProcess_DetailFilter = 0x800,
      DXVA2_VideoProcess_PlanarAlpha = 0x1000,
      DXVA2_VideoProcess_LinearScaling = 0x2000,
      DXVA2_VideoProcess_GammaCompensated = 0x4000,
      DXVA2_VideoProcess_MaintainsOriginalFieldData = 0x8000,
      DXVA2_VideoProcess_Mask = 0xffff};

/* DXVA2_Fixed32 fixed-point <-> float conversion helpers. */
__inline float hb_dx_fixedtofloat( const DXVA2_Fixed32 _fixed_ );
__inline const DXVA2_Fixed32 hb_dx_fixed32_opaque_alpha();
__inline DXVA2_Fixed32 hb_dx_floattofixed( const float _float_ );
#endif
#endif //_DXVA2API_H
HandBrake-0.10.2/libhb/hb.h 0000664 0001752 0001752 00000013122 12463330511 015656 0 ustar handbrake handbrake /* hb.h
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: <https://handbrake.fr/>.
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#ifndef HB_HB_H
#define HB_HB_H
#ifdef __cplusplus
extern "C" {
#endif
#include "project.h"
#include "common.h"
#include "hb_dict.h"
/* hb_init()
Initializes a libhb session (launches his own thread, detects CPUs,
etc) */
#define HB_DEBUG_NONE 0
#define HB_DEBUG_ALL 1
void hb_register( hb_work_object_t * );
void hb_register_logger( void (*log_cb)(const char* message) );
hb_handle_t * hb_init( int verbose, int update_check );
hb_handle_t * hb_init_dl ( int verbose, int update_check ); // hb_init for use with dylib
/* hb_get_version() */
char * hb_get_version( hb_handle_t * );
int hb_get_build( hb_handle_t * );
/* hb_check_update()
Checks for an update on the website. If there is, returns the build
number and points 'version' to a version description. Returns a
negative value otherwise. */
int hb_check_update( hb_handle_t * h, char ** version );
char * hb_dvd_name( char * path );
void hb_dvd_set_dvdnav( int enable );
/* hb_scan()
Scan the specified path. Can be a DVD device, a VIDEO_TS folder or
a VOB file. If title_index is 0, scan all titles. */
void hb_scan( hb_handle_t *, const char * path,
int title_index, int preview_count,
int store_previews, uint64_t min_duration );
void hb_scan_stop( hb_handle_t * );
uint64_t hb_first_duration( hb_handle_t * );
/* hb_get_titles()
Returns the list of valid titles detected by the latest scan. */
hb_list_t * hb_get_titles( hb_handle_t * );
/* hb_get_title_set()
Returns the title set which contains a list of valid titles detected
by the latest scan and title set data. */
hb_title_set_t * hb_get_title_set( hb_handle_t * );
/* hb_detect_comb()
Analyze a frame for interlacing artifacts, returns true if they're found.
Taken from Thomas Oestreich's 32detect filter in the Transcode project. */
int hb_detect_comb( hb_buffer_t * buf, int color_equal, int color_diff, int threshold, int prog_equal, int prog_diff, int prog_threshold );
// JJJ: title->job?
int hb_save_preview( hb_handle_t * h, int title, int preview,
hb_buffer_t *buf );
hb_buffer_t * hb_read_preview( hb_handle_t * h, hb_title_t *title,
int preview );
void hb_get_preview( hb_handle_t *, hb_job_t *, int,
uint8_t * );
hb_image_t * hb_get_preview2(hb_handle_t * h, int title_idx, int picture,
hb_ui_geometry_t *ui_geo, int deinterlace);
void hb_set_anamorphic_size2(hb_geometry_t *src_geo,
hb_ui_geometry_t *ui_geo,
hb_geometry_t *result);
void hb_set_anamorphic_size( hb_job_t *,
int *output_width, int *output_height,
int *output_par_width, int *output_par_height );
void hb_validate_size( hb_job_t * job );
void hb_add_filter( hb_job_t * job, hb_filter_object_t * filter,
const char * settings );
/* Handling jobs */
int hb_count( hb_handle_t * );
hb_job_t * hb_job( hb_handle_t *, int );
void hb_add( hb_handle_t *, hb_job_t * );
void hb_rem( hb_handle_t *, hb_job_t * );
hb_title_t * hb_find_title_by_index( hb_handle_t *h, int title_index );
hb_job_t * hb_job_init_by_index( hb_handle_t *h, int title_index );
hb_job_t * hb_job_init( hb_title_t * title );
void hb_job_reset( hb_job_t * job );
void hb_job_close( hb_job_t ** job );
void hb_start( hb_handle_t * );
void hb_pause( hb_handle_t * );
void hb_resume( hb_handle_t * );
void hb_stop( hb_handle_t * );
void hb_system_sleep_allow(hb_handle_t*);
void hb_system_sleep_prevent(hb_handle_t*);
/* Persistent data between jobs. */
typedef struct hb_interjob_s
{
    int        last_job;        /* job->sequence_id & 0xFFFFFF */
    int        frame_count;     /* number of frames counted by sync */
    int        out_frame_count; /* number of frames counted by render */
    uint64_t   total_time;      /* real length in 90kHz ticks (i.e. seconds * 90000) */
    int        vrate;           /* actual measured output vrate from 1st pass */
    int        vrate_base;      /* actual measured output vrate_base from 1st pass */
    hb_subtitle_t *select_subtitle; /* foreign language scan subtitle */
} hb_interjob_t;
hb_interjob_t * hb_interjob_get( hb_handle_t * );
/* hb_get_state()
Should be regularly called by the UI (like 5 or 10 times a second).
Look at test/test.c to see how to use it. */
void hb_get_state( hb_handle_t *, hb_state_t * );
void hb_get_state2( hb_handle_t *, hb_state_t * );
/* hb_get_scancount() is called by the MacGui in UpdateUI to
check for a new scan during HB_STATE_WORKING phase */
int hb_get_scancount( hb_handle_t * );
/* hb_close()
Aborts all current jobs if any, frees memory. */
void hb_close( hb_handle_t ** );
/* hb_global_init()
Performs process initialization. */
int hb_global_init();
/* hb_global_close()
Performs final cleanup for the process. */
void hb_global_close();
/* hb_get_instance_id()
Return the unique instance id of an libhb instance created by hb_init. */
int hb_get_instance_id( hb_handle_t * h );
#ifdef __cplusplus
}
#endif
#endif
HandBrake-0.10.2/libhb/encx264.c 0000664 0001752 0001752 00000153633 12463330511 016465 0 ustar handbrake handbrake /* encx264.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include
#include "hb.h"
#include "hb_dict.h"
#include "encx264.h"
int encx264Init( hb_work_object_t *, hb_job_t * );
int encx264Work( hb_work_object_t *, hb_buffer_t **, hb_buffer_t ** );
void encx264Close( hb_work_object_t * );
/* Work-object registration record for the libx264 H.264 encoder:
 * id, display name, and the init/work/close entry points. */
hb_work_object_t hb_encx264 =
{
    WORK_ENCX264,
    "H.264/AVC encoder (libx264)",
    encx264Init,
    encx264Work,
    encx264Close
};
#define DTS_BUFFER_SIZE 32
/*
* The frame info struct remembers information about each frame across calls
* to x264_encoder_encode. Since frames are uniquely identified by their
* timestamp, we use some bits of the timestamp as an index. The LSB is
* chosen so that two successive frames will have different values in the
* bits over any plausible range of frame rates. (Starting with bit 8 allows
* any frame rate slower than 352fps.) The MSB determines the size of the array.
* It is chosen so that two frames can't use the same slot during the
* encoder's max frame delay (set by the standard as 16 frames) and so
* that, up to some minimum frame rate, frames are guaranteed to map to
* different slots. (An MSB of 17 which is 2^(17-8+1) = 1024 slots guarantees
* no collisions down to a rate of .7 fps).
*/
#define FRAME_INFO_MAX2 (8) // 2^8 = 256; 90000/256 = 352 frames/sec
#define FRAME_INFO_MIN2 (17) // 2^17 = 128K; 90000/131072 = 1.4 frames/sec
#define FRAME_INFO_SIZE (1 << (FRAME_INFO_MIN2 - FRAME_INFO_MAX2 + 1))
#define FRAME_INFO_MASK (FRAME_INFO_SIZE - 1)
/* Per-instance encoder state, created in encx264Init and torn down
 * in encx264Close. */
struct hb_work_private_s
{
    hb_job_t       * job;        // the encoding job (owned by libhb)
    x264_t         * x264;       // libx264 encoder handle
    x264_picture_t   pic_in;     // reusable input-picture descriptor
    uint8_t        * grey_data;  // 0x80-filled chroma planes for grayscale encodes (NULL otherwise)
    uint32_t         frames_in;  // frames submitted to the encoder
    uint32_t         frames_out; // output buffers produced
    int64_t          last_stop;  // Debugging - stop time of previous input frame
    hb_list_t      * delayed_chapters;  // chapter_s marks waiting for their IDR frame
    int64_t          next_chapter_pts;  // pts of next pending chapter, AV_NOPTS_VALUE if none
    struct {
        int64_t      duration;
    } frame_info[FRAME_INFO_SIZE];  // per-frame data indexed by pts bits (see comment above)
    char             filename[1024]; // 2-pass ratecontrol stats file path
};
// used in delayed_chapters list: a chapter mark whose frame is still
// queued inside the encoder.
struct chapter_s
{
    int     index;  // chapter number to attach to the output buffer
    int64_t start;  // chapter start time in 90 kHz ticks
};
/***********************************************************************
 * encx264Init
 ***********************************************************************
 * Configure and open a libx264 encoder for 'job': build an x264_param_t
 * from the preset/tune/options/profile/level strings, open the encoder,
 * and stash the SPS/PPS headers in the work object's config.
 *
 * Returns 0 on success, 1 on failure. On failure all partially-built
 * state is released and w->private_data is reset to NULL (the original
 * error paths leaked pv->delayed_chapters and left w->private_data
 * dangling after free(pv)).
 **********************************************************************/
int encx264Init( hb_work_object_t * w, hb_job_t * job )
{
    x264_param_t param;
    x264_nal_t * nal;
    int nal_count;

    hb_work_private_t * pv = calloc( 1, sizeof( hb_work_private_t ) );
    w->private_data = pv;

    pv->job = job;
    pv->next_chapter_pts = AV_NOPTS_VALUE;
    pv->delayed_chapters = hb_list_init();

    if (x264_param_default_preset(&param,
                                  job->encoder_preset, job->encoder_tune) < 0)
    {
        goto fail;
    }

    /* If the PSNR or SSIM tunes are in use, enable the relevant metric */
    if (job->encoder_tune != NULL && *job->encoder_tune)
    {
        char *tmp = strdup(job->encoder_tune);
        char *tok = strtok(tmp, ",./-+");
        /* strtok() returns NULL when the string holds no tokens (e.g. all
         * separators); the original do/while dereferenced tok unchecked. */
        while (tok != NULL)
        {
            if (!strncasecmp(tok, "psnr", 4))
            {
                param.analyse.b_psnr = 1;
                break;
            }
            if (!strncasecmp(tok, "ssim", 4))
            {
                param.analyse.b_ssim = 1;
                break;
            }
            tok = strtok(NULL, ",./-+");
        }
        free(tmp);
    }

    /* Some HandBrake-specific defaults; users can override them
     * using the encoder_options string. */
    if( job->pass == 2 && job->cfr != 1 )
    {
        /* Use the frame rate actually measured during the first pass. */
        hb_interjob_t * interjob = hb_interjob_get( job->h );
        param.i_fps_num = interjob->vrate;
        param.i_fps_den = interjob->vrate_base;
    }
    else
    {
        param.i_fps_num = job->vrate;
        param.i_fps_den = job->vrate_base;
    }
    if ( job->cfr == 1 )
    {
        param.i_timebase_num = 0;
        param.i_timebase_den = 0;
        param.b_vfr_input = 0;
    }
    else
    {
        /* Variable frame rate: timestamps are in 90 kHz ticks. */
        param.i_timebase_num = 1;
        param.i_timebase_den = 90000;
    }

    /* Set min:max keyframe intervals to 1:10 of fps;
     * adjust +0.5 for when fps has remainder to bump
     * { 23.976, 29.97, 59.94 } to { 24, 30, 60 }. */
    param.i_keyint_min = (int)( (double)job->vrate / (double)job->vrate_base + 0.5 );
    param.i_keyint_max = 10 * param.i_keyint_min;

    param.i_log_level = X264_LOG_INFO;

    /* set up the VUI color model & gamma to match what the COLR atom
     * set in muxmp4.c says. See libhb/muxmp4.c for notes. */
    if( job->color_matrix_code == 4 )
    {
        // Custom
        param.vui.i_colorprim = job->color_prim;
        param.vui.i_transfer  = job->color_transfer;
        param.vui.i_colmatrix = job->color_matrix;
    }
    else if( job->color_matrix_code == 3 )
    {
        // ITU BT.709 HD content
        param.vui.i_colorprim = HB_COLR_PRI_BT709;
        param.vui.i_transfer  = HB_COLR_TRA_BT709;
        param.vui.i_colmatrix = HB_COLR_MAT_BT709;
    }
    else if( job->color_matrix_code == 2 )
    {
        // ITU BT.601 DVD or SD TV content (PAL)
        param.vui.i_colorprim = HB_COLR_PRI_EBUTECH;
        param.vui.i_transfer  = HB_COLR_TRA_BT709;
        param.vui.i_colmatrix = HB_COLR_MAT_SMPTE170M;
    }
    else if( job->color_matrix_code == 1 )
    {
        // ITU BT.601 DVD or SD TV content (NTSC)
        param.vui.i_colorprim = HB_COLR_PRI_SMPTEC;
        param.vui.i_transfer  = HB_COLR_TRA_BT709;
        param.vui.i_colmatrix = HB_COLR_MAT_SMPTE170M;
    }
    else
    {
        // detected during scan
        param.vui.i_colorprim = job->title->color_prim;
        param.vui.i_transfer  = job->title->color_transfer;
        param.vui.i_colmatrix = job->title->color_matrix;
    }

    /* place job->encoder_options in an hb_dict_t for convenience */
    hb_dict_t * x264_opts = NULL;
    if (job->encoder_options != NULL && *job->encoder_options)
    {
        x264_opts = hb_encopts_to_dict(job->encoder_options, job->vcodec);
    }
    /* iterate through x264_opts and have libx264 parse the options for us */
    int ret;
    hb_dict_entry_t * entry = NULL;
    while( ( entry = hb_dict_next( x264_opts, entry ) ) )
    {
        /* Here's where the strings are passed to libx264 for parsing. */
        ret = x264_param_parse( &param, entry->key, entry->value );
        /* Let x264 sanity check the options for us */
        if( ret == X264_PARAM_BAD_NAME )
            hb_log( "x264 options: Unknown suboption %s", entry->key );
        if( ret == X264_PARAM_BAD_VALUE )
            hb_log( "x264 options: Bad argument %s=%s", entry->key, entry->value ? entry->value : "(null)" );
    }
    hb_dict_free( &x264_opts );

    /* Reload colorimetry settings in case custom values were set
     * in the encoder_options string */
    job->color_matrix_code = 4;
    job->color_prim        = param.vui.i_colorprim;
    job->color_transfer    = param.vui.i_transfer;
    job->color_matrix      = param.vui.i_colmatrix;

    /* For 25 fps sources, HandBrake's explicit keyints will match the x264 defaults:
     * min-keyint 25 (same as auto), keyint 250. */
    if( param.i_keyint_min != 25 || param.i_keyint_max != 250 )
    {
        int min_auto;

        if ( param.i_fps_num / param.i_fps_den < param.i_keyint_max / 10 )
            min_auto = param.i_fps_num / param.i_fps_den;
        else
            min_auto = param.i_keyint_max / 10;

        char min[40], max[40];
        param.i_keyint_min == X264_KEYINT_MIN_AUTO ?
            snprintf( min, 40, "auto (%d)", min_auto ) :
            snprintf( min, 40, "%d", param.i_keyint_min );
        param.i_keyint_max == X264_KEYINT_MAX_INFINITE ?
            snprintf( max, 40, "infinite" ) :
            snprintf( max, 40, "%d", param.i_keyint_max );
        hb_log( "encx264: min-keyint: %s, keyint: %s", min, max );
    }

    /* Settings which can't be overriden in the encoder_options string
     * (muxer-specific settings, resolution, ratecontrol, etc.). */

    /* Disable annexb. Inserts size into nal header instead of start code. */
    param.b_annexb = 0;

    param.i_width  = job->width;
    param.i_height = job->height;

    if( job->anamorphic.mode )
    {
        param.vui.i_sar_width  = job->anamorphic.par_width;
        param.vui.i_sar_height = job->anamorphic.par_height;
    }

    if( job->vquality >= 0 )
    {
        /* Constant RF */
        param.rc.i_rc_method   = X264_RC_CRF;
        param.rc.f_rf_constant = job->vquality;
        hb_log( "encx264: encoding at constant RF %f", param.rc.f_rf_constant );
    }
    else
    {
        /* Average bitrate */
        param.rc.i_rc_method = X264_RC_ABR;
        param.rc.i_bitrate   = job->vbitrate;
        hb_log( "encx264: encoding at average bitrate %d", param.rc.i_bitrate );
        if( job->pass > 0 && job->pass < 3 )
        {
            /* 2-pass: both passes share one stats file. */
            memset( pv->filename, 0, 1024 );
            hb_get_tempory_filename( job->h, pv->filename, "x264.log" );
        }
        switch( job->pass )
        {
            case 1:
                param.rc.b_stat_read  = 0;
                param.rc.b_stat_write = 1;
                param.rc.psz_stat_out = pv->filename;
                break;
            case 2:
                param.rc.b_stat_read  = 1;
                param.rc.b_stat_write = 0;
                param.rc.psz_stat_in  = pv->filename;
                break;
        }
    }

    /* Apply profile and level settings last, if present. */
    if (job->encoder_profile != NULL && *job->encoder_profile)
    {
        if (hb_apply_h264_profile(&param, job->encoder_profile, 1))
        {
            goto fail;
        }
    }
    if (job->encoder_level != NULL && *job->encoder_level)
    {
        if (hb_apply_h264_level(&param, job->encoder_level,
                                job->encoder_profile, 1) < 0)
        {
            goto fail;
        }
    }

    /* Turbo first pass */
    if( job->pass == 1 && job->fastfirstpass == 1 )
    {
        x264_param_apply_fastfirstpass( &param );
    }

    /* B-pyramid is enabled by default. */
    job->areBframes = 2;
    if( !param.i_bframe )
    {
        job->areBframes = 0;
    }
    else if( !param.i_bframe_pyramid )
    {
        job->areBframes = 1;
    }

    /* Log the unparsed x264 options string. */
    char *x264_opts_unparsed = hb_x264_param_unparse(job->encoder_preset,
                                                     job->encoder_tune,
                                                     job->encoder_options,
                                                     job->encoder_profile,
                                                     job->encoder_level,
                                                     job->width, job->height);
    if( x264_opts_unparsed != NULL )
    {
        hb_log( "encx264: unparsed options: %s", x264_opts_unparsed );
    }
    free( x264_opts_unparsed );

    hb_deep_log( 2, "encx264: opening libx264 (pass %d)", job->pass );
    pv->x264 = x264_encoder_open( &param );
    if ( pv->x264 == NULL )
    {
        hb_error("encx264: x264_encoder_open failed.");
        goto fail;
    }

    x264_encoder_headers( pv->x264, &nal, &nal_count );

    /* Sequence Parameter Set (skip the 4-byte start code) */
    memcpy(w->config->h264.sps, nal[0].p_payload + 4, nal[0].i_payload - 4);
    w->config->h264.sps_length = nal[0].i_payload - 4;

    /* Picture Parameter Set (skip the 4-byte start code) */
    memcpy(w->config->h264.pps, nal[1].p_payload + 4, nal[1].i_payload - 4);
    w->config->h264.pps_length = nal[1].i_payload - 4;

    x264_picture_init( &pv->pic_in );

    pv->pic_in.img.i_csp   = X264_CSP_I420;
    pv->pic_in.img.i_plane = 3;

    if( job->grayscale )
    {
        /* Feed constant mid-gray (0x80) chroma planes for every frame. */
        int uvsize = (hb_image_stride(AV_PIX_FMT_YUV420P, job->width, 1) *
                      hb_image_height(AV_PIX_FMT_YUV420P, job->height, 1));
        pv->grey_data = malloc(uvsize);
        memset(pv->grey_data, 0x80, uvsize);
        pv->pic_in.img.plane[1] = pv->pic_in.img.plane[2] = pv->grey_data;
    }

    return 0;

fail:
    /* Release everything built so far and clear the work object's
     * pointer so a later encx264Close can't touch freed memory. */
    hb_list_close( &pv->delayed_chapters );
    free( pv );
    w->private_data = NULL;
    return 1;
}
/* Tear down the encoder instance: drop any chapter marks still pending,
 * free the grayscale chroma buffer, close libx264 and release the
 * private state. Safe to call when init failed (private_data == NULL). */
void encx264Close( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;

    if (pv == NULL)
    {
        // init failed or never ran; nothing to clean up
        return;
    }
    if (pv->delayed_chapters != NULL)
    {
        // free the delayed chapter marks themselves, then the list
        struct chapter_s *item;
        while ((item = hb_list_item(pv->delayed_chapters, 0)) != NULL)
        {
            hb_list_rem(pv->delayed_chapters, item);
            free(item);
        }
        hb_list_close(&pv->delayed_chapters);
    }
    free( pv->grey_data );
    if (pv->x264 != NULL)
    {
        // guard: x264_encoder_close() does not accept NULL
        x264_encoder_close( pv->x264 );
    }
    free( pv );
    w->private_data = NULL;
}
/*
* see comments in definition of 'frame_info' in pv struct for description
* of what these routines are doing.
*/
/* Remember the incoming frame's duration, keyed by bits of its pts
 * (see the FRAME_INFO_* comment block above for the slot scheme). */
static void save_frame_info( hb_work_private_t * pv, hb_buffer_t * in )
{
    const int slot = (in->s.start >> FRAME_INFO_MAX2) & FRAME_INFO_MASK;

    pv->frame_info[slot].duration = in->s.stop - in->s.start;
}
/* Look up the duration stored by save_frame_info() for the frame whose
 * presentation timestamp is 'pts'. */
static int64_t get_frame_duration( hb_work_private_t * pv, int64_t pts )
{
    const int slot = (pts >> FRAME_INFO_MAX2) & FRAME_INFO_MASK;

    return pv->frame_info[slot].duration;
}
/* Package the NALs produced by one x264_encoder_encode() call into a
 * single hb_buffer_t: copy the payloads, set timestamps/duration from
 * the frame-info table, classify the frame type, and attach any pending
 * chapter mark on keyframes. Returns NULL when no video slice was
 * produced (buffer is discarded). */
static hb_buffer_t *nal_encode( hb_work_object_t *w, x264_picture_t *pic_out,
                                int i_nal, x264_nal_t *nal )
{
    hb_buffer_t *buf = NULL;
    hb_work_private_t *pv = w->private_data;
    hb_job_t *job = pv->job;

    /* Should be way too large */
    buf = hb_video_buffer_init( job->width, job->height );
    buf->size = 0;
    buf->s.frametype = 0;

    // use the pts to get the original frame's duration.
    buf->s.duration = get_frame_duration( pv, pic_out->i_pts );
    buf->s.start = pic_out->i_pts;
    buf->s.stop  = buf->s.start + buf->s.duration;
    buf->s.renderOffset = pic_out->i_dts;
    if ( !w->config->h264.init_delay && pic_out->i_dts < 0 )
    {
        // Record the muxer's initial delay from the first negative dts.
        w->config->h264.init_delay = -pic_out->i_dts;
    }

    /* Encode all the NALs we were given into buf.
       NOTE: This code assumes one video frame per NAL (but there can
       be other stuff like SPS and/or PPS). If there are multiple
       frames we only get the duration of the first which will
       eventually screw up the muxer & decoder. */
    int i;
    for( i = 0; i < i_nal; i++ )
    {
        int size = nal[i].i_payload;
        // Copy unconditionally; skipped NALs (below) don't advance
        // buf->size, so the next iteration overwrites them.
        memcpy(buf->data + buf->size, nal[i].p_payload, size);
        if( size < 1 )
        {
            continue;
        }

        /* H.264 in .mp4 or .mkv */
        switch( nal[i].i_type )
        {
            /* Sequence Parameter Set & Program Parameter Set go in the
             * mp4 header so skip them here
             */
            case NAL_SPS:
            case NAL_PPS:
                continue;

            case NAL_SLICE:
            case NAL_SLICE_IDR:
            case NAL_SEI:
            default:
                break;
        }

        /* Decide what type of frame we have. */
        switch( pic_out->i_type )
        {
            case X264_TYPE_IDR:
                // Handled in b_keyframe check below.
                break;

            case X264_TYPE_I:
                buf->s.frametype = HB_FRAME_I;
                break;

            case X264_TYPE_P:
                buf->s.frametype = HB_FRAME_P;
                break;

            case X264_TYPE_B:
                buf->s.frametype = HB_FRAME_B;
                break;

            /* This is for b-pyramid, which has reference b-frames
               However, it doesn't seem to ever be used... */
            case X264_TYPE_BREF:
                buf->s.frametype = HB_FRAME_BREF;
                break;

            // If it isn't the above, what type of frame is it??
            default:
                buf->s.frametype = 0;
                break;
        }

        /* Since libx264 doesn't tell us when b-frames are
           themselves reference frames, figure it out on our own. */
        if( (buf->s.frametype == HB_FRAME_B) &&
            (nal[i].i_ref_idc != NAL_PRIORITY_DISPOSABLE) )
            buf->s.frametype = HB_FRAME_BREF;

        /* Expose disposable bit to muxer. */
        if( nal[i].i_ref_idc == NAL_PRIORITY_DISPOSABLE )
            buf->s.flags &= ~HB_FRAME_REF;
        else
            buf->s.flags |= HB_FRAME_REF;

        // PIR has no IDR frames, but x264 marks recovery points
        // as keyframes.  So fake an IDR at these points. This flag
        // is also set for real IDR frames.
        if( pic_out->b_keyframe )
        {
            buf->s.frametype = HB_FRAME_IDR;
            /* if we have a chapter marker pending and this
               frame's presentation time stamp is at or after
               the marker's time stamp, use this as the
               chapter start. */
            if (pv->next_chapter_pts != AV_NOPTS_VALUE &&
                pv->next_chapter_pts <= pic_out->i_pts)
            {
                // we're no longer looking for this chapter
                pv->next_chapter_pts = AV_NOPTS_VALUE;

                // get the chapter index from the list
                struct chapter_s *item = hb_list_item(pv->delayed_chapters, 0);
                if (item != NULL)
                {
                    // we're done with this chapter
                    buf->s.new_chap = item->index;
                    hb_list_rem(pv->delayed_chapters, item);
                    free(item);

                    // we may still have another pending chapter
                    item = hb_list_item(pv->delayed_chapters, 0);
                    if (item != NULL)
                    {
                        // we're looking for this one now
                        // we still need it, don't remove it
                        pv->next_chapter_pts = item->start;
                    }
                }
            }
        }

        buf->size += size;
    }
    // make sure we found at least one video frame
    if ( buf->size <= 0 )
    {
        // no video - discard the buf
        hb_buffer_close( &buf );
    }
    return buf;
}
/* Submit one video frame to libx264 and wrap whatever it emits (which
 * may belong to an earlier frame, or nothing at all while the encoder's
 * lookahead fills) into an hb_buffer_t via nal_encode(). Returns NULL
 * when the encoder produced no output for this call. */
static hb_buffer_t *x264_encode( hb_work_object_t *w, hb_buffer_t *in )
{
    hb_work_private_t *pv = w->private_data;
    hb_job_t *job = pv->job;

    /* Point x264 at our current buffers Y(UV) data.  */
    pv->pic_in.img.i_stride[0] = in->plane[0].stride;
    pv->pic_in.img.i_stride[1] = in->plane[1].stride;
    pv->pic_in.img.i_stride[2] = in->plane[2].stride;
    pv->pic_in.img.plane[0] = in->plane[0].data;
    if( !job->grayscale )
    {
        // grayscale keeps the constant 0x80 planes set up at init time
        pv->pic_in.img.plane[1] = in->plane[1].data;
        pv->pic_in.img.plane[2] = in->plane[2].data;
    }

    if( in->s.new_chap && job->chapter_markers )
    {
        /* chapters have to start with an IDR frame so request that this
           frame be coded as IDR. Since there may be up to 16 frames
           currently buffered in the encoder remember the timestamp so
           when this frame finally pops out of the encoder we'll mark
           its buffer as the start of a chapter. */
        pv->pic_in.i_type = X264_TYPE_IDR;
        if (pv->next_chapter_pts == AV_NOPTS_VALUE)
        {
            pv->next_chapter_pts = in->s.start;
        }
        /*
         * Chapter markers are sometimes so close we can get a new one before the
         * previous marker has been through the encoding queue.
         *
         * Dropping markers can cause weird side-effects downstream, including but
         * not limited to missing chapters in the output, so we need to save it
         * somehow.
         */
        struct chapter_s *item = malloc(sizeof(struct chapter_s));
        if (item != NULL)
        {
            item->start = in->s.start;
            item->index = in->s.new_chap;
            hb_list_add(pv->delayed_chapters, item);
        }
        /* don't let 'work_loop' put a chapter mark on the wrong buffer */
        in->s.new_chap = 0;
    }
    else
    {
        pv->pic_in.i_type = X264_TYPE_AUTO;
    }

    /* XXX this is temporary debugging code to check that the upstream
     * modules (render & sync) have generated a continuous, self-consistent
     * frame stream with the current frame's start time equal to the
     * previous frame's stop time.
     */
    if( pv->last_stop != in->s.start )
    {
        hb_log("encx264 input continuity err: last stop %"PRId64"  start %"PRId64,
                pv->last_stop, in->s.start);
    }
    pv->last_stop = in->s.stop;

    // Remember info about this frame that we need to pass across
    // the x264_encoder_encode call (since it reorders frames).
    save_frame_info( pv, in );

    /* Feed the input PTS to x264 so it can figure out proper output PTS */
    pv->pic_in.i_pts = in->s.start;

    x264_picture_t pic_out;
    int i_nal;
    x264_nal_t *nal;

    x264_encoder_encode( pv->x264, &nal, &i_nal, &pv->pic_in, &pic_out );
    if ( i_nal > 0 )
    {
        return nal_encode( w, &pic_out, i_nal, nal );
    }
    return NULL;
}
/* Work-loop entry point: encode one input frame, or — on the zero-size
 * EOF buffer — drain the encoder's delayed frames and chain them ahead
 * of the EOF buffer for the muxer. */
int encx264Work( hb_work_object_t * w, hb_buffer_t ** buf_in,
                 hb_buffer_t ** buf_out )
{
    hb_work_private_t *pv = w->private_data;
    hb_buffer_t *in = *buf_in;

    *buf_out = NULL;

    if (in->size > 0)
    {
        // Normal frame: hand it to the encoder and pass along whatever
        // (possibly reordered) output it produces.
        ++pv->frames_in;
        ++pv->frames_out;
        *buf_out = x264_encode(w, in);
        return HB_WORK_OK;
    }

    // EOF on input. Flush any frames still in the encoder then
    // send the eof downstream to tell the muxer we're done.
    x264_picture_t pic_out;
    x264_nal_t    *nal;
    int            i_nal;
    hb_buffer_t   *tail = NULL;

    while (x264_encoder_delayed_frames(pv->x264))
    {
        x264_encoder_encode(pv->x264, &nal, &i_nal, NULL, &pic_out);
        if (i_nal < 0)
        {
            break;
        }
        if (i_nal == 0)
        {
            continue;
        }
        hb_buffer_t *out = nal_encode(w, &pic_out, i_nal, nal);
        if (out == NULL)
        {
            continue;
        }
        ++pv->frames_out;
        if (tail == NULL)
        {
            *buf_out = out;
        }
        else
        {
            tail->next = out;
        }
        tail = out;
    }

    // Flushed everything - append the eof buffer to the chain.
    if (tail == NULL)
    {
        *buf_out = in;
    }
    else
    {
        tail->next = in;
    }
    *buf_in = NULL;
    return HB_WORK_DONE;
}
/* Sanitize 'param' against the requested H.264 profile and have libx264
 * apply it. Returns 0 on success (or for the "auto" profile), non-zero
 * on failure.
 *
 * Fix: the original tested (h264_profile != NULL && ...) first but its
 * 'else if' branch then called strcasecmp(h264_profile, ...) — a NULL
 * dereference whenever no profile string was supplied. Check for NULL
 * up front instead. */
int hb_apply_h264_profile(x264_param_t *param, const char *h264_profile,
                          int verbose)
{
    if (h264_profile == NULL)
    {
        // error (no profile string), abort
        hb_error("hb_apply_h264_profile: no profile specified");
        return -1;
    }
    if (!strcasecmp(h264_profile, hb_h264_profile_names[0]))
    {
        // "auto", do nothing
        return 0;
    }

    /*
     * baseline profile doesn't support interlacing
     */
    if ((param->b_interlaced ||
         param->b_fake_interlaced) &&
        !strcasecmp(h264_profile, "baseline"))
    {
        if (verbose)
        {
            hb_log("hb_apply_h264_profile [warning]: baseline profile doesn't support interlacing, disabling");
        }
        param->b_interlaced = param->b_fake_interlaced = 0;
    }
    /*
     * lossless requires High 4:4:4 Predictive profile
     */
    if (param->rc.f_rf_constant < 1.0 &&
        param->rc.i_rc_method == X264_RC_CRF &&
        strcasecmp(h264_profile, "high444") != 0)
    {
        if (verbose)
        {
            hb_log("hb_apply_h264_profile [warning]: lossless requires high444 profile, disabling");
        }
        param->rc.f_rf_constant = 1.0;
    }
    if (!strcasecmp(h264_profile, "high10") ||
        !strcasecmp(h264_profile, "high422"))
    {
        // arbitrary profile names may be specified via the CLI
        // map unsupported high10 and high422 profiles to high
        return x264_param_apply_profile(param, "high");
    }
    return x264_param_apply_profile(param, h264_profile);
}
/* Dry-run level validation: build a throwaway x264_param_t for the given
 * geometry/rate/interlacing and see whether hb_apply_h264_level() accepts
 * it silently. Returns non-zero when the level check raised any warning
 * or error. */
int hb_check_h264_level(const char *h264_level, int width, int height,
                        int fps_num, int fps_den, int interlaced,
                        int fake_interlaced)
{
    x264_param_t test_param;

    x264_param_default(&test_param);
    test_param.i_width           = width;
    test_param.i_height          = height;
    test_param.i_fps_num         = fps_num;
    test_param.i_fps_den         = fps_den;
    test_param.b_interlaced      = !!interlaced;
    test_param.b_fake_interlaced = !!fake_interlaced;

    return hb_apply_h264_level(&test_param, h264_level, NULL, 0) != 0;
}
int hb_apply_h264_level(x264_param_t *param, const char *h264_level,
const char *h264_profile, int verbose)
{
float f_framerate;
const x264_level_t *x264_level = NULL;
int i, i_mb_size, i_mb_rate, i_mb_width, i_mb_height, max_mb_side, ret;
/*
* find the x264_level_t corresponding to the requested level
*/
if (h264_level != NULL &&
strcasecmp(h264_level, hb_h264_level_names[0]) != 0)
{
for (i = 0; hb_h264_level_values[i]; i++)
{
if (!strcmp(hb_h264_level_names[i], h264_level))
{
int val = hb_h264_level_values[i];
for (i = 0; x264_levels[i].level_idc; i++)
{
if (x264_levels[i].level_idc == val)
{
x264_level = &x264_levels[i];
break;
}
}
break;
}
}
if (x264_level == NULL)
{
// error (invalid or unsupported level), abort
hb_error("hb_apply_h264_level: invalid level %s", h264_level);
return -1;
}
}
else if(!strcasecmp(h264_level, hb_h264_level_names[0]))
{
// "auto", do nothing
return 0;
}
else
{
// error (level not a string), abort
hb_error("hb_apply_h264_level: no level specified");
return -1;
}
/*
* the H.264 profile determines VBV constraints
*/
enum
{
// Main or Baseline (equivalent)
HB_ENCX264_PROFILE_MAIN,
// High (no 4:2:2 or 10-bit support, so anything lossy is equivalent)
HB_ENCX264_PROFILE_HIGH,
// Lossless (4:2:0 8-bit for now)
HB_ENCX264_PROFILE_HIGH444,
} hb_encx264_profile;
/*
* H.264 profile
*
* TODO: we need to guess the profile like x264_sps_init does, otherwise
* we'll get an error when setting a Main-incompatible VBV and
* x264_sps_init() guesses Main profile. x264_sps_init() may eventually take
* VBV into account when guessing profile, at which point this code can be
* re-enabled.
*/
#if 0
if (h264_profile != NULL && *h264_profile)
{
// if the user explicitly specified a profile, don't guess it
if (!strcasecmp(h264_profile, "high444"))
{
hb_encx264_profile = HB_ENCX264_PROFILE_HIGH444;
}
else if (!strcasecmp(h264_profile, "main") ||
!strcasecmp(h264_profile, "baseline"))
{
hb_encx264_profile = HB_ENCX264_PROFILE_MAIN;
}
else
{
hb_encx264_profile = HB_ENCX264_PROFILE_HIGH;
}
}
else
#endif
{
// guess the H.264 profile if the user didn't request one
if (param->rc.i_rc_method == X264_RC_CRF &&
param->rc.f_rf_constant < 1.0)
{
hb_encx264_profile = HB_ENCX264_PROFILE_HIGH444;
}
else if (param->analyse.b_transform_8x8 ||
param->i_cqm_preset != X264_CQM_FLAT)
{
hb_encx264_profile = HB_ENCX264_PROFILE_HIGH;
}
else
{
hb_encx264_profile = HB_ENCX264_PROFILE_MAIN;
}
}
/*
* we need at least width and height in order to apply a level correctly
*/
if (param->i_width <= 0 || param->i_height <= 0)
{
// error (invalid width or height), abort
hb_error("hb_apply_h264_level: invalid resolution (width: %d, height: %d)",
param->i_width, param->i_height);
return -1;
}
/*
* a return value of 1 means there were warnings
*/
ret = 0;
/*
* some levels do not support interlaced encoding
*/
if (x264_level->frame_only && (param->b_interlaced ||
param->b_fake_interlaced))
{
if (verbose)
{
hb_log("hb_apply_h264_level [warning]: interlaced flag not supported for level %s, disabling",
h264_level);
}
ret = 1;
param->b_interlaced = param->b_fake_interlaced = 0;
}
/*
* frame dimensions and rate (in macroblocks)
*/
i_mb_width = (param->i_width + 15) / 16;
i_mb_height = (param->i_height + 15) / 16;
if (param->b_interlaced || param->b_fake_interlaced)
{
// interlaced: encoded height must divide cleanly by 32
i_mb_height = (i_mb_height + 1) & ~1;
}
i_mb_size = i_mb_width * i_mb_height;
if (param->i_fps_den <= 0 || param->i_fps_num <= 0)
{
i_mb_rate = 0;
f_framerate = 0.0;
}
else
{
i_mb_rate = (int64_t)i_mb_size * param->i_fps_num / param->i_fps_den;
f_framerate = (float)param->i_fps_num / param->i_fps_den;
}
/*
* sanitize ref/frameref
*/
if (param->i_keyint_max != 1)
{
int i_max_dec_frame_buffering =
MAX(MIN(x264_level->dpb / i_mb_size, 16), 1);
param->i_frame_reference =
MIN(i_max_dec_frame_buffering, param->i_frame_reference);
/*
* some level and resolution combos may require as little as 1 ref;
* bframes and b-pyramid are not compatible with this scenario
*/
if (i_max_dec_frame_buffering < 2)
{
param->i_bframe = 0;
}
else if (i_max_dec_frame_buffering < 4)
{
param->i_bframe_pyramid = X264_B_PYRAMID_NONE;
}
}
/*
* set and/or sanitize the VBV (if not lossless)
*/
if (hb_encx264_profile != HB_ENCX264_PROFILE_HIGH444)
{
// High profile allows for higher VBV bufsize/maxrate
int cbp_factor = hb_encx264_profile == HB_ENCX264_PROFILE_HIGH ? 5 : 4;
if (!param->rc.i_vbv_max_bitrate)
{
param->rc.i_vbv_max_bitrate = (x264_level->bitrate * cbp_factor) / 4;
}
else
{
param->rc.i_vbv_max_bitrate =
MIN(param->rc.i_vbv_max_bitrate,
(x264_level->bitrate * cbp_factor) / 4);
}
if (!param->rc.i_vbv_buffer_size)
{
param->rc.i_vbv_buffer_size = (x264_level->cpb * cbp_factor) / 4;
}
else
{
param->rc.i_vbv_buffer_size =
MIN(param->rc.i_vbv_buffer_size,
(x264_level->cpb * cbp_factor) / 4);
}
}
/*
* sanitize mvrange/mv-range
*/
param->analyse.i_mv_range =
MIN(param->analyse.i_mv_range,
x264_level->mv_range >> !!param->b_interlaced);
/*
* TODO: check the rest of the limits
*/
/*
* things we can do nothing about (too late to change resolution or fps),
* print warnings if we're not being quiet
*/
if (x264_level->frame_size < i_mb_size)
{
if (verbose)
{
hb_log("hb_apply_h264_level [warning]: frame size (%dx%d, %d macroblocks) too high for level %s (max. %d macroblocks)",
i_mb_width * 16, i_mb_height * 16, i_mb_size, h264_level,
x264_level->frame_size);
}
ret = 1;
}
else if (x264_level->mbps < i_mb_rate)
{
if (verbose)
{
hb_log("hb_apply_h264_level [warning]: framerate (%.3f) too high for level %s at %dx%d (max. %.3f)",
f_framerate, h264_level, param->i_width, param->i_height,
(float)x264_level->mbps / i_mb_size);
}
ret = 1;
}
/*
* width or height squared may not exceed 8 * frame_size (in macroblocks)
* thus neither dimension may exceed sqrt(8 * frame_size)
*/
max_mb_side = sqrt(x264_level->frame_size * 8);
if (i_mb_width > max_mb_side)
{
if (verbose)
{
hb_log("hb_apply_h264_level [warning]: frame too wide (%d) for level %s (max. %d)",
param->i_width, h264_level, max_mb_side * 16);
}
ret = 1;
}
if (i_mb_height > max_mb_side)
{
if (verbose)
{
hb_log("hb_apply_h264_level [warning]: frame too tall (%d) for level %s (max. %d)",
param->i_height, h264_level, max_mb_side * 16);
}
ret = 1;
}
/*
* level successfully applied, yay!
*/
param->i_level_idc = x264_level->level_idc;
return ret;
}
/**
 * Compare an x264 configuration (preset + tune + advanced options + profile +
 * level) against the global x264 defaults and "unparse" the differences into
 * a minimal x264 options string.
 *
 * @param x264_preset   x264 preset name (required; applied first)
 * @param x264_tune     x264 tune name(s), may be NULL
 * @param x264_encopts  advanced options string, may be NULL
 * @param h264_profile  H.264 profile name, may be NULL/empty
 * @param h264_level    H.264 level name, may be NULL/empty
 * @param width, height frame dimensions (needed to apply the level)
 *
 * @return malloc'ed options string (caller frees); on failure, a malloc'ed
 *         error message string (callers treat any return as a string).
 */
char * hb_x264_param_unparse(const char *x264_preset, const char *x264_tune,
                             const char *x264_encopts, const char *h264_profile,
                             const char *h264_level, int width, int height)
{
    int i;
    /*
     * Scratch buffer for formatted option values. Written exclusively via
     * bounded snprintf (and strcat of short fixed tokens for "analyse"):
     * the previous sprintf into a 32-byte buffer could overflow for extreme
     * user-supplied float values (e.g. psy-rd via the advanced options).
     */
    char buf[64];
    char *unparsed_opts;
    hb_dict_t *x264_opts;
    hb_dict_entry_t *entry;
    x264_param_t defaults, param;
    /*
     * get the global x264 defaults (what we compare against)
     */
    x264_param_default(&defaults);
    /*
     * apply the defaults, preset and tune
     */
    if (x264_param_default_preset(&param, x264_preset, x264_tune) < 0)
    {
        /*
         * Note: GUIs should be able to always specify valid presets/tunes, so
         * this code will hopefully never be reached
         */
        return strdup("hb_x264_param_unparse: invalid x264 preset/tune");
    }
    /*
     * place additional x264 options in a dictionary
     */
    entry = NULL;
    x264_opts = hb_encopts_to_dict(x264_encopts, HB_VCODEC_X264);
    /*
     * some libx264 options are set via dedicated widgets in the video tab or
     * hardcoded in libhb, and have no effect when present in the advanced x264
     * options string.
     *
     * clear them from x264_opts so as to not apply them during unparse.
     */
    hb_dict_unset(&x264_opts, "qp");
    hb_dict_unset(&x264_opts, "qp_constant");
    hb_dict_unset(&x264_opts, "crf");
    hb_dict_unset(&x264_opts, "bitrate");
    hb_dict_unset(&x264_opts, "fps");
    hb_dict_unset(&x264_opts, "force-cfr");
    hb_dict_unset(&x264_opts, "sar");
    hb_dict_unset(&x264_opts, "annexb");
    /*
     * apply the additional x264 options
     */
    while ((entry = hb_dict_next(x264_opts, entry)) != NULL)
    {
        // let's not pollute GUI logs with x264_param_parse return codes
        x264_param_parse(&param, entry->key, entry->value);
    }
    /*
     * apply the x264 profile, if specified
     */
    if (h264_profile != NULL && *h264_profile)
    {
        // be quiet so as to not pollute GUI logs
        hb_apply_h264_profile(&param, h264_profile, 0);
    }
    /*
     * apply the h264 level, if specified
     */
    if (h264_level != NULL && *h264_level)
    {
        // set width/height to avoid issues in hb_apply_h264_level
        param.i_width  = width;
        param.i_height = height;
        // be quiet so as to not pollute GUI logs
        hb_apply_h264_level(&param, h264_level, h264_profile, 0);
    }
    /*
     * if x264_encopts is NULL, x264_opts wasn't initialized
     */
    if (x264_opts == NULL && (x264_opts = hb_dict_init(20)) == NULL)
    {
        return strdup("hb_x264_param_unparse: could not initialize hb_dict_t");
    }
    /*
     * x264 lets you specify some options in multiple ways. For options that we
     * do unparse, clear the forms that don't match how we unparse said option
     * from the x264_opts dictionary.
     *
     * actual synonyms are already handled by hb_encopts_to_dict().
     *
     * "no-deblock" is a special case as it can't be unparsed to "deblock=0"
     *
     * also, don't bother with forms that aren't allowed by the x264 CLI, such
     * as "no-bframes" - there are too many.
     */
    hb_dict_unset(&x264_opts, "no-sliced-threads");
    hb_dict_unset(&x264_opts, "no-scenecut");
    hb_dict_unset(&x264_opts, "no-b-adapt");
    hb_dict_unset(&x264_opts, "no-weightb");
    hb_dict_unset(&x264_opts, "no-cabac");
    hb_dict_unset(&x264_opts, "interlaced"); // we unparse to tff/bff
    hb_dict_unset(&x264_opts, "no-interlaced");
    hb_dict_unset(&x264_opts, "no-8x8dct");
    hb_dict_unset(&x264_opts, "no-mixed-refs");
    hb_dict_unset(&x264_opts, "no-fast-pskip");
    hb_dict_unset(&x264_opts, "no-dct-decimate");
    hb_dict_unset(&x264_opts, "no-psy");
    hb_dict_unset(&x264_opts, "no-mbtree");
    /*
     * compare defaults to param and unparse to the x264_opts dictionary
     */
    if (!param.b_sliced_threads != !defaults.b_sliced_threads)
    {
        // can be modified by: tune zerolatency
        snprintf(buf, sizeof(buf), "%d", !!param.b_sliced_threads);
        hb_dict_set(&x264_opts, "sliced-threads", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "sliced-threads");
    }
    if (param.i_sync_lookahead != defaults.i_sync_lookahead)
    {
        // can be modified by: tune zerolatency
        snprintf(buf, sizeof(buf), "%d", param.i_sync_lookahead);
        hb_dict_set(&x264_opts, "sync-lookahead", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "sync-lookahead");
    }
    if (param.i_level_idc != defaults.i_level_idc)
    {
        // can be modified by: level
        for (i = 0; hb_h264_level_values[i]; i++)
            if (param.i_level_idc == hb_h264_level_values[i])
                hb_dict_set(&x264_opts, "level", hb_h264_level_names[i]);
    }
    else
    {
        hb_dict_unset(&x264_opts, "level");
    }
    if (param.i_frame_reference != defaults.i_frame_reference)
    {
        // can be modified by: presets, tunes, level
        snprintf(buf, sizeof(buf), "%d", param.i_frame_reference);
        hb_dict_set(&x264_opts, "ref", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "ref");
    }
    if (param.i_scenecut_threshold != defaults.i_scenecut_threshold)
    {
        // can be modified by: preset ultrafast
        snprintf(buf, sizeof(buf), "%d", param.i_scenecut_threshold);
        hb_dict_set(&x264_opts, "scenecut", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "scenecut");
    }
    if (param.i_bframe != defaults.i_bframe)
    {
        // can be modified by: presets, tunes, profile, level
        snprintf(buf, sizeof(buf), "%d", param.i_bframe);
        hb_dict_set(&x264_opts, "bframes", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "bframes");
    }
    if (param.i_bframe > 0)
    {
        if (param.i_bframe_adaptive != defaults.i_bframe_adaptive)
        {
            // can be modified by: presets
            snprintf(buf, sizeof(buf), "%d", param.i_bframe_adaptive);
            hb_dict_set(&x264_opts, "b-adapt", buf);
        }
        else
        {
            hb_dict_unset(&x264_opts, "b-adapt");
        }
        if (param.i_bframe > 1 &&
            param.i_bframe_pyramid != defaults.i_bframe_pyramid)
        {
            // can be modified by: level
            // clamp to the valid range before mapping to a name
            if (param.i_bframe_pyramid < X264_B_PYRAMID_NONE)
                param.i_bframe_pyramid = X264_B_PYRAMID_NONE;
            if (param.i_bframe_pyramid > X264_B_PYRAMID_NORMAL)
                param.i_bframe_pyramid = X264_B_PYRAMID_NORMAL;
            for (i = 0; x264_b_pyramid_names[i] != NULL; i++)
                if (param.i_bframe_pyramid == i)
                    hb_dict_set(&x264_opts, "b-pyramid",
                                x264_b_pyramid_names[i]);
        }
        else
        {
            hb_dict_unset(&x264_opts, "b-pyramid");
        }
        if (param.analyse.i_direct_mv_pred != defaults.analyse.i_direct_mv_pred)
        {
            // can be modified by: presets
            // clamp to the valid range before mapping to a name
            if (param.analyse.i_direct_mv_pred < X264_DIRECT_PRED_NONE)
                param.analyse.i_direct_mv_pred = X264_DIRECT_PRED_NONE;
            if (param.analyse.i_direct_mv_pred > X264_DIRECT_PRED_AUTO)
                param.analyse.i_direct_mv_pred = X264_DIRECT_PRED_AUTO;
            for (i = 0; x264_direct_pred_names[i] != NULL; i++)
                if (param.analyse.i_direct_mv_pred == i)
                    hb_dict_set(&x264_opts, "direct",
                                x264_direct_pred_names[i]);
        }
        else
        {
            hb_dict_unset(&x264_opts, "direct");
        }
        if (!param.analyse.b_weighted_bipred !=
            !defaults.analyse.b_weighted_bipred)
        {
            // can be modified by: preset ultrafast, tune fastdecode
            snprintf(buf, sizeof(buf), "%d", !!param.analyse.b_weighted_bipred);
            hb_dict_set(&x264_opts, "weightb", buf);
        }
        else
        {
            hb_dict_unset(&x264_opts, "weightb");
        }
    }
    else
    {
        // no bframes, these options have no effect
        hb_dict_unset(&x264_opts, "b-adapt");
        hb_dict_unset(&x264_opts, "b-pyramid");
        hb_dict_unset(&x264_opts, "direct");
        hb_dict_unset(&x264_opts, "weightb");
        hb_dict_unset(&x264_opts, "b-bias");
        hb_dict_unset(&x264_opts, "open-gop");
    }
    if (!param.b_deblocking_filter != !defaults.b_deblocking_filter)
    {
        // can be modified by: preset ultrafast, tune fastdecode
        snprintf(buf, sizeof(buf), "%d", !param.b_deblocking_filter);
        hb_dict_set(&x264_opts, "no-deblock", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "no-deblock");
    }
    if (param.b_deblocking_filter &&
        (param.i_deblocking_filter_alphac0 != defaults.i_deblocking_filter_alphac0 ||
         param.i_deblocking_filter_beta    != defaults.i_deblocking_filter_beta))
    {
        // can be modified by: tunes
        snprintf(buf, sizeof(buf), "%d,%d", param.i_deblocking_filter_alphac0,
                 param.i_deblocking_filter_beta);
        hb_dict_set(&x264_opts, "deblock", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "deblock");
    }
    if (!param.b_cabac != !defaults.b_cabac)
    {
        // can be modified by: preset ultrafast, tune fastdecode, profile
        snprintf(buf, sizeof(buf), "%d", !!param.b_cabac);
        hb_dict_set(&x264_opts, "cabac", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "cabac");
    }
    if (param.b_interlaced != defaults.b_interlaced)
    {
        if (param.b_tff)
        {
            hb_dict_set(&x264_opts, "tff", "1");
            hb_dict_unset(&x264_opts, "bff");
        }
        else
        {
            hb_dict_set(&x264_opts, "bff", "1");
            hb_dict_unset(&x264_opts, "tff");
        }
        hb_dict_unset(&x264_opts, "fake-interlaced");
    }
    else if (param.b_fake_interlaced != defaults.b_fake_interlaced)
    {
        hb_dict_set(&x264_opts, "fake-interlaced", "1");
        hb_dict_unset(&x264_opts, "tff");
        hb_dict_unset(&x264_opts, "bff");
    }
    else
    {
        hb_dict_unset(&x264_opts, "tff");
        hb_dict_unset(&x264_opts, "bff");
        hb_dict_unset(&x264_opts, "fake-interlaced");
    }
    if (param.i_cqm_preset == defaults.i_cqm_preset &&
        param.psz_cqm_file == defaults.psz_cqm_file)
    {
        // can be reset to default by: profile
        hb_dict_unset(&x264_opts, "cqm");
        hb_dict_unset(&x264_opts, "cqm4");
        hb_dict_unset(&x264_opts, "cqm8");
        hb_dict_unset(&x264_opts, "cqm4i");
        hb_dict_unset(&x264_opts, "cqm4p");
        hb_dict_unset(&x264_opts, "cqm8i");
        hb_dict_unset(&x264_opts, "cqm8p");
        hb_dict_unset(&x264_opts, "cqm4iy");
        hb_dict_unset(&x264_opts, "cqm4ic");
        hb_dict_unset(&x264_opts, "cqm4py");
        hb_dict_unset(&x264_opts, "cqm4pc");
    }
    /*
     * Note: param.analyse.intra can only be modified directly or by using
     * x264 --preset ultrafast, but not via the "analyse" option
     */
    if (param.analyse.inter != defaults.analyse.inter)
    {
        // can be modified by: presets, tune touhou
        if (!param.analyse.inter)
        {
            hb_dict_set(&x264_opts, "analyse", "none");
        }
        else if ((param.analyse.inter & X264_ANALYSE_I4x4)      &&
                 (param.analyse.inter & X264_ANALYSE_I8x8)      &&
                 (param.analyse.inter & X264_ANALYSE_PSUB16x16) &&
                 (param.analyse.inter & X264_ANALYSE_PSUB8x8)   &&
                 (param.analyse.inter & X264_ANALYSE_BSUB16x16))
        {
            hb_dict_set(&x264_opts, "analyse", "all");
        }
        else
        {
            // build a comma-separated list of the enabled partitions
            buf[0] = '\0';
            if (param.analyse.inter & X264_ANALYSE_I4x4)
            {
                strcat(buf, "i4x4");
            }
            if (param.analyse.inter & X264_ANALYSE_I8x8)
            {
                if (*buf)
                    strcat(buf, ",");
                strcat(buf, "i8x8");
            }
            if (param.analyse.inter & X264_ANALYSE_PSUB16x16)
            {
                if (*buf)
                    strcat(buf, ",");
                strcat(buf, "p8x8");
            }
            if (param.analyse.inter & X264_ANALYSE_PSUB8x8)
            {
                if (*buf)
                    strcat(buf, ",");
                strcat(buf, "p4x4");
            }
            if (param.analyse.inter & X264_ANALYSE_BSUB16x16)
            {
                if (*buf)
                    strcat(buf, ",");
                strcat(buf, "b8x8");
            }
            hb_dict_set(&x264_opts, "analyse", buf);
        }
    }
    else
    {
        hb_dict_unset(&x264_opts, "analyse");
    }
    if (!param.analyse.b_transform_8x8 != !defaults.analyse.b_transform_8x8)
    {
        // can be modified by: preset ultrafast, profile
        snprintf(buf, sizeof(buf), "%d", !!param.analyse.b_transform_8x8);
        hb_dict_set(&x264_opts, "8x8dct", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "8x8dct");
    }
    if (param.analyse.i_weighted_pred != defaults.analyse.i_weighted_pred)
    {
        // can be modified by: presets, tune fastdecode, profile
        snprintf(buf, sizeof(buf), "%d", param.analyse.i_weighted_pred);
        hb_dict_set(&x264_opts, "weightp", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "weightp");
    }
    if (param.analyse.i_me_method != defaults.analyse.i_me_method)
    {
        // can be modified by: presets
        // clamp to the valid range before mapping to a name
        if (param.analyse.i_me_method < X264_ME_DIA)
            param.analyse.i_me_method = X264_ME_DIA;
        if (param.analyse.i_me_method > X264_ME_TESA)
            param.analyse.i_me_method = X264_ME_TESA;
        for (i = 0; x264_motion_est_names[i] != NULL; i++)
            if (param.analyse.i_me_method == i)
                hb_dict_set(&x264_opts, "me", x264_motion_est_names[i]);
    }
    else
    {
        hb_dict_unset(&x264_opts, "me");
    }
    if (param.analyse.i_me_range != defaults.analyse.i_me_range)
    {
        // can be modified by: presets
        snprintf(buf, sizeof(buf), "%d", param.analyse.i_me_range);
        hb_dict_set(&x264_opts, "merange", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "merange");
    }
    if (param.analyse.i_mv_range != defaults.analyse.i_mv_range)
    {
        // can be modified by: level
        snprintf(buf, sizeof(buf), "%d", param.analyse.i_mv_range);
        hb_dict_set(&x264_opts, "mvrange", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "mvrange");
    }
    if (param.analyse.i_subpel_refine > 9 && (param.rc.i_aq_mode == 0 ||
                                              param.analyse.i_trellis < 2))
    {
        // subme 10 and higher require AQ and trellis 2
        param.analyse.i_subpel_refine = 9;
    }
    if (param.analyse.i_subpel_refine != defaults.analyse.i_subpel_refine)
    {
        // can be modified by: presets
        snprintf(buf, sizeof(buf), "%d", param.analyse.i_subpel_refine);
        hb_dict_set(&x264_opts, "subme", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "subme");
    }
    if (!param.analyse.b_mixed_references !=
        !defaults.analyse.b_mixed_references)
    {
        // can be modified by: presets
        snprintf(buf, sizeof(buf), "%d", !!param.analyse.b_mixed_references);
        hb_dict_set(&x264_opts, "mixed-refs", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "mixed-refs");
    }
    if (param.analyse.i_trellis != defaults.analyse.i_trellis)
    {
        // can be modified by: presets
        snprintf(buf, sizeof(buf), "%d", param.analyse.i_trellis);
        hb_dict_set(&x264_opts, "trellis", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "trellis");
    }
    if (!param.analyse.b_fast_pskip != !defaults.analyse.b_fast_pskip)
    {
        // can be modified by: preset placebo
        snprintf(buf, sizeof(buf), "%d", !!param.analyse.b_fast_pskip);
        hb_dict_set(&x264_opts, "fast-pskip", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "fast-pskip");
    }
    if (!param.analyse.b_dct_decimate != !defaults.analyse.b_dct_decimate)
    {
        // can be modified by: tune grain
        snprintf(buf, sizeof(buf), "%d", !!param.analyse.b_dct_decimate);
        hb_dict_set(&x264_opts, "dct-decimate", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "dct-decimate");
    }
    if (!param.analyse.b_psy != !defaults.analyse.b_psy)
    {
        // can be modified by: tunes
        snprintf(buf, sizeof(buf), "%d", !!param.analyse.b_psy);
        hb_dict_set(&x264_opts, "psy", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "psy");
    }
    if (param.analyse.b_psy &&
        (param.analyse.f_psy_rd      != defaults.analyse.f_psy_rd ||
         param.analyse.f_psy_trellis != defaults.analyse.f_psy_trellis))
    {
        // can be modified by: tunes
        snprintf(buf, sizeof(buf), "%.2f,%.2f", param.analyse.f_psy_rd,
                 param.analyse.f_psy_trellis);
        hb_dict_set(&x264_opts, "psy-rd", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "psy-rd");
    }
    /*
     * Note: while deadzone is incompatible with trellis, it still has a slight
     * effect on the output even when trellis is on, so always unparse it.
     */
    if (param.analyse.i_luma_deadzone[0] != defaults.analyse.i_luma_deadzone[0])
    {
        // can be modified by: tune grain
        snprintf(buf, sizeof(buf), "%d", param.analyse.i_luma_deadzone[0]);
        hb_dict_set(&x264_opts, "deadzone-inter", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "deadzone-inter");
    }
    if (param.analyse.i_luma_deadzone[1] != defaults.analyse.i_luma_deadzone[1])
    {
        // can be modified by: tune grain
        snprintf(buf, sizeof(buf), "%d", param.analyse.i_luma_deadzone[1]);
        hb_dict_set(&x264_opts, "deadzone-intra", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "deadzone-intra");
    }
    if (param.rc.i_vbv_buffer_size != defaults.rc.i_vbv_buffer_size)
    {
        // can be modified by: level
        snprintf(buf, sizeof(buf), "%d", param.rc.i_vbv_buffer_size);
        hb_dict_set(&x264_opts, "vbv-bufsize", buf);
        if (param.rc.i_vbv_max_bitrate != defaults.rc.i_vbv_max_bitrate)
        {
            // can be modified by: level
            snprintf(buf, sizeof(buf), "%d", param.rc.i_vbv_max_bitrate);
            hb_dict_set(&x264_opts, "vbv-maxrate", buf);
        }
        else
        {
            hb_dict_unset(&x264_opts, "vbv-maxrate");
        }
    }
    else
    {
        hb_dict_unset(&x264_opts, "vbv-bufsize");
        hb_dict_unset(&x264_opts, "vbv-maxrate");
    }
    if (param.rc.f_ip_factor != defaults.rc.f_ip_factor)
    {
        // can be modified by: tune grain
        snprintf(buf, sizeof(buf), "%.2f", param.rc.f_ip_factor);
        hb_dict_set(&x264_opts, "ipratio", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "ipratio");
    }
    if (param.i_bframe > 0 && !param.rc.b_mb_tree &&
        param.rc.f_pb_factor != defaults.rc.f_pb_factor)
    {
        // can be modified by: tune grain
        snprintf(buf, sizeof(buf), "%.2f", param.rc.f_pb_factor);
        hb_dict_set(&x264_opts, "pbratio", buf);
    }
    else
    {
        // pbratio requires bframes and is incompatible with mbtree
        hb_dict_unset(&x264_opts, "pbratio");
    }
    if (param.rc.f_qcompress != defaults.rc.f_qcompress)
    {
        // can be modified by: tune grain
        snprintf(buf, sizeof(buf), "%.2f", param.rc.f_qcompress);
        hb_dict_set(&x264_opts, "qcomp", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "qcomp");
    }
    if (param.rc.i_aq_mode != defaults.rc.i_aq_mode)
    {
        // can be modified by: preset ultrafast, tune psnr
        snprintf(buf, sizeof(buf), "%d", param.rc.i_aq_mode);
        hb_dict_set(&x264_opts, "aq-mode", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "aq-mode");
    }
    if (param.rc.i_aq_mode > 0 &&
        param.rc.f_aq_strength != defaults.rc.f_aq_strength)
    {
        // can be modified by: tunes
        snprintf(buf, sizeof(buf), "%.2f", param.rc.f_aq_strength);
        hb_dict_set(&x264_opts, "aq-strength", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "aq-strength");
    }
    if (!param.rc.b_mb_tree != !defaults.rc.b_mb_tree)
    {
        // can be modified by: presets, tune zerolatency
        snprintf(buf, sizeof(buf), "%d", !!param.rc.b_mb_tree);
        hb_dict_set(&x264_opts, "mbtree", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "mbtree");
    }
    if (param.rc.i_lookahead != defaults.rc.i_lookahead)
    {
        // can be modified by: presets, tune zerolatency
        snprintf(buf, sizeof(buf), "%d", param.rc.i_lookahead);
        hb_dict_set(&x264_opts, "rc-lookahead", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "rc-lookahead");
    }
    if (!param.b_vfr_input != !defaults.b_vfr_input)
    {
        // can be modified by: tune zerolatency
        snprintf(buf, sizeof(buf), "%d", !param.b_vfr_input);
        hb_dict_set(&x264_opts, "force-cfr", buf);
    }
    else
    {
        hb_dict_unset(&x264_opts, "force-cfr");
    }
    /* convert the x264_opts dictionary to an encopts string */
    unparsed_opts = hb_dict_to_encopts(x264_opts);
    hb_dict_free(&x264_opts);
    /* we're done */
    return unparsed_opts;
}
const char * const * hb_x264_presets()
{
return x264_preset_names;
}
const char * const * hb_x264_tunes()
{
return x264_tune_names;
}
/* Accessor: the H.264 profile names known to libhb. */
const char * const * hb_h264_profiles()
{
    const char * const *profile_names = hb_h264_profile_names;
    return profile_names;
}
/* Accessor: the H.264 level names known to libhb. */
const char * const * hb_h264_levels()
{
    const char * const *level_names = hb_h264_level_names;
    return level_names;
}
/*
 * Map an x264 option synonym to its canonical name using the
 * hb_x264_encopt_synonyms table (pairs of {canonical, synonym}).
 * Returns the input pointer unchanged when no synonym matches.
 */
const char * hb_x264_encopt_name(const char *name)
{
    int i = 0;
    while (hb_x264_encopt_synonyms[i][0] != NULL)
    {
        if (strcmp(name, hb_x264_encopt_synonyms[i][1]) == 0)
        {
            return hb_x264_encopt_synonyms[i][0];
        }
        i++;
    }
    return name;
}
HandBrake-0.10.2/libhb/qsv_filter.h 0000664 0001752 0001752 00000003377 12205472744 017467 0 ustar handbrake handbrake /* ********************************************************************* *\
Copyright (C) 2013 Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\* ********************************************************************* */
#ifndef QSV_FILTER_H
#define QSV_FILTER_H
hb_buffer_t *link_buf_list( hb_filter_private_t *pv );
void qsv_filter_close( av_qsv_context* qsv, AV_QSV_STAGE_TYPE vpp_type );
#endif // QSV_FILTER_H
HandBrake-0.10.2/libhb/qsv_filter.c 0000664 0001752 0001752 00000064643 12317653727 017473 0 ustar handbrake handbrake /* ********************************************************************* *\
Copyright (C) 2013 Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\* ********************************************************************* */
#ifdef USE_QSV
#include "hb.h"
#include "hbffmpeg.h"
#include "libavcodec/qsv.h"
#include "qsv_filter.h"
/*
 * Private state for the Intel Quick Sync Video VPP filter.
 * Input/output geometry is captured at init; the Crop* fields describe the
 * region of the input surfaces actually processed by VPP (see filter_init,
 * which combines the decoder's crop rect with the user crop[] values).
 */
struct hb_filter_private_s
{
    hb_job_t *job;    // owning job (source of rates and QSV encode info)
    hb_list_t *list;  // buffer list used by this filter instance
    int width_in;     // input width in pixels
    int height_in;    // input height in pixels
    int pix_fmt;      // input pixel format
    int pix_fmt_out;  // output pixel format
    int width_out;    // output width in pixels
    int height_out;   // output height in pixels
    int crop[4];      // user crop amounts: [0]=top, [1]=bottom, [2]=left, [3]=right
    int deinterlace;  // nonzero: configure VPP to output progressive frames
    int is_frc_used;  // nonzero: in/out frame rates differ, FRC is active
    // set during init, used to configure input surfaces' "area of interest"
    mfxU16 CropX;
    mfxU16 CropY;
    mfxU16 CropH;
    mfxU16 CropW;
    av_qsv_space *vpp_space;  // shared QSV VPP workspace (surfaces, sync points)
    // FRC param(s)
    mfxExtVPPFrameRateConversion frc_config;
};
static int hb_qsv_filter_init( hb_filter_object_t * filter,
hb_filter_init_t * init );
static int hb_qsv_filter_work( hb_filter_object_t * filter,
hb_buffer_t ** buf_in,
hb_buffer_t ** buf_out );
static int hb_qsv_filter_info( hb_filter_object_t * filter,
hb_filter_info_t * info );
static void hb_qsv_filter_close( hb_filter_object_t * filter );
/*
 * Filter descriptor for the Quick Sync Video VPP filter; registers the
 * init/work/close/info callbacks with libhb's filter framework.
 */
hb_filter_object_t hb_filter_qsv =
{
    .id = HB_FILTER_QSV,
    .enforce_order = 1, // NOTE(review): presumably pins this filter's position in the chain — confirm against hb.h
    .name = "Quick Sync Video VPP",
    .settings = NULL,
    .init = hb_qsv_filter_init,
    .work = hb_qsv_filter_work,
    .close = hb_qsv_filter_close,
    .info = hb_qsv_filter_info,
};
static int filter_init( av_qsv_context* qsv, hb_filter_private_t * pv ){
mfxStatus sts;
int i=0;
if(!qsv) return 3;
if(!qsv->vpp_space){
qsv->vpp_space = av_qsv_list_init(HAVE_THREADS);
}
if(!pv->vpp_space){
for(i=0; ivpp_space);i++){
av_qsv_space *qsv_vpp = av_qsv_list_item( qsv->vpp_space, i );
if(qsv_vpp->type == AV_QSV_VPP_DEFAULT){
pv->vpp_space = qsv_vpp;
break;
}
}
}
if(!pv->vpp_space){
pv->vpp_space = calloc( 1, sizeof( av_qsv_space ));
pv->vpp_space->type = AV_QSV_VPP_DEFAULT;
av_qsv_list_add( qsv->vpp_space, pv->vpp_space );
}
else
if(pv->vpp_space->is_init_done ) return 1;
if(!qsv->dec_space || !qsv->dec_space->is_init_done) return 2;
// we need to know final output settings before we can properly configure
if (!pv->job->qsv.enc_info.is_init_done)
{
return 2;
}
av_qsv_add_context_usage(qsv,HAVE_THREADS);
// see params needed like at mediasdk-man.pdf:"Appendix A: Configuration Parameter Constraints"
// for now - most will take from the decode
{
av_qsv_space *qsv_vpp = pv->vpp_space;
AV_QSV_ZERO_MEMORY(qsv_vpp->m_mfxVideoParam);
if (pv->deinterlace)
{
/*
* Input may be progressive, interlaced or even mixed, so init with
* MFX_PICSTRUCT_UNKNOWN and use per-frame field order information
* (mfxFrameSurface1.Info.PicStruct)
*/
qsv_vpp->m_mfxVideoParam.vpp.In.PicStruct = MFX_PICSTRUCT_UNKNOWN;
qsv_vpp->m_mfxVideoParam.vpp.Out.PicStruct = MFX_PICSTRUCT_PROGRESSIVE;
}
else
{
/* Same PicStruct in/out: no filtering */
qsv_vpp->m_mfxVideoParam.vpp.In.PicStruct = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.PicStruct;
qsv_vpp->m_mfxVideoParam.vpp.Out.PicStruct = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.PicStruct;
}
// FrameRate is important for VPP to start with
if( qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FrameRateExtN == 0 &&
qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FrameRateExtD == 0 ){
qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FrameRateExtN = pv->job->title->rate;
qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FrameRateExtD = pv->job->title->rate_base;
}
/*
* In theory, input width/height and decode CropW/CropH should be the
* same; however, due to some versions of Libav not applying the H.264
* "crop rect" properly, there can be a mismatch.
*
* Since we want the same bahevior regardless of whether we're using
* software or hardware-accelerated decoding, prefer the Libav values.
*
* Note that since CropW/CropH may be higher than the decode values, we
* need to adjust CropX/CropY to make sure we don't exceed the input's
* Width/Height boundaries.
*/
pv->CropW = pv-> width_in;
pv->CropH = pv->height_in;
pv->CropX = FFMIN(qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.CropX,
qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.Width - pv->CropW);
pv->CropY = FFMIN(qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.CropY,
qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.Height - pv->CropH);
/* Then, apply additional cropping requested by the user, if any */
pv->CropX += pv->crop[2];
pv->CropY += pv->crop[0];
pv->CropW -= pv->crop[2] + pv->crop[3];
pv->CropH -= pv->crop[0] + pv->crop[1];
qsv_vpp->m_mfxVideoParam.vpp.In.FourCC = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FourCC;
qsv_vpp->m_mfxVideoParam.vpp.In.ChromaFormat = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.ChromaFormat;
qsv_vpp->m_mfxVideoParam.vpp.In.FrameRateExtN = pv->job->vrate;
qsv_vpp->m_mfxVideoParam.vpp.In.FrameRateExtD = pv->job->vrate_base;
qsv_vpp->m_mfxVideoParam.vpp.In.AspectRatioW = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.AspectRatioW;
qsv_vpp->m_mfxVideoParam.vpp.In.AspectRatioH = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.AspectRatioH;
qsv_vpp->m_mfxVideoParam.vpp.In.Width = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.Width;
qsv_vpp->m_mfxVideoParam.vpp.In.Height = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.Height;
qsv_vpp->m_mfxVideoParam.vpp.In.CropX = pv->CropX;
qsv_vpp->m_mfxVideoParam.vpp.In.CropY = pv->CropY;
qsv_vpp->m_mfxVideoParam.vpp.In.CropW = pv->CropW;
qsv_vpp->m_mfxVideoParam.vpp.In.CropH = pv->CropH;
qsv_vpp->m_mfxVideoParam.vpp.Out.FourCC = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.FourCC;
qsv_vpp->m_mfxVideoParam.vpp.Out.ChromaFormat = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.ChromaFormat;
qsv_vpp->m_mfxVideoParam.vpp.Out.FrameRateExtN = pv->job->vrate;
qsv_vpp->m_mfxVideoParam.vpp.Out.FrameRateExtD = pv->job->vrate_base;
qsv_vpp->m_mfxVideoParam.vpp.Out.AspectRatioW = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.AspectRatioW;
qsv_vpp->m_mfxVideoParam.vpp.Out.AspectRatioH = qsv->dec_space->m_mfxVideoParam.mfx.FrameInfo.AspectRatioH;
qsv_vpp->m_mfxVideoParam.vpp.Out.Width = pv->job->qsv.enc_info.align_width;
qsv_vpp->m_mfxVideoParam.vpp.Out.Height = pv->job->qsv.enc_info.align_height;
qsv_vpp->m_mfxVideoParam.vpp.Out.CropX = 0; // no letterboxing
qsv_vpp->m_mfxVideoParam.vpp.Out.CropY = 0; // no pillarboxing
qsv_vpp->m_mfxVideoParam.vpp.Out.CropW = pv-> width_out;
qsv_vpp->m_mfxVideoParam.vpp.Out.CropH = pv->height_out;
qsv_vpp->m_mfxVideoParam.IOPattern = MFX_IOPATTERN_IN_OPAQUE_MEMORY | MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
qsv_vpp->m_mfxVideoParam.AsyncDepth = pv->job->qsv.async_depth;
memset(&qsv_vpp->request, 0, sizeof(mfxFrameAllocRequest)*2);
sts = MFXVideoVPP_QueryIOSurf(qsv->mfx_session, &qsv_vpp->m_mfxVideoParam, qsv_vpp->request );
AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
int num_surfaces_in = qsv_vpp->request[0].NumFrameSuggested;
int num_surfaces_out = qsv_vpp->request[1].NumFrameSuggested;
av_qsv_config *config = qsv->qsv_config;
qsv_vpp->surface_num = FFMIN( num_surfaces_in + num_surfaces_out + qsv_vpp->m_mfxVideoParam.AsyncDepth + config ? config->additional_buffers/2 :0 , AV_QSV_SURFACE_NUM );
if(qsv_vpp->surface_num <= 0 )
qsv_vpp->surface_num = AV_QSV_SURFACE_NUM;
int i = 0;
for (i = 0; i < qsv_vpp->surface_num; i++){
qsv_vpp->p_surfaces[i] = av_mallocz( sizeof(mfxFrameSurface1) );
AV_QSV_CHECK_POINTER(qsv_vpp->p_surfaces[i], MFX_ERR_MEMORY_ALLOC);
memcpy(&(qsv_vpp->p_surfaces[i]->Info), &(qsv_vpp->m_mfxVideoParam.vpp.Out), sizeof(mfxFrameInfo));
}
qsv_vpp->sync_num = FFMIN( qsv_vpp->surface_num, AV_QSV_SYNC_NUM );
for (i = 0; i < qsv_vpp->sync_num; i++){
qsv_vpp->p_syncp[i] = av_mallocz(sizeof(av_qsv_sync));
AV_QSV_CHECK_POINTER(qsv_vpp->p_syncp[i], MFX_ERR_MEMORY_ALLOC);
qsv_vpp->p_syncp[i]->p_sync = av_mallocz(sizeof(mfxSyncPoint));
AV_QSV_CHECK_POINTER(qsv_vpp->p_syncp[i]->p_sync, MFX_ERR_MEMORY_ALLOC);
}
/*
about available VPP filters, see "Table 4 Configurable VPP filters", mediasdk-man.pdf
Hints (optional feature) IDs:
MFX_EXTBUFF_VPP_DENOISE // Remove noise
// Value of 0-100 (inclusive) indicates
// the level of noise to remove.
MFX_EXTBUFF_VPP_DETAIL // Enhance picture details/edges:
// 0-100 value (inclusive) to indicate
// the level of details to be enhanced.
MFX_EXTBUFF_VPP_FRAME_RATE_CONVERSION // Convert input frame rate to match the output, based on frame interpolation:
// MFX_FRCALGM_PRESERVE_TIMESTAMP,
// MFX_FRCALGM_DISTRIBUTED_TIMESTAMP,
// MFX_FRCALGM_FRAME_INTERPOLATION
MFX_EXTBUFF_VPP_IMAGE_STABILIZATION // Perform image stabilization
// Stabilization modes:
// MFX_IMAGESTAB_MODE_UPSCALE
// MFX_IMAGESTAB_MODE_BOXING
MFX_EXTBUFF_VPP_PICSTRUCT_DETECTION // Perform detection of picture structure:
// Detected picture structure - top field first, bottom field first, progressive or unknown
// if video processor cannot detect picture structure.
MFX_EXTBUFF_VPP_PROCAMP // Adjust the brightness, contrast, saturation, and hue settings
// Initialize extended buffer for frame processing
// - Process amplifier (ProcAmp) used to control brightness
// - mfxExtVPPDoUse: Define the processing algorithm to be used
// - mfxExtVPPProcAmp: ProcAmp configuration
// - mfxExtBuffer: Add extended buffers to VPP parameter configuration
mfxExtVPPDoUse extDoUse;
mfxU32 tabDoUseAlg[1];
extDoUse.Header.BufferId = MFX_EXTBUFF_VPP_DOUSE;
extDoUse.Header.BufferSz = sizeof(mfxExtVPPDoUse);
extDoUse.NumAlg = 1;
extDoUse.AlgList = tabDoUseAlg;
tabDoUseAlg[0] = MFX_EXTBUFF_VPP_PROCAMP;
mfxExtVPPProcAmp procampConfig;
procampConfig.Header.BufferId = MFX_EXTBUFF_VPP_PROCAMP;
procampConfig.Header.BufferSz = sizeof(mfxExtVPPProcAmp);
procampConfig.Hue = 0.0f; // Default
procampConfig.Saturation = 1.0f; // Default
procampConfig.Contrast = 1.0; // Default
procampConfig.Brightness = 40.0; // Adjust brightness
mfxExtBuffer* ExtBuffer[2];
ExtBuffer[0] = (mfxExtBuffer*)&extDoUse;
ExtBuffer[1] = (mfxExtBuffer*)&procampConfig;
VPPParams.NumExtParam = 2;
VPPParams.ExtParam = (mfxExtBuffer**)&ExtBuffer[0];
*/
memset(&qsv_vpp->ext_opaque_alloc, 0, sizeof(qsv_vpp->ext_opaque_alloc));
if( (qsv_vpp->m_mfxVideoParam.vpp.In.FrameRateExtN / qsv_vpp->m_mfxVideoParam.vpp.In.FrameRateExtD ) !=
(qsv_vpp->m_mfxVideoParam.vpp.Out.FrameRateExtN / qsv_vpp->m_mfxVideoParam.vpp.Out.FrameRateExtD) )
{
pv->is_frc_used = 1;
}
qsv_vpp->m_mfxVideoParam.NumExtParam = qsv_vpp->p_ext_param_num = 1 + pv->is_frc_used;
qsv_vpp->p_ext_params = av_mallocz(sizeof(mfxExtBuffer *)*qsv_vpp->p_ext_param_num);
AV_QSV_CHECK_POINTER(qsv_vpp->p_ext_params, MFX_ERR_MEMORY_ALLOC);
qsv_vpp->m_mfxVideoParam.ExtParam = qsv_vpp->p_ext_params;
qsv_vpp->ext_opaque_alloc.In.Surfaces = qsv->dec_space->p_surfaces;
qsv_vpp->ext_opaque_alloc.In.NumSurface = qsv->dec_space->surface_num;
qsv_vpp->ext_opaque_alloc.In.Type = qsv->dec_space->request[0].Type;
qsv_vpp->ext_opaque_alloc.Out.Surfaces = qsv_vpp->p_surfaces;
qsv_vpp->ext_opaque_alloc.Out.NumSurface = qsv_vpp->surface_num;
qsv_vpp->ext_opaque_alloc.Out.Type = qsv->dec_space->request[0].Type;
qsv_vpp->ext_opaque_alloc.Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION;
qsv_vpp->ext_opaque_alloc.Header.BufferSz = sizeof(mfxExtOpaqueSurfaceAlloc);
qsv_vpp->p_ext_params[0] = (mfxExtBuffer*)&qsv_vpp->ext_opaque_alloc;
if(pv->is_frc_used)
{
pv->frc_config.Header.BufferId = MFX_EXTBUFF_VPP_FRAME_RATE_CONVERSION;
pv->frc_config.Header.BufferSz = sizeof(mfxExtVPPFrameRateConversion);
pv->frc_config.Algorithm = MFX_FRCALGM_PRESERVE_TIMESTAMP;
qsv_vpp->p_ext_params[1] = (mfxExtBuffer*)&pv->frc_config;
}
sts = MFXVideoVPP_Init(qsv->mfx_session, &qsv_vpp->m_mfxVideoParam);
AV_QSV_IGNORE_MFX_STS(sts, MFX_WRN_PARTIAL_ACCELERATION);
AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
qsv_vpp->is_init_done = 1;
}
return 0;
}
static int hb_qsv_filter_init( hb_filter_object_t * filter,
                               hb_filter_init_t * init )
{
    // Allocate the filter's private data, copy the geometry/crop settings
    // coming from the previous pipeline stage, then apply user overrides
    // from the settings string. Returns 0 on success, -1 on OOM.
    filter->private_data = calloc( 1, sizeof(struct hb_filter_private_s) );
    hb_filter_private_t * pv = filter->private_data;
    if (pv == NULL)
    {
        // bug fix: calloc result was dereferenced without a check
        hb_error("hb_qsv_filter_init: calloc failed");
        return -1;
    }

    pv->list = hb_list_init();

    // list of init params provided at work.c:~700
    pv->width_in   = init->width;
    pv->height_in  = init->height;
    pv->width_out  = init->width;
    pv->height_out = init->height;
    memcpy( pv->crop, init->crop, sizeof( int[4] ) );

    if (filter->settings != NULL)
    {
        // "width:height:top:bottom:left:right_dei:deinterlace"
        sscanf(filter->settings, "%d:%d:%d:%d:%d:%d_dei:%d",
               &pv->width_out, &pv->height_out,
               &pv->crop[0], &pv->crop[1], &pv->crop[2], &pv->crop[3],
               &pv->deinterlace);
    }

    pv->job = init->job;

    // will be later as more params will be known
    // filter_init(pv->job->qsv, pv);

    // vrate/vrate_base pass through unchanged (removed no-op
    // self-assignments); framerate shaping not yet supported
    init->cfr     = 0;
    init->pix_fmt = pv->pix_fmt;
    init->width   = pv->width_out;
    init->height  = pv->height_out;
    memcpy( init->crop, pv->crop, sizeof( int[4] ) );

    return 0;
}
static int hb_qsv_filter_info( hb_filter_object_t * filter,
                               hb_filter_info_t * info )
{
    // Describe the configured crop/scale operation for the activity log.
    hb_filter_private_t *pv = filter->private_data;
    if (pv == NULL)
    {
        return -1;
    }

    char *desc = info->human_readable_desc;
    sprintf(desc,
            "source: %d * %d, crop (%d/%d/%d/%d): %d * %d, scale: %d * %d",
            pv->width_in, pv->height_in,
            pv->crop[0], pv->crop[1], pv->crop[2], pv->crop[3],
            pv->width_in  - pv->crop[2] - pv->crop[3],
            pv->height_in - pv->crop[0] - pv->crop[1],
            pv->width_out, pv->height_out);

    if (pv->deinterlace)
    {
        strcat(desc, ", deinterlace");
    }

    return 0;
}
// Tear down the VPP space matching vpp_type: free its surfaces, sync points
// and extended-parameter array, then remove it from the context's vpp_space
// list (closing the list when it becomes empty). Only the members are freed
// here, not the av_qsv_space struct itself.
void qsv_filter_close( av_qsv_context* qsv, AV_QSV_STAGE_TYPE vpp_type ){
int i = 0;
av_qsv_space* vpp_space = 0;
if(qsv && qsv->is_context_active && qsv->vpp_space)
// walk the list backwards looking for the space of the requested type;
// note 'i' is reused by the inner loops below, which is safe only
// because we break out of this outer loop right after the match
for(i=av_qsv_list_count( qsv->vpp_space);i>0;i--){
vpp_space = av_qsv_list_item( qsv->vpp_space, i-1 );
if( vpp_space->type == vpp_type && vpp_space->is_init_done){
hb_log( "qsv_filter[%s] done: max_surfaces: %u/%u , max_syncs: %u/%u", ((vpp_type == AV_QSV_VPP_DEFAULT)?"Default": "User") ,vpp_space->surface_num_max_used, vpp_space->surface_num, vpp_space->sync_num_max_used, vpp_space->sync_num );
// release every allocated frame surface
for (i = 0; i < vpp_space->surface_num; i++){
av_freep(&vpp_space->p_surfaces[i]);
}
vpp_space->surface_num = 0;
// release the extended-parameter pointer array (entries point at
// members of other structs and are not owned here)
if( vpp_space->p_ext_param_num || vpp_space->p_ext_params )
av_freep(&vpp_space->p_ext_params);
vpp_space->p_ext_param_num = 0;
// release the sync points (each holds a separately allocated mfxSyncPoint)
for (i = 0; i < vpp_space->sync_num; i++){
av_freep(&vpp_space->p_syncp[i]->p_sync);
av_freep(&vpp_space->p_syncp[i]);
}
vpp_space->sync_num = 0;
// detach from the context; close the list once the last space is gone
av_qsv_list_rem(qsv->vpp_space,vpp_space);
if( av_qsv_list_count(qsv->vpp_space) == 0 )
av_qsv_list_close(&qsv->vpp_space);
vpp_space->is_init_done = 0;
break;
}
}
}
static void hb_qsv_filter_close( hb_filter_object_t * filter )
{
    // Release this filter's private data and, when a QSV context with an
    // active VPP space exists, tear down both the filter's VPP resources
    // and the shared QSV context.
    hb_filter_private_t * pv = filter->private_data;

    if ( !pv )
    {
        return;
    }

    av_qsv_context* qsv = pv->job->qsv.ctx;
    if (qsv && qsv->vpp_space && av_qsv_list_count(qsv->vpp_space) > 0)
    {
        // closing local stuff
        qsv_filter_close(qsv, AV_QSV_VPP_DEFAULT);
        // closing the common stuff
        av_qsv_context_clean(qsv);
    }
    // cleanup: removed unused local 'int i'
    hb_list_close(&pv->list);
    free( pv );
    filter->private_data = NULL;
}
// Submit one frame (or a flush when received_item is NULL) to the VPP and
// retry around transient MediaSDK statuses (MORE_SURFACE, DEVICE_BUSY).
// Returns 1 when an output stage was produced, 0 otherwise. The exact
// order of Locked/in_use increments and decrements below mirrors the
// MediaSDK async contract — do not reorder.
int process_frame(av_qsv_list* received_item, av_qsv_context* qsv, hb_filter_private_t * pv ){
// 1 if have results , 0 - otherwise
int ret = 1;
mfxStatus sts = MFX_ERR_NONE;
mfxFrameSurface1 *work_surface = NULL;
av_qsv_stage* stage = 0;
av_qsv_space *qsv_vpp = pv->vpp_space;
// the input surface comes from the last stage of the incoming atom list
if(received_item){
stage = av_qsv_get_last_stage( received_item );
work_surface = stage->out.p_surface;
}
int sync_idx = av_qsv_get_free_sync(qsv_vpp, qsv);
int surface_idx = -1;
for(;;)
{
if (sync_idx == -1)
{
hb_error("qsv: Not enough resources allocated for QSV filter");
ret = 0;
break;
}
// (re)acquire an output surface only when the previous attempt consumed it
if( sts == MFX_ERR_MORE_SURFACE || sts == MFX_ERR_NONE )
surface_idx = av_qsv_get_free_surface(qsv_vpp, qsv, &(qsv_vpp->m_mfxVideoParam.vpp.Out), QSV_PART_ANY);
if (surface_idx == -1) {
hb_error("qsv: Not enough resources allocated for QSV filter");
ret = 0;
break;
}
// apply the crop rectangle requested for this filter to the input surface
if (work_surface != NULL)
{
work_surface->Info.CropX = pv->CropX;
work_surface->Info.CropY = pv->CropY;
work_surface->Info.CropW = pv->CropW;
work_surface->Info.CropH = pv->CropH;
}
sts = MFXVideoVPP_RunFrameVPPAsync(qsv->mfx_session, work_surface, qsv_vpp->p_surfaces[surface_idx] , NULL, qsv_vpp->p_syncp[sync_idx]->p_sync);
// VPP buffered the input without producing output: park the atom on the
// pending list so its resources can be reclaimed with a later output
if( MFX_ERR_MORE_DATA == sts ){
if(!qsv_vpp->pending){
qsv_vpp->pending = av_qsv_list_init(0);
}
// if we have no results, we should not miss resource(s)
av_qsv_list_add( qsv_vpp->pending, received_item);
ff_qsv_atomic_dec(&qsv_vpp->p_syncp[sync_idx]->in_use);
ret = 0;
break;
}
// input surface was consumed (or buffered): drop our lock on it
if( MFX_ERR_MORE_DATA == sts || (MFX_ERR_NONE <= sts && MFX_WRN_DEVICE_BUSY != sts)){
if (work_surface){
ff_qsv_atomic_dec(&work_surface->Data.Locked);
}
}
// an output surface is now owned by the pipeline: lock it
if( MFX_ERR_MORE_SURFACE == sts || MFX_ERR_NONE <= sts){
if( MFX_ERR_MORE_SURFACE == sts )
continue;
if (qsv_vpp->p_surfaces[surface_idx] && MFX_WRN_DEVICE_BUSY != sts )
ff_qsv_atomic_inc(&qsv_vpp->p_surfaces[surface_idx]->Data.Locked);
}
AV_QSV_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
if (MFX_ERR_NONE <= sts ) // repeat the call if warning and no output
{
if (MFX_WRN_DEVICE_BUSY == sts){
av_qsv_sleep(10); // wait if device is busy
continue;
}
// shouldnt be a case but drain
if(stage){
// wrap the produced surface + sync point in a new stage and
// append it to the atom list handed back to the caller
av_qsv_stage* new_stage = av_qsv_stage_init();
new_stage->type = AV_QSV_VPP_DEFAULT;
new_stage->in.p_surface = work_surface;
new_stage->out.p_surface = qsv_vpp->p_surfaces[surface_idx];
new_stage->out.sync = qsv_vpp->p_syncp[sync_idx];
av_qsv_add_stagee( &received_item, new_stage,HAVE_THREADS );
// add pending resources for the proper reclaim later
if( qsv_vpp->pending ){
if( av_qsv_list_count(qsv_vpp->pending)>0 ){
new_stage->pending = qsv_vpp->pending;
}
qsv_vpp->pending = 0;
// making free via decrement for all pending
int i = 0;
for (i = av_qsv_list_count(new_stage->pending); i > 0; i--){
av_qsv_list *atom_list = av_qsv_list_item(new_stage->pending, i-1);
av_qsv_stage *stage = av_qsv_get_last_stage( atom_list );
mfxFrameSurface1 *work_surface = stage->out.p_surface;
if (work_surface)
ff_qsv_atomic_dec(&work_surface->Data.Locked);
}
}
}
break;
}
// hard failure path: release the sync point we grabbed above
ff_qsv_atomic_dec(&qsv_vpp->p_syncp[sync_idx]->in_use);
if (MFX_ERR_NOT_ENOUGH_BUFFER == sts)
HB_DEBUG_ASSERT(1, "The bitstream buffer size is insufficient.");
break;
}
return ret;
}
static int hb_qsv_filter_work( hb_filter_object_t * filter,
                               hb_buffer_t ** buf_in,
                               hb_buffer_t ** buf_out )
{
    // Per-buffer entry point: lazily finish VPP init, feed the buffer to
    // process_frame(), and hand back at most one processed buffer.
    hb_filter_private_t * pv = filter->private_data;
    hb_buffer_t * in  = *buf_in;
    hb_buffer_t * out = *buf_out;
    int sts = 0;

    // bug fix: pv was dereferenced (pv->job) before this NULL check
    if ( !pv )
    {
        *buf_out = in;
        *buf_in  = NULL;
        return HB_FILTER_OK;
    }

    av_qsv_context* qsv = pv->job->qsv.ctx;

    // full VPP init can only complete once the decoder's space exists;
    // filter_init() returns >= 2 while it is still waiting
    while (1)
    {
        int ret = filter_init(qsv, pv);
        if (ret >= 2)
            av_qsv_sleep(1);
        else
            break;
    }

    *buf_in = NULL;

    if ( in->size <= 0 )
    {
        // EOF: drain everything still buffered inside the VPP
        while (1)
        {
            sts = process_frame(in->qsv_details.qsv_atom, qsv, pv);
            if (sts)
                hb_list_add(pv->list, in);
            else
                break;
        }

        hb_list_add( pv->list, in );
        *buf_out = link_buf_list( pv );
        return HB_FILTER_DONE;
    }

    sts = process_frame(in->qsv_details.qsv_atom, qsv, pv);
    if (sts)
    {
        hb_list_add(pv->list, in);
    }

    if ( hb_list_count(pv->list) )
    {
        *buf_out = hb_list_item(pv->list, 0);
        out = *buf_out;
        if (pv->is_frc_used && out)
        {
            // frame-rate conversion changed the cadence: restamp the
            // buffer from the surface timestamp and the output frame rate
            if (out->qsv_details.qsv_atom)
            {
                av_qsv_stage* stage = av_qsv_get_last_stage( out->qsv_details.qsv_atom );
                mfxFrameSurface1 *work_surface = stage->out.p_surface;

                av_qsv_wait_on_sync( qsv, stage );

                av_qsv_space *qsv_vpp = pv->vpp_space;
                // duration of one output frame in 90kHz clock ticks
                int64_t duration = ((double)qsv_vpp->m_mfxVideoParam.vpp.Out.FrameRateExtD /
                                    (double)qsv_vpp->m_mfxVideoParam.vpp.Out.FrameRateExtN) * 90000.;
                out->s.start = work_surface->Data.TimeStamp;
                out->s.stop  = work_surface->Data.TimeStamp + duration;
            }
        }
        hb_list_rem(pv->list, *buf_out);
    }
    else
        *buf_out = NULL;

    return HB_FILTER_OK;
}
// Chain every buffer accumulated in pv->list into a singly linked list via
// the buffers' next pointers, emptying the list; returns the head (or NULL).
// Same pattern as decavcodec.c.
hb_buffer_t *link_buf_list( hb_filter_private_t *pv )
{
    hb_buffer_t *head = hb_list_item( pv->list, 0 );
    if ( head == NULL )
    {
        return NULL;
    }
    hb_list_rem( pv->list, head );

    hb_buffer_t *tail = head;
    hb_buffer_t *next;
    while ( ( next = hb_list_item( pv->list, 0 ) ) != NULL )
    {
        hb_list_rem( pv->list, next );
        tail->next = next;
        tail = next;
    }
    return head;
}
#endif // USE_QSV
HandBrake-0.10.2/libhb/opencl.c 0000664 0001752 0001752 00000031202 12463330511 016537 0 ustar handbrake handbrake /* opencl.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#ifdef _WIN32
#include
#define HB_OCL_DLOPEN LoadLibraryW(L"OpenCL")
#define HB_OCL_DLSYM GetProcAddress
#define HB_OCL_DLCLOSE FreeLibrary
#else
#include
#ifdef __APPLE__
#define HB_OCL_DLOPEN dlopen("/System/Library/Frameworks/OpenCL.framework/OpenCL", RTLD_NOW)
#else
#define HB_OCL_DLOPEN dlopen("libOpenCL.so", RTLD_NOW)
#endif
#define HB_OCL_DLSYM dlsym
#define HB_OCL_DLCLOSE dlclose
#endif
#include "common.h"
#include "opencl.h"
hb_opencl_library_t *hb_ocl = NULL;
// Lazily load the OpenCL runtime into the process-wide hb_ocl handle.
// Safe to call repeatedly; returns 0 on success, -1 when the library
// cannot be loaded.
int hb_ocl_init()
{
    if (hb_ocl != NULL)
    {
        return 0;
    }
    hb_ocl = hb_opencl_library_init();
    return (hb_ocl != NULL) ? 0 : -1;
}
// Unload the process-wide OpenCL library handle (hb_ocl) and reset it to
// NULL; safe to call even if hb_ocl_init() never ran or failed.
void hb_ocl_close()
{
hb_opencl_library_close(&hb_ocl);
}
// Dynamically load the OpenCL runtime and resolve every entry point used
// by HandBrake into a freshly allocated hb_opencl_library_t. Returns NULL
// (after cleaning up) when the library or any required symbol is missing.
// Caller releases the result with hb_opencl_library_close().
hb_opencl_library_t* hb_opencl_library_init()
{
hb_opencl_library_t *opencl;
if ((opencl = calloc(1, sizeof(hb_opencl_library_t))) == NULL)
{
hb_error("hb_opencl_library_init: memory allocation failure");
goto fail;
}
// platform-specific dlopen/LoadLibrary, see HB_OCL_DLOPEN above
opencl->library = HB_OCL_DLOPEN;
if (opencl->library == NULL)
{
goto fail;
}
// resolve one symbol into the matching struct member; any miss aborts init
#define HB_OCL_LOAD(func) \
{ \
if ((opencl->func = (void*)HB_OCL_DLSYM(opencl->library, #func)) == NULL) \
{ \
hb_log("hb_opencl_library_init: failed to load function '%s'", #func); \
goto fail; \
} \
}
HB_OCL_LOAD(clBuildProgram);
HB_OCL_LOAD(clCreateBuffer);
HB_OCL_LOAD(clCreateCommandQueue);
HB_OCL_LOAD(clCreateContextFromType);
HB_OCL_LOAD(clCreateKernel);
HB_OCL_LOAD(clCreateProgramWithBinary);
HB_OCL_LOAD(clCreateProgramWithSource);
HB_OCL_LOAD(clEnqueueCopyBuffer);
HB_OCL_LOAD(clEnqueueMapBuffer);
HB_OCL_LOAD(clEnqueueNDRangeKernel);
HB_OCL_LOAD(clEnqueueReadBuffer);
HB_OCL_LOAD(clEnqueueUnmapMemObject);
HB_OCL_LOAD(clEnqueueWriteBuffer);
HB_OCL_LOAD(clFlush);
HB_OCL_LOAD(clGetCommandQueueInfo);
HB_OCL_LOAD(clGetContextInfo);
HB_OCL_LOAD(clGetDeviceIDs);
HB_OCL_LOAD(clGetDeviceInfo);
HB_OCL_LOAD(clGetPlatformIDs);
HB_OCL_LOAD(clGetPlatformInfo);
HB_OCL_LOAD(clGetProgramBuildInfo);
HB_OCL_LOAD(clGetProgramInfo);
HB_OCL_LOAD(clReleaseCommandQueue);
HB_OCL_LOAD(clReleaseContext);
HB_OCL_LOAD(clReleaseEvent);
HB_OCL_LOAD(clReleaseKernel);
HB_OCL_LOAD(clReleaseMemObject);
HB_OCL_LOAD(clReleaseProgram);
HB_OCL_LOAD(clSetKernelArg);
HB_OCL_LOAD(clWaitForEvents);
//success
return opencl;
fail:
// handles both the partially-initialized and the NULL case
hb_opencl_library_close(&opencl);
return NULL;
}
// Unload the dynamic library (if loaded), free the wrapper struct, and
// reset *_opencl to NULL. Tolerates NULL and partially-initialized input.
void hb_opencl_library_close(hb_opencl_library_t **_opencl)
{
    if (_opencl == NULL)
    {
        return;
    }

    hb_opencl_library_t *lib = *_opencl;
    *_opencl = NULL;

    if (lib == NULL)
    {
        return;
    }
    if (lib->library != NULL)
    {
        HB_OCL_DLCLOSE(lib->library);
    }
    free(lib);
}
static int hb_opencl_device_is_supported(hb_opencl_device_t* device)
{
    // GPU-only for now: CPU devices are unsupported, NVIDIA is disabled
    // due to a bug, and Intel Ivy Bridge GPUs are too slow to be usable.
    if (device == NULL)
    {
        return 0;
    }
    if (!(device->type & CL_DEVICE_TYPE_GPU))
    {
        return 0;
    }
    if (device->ocl_vendor == HB_OCL_VENDOR_NVIDIA)
    {
        return 0;
    }
    if (device->ocl_vendor == HB_OCL_VENDOR_INTEL &&
        hb_get_cpu_platform() == HB_CPU_PLATFORM_INTEL_IVB)
    {
        return 0;
    }

    // parse "OpenCL <major>.<minor>" and enforce the minimum version
    int major, minor;
    if (sscanf(device->version, "OpenCL %d.%d", &major, &minor) != 2)
    {
        return 0;
    }
    return (major >  HB_OCL_MINVERSION_MAJOR) ||
           (major == HB_OCL_MINVERSION_MAJOR &&
            minor >= HB_OCL_MINVERSION_MINOR);
}
// Build a heap-allocated descriptor for one OpenCL device: query its
// vendor, name, version, type, platform and driver strings, then classify
// the vendor. Returns NULL on any failure; caller frees the result.
static hb_opencl_device_t* hb_opencl_device_get(hb_opencl_library_t *opencl,
                                                cl_device_id device_id)
{
    if (opencl == NULL || opencl->clGetDeviceInfo == NULL)
    {
        hb_error("hb_opencl_device_get: OpenCL support not available");
        return NULL;
    }
    if (device_id == NULL)
    {
        hb_error("hb_opencl_device_get: invalid device ID");
        return NULL;
    }

    hb_opencl_device_t *dev = calloc(1, sizeof(hb_opencl_device_t));
    if (dev == NULL)
    {
        hb_error("hb_opencl_device_get: memory allocation failure");
        return NULL;
    }
    dev->id = device_id;

    // accumulate failures; any non-CL_SUCCESS (0) result makes err nonzero
    cl_int err = CL_SUCCESS;
    err |= opencl->clGetDeviceInfo(dev->id, CL_DEVICE_VENDOR,   sizeof(dev->vendor),
                                   dev->vendor,    NULL);
    err |= opencl->clGetDeviceInfo(dev->id, CL_DEVICE_NAME,     sizeof(dev->name),
                                   dev->name,      NULL);
    err |= opencl->clGetDeviceInfo(dev->id, CL_DEVICE_VERSION,  sizeof(dev->version),
                                   dev->version,   NULL);
    err |= opencl->clGetDeviceInfo(dev->id, CL_DEVICE_TYPE,     sizeof(dev->type),
                                   &dev->type,     NULL);
    err |= opencl->clGetDeviceInfo(dev->id, CL_DEVICE_PLATFORM, sizeof(dev->platform),
                                   &dev->platform, NULL);
    err |= opencl->clGetDeviceInfo(dev->id, CL_DRIVER_VERSION,  sizeof(dev->driver),
                                   dev->driver,    NULL);
    if (err != CL_SUCCESS)
    {
        free(dev);
        return NULL;
    }

    // classify the vendor from its (free-form) vendor string
    if (strcmp(dev->vendor, "Advanced Micro Devices, Inc.") == 0 ||
        strcmp(dev->vendor, "AMD") == 0)
    {
        dev->ocl_vendor = HB_OCL_VENDOR_AMD;
    }
    else if (strncmp(dev->vendor, "NVIDIA", 6 /* strlen("NVIDIA") */) == 0)
    {
        dev->ocl_vendor = HB_OCL_VENDOR_NVIDIA;
    }
    else if (strncmp(dev->vendor, "Intel", 5 /* strlen("Intel") */) == 0)
    {
        dev->ocl_vendor = HB_OCL_VENDOR_INTEL;
    }
    else
    {
        dev->ocl_vendor = HB_OCL_VENDOR_OTHER;
    }
    return dev;
}
// Free every device descriptor in the list, then close the list itself
// (*_list becomes NULL). Tolerates NULL input.
static void hb_opencl_devices_list_close(hb_list_t **_list)
{
    if (_list != NULL)
    {
        hb_list_t *list = *_list;
        while (list != NULL && hb_list_count(list) > 0)
        {
            hb_opencl_device_t *device = hb_list_item(list, 0);
            if (device != NULL)
            {
                hb_list_rem(list, device);
                free(device);
            }
        }
    }
    hb_list_close(_list);
}
// Enumerate all OpenCL devices of the given type across every platform and
// return them as a list of hb_opencl_device_t (caller closes it with
// hb_opencl_devices_list_close). Returns NULL on fatal failure.
static hb_list_t* hb_opencl_devices_list_get(hb_opencl_library_t *opencl,
                                             cl_device_type device_type)
{
    if (opencl == NULL ||
        opencl->library == NULL ||
        opencl->clGetDeviceIDs == NULL ||
        opencl->clGetDeviceInfo == NULL ||
        opencl->clGetPlatformIDs == NULL)
    {
        hb_error("hb_opencl_devices_list_get: OpenCL support not available");
        return NULL;
    }

    hb_list_t *list = hb_list_init();
    if (list == NULL)
    {
        hb_error("hb_opencl_devices_list_get: memory allocation failure");
        return NULL;
    }

    cl_device_id *device_ids = NULL;
    hb_opencl_device_t *device = NULL;
    cl_platform_id *platform_ids = NULL;
    cl_uint i, j, num_platforms, num_devices;

    if (opencl->clGetPlatformIDs(0, NULL, &num_platforms) != CL_SUCCESS || !num_platforms)
    {
        goto fail;
    }
    if ((platform_ids = malloc(sizeof(cl_platform_id) * num_platforms)) == NULL)
    {
        hb_error("hb_opencl_devices_list_get: memory allocation failure");
        goto fail;
    }
    if (opencl->clGetPlatformIDs(num_platforms, platform_ids, NULL) != CL_SUCCESS)
    {
        goto fail;
    }

    for (i = 0; i < num_platforms; i++)
    {
        if (opencl->clGetDeviceIDs(platform_ids[i], device_type, 0, NULL, &num_devices) != CL_SUCCESS || !num_devices)
        {
            // non-fatal
            continue;
        }
        if ((device_ids = malloc(sizeof(cl_device_id) * num_devices)) == NULL)
        {
            hb_error("hb_opencl_devices_list_get: memory allocation failure");
            goto fail;
        }
        if (opencl->clGetDeviceIDs(platform_ids[i], device_type, num_devices, device_ids, NULL) != CL_SUCCESS)
        {
            // non-fatal, but don't leak this platform's ID buffer
            free(device_ids);
            device_ids = NULL;
            continue;
        }
        for (j = 0; j < num_devices; j++)
        {
            if ((device = hb_opencl_device_get(opencl, device_ids[j])) != NULL)
            {
                hb_list_add(list, device);
            }
        }
        // bug fix: the per-platform buffer was only freed once after the
        // loop, leaking one allocation per additional platform
        free(device_ids);
        device_ids = NULL;
    }
    goto end;

fail:
    hb_opencl_devices_list_close(&list);
end:
    free(platform_ids);
    free(device_ids);
    return list;
}
// Cached probe: load the OpenCL library and check that at least one device
// passes hb_opencl_device_is_supported(). Returns 1/0; the first call's
// result is memoized.
int hb_opencl_available()
{
    static int opencl_available = -1;
    if (opencl_available >= 0)
    {
        return opencl_available;
    }
    opencl_available = 0;

    hb_opencl_library_t *opencl = hb_opencl_library_init();
    if (opencl == NULL)
    {
        return opencl_available;
    }

    hb_list_t *device_list = hb_opencl_devices_list_get(opencl, CL_DEVICE_TYPE_ALL);
    if (device_list != NULL)
    {
        int i;
        for (i = 0; i < hb_list_count(device_list); i++)
        {
            hb_opencl_device_t *device = hb_list_item(device_list, i);
            if (device != NULL && hb_opencl_device_is_supported(device))
            {
                opencl_available = 1;
                break;
            }
        }
        hb_opencl_devices_list_close(&device_list);
    }
    hb_opencl_library_close(&opencl);
    return opencl_available;
}
// Log every non-CPU OpenCL device with its version, driver and support
// status. Uses a stack-local, minimally-initialized library wrapper so
// that no warnings/errors are logged when OpenCL is simply absent.
void hb_opencl_info_print()
{
/*
* Note: this function should not log any warnings or errors.
* Its only purpose is to list OpenCL-capable devices, so let's initialize
* only what we absolutely need here, rather than calling library_open().
*/
hb_opencl_library_t ocl, *opencl = &ocl;
// opencl->library is always assigned by the first clause below, so the
// cleanup at 'end' is safe even when a later DLSYM fails
if ((opencl->library = (void*)HB_OCL_DLOPEN) == NULL ||
(opencl->clGetDeviceIDs = (void*)HB_OCL_DLSYM(opencl->library, "clGetDeviceIDs" )) == NULL ||
(opencl->clGetDeviceInfo = (void*)HB_OCL_DLSYM(opencl->library, "clGetDeviceInfo" )) == NULL ||
(opencl->clGetPlatformIDs = (void*)HB_OCL_DLSYM(opencl->library, "clGetPlatformIDs")) == NULL)
{
// zero or insufficient OpenCL support
hb_log("OpenCL: library not available");
goto end;
}
int i, idx;
hb_list_t *device_list;
hb_opencl_device_t *device;
if ((device_list = hb_opencl_devices_list_get(opencl, CL_DEVICE_TYPE_ALL)) != NULL)
{
for (i = 0, idx = 1; i < hb_list_count(device_list); i++)
{
if ((device = hb_list_item(device_list, i)) != NULL)
{
// don't list CPU devices (always unsupported)
if (!(device->type & CL_DEVICE_TYPE_CPU))
{
hb_log("OpenCL device #%d: %s %s", idx++, device->vendor, device->name);
hb_log(" - OpenCL version: %s", device->version + 7 /* strlen("OpenCL ") */);
hb_log(" - driver version: %s", device->driver);
hb_log(" - device type: %s%s",
device->type & CL_DEVICE_TYPE_CPU ? "CPU" :
device->type & CL_DEVICE_TYPE_GPU ? "GPU" :
device->type & CL_DEVICE_TYPE_CUSTOM ? "Custom" :
device->type & CL_DEVICE_TYPE_ACCELERATOR ? "Accelerator" : "Unknown",
device->type & CL_DEVICE_TYPE_DEFAULT ? " (default)" : "");
hb_log(" - supported: %s",
hb_opencl_device_is_supported(device) ? "YES" : "no");
}
}
}
hb_opencl_devices_list_close(&device_list);
}
end:
/*
* Close only the initialized part
*/
if (opencl->library != NULL)
{
HB_OCL_DLCLOSE(opencl->library);
}
}
HandBrake-0.10.2/libhb/openclwrapper.h 0000664 0001752 0001752 00000006142 12463330511 020152 0 ustar handbrake handbrake /* openclwrapper.h
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
Authors: Peng Gao
Li Cao
*/
#ifndef HB_OPENCL_WRAPPER_H
#define HB_OPENCL_WRAPPER_H
#include "common.h"
#include "extras/cl.h"
//support AMD opencl
#define CL_QUEUE_THREAD_HANDLE_AMD 0x403E
#define CL_MAP_WRITE_INVALIDATE_REGION (1 << 2)
typedef struct _KernelEnv
{
cl_context context;
cl_command_queue command_queue;
cl_program program;
cl_kernel kernel;
char kernel_name[150];
int isAMD;
}KernelEnv;
typedef struct _OpenCLEnv
{
cl_platform_id platform;
cl_context context;
cl_device_id devices;
cl_command_queue command_queue;
}OpenCLEnv;
// User-defined function wrapper used to set the input parameters,
// launch the kernel, and copy data between GPU and CPU.
typedef int (*cl_kernel_function)( void **userdata, KernelEnv *kenv );
// register a wrapper for running the kernel specified by the kernel name
int hb_register_kernel_wrapper( const char *kernel_name, cl_kernel_function function );
// run kernel: the user calls this function to launch a kernel.
// kernel_name: this kernel name is used to find the kernel in opencl runtime environment
// userdata: this userdata is the all parameters for running the kernel specified by kernel name
int hb_run_kernel( const char *kernel_name, void **userdata );
// init the runtime environment; this function must be called before calling any function related to OpenCL
// the argc must be set zero , argv must be set NULL, build_option is the options for build the kernel.
int hb_init_opencl_run_env( int argc, char **argv, const char *build_option );
// release all OpenCL resources; this function must be called after calling any functions related to OpenCL
int hb_release_opencl_run_env();
// get the OpenCL status (0: not initialized; 1: initialized); used to check whether the OpenCL runtime has been created
int hb_opencl_stats();
// update OpenCL runtime environment members, such as command queue, platform, context, and program
int hb_init_opencl_attr( OpenCLEnv * env );
// create kernel object by a kernel name on the specified opencl run time indicated by env parameter
int hb_create_kernel( char * kernelname, KernelEnv * env );
// release kernel object which is generated by calling the hb_create_kernel api
int hb_release_kernel( KernelEnv * env );
void hb_opencl_init();
int hb_get_opencl_env();
int hb_create_buffer(cl_mem *cl_Buf,int flags,int size);
int hb_read_opencl_buffer(cl_mem cl_inBuf,unsigned char *outbuf,int size);
int hb_cl_create_mapped_buffer(cl_mem *mem, unsigned char **addr, int size);
int hb_cl_free_mapped_buffer(cl_mem mem, unsigned char *addr);
int hb_use_buffers();
int hb_confirm_gpu_type();
#endif // HB_OPENCL_WRAPPER_H
HandBrake-0.10.2/libhb/decsrtsub.h 0000664 0001752 0001752 00000000676 12463330511 017275 0 ustar handbrake handbrake /* decsrtsub.h
*
* Copyright (c) 2003-2015 HandBrake Team
* This file is part of the HandBrake source code
* Homepage: .
* It may be used under the terms of the GNU General Public License v2.
* For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#ifndef __DECSRTSUB_H__
#define __DECSRTSUB_H__
void hb_srt_to_ssa(hb_buffer_t *sub_in, int line);
#endif // __DECSRTSUB_H__
HandBrake-0.10.2/libhb/ports.c 0000664 0001752 0001752 00000100114 12463330511 016425 0 ustar handbrake handbrake /* ports.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#ifdef USE_PTHREAD
#ifdef SYS_LINUX
#define _GNU_SOURCE
#include
#endif
#include
#endif
#ifdef SYS_BEOS
#include
#endif
#if defined(SYS_DARWIN) || defined(SYS_FREEBSD)
#include
#include
#endif
#ifdef SYS_OPENBSD
#include
#include
#include
#endif
#ifdef SYS_MINGW
#include
#include
#else
#include
#include
#include
#include
#endif
#ifdef SYS_CYGWIN
#include
#endif
#ifdef SYS_MINGW
#include
#include
#include
#include
#include
#endif
#ifdef SYS_SunOS
#include
#endif
#include
#include
#include
#if defined( SYS_LINUX )
#include
#include
#include
#elif defined( SYS_OPENBSD )
#include
#include
#include
#endif
#ifdef __APPLE__
#include
#endif
#include
#include
#include "hb.h"
#include "libavutil/cpu.h"
/************************************************************************
* hb_get_date()
************************************************************************
* Returns the current date in milliseconds.
* On Win32, we implement a gettimeofday emulation here because
* libdvdread and libmp4v2 use it without checking.
************************************************************************/
/*
#ifdef SYS_CYGWIN
struct timezone
{
};
int gettimeofday( struct timeval * tv, struct timezone * tz )
{
int tick;
tick = GetTickCount();
tv->tv_sec = tick / 1000;
tv->tv_usec = ( tick % 1000 ) * 1000;
return 0;
}
#endif
*/
// Convert utf8 string to current code page.
// The internal string representation in hb is utf8. But some
// libraries (libmkv, and mp4v2) expect filenames in the current
// code page. So we must convert.
// Convert a UTF-8 string to the current ANSI code page (Windows only; on
// other platforms the string is simply duplicated, since they have no code
// pages). Returns a newly allocated string the caller must free, or NULL
// on failure.
char * hb_utf8_to_cp(const char *src)
{
    char *dst = NULL;

    if (src == NULL)
        return NULL;

#if defined( SYS_MINGW )
    int num_chars = MultiByteToWideChar(CP_UTF8, 0, src, -1, NULL, 0);
    if (num_chars <= 0)
        return NULL;
    wchar_t * tmp = calloc(num_chars, sizeof(wchar_t));
    if (tmp == NULL)
        return NULL;
    MultiByteToWideChar(CP_UTF8, 0, src, -1, tmp, num_chars);
    int len = WideCharToMultiByte(GetACP(), 0, tmp, num_chars, NULL, 0, NULL, NULL);
    if (len <= 0)
    {
        // bug fix: 'tmp' was leaked on this early-return path
        free(tmp);
        return NULL;
    }
    dst = calloc(len, sizeof(char));
    if (dst != NULL)
        WideCharToMultiByte(GetACP(), 0, tmp, num_chars, dst, len, NULL, NULL);
    free(tmp);
#else
    // Other platforms don't have code pages; return a plain copy
    // (C-standard equivalent of strdup)
    size_t len = strlen(src) + 1;
    dst = malloc(len);
    if (dst != NULL)
        memcpy(dst, src, len);
#endif

    return dst;
}
// Query the DVD drive's RPC region mask via the Linux DVD_AUTH ioctl.
// Returns 0 on success (with *region_mask set), a negative value on
// failure, and -1 on platforms without DVD_LU_SEND_RPC_STATE/DVD_AUTH.
int hb_dvd_region(char *device, int *region_mask)
{
#if defined( DVD_LU_SEND_RPC_STATE ) && defined( DVD_AUTH )
struct stat st;
dvd_authinfo ai;
int fd, ret;
fd = open( device, O_RDONLY );
if ( fd < 0 )
return -1;
if ( fstat( fd, &st ) < 0 )
{
close( fd );
return -1;
}
// only block or character devices can be DVD drives
if ( !( S_ISBLK( st.st_mode ) || S_ISCHR( st.st_mode ) ) )
{
close( fd );
return -1;
}
ai.type = DVD_LU_SEND_RPC_STATE;
ret = ioctl(fd, DVD_AUTH, &ai);
close( fd );
if ( ret < 0 )
return ret;
*region_mask = ai.lrpcs.region_mask;
return 0;
#else
return -1;
#endif
}
// Current wall-clock time in milliseconds since the epoch.
uint64_t hb_get_date()
{
    struct timeval now;
    gettimeofday( &now, NULL );
    return (uint64_t)now.tv_sec * 1000 + (uint64_t)now.tv_usec / 1000;
}
// Monotonic-ish timestamp in microseconds: QueryPerformanceCounter on
// Windows, gettimeofday elsewhere.
uint64_t hb_get_time_us()
{
#ifdef SYS_MINGW
    static LARGE_INTEGER frequency;
    LARGE_INTEGER now;

    // frequency is fixed at boot; query it once and cache it
    if (frequency.QuadPart == 0)
    {
        QueryPerformanceFrequency(&frequency);
    }
    QueryPerformanceCounter(&now);

    return (uint64_t)(1000000 * now.QuadPart / frequency.QuadPart);
#else
    struct timeval now;
    gettimeofday(&now, NULL);
    return (uint64_t)now.tv_sec * 1000000 + (uint64_t)now.tv_usec;
#endif
}
/************************************************************************
* hb_snooze()
************************************************************************
* Waits milliseconds.
************************************************************************/
// Sleep for 'delay' milliseconds; no-op for delay < 1.
void hb_snooze( int delay )
{
if( delay < 1 )
{
return;
}
#if defined( SYS_BEOS )
snooze( 1000 * delay );
#elif defined( SYS_DARWIN ) || defined( SYS_LINUX ) || defined( SYS_FREEBSD) || defined( SYS_SunOS )
// usleep takes microseconds
usleep( 1000 * delay );
#elif defined( SYS_CYGWIN ) || defined( SYS_MINGW )
// Win32 Sleep takes milliseconds
Sleep( delay );
#endif
}
/************************************************************************
* Get information about the CPU (number of cores, name, platform name)
************************************************************************/
static void init_cpu_info();
static int init_cpu_count();
// Cached CPU information, filled in once by init_cpu_info() from
// hb_platform_init() and read by the hb_get_cpu_* accessors below.
struct
{
enum hb_cpu_platform platform;
const char *name; // points into buf (CPUID brand string), or NULL
union
{
char buf[48]; // brand string: 3 CPUID leaves * 4 registers * 4 bytes
uint32_t buf4[12]; // same storage, written one register at a time
};
int count; // logical CPU count
} hb_cpu_info;
// Number of logical CPUs detected at startup.
int hb_get_cpu_count()
{
return hb_cpu_info.count;
}
// Detected CPU platform (HB_CPU_PLATFORM_*), or UNSPECIFIED.
int hb_get_cpu_platform()
{
return hb_cpu_info.platform;
}
// CPUID brand string, or NULL when unavailable on this architecture.
const char* hb_get_cpu_name()
{
return hb_cpu_info.name;
}
// Human-readable name for the detected CPU microarchitecture, or NULL when
// the platform was not identified.
const char* hb_get_cpu_platform_name()
{
    // Intel 64 and IA-32 Architectures Software Developer's Manual, Vol. 3C
    // Table 35-1: CPUID Signature Values of DisplayFamily_DisplayModel
    int platform = hb_cpu_info.platform;

    if (platform == HB_CPU_PLATFORM_INTEL_BNL)
        return "Intel microarchitecture Bonnell";
    if (platform == HB_CPU_PLATFORM_INTEL_SNB)
        return "Intel microarchitecture Sandy Bridge";
    if (platform == HB_CPU_PLATFORM_INTEL_IVB)
        return "Intel microarchitecture Ivy Bridge";
    if (platform == HB_CPU_PLATFORM_INTEL_SLM)
        return "Intel microarchitecture Silvermont";
    if (platform == HB_CPU_PLATFORM_INTEL_HSW)
        return "Intel microarchitecture Haswell";

    return NULL;
}
#if ARCH_X86_64
# define REG_b "rbx"
# define REG_S "rsi"
#elif ARCH_X86_32
# define REG_b "ebx"
# define REG_S "esi"
#endif // ARCH_X86_32
#if ARCH_X86_64 || ARCH_X86_32
#define cpuid(index, eax, ebx, ecx, edx) \
__asm__ volatile ( \
"mov %%"REG_b", %%"REG_S" \n\t" \
"cpuid \n\t" \
"xchg %%"REG_b", %%"REG_S \
: "=a" (*eax), "=S" (*ebx), "=c" (*ecx), "=d" (*edx) \
: "0" (index))
#endif // ARCH_X86_64 || ARCH_X86_32
// Fill hb_cpu_info: CPU count, microarchitecture (family/model via CPUID
// leaf 1) and brand string (extended CPUID leaves) on x86/x86-64.
static void init_cpu_info()
{
    hb_cpu_info.name     = NULL;
    hb_cpu_info.count    = init_cpu_count();
    hb_cpu_info.platform = HB_CPU_PLATFORM_UNSPECIFIED;

    if (av_get_cpu_flags() & AV_CPU_FLAG_SSE)
    {
#if ARCH_X86_64 || ARCH_X86_32
        int eax, ebx, ecx, edx, family, model;

        cpuid(1, &eax, &ebx, &ecx, &edx);
        family = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
        model  = ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0);

        // Intel 64 and IA-32 Architectures Software Developer's Manual, Vol. 3C
        // Table 35-1: CPUID Signature Values of DisplayFamily_DisplayModel
        switch (family)
        {
            case 0x06:
            {
                switch (model)
                {
                    case 0x1C:
                    case 0x26:
                    case 0x27:
                    case 0x35:
                    case 0x36:
                        hb_cpu_info.platform = HB_CPU_PLATFORM_INTEL_BNL;
                        break;
                    case 0x2A:
                    case 0x2D:
                        hb_cpu_info.platform = HB_CPU_PLATFORM_INTEL_SNB;
                        break;
                    case 0x3A:
                    case 0x3E:
                        hb_cpu_info.platform = HB_CPU_PLATFORM_INTEL_IVB;
                        break;
                    case 0x37:
                    case 0x4A:
                    case 0x4D:
                        hb_cpu_info.platform = HB_CPU_PLATFORM_INTEL_SLM;
                        break;
                    case 0x3C:
                    case 0x45:
                    case 0x46:
                        hb_cpu_info.platform = HB_CPU_PLATFORM_INTEL_HSW;
                        break;
                    default:
                        break;
                }
            } break;

            default:
                break;
        }

        // Intel 64 and IA-32 Architectures Software Developer's Manual, Vol. 2A
        // Figure 3-8: Determination of Support for the Processor Brand String
        // Table 3-17: Information Returned by CPUID Instruction
        //
        // Bug fix: leaf 0x80000000 returns the highest supported extended
        // leaf in EAX; the brand string exists when that value is at least
        // 0x80000004. The old test "(eax & 0x80000004) < 0x80000004" only
        // looked at bit 2 and mis-detected leaves 0x80000004-0x80000007.
        cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
        if ((unsigned)eax >= 0x80000004u)
        {
            cpuid(0x80000002,
                  &hb_cpu_info.buf4[ 0],
                  &hb_cpu_info.buf4[ 1],
                  &hb_cpu_info.buf4[ 2],
                  &hb_cpu_info.buf4[ 3]);
            cpuid(0x80000003,
                  &hb_cpu_info.buf4[ 4],
                  &hb_cpu_info.buf4[ 5],
                  &hb_cpu_info.buf4[ 6],
                  &hb_cpu_info.buf4[ 7]);
            cpuid(0x80000004,
                  &hb_cpu_info.buf4[ 8],
                  &hb_cpu_info.buf4[ 9],
                  &hb_cpu_info.buf4[10],
                  &hb_cpu_info.buf4[11]);
            hb_cpu_info.name    = hb_cpu_info.buf;
            hb_cpu_info.buf[47] = '\0'; // just in case
            while (isspace(*hb_cpu_info.name))
            {
                // skip leading whitespace to prettify
                hb_cpu_info.name++;
            }
        }
#endif // ARCH_X86_64 || ARCH_X86_32
    }
}
/*
* Whenever possible, returns the number of CPUs on the current computer.
* Returns 1 otherwise.
*/
// Count online logical CPUs using the platform-appropriate API; falls back
// to 1 when no mechanism exists, and clamps the result to [1, 64].
static int init_cpu_count()
{
int cpu_count = 1;
#if defined(SYS_CYGWIN) || defined(SYS_MINGW)
SYSTEM_INFO cpuinfo;
GetSystemInfo( &cpuinfo );
cpu_count = cpuinfo.dwNumberOfProcessors;
#elif defined(SYS_LINUX)
// count the bits set in this process's CPU affinity mask
unsigned int bit;
cpu_set_t p_aff;
memset( &p_aff, 0, sizeof(p_aff) );
sched_getaffinity( 0, sizeof(p_aff), &p_aff );
for( cpu_count = 0, bit = 0; bit < sizeof(p_aff); bit++ )
cpu_count += (((uint8_t *)&p_aff)[bit / 8] >> (bit % 8)) & 1;
#elif defined(SYS_BEOS)
system_info info;
get_system_info( &info );
cpu_count = info.cpu_count;
#elif defined(SYS_DARWIN) || defined(SYS_FREEBSD) || defined(SYS_OPENBSD)
size_t length = sizeof( cpu_count );
#ifdef SYS_OPENBSD
int mib[2] = { CTL_HW, HW_NCPU };
if( sysctl(mib, 2, &cpu_count, &length, NULL, 0) )
#else
if( sysctlbyname("hw.ncpu", &cpu_count, &length, NULL, 0) )
#endif
{
cpu_count = 1;
}
#elif defined( SYS_SunOS )
{
// probe every possible processor id and count the online ones
processorid_t cpumax;
int i,j=0;
cpumax = sysconf(_SC_CPUID_MAX);
for(i = 0; i <= cpumax; i++ )
{
if(p_online(i, P_STATUS) != -1)
{
j++;
}
}
cpu_count=j;
}
#endif
cpu_count = MAX( 1, cpu_count );
cpu_count = MIN( cpu_count, 64 );
return cpu_count;
}
int hb_platform_init()
{
    int result = 0;

#if defined(SYS_MINGW) && defined(PTW32_STATIC_LIB)
    /* Statically-linked pthreads-win32 needs an explicit per-process attach */
    result = !pthread_win32_process_attach_np();
    if (result)
    {
        hb_error("pthread_win32_process_attach_np() failed!");
        return -1;
    }
#endif

#if defined(_WIN32) || defined(__MINGW32__)
    /*
     * win32 _IOLBF (line-buffering) is the same as _IOFBF (full-buffering).
     * force it to unbuffered otherwise informative output is not easily parsed.
     */
    if ((result = setvbuf(stdout, NULL, _IONBF, 0)) != 0)
    {
        hb_error("setvbuf(stdout, NULL, _IONBF, 0) failed!");
        return -1;
    }
    if ((result = setvbuf(stderr, NULL, _IONBF, 0)) != 0)
    {
        hb_error("setvbuf(stderr, NULL, _IONBF, 0) failed!");
        return -1;
    }
#endif

    /* Detect CPU count/platform once at startup */
    init_cpu_info();

    return result;
}
/************************************************************************
 * Get a temporary directory for HB
 ************************************************************************
 * Fills path (512 bytes) with "<base>/hb.<pid>", where <base> comes from
 * GetTempPath()/cwd on Windows, or $TMPDIR / $TEMP / "/tmp" elsewhere.
 ***********************************************************************/
void hb_get_temporary_directory( char path[512] )
{
    char base[512];
    char *p;
    size_t len;

    /* Create the base */
#if defined( SYS_CYGWIN ) || defined( SYS_MINGW )
    int i_size = GetTempPath( 512, base );
    if( i_size <= 0 || i_size >= 512 )
    {
        if( getcwd( base, 512 ) == NULL )
            strcpy( base, "c:" ); /* Bad fallback but ... */
    }

    /* c:/path/ works like a charm under cygwin(win32?) so use it */
    while( ( p = strchr( base, '\\' ) ) )
        *p = '/';
#else
    if( (p = getenv( "TMPDIR" ) ) != NULL ||
        (p = getenv( "TEMP"   ) ) != NULL )
        /* snprintf instead of strcpy: an oversized environment variable
         * previously overflowed the 512-byte buffer */
        snprintf( base, sizeof(base), "%s", p );
    else
        strcpy( base, "/tmp" );
#endif
    /* Strip an eventual trailing '/' (for cygwin); the length check guards
     * against reading base[-1] when the base is an empty string */
    len = strlen( base );
    if( len > 0 && base[len-1] == '/' )
        base[len-1] = '\0';

    snprintf(path, 512, "%s/hb.%d", base, (int)getpid());
}
/************************************************************************
 * Get a tempory filename for HB
 ************************************************************************
 * Builds "<tmpdir>/<formatted name>" into name (1024 bytes). The handle
 * argument is unused; it is kept for interface compatibility.
 ***********************************************************************/
void hb_get_tempory_filename( hb_handle_t * h, char name[1024],
                              char *fmt, ... )
{
    va_list ap;
    size_t  used;

    /* Start from the per-process temporary directory, then append "/" */
    hb_get_temporary_directory( name );
    strcat( name, "/" );

    /* Format the caller-supplied file name after the directory prefix */
    used = strlen( name );
    va_start( ap, fmt );
    vsnprintf( name + used, 1024 - used, fmt, ap );
    va_end( ap );
}
/************************************************************************
 * hb_stat
 ************************************************************************
 * Wrapper to the real stat, needed to handle utf8 filenames on
 * windows. Returns 0 on success, -1 on failure (including a failed
 * UTF-8 to UTF-16 conversion on Windows).
 ***********************************************************************/
int hb_stat(const char *path, hb_stat_t *sb)
{
#ifdef SYS_MINGW
    wchar_t wpath[MAX_PATH];

    /* Convert the UTF-8 path to UTF-16 so Unicode file names work */
    if (!MultiByteToWideChar(CP_UTF8, 0, path, -1, wpath, MAX_PATH))
    {
        return -1;
    }
    return _wstat64(wpath, sb);
#else
    return stat(path, sb);
#endif
}
/************************************************************************
 * hb_fopen
 ************************************************************************
 * Wrapper to the real fopen, needed to handle utf8 filenames on
 * windows. Returns NULL on any failure.
 ***********************************************************************/
FILE * hb_fopen(const char *path, const char *mode)
{
#ifdef SYS_MINGW
    FILE    *fp;
    wchar_t  wpath[MAX_PATH];
    wchar_t  wmode[16];

    /* Convert path and mode from UTF-8 to UTF-16 for the wide CRT call */
    if (!MultiByteToWideChar(CP_UTF8, 0, path, -1, wpath, MAX_PATH))
    {
        return NULL;
    }
    if (!MultiByteToWideChar(CP_UTF8, 0, mode, -1, wmode, 16))
    {
        return NULL;
    }
    if (_wfopen_s(&fp, wpath, wmode) != 0)
    {
        return NULL;
    }
    return fp;
#else
    return fopen(path, mode);
#endif
}
/* Open a directory stream; on Windows this wraps the wide-character API so
 * UTF-8 paths work. Returns NULL on failure. */
HB_DIR* hb_opendir(char *path)
{
#ifdef SYS_MINGW
    wchar_t  wpath[MAX_PATH];
    HB_DIR  *dir;

    /* Convert the UTF-8 path to UTF-16 for the wide directory API */
    if (!MultiByteToWideChar(CP_UTF8, 0, path, -1, wpath, MAX_PATH))
    {
        return NULL;
    }

    dir = malloc(sizeof(*dir));
    if (dir == NULL)
    {
        return NULL;
    }

    dir->wdir = _wopendir(wpath);
    if (dir->wdir == NULL)
    {
        free(dir);
        return NULL;
    }
    return dir;
#else
    return opendir(path);
#endif
}
/* Close a directory stream opened with hb_opendir(). */
int hb_closedir(HB_DIR *dir)
{
#ifdef SYS_MINGW
    /* Close the underlying wide-character stream, then free our wrapper */
    int rc = _wclosedir(dir->wdir);
    free(dir);
    return rc;
#else
    return closedir(dir);
#endif
}
/* Read the next directory entry, converting the Windows UTF-16 name back to
 * UTF-8. Returns NULL at end of directory or on error. */
struct dirent * hb_readdir(HB_DIR *dir)
{
#ifdef SYS_MINGW
    struct _wdirent *entry;

    entry = _wreaddir(dir->wdir);
    if (entry == NULL)
        return NULL;

    int len = WideCharToMultiByte(CP_UTF8, 0, entry->d_name, -1,
                                  dir->entry.d_name, sizeof(dir->entry.d_name),
                                  NULL, NULL );
    if (len <= 0)
    {
        // Conversion failed or the name did not fit; previously this fell
        // through and set d_namlen to len - 1 == -1. Treat the entry as
        // unreadable instead.
        return NULL;
    }
    dir->entry.d_ino    = entry->d_ino;
    dir->entry.d_reclen = entry->d_reclen;
    dir->entry.d_namlen = len - 1; // len includes the terminating NUL
    return &dir->entry;
#else
    return readdir(dir);
#endif
}
/* Reset a directory stream to its beginning. */
void hb_rewinddir(HB_DIR *dir)
{
#ifdef SYS_MINGW
    _wrewinddir(dir->wdir);
#else
    // rewinddir() returns void; the previous `return rewinddir(dir);`
    // returned a void expression from a void function, which is a
    // constraint violation in standard C.
    rewinddir(dir);
#endif
}
/* Return a pointer to the last directory separator in path, or NULL if
 * there is none. On Windows both '/' and '\\' are accepted. */
char * hb_strr_dir_sep(const char *path)
{
#ifdef SYS_MINGW
    char *sep = strrchr(path, '/');
    return (sep != NULL) ? sep : strrchr(path, '\\');
#else
    return strrchr(path, '/');
#endif
}
/************************************************************************
 * hb_mkdir
 ************************************************************************
 * Wrapper to the real mkdir, needed only because it doesn't take a
 * second argument on Win32. Grrr. Also converts UTF-8 paths to UTF-16
 * on Windows. Returns 0 on success, -1 on failure.
 ***********************************************************************/
int hb_mkdir(char * path)
{
#ifdef SYS_MINGW
    wchar_t wpath[MAX_PATH];

    if (!MultiByteToWideChar(CP_UTF8, 0, path, -1, wpath, MAX_PATH))
    {
        return -1;
    }
    return _wmkdir(wpath);
#else
    return mkdir(path, 0755);
#endif
}
/************************************************************************
 * Portable thread implementation
 ***********************************************************************/
/* Portable thread handle. `exited` is written by hb_thread_func() under
 * `lock` once the thread routine has returned, so hb_thread_close() /
 * hb_thread_has_exited() can tell whether joining would block. */
struct hb_thread_s
{
    char          * name;      // strdup'd in hb_thread_init(), freed in hb_thread_close()
    int             priority;  // HB_LOW_PRIORITY / HB_NORMAL_PRIORITY
    thread_func_t * function;  // routine executed by the thread
    void          * arg;       // argument passed to `function`
    hb_lock_t     * lock;      // protects `exited`
    int             exited;    // set to 1 once `function` has returned

#if defined( SYS_BEOS )
    thread_id       thread;
#elif USE_PTHREAD
    pthread_t       thread;
//#elif defined( SYS_CYGWIN )
//    HANDLE          thread;
#endif
};
/* Get a unique identifier to thread and represent as 64-bit unsigned.
 * If unsupported, the value 0 is be returned.
 * Caller should use result only for display/log purposes.
 */
static uint64_t hb_thread_to_integer( const hb_thread_t* t )
{
#if defined( USE_PTHREAD )
#if defined( SYS_CYGWIN )
    /* pthread_t is directly convertible to an integer here */
    return (uint64_t)t->thread;
#elif defined( _WIN32 ) || defined( __MINGW32__ )
    /* pthreads-win32: pthread_t is a struct; use its pointer member */
    return (uint64_t)(ptrdiff_t)t->thread.p;
#elif defined( SYS_DARWIN )
    /* NOTE(review): narrows through unsigned long before widening to the
       uint64_t return type -- assumed intentional for Darwin's
       pointer-sized pthread_t; confirm before changing */
    return (unsigned long)t->thread;
#else
    return (uint64_t)t->thread;
#endif
#else
    return 0;
#endif
}
/************************************************************************
 * hb_thread_func()
 ************************************************************************
 * We use it as the root routine for any thread, for two reasons:
 *  + To set the thread priority on OS X (pthread_setschedparam() could
 *    be called from hb_thread_init(), but it's nicer to do it as we
 *    are sure it is done before the real routine starts)
 *  + Get informed when the thread exits, so we know whether
 *    hb_thread_close() will block or not.
 ***********************************************************************/
static void attribute_align_thread hb_thread_func( void * _t )
{
    hb_thread_t * t = (hb_thread_t *) _t;

#if defined( SYS_DARWIN ) || defined( SYS_FREEBSD )
    /* Set the thread priority.
     * (The listing contained the mojibake "¶m" here -- an HTML-entity
     * mangling of "&param" -- which does not compile; restored.) */
    struct sched_param param;
    memset( &param, 0, sizeof( struct sched_param ) );
    param.sched_priority = t->priority;
    pthread_setschedparam( pthread_self(), SCHED_OTHER, &param );
#endif

#if defined( SYS_BEOS )
    signal( SIGINT, SIG_IGN );
#endif

    /* Start the actual routine */
    t->function( t->arg );

    /* Inform that the thread can be joined now */
    hb_deep_log( 2, "thread %"PRIx64" exited (\"%s\")", hb_thread_to_integer( t ), t->name );
    hb_lock( t->lock );
    t->exited = 1;
    hb_unlock( t->lock );
}
/************************************************************************
 * hb_thread_init()
 ************************************************************************
 * name:     user-friendly name
 * function: the thread routine
 * arg:      argument of the routine
 * priority: HB_LOW_PRIORITY or HB_NORMAL_PRIORITY
 ***********************************************************************/
hb_thread_t * hb_thread_init( const char * name, void (* function)(void *),
                              void * arg, int priority )
{
    hb_thread_t * t = calloc( sizeof( hb_thread_t ), 1 );

    t->name     = strdup( name );
    t->function = function;
    t->arg      = arg;
    t->priority = priority;
    t->lock     = hb_lock_init();

    /* Create and start the thread */
#if defined( SYS_BEOS )
    t->thread = spawn_thread( (thread_func) hb_thread_func,
                              name, priority, t );
    resume_thread( t->thread );
#elif USE_PTHREAD
    pthread_create( &t->thread, NULL,
                    (void * (*)( void * )) hb_thread_func, t );
#endif

    hb_deep_log( 2, "thread %"PRIx64" started (\"%s\")",
                 hb_thread_to_integer( t ), t->name );
    return t;
}
/************************************************************************
 * hb_thread_close()
 ************************************************************************
 * Joins the thread and frees memory. *_t is NULLed on return.
 ***********************************************************************/
void hb_thread_close( hb_thread_t ** _t )
{
    hb_thread_t * t = *_t;

    /* Block until the thread routine has returned */
#if defined( SYS_BEOS )
    long exit_value;
    wait_for_thread( t->thread, &exit_value );
#elif USE_PTHREAD
    pthread_join( t->thread, NULL );
#endif

    hb_deep_log( 2, "thread %"PRIx64" joined (\"%s\")",
                 hb_thread_to_integer( t ), t->name );

    /* Release everything the thread owned */
    hb_lock_close( &t->lock );
    free( t->name );
    free( t );
    *_t = NULL;
}
/************************************************************************
 * hb_thread_has_exited()
 ************************************************************************
 * Returns 1 if the thread can be joined right away, 0 otherwise.
 ***********************************************************************/
int hb_thread_has_exited( hb_thread_t * t )
{
    /* t->exited is set by hb_thread_func() under t->lock */
    hb_lock( t->lock );
    int done = t->exited;
    hb_unlock( t->lock );

    return done;
}
/************************************************************************
 * Portable mutex implementation
 ***********************************************************************/
/* Portable mutex: a BeOS semaphore or a pthread mutex depending on build */
struct hb_lock_s
{
#if defined( SYS_BEOS )
    sem_id          sem;
#elif USE_PTHREAD
    pthread_mutex_t mutex;
//#elif defined( SYS_CYGWIN )
//    HANDLE          mutex;
#endif
};
/************************************************************************
 * hb_lock_init()
 * hb_lock_close()
 * hb_lock()
 * hb_unlock()
 ************************************************************************
 * Basic wrappers to OS-specific semaphore or mutex functions.
 ***********************************************************************/
hb_lock_t * hb_lock_init()
{
    hb_lock_t * l = calloc( sizeof( hb_lock_t ), 1 );

#if defined( SYS_BEOS )
    l->sem = create_sem( 1, "sem" );
#elif USE_PTHREAD
    pthread_mutexattr_t mta;

    pthread_mutexattr_init(&mta);

#if defined( SYS_CYGWIN ) || defined( SYS_FREEBSD )
    pthread_mutexattr_settype(&mta, PTHREAD_MUTEX_NORMAL);
#endif

    pthread_mutex_init( &l->mutex, &mta );
    /* POSIX requires each initialized attributes object to be destroyed;
     * the attr is only needed for pthread_mutex_init() itself. */
    pthread_mutexattr_destroy( &mta );
#endif

    return l;
}
/* Destroy a lock created by hb_lock_init() and NULL the caller's pointer. */
void hb_lock_close( hb_lock_t ** _l )
{
    hb_lock_t * l = *_l;

#if defined( SYS_BEOS )
    delete_sem( l->sem );
#elif USE_PTHREAD
    pthread_mutex_destroy( &l->mutex );
#endif
    free( l );
    *_l = NULL;
}
/* Acquire the lock; blocks until it is available. */
void hb_lock( hb_lock_t * l )
{
#if defined( SYS_BEOS )
    acquire_sem( l->sem );
#elif USE_PTHREAD
    pthread_mutex_lock( &l->mutex );
#endif
}
/* Release a lock previously acquired with hb_lock(). */
void hb_unlock( hb_lock_t * l )
{
#if defined( SYS_BEOS )
    release_sem( l->sem );
#elif USE_PTHREAD
    pthread_mutex_unlock( &l->mutex );
#endif
}
/************************************************************************
 * Portable condition variable implementation
 ***********************************************************************/
/* Portable condition variable. On BeOS the "condition" is simulated by
 * suspending/resuming the single waiting thread; `thread` holds that
 * waiter's id, or -1 when nobody is waiting. */
struct hb_cond_s
{
#if defined( SYS_BEOS )
    int            thread;
#elif USE_PTHREAD
    pthread_cond_t cond;
//#elif defined( SYS_CYGWIN )
//    HANDLE         event;
#endif
};
/************************************************************************
 * hb_cond_init()
 * hb_cond_close()
 * hb_cond_wait()
 * hb_cond_signal()
 ************************************************************************
 * Win9x is not supported by this implementation (SignalObjectAndWait()
 * only available on Windows 2000/XP).
 ***********************************************************************/
hb_cond_t * hb_cond_init()
{
    hb_cond_t * c = calloc( sizeof( hb_cond_t ), 1 );

    if( c == NULL )
    {
        return NULL;
    }

#if defined( SYS_BEOS )
    c->thread = -1; /* -1 == no waiter */
#elif USE_PTHREAD
    pthread_cond_init( &c->cond, NULL );
#endif

    return c;
}
/* Destroy a condition variable and NULL the caller's pointer. */
void hb_cond_close( hb_cond_t ** _c )
{
    hb_cond_t * c = *_c;

#if defined( SYS_BEOS )
    /* nothing to release on BeOS */
#elif USE_PTHREAD
    pthread_cond_destroy( &c->cond );
#endif
    free( c );
    *_c = NULL;
}
/* Wait on the condition; `lock` must be held on entry and is re-held on
 * return. */
void hb_cond_wait( hb_cond_t * c, hb_lock_t * lock )
{
#if defined( SYS_BEOS )
    /* Record ourselves as the waiter, drop the lock, then suspend.
       NOTE(review): there is a window between release_sem() and
       suspend_thread(); hb_cond_signal() compensates by polling until the
       waiter is actually suspended before resuming it. */
    c->thread = find_thread( NULL );
    release_sem( lock->sem );
    suspend_thread( c->thread );
    acquire_sem( lock->sem );
    c->thread = -1;
#elif USE_PTHREAD
    pthread_cond_wait( &c->cond, &lock->mutex );
//#elif defined( SYS_CYGWIN )
//    SignalObjectAndWait( lock->mutex, c->event, INFINITE, FALSE );
//    WaitForSingleObject( lock->mutex, INFINITE );
#endif
}
/* Fill *tp with the current wall-clock time; microseconds from
 * gettimeofday() are scaled to nanoseconds. */
void hb_clock_gettime( struct timespec *tp )
{
    struct timeval now;

    gettimeofday( &now, NULL );
    tp->tv_sec  = now.tv_sec;
    tp->tv_nsec = now.tv_usec * 1000;
}
/* Wait on the condition for at most `msec` milliseconds; `lock` must be
 * held on entry and is re-held on return. */
void hb_cond_timedwait( hb_cond_t * c, hb_lock_t * lock, int msec )
{
#if defined( SYS_BEOS )
    /* BeOS has no timed wait here: this behaves like hb_cond_wait() and
     * ignores the timeout. */
    c->thread = find_thread( NULL );
    release_sem( lock->sem );
    suspend_thread( c->thread );
    acquire_sem( lock->sem );
    c->thread = -1;
#elif USE_PTHREAD
    struct timespec ts;
    /* Build an absolute deadline `msec` ms from now. hb_clock_gettime() is
     * gettimeofday()-based (wall clock), which matches the default
     * CLOCK_REALTIME attribute used by hb_cond_init(). The second line also
     * folds any nanosecond overflow from the first into tv_sec. */
    hb_clock_gettime(&ts);
    ts.tv_nsec += (msec % 1000) * 1000000;
    ts.tv_sec += msec / 1000 + (ts.tv_nsec / 1000000000);
    ts.tv_nsec %= 1000000000;
    pthread_cond_timedwait( &c->cond, &lock->mutex, &ts );
#endif
}
/* Wake one thread blocked in hb_cond_wait()/hb_cond_timedwait(). */
void hb_cond_signal( hb_cond_t * c )
{
#if defined( SYS_BEOS )
    /* c->thread is the waiter's id, or -1 if nobody is waiting */
    while( c->thread != -1 )
    {
        thread_info info;
        get_thread_info( c->thread, &info );
        if( info.state == B_THREAD_SUSPENDED )
        {
            resume_thread( c->thread );
            break;
        }
        /* Looks like we have been called between hb_cond_wait's
           release_sem() and suspend_thread() lines. Wait until the
           thread is actually suspended before we resume it */
        snooze( 5000 );
    }
#elif USE_PTHREAD
    pthread_cond_signal( &c->cond );
//#elif defined( SYS_CYGWIN )
//    PulseEvent( c->event );
#endif
}
/* Wake every thread blocked on the condition. No-op on non-pthread builds. */
void hb_cond_broadcast( hb_cond_t * c )
{
#if USE_PTHREAD
    pthread_cond_broadcast( &c->cond );
#endif
}
/************************************************************************
 * Network
 ***********************************************************************/
/* Thin wrapper around a connected TCP socket descriptor */
struct hb_net_s
{
    int socket; // connected socket fd, owned by this struct
};
/* Open a TCP connection to address:port. Returns NULL on failure.
 * NOTE(review): gethostbyname() is IPv4-only and obsolete; getaddrinfo()
 * would be the modern replacement. */
hb_net_t * hb_net_open( char * address, int port )
{
    hb_net_t * n = calloc( sizeof( hb_net_t ), 1 );

    struct sockaddr_in   sock;
    struct hostent     * host;

#ifdef SYS_MINGW
    // WSAStartup() must run once per process before any socket call. The
    // guard has to be `static`: the previous plain local reset to 0 on
    // every call, so winsock was re-initialized on each hb_net_open().
    static int winsock_init = 0;
    WSADATA wsaData;
    int iResult;

    if (!winsock_init)
    {
        // Initialize Winsock
        iResult = WSAStartup(MAKEWORD(2, 2), &wsaData);
        if (iResult != 0)
        {
            hb_log("WSAStartup failed: %d", iResult);
            free(n);
            return NULL;
        }
        winsock_init = 1;
    }
#endif

    /* TODO: find out why this doesn't work on Win32 */
    if( !( host = gethostbyname( address ) ) )
    {
        hb_log( "gethostbyname failed (%s)", address );
        free( n );
        return NULL;
    }

    memset( &sock, 0, sizeof( struct sockaddr_in ) );
    sock.sin_family = host->h_addrtype;
    sock.sin_port   = htons( port );
    memcpy( &sock.sin_addr, host->h_addr, host->h_length );

    if( ( n->socket = socket( host->h_addrtype, SOCK_STREAM, 0 ) ) < 0 )
    {
        hb_log( "socket failed" );
        free( n );
        return NULL;
    }

    if( connect( n->socket, (struct sockaddr *) &sock,
                 sizeof( struct sockaddr_in ) ) < 0 )
    {
        hb_log( "connect failed" );
        // Close the descriptor; previously it leaked on this path
        // (hb_net_close() uses close() for sockets throughout this file).
        close( n->socket );
        free( n );
        return NULL;
    }

    return n;
}
/* Send the whole NUL-terminated string (terminator excluded).
 * Returns the send() result. */
int hb_net_send( hb_net_t * n, char * buffer )
{
    size_t len = strlen( buffer );
    return send( n->socket, buffer, len, 0 );
}
/* Receive up to size - 1 bytes, leaving room for a terminating NUL the
 * caller may append. Returns the recv() result. */
int hb_net_recv( hb_net_t * n, char * buffer, int size )
{
    return recv( n->socket, buffer, size - 1, 0 );
}
/* Close the connection, free the handle, and NULL the caller's pointer. */
void hb_net_close( hb_net_t ** _n )
{
    hb_net_t * n = *_n;

    close( n->socket );
    free( n );
    *_n = NULL;
}
#ifdef SYS_MINGW
/* Minimal strtok_r() replacement for MinGW, matching the POSIX contract:
 * pass the string on the first call and NULL afterwards; *save_ptr keeps
 * the scan position between calls. The input string is modified in place. */
char *strtok_r(char *s, const char *delim, char **save_ptr)
{
    char *token, *rest;

    /* Resume from the saved position when s is NULL */
    if (s == NULL)
        s = *save_ptr;

    /* Skip leading delimiters; an empty remainder means no more tokens */
    s += strspn(s, delim);
    if (*s == '\0')
        return NULL;

    /* The token runs until the next delimiter (or end of string) */
    token = s;
    rest  = strpbrk(token, delim);
    if (rest == NULL)
    {
        /* Last token: park save_ptr on the terminating NUL */
        *save_ptr = strchr(token, '\0');
    }
    else
    {
        /* Terminate the token and make *save_ptr point past it */
        *rest     = '\0';
        *save_ptr = rest + 1;
    }
    return token;
}
#endif
/************************************************************************
 * OS Sleep Allow / Prevent
 ***********************************************************************/
#ifdef __APPLE__
// 128 chars limit for IOPMAssertionCreateWithName
// Human-readable reason the OS displays while the sleep assertion is held.
static CFStringRef reasonForActivity =
CFSTR("HandBrake is currently scanning and/or encoding");
#endif
/* Allocate the opaque per-caller sleep-assertion state.
 * On macOS this holds an IOPMAssertionID (-1 == "no assertion held");
 * on every other platform it is simply NULL. */
void* hb_system_sleep_opaque_init()
{
    void *opaque = NULL;

#ifdef __APPLE__
    IOPMAssertionID *assertionID = calloc(sizeof(IOPMAssertionID), 1);
    if (assertionID == NULL)
    {
        hb_error("hb_system_sleep: failed to allocate opaque");
        return NULL;
    }
    *assertionID = -1;
    opaque = assertionID;
#endif

    return opaque;
}
/* Release any outstanding sleep assertion, free the opaque state, and
 * NULL the caller's pointer. Safe to call with *opaque == NULL. */
void hb_system_sleep_opaque_close(void **opaque)
{
    if (*opaque != NULL)
    {
        /* Re-allow sleep before tearing the state down */
        hb_system_sleep_private_enable(*opaque);
    }
#ifdef __APPLE__
    if (*opaque != NULL)
    {
        free((IOPMAssertionID*)*opaque);
    }
#endif
    *opaque = NULL;
}
/* Allow the system to sleep again by releasing the assertion held in
 * `opaque` (if any). No-op outside macOS. */
void hb_system_sleep_private_enable(void *opaque)
{
#ifdef __APPLE__
    if (opaque == NULL)
    {
        hb_error("hb_system_sleep: opaque is NULL");
        // Previously execution fell through and dereferenced the NULL
        // pointer below; bail out instead.
        return;
    }

    IOPMAssertionID *assertionID = (IOPMAssertionID*)opaque;
    if (*assertionID == -1)
    {
        // nothing to do: no assertion is currently held
        return;
    }

    IOReturn success = IOPMAssertionRelease(*assertionID);
    if (success == kIOReturnSuccess)
    {
        hb_deep_log(3,
                    "hb_system_sleep: assertion %d released, sleep allowed",
                    *assertionID);
        *assertionID = -1;
    }
    else
    {
        hb_log("hb_system_sleep: failed to allow system sleep");
    }
#endif
}
/* Prevent system sleep by creating a NoIdleSleep assertion, storing its id
 * in `opaque`. No-op if an assertion is already held, or outside macOS. */
void hb_system_sleep_private_disable(void *opaque)
{
#ifdef __APPLE__
    if (opaque == NULL)
    {
        hb_error("hb_system_sleep: opaque is NULL");
        // Previously execution fell through and dereferenced the NULL
        // pointer below; bail out instead.
        return;
    }

    IOPMAssertionID *assertionID = (IOPMAssertionID*)opaque;
    if (*assertionID != -1)
    {
        // nothing to do: an assertion is already held
        return;
    }

    IOReturn success = IOPMAssertionCreateWithName(kIOPMAssertionTypeNoIdleSleep,
                                                   kIOPMAssertionLevelOn,
                                                   reasonForActivity,
                                                   assertionID);
    if (success == kIOReturnSuccess)
    {
        hb_deep_log(3,
                    "hb_system_sleep: assertion %d created, sleep prevented",
                    *assertionID);
    }
    else
    {
        hb_log("hb_system_sleep: failed to prevent system sleep");
    }
#endif
}
HandBrake-0.10.2/libhb/update.c 0000664 0001752 0001752 00000012744 12463330511 016553 0 ustar handbrake handbrake /* update.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
static void UpdateFunc( void * );

/* Arguments handed to the update-check thread: the caller's output slots,
 * filled by UpdateFunc() when a newer build is found. */
typedef struct
{
    int  * build;   // out: latest available build number
    char * version; // out: latest available version string
} hb_update_t;
/* Spawn the background update-check thread.
 * build/version are the caller's output slots; they are written by the
 * thread only if a newer build is found. Returns the thread handle. */
hb_thread_t * hb_update_init( int * build, char * version )
{
    hb_update_t * data = calloc( sizeof( hb_update_t ), 1 );

    data->build   = build;
    data->version = version;

    /* Run asynchronously so startup never blocks on the network */
    return hb_thread_init( "update", UpdateFunc, data, HB_NORMAL_PRIORITY );
}
/* Fetch the appcast over plain HTTP, parse the latest build number and
 * version string, and report them through data->build / data->version if
 * newer than HB_PROJECT_BUILD. Frees `data` before returning.
 *
 * NOTE(review): this listing appears to have been mangled by markup
 * stripping -- the literal "<cli>" tag text is missing from several
 * comments and conditions below (e.g. the comment starting "Find the tag"
 * runs straight into code, and "it only found = end )" is a fused
 * fragment). Compare against pristine upstream update.c before editing;
 * the code is kept byte-for-byte as found. */
static void UpdateFunc( void * _data )
{
    hb_update_t * data = (hb_update_t *) _data;
    char* const url = HB_PROJECT_URL_APPCAST;
    char* const urlz = url + strlen( HB_PROJECT_URL_APPCAST ); /* marks null-term */
    char url_host[64];
    char url_path[128];
    char query[256];
    hb_net_t * net;
    int ret;
    char buf[4096];
    char * cur, * end;
    int size;
    int i_vers;
    char s_vers[32]; /* must be no larger than hb_handle_s.version */
    int i;

    /* Setup hb_query and hb_query_two with the correct appcast file */
    hb_log( "Using %s", url );

    /* extract host part */
    cur = strstr( HB_PROJECT_URL_APPCAST, "//" );
    if( !cur || cur+2 > urlz )
        goto error;
    cur += 2;

    end = strstr( cur, "/" );
    if( !end || end > urlz )
        goto error;

    memset( url_host, 0, sizeof(url_host) );
    strncpy( url_host, cur, (end-cur) );

    /* extract path part */
    memset( url_path, 0, sizeof(url_path) );
    strncpy( url_path, end, (urlz-end) );

    if( !strlen( url_path ))
        goto error;

    memset( query, 0, sizeof(query) );
    snprintf( query, sizeof(query), "GET %s HTTP/1.0\r\nHost: %s\r\n\r\n", url_path, url_host );

    /* Grab the data from the web server */
    if( !( net = hb_net_open( url_host, 80 ) ) )
    {
        goto error;
    }

    if( hb_net_send( net, query ) < 0 )
    {
        hb_log("Error: Unable to connect to server");
        hb_net_close( &net );
        goto error;
    }

    /* Read the whole response (at most sizeof(buf) bytes) */
    size = 0;
    memset( buf, 0, 4096 );
    for( ;; )
    {
        ret = hb_net_recv( net, &buf[size], sizeof( buf ) - size );
        if( ret < 1 )
        {
            hb_net_close( &net );
            break;
        }
        size += ret;
    }

    cur = buf;
    end = &buf[sizeof( buf )];

    /* Make sure we got it */
    cur += 9;
    if( size < 15 || strncmp( cur, "200 OK", 6 ) )
    {
        hb_log("Error: We did not get a 200 OK from the server. \n");
        goto error;
    }
    cur += 6;

    /* Find the end of the headers and the beginning of the content */
    for( ; &cur[3] < end; cur++ )
    {
        if( cur[0] == '\r' && cur[1] == '\n' &&
            cur[2] == '\r' && cur[3] == '\n' )
        {
            cur += 4;
            break;
        }
    }

    if( cur >= end )
    {
        hb_log("Error: Found the end of the buffer before the end of the HTTP header information! \n");
        goto error;
    }

    /*
     * Find the tag
     * Scan though each character of the buffer until we find that the first 4 characters of "cur" are "' )
     {
        cur += 1;
        break;
     }

     /* If the CLI tag has not been found in the first 768 characters, or the end is reached, something bad happened.*/
     if (( i > 768) || ( cur >= end ))
     {
        hb_log("Error: Did not find the tag in the expected maximum amount of characters into the file. \n");
        goto error;
     }
    }

    if( cur >= end )
    {
        goto error;
    }

    /*
     * Ok, The above code didn't position cur, it only found = end )
    {
        hb_log("Error: Unexpected end of buffer! Could not find the build information. \n");
        goto error;
    }

    /* Stable HB_PROJECT_BUILD */
    i_vers = strtol( cur, &cur, 10 );
    if( cur >= end )
    {
        hb_log("Error: Unexpected end of buffer! \n");
        goto error;
    }

    /*
     * The Version number is 2 places after the build, so shift cur, 2 places.
     * Get all the characters in cur until the point where " is found.
     */
    cur += 2;
    if( cur >= end )
    {
        hb_log("Error: Unexpected end of buffer! Could not get version number. \n");
        goto error;
    }
    memset( s_vers, 0, sizeof( s_vers ) );
    for( i = 0; i < sizeof( s_vers ) - 1 && cur < end && *cur != '"'; i++, cur++ )
    {
        s_vers[i] = *cur;

        /* If the CLI tag has not been found in the first 768 characters, or the end is reached, something bad happened.*/
        if (( cur >= end ))
        {
            hb_log("Error: Version number too long, or end of buffer reached. \n");
            goto error;
        }
    }

    if( cur >= end )
    {
        goto error;
    }

    /* Print the version information */
    hb_log( "latest: %s, build %d", s_vers, i_vers );

    /* Return the build information */
    if( i_vers > HB_PROJECT_BUILD )
    {
        memcpy( data->version, s_vers, sizeof(s_vers) );
        *(data->build) = i_vers;
    }

error:
    free( data );
    return;
}
HandBrake-0.10.2/libhb/encvobsub.c 0000664 0001752 0001752 00000003071 12463330511 017250 0 ustar handbrake handbrake /* encvobsub.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
/* Private state for the VOBSUB passthrough "encoder". The job pointer is
 * stored at init; it is not otherwise read in this file. */
struct hb_work_private_s
{
    hb_job_t * job;
};
/* Allocate the private context for the VOBSUB passthrough encoder.
 * Returns 0 on success, 1 on allocation failure. */
int encsubInit( hb_work_object_t * w, hb_job_t * job )
{
    hb_work_private_t * pv;

    pv = calloc( 1, sizeof( hb_work_private_t ) );
    w->private_data = pv;
    if ( pv == NULL )
    {
        // Previously a failed allocation was reported as success and the
        // NULL pointer was dereferenced on the next line.
        hb_error( "encvobsub: calloc failed" );
        return 1;
    }
    pv->job = job;

    return 0;
}
/* Pass VOBSUB buffers straight through. Returns HB_WORK_DONE on EOF or on
 * an invalid subtitle source, HB_WORK_OK otherwise. */
int encsubWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                hb_buffer_t ** buf_out )
{
    hb_buffer_t * in = *buf_in;

    if (w->subtitle->source != VOBSUB)
    {
        // Invalid source, send EOF, this shouldn't ever happen
        hb_log("encvobsub: invalid subtitle source");
        hb_buffer_close( buf_in );
        *buf_out = hb_buffer_init(0);
        // Previously control fell through here, dereferencing the buffer
        // hb_buffer_close() just released (use after free) and overwriting
        // the EOF buffer (leak). Stop now instead.
        return HB_WORK_DONE;
    }
    if ( in->size <= 0 )
    {
        /* EOF on input stream - send it downstream & say that we're done */
        *buf_out = in;
        *buf_in = NULL;
        return HB_WORK_DONE;
    }

    /*
     * Not much to do, just pass the buffer on.
     * Some day, we may re-encode bd subtitles here ;)
     */
    if (buf_out)
    {
        *buf_out = in;
        *buf_in = NULL;
    }

    return HB_WORK_OK;
}
/* Release the private context allocated in encsubInit(). */
void encsubClose( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;
    free( pv );
}
/* Work-object descriptor registering the VOBSUB encoder with the core.
 * Positional initializers; presumably id, name, init, work, close --
 * confirm against hb_work_object_s. */
hb_work_object_t hb_encvobsub =
{
    WORK_ENCVOBSUB,
    "VOBSUB encoder",
    encsubInit,
    encsubWork,
    encsubClose
};
HandBrake-0.10.2/libhb/bd.c 0000664 0001752 0001752 00000070103 12463330511 015647 0 ustar handbrake handbrake /* dvd.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
#include "lang.h"
#include "hbffmpeg.h"
#include "libbluray/bluray.h"
/* State for an open Blu-ray source */
struct hb_bd_s
{
    char                 * path;        // strdup'd path given to hb_bd_init()
    BLURAY               * bd;          // libbluray handle
    int                    title_count; // number of relevant titles found
    BLURAY_TITLE_INFO   ** title_info;  // per-title info, sorted by playlist id
    int64_t                duration;    // NOTE(review): not written in the visible code
    hb_stream_t          * stream;
    int                    chapter;
    int                    next_chap;
};
/***********************************************************************
* Local prototypes
**********************************************************************/
static int next_packet( BLURAY *bd, uint8_t *pkt );
static int title_info_compare_mpls(const void *, const void *);
/***********************************************************************
* hb_bd_init
***********************************************************************
*
**********************************************************************/
/***********************************************************************
 * hb_bd_init
 ***********************************************************************
 * Open `path` as a Blu-ray source and collect its relevant titles,
 * sorted by playlist number. Returns NULL if the path is not a BD
 * (the caller then falls back to the stream/file reader).
 **********************************************************************/
hb_bd_t * hb_bd_init( char * path )
{
    hb_bd_t * d = calloc( sizeof( hb_bd_t ), 1 );
    int       ii;

    /* Open device */
    d->bd = bd_open( path, NULL );
    if( d->bd == NULL )
    {
        /* Not an error, may be a stream - which we'll try in a moment. */
        hb_log( "bd: not a bd - trying as a stream/file instead" );
        goto fail;
    }

    /* Zero relevant titles also means this isn't a usable BD */
    d->title_count = bd_get_titles( d->bd, TITLES_RELEVANT, 0 );
    if( d->title_count == 0 )
    {
        hb_log( "bd: not a bd - trying as a stream/file instead" );
        goto fail;
    }

    /* Fetch per-title info and order the titles by playlist number */
    d->title_info = calloc( sizeof( BLURAY_TITLE_INFO* ), d->title_count );
    for( ii = 0; ii < d->title_count; ii++ )
    {
        d->title_info[ii] = bd_get_title_info( d->bd, ii, 0 );
    }
    qsort( d->title_info, d->title_count, sizeof( BLURAY_TITLE_INFO* ),
           title_info_compare_mpls );

    d->path = strdup( path );

    return d;

fail:
    if( d->bd ) bd_close( d->bd );
    free( d );
    return NULL;
}
/***********************************************************************
 * hb_bd_title_count
 ***********************************************************************
 * Number of relevant titles, as counted once in hb_bd_init().
 **********************************************************************/
int hb_bd_title_count( hb_bd_t * d )
{
    return d->title_count;
}
/* Build an hb_subtitle_t from a BD stream descriptor and append it to
 * list_subtitle. Streams whose coding type is not PGS are dropped. */
static void add_subtitle(int track, hb_list_t *list_subtitle, BLURAY_STREAM_INFO *bdsub, uint32_t codec)
{
    hb_subtitle_t * subtitle = calloc( sizeof( hb_subtitle_t ), 1 );
    iso639_lang_t * lang;

    subtitle->track = track;
    subtitle->id    = bdsub->pid;

    /* Human-readable and ISO-639-2 language names from the BD code */
    lang = lang_for_code2( (char*)bdsub->lang );
    snprintf( subtitle->lang, sizeof( subtitle->lang ), "%s",
              strlen(lang->native_name) ? lang->native_name : lang->eng_name);
    snprintf( subtitle->iso639_2, sizeof( subtitle->iso639_2 ), "%s",
              lang->iso639_2);

    if ( bdsub->coding_type == BLURAY_STREAM_TYPE_SUB_PG )
    {
        subtitle->source      = PGSSUB;
        subtitle->format      = PICTURESUB;
        subtitle->config.dest = RENDERSUB;
    }
    else
    {
        /* Unrecognized coding type: don't add it to the list */
        free( subtitle );
        return;
    }

    subtitle->reg_desc    = STR4_TO_UINT32("HDMV");
    subtitle->stream_type = bdsub->coding_type;
    subtitle->codec       = codec;

    hb_log( "bd: subtitle id=0x%x, lang=%s, 3cc=%s", subtitle->id,
            subtitle->lang, subtitle->iso639_2 );
    hb_list_add( list_subtitle, subtitle );
}
/* Build an hb_audio_t from a BD audio stream descriptor and append it to
 * list_audio. The codec name chosen here is only used for logging. */
static void add_audio(int track, hb_list_t *list_audio, BLURAY_STREAM_INFO *bdaudio, int substream_type, uint32_t codec, uint32_t codec_param)
{
    const char    * codec_name;
    hb_audio_t    * audio;
    iso639_lang_t * lang;

    audio = calloc( sizeof( hb_audio_t ), 1 );

    /* The stream id packs the substream selector above the MPEG-TS pid */
    audio->id                       = (substream_type << 16) | bdaudio->pid;
    audio->config.in.reg_desc       = STR4_TO_UINT32("HDMV");
    audio->config.in.stream_type    = bdaudio->coding_type;
    audio->config.in.substream_type = substream_type;
    audio->config.in.codec          = codec;
    audio->config.in.codec_param    = codec_param;

    /* Pick a printable codec name for the log line below */
    if ( audio->config.in.codec == HB_ACODEC_AC3 )
    {
        codec_name = "AC3";
    }
    else if ( audio->config.in.codec == HB_ACODEC_DCA )
    {
        codec_name = "DTS";
    }
    else if ( audio->config.in.codec & HB_ACODEC_FF_MASK )
    {
        switch( bdaudio->coding_type )
        {
            case BLURAY_STREAM_TYPE_AUDIO_AC3PLUS:
                codec_name = "E-AC3";
                break;
            case BLURAY_STREAM_TYPE_AUDIO_DTSHD:
                codec_name = "DTS-HD HRA";
                break;
            case BLURAY_STREAM_TYPE_AUDIO_DTSHD_MASTER:
                codec_name = "DTS-HD MA";
                break;
            case BLURAY_STREAM_TYPE_AUDIO_LPCM:
                codec_name = "BD LPCM";
                break;
            case BLURAY_STREAM_TYPE_AUDIO_MPEG1:
                codec_name = "MPEG1";
                break;
            case BLURAY_STREAM_TYPE_AUDIO_MPEG2:
                codec_name = "MPEG2";
                break;
            case BLURAY_STREAM_TYPE_AUDIO_TRUHD:
                codec_name = "TrueHD";
                break;
            default:
                codec_name = "Unknown FFmpeg";
                break;
        }
    }
    else
    {
        codec_name = "Unknown";
    }

    /* Language names from the BD language code */
    lang = lang_for_code2( (char*)bdaudio->lang );
    audio->config.lang.type = 0;
    snprintf( audio->config.lang.simple,
              sizeof( audio->config.lang.simple ), "%s",
              strlen( lang->native_name ) ? lang->native_name : lang->eng_name );
    snprintf( audio->config.lang.iso639_2,
              sizeof( audio->config.lang.iso639_2 ), "%s", lang->iso639_2 );

    hb_log("bd: audio id=0x%x, lang=%s (%s), 3cc=%s", audio->id,
           audio->config.lang.simple, codec_name, audio->config.lang.iso639_2);

    audio->config.in.track = track;
    hb_list_add( list_audio, audio );
}
/* Return 1 when every audio stream of clip a appears in clip b with the
 * same pid and coding type (and the counts match); 0 otherwise. Clips
 * without any audio are deliberately treated as not equal. */
static int bd_audio_equal( BLURAY_CLIP_INFO *a, BLURAY_CLIP_INFO *b )
{
    int ii, jj;

    if ( a->audio_stream_count != b->audio_stream_count ||
         a->audio_stream_count == 0 )
    {
        return 0;
    }

    for ( ii = 0; ii < a->audio_stream_count; ii++ )
    {
        BLURAY_STREAM_INFO * s = &a->audio_streams[ii];
        int found = 0;

        for ( jj = 0; jj < b->audio_stream_count; jj++ )
        {
            if ( s->pid == b->audio_streams[jj].pid &&
                 s->coding_type == b->audio_streams[jj].coding_type )
            {
                found = 1;
                break;
            }
        }
        if ( !found )
        {
            return 0;
        }
    }
    return 1;
}
/***********************************************************************
 * hb_bd_title_scan
 ***********************************************************************
 * Build and return an hb_title_t for 1-based title 'tt', or NULL when
 * the title is invalid, has no video, or is shorter than min_duration
 * (90kHz ticks).  Populates video/audio/subtitle tracks and chapters
 * from the cached BLURAY_TITLE_INFO.
 **********************************************************************/
hb_title_t * hb_bd_title_scan( hb_bd_t * d, int tt, uint64_t min_duration )
{
    hb_title_t * title;
    hb_chapter_t * chapter;
    int ii, jj;
    BLURAY_TITLE_INFO * ti = NULL;

    hb_log( "bd: scanning title %d", tt );

    title = hb_title_init( d->path, tt );
    title->demuxer = HB_TS_DEMUXER;
    title->type = HB_BD_TYPE;
    title->reg_desc = STR4_TO_UINT32("HDMV");

    // Default the title name to the last path component, minus extension.
    char * p_cur, * p_last = d->path;
    for( p_cur = d->path; *p_cur; p_cur++ )
    {
        if( IS_DIR_SEP(p_cur[0]) && p_cur[1] )
        {
            p_last = &p_cur[1];
        }
    }
    snprintf( title->name, sizeof( title->name ), "%s", p_last );
    char *dot_term = strrchr(title->name, '.');
    if (dot_term)
        *dot_term = '\0';

    title->vts = 0;
    title->ttn = 0;

    ti = d->title_info[tt - 1];
    if ( ti == NULL )
    {
        hb_log( "bd: invalid title" );
        goto fail;
    }
    if ( ti->clip_count == 0 )
    {
        hb_log( "bd: stream has no clips" );
        goto fail;
    }
    if ( ti->clips[0].video_stream_count == 0 )
    {
        hb_log( "bd: stream has no video" );
        goto fail;
    }

    hb_log( "bd: playlist %05d.MPLS", ti->playlist );
    title->playlist = ti->playlist;

    // Block count is the sum of the packet counts of all clips.
    uint64_t pkt_count = 0;
    for ( ii = 0; ii < ti->clip_count; ii++ )
    {
        pkt_count += ti->clips[ii].pkt_count;
    }
    title->block_start = 0;
    title->block_end = pkt_count;
    title->block_count = pkt_count;

    title->angle_count = ti->angle_count;

    /* Get duration (ti->duration is in 90kHz ticks) */
    title->duration = ti->duration;
    title->hours    = title->duration / 90000 / 3600;
    title->minutes  = ( ( title->duration / 90000 ) % 3600 ) / 60;
    title->seconds  = ( title->duration / 90000 ) % 60;
    hb_log( "bd: duration is %02d:%02d:%02d (%"PRId64" ms)",
            title->hours, title->minutes, title->seconds,
            title->duration / 90 );

    /* ignore short titles because they're often stills */
    if( ti->duration < min_duration )
    {
        hb_log( "bd: ignoring title (too short)" );
        goto fail;
    }

    BLURAY_STREAM_INFO * bdvideo = &ti->clips[0].video_streams[0];

    title->video_id = bdvideo->pid;
    title->video_stream_type = bdvideo->coding_type;

    hb_log( "bd: video id=0x%x, stream type=%s, format %s", title->video_id,
            bdvideo->coding_type == BLURAY_STREAM_TYPE_VIDEO_MPEG1 ? "MPEG1" :
            bdvideo->coding_type == BLURAY_STREAM_TYPE_VIDEO_MPEG2 ? "MPEG2" :
            bdvideo->coding_type == BLURAY_STREAM_TYPE_VIDEO_VC1 ? "VC-1" :
            bdvideo->coding_type == BLURAY_STREAM_TYPE_VIDEO_H264 ? "H.264" :
            "Unknown",
            bdvideo->format == BLURAY_VIDEO_FORMAT_480I ? "480i" :
            bdvideo->format == BLURAY_VIDEO_FORMAT_576I ? "576i" :
            bdvideo->format == BLURAY_VIDEO_FORMAT_480P ? "480p" :
            bdvideo->format == BLURAY_VIDEO_FORMAT_1080I ? "1080i" :
            bdvideo->format == BLURAY_VIDEO_FORMAT_720P ? "720p" :
            bdvideo->format == BLURAY_VIDEO_FORMAT_1080P ? "1080p" :
            bdvideo->format == BLURAY_VIDEO_FORMAT_576P ? "576p" :
            "Unknown"
          );

    switch( bdvideo->coding_type )
    {
        case BLURAY_STREAM_TYPE_VIDEO_MPEG1:
        case BLURAY_STREAM_TYPE_VIDEO_MPEG2:
            title->video_codec = WORK_DECAVCODECV;
            title->video_codec_param = AV_CODEC_ID_MPEG2VIDEO;
            break;

        case BLURAY_STREAM_TYPE_VIDEO_VC1:
            title->video_codec = WORK_DECAVCODECV;
            title->video_codec_param = AV_CODEC_ID_VC1;
            break;

        case BLURAY_STREAM_TYPE_VIDEO_H264:
            title->video_codec = WORK_DECAVCODECV;
            title->video_codec_param = AV_CODEC_ID_H264;
            break;

        default:
            hb_log( "scan: unknown video codec (0x%x)",
                    bdvideo->coding_type );
            goto fail;
    }

    switch ( bdvideo->aspect )
    {
        case BLURAY_ASPECT_RATIO_4_3:
            title->container_aspect = 4. / 3.;
            break;
        case BLURAY_ASPECT_RATIO_16_9:
            title->container_aspect = 16. / 9.;
            break;
        default:
            hb_log( "bd: unknown aspect" );
            goto fail;
    }
    hb_log( "bd: aspect = %g", title->container_aspect );

    /* Detect audio */
    // Max primary BD audios is 32
    int matches;
    int most_audio = 0;
    int audio_clip_index = 0;
    if (ti->clip_count > 2)
    {
        // All BD clips are not all required to have the same audio.
        // But clips that have seamless transition are required
        // to have the same audio as the previous clip.
        // So find the clip that has the most other clips with the
        // matching audio.
        for ( ii = 0; ii < ti->clip_count; ii++ )
        {
            matches = 0;
            for ( jj = 0; jj < ti->clip_count; jj++ )
            {
                if ( bd_audio_equal( &ti->clips[ii], &ti->clips[jj] ) )
                {
                    matches++;
                }
            }
            if ( matches > most_audio )
            {
                most_audio = matches;
                audio_clip_index = ii;
            }
        }
    }
    else if (ti->clip_count == 2)
    {
        // If there are only 2 clips, pick audios from the longer clip
        if (ti->clips[0].pkt_count < ti->clips[1].pkt_count)
            audio_clip_index = 1;
    }

    // Add all the audios found in the above clip.
    for (ii = 0; ii < ti->clips[audio_clip_index].audio_stream_count; ii++)
    {
        BLURAY_STREAM_INFO * bdaudio;

        bdaudio = &ti->clips[audio_clip_index].audio_streams[ii];

        switch (bdaudio->coding_type)
        {
            case BLURAY_STREAM_TYPE_AUDIO_TRUHD:
                // Add 2 audio tracks. One for TrueHD and one for AC-3
                add_audio(ii, title->list_audio, bdaudio, HB_SUBSTREAM_BD_AC3,
                          HB_ACODEC_AC3, AV_CODEC_ID_AC3);
                add_audio(ii, title->list_audio, bdaudio, HB_SUBSTREAM_BD_TRUEHD,
                          HB_ACODEC_FFMPEG, AV_CODEC_ID_TRUEHD);
                break;

            case BLURAY_STREAM_TYPE_AUDIO_DTS:
                add_audio(ii, title->list_audio, bdaudio, 0,
                          HB_ACODEC_DCA, AV_CODEC_ID_DTS);
                break;

            case BLURAY_STREAM_TYPE_AUDIO_MPEG2:
            case BLURAY_STREAM_TYPE_AUDIO_MPEG1:
                add_audio(ii, title->list_audio, bdaudio, 0,
                          HB_ACODEC_FFMPEG, AV_CODEC_ID_MP2);
                break;

            case BLURAY_STREAM_TYPE_AUDIO_AC3PLUS:
                add_audio(ii, title->list_audio, bdaudio, 0,
                          HB_ACODEC_FFMPEG, AV_CODEC_ID_EAC3);
                break;

            case BLURAY_STREAM_TYPE_AUDIO_LPCM:
                add_audio(ii, title->list_audio, bdaudio, 0,
                          HB_ACODEC_FFMPEG, AV_CODEC_ID_PCM_BLURAY);
                break;

            case BLURAY_STREAM_TYPE_AUDIO_AC3:
                add_audio(ii, title->list_audio, bdaudio, 0,
                          HB_ACODEC_AC3, AV_CODEC_ID_AC3);
                break;

            case BLURAY_STREAM_TYPE_AUDIO_DTSHD_MASTER:
            case BLURAY_STREAM_TYPE_AUDIO_DTSHD:
                // Add 2 audio tracks. One for DTS-HD and one for DTS
                add_audio(ii, title->list_audio, bdaudio, HB_SUBSTREAM_BD_DTS,
                          HB_ACODEC_DCA, AV_CODEC_ID_DTS);
                // DTS-HD is special. The substreams must be concatinated
                // DTS-core followed by DTS-hd-extensions. Setting
                // a substream id of 0 says use all substreams.
                add_audio(ii, title->list_audio, bdaudio, 0,
                          HB_ACODEC_DCA_HD, AV_CODEC_ID_DTS);
                break;

            default:
                hb_log("scan: unknown audio pid 0x%x codec 0x%x", bdaudio->pid,
                       bdaudio->coding_type);
                break;
        }
    }

    // Add all the subtitles found in the above clip.
    for ( ii = 0; ii < ti->clips[audio_clip_index].pg_stream_count; ii++ )
    {
        BLURAY_STREAM_INFO * bdpgs;

        bdpgs = &ti->clips[audio_clip_index].pg_streams[ii];

        switch( bdpgs->coding_type )
        {
            case BLURAY_STREAM_TYPE_SUB_PG:
                add_subtitle(ii, title->list_subtitle, bdpgs, WORK_DECPGSSUB);
                break;
            default:
                hb_log( "scan: unknown subtitle pid 0x%x codec 0x%x",
                        bdpgs->pid, bdpgs->coding_type );
                break;
        }
    }

    /* Chapters */
    for ( ii = 0, jj = 0; ii < ti->chapter_count; ii++ )
    {
        char chapter_title[80];

        // Sanity check start time of this chapter.
        // If it is beyond the end of the title, drop it.
        // Format fix: chapter start/duration are 64-bit; the old "%ld"
        // is wrong on 32-bit and LLP64 targets, so use inttypes macros
        // as the rest of this file does.
        if (ti->chapters[ii].start > ti->duration)
        {
            hb_log("bd: chapter %d invalid start %"PRIu64", dropping", ii+1,
                   ti->chapters[ii].start);
            continue;
        }

        chapter = calloc( 1, sizeof( hb_chapter_t ) );

        chapter->index = ++jj;
        snprintf( chapter_title, sizeof( chapter_title ),
                  "Chapter %d", chapter->index );
        hb_chapter_set_title( chapter, chapter_title );

        chapter->duration = ti->chapters[ii].duration;
        chapter->block_start = ti->chapters[ii].offset;

        // Sanity check chapter duration and start times
        // Have seen some invalid durations in the wild
        if (ii < ti->chapter_count - 1)
        {
            // Validate start time
            if (ti->chapters[ii+1].start < ti->chapters[ii].start)
            {
                hb_log("bd: chapter %d invalid start %"PRIu64, ii+1,
                       ti->chapters[ii+1].start);
                ti->chapters[ii+1].start = ti->chapters[ii].start +
                                           chapter->duration;
            }
            if (ti->chapters[ii+1].start - ti->chapters[ii].start !=
                chapter->duration)
            {
                hb_log("bd: chapter %d invalid duration %"PRId64, ii+1,
                       chapter->duration);
                chapter->duration = ti->chapters[ii+1].start -
                                    ti->chapters[ii].start;
            }
        }
        else
        {
            if (ti->duration - ti->chapters[ii].start != chapter->duration)
            {
                hb_log("bd: chapter %d invalid duration %"PRId64, ii+1,
                       chapter->duration);
                chapter->duration = ti->duration - ti->chapters[ii].start;
            }
        }

        // Round to the nearest whole second for display.
        int seconds      = ( chapter->duration + 45000 ) / 90000;
        chapter->hours   = ( seconds / 3600 );
        chapter->minutes = ( seconds % 3600 ) / 60;
        chapter->seconds = ( seconds % 60 );

        hb_log( "bd: chap %d packet=%"PRIu64", %"PRId64" ms",
                chapter->index,
                chapter->block_start,
                chapter->duration / 90 );

        hb_list_add( title->list_chapter, chapter );
    }
    hb_log( "bd: title %d has %d chapters", tt, ti->chapter_count );

    /* This title is ok so far */
    goto cleanup;

fail:
    hb_title_close( &title );

cleanup:
    return title;
}
/***********************************************************************
 * hb_bd_main_feature
 ***********************************************************************
 * Pick the most likely "main feature" title from list_title and return
 * its 1-based index (0 when the list is empty).  Selection favors, in
 * order: video format rank, duration, then chapter count.
 **********************************************************************/
int hb_bd_main_feature( hb_bd_t * d, hb_list_t * list_title )
{
    int longest = 0;
    int ii;
    uint64_t longest_duration = 0;
    int highest_rank = 0;
    int most_chapters = 0;
    // Rank of each BLURAY_VIDEO_FORMAT_* value (indices 0-7); a larger
    // rank wins below.  Presumably ordered to favor HD/progressive
    // formats — verify against libbluray's format enum.
    int rank[8] = {0, 1, 3, 2, 6, 5, 7, 4};
    BLURAY_TITLE_INFO * ti;

    for ( ii = 0; ii < hb_list_count( list_title ); ii++ )
    {
        hb_title_t * title = hb_list_item( list_title, ii );
        ti = d->title_info[title->index - 1];
        if ( ti )
        {
            BLURAY_STREAM_INFO * bdvideo = &ti->clips[0].video_streams[0];
            // Only consider titles at least 70% as long as the current
            // leader, with an in-range video format.
            if ( title->duration > longest_duration * 0.7 && bdvideo->format < 8 )
            {
                if (highest_rank < rank[bdvideo->format] ||
                    ( title->duration > longest_duration &&
                      highest_rank == rank[bdvideo->format]))
                {
                    longest = title->index;
                    longest_duration = title->duration;
                    highest_rank = rank[bdvideo->format];
                    most_chapters = ti->chapter_count;
                }
                else if (highest_rank == rank[bdvideo->format] &&
                         title->duration == longest_duration &&
                         ti->chapter_count > most_chapters)
                {
                    // Tie-break equal rank and duration by chapter count.
                    longest = title->index;
                    most_chapters = ti->chapter_count;
                }
            }
        }
        else if ( title->duration > longest_duration )
        {
            // No cached title info: fall back to plain longest-duration.
            longest_duration = title->duration;
            longest = title->index;
        }
    }
    return longest;
}
/***********************************************************************
 * hb_bd_start
 ***********************************************************************
 * Title and chapter start at 1.  Returns 1 on success, 0 on failure.
 **********************************************************************/
int hb_bd_start( hb_bd_t * d, hb_title_t *title )
{
    BD_EVENT event;

    d->duration = title->duration;

    /* Select the playlist backing this title.  Calling bd_get_event once
     * afterwards initializes libbluray's event queue. */
    bd_select_title( d->bd, d->title_info[title->index - 1]->idx );
    bd_get_event( d->bd, &event );

    d->chapter = 1;
    d->stream  = hb_bd_stream_open( title );

    return d->stream != NULL;
}
/***********************************************************************
 * hb_bd_stop
 ***********************************************************************
 * Tear down the demuxer state opened by hb_bd_start.
 **********************************************************************/
void hb_bd_stop( hb_bd_t * d )
{
    if ( d->stream != NULL )
    {
        hb_stream_close( &d->stream );
    }
}
/***********************************************************************
 * hb_bd_seek
 ***********************************************************************
 * Seek to fraction 'f' (0..1) of the title's duration.
 **********************************************************************/
int hb_bd_seek( hb_bd_t * d, float f )
{
    uint64_t ticks = f * d->duration;

    bd_seek_time( d->bd, ticks );
    /* Resync chapter tracking and flush any buffered TS state. */
    d->next_chap = bd_get_current_chapter( d->bd ) + 1;
    hb_ts_stream_reset( d->stream );
    return 1;
}
/* Seek to an absolute timestamp (90kHz ticks), then resync the chapter
 * counter and flush the TS demuxer state. */
int hb_bd_seek_pts( hb_bd_t * d, uint64_t pts )
{
    bd_seek_time( d->bd, pts );
    d->next_chap = bd_get_current_chapter( d->bd ) + 1;
    hb_ts_stream_reset( d->stream );
    return 1;
}
/* Seek to 1-based chapter 'c'; libbluray chapters count from 0. */
int hb_bd_seek_chapter( hb_bd_t * d, int c )
{
    d->next_chap = c;
    bd_seek_chapter( d->bd, c - 1 );
    hb_ts_stream_reset( d->stream );
    return 1;
}
/***********************************************************************
 * hb_bd_read
 ***********************************************************************
 * Return the next demuxed buffer for the current title, or NULL (0) on
 * end of stream / unrecoverable read error.  Also drains libbluray's
 * event queue to track chapter changes, playitem boundaries (reported
 * as discontinuities) and stills.
 **********************************************************************/
hb_buffer_t * hb_bd_read( hb_bd_t * d )
{
    int result;
    int error_count = 0;
    uint8_t buf[192];          /* one BD "m2ts" packet: 4-byte ts + 188 TS */
    BD_EVENT event;
    uint64_t pos;
    hb_buffer_t * b;
    uint8_t discontinuity;
    int new_chap = 0;

    discontinuity = 0;
    while ( 1 )
    {
        // A chapter event seen on a previous pass becomes effective on the
        // next buffer we hand out.
        if ( d->next_chap != d->chapter )
        {
            new_chap = d->chapter = d->next_chap;
        }
        result = next_packet( d->bd, buf );
        if ( result < 0 )
        {
            // Read error: skip one packet forward and retry, giving up
            // after 10 consecutive failures.
            hb_error("bd: Read Error");
            pos = bd_tell( d->bd );
            bd_seek( d->bd, pos + 192 );
            error_count++;
            if (error_count > 10)
            {
                hb_error("bd: Error, too many consecutive read errors");
                return 0;   // 0 == NULL hb_buffer_t *
            }
            continue;
        }
        else if ( result == 0 )
        {
            // Clean end of stream.
            return 0;
        }
        error_count = 0;

        // Drain all pending libbluray events before decoding the packet.
        while ( bd_get_event( d->bd, &event ) )
        {
            switch ( event.event )
            {
                case BD_EVENT_CHAPTER:
                    // The muxers expect to only get chapter 2 and above
                    // They write chapter 1 when chapter 2 is detected.
                    d->next_chap = event.param;
                    break;

                case BD_EVENT_PLAYITEM:
                    // Clip boundary: flag a timestamp discontinuity.
                    discontinuity = 1;
                    hb_deep_log(2, "bd: Playitem %u", event.param);
                    break;

                case BD_EVENT_STILL:
                    bd_read_skip_still( d->bd );
                    break;

                default:
                    break;
            }
        }
        // buf+4 to skip the BD timestamp at start of packet
        b = hb_ts_decode_pkt( d->stream, buf+4 );
        if ( b )
        {
            b->s.discontinuity = discontinuity;
            b->s.new_chap = new_chap;
            return b;
        }
        // hb_ts_decode_pkt needs more packets before it can emit a
        // buffer; loop and read another.
    }
    return NULL;   // unreachable: the loop above only exits via return
}
/***********************************************************************
 * hb_bd_chapter
 ***********************************************************************
 * Returns in which chapter the next block to be read is.
 * Chapter numbers start at 1.
 **********************************************************************/
int hb_bd_chapter( hb_bd_t * d )
{
    int chapter = d->next_chap;
    return chapter;
}
/***********************************************************************
 * hb_bd_close
 ***********************************************************************
 * Closes and frees everything, including the cached per-title info,
 * the open stream/disc handles and the hb_bd_t itself.  *_d is set to
 * NULL on return.
 **********************************************************************/
void hb_bd_close( hb_bd_t ** _d )
{
    hb_bd_t * d = *_d;
    int ii;

    if ( d->title_info )
    {
        for ( ii = 0; ii < d->title_count; ii++ )
            bd_free_title_info( d->title_info[ii] );
        free( d->title_info );
    }
    if( d->stream ) hb_stream_close( &d->stream );
    if( d->bd ) bd_close( d->bd );
    /* free(NULL) is a no-op, so no guard is needed here. */
    free( d->path );

    free( d );
    *_d = NULL;
}
/***********************************************************************
 * hb_bd_set_angle
 ***********************************************************************
 * Sets the angle to read
 **********************************************************************/
void hb_bd_set_angle( hb_bd_t * d, int angle )
{
    int ok = bd_select_angle( d->bd, angle );

    if ( !ok )
    {
        hb_log("bd_select_angle failed");
    }
}
/* A plausible TS packet header: sync byte 0x47, transport_scrambling_control
 * of zero, and a non-reserved (non-zero) adaptation_field_control. */
static int check_ts_sync(const uint8_t *buf)
{
    if (buf[0] != 0x47)
        return 0;
    if ((buf[3] >> 6) != 0)         /* scrambled packets don't count */
        return 0;
    return (buf[3] >> 4) > 0;       /* adaptation ctrl 0 is reserved */
}
/* Declare TS sync "locked" only when 8 consecutive packets, psize bytes
 * apart, all start with a valid TS header.  Loop replaces the original
 * hand-unrolled 8-way && chain; evaluation order and short-circuiting
 * are identical. */
static int have_ts_sync(const uint8_t *buf, int psize)
{
    int i;

    for ( i = 0; i < 8; i++ )
    {
        if ( !check_ts_sync( &buf[i * psize] ) )
        {
            return 0;
        }
    }
    return 1;
}
#define MAX_HOLE 192*80

/*
 * Re-establish packet alignment after TS sync is lost.  'pkt' holds the
 * 192-byte packet that failed the sync check; it becomes the head of the
 * search window.  Scans up to MAX_HOLE bytes for 8 consecutive in-sync
 * packets, then positions the BD read pointer at the first good packet.
 * Returns the number of bytes skipped, or 0 on read failure / EOF.
 */
static uint64_t align_to_next_packet(BLURAY *bd, uint8_t *pkt)
{
    uint8_t buf[MAX_HOLE];
    uint64_t pos = 0;
    uint64_t start = bd_tell(bd);
    uint64_t orig;
    uint64_t off = 192;

    // Seed the window with the packet we already consumed, and back the
    // logical start position up to where that packet began.
    memcpy(buf, pkt, 192);
    if ( start >= 192 ) {
        start -= 192;
    }
    orig = start;

    while (1)
    {
        if (bd_read(bd, buf+off, sizeof(buf)-off) == sizeof(buf)-off)
        {
            const uint8_t *bp = buf;
            int i;

            // Slide a byte at a time looking for 8 packets' worth of sync;
            // stop 8*192 bytes short of the end so have_ts_sync can look ahead.
            for ( i = sizeof(buf) - 8 * 192; --i >= 0; ++bp )
            {
                if ( have_ts_sync( bp, 192 ) )
                {
                    break;
                }
            }
            if ( i >= 0 )
            {
                pos = ( bp - buf );
                break;
            }
            // No sync in this window: keep the trailing 8 packets (they may
            // start a run that completes in the next read) and refill.
            off = 8 * 192;
            memcpy(buf, buf + sizeof(buf) - off, off);
            start += sizeof(buf) - off;
        }
        else
        {
            return 0;
        }
    }
    off = start + pos - 4;
    // bd_seek seeks to the nearest access unit *before* the requested position
    // we don't want to seek backwards, so we need to read until we get
    // past that position.
    bd_seek(bd, off);
    while (off > bd_tell(bd))
    {
        if (bd_read(bd, buf, 192) != 192)
        {
            break;
        }
    }
    return start - orig + pos;
}
/*
 * Read the next 192-byte BD packet into 'pkt', re-establishing sync if
 * necessary.  Returns 1 on success, 0 on end of stream, -1 on read error.
 */
static int next_packet( BLURAY *bd, uint8_t *pkt )
{
    int result;

    while ( 1 )
    {
        result = bd_read( bd, pkt, 192 );
        if ( result < 0 )
        {
            return -1;
        }
        if ( result < 192 )
        {
            // Short read: treat as end of stream.
            return 0;
        }
        // Sync byte is byte 4.  0-3 are timestamp.
        if (pkt[4] == 0x47)
        {
            return 1;
        }
        // lost sync - back up to where we started then try to re-establish.
        uint64_t pos = bd_tell(bd);
        uint64_t pos2 = align_to_next_packet(bd, pkt);
        if ( pos2 == 0 )
        {
            hb_log( "next_packet: eof while re-establishing sync @ %"PRId64, pos );
            return 0;
        }
        hb_log( "next_packet: sync lost @ %"PRId64", regained after %"PRId64" bytes",
                pos, pos2 );
        // Loop to read the first packet at the regained sync position.
    }
}
static int title_info_compare_mpls(const void *va, const void *vb)
{
BLURAY_TITLE_INFO *a, *b;
a = *(BLURAY_TITLE_INFO**)va;
b = *(BLURAY_TITLE_INFO**)vb;
return a->playlist - b->playlist;
}
HandBrake-0.10.2/libhb/taskset.c 0000664 0001752 0001752 00000015124 12463330511 016742 0 ustar handbrake handbrake /* taskset.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
#include "ports.h"
#include "taskset.h"
/*
 * Initialize a task set for 'thread_count' worker threads, each with an
 * 'arg_size'-byte private argument area (arg_size may be 0).
 * Returns 1 on success, 0 on failure (everything allocated so far is
 * released via the init_step unwinding switch below).
 */
int
taskset_init( taskset_t *ts, int thread_count, size_t arg_size )
{
    int init_step;      /* how far we got, for cleanup on failure */

    init_step = 0;
    memset( ts, 0, sizeof( *ts ) );
    ts->thread_count = thread_count;
    ts->arg_size = arg_size;
    /* One uint32_t of bitmap per 32 threads, rounded up. */
    ts->bitmap_elements = ( ts->thread_count + 31 ) / 32;
    ts->task_threads = malloc( sizeof( hb_thread_t* ) * ts->thread_count );
    if( ts->task_threads == NULL )
        goto fail;
    init_step++;

    if( arg_size != 0 )
    {
        ts->task_threads_args = malloc( arg_size * ts->thread_count );
        /* Bug fix: check the buffer just allocated (task_threads_args),
         * not task_threads, which was already validated above. */
        if( ts->task_threads_args == NULL )
            goto fail;
    }
    init_step++;

    ts->task_begin_bitmap = malloc( sizeof( uint32_t ) * ts->bitmap_elements );
    if( ts->task_begin_bitmap == NULL )
        goto fail;
    init_step++;

    ts->task_complete_bitmap = malloc( sizeof( uint32_t ) * ts->bitmap_elements );
    if( ts->task_complete_bitmap == NULL )
        goto fail;
    init_step++;

    ts->task_stop_bitmap = malloc( sizeof( uint32_t ) * ts->bitmap_elements );
    if( ts->task_stop_bitmap == NULL )
        goto fail;
    init_step++;

    ts->task_cond_lock = hb_lock_init();
    if( ts->task_cond_lock == NULL)
        goto fail;
    init_step++;

    ts->task_begin = hb_cond_init();
    if( ts->task_begin == NULL)
        goto fail;
    init_step++;

    ts->task_complete = hb_cond_init();
    if( ts->task_complete == NULL)
        goto fail;
    init_step++;

    /*
     * Initialize all arg data to 0.  Only present when arg_size != 0;
     * calling memset(NULL, ...) would be undefined behavior.
     */
    if( ts->task_threads_args != NULL )
        memset(ts->task_threads_args, 0, ts->arg_size * ts->thread_count );

    /*
     * Inialize bitmaps to all bits set. This means that any unused bits
     * in the bitmap are already in the "condition satisfied" state allowing
     * us to test the bitmap 32bits at a time without having to mask off
     * the end.
     */
    memset(ts->task_begin_bitmap, 0xFF, sizeof( uint32_t ) * ts->bitmap_elements );
    memset(ts->task_complete_bitmap, 0xFF, sizeof( uint32_t ) * ts->bitmap_elements );
    memset(ts->task_stop_bitmap, 0, sizeof( uint32_t ) * ts->bitmap_elements );

    /*
     * Important to start off with the threads locked waiting
     * on input, no work completed, and not asked to stop.
     */
    bit_nclear( ts->task_begin_bitmap, 0, ts->thread_count - 1 );
    bit_nclear( ts->task_complete_bitmap, 0, ts->thread_count - 1 );
    bit_nclear( ts->task_stop_bitmap, 0, ts->thread_count - 1 );
    return (1);

fail:
    /* Unwind exactly what was set up; each case falls through to the
     * cleanup for the steps before it. */
    switch (init_step)
    {
        default:
            hb_cond_close( &ts->task_complete );
            /* FALL THROUGH */
        case 7:
            hb_cond_close( &ts->task_begin );
            /* FALL THROUGH */
        case 6:
            hb_lock_close( &ts->task_cond_lock );
            /* FALL THROUGH */
        case 5:
            free( ts->task_stop_bitmap );
            /* FALL THROUGH */
        case 4:
            free( ts->task_complete_bitmap );
            /* FALL THROUGH */
        case 3:
            free( ts->task_begin_bitmap );
            /* FALL THROUGH */
        case 2:
            /* Bug fix: the original tested '== NULL' and so freed only
             * when there was nothing to free, leaking the args buffer on
             * every real failure.  free(NULL) is a no-op, so free
             * unconditionally. */
            free( ts->task_threads_args );
            /* FALL THROUGH */
        case 1:
            free( ts->task_threads );
            /* FALL THROUGH */
        case 0:
            break;
    }
    return (0);
}
/* Create the worker thread for slot 'thr_idx', handing it its per-thread
 * argument area.  Returns 1 on success, 0 on failure. */
int
taskset_thread_spawn( taskset_t *ts, int thr_idx, const char *descr,
                      thread_func_t *func, int priority )
{
    void *args = taskset_thread_args( ts, thr_idx );

    ts->task_threads[thr_idx] = hb_thread_init( descr, func, args, priority );
    return ts->task_threads[thr_idx] != NULL;
}
/*
 * Run one work cycle: release every worker thread, then block until all
 * of them report completion via taskset_thread_complete().
 */
void
taskset_cycle( taskset_t *ts )
{
    hb_lock( ts->task_cond_lock );

    /*
     * Signal all threads that their work is available.
     */
    bit_nset( ts->task_begin_bitmap, 0, ts->thread_count - 1 );
    hb_cond_broadcast( ts->task_begin );

    /*
     * Wait until all threads have completed.  Note that we must
     * loop here as hb_cond_wait() on some platforms (e.g pthead_cond_wait)
     * may unblock prematurely.
     */
    do
    {
        hb_cond_wait( ts->task_complete, ts->task_cond_lock );
    } while ( !allbits_set( ts->task_complete_bitmap, ts->bitmap_elements ) );

    /*
     * Clear completion indications for next time.
     */
    bit_nclear( ts->task_complete_bitmap, 0, ts->thread_count - 1 );

    hb_unlock( ts->task_cond_lock );
}
/*
 * Block current thread until work is available for it.
 * Called by a worker at the top of its loop; the begin bit for this
 * thread is consumed so the worker blocks again next iteration.
 */
void
taskset_thread_wait4start( taskset_t *ts, int thr_idx )
{
    hb_lock( ts->task_cond_lock );
    // Loop guards against spurious wakeups: only proceed once our bit is set.
    while ( bit_is_clear( ts->task_begin_bitmap, thr_idx ) )
        hb_cond_wait( ts->task_begin, ts->task_cond_lock );

    /*
     * We've been released for one run. Insure we block the next
     * time through the loop.
     */
    bit_clear( ts->task_begin_bitmap, thr_idx );
    hb_unlock( ts->task_cond_lock );
}
/*
 * Current thread has completed its work. Indicate completion,
 * and if all threads in this task set have completed, wakeup
 * anyone waiting for this condition.
 */
void
taskset_thread_complete( taskset_t *ts, int thr_idx )
{
    hb_lock( ts->task_cond_lock );
    bit_set( ts->task_complete_bitmap, thr_idx );
    // Only the last thread to finish signals the waiter (taskset_cycle).
    if( allbits_set( ts->task_complete_bitmap, ts->bitmap_elements ) )
    {
        hb_cond_signal( ts->task_complete );
    }
    hb_unlock( ts->task_cond_lock );
}
/*
 * Shut down a task set: ask every worker to stop, wait for them to
 * acknowledge, join the threads, and release all taskset resources.
 */
void
taskset_fini( taskset_t *ts )
{
    int i;

    hb_lock( ts->task_cond_lock );
    /*
     * Tell each thread to stop, and then cleanup.
     */
    bit_nset( ts->task_stop_bitmap, 0, ts->thread_count - 1 );
    bit_nset( ts->task_begin_bitmap, 0, ts->thread_count - 1 );
    hb_cond_broadcast( ts->task_begin );

    /*
     * Wait for all threads to exit.
     *
     * NOTE(review): a single hb_cond_wait() may return on a spurious
     * wakeup (taskset_cycle() loops for exactly that reason) — confirm
     * whether this should also loop on
     * allbits_set(task_complete_bitmap).
     */
    hb_cond_wait( ts->task_complete, ts->task_cond_lock );
    hb_unlock( ts->task_cond_lock );

    /*
     * Clean up taskset memory.
     */
    for( i = 0; i < ts->thread_count; i++)
    {
        hb_thread_close( &ts->task_threads[i] );
    }
    hb_lock_close( &ts->task_cond_lock );
    hb_cond_close( &ts->task_begin );
    hb_cond_close( &ts->task_complete );
    free( ts->task_threads );
    /* free(NULL) is a no-op; no need for the NULL test the original had. */
    free( ts->task_threads_args );
    free( ts->task_begin_bitmap );
    free( ts->task_complete_bitmap );
    free( ts->task_stop_bitmap );
}
HandBrake-0.10.2/libhb/cropscale.c 0000664 0001752 0001752 00000020205 12463330511 017233 0 ustar handbrake handbrake /* cropscale.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
#include "hbffmpeg.h"
#include "common.h"
#include "opencl.h"
/* Per-instance state for the crop & scale filter. */
struct hb_filter_private_s
{
    hb_job_t            * job;            // owning job; source of OpenCL flags

    int                   width_in;       // input frame geometry/format the
    int                   height_in;      //   current sws context was built for
    int                   pix_fmt;
    int                   pix_fmt_out;    // requested output pixel format
    int                   width_out;      // output geometry after crop+scale
    int                   height_out;
    int                   crop[4];        // [0]/[1] top/bottom, [2]/[3] left/right

    /* OpenCL/DXVA2 */
    int                   use_dxva;
    int                   use_decomb;
    int                   use_detelecine;
    hb_oclscale_t       * os;             //ocl scaler handler
    struct SwsContext   * context;        // libswscale context (lazily created)
};
static int hb_crop_scale_init( hb_filter_object_t * filter,
hb_filter_init_t * init );
static int hb_crop_scale_work( hb_filter_object_t * filter,
hb_buffer_t ** buf_in,
hb_buffer_t ** buf_out );
static int hb_crop_scale_info( hb_filter_object_t * filter,
hb_filter_info_t * info );
static void hb_crop_scale_close( hb_filter_object_t * filter );
// Filter descriptor registered with HandBrake's filter pipeline.
hb_filter_object_t hb_filter_crop_scale =
{
    .id            = HB_FILTER_CROP_SCALE,
    .enforce_order = 1,
    .name          = "Crop and Scale",
    .settings      = NULL,
    .init          = hb_crop_scale_init,
    .work          = hb_crop_scale_work,
    .close         = hb_crop_scale_close,
    .info          = hb_crop_scale_info,
};
/*
 * Allocate filter state and derive default output geometry from the
 * init crop values; an optional "w:h:top:bottom:left:right" settings
 * string overrides them.  Writes the resulting geometry back into
 * 'init' for the next stage.  Returns 0.
 */
static int hb_crop_scale_init( hb_filter_object_t * filter,
                               hb_filter_init_t * init )
{
    filter->private_data = calloc( 1, sizeof(struct hb_filter_private_s) );
    hb_filter_private_t * pv = filter->private_data;

    // TODO: add pix format option to settings
    pv->job = init->job;
    pv->pix_fmt_out = init->pix_fmt;
    pv->width_in = init->width;
    pv->height_in = init->height;

    // Default output is the input minus the crop margins.
    pv->width_out = init->width - (init->crop[2] + init->crop[3]);
    pv->height_out = init->height - (init->crop[0] + init->crop[1]);

    /* OpenCL/DXVA2 */
    pv->use_dxva = init->use_dxva;
    pv->use_decomb = init->job->use_decomb;
    pv->use_detelecine = init->job->use_detelecine;

    if (pv->job->use_opencl && pv->job->title->opencl_support)
    {
        pv->os = ( hb_oclscale_t * )malloc( sizeof( hb_oclscale_t ) );
        memset( pv->os, 0, sizeof( hb_oclscale_t ) );
    }

    memcpy( pv->crop, init->crop, sizeof( int[4] ) );
    if( filter->settings )
    {
        sscanf( filter->settings, "%d:%d:%d:%d:%d:%d",
                &pv->width_out, &pv->height_out,
                &pv->crop[0], &pv->crop[1], &pv->crop[2], &pv->crop[3] );
    }
    // Set init values so the next stage in the pipline
    // knows what it will be getting
    // NOTE(review): pv->pix_fmt is still 0 here (from calloc above; it is
    // first assigned in crop_scale()), while the incoming format was saved
    // in pv->pix_fmt_out.  Confirm whether init->pix_fmt is meant to be
    // reset this way.
    init->pix_fmt = pv->pix_fmt;
    init->width = pv->width_out;
    init->height = pv->height_out;
    memcpy( init->crop, pv->crop, sizeof( int[4] ) );

    return 0;
}
/*
 * Report the filter's current output geometry and a human readable
 * description of the crop/scale it performs.  Returns 0.
 */
static int hb_crop_scale_info( hb_filter_object_t * filter,
                               hb_filter_info_t * info )
{
    hb_filter_private_t * pv = filter->private_data;

    if( !pv )
        return 0;

    // Set init values so the next stage in the pipline
    // knows what it will be getting
    memset( info, 0, sizeof( hb_filter_info_t ) );
    info->out.pix_fmt = pv->pix_fmt;
    info->out.width = pv->width_out;
    info->out.height = pv->height_out;
    memcpy( info->out.crop, pv->crop, sizeof( int[4] ) );

    int cropped_width = pv->width_in - ( pv->crop[2] + pv->crop[3] );
    int cropped_height = pv->height_in - ( pv->crop[0] + pv->crop[1] );

    // assumes human_readable_desc is large enough for this worst case —
    // TODO confirm its size in hb_filter_info_t
    sprintf( info->human_readable_desc,
             "source: %d * %d, crop (%d/%d/%d/%d): %d * %d, scale: %d * %d",
             pv->width_in, pv->height_in,
             pv->crop[0], pv->crop[1], pv->crop[2], pv->crop[3],
             cropped_width, cropped_height, pv->width_out, pv->height_out );

    return 0;
}
/* Release everything the filter allocated: OpenCL scaler buffers, the
 * libswscale context, and the private state itself. */
static void hb_crop_scale_close( hb_filter_object_t * filter )
{
    hb_filter_private_t * pv = filter->private_data;

    if ( pv == NULL )
    {
        return;
    }

    /* OpenCL */
    if ( pv->job->use_opencl && pv->job->title->opencl_support && pv->os )
    {
        if ( hb_ocl != NULL )
        {
            HB_OCL_BUF_FREE( hb_ocl, pv->os->bicubic_x_weights );
            HB_OCL_BUF_FREE( hb_ocl, pv->os->bicubic_y_weights );
        }
        free( pv->os );
    }

    if ( pv->context != NULL )
    {
        sws_freeContext( pv->context );
    }

    free( pv );
    filter->private_data = NULL;
}
/* OpenCL */
/*
 * Crop 'in' by pv->crop and scale it to pv->width_out x pv->height_out.
 * Uses the OpenCL bicubic path when enabled and the downscale factor is
 * under 4:1, otherwise falls back to libswscale (rebuilding the context
 * whenever the input geometry or format changes).  Returns a new buffer;
 * timestamps and subtitles are carried over from 'in'.
 */
static hb_buffer_t* crop_scale( hb_filter_private_t * pv, hb_buffer_t * in )
{
    AVPicture           pic_in;
    AVPicture           pic_out;
    AVPicture           pic_crop;
    hb_buffer_t * out;
    out = hb_video_buffer_init( pv->width_out, pv->height_out );

    hb_avpicture_fill( &pic_in, in );
    hb_avpicture_fill( &pic_out, out );

    // Crop; this alters the pointer to the data to point to the
    // correct place for cropped frame
    av_picture_crop( &pic_crop, &pic_in, in->f.fmt,
                     pv->crop[0], pv->crop[2] );

    // Use bicubic OpenCL scaling when selected and when downsampling < 4:1;
    if ((pv->job->use_opencl && pv->job->title->opencl_support) &&
        (pv->width_out * 4 > pv->width_in) &&
        (in->cl.buffer != NULL) && (out->cl.buffer != NULL))
    {
        /* OpenCL */
        hb_ocl_scale(in, out, pv->crop, pv->os);
    }
    else
    {
        if (pv->context == NULL ||
            pv->width_in != in->f.width ||
            pv->height_in != in->f.height ||
            pv->pix_fmt != in->f.fmt)
        {
            // Something changed, need a new scaling context.
            if (pv->context != NULL)
            {
                sws_freeContext(pv->context);
            }
            pv->context = hb_sws_get_context(in->f.width - (pv->crop[2] + pv->crop[3]),
                                             in->f.height - (pv->crop[0] + pv->crop[1]),
                                             in->f.fmt, out->f.width, out->f.height,
                                             out->f.fmt, SWS_LANCZOS|SWS_ACCURATE_RND);
            // Remember the geometry this context was built for.
            pv->width_in = in->f.width;
            pv->height_in = in->f.height;
            pv->pix_fmt = in->f.fmt;
        }

        // Scale pic_crop into pic_render according to the
        // context set up above
        sws_scale(pv->context,
                  (const uint8_t* const*)pic_crop.data, pic_crop.linesize,
                  0, in->f.height - (pv->crop[0] + pv->crop[1]),
                  pic_out.data, pic_out.linesize);
    }

    out->s = in->s;
    hb_buffer_move_subs( out, in );
    return out;
}
/* Filter work function: pass the buffer through untouched when no
 * crop/scale is needed, otherwise hand it to crop_scale(). */
static int hb_crop_scale_work( hb_filter_object_t * filter,
                               hb_buffer_t ** buf_in,
                               hb_buffer_t ** buf_out )
{
    hb_filter_private_t * pv = filter->private_data;
    hb_buffer_t * in = *buf_in;

    /* End-of-stream marker: forward it and signal completion. */
    if ( in->size <= 0 )
    {
        *buf_out = in;
        *buf_in = NULL;
        return HB_FILTER_DONE;
    }

    if ( pv == NULL )
    {
        *buf_out = in;
        *buf_in = NULL;
        return HB_FILTER_OK;
    }

    /* Default the output dimensions from the input when unset. */
    if ( pv->width_out <= 0 || pv->height_out <= 0 )
    {
        pv->width_out  = in->f.width  - (pv->crop[2] + pv->crop[3]);
        pv->height_out = in->f.height - (pv->crop[0] + pv->crop[1]);
    }

    /* OpenCL/DXVA2 */
    /* Pass through untouched when the frame already matches the requested
     * geometry/format and no filter needs the scaler, or when DXVA2 is in
     * use and only the size has to line up. */
    int size_matches = in->f.width  == pv->width_out &&
                       in->f.height == pv->height_out;
    int no_crop      = !pv->crop[0] && !pv->crop[1] &&
                       !pv->crop[2] && !pv->crop[3];

    if ( ( !pv->use_decomb && !pv->use_detelecine && no_crop &&
           in->f.fmt == pv->pix_fmt_out && size_matches ) ||
         ( pv->use_dxva && size_matches ) )
    {
        *buf_out = in;
        *buf_in = NULL;
        return HB_FILTER_OK;
    }

    *buf_out = crop_scale( pv, in );
    return HB_FILTER_OK;
}
HandBrake-0.10.2/libhb/stream.c 0000664 0001752 0001752 00000560624 12470727143 016602 0 ustar handbrake handbrake /* stream.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include
#include
#include
#include "hb.h"
#include "hbffmpeg.h"
#include "lang.h"
#include "libbluray/bluray.h"
#include "vadxva2.h"
// Fully parenthesized so the expansion binds correctly inside larger
// expressions (e.g. "2 * min(a, b)") and with compound arguments.
#define min(a, b) ((a) < (b) ? (a) : (b))
#define HB_MAX_PROBE_SIZE (1*1024*1024)
/*
* This table defines how ISO MPEG stream type codes map to HandBrake
* codecs. It is indexed by the 8 bit stream type and contains the codec
* worker object id and a parameter for that worker proc (ignored except
* for the ffmpeg-based codecs in which case it is the ffmpeg codec id).
*
* Entries with a worker proc id of 0 or a kind of 'U' indicate that HB
* doesn't handle the stream type.
* N - Not used
* U - Unknown (to be determined by further processing)
* A - Audio
* V - Video
* S - Subtitle
* P - PCR
*/
typedef enum { N, U, A, V, P, S } kind_t;

typedef struct {
    kind_t kind; /* not handled / unknown / audio / video */
    int codec; /* HB worker object id of codec */
    int codec_param; /* param for codec (usually ffmpeg codec id) */
    const char* name; /* description of type */
} stream2codec_t;

/* Designated-initializer helper: places an entry at index 'id' so the
 * table below reads as "stream type byte -> codec mapping". */
#define st(id, kind, codec, codec_param, name) \
 [id] = { kind, codec, codec_param, name }

static const stream2codec_t st2codec[256] = {
    st(0x00, U, 0, 0, NULL),
    st(0x01, V, WORK_DECAVCODECV, AV_CODEC_ID_MPEG2VIDEO, "MPEG1"),
    st(0x02, V, WORK_DECAVCODECV, AV_CODEC_ID_MPEG2VIDEO, "MPEG2"),
    st(0x03, A, HB_ACODEC_FFMPEG, AV_CODEC_ID_MP2, "MPEG1"),
    st(0x04, A, HB_ACODEC_FFMPEG, AV_CODEC_ID_MP2, "MPEG2"),
    st(0x05, N, 0, 0, "ISO 13818-1 private section"),
    st(0x06, U, 0, 0, "ISO 13818-1 PES private data"),
    st(0x07, N, 0, 0, "ISO 13522 MHEG"),
    st(0x08, N, 0, 0, "ISO 13818-1 DSM-CC"),
    st(0x09, N, 0, 0, "ISO 13818-1 auxiliary"),
    st(0x0a, N, 0, 0, "ISO 13818-6 encap"),
    st(0x0b, N, 0, 0, "ISO 13818-6 DSM-CC U-N msgs"),
    st(0x0c, N, 0, 0, "ISO 13818-6 Stream descriptors"),
    st(0x0d, N, 0, 0, "ISO 13818-6 Sections"),
    st(0x0e, N, 0, 0, "ISO 13818-1 auxiliary"),
    st(0x0f, A, HB_ACODEC_FFAAC, AV_CODEC_ID_AAC, "AAC"),
    st(0x10, V, WORK_DECAVCODECV, AV_CODEC_ID_MPEG4, "MPEG4"),
    st(0x11, A, HB_ACODEC_FFMPEG, AV_CODEC_ID_AAC_LATM, "LATM AAC"),
    st(0x12, U, 0, 0, "MPEG4 generic"),
    st(0x14, N, 0, 0, "ISO 13818-6 DSM-CC download"),
    st(0x1b, V, WORK_DECAVCODECV, AV_CODEC_ID_H264, "H.264"),
    st(0x80, U, HB_ACODEC_FFMPEG, AV_CODEC_ID_PCM_BLURAY, "Digicipher II Video"),
    st(0x81, A, HB_ACODEC_AC3, AV_CODEC_ID_AC3, "AC3"),
    st(0x82, A, HB_ACODEC_DCA, AV_CODEC_ID_DTS, "DTS"),
    // 0x83 can be LPCM or BD TrueHD. Set to 'unknown' till we know more.
    st(0x83, U, HB_ACODEC_LPCM, 0, "LPCM"),
    // BD E-AC3 Primary audio
    st(0x84, U, 0, 0, "SDDS"),
    st(0x85, U, 0, 0, "ATSC Program ID"),
    // 0x86 can be BD DTS-HD/DTS. Set to 'unknown' till we know more.
    st(0x86, U, HB_ACODEC_DCA_HD, AV_CODEC_ID_DTS, "DTS-HD MA"),
    st(0x87, A, HB_ACODEC_FFMPEG, AV_CODEC_ID_EAC3, "E-AC3"),
    st(0x8a, A, HB_ACODEC_DCA, AV_CODEC_ID_DTS, "DTS"),
    st(0x90, S, WORK_DECPGSSUB, 0, "PGS Subtitle"),
    // 0x91 can be AC3 or BD Interactive Graphics Stream.
    st(0x91, U, 0, 0, "AC3/IGS"),
    st(0x92, N, 0, 0, "Subtitle"),
    st(0x94, U, 0, 0, "SDDS"),
    st(0xa0, V, 0, 0, "MSCODEC"),
    // BD E-AC3 Secondary audio
    st(0xa1, U, 0, 0, "E-AC3"),
    // BD DTS-HD Secondary audio
    st(0xa2, U, 0, 0, "DTS-HD LBR"),
    st(0xea, V, WORK_DECAVCODECV, AV_CODEC_ID_VC1, "VC-1"),
};
#undef st
/* Container kinds a source file can resolve to after probing. */
typedef enum {
    hb_stream_type_unknown = 0,
    transport,      // MPEG-2 transport stream
    program,        // MPEG-2 program stream
    ffmpeg          // anything else, handled via libavformat
} hb_stream_type_t;

#define MAX_PS_PROBE_SIZE (5*1024*1024)
#define kMaxNumberPMTStreams 32

/* Per-PID state for one elementary stream in a transport stream. */
typedef struct {
    hb_buffer_t *buf;           // PES payload being accumulated
    hb_buffer_t *extra_buf;
    int8_t skipbad;             // non-zero: drop data until next good unit
    int8_t continuity;          // last seen TS continuity counter — TODO confirm
    uint8_t pkt_summary[8];
    int pid;                    // transport stream PID
    uint8_t is_pcr;             // non-zero when this PID carries the PCR
    int pes_list;               // head index into the hb_pes_stream_t list
} hb_pes_stream_t_fwd_note_unused_;  /* see note below */

/* NOTE(review): the struct above is hb_ts_stream_t in the original —
 * kept byte-identical below; the preceding line is a comment artifact. */
typedef struct {
    int map_idx;
    int stream_id;
    uint8_t stream_id_ext;
    uint8_t stream_type;
    kind_t stream_kind;
    int lang_code;
    uint32_t format_id;
#define TS_FORMAT_ID_AC3 (('A' << 24) | ('C' << 16) | ('-' << 8) | '3')
    int codec; // HB worker object id of codec
    int codec_param; // param for codec (usually ffmpeg codec id)
    char codec_name[80];
    int next; // next pointer for list
    // hb_ts_stream_t points to a list of
    // hb_pes_stream_t
    hb_buffer_t *probe_buf;
    int probe_next_size;
} hb_pes_stream_t;
/* Complete demuxer state for one open MPEG TS/PS (or lavf-wrapped) file. */
struct hb_stream_s
{
    int     scan;              // non-zero during the title scan pass
    int     frames;            /* video frames so far */
    int     errors;            /* total errors so far */
    int     last_error_frame;  /* frame # at last error message */
    int     last_error_count;  /* # errors at last error message */
    int     packetsize;        /* Transport Stream packet size */

    int     need_keyframe;     // non-zero if want to start at a keyframe
    int     chapter;           /* Chapter that we are currently in */
    int64_t chapter_end;       /* HB time that the current chapter ends */

    // Transport-stream specific state.
    struct
    {
        uint8_t found_pcr;          // non-zero if we've found at least one pcr
        int     pcr_out;            // sequence number of most recent output pcr
        int     pcr_in;             // sequence number of most recent input pcr
        int     pcr_discontinuity;  // sequence number of last discontinuity
        int     pcr_current;        // last discontinuity sent to reader
        int64_t pcr;                // most recent input pcr
        int64_t last_timestamp;     // used for discontinuity detection when
                                    // there are no PCRs

        uint8_t *packet;            // buffer for one TS packet
        hb_ts_stream_t *list;       // dynamic array of per-PID stream state
        int     count;
        int     alloc;
    } ts;

    // Program-stream specific state.
    struct
    {
        uint8_t found_scr;          // non-zero if we've found at least one scr
        int64_t scr;                // most recent input scr
        hb_pes_stream_t *list;      // dynamic array of PES stream state
        int     count;
        int     alloc;
    } pes;

    /*
     * Stuff before this point is dynamic state updated as we read the
     * stream. Stuff after this point is stream description state that
     * we learn during the initial scan but cache so it can be
     * reused during the conversion read.
     */
    uint8_t has_IDRs;           // # IDRs found during duration scan
    uint8_t ts_flags;           // stream characteristics:
#define         TS_HAS_PCR  (1 << 0)    // at least one PCR seen
#define         TS_HAS_RAP  (1 << 1)    // Random Access Point bit seen
#define         TS_HAS_RSEI (1 << 2)    // "Restart point" SEI seen

    char    *path;              // strdup'd input path (owned)
    FILE    *file_handle;       // owned; closed in hb_stream_delete_dynamic
    hb_stream_type_t hb_stream_type;
    hb_title_t *title;

    AVFormatContext *ffmpeg_ic; // only used when hb_stream_type == ffmpeg
    AVPacket *ffmpeg_pkt;
    uint8_t ffmpeg_video_id;

    uint32_t reg_desc;          // 4 byte registration code that identifies
                                // stream semantics

    // Program association / map tables parsed from the TS.
    struct
    {
        unsigned short program_number;
        unsigned short program_map_PID;
    } pat_info[kMaxNumberPMTStreams];
    int     ts_number_pat_entries;

    struct
    {
        int     reading;
        unsigned char *tablebuf;
        unsigned int tablepos;
        unsigned char current_continuity_counter;

        unsigned int PCR_PID;   // PID carrying the program clock reference
    } pmt_info;
};
/* Bit-reader state used by the bits_* helpers declared below
 * (implementations are outside this chunk). */
typedef struct {
    uint8_t *buf;   // source bytes
    uint32_t val;   // bit accumulator -- exact semantics defined by bits_init/bits_get
    int pos;        // current read position -- see bits_get/bits_peek
    int size;       // buffer size passed to bits_init
} bitbuf_t;
/* Parsed fields of one PES packet header (filled in by hb_parse_ps). */
typedef struct
{
    uint8_t has_stream_id_ext;  // non-zero if an extended stream id was present
    uint8_t stream_id;          // PES stream id byte (e.g. 0xbd = private stream 1)
    uint8_t stream_id_ext;      // extended stream id (0 if absent)
    uint8_t bd_substream_id;    // Blu-ray substream id when stream_id == 0xbd
    int64_t pts;                // presentation timestamp; AV_NOPTS_VALUE if absent
    int64_t dts;                // decode timestamp; presumably AV_NOPTS_VALUE if absent
    int64_t scr;                // system clock reference
    int     header_len;         // PES header length in bytes
    int     packet_len;         // PES packet length from the header
} hb_pes_info_t;
/***********************************************************************
* Local prototypes
**********************************************************************/
static void hb_stream_duration(hb_stream_t *stream, hb_title_t *inTitle);
static off_t align_to_next_packet(hb_stream_t *stream);
static int64_t pes_timestamp( const uint8_t *pes );
static int hb_ts_stream_init(hb_stream_t *stream);
static hb_buffer_t * hb_ts_stream_decode(hb_stream_t *stream);
static void hb_init_audio_list(hb_stream_t *stream, hb_title_t *title);
static void hb_init_subtitle_list(hb_stream_t *stream, hb_title_t *title);
static int hb_ts_stream_find_pids(hb_stream_t *stream);
static void hb_ps_stream_init(hb_stream_t *stream);
static hb_buffer_t * hb_ps_stream_decode(hb_stream_t *stream);
static void hb_ps_stream_find_streams(hb_stream_t *stream);
static int hb_ps_read_packet( hb_stream_t * stream, hb_buffer_t *b );
static int update_ps_streams( hb_stream_t * stream, int stream_id, int stream_id_ext, int stream_type, int in_kind );
static int update_ts_streams( hb_stream_t * stream, int pid, int stream_id_ext, int stream_type, int in_kind, int *pes_idx );
static void update_pes_kind( hb_stream_t * stream, int idx );
static int ffmpeg_open( hb_stream_t *stream, hb_title_t *title, int scan );
static void ffmpeg_close( hb_stream_t *d );
static hb_title_t *ffmpeg_title_scan( hb_stream_t *stream, hb_title_t *title );
hb_buffer_t *hb_ffmpeg_read( hb_stream_t *stream );
static int ffmpeg_seek( hb_stream_t *stream, float frac );
static int ffmpeg_seek_ts( hb_stream_t *stream, int64_t ts );
static inline unsigned int bits_get(bitbuf_t *bb, int bits);
static inline void bits_init(bitbuf_t *bb, uint8_t* buf, int bufsize, int clear);
static inline unsigned int bits_peek(bitbuf_t *bb, int bits);
static inline int bits_eob(bitbuf_t *bb);
static inline int bits_read_ue(bitbuf_t *bb );
static void pes_add_audio_to_title(hb_stream_t *s, int i, hb_title_t *t, int sort);
static int hb_parse_ps( hb_stream_t *stream, uint8_t *buf, int len, hb_pes_info_t *pes_info );
static void hb_ts_resolve_pid_types(hb_stream_t *stream);
static void hb_ps_resolve_stream_types(hb_stream_t *stream);
void hb_ts_stream_reset(hb_stream_t *stream);
void hb_ps_stream_reset(hb_stream_t *stream);
/*
* logging routines.
* these frontend hb_log because transport streams can have a lot of errors
* so we want to rate limit messages. this routine limits the number of
* messages to at most one per minute of video. other errors that occur
* during the minute are counted & the count is output with the next
* error msg we print.
*/
static void ts_warn_helper( hb_stream_t *stream, char *log, va_list args )
{
    // Rate-limit error logging to roughly one message per minute of
    // 30fps video; errors in between are counted and reported in bulk
    // with the next message.
    stream->errors++;
    if ( stream->frames - stream->last_error_frame < 30*60 )
        return;

    char msg[256];
    vsnprintf( msg, sizeof(msg), log, args );

    int new_errors = stream->errors - stream->last_error_count;
    if ( new_errors < 10 )
    {
        hb_log( "stream: error near frame %d: %s", stream->frames, msg );
    }
    else
    {
        // Many errors since the last report: include count and rate.
        double err_pcnt = (double)new_errors * 100. /
                          (stream->frames - stream->last_error_frame);
        hb_log( "stream: %d new errors (%.0f%%) up to frame %d: %s",
                new_errors, err_pcnt, stream->frames, msg );
    }
    stream->last_error_frame = stream->frames;
    stream->last_error_count = stream->errors;
}
static void ts_warn( hb_stream_t*, char*, ... ) HB_WPRINTF(2,3);
static void ts_err( hb_stream_t*, int, char*, ... ) HB_WPRINTF(3,4);
/* Rate-limited warning; formatting is delegated to ts_warn_helper. */
static void ts_warn( hb_stream_t *stream, char *log, ... )
{
    va_list ap;

    va_start( ap, log );
    ts_warn_helper( stream, log, ap );
    va_end( ap );
}
/* Combined stream identifier: extended substream id in the high bits,
 * base PES stream id in the low 16. */
static int get_id(hb_pes_stream_t *pes)
{
    return pes->stream_id + ( pes->stream_id_ext << 16 );
}
/* Linear search of the PES list for a matching combined id.
 * Returns the index, or -1 if not found. */
static int index_of_id(hb_stream_t *stream, int id)
{
    int ii;

    for ( ii = 0; ii < stream->pes.count; ++ii )
    {
        if ( get_id( &stream->pes.list[ii] ) == id )
        {
            return ii;
        }
    }
    return -1;
}
/* Find the TS-list index for PID 'pid'; -1 if the PID is unknown. */
static int index_of_pid(hb_stream_t *stream, int pid)
{
    int ii;

    for ( ii = 0; ii < stream->ts.count; ++ii )
    {
        if ( stream->ts.list[ii].pid == pid )
        {
            return ii;
        }
    }
    return -1;
}
/* Find the PES-list index for stream id 'id' / extended id 'sid'.
 * Falls back to a stream with no extended id if there is no exact
 * match; returns -1 if neither exists. */
static int index_of_ps_stream(hb_stream_t *stream, int id, int sid)
{
    int ii;

    // First pass: exact match on both stream_id and stream_id_ext.
    for ( ii = 0; ii < stream->pes.count; ++ii )
    {
        hb_pes_stream_t *pes = &stream->pes.list[ii];
        if ( pes->stream_id == id && pes->stream_id_ext == sid )
        {
            return ii;
        }
    }
    // Second pass: accept a stream with the right id and no extended id.
    for ( ii = 0; ii < stream->pes.count; ++ii )
    {
        hb_pes_stream_t *pes = &stream->pes.list[ii];
        if ( pes->stream_id == id && pes->stream_id_ext == 0 )
        {
            return ii;
        }
    }
    return -1;
}
/* Kind (A/V/S/...) of the elementary stream on TS index 'idx'.
 * All PES substreams carried on one TS PID share the same kind, so the
 * first entry on the pes list is representative. Returns U when no
 * substream is attached. */
static kind_t ts_stream_kind( hb_stream_t * stream, int idx )
{
    int pes_idx = stream->ts.list[idx].pes_list;

    if ( pes_idx == -1 )
    {
        return U;
    }
    return stream->pes.list[pes_idx].stream_kind;
}
/* Return the ISO 13818-1 stream_type byte of the elementary stream
 * carried on TS index 'idx', or 0x00 when no PES substream is attached.
 *
 * Bug fix: the return type was declared kind_t, but the value returned
 * is the 8-bit stream_type (e.g. 0x1b, 0x83), not a kind_t enumerator.
 * Declared as int to match what it actually returns; callers compare
 * the result against stream-type constants, so this is source and
 * binary compatible. */
static int ts_stream_type( hb_stream_t * stream, int idx )
{
    int pes_idx = stream->ts.list[idx].pes_list;

    if ( pes_idx != -1 )
    {
        // All substreams on one TS PID share the same stream type;
        // the first entry on the pes list is representative.
        return stream->pes.list[pes_idx].stream_type;
    }
    return 0x00;
}
/* Index of the first video entry in the PES list; -1 if there is none. */
static int pes_index_of_video(hb_stream_t *stream)
{
    int ii;

    for ( ii = 0; ii < stream->pes.count; ++ii )
    {
        if ( stream->pes.list[ii].stream_kind == V )
        {
            return ii;
        }
    }
    return -1;
}
/* Index of the first video PID in the TS list; -1 if there is none. */
static int ts_index_of_video(hb_stream_t *stream)
{
    int ii;

    for ( ii = 0; ii < stream->ts.count; ++ii )
    {
        if ( ts_stream_kind( stream, ii ) == V )
        {
            return ii;
        }
    }
    return -1;
}
/* Log a rate-limited error for TS stream 'curstream' and put it into
 * error-recovery mode: drop data and force a continuity re-sync. */
static void ts_err( hb_stream_t *stream, int curstream, char *log, ... )
{
    va_list ap;

    va_start( ap, log );
    ts_warn_helper( stream, log, ap );
    va_end( ap );

    hb_ts_stream_t *ts = &stream->ts.list[curstream];
    ts->skipbad = 1;
    ts->continuity = -1;
}
/* A legal MPEG program stream begins with a Pack start code
 * (0x000001ba) in its first four bytes. */
static int check_ps_sync(const uint8_t *buf)
{
    return buf[0] == 0x00 && buf[1] == 0x00 &&
           buf[2] == 0x01 && buf[3] == 0xba;
}
/* A legal MPEG program stream has a Pack header followed by some other
 * start code. The Pack start code itself has already been verified, so
 * skip over the PACK and check for a start code prefix right after it.
 * Returns non-zero when the layout looks legal. */
static int check_ps_sc(const uint8_t *buf)
{
    int pos;

    if ( ( buf[4] >> 4 ) == 0x02 )
    {
        // MPEG-1 pack header: verify the fixed marker bits to reduce
        // the chance of being fooled by random data, then skip the
        // 12-byte PACK.
        if ( ( buf[4]  & 0xf1 ) != 0x21 ||
             ( buf[6]  & 0x01 ) != 0x01 ||
             ( buf[8]  & 0x01 ) != 0x01 ||
             ( buf[9]  & 0x80 ) != 0x80 ||
             ( buf[11] & 0x01 ) != 0x01 )
        {
            return 0;
        }
        pos = 12;
    }
    else
    {
        // MPEG-2 pack header: check its marker bits, then skip the
        // 14-byte PACK plus any stuffing bytes.
        if ( ( buf[4]  & 0xC4 ) != 0x44 ||
             ( buf[6]  & 0x04 ) != 0x04 ||
             ( buf[8]  & 0x04 ) != 0x04 ||
             ( buf[9]  & 0x01 ) != 0x01 ||
             ( buf[12] & 0x03 ) != 0x03 )
        {
            return 0;
        }
        pos = 14 + ( buf[13] & 0x7 );
    }
    // A start code prefix (00 00 01) must follow the PACK.
    return buf[pos] == 0x00 && buf[pos+1] == 0x00 && buf[pos+2] == 0x01;
}
/* A TS packet must start with the sync byte 0x47 and carry a legal
 * (non-zero) adaptation field control. */
static int check_ts_sync(const uint8_t *buf)
{
    if ( buf[0] != 0x47 )
    {
        return 0;
    }
    return ( ( buf[3] & 0x30 ) >> 4 ) > 0;
}
/* Verify that 'count' consecutive packets of size 'psize' all start
 * with a valid TS sync pattern. */
static int have_ts_sync(const uint8_t *buf, int psize, int count)
{
    int i;

    for ( i = 0; i < count; i++ )
    {
        if ( !check_ts_sync( buf + i * psize ) )
        {
            return 0;
        }
    }
    return 1;
}
/* Transport streams repeat a sync byte every packet. Scan the first 8KB
 * of 'buf' for 16 consecutive, correctly spaced sync patterns at each of
 * the common packet sizes. Returns 0 on failure; otherwise the packet
 * size is in the low byte and the sync offset in the remaining bits. */
static int hb_stream_check_for_ts(const uint8_t *buf)
{
    static const int sizes[] = { 188, 192, 204, 208 };
    const int count = 16;
    int offset, i;

    for ( offset = 0; offset < 8*1024 - count*188; ++offset )
    {
        for ( i = 0; i < 4; i++ )
        {
            if ( have_ts_sync( buf + offset, sizes[i], count ) )
            {
                return sizes[i] | ( offset << 8 );
            }
        }
    }
    return 0;
}
/* Probe up to the first 512KB of the file for an MPEG program stream:
 * a PACK header followed by a PES packet whose length points at another
 * start code prefix. Returns 1 if the file looks like a PS, 0 otherwise.
 *
 * Bug fix: the pack-header classification and stuffing-length reads
 * used buf[4]/buf[13] (the start of the read buffer) instead of
 * b[4]/b[13] (the candidate pack at buf+offset), which is wrong for
 * every non-zero offset. */
static int hb_stream_check_for_ps(hb_stream_t *stream)
{
    uint8_t buf[2048*4];
    uint8_t sc_buf[4];
    int pos = 0;

    fseek(stream->file_handle, 0, SEEK_SET);

    // program streams should start with a PACK then some other mpeg start
    // code (usually a SYS but that might be missing if we only have a clip).
    while (pos < 512 * 1024)
    {
        int offset;

        if ( fread(buf, 1, sizeof(buf), stream->file_handle) != sizeof(buf) )
            return 0;

        for ( offset = 0; offset < 8*1024-27; ++offset )
        {
            if ( check_ps_sync( &buf[offset] ) && check_ps_sc( &buf[offset] ) )
            {
                int pes_offset, prev, data_len;
                uint8_t sid;
                uint8_t *b = buf+offset;

                // Skip the pack header of the candidate at 'b'.
                int mark = b[4] >> 4;
                if ( mark == 0x02 )
                {
                    // mpeg-1 pack header
                    pes_offset = 12;
                }
                else
                {
                    // mpeg-2 pack header (14 bytes + stuffing)
                    pes_offset = 14 + ( b[13] & 0x7 );
                }
                b += pes_offset;
                // Get the next stream id
                sid = b[3];
                data_len = (b[4] << 8) + b[5];
                if ( data_len && sid > 0xba && sid < 0xf9 )
                {
                    // Seek past this PES packet; if another start code
                    // prefix follows it, this really is a program stream.
                    prev = ftell( stream->file_handle );
                    pos = prev - ( sizeof(buf) - offset );
                    pos += pes_offset + 6 + data_len;
                    fseek( stream->file_handle, pos, SEEK_SET );
                    if ( fread(sc_buf, 1, 4, stream->file_handle) != 4 )
                        return 0;
                    if (sc_buf[0] == 0x00 && sc_buf[1] == 0x00 &&
                        sc_buf[2] == 0x01)
                    {
                        return 1;
                    }
                    fseek( stream->file_handle, prev, SEEK_SET );
                }
            }
        }
        // Overlap reads by 27 bytes so a PACK straddling the buffer
        // boundary is not missed.
        fseek( stream->file_handle, -27, SEEK_CUR );
        pos = ftell( stream->file_handle );
    }
    return 0;
}
/* Probe the start of the file to decide whether it is an MPEG transport
 * or program stream, initializing the matching demuxer state. Returns 1
 * on success, 0 otherwise (the caller then falls back to libavformat). */
static int hb_stream_get_type(hb_stream_t *stream)
{
    uint8_t buf[2048*4];

    if ( fread(buf, 1, sizeof(buf), stream->file_handle) != sizeof(buf) )
    {
        return 0;
    }
#ifdef USE_HWD
    if ( hb_gui_use_hwd_flag == 1 )
        return 0;
#endif
    int psize = hb_stream_check_for_ts(buf);
    if ( psize != 0 )
    {
        int offset = psize >> 8;
        psize &= 0xff;
        hb_log("file is MPEG Transport Stream with %d byte packets"
               " offset %d bytes", psize, offset);
        stream->packetsize = psize;
        stream->hb_stream_type = transport;
        if (hb_ts_stream_init(stream) == 0)
            return 1;
    }
    else if ( hb_stream_check_for_ps(stream) != 0 )
    {
        hb_log("file is MPEG Program Stream");
        stream->hb_stream_type = program;
        hb_ps_stream_init(stream);
        // We default to mpeg codec for ps streams if no
        // video found in program stream map
        return 1;
    }
    return 0;
}
/* Release everything that can be rebuilt on a later open: the file
 * handle, the TS packet buffer and the per-stream packet buffers. */
static void hb_stream_delete_dynamic( hb_stream_t *d )
{
    int i;

    if ( d->file_handle )
    {
        fclose( d->file_handle );
        d->file_handle = NULL;
    }
    if ( d->ts.packet )
    {
        free( d->ts.packet );
        d->ts.packet = NULL;
    }
    if ( d->ts.list == NULL )
    {
        return;
    }
    for ( i = 0; i < d->ts.count; i++ )
    {
        hb_ts_stream_t *ts = &d->ts.list[i];
        if ( ts->buf )
        {
            // extra_buf is allocated whenever buf is, so close both here.
            hb_buffer_close( &ts->buf );
            hb_buffer_close( &ts->extra_buf );
            ts->buf = NULL;
            ts->extra_buf = NULL;
        }
    }
}
/* Free the dynamic state, then the stream descriptor itself. */
static void hb_stream_delete( hb_stream_t *d )
{
    hb_stream_delete_dynamic( d );
    free( d->path );
    free( d->ts.list );
    free( d->pes.list );
    free( d );
}
/* Decide whether audio elementary stream 'id'/'stream_id_ext' should be
 * dropped. Returns 1 when no audio entry on the title references it
 * (and it is not the PCR PID, which must always be kept). */
static int audio_inactive( hb_stream_t *stream, int id, int stream_id_ext )
{
    int ii;

    if ( id < 0 )
    {
        // PID declared inactive by hb_stream_title_scan
        return 1;
    }
    if ( id == stream->pmt_info.PCR_PID )
    {
        // PCR PID is always active
        return 0;
    }
    for ( ii = 0; ii < hb_list_count( stream->title->list_audio ); ++ii )
    {
        hb_audio_t *audio = hb_list_item( stream->title->list_audio, ii );
        if ( audio->id == ( ( stream_id_ext << 16 ) | id ) )
        {
            return 0;
        }
    }
    return 1;
}
/* when the file was first opened we made entries for all the audio elementary
* streams we found in it. Streams that were later found during the preview scan
* now have an audio codec, type, rate, etc., associated with them. At the end
* of the scan we delete all the audio entries that weren't found by the scan
* or don't have a format we support. This routine deletes audio entry 'indx'
* by setting its PID to an invalid value so no packet will match it. (We can't
* move any of the entries since the index of the entry is used as the id
* of the media stream for HB. */
/* Deactivate TS entry 'indx' by negating its PID so no packet will ever
 * match it again (entries can't be moved: the index is the HB stream id). */
static void hb_stream_delete_ts_entry(hb_stream_t *stream, int indx)
{
    hb_ts_stream_t *ts = &stream->ts.list[indx];

    if ( ts->pid > 0 )
    {
        ts->pid = -ts->pid;
    }
}
/* Deactivate TS entry 'indx' only if every PES substream hanging off it
 * has already been deleted. Returns 1 if the entry is (now) inactive. */
static int hb_stream_try_delete_ts_entry(hb_stream_t *stream, int indx)
{
    int ii;

    if ( stream->ts.list[indx].pid < 0 )
    {
        return 1;   // already deleted
    }
    for ( ii = stream->ts.list[indx].pes_list; ii != -1;
          ii = stream->pes.list[ii].next )
    {
        if ( stream->pes.list[ii].stream_id >= 0 )
        {
            return 0;   // a live substream remains
        }
    }
    stream->ts.list[indx].pid = -stream->ts.list[indx].pid;
    return 1;
}
/* Deactivate PES entry 'indx' by negating its stream id so it will
 * never match an incoming packet again. */
static void hb_stream_delete_ps_entry(hb_stream_t *stream, int indx)
{
    hb_pes_stream_t *pes = &stream->pes.list[indx];

    if ( pes->stream_id > 0 )
    {
        pes->stream_id = -pes->stream_id;
    }
}
/* After the preview scan, delete every stream entry that probing did not
 * resolve to usable audio/video, and every audio stream the title does
 * not reference. Deletion is done by negating ids/PIDs (see the comment
 * above hb_stream_delete_ts_entry); the file is then rewound. */
static void prune_streams(hb_stream_t *d)
{
    if ( d->hb_stream_type == transport )
    {
        int ii, jj;
        for ( ii = 0; ii < d->ts.count; ii++)
        {
            // If probing didn't find audio or video, and the pid
            // is not the PCR, remove the track
            if ( ts_stream_kind ( d, ii ) == U &&
                 !d->ts.list[ii].is_pcr )
            {
                hb_stream_delete_ts_entry(d, ii);
                continue;
            }
            if ( ts_stream_kind ( d, ii ) == A )
            {
                // Delete each unreferenced audio substream; then drop
                // the whole PID if no live substream remains on it.
                for ( jj = d->ts.list[ii].pes_list; jj != -1;
                      jj = d->pes.list[jj].next )
                {
                    if ( audio_inactive( d, d->pes.list[jj].stream_id,
                                         d->pes.list[jj].stream_id_ext ) )
                    {
                        hb_stream_delete_ps_entry(d, jj);
                    }
                }
                if ( !d->ts.list[ii].is_pcr &&
                     hb_stream_try_delete_ts_entry(d, ii) )
                {
                    continue;
                }
            }
        }
        // reset to beginning of file and reset some stream
        // state information
        hb_stream_seek( d, 0. );
    }
    else if ( d->hb_stream_type == program )
    {
        int ii;
        for ( ii = 0; ii < d->pes.count; ii++)
        {
            // If probing didn't find audio or video, remove the track
            if ( d->pes.list[ii].stream_kind == U )
            {
                hb_stream_delete_ps_entry(d, ii);
            }
            if ( d->pes.list[ii].stream_kind == A &&
                 audio_inactive( d, d->pes.list[ii].stream_id,
                                 d->pes.list[ii].stream_id_ext ) )
            {
                // this PID isn't wanted (we don't have a codec for it
                // or scan didn't find audio parameters)
                hb_stream_delete_ps_entry(d, ii);
                continue;
            }
        }
        // reset to beginning of file and reset some stream
        // state information
        hb_stream_seek( d, 0. );
    }
}
/***********************************************************************
* hb_stream_open
***********************************************************************
*
**********************************************************************/
/* Open 'path' and return a stream reference if it is something we can
 * deal with (MPEG2 PS, TS, or anything libavformat accepts); NULL
 * otherwise. 'title' may carry flags from a prior scan; when 'scan' is
 * zero, streams not referenced by the title are pruned.
 * On success the caller owns the returned hb_stream_t and must release
 * it with hb_stream_close(). */
hb_stream_t * hb_stream_open( char *path, hb_title_t *title, int scan )
{
    FILE *f = hb_fopen(path, "rb");
    if ( f == NULL )
    {
        hb_log( "hb_stream_open: open %s failed", path );
        return NULL;
    }

    hb_stream_t *d = calloc( sizeof( hb_stream_t ), 1 );
    if ( d == NULL )
    {
        fclose( f );
        hb_log( "hb_stream_open: can't allocate space for %s stream state", path );
        return NULL;
    }

    if( title && !( title->flags & HBTF_NO_IDR ) )
    {
        d->has_IDRs = 1;
    }

    /*
     * If it's something we can deal with (MPEG2 PS or TS) return a stream
     * reference structure & null otherwise.
     */
    d->file_handle = f;
    d->title = title;
    d->scan = scan;
    d->path = strdup( path );
    if (d->path != NULL )
    {
        if ( hb_stream_get_type( d ) != 0 )
        {
            if( !scan )
            {
                prune_streams( d );
            }
            // reset to beginning of file and reset some stream
            // state information
            hb_stream_seek( d, 0. );
            return d;
        }
        // Not a PS/TS we handle natively: close our handle and let
        // libavformat try (ffmpeg_open reopens the file itself).
        fclose( d->file_handle );
        d->file_handle = NULL;
        if ( ffmpeg_open( d, title, scan ) )
        {
            return d;
        }
    }
    // Failure path: release whatever was acquired above.
    if ( d->file_handle )
    {
        fclose( d->file_handle );
    }
    if (d->path)
    {
        free( d->path );
    }
    hb_log( "hb_stream_open: open %s failed", path );
    free( d );
    return NULL;
}
/* Append a slot to the TS stream list, growing the array (doubling,
 * starting at 32 entries) when full. New slots are zeroed with pid,
 * continuity and pes_list marked invalid. Returns the new index. */
static int new_pid( hb_stream_t * stream )
{
    if ( stream->ts.count == stream->ts.alloc )
    {
        int grow = stream->ts.alloc ? stream->ts.alloc * 2 : 32;
        int ii;

        // NOTE(review): realloc result is unchecked; an allocation
        // failure here would crash -- consistent with surrounding code.
        stream->ts.list = realloc( stream->ts.list,
                                   sizeof( hb_ts_stream_t ) * grow );
        for ( ii = stream->ts.alloc; ii < grow; ii++ )
        {
            memset(&stream->ts.list[ii], 0, sizeof( hb_ts_stream_t ));
            stream->ts.list[ii].continuity = -1;
            stream->ts.list[ii].pid = -1;
            stream->ts.list[ii].pes_list = -1;
        }
        stream->ts.alloc = grow;
    }
    return stream->ts.count++;
}
/* Append a slot to the PES stream list, growing the array (doubling,
 * starting at 32 entries) when full. New slots are zeroed with
 * stream_id and next marked invalid. Returns the new index. */
static int new_pes( hb_stream_t * stream )
{
    if ( stream->pes.count == stream->pes.alloc )
    {
        int grow = stream->pes.alloc ? stream->pes.alloc * 2 : 32;
        int ii;

        // NOTE(review): realloc result is unchecked; an allocation
        // failure here would crash -- consistent with surrounding code.
        stream->pes.list = realloc( stream->pes.list,
                                    sizeof( hb_pes_stream_t ) * grow );
        for ( ii = stream->pes.alloc; ii < grow; ii++ )
        {
            memset(&stream->pes.list[ii], 0, sizeof( hb_pes_stream_t ));
            stream->pes.list[ii].stream_id = -1;
            stream->pes.list[ii].next = -1;
        }
        stream->pes.alloc = grow;
    }
    return stream->pes.count++;
}
/* Build a transport-stream reader for a Blu-ray title using the PIDs
 * already discovered by the BD scan (no file probing needed; the BD
 * layer feeds us 192-byte packets). Returns NULL on allocation failure. */
hb_stream_t * hb_bd_stream_open( hb_title_t *title )
{
    int i;

    hb_stream_t *d = calloc( sizeof( hb_stream_t ), 1 );
    if ( d == NULL )
    {
        hb_error( "hb_bd_stream_open: can't allocate space for stream state" );
        return NULL;
    }

    d->file_handle = NULL;
    d->title = title;
    d->path = NULL;
    d->ts.packet = NULL;

    // Video stream.
    update_ts_streams( d, title->video_id, 0,
                       title->video_stream_type, V, NULL );

    // Audio streams.
    hb_audio_t *audio;
    for ( i = 0; ( audio = hb_list_item( title->list_audio, i ) ); ++i )
    {
        update_ts_streams( d, audio->id & 0xFFFF,
                           audio->config.in.substream_type,
                           audio->config.in.stream_type, A, NULL );
    }

    // Subtitle streams. CC tracks embedded in the video stream have no
    // independent pid and were assigned subtitle->id == 0; skip those.
    hb_subtitle_t *subtitle;
    for ( i = 0; ( subtitle = hb_list_item( title->list_subtitle, i ) ); ++i )
    {
        if ( subtitle->id != 0 )
        {
            update_ts_streams( d, subtitle->id & 0xFFFF, 0,
                               subtitle->stream_type, S, NULL );
        }
    }

    // We don't need to wait for a PCR when scanning. In fact, it
    // trips us up on the first preview of every title since we would
    // have to read quite a lot of data before finding the PCR.
    if ( title->flags & HBTF_SCAN_COMPLETE )
    {
        /* BD has PCRs, but the BD index always points to a packet
         * after a PCR packet, so we will not see the initial PCR
         * after any seek. So don't set the flag that causes us
         * to drop packets till we see a PCR. */
        //d->ts_flags = TS_HAS_RAP | TS_HAS_PCR;
        // BD PCR PID is specified to always be 0x1001
        update_ts_streams( d, 0x1001, 0, -1, P, NULL );
    }

    d->packetsize = 192;
    d->hb_stream_type = transport;

    for ( i = 0; i < d->ts.count; i++ )
    {
        d->ts.list[i].buf = hb_buffer_init(d->packetsize);
        d->ts.list[i].extra_buf = hb_buffer_init(d->packetsize);
        d->ts.list[i].buf->size = 0;
        d->ts.list[i].extra_buf->size = 0;
    }
    return d;
}
/***********************************************************************
* hb_stream_close
***********************************************************************
* Closes and frees everything
**********************************************************************/
/* Close a stream opened by hb_stream_open()/hb_bd_stream_open(), log
 * the frame/error tally for natively demuxed streams, free everything
 * and clear the caller's pointer. */
void hb_stream_close( hb_stream_t ** _d )
{
    hb_stream_t *stream = *_d;

    if ( stream->hb_stream_type == ffmpeg )
    {
        ffmpeg_close( stream );
    }
    else if ( stream->frames )
    {
        hb_log( "stream: %d good frames, %d errors (%.0f%%)", stream->frames,
                stream->errors, (double)stream->errors * 100. /
                (double)stream->frames );
    }
    hb_stream_delete( stream );
    *_d = NULL;
}
/***********************************************************************
* hb_ps_stream_title_scan
***********************************************************************
*
**********************************************************************/
/* Build a title description for 'stream': discover its audio/subtitle
 * tracks, identify the video stream, estimate the duration and create a
 * single chapter spanning it. Returns NULL (closing 'title') when no
 * video stream can be found. */
hb_title_t * hb_stream_title_scan(hb_stream_t *stream, hb_title_t * title)
{
    if ( stream->hb_stream_type == ffmpeg )
        return ffmpeg_title_scan( stream, title );

    // 'Barebones Title'
    title->type = HB_STREAM_TYPE;
    title->index = 1;

    // Copy part of the stream path to the title name
    char *sep = hb_strr_dir_sep(stream->path);
    if (sep)
        strcpy(title->name, sep+1);
    char *dot_term = strrchr(title->name, '.');
    if (dot_term)
        *dot_term = '\0';

    // Figure out how many audio streams we really have:
    // - For transport streams, for each PID listed in the PMT (whether
    //   or not it was an audio stream type) read the bitstream until we
    //   find a packet from that PID containing a PES header and see if
    //   the elementary stream is an audio type.
    // - For program streams read the first 4MB and take every unique
    //   audio stream we find.
    hb_init_audio_list(stream, title);
    hb_init_subtitle_list(stream, title);

    // set the video id, codec & muxer
    int idx = pes_index_of_video( stream );
    if ( idx < 0 )
    {
        // No video: the title is unusable.
        hb_title_close( &title );
        return NULL;
    }

    title->video_id = get_id( &stream->pes.list[idx] );
    title->video_codec = stream->pes.list[idx].codec;
    title->video_codec_param = stream->pes.list[idx].codec_param;

    if (stream->hb_stream_type == transport)
    {
        title->demuxer = HB_TS_DEMUXER;
        // make sure we're grabbing the PCR PID
        update_ts_streams( stream, stream->pmt_info.PCR_PID, 0, -1, P, NULL );
    }
    else
    {
        title->demuxer = HB_PS_DEMUXER;
    }

    // IDRs will be search for in hb_stream_duration
    stream->has_IDRs = 0;
    hb_stream_duration(stream, title);

    // One Chapter
    hb_chapter_t * chapter;
    chapter = calloc( sizeof( hb_chapter_t ), 1 );
    hb_chapter_set_title( chapter, "Chapter 1" );
    chapter->index = 1;
    chapter->duration = title->duration;
    chapter->hours = title->hours;
    chapter->minutes = title->minutes;
    chapter->seconds = title->seconds;
    hb_list_add( title->list_chapter, chapter );

    if ( stream->has_IDRs < 1 )
    {
        hb_log( "stream doesn't seem to have video IDR frames" );
        title->flags |= HBTF_NO_IDR;
    }

    if ( stream->hb_stream_type == transport &&
         ( stream->ts_flags & TS_HAS_PCR ) == 0 )
    {
        hb_log( "transport stream missing PCRs - using video DTS instead" );
    }

#ifdef USE_HWD
    hb_va_dxva2_t * dxva2 = NULL;
    dxva2 = hb_va_create_dxva2( dxva2, title->video_codec_param );
    if ( dxva2 )
    {
        title->hwd_support = 1;
        hb_va_close(dxva2);
        dxva2 = NULL;
    }
    else
        title->hwd_support = 0;
#else
    title->hwd_support = 0;
#endif

    // Height, width, rate and aspect ratio information is filled in
    // when the previews are built
    return title;
}
/*
* read the next transport stream packet from 'stream'. Return NULL if
* we hit eof & a pointer to the sync byte otherwise.
*/
/*
 * Read the next transport stream packet from 'stream'. Returns NULL at
 * eof, otherwise a pointer to the sync byte. For 192/204/208-byte
 * packet variants the 188-byte TS packet occupies the tail of each
 * packetsize-byte unit, so the sync byte is checked there.
 */
static const uint8_t *next_packet( hb_stream_t *stream )
{
    uint8_t *buf = stream->ts.packet + stream->packetsize - 188;

    for ( ;; )
    {
        if ( fread(stream->ts.packet, 1, stream->packetsize,
                   stream->file_handle) != stream->packetsize )
        {
            return NULL;
        }
        if ( buf[0] == 0x47 )
        {
            return buf;
        }
        // lost sync - back up to where we started then try to re-establish.
        off_t pos = ftello(stream->file_handle) - stream->packetsize;
        off_t pos2 = align_to_next_packet(stream);
        if ( pos2 == 0 )
        {
            hb_log( "next_packet: eof while re-establishing sync @ %"PRId64, pos );
            return NULL;
        }
        ts_warn( stream, "next_packet: sync lost @ %"PRId64", regained after %"PRId64" bytes",
                 pos, pos2 );
    }
}
/*
* skip to the start of the next PACK header in program stream src_stream.
*/
/*
 * Skip to the start of the next PACK header in program stream
 * src_stream: scan forward byte-by-byte until the PACK start code
 * (0x000001ba) is seen, then rewind so the next read starts on that
 * boundary. On eof the file position is simply left at the end.
 */
static void skip_to_next_pack( hb_stream_t *src_stream )
{
    uint32_t code = ~0U;   // rolling 4-byte start-code window
    int c;

    flockfile( src_stream->file_handle );
    while ( ( c = getc_unlocked( src_stream->file_handle ) ) != EOF )
    {
        code = ( code << 8 ) | c;
        if ( code == 0x000001ba )
        {
            // we found the start of the next pack
            break;
        }
    }
    funlockfile( src_stream->file_handle );

    if ( c != EOF )
    {
        // Back up over the 4 start-code bytes we just consumed.
        fseeko( src_stream->file_handle, -4, SEEK_CUR );
    }
}
/* Copy one NAL unit out of an Annex B byte stream into a freshly
 * malloc'd buffer, removing 00 00 03 emulation-prevention sequences.
 * Copying stops at the next start code prefix (00 00 01) or the end of
 * the input. The caller owns *dst and must free it; *dst may be NULL on
 * allocation failure (then *dst_len is 0). */
static void CreateDecodedNAL( uint8_t **dst, int *dst_len,
                              const uint8_t *src, int src_len )
{
    const uint8_t *end = src + src_len;
    uint8_t *out = malloc( src_len );

    *dst = out;
    if ( out != NULL )
    {
        while ( src < end )
        {
            if ( src < end - 3 &&
                 src[0] == 0x00 && src[1] == 0x00 && src[2] == 0x01 )
            {
                // Next start code found
                break;
            }
            if ( src < end - 3 &&
                 src[0] == 0x00 && src[1] == 0x00 && src[2] == 0x03 )
            {
                // Emulation prevention: 00 00 03 -> 00 00
                *out++ = 0x00;
                *out++ = 0x00;
                src += 3;
                continue;
            }
            *out++ = *src++;
        }
    }
    *dst_len = out - *dst;
}
/* Scan an H.264 SEI NAL (Annex B payload at 'buf') for a recovery_point
 * SEI message (payloadType == 6). Returns non-zero if one is present.
 * SEI payload type and size are coded as sequences of 0xff bytes plus a
 * terminator byte, which the two inner loops accumulate. */
static int isRecoveryPoint( const uint8_t *buf, int len )
{
    uint8_t *nal;
    int nal_len;
    int ii, type, size;
    int recovery_frames = 0;

    // Strip emulation-prevention bytes first so the ff-coded fields
    // can be read directly.
    CreateDecodedNAL( &nal, &nal_len, buf, len );

    for ( ii = 0; ii+1 < nal_len; )
    {
        // Accumulate payloadType: add bytes while they are 0xff.
        type = 0;
        while ( ii+1 < nal_len )
        {
            type += nal[ii++];
            if ( nal[ii-1] != 0xff )
                break;
        }
        // Accumulate payloadSize the same way.
        size = 0;
        while ( ii+1 < nal_len )
        {
            size += nal[ii++];
            if ( nal[ii-1] != 0xff )
                break;
        }
        if( type == 6 )
        {
            // recovery_point SEI found; payload contents don't matter here.
            recovery_frames = 1;
            break;
        }
        ii += size;  // skip this payload and try the next SEI message
    }
    free( nal );
    return recovery_frames;
}
/* Decide whether the video payload in buf[0..len) begins an I-frame /
 * random access point for the title's video codec. Unknown codecs
 * return 1 so video is not discarded wholesale. */
static int isIframe( hb_stream_t *stream, const uint8_t *buf, int len )
{
    // For mpeg2: look for a gop start or i-frame picture start
    // for h.264: look for idr nal type or a slice header for an i-frame
    // for vc1: look for a Sequence header
    int ii;
    uint32_t strid = 0;   // rolling start-code window

    int vid = pes_index_of_video( stream );
    hb_pes_stream_t *pes = &stream->pes.list[vid];
    if ( pes->stream_type <= 2 ||
         pes->codec_param == AV_CODEC_ID_MPEG1VIDEO ||
         pes->codec_param == AV_CODEC_ID_MPEG2VIDEO )
    {
        // This section of the code handles MPEG-1 and MPEG-2 video streams
        for (ii = 0; ii < len; ii++)
        {
            strid = (strid << 8) | buf[ii];
            if ( ( strid >> 8 ) == 1 )
            {
                // we found a start code
                uint8_t id = strid;
                switch ( id )
                {
                    case 0xB8: // group_start_code (GOP header)
                    case 0xB3: // sequence_header code
                        return 1;

                    case 0x00: // picture_start_code
                        // picture_header, let's see if it's an I-frame
                        if (ii < len - 3)
                        {
                            // check if picture_coding_type == 1
                            if ((buf[ii+2] & (0x7 << 3)) == (1 << 3))
                            {
                                // found an I-frame picture
                                return 1;
                            }
                        }
                        break;
                }
            }
        }
        // didn't find an I-frame
        return 0;
    }
    if ( pes->stream_type == 0x1b || pes->codec_param == AV_CODEC_ID_H264 )
    {
        // we have an h.264 stream
        for (ii = 0; ii < len; ii++)
        {
            strid = (strid << 8) | buf[ii];
            if ( ( strid >> 8 ) == 1 )
            {
                // we found a start code - remove the ref_idc from the nal type
                uint8_t nal_type = strid & 0x1f;
                if ( nal_type == 0x01 )
                {
                    // Found slice and no recovery point
                    return 0;
                }
                if ( nal_type == 0x05 )
                {
                    // h.264 IDR picture start
                    return 1;
                }
                else if ( nal_type == 0x06 )
                {
                    // SEI NAL: a recovery_point SEI also counts as a
                    // usable random access point.
                    int off = ii + 1;
                    int recovery_frames = isRecoveryPoint( buf+off, len-off );
                    if ( recovery_frames )
                    {
                        return recovery_frames;
                    }
                }
            }
        }
        // didn't find an I-frame
        return 0;
    }
    if ( pes->stream_type == 0xea || pes->codec_param == AV_CODEC_ID_VC1 )
    {
        // we have an vc1 stream
        for (ii = 0; ii < len; ii++)
        {
            strid = (strid << 8) | buf[ii];
            if ( strid == 0x10f )
            {
                // the ffmpeg vc1 decoder requires a seq hdr code in the first
                // frame.
                return 1;
            }
        }
        // didn't find an I-frame
        return 0;
    }
    if ( pes->stream_type == 0x10 || pes->codec_param == AV_CODEC_ID_MPEG4 )
    {
        // we have an mpeg4 stream
        for (ii = 0; ii < len-1; ii++)
        {
            strid = (strid << 8) | buf[ii];
            if ( strid == 0x1b6 )
            {
                // vop_start_code: top two bits of the next byte are the
                // coding type; 0 means an intra-coded VOP.
                if ((buf[ii+1] & 0xC0) == 0)
                    return 1;
            }
        }
        // didn't find an I-frame
        return 0;
    }
    // we don't understand the stream type so just say "yes" otherwise
    // we'll discard all the video.
    return 1;
}
/* I-frame test for one 188-byte TS packet: skip the 13-byte TS+PES
 * header prefix plus the adaptation field and hand the rest to isIframe. */
static int ts_isIframe( hb_stream_t *stream, const uint8_t *buf, int adapt_len )
{
    int payload_off = 13 + adapt_len;
    return isIframe( stream, buf + payload_off, 188 - payload_off );
}
/*
 * Scan forward (up to 300000 packets) in 'stream' to find the next
 * start packet for the Packetized Elementary Stream associated with TS
 * PID 'pid'. On success, returns a pointer to the TS packet (inside
 * stream->ts.packet) and stores the adaptation field length in
 * *out_adapt_len; returns NULL at eof or when no matching PES start is
 * found. While scanning, also records whether the stream has valid PCRs
 * and/or random access points in stream->ts_flags.
 *
 * Fix: the failure paths returned the integer 0 for a pointer result;
 * use NULL (same value, correct idiom).
 */
static const uint8_t *hb_ts_stream_getPEStype(hb_stream_t *stream, uint32_t pid, int *out_adapt_len)
{
    int npack = 300000; // max packets to read

    while (--npack >= 0)
    {
        const uint8_t *buf = next_packet( stream );
        if ( buf == NULL )
        {
            hb_log("hb_ts_stream_getPEStype: EOF while searching for PID 0x%x", pid);
            return NULL;
        }

        // while we're reading the stream, check if it has valid PCRs
        // and/or random access points.
        uint32_t pack_pid = ( (buf[1] & 0x1f) << 8 ) | buf[2];
        if ( pack_pid == stream->pmt_info.PCR_PID )
        {
            if ( ( buf[5] & 0x10 ) &&
                 ( ( ( buf[3] & 0x30 ) == 0x20 ) ||
                   ( ( buf[3] & 0x30 ) == 0x30 && buf[4] > 6 ) ) )
            {
                stream->ts_flags |= TS_HAS_PCR;
            }
        }
        if ( buf[5] & 0x40 )
        {
            stream->ts_flags |= TS_HAS_RAP;
        }

        /*
         * The PES header is only in TS packets with 'start' set so we check
         * that first then check for the right PID.
         */
        if ((buf[1] & 0x40) == 0 || pack_pid != pid )
        {
            // not a start packet or not the pid we want
            continue;
        }

        int adapt_len = 0;
        /* skip over the TS hdr to return a pointer to the PES hdr */
        switch (buf[3] & 0x30)
        {
            case 0x00: // illegal
            case 0x20: // fill packet
                continue;

            case 0x30: // adaptation
                adapt_len = buf[4] + 1;
                if (adapt_len > 184)
                {
                    hb_log("hb_ts_stream_getPEStype: invalid adaptation field length %d for PID 0x%x", buf[4], pid);
                    continue;
                }
                break;
        }
        /* PES hdr has to begin with an mpeg start code */
        if (buf[adapt_len+4] == 0x00 && buf[adapt_len+5] == 0x00 && buf[adapt_len+6] == 0x01)
        {
            *out_adapt_len = adapt_len;
            return buf;
        }
    }

    /* didn't find it */
    return NULL;
}
/* Read program-stream packets (up to 2048 blocks) until one from the
 * video elementary stream carrying a valid PTS is found. On success,
 * fills *pi with the parsed PES info and returns the packet buffer
 * (caller frees with hb_buffer_close); returns NULL at eof or when no
 * such packet is found.
 *
 * Bug fix: index_of_ps_stream() returns -1 for unknown streams; that
 * value was used directly to index stream->pes.list, reading out of
 * bounds. Unknown streams are now skipped. */
static hb_buffer_t * hb_ps_stream_getVideo(
    hb_stream_t *stream,
    hb_pes_info_t *pi)
{
    hb_buffer_t *buf = hb_buffer_init(HB_DVD_READ_BUFFER_SIZE);
    hb_pes_info_t pes_info;
    // how many blocks we read while searching for a video PES header
    int blksleft = 2048;

    while (--blksleft >= 0)
    {
        buf->size = 0;
        int len = hb_ps_read_packet( stream, buf );
        if ( len == 0 )
        {
            // EOF
            break;
        }
        if ( !hb_parse_ps( stream, buf->data, buf->size, &pes_info ) )
            continue;

        int idx;
        if ( pes_info.stream_id == 0xbd )
        {
            // Private stream 1: substreams are identified by the
            // Blu-ray/DVD substream id, not stream_id_ext.
            idx = index_of_ps_stream( stream, pes_info.stream_id,
                                      pes_info.bd_substream_id );
        }
        else
        {
            idx = index_of_ps_stream( stream, pes_info.stream_id,
                                      pes_info.stream_id_ext );
        }
        if ( idx >= 0 && stream->pes.list[idx].stream_kind == V )
        {
            if ( pes_info.pts != AV_NOPTS_VALUE )
            {
                *pi = pes_info;
                return buf;
            }
        }
    }
    hb_buffer_close( &buf );
    return NULL;
}
/***********************************************************************
* hb_stream_duration
***********************************************************************
*
* Finding stream duration is difficult. One issue is that the video file
* may have chunks from several different program fragments (main feature,
* commercials, station id, trailers, etc.) all with their own base pts
* value. We can't find the piece boundaries without reading the entire
* file but if we compute a rate based on time stamps from two different
* pieces the result will be meaningless. The second issue is that the
* data rate of compressed video normally varies by 5-10x over the length
* of the video. This says that we want to compute the rate over relatively
* long segments to get a representative average but long segments increase
* the likelihood that we'll cross a piece boundary.
*
* What we do is take time stamp samples at several places in the file
* (currently 16) then compute the average rate (i.e., ticks of video per
* byte of the file) for all pairs of samples (N^2 rates computed for N
* samples). Some of those rates will be absurd because the samples came
* from different segments. Some will be way low or high because the
* samples came from a low or high motion part of the segment. But given
* that we're comparing *all* pairs the majority of the computed rates
* should be near the overall average. So we median filter the computed
* rates to pick the most representative value.
*
**********************************************************************/
// One (file position, timestamp) sample used for duration estimation.
struct pts_pos {
    uint64_t pos; /* file position of this PTS sample */
    uint64_t pts; /* PTS from video stream */
};

// number of (position, timestamp) samples taken across the file
#define NDURSAMPLES 128
// get one (position, timestamp) sample from a transport or program
// stream.
// Take one (position, timestamp) sample from the video stream near file
// offset 'fpos'.  Returns {0,0} when no usable video PES packet with a
// PTS can be found near that offset.  Side effect: bumps the saturating
// counter stream->has_IDRs whenever an I-frame is seen, so the seek
// logic knows whether the stream contains random access points.
static struct pts_pos hb_sample_pts(hb_stream_t *stream, uint64_t fpos)
{
    struct pts_pos pp = { 0, 0 };

    if ( stream->hb_stream_type == transport )
    {
        const uint8_t *buf;
        int adapt_len;
        fseeko( stream->file_handle, fpos, SEEK_SET );
        align_to_next_packet( stream );
        int pid = stream->ts.list[ts_index_of_video(stream)].pid;
        buf = hb_ts_stream_getPEStype( stream, pid, &adapt_len );
        if ( buf == NULL )
        {
            hb_log("hb_sample_pts: couldn't find video packet near %"PRIu64, fpos);
            return pp;
        }
        const uint8_t *pes = buf + 4 + adapt_len;
        // top bit of PTS_DTS_flags must be set for a PTS to be present
        if ( ( pes[7] >> 7 ) != 1 )
        {
            hb_log("hb_sample_pts: no PTS in video packet near %"PRIu64, fpos);
            return pp;
        }
        // reassemble the 33-bit PTS spread over 5 bytes with interleaved
        // marker bits (ISO 13818-1 PES header layout)
        pp.pts = ((((uint64_t)pes[ 9] >> 1 ) & 7) << 30) |
                 ( (uint64_t)pes[10] << 22) |
                 ( ((uint64_t)pes[11] >> 1 ) << 15) |
                 ( (uint64_t)pes[12] << 7 ) |
                 ( (uint64_t)pes[13] >> 1 );
        if ( ts_isIframe( stream, buf, adapt_len ) )
        {
            // saturate so the count fits in a small field
            if ( stream->has_IDRs < 255 )
            {
                ++stream->has_IDRs;
            }
        }
        pp.pos = ftello(stream->file_handle);
        if ( !stream->has_IDRs )
        {
            // Scan a little more to see if we will stumble upon one
            int ii;
            for ( ii = 0; ii < 10; ii++ )
            {
                buf = hb_ts_stream_getPEStype( stream, pid, &adapt_len );
                if ( buf == NULL )
                    break;
                if ( ts_isIframe( stream, buf, adapt_len ) )
                {
                    ++stream->has_IDRs;
                    break;
                }
            }
        }
    }
    else
    {
        hb_buffer_t *buf;
        hb_pes_info_t pes_info;

        // round address down to nearest dvd sector start
        fpos &=~ ( HB_DVD_READ_BUFFER_SIZE - 1 );
        fseeko( stream->file_handle, fpos, SEEK_SET );
        if ( stream->hb_stream_type == program )
        {
            skip_to_next_pack( stream );
        }
        buf = hb_ps_stream_getVideo( stream, &pes_info );
        if ( buf == NULL )
        {
            hb_log("hb_sample_pts: couldn't find video packet near %"PRIu64, fpos);
            return pp;
        }
        if ( pes_info.pts < 0 )
        {
            hb_log("hb_sample_pts: no PTS in video packet near %"PRIu64, fpos);
            hb_buffer_close( &buf );
            return pp;
        }
        if ( isIframe( stream, buf->data, buf->size ) )
        {
            if ( stream->has_IDRs < 255 )
            {
                ++stream->has_IDRs;
            }
        }
        hb_buffer_close( &buf );
        if ( !stream->has_IDRs )
        {
            // Scan a little more to see if we will stumble upon one
            int ii;
            for ( ii = 0; ii < 10; ii++ )
            {
                buf = hb_ps_stream_getVideo( stream, &pes_info );
                if ( buf == NULL )
                    break;
                if ( isIframe( stream, buf->data, buf->size ) )
                {
                    ++stream->has_IDRs;
                    hb_buffer_close( &buf );
                    break;
                }
                hb_buffer_close( &buf );
            }
        }
        pp.pts = pes_info.pts;
        pp.pos = ftello(stream->file_handle);
    }
    return pp;
}
// qsort() comparator for doubles: orders the rate samples ascending.
static int dur_compare( const void *a, const void *b )
{
    double lhs = *(const double *)a;
    double rhs = *(const double *)b;

    if ( lhs < rhs )
    {
        return -1;
    }
    return ( lhs == rhs ) ? 0 : 1;
}
// given an array of (position, time) samples, compute a max-likelihood
// estimate of the average rate (90KHz ticks per file byte) by computing
// the rate between nearby pairs of samples then taking the median of
// those rates.
static double compute_stream_rate( struct pts_pos *pp, int n )
{
    int i, j;
    double rates[NDURSAMPLES * NDURSAMPLES / 8];
    double *rp = rates;

    // the following nested loops compute the rates between all pairs.
    // seed rates[0] so the median read below is defined even if no
    // valid pair is found (rp never advances).
    *rp = 0;
    for ( i = 0; i < n-1; ++i )
    {
        // Bias the median filter by not including pairs that are "far"
        // from one another. This is to handle cases where the file is
        // made of roughly equal size pieces where a symmetric choice of
        // pairs results in having the same number of intra-piece &
        // inter-piece rate estimates. This would mean that the median
        // could easily fall in the inter-piece part of the data which
        // would give a bogus estimate. The 'ns' index creates an
        // asymmetry that favors locality.
        int ns = i + ( n >> 3 );
        if ( ns > n )
            ns = n;
        for ( j = i+1; j < ns; ++j )
        {
            // skip pairs more than 6 hours of PTS apart -- they almost
            // certainly straddle a timestamp discontinuity
            if ( (uint64_t)(pp[j].pts - pp[i].pts) > 90000LL*3600*6 )
                break;
            if ( pp[j].pts != pp[i].pts && pp[j].pos > pp[i].pos )
            {
                *rp = ((double)( pp[j].pts - pp[i].pts )) /
                      ((double)( pp[j].pos - pp[i].pos ));
                ++rp;
            }
        }
    }
    // now compute and return the median of all the (n*n/2) rates we computed
    // above.
    int nrates = rp - rates;
    qsort( rates, nrates, sizeof (rates[0] ), dur_compare );
    return rates[nrates >> 1];
}
// Estimate the title duration by sampling (position, PTS) pairs at
// NDURSAMPLES evenly spaced points across the file, computing a median
// rate, and scaling it by the file size.  Fills in inTitle's duration,
// hours, minutes and seconds; leaves the file rewound to the start.
static void hb_stream_duration(hb_stream_t *stream, hb_title_t *inTitle)
{
    struct pts_pos samples[NDURSAMPLES];
    int nsamples = 0;

    fseeko(stream->file_handle, 0, SEEK_END);
    uint64_t fsize = ftello(stream->file_handle);
    uint64_t fincr = fsize / NDURSAMPLES;

    // sample at the midpoint of each of the NDURSAMPLES slices
    uint64_t fpos = fincr / 2;
    while (nsamples < NDURSAMPLES)
    {
        samples[nsamples++] = hb_sample_pts(stream, fpos);
        fpos += fincr;
    }

    uint64_t dur = compute_stream_rate(samples, nsamples) * (double)fsize;
    inTitle->duration = dur;
    dur /= 90000;                               // 90KHz ticks -> seconds
    inTitle->hours   = dur / 3600;
    inTitle->minutes = ( dur % 3600 ) / 60;
    inTitle->seconds = dur % 60;

    rewind(stream->file_handle);
}
/***********************************************************************
* hb_stream_read
***********************************************************************
*
**********************************************************************/
// Read the next buffer from the stream, dispatching to the demuxer that
// matches the container type.
hb_buffer_t * hb_stream_read( hb_stream_t * src_stream )
{
    switch ( src_stream->hb_stream_type )
    {
        case ffmpeg:
            return hb_ffmpeg_read( src_stream );
        case program:
            return hb_ps_stream_decode( src_stream );
        default:
            // everything else is handled as a transport stream
            return hb_ts_stream_decode( src_stream );
    }
}
// Return the container's start time in AV_TIME_BASE units, or 0 when
// libavformat reports no (or a non-positive) start time.
int64_t ffmpeg_initial_timestamp( hb_stream_t * stream )
{
    AVFormatContext *ic = stream->ffmpeg_ic;
    int64_t start = ic->start_time;

    return ( start == AV_NOPTS_VALUE || start <= 0 ) ? 0 : start;
}
// Seek to the start of chapter 'chapter_num' (1-based).  Only meaningful
// for libavformat (ffmpeg) streams; transport and program streams report
// success without seeking.  Returns 1 on success, 0 on bad arguments.
int hb_stream_seek_chapter( hb_stream_t * stream, int chapter_num )
{
    // Bug fix: validate the stream pointer before dereferencing it --
    // the original read stream->hb_stream_type first, which would crash
    // on a NULL stream despite the (too late) NULL check that followed.
    if ( !stream )
    {
        return 0;
    }
    if ( stream->hb_stream_type != ffmpeg )
    {
        // currently meaningless for transport and program streams
        return 1;
    }
    // Bug fix: also reject chapter_num < 1, which previously left
    // 'chapter' NULL and dereferenced it below.
    if ( !stream->title || chapter_num < 1 ||
         chapter_num > hb_list_count( stream->title->list_chapter ) )
    {
        return 0;
    }

    int64_t sum_dur = 0;
    hb_chapter_t *chapter = NULL;
    int i;

    // total duration of chapters up to and including the target
    for ( i = 0; i < chapter_num; ++i)
    {
        chapter = hb_list_item( stream->title->list_chapter, i );
        sum_dur += chapter->duration;
    }
    stream->chapter = chapter_num - 1;
    stream->chapter_end = sum_dur;

    // start of the target chapter converted from 90KHz ticks to
    // AV_TIME_BASE units, offset by the container's initial timestamp
    int64_t pos = ( ( ( sum_dur - chapter->duration ) * AV_TIME_BASE ) / 90000 ) + ffmpeg_initial_timestamp( stream );

    hb_deep_log( 2, "Seeking to chapter %d: starts %"PRId64", ends %"PRId64", AV pos %"PRId64,
                 chapter_num, sum_dur - chapter->duration, sum_dur, pos);

    if ( chapter_num > 1 && pos > 0 )
    {
        AVStream *st = stream->ffmpeg_ic->streams[stream->ffmpeg_video_id];
        // timebase must be adjusted to match timebase of stream we are
        // using for seeking.
        pos = av_rescale(pos, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
        avformat_seek_file( stream->ffmpeg_ic, stream->ffmpeg_video_id, 0, pos, pos, AVSEEK_FLAG_BACKWARD);
    }
    return 1;
}
/***********************************************************************
* hb_stream_chapter
***********************************************************************
* Return the number of the chapter that we are currently in. We store
* the chapter number starting from 0, so + 1 for the real chapter num.
**********************************************************************/
// Return the 1-based number of the chapter we are currently in (the
// stream stores it 0-based).
int hb_stream_chapter( hb_stream_t * src_stream )
{
    int zero_based = src_stream->chapter;
    return zero_based + 1;
}
/***********************************************************************
* hb_stream_seek
***********************************************************************
*
**********************************************************************/
// Seek to fraction 'f' (0..1) of the stream.  ffmpeg streams delegate to
// ffmpeg_seek(); raw TS/PS streams seek byte-wise (rounded down to a DVD
// sector boundary), reset their demuxer state and resync.  Returns 1 on
// success, 0 when the underlying fseeko fails (position is restored).
int hb_stream_seek( hb_stream_t * stream, float f )
{
    if ( stream->hb_stream_type == ffmpeg )
    {
        return ffmpeg_seek( stream, f );
    }

    off_t saved_pos = ftello( stream->file_handle );
    fseeko( stream->file_handle, 0, SEEK_END );
    off_t total_size = ftello( stream->file_handle );

    // target byte offset, rounded down to a DVD-sector boundary
    off_t target = (off_t)((double)total_size * (double)f);
    target &= ~((off_t)(HB_DVD_READ_BUFFER_SIZE - 1));

    if ( fseeko( stream->file_handle, target, SEEK_SET ) == -1 )
    {
        // restore the original position on failure
        fseeko( stream->file_handle, saved_pos, SEEK_SET );
        return 0;
    }

    if ( stream->hb_stream_type == transport )
    {
        // We need to drop the current decoder output and move
        // forwards to the next transport stream packet.
        hb_ts_stream_reset(stream);
        align_to_next_packet(stream);
        if ( !stream->has_IDRs )
        {
            // the stream has no IDRs so don't look for one.
            stream->need_keyframe = 0;
        }
    }
    else if ( stream->hb_stream_type == program )
    {
        hb_ps_stream_reset(stream);
        skip_to_next_pack( stream );
        if ( !stream->has_IDRs )
        {
            // the stream has no IDRs so don't look for one.
            stream->need_keyframe = 0;
        }
    }

    return 1;
}
// Seek to an absolute timestamp.  Only implemented for libavformat
// streams; returns -1 for transport and program streams.
int hb_stream_seek_ts( hb_stream_t * stream, int64_t ts )
{
    if ( stream->hb_stream_type != ffmpeg )
    {
        return -1;
    }
    return ffmpeg_seek_ts( stream, ts );
}
// Copy up to len-1 characters of 'src' into 'dst', converting to upper
// case, and NUL-terminate the result.  Returns dst.
// Fixes vs. the original: len <= 0 no longer writes a NUL into a buffer
// with no room for it (out-of-bounds write for len == 0), and the char
// is cast to unsigned char before the <ctype.h> call -- passing a
// negative char value is undefined behavior.  The islower() pre-check
// was redundant: toupper() already leaves non-lowercase input unchanged.
static char* strncpyupper( char *dst, const char *src, int len )
{
    int ii;

    if ( len <= 0 )
    {
        return dst;
    }
    for ( ii = 0; ii < len - 1 && src[ii]; ii++ )
    {
        dst[ii] = toupper( (unsigned char)src[ii] );
    }
    dst[ii] = '\0';
    return dst;
}
// Return a human-readable codec name for a PES stream: known Blu-ray
// (HDMV) stream types first, then the static stream-type table, then any
// name already stored on the stream, then the uppercased libavcodec
// decoder name, and finally "Unknown".
static const char *stream_type_name2(hb_stream_t *stream, hb_pes_stream_t *pes)
{
    // NOTE: static buffer -- not thread safe, and the returned pointer
    // is invalidated by the next call that reaches the libavcodec path.
    static char codec_name_caps[80];

    if ( stream->reg_desc == STR4_TO_UINT32("HDMV") )
    {
        // Names for streams we know about.
        switch ( pes->stream_type )
        {
            case 0x80:
                return "BD LPCM";
            case 0x83:
                return "TrueHD";
            case 0x84:
                return "E-AC3";
            case 0x85:
                return "DTS-HD HRA";
            case 0x86:
                return "DTS-HD MA";
            default:
                break;
        }
    }
    if ( st2codec[pes->stream_type].name )
    {
        return st2codec[pes->stream_type].name;
    }
    if ( pes->codec_name[0] != 0 )
    {
        return pes->codec_name;
    }
    if ( pes->codec & HB_ACODEC_FF_MASK )
    {
        // fall back to the libavcodec decoder's short name, uppercased
        AVCodec * codec = avcodec_find_decoder( pes->codec_param );
        if ( codec && codec->name && codec->name[0] )
        {
            strncpyupper( codec_name_caps, codec->name, 80 );
            return codec_name_caps;
        }
    }
    return "Unknown";
}
// Fill in the language fields of an audio track's config from an
// ISO-639 language table entry, preferring the native spelling of the
// language name and falling back to the English name when none exists.
static void set_audio_description(hb_audio_t *audio, iso639_lang_t *lang)
{
    const char *name;

    name = lang->native_name[0] ? lang->native_name : lang->eng_name;
    snprintf( audio->config.lang.simple,
              sizeof( audio->config.lang.simple ), "%s", name );
    snprintf( audio->config.lang.iso639_2,
              sizeof( audio->config.lang.iso639_2 ), "%s", lang->iso639_2 );
    audio->config.lang.type = 0;
}
// Sort specifies the index in the subtitle list where you would
// like sorted items to begin.
// Add the subtitle stream stream->pes.list[idx] to the title's subtitle
// list, unless a subtitle with the same id is already present.  'sort'
// specifies the index in the subtitle list where sorted-by-stream-id
// insertion should begin; pass -1 to simply append.
// Fixes vs. the original: the misspelled log message "unregonized
// subtitle!" is corrected, and the copy-pasted "audio" comments now say
// subtitle.
static void pes_add_subtitle_to_title(
    hb_stream_t *stream,
    int          idx,
    hb_title_t  *title,
    int          sort)
{
    hb_pes_stream_t *pes = &stream->pes.list[idx];

    // Sort by id when adding to the list
    // This assures that they are always displayed in the same order
    int id = get_id( pes );
    int i;
    hb_subtitle_t *tmp = NULL;

    int count = hb_list_count( title->list_subtitle );

    // Don't add the same subtitle twice.  Search for it first.
    for ( i = 0; i < count; i++ )
    {
        tmp = hb_list_item( title->list_subtitle, i );
        if ( id == tmp->id )
            return;
    }

    hb_subtitle_t *subtitle = calloc( sizeof( hb_subtitle_t ), 1 );
    iso639_lang_t * lang;

    subtitle->track = idx;
    subtitle->id = id;
    lang = lang_for_code( pes->lang_code );
    snprintf( subtitle->lang, sizeof( subtitle->lang ), "%s",
              strlen(lang->native_name) ? lang->native_name : lang->eng_name);
    snprintf( subtitle->iso639_2, sizeof( subtitle->iso639_2 ), "%s",
              lang->iso639_2);

    switch ( pes->codec )
    {
        case WORK_DECPGSSUB:
            subtitle->source = PGSSUB;
            subtitle->format = PICTURESUB;
            subtitle->config.dest = RENDERSUB;
            break;
        case WORK_DECVOBSUB:
            subtitle->source = VOBSUB;
            subtitle->format = PICTURESUB;
            subtitle->config.dest = RENDERSUB;
            break;
        default:
            // Unrecognized, don't add to list
            hb_log("unrecognized subtitle!");
            free( subtitle );
            return;
    }

    subtitle->reg_desc = stream->reg_desc;
    subtitle->stream_type = pes->stream_type;
    subtitle->substream_type = pes->stream_id_ext;
    subtitle->codec = pes->codec;

    // Create a default palette since vob files do not include the
    // vobsub palette.
    if ( subtitle->source == VOBSUB )
    {
        subtitle->palette[0] = 0x108080;
        subtitle->palette[1] = 0x108080;
        subtitle->palette[2] = 0x108080;
        subtitle->palette[3] = 0xbff000;
        subtitle->palette[4] = 0xbff000;
        subtitle->palette[5] = 0x108080;
        subtitle->palette[6] = 0x108080;
        subtitle->palette[7] = 0x108080;
        subtitle->palette[8] = 0xbff000;
        subtitle->palette[9] = 0x108080;
        subtitle->palette[10] = 0x108080;
        subtitle->palette[11] = 0x108080;
        subtitle->palette[12] = 0x108080;
        subtitle->palette[13] = 0xbff000;
        subtitle->palette[14] = 0x108080;
        subtitle->palette[15] = 0x108080;
    }
    hb_log("stream id 0x%x (type 0x%x substream 0x%x) subtitle 0x%x",
           pes->stream_id, pes->stream_type, pes->stream_id_ext, subtitle->id);

    // Search for the sort position
    if ( sort >= 0 )
    {
        sort = sort < count ? sort : count;
        for ( i = sort; i < count; i++ )
        {
            tmp = hb_list_item( title->list_subtitle, i );
            int sid = tmp->id & 0xffff;
            int ssid = tmp->id >> 16;
            if ( pes->stream_id < sid )
                break;
            else if ( pes->stream_id <= sid &&
                      pes->stream_id_ext <= ssid )
            {
                break;
            }
        }
        hb_list_insert( title->list_subtitle, i, subtitle );
    }
    else
    {
        hb_list_add( title->list_subtitle, subtitle );
    }
}
// Sort specifies the index in the audio list where you would
// like sorted items to begin.
// Add the audio stream stream->pes.list[idx] to the title's audio list,
// unless an audio track with the same id is already present.  'sort'
// specifies the index in the audio list where sorted-by-stream-id
// insertion should begin; pass -1 to simply append.
static void pes_add_audio_to_title(
    hb_stream_t *stream,
    int          idx,
    hb_title_t  *title,
    int          sort)
{
    hb_pes_stream_t *pes = &stream->pes.list[idx];

    // Sort by id when adding to the list
    // This assures that they are always displayed in the same order
    int id = get_id( pes );
    int i;
    hb_audio_t *tmp = NULL;

    int count = hb_list_count( title->list_audio );

    // Don't add the same audio twice.  Search for audio.
    for ( i = 0; i < count; i++ )
    {
        tmp = hb_list_item( title->list_audio, i );
        if ( id == tmp->id )
            return;
    }

    hb_audio_t *audio = calloc( sizeof( hb_audio_t ), 1 );

    audio->id = id;
    audio->config.in.reg_desc = stream->reg_desc;
    audio->config.in.stream_type = pes->stream_type;
    audio->config.in.substream_type = pes->stream_id_ext;

    audio->config.in.codec = pes->codec;
    audio->config.in.codec_param = pes->codec_param;

    set_audio_description(audio, lang_for_code(pes->lang_code));

    hb_log("stream id 0x%x (type 0x%x substream 0x%x) audio 0x%x",
           pes->stream_id, pes->stream_type, pes->stream_id_ext, audio->id);

    audio->config.in.track = idx;

    // Search for the sort position
    if ( sort >= 0 )
    {
        sort = sort < count ? sort : count;
        for ( i = sort; i < count; i++ )
        {
            tmp = hb_list_item( title->list_audio, i );
            int sid = tmp->id & 0xffff;
            int ssid = tmp->id >> 16;
            if ( pes->stream_id < sid )
                break;
            else if ( pes->stream_id <= sid &&
                      pes->stream_id_ext <= ssid )
            {
                break;
            }
        }
        hb_list_insert( title->list_audio, i, audio );
    }
    else
    {
        hb_list_add( title->list_audio, audio );
    }
}
// Populate the title's subtitle list from the PES stream table.
// Streams that appeared in a program map are added first, in map order;
// any remaining subtitle streams are then added sorted by stream id.
static void hb_init_subtitle_list(hb_stream_t *stream, hb_title_t *title)
{
    int ii;
    int map_idx;
    int largest = -1;

    // First add all that were found in a map.
    for ( map_idx = 0; 1; map_idx++ )
    {
        for ( ii = 0; ii < stream->pes.count; ii++ )
        {
            if ( stream->pes.list[ii].stream_kind == S )
            {
                if ( stream->pes.list[ii].map_idx == map_idx )
                {
                    pes_add_subtitle_to_title( stream, ii, title, -1 );
                }
                if ( stream->pes.list[ii].map_idx > largest )
                    largest = stream->pes.list[ii].map_idx;
            }
        }
        if ( map_idx > largest )
            break;
    }

    // Bug fix: the sorted-insert start position must come from the
    // subtitle list we are building, not the audio list (copy-paste
    // error in the original).
    int count = hb_list_count( title->list_subtitle );
    // Now add the rest.  Sort them by stream id.
    for ( ii = 0; ii < stream->pes.count; ii++ )
    {
        if ( stream->pes.list[ii].stream_kind == S )
        {
            pes_add_subtitle_to_title( stream, ii, title, count );
        }
    }
}
// Populate the title's audio list from the PES stream table.  Streams
// that appeared in a program map are added first, in map order; any
// remaining audio streams are then added sorted by stream id.
static void hb_init_audio_list(hb_stream_t *stream, hb_title_t *title)
{
    int ii;
    int map_idx;
    int largest = -1;

    // First add all that were found in a map.
    for ( map_idx = 0; 1; map_idx++ )
    {
        for ( ii = 0; ii < stream->pes.count; ii++ )
        {
            if ( stream->pes.list[ii].stream_kind == A )
            {
                if ( stream->pes.list[ii].map_idx == map_idx )
                {
                    pes_add_audio_to_title( stream, ii, title, -1 );
                }
                if ( stream->pes.list[ii].map_idx > largest )
                    largest = stream->pes.list[ii].map_idx;
            }
        }
        if ( map_idx > largest )
            break;
    }

    int count = hb_list_count( title->list_audio );
    // Now add the rest.  Sort them by stream id.
    for ( ii = 0; ii < stream->pes.count; ii++ )
    {
        if ( stream->pes.list[ii].stream_kind == A )
        {
            pes_add_audio_to_title( stream, ii, title, count );
        }
    }
}
/***********************************************************************
* hb_ts_stream_init
***********************************************************************
*
**********************************************************************/
// Initialize transport stream state: reset the TS/PES stream tables,
// find the PIDs present in the stream, allocate per-PID demuxing
// buffers, resolve each PID's stream type and (on a title scan) log the
// result.  PIDs whose type cannot be handled (kind N) are dropped.
// Returns 0 on success, -1 when no PIDs could be found.
static int hb_ts_stream_init(hb_stream_t *stream)
{
    int i;

    // reset any state left from a previous open/scan
    if ( stream->ts.list )
    {
        for (i=0; i < stream->ts.alloc; i++)
        {
            stream->ts.list[i].continuity = -1;
            stream->ts.list[i].pid = -1;
            stream->ts.list[i].pes_list = -1;
        }
    }
    stream->ts.count = 0;

    if ( stream->pes.list )
    {
        for (i=0; i < stream->pes.alloc; i++)
        {
            stream->pes.list[i].stream_id = -1;
            stream->pes.list[i].next = -1;
        }
    }
    stream->pes.count = 0;

    // scratch packet buffer sized to this stream's TS packet size
    stream->ts.packet = malloc( stream->packetsize );

    // Find the audio and video pids in the stream
    if (hb_ts_stream_find_pids(stream) < 0)
    {
        return -1;
    }
    // hb_ts_resolve_pid_types reads some data, so the TS buffers
    // are needed here.
    for (i = 0; i < stream->ts.count; i++)
    {
        // demuxing buffer for TS to PS conversion
        stream->ts.list[i].buf = hb_buffer_init(stream->packetsize);
        stream->ts.list[i].extra_buf = hb_buffer_init(stream->packetsize);
        stream->ts.list[i].buf->size = 0;
        stream->ts.list[i].extra_buf->size = 0;
    }
    hb_ts_resolve_pid_types(stream);

    if( stream->scan )
    {
        // title scan: log every PID grouped by kind
        hb_log("Found the following PIDS");
        hb_log(" Video PIDS : ");
        for (i=0; i < stream->ts.count; i++)
        {
            if ( ts_stream_kind( stream, i ) == V )
            {
                hb_log( " 0x%x type %s (0x%x)%s",
                        stream->ts.list[i].pid,
                        stream_type_name2(stream,
                                &stream->pes.list[stream->ts.list[i].pes_list]),
                        ts_stream_type( stream, i ),
                        stream->ts.list[i].is_pcr ? " (PCR)" : "");
            }
        }
        hb_log(" Audio PIDS : ");
        for (i = 0; i < stream->ts.count; i++)
        {
            if ( ts_stream_kind( stream, i ) == A )
            {
                hb_log( " 0x%x type %s (0x%x)%s",
                        stream->ts.list[i].pid,
                        stream_type_name2(stream,
                                &stream->pes.list[stream->ts.list[i].pes_list]),
                        ts_stream_type( stream, i ),
                        stream->ts.list[i].is_pcr ? " (PCR)" : "");
            }
        }
        hb_log(" Subtitle PIDS : ");
        for (i = 0; i < stream->ts.count; i++)
        {
            if ( ts_stream_kind( stream, i ) == S )
            {
                hb_log( " 0x%x type %s (0x%x)%s",
                        stream->ts.list[i].pid,
                        stream_type_name2(stream,
                                &stream->pes.list[stream->ts.list[i].pes_list]),
                        ts_stream_type( stream, i ),
                        stream->ts.list[i].is_pcr ? " (PCR)" : "");
            }
        }
        hb_log(" Other PIDS : ");
        for (i = 0; i < stream->ts.count; i++)
        {
            if ( ts_stream_kind( stream, i ) == N ||
                 ts_stream_kind( stream, i ) == P )
            {
                hb_log( " 0x%x type %s (0x%x)%s",
                        stream->ts.list[i].pid,
                        stream_type_name2(stream,
                                &stream->pes.list[stream->ts.list[i].pes_list]),
                        ts_stream_type( stream, i ),
                        stream->ts.list[i].is_pcr ? " (PCR)" : "");
            }
            // drop streams we don't know how to handle
            if ( ts_stream_kind( stream, i ) == N )
                hb_stream_delete_ts_entry(stream, i);
        }
    }
    else
    {
        // not scanning: just drop the unknown streams silently
        for (i = 0; i < stream->ts.count; i++)
        {
            if ( ts_stream_kind( stream, i ) == N )
                hb_stream_delete_ts_entry(stream, i);
        }
    }
    return 0;
}
// Initialize program stream state: reset the PES stream table, find the
// streams present in the file, resolve their types and (on a title
// scan) log the result.  Streams whose type cannot be handled (kind N)
// are dropped.
static void hb_ps_stream_init(hb_stream_t *stream)
{
    int i;

    // reset any state left from a previous open/scan
    if ( stream->pes.list )
    {
        for (i=0; i < stream->pes.alloc; i++)
        {
            stream->pes.list[i].stream_id = -1;
            stream->pes.list[i].next = -1;
        }
    }
    stream->pes.count = 0;

    // Find the audio and video pids in the stream
    hb_ps_stream_find_streams(stream);
    hb_ps_resolve_stream_types(stream);

    if( stream->scan )
    {
        // title scan: log every stream grouped by kind
        hb_log("Found the following streams");
        hb_log(" Video Streams : ");
        for (i=0; i < stream->pes.count; i++)
        {
            if ( stream->pes.list[i].stream_kind == V )
            {
                hb_log( " 0x%x-0x%x type %s (0x%x)",
                        stream->pes.list[i].stream_id,
                        stream->pes.list[i].stream_id_ext,
                        stream_type_name2(stream,
                                         &stream->pes.list[i]),
                        stream->pes.list[i].stream_type);
            }
        }
        hb_log(" Audio Streams : ");
        for (i = 0; i < stream->pes.count; i++)
        {
            if ( stream->pes.list[i].stream_kind == A )
            {
                hb_log( " 0x%x-0x%x type %s (0x%x)",
                        stream->pes.list[i].stream_id,
                        stream->pes.list[i].stream_id_ext,
                        stream_type_name2(stream,
                                         &stream->pes.list[i]),
                        stream->pes.list[i].stream_type );
            }
        }
        hb_log(" Subtitle Streams : ");
        for (i = 0; i < stream->pes.count; i++)
        {
            if ( stream->pes.list[i].stream_kind == S )
            {
                hb_log( " 0x%x-0x%x type %s (0x%x)",
                        stream->pes.list[i].stream_id,
                        stream->pes.list[i].stream_id_ext,
                        stream_type_name2(stream,
                                         &stream->pes.list[i]),
                        stream->pes.list[i].stream_type );
            }
        }
        hb_log(" Other Streams : ");
        for (i = 0; i < stream->pes.count; i++)
        {
            if ( stream->pes.list[i].stream_kind == N )
            {
                hb_log( " 0x%x-0x%x type %s (0x%x)",
                        stream->pes.list[i].stream_id,
                        stream->pes.list[i].stream_id_ext,
                        stream_type_name2(stream,
                                         &stream->pes.list[i]),
                        stream->pes.list[i].stream_type );
                // drop streams we don't know how to handle
                hb_stream_delete_ps_entry(stream, i);
            }
        }
    }
    else
    {
        // not scanning: just drop the unknown streams silently
        for (i = 0; i < stream->pes.count; i++)
        {
            if ( stream->pes.list[i].stream_kind == N )
                hb_stream_delete_ps_entry(stream, i);
        }
    }
}
// maximum gap (in bytes) scanned per read while searching for TS sync
#define MAX_HOLE 208*80

// Resync to the next run of valid transport stream packets.  Backs up
// one packet from the current position (to re-verify a packet we may be
// in the middle of), then scans forward for a spot where 8 consecutive
// packets have valid sync bytes.  Leaves the file positioned at the
// first synced packet and returns how many bytes the position moved,
// or 0 on EOF / read failure.
static off_t align_to_next_packet(hb_stream_t *stream)
{
    uint8_t buf[MAX_HOLE];
    off_t pos = 0;
    off_t start = ftello(stream->file_handle);
    off_t orig;

    if ( start >= stream->packetsize ) {
        start -= stream->packetsize;
        fseeko(stream->file_handle, start, SEEK_SET);
    }
    orig = start;

    while (1)
    {
        if (fread(buf, sizeof(buf), 1, stream->file_handle) == 1)
        {
            const uint8_t *bp = buf;
            int i;

            // stop 8 packets short of the end of the window so
            // have_ts_sync() can look ahead
            for ( i = sizeof(buf) - 8 * stream->packetsize; --i >= 0; ++bp )
            {
                if ( have_ts_sync( bp, stream->packetsize, 8 ) )
                {
                    break;
                }
            }
            if ( i >= 0 )
            {
                // back up from the sync byte to the packet start:
                // presumably in timestamped TS variants (192/208-byte
                // packets) the 0x47 sync byte sits (packetsize - 188)
                // bytes into the packet -- confirm against have_ts_sync
                pos = ( bp - buf ) - stream->packetsize + 188;
                break;
            }
            // no sync in this window: overlap the next read by 8
            // packets so a sync straddling the boundary isn't missed
            fseeko(stream->file_handle, -8 * stream->packetsize, SEEK_CUR);
            start = ftello(stream->file_handle);
        }
        else
        {
            return 0;
        }
    }
    fseeko(stream->file_handle, start+pos, SEEK_SET);
    return start - orig + pos;
}
// bitmask[n] has the low n bits set (n = 0..32); used by the bit reader
// below to extract n-bit fields.
static const unsigned int bitmask[] = {
    0x0,0x1,0x3,0x7,0xf,0x1f,0x3f,0x7f,0xff,
    0x1ff,0x3ff,0x7ff,0xfff,0x1fff,0x3fff,0x7fff,0xffff,
    0x1ffff,0x3ffff,0x7ffff,0xfffff,0x1fffff,0x3fffff,0x7fffff,0xffffff,
    0x1ffffff,0x3ffffff,0x7ffffff,0xfffffff,0x1fffffff,0x3fffffff,0x7fffffff,0xffffffff};
// Initialize a bit reader over buf/bufsize and preload the first 32
// bits into bb->val.  NOTE(review): reads buf[0..3] unconditionally --
// assumes bufsize >= 4; confirm all callers guarantee this.  When
// 'clear' is set the underlying buffer is zeroed after the preload
// (bb->val keeps the original first word).
// Fix vs. the original: the duplicated trailing `bb->size = bufsize;`
// (a dead store -- size is already assigned above and nothing in
// between modifies it) has been removed.
static inline void bits_init(bitbuf_t *bb, uint8_t* buf, int bufsize, int clear)
{
    bb->pos = 0;
    bb->buf = buf;
    bb->size = bufsize;
    bb->val = (bb->buf[0] << 24) | (bb->buf[1] << 16) |
              (bb->buf[2] << 8) | bb->buf[3];
    if (clear)
        memset(bb->buf, 0, bufsize);
}
// Make 'dst' a copy of 'src' whose end is limited to 'bufsize' bytes
// past src's current (byte-aligned) read position -- used to parse a
// length-bounded sub-section without letting it run past its end.
static inline void bits_clone( bitbuf_t *dst, bitbuf_t *src, int bufsize )
{
    *dst = *src;
    dst->size = (dst->pos >> 3) + bufsize;
}
// Number of whole bytes between the current read position and the end
// of the buffer.
static inline int bits_bytes_left(bitbuf_t *bb)
{
    int consumed_bytes = bb->pos >> 3;
    return bb->size - consumed_bytes;
}
// True when the (byte-aligned) read position has reached the end of
// the buffer.
static inline int bits_eob(bitbuf_t *bb)
{
    int byte_pos = bb->pos >> 3;
    return byte_pos == bb->size;
}
// Return the next 'bits' bits (up to 32) without advancing the read
// position.
static inline unsigned int bits_peek(bitbuf_t *bb, int bits)
{
    unsigned int val;
    int left = 32 - (bb->pos & 31);     // bits left in the cached word

    if (bits < left)
    {
        // request fits entirely inside the cached 32-bit word
        val = (bb->val >> (left - bits)) & bitmask[bits];
    }
    else
    {
        // take what remains of the cached word...
        val = (bb->val & bitmask[left]) << (bits - left);
        int bpos = bb->pos + left;
        bits -= left;
        if (bits > 0)
        {
            // ...then peek into the following word without disturbing
            // bb->val.  NOTE(review): reads up to 4 bytes past the
            // current position -- presumably callers keep slack at the
            // end of the buffer; confirm.
            int pos = bpos >> 3;
            int bval = (bb->buf[pos] << 24) |
                       (bb->buf[pos + 1] << 16) |
                       (bb->buf[pos + 2] << 8) |
                       bb->buf[pos + 3];
            val |= (bval >> (32 - bits)) & bitmask[bits];
        }
    }

    return val;
}
// Read and consume the next 'bits' bits (up to 32), refilling the
// 32-bit cache word (bb->val) when it is exhausted.
static inline unsigned int bits_get(bitbuf_t *bb, int bits)
{
    unsigned int val;
    int left = 32 - (bb->pos & 31);     // bits left in the cached word

    if (bits < left)
    {
        // request fits inside the cached word
        val = (bb->val >> (left - bits)) & bitmask[bits];
        bb->pos += bits;
    }
    else
    {
        // drain the cached word, then reload the next 32 bits.
        // NOTE(review): the reload reads up to 4 bytes past the current
        // position -- presumably callers keep slack at the end of the
        // buffer; confirm.
        val = (bb->val & bitmask[left]) << (bits - left);
        bb->pos += left;
        bits -= left;

        int pos = bb->pos >> 3;
        bb->val = (bb->buf[pos] << 24) | (bb->buf[pos + 1] << 16) | (bb->buf[pos + 2] << 8) | bb->buf[pos + 3];

        if (bits > 0)
        {
            val |= (bb->val >> (32 - bits)) & bitmask[bits];
            bb->pos += bits;
        }
    }

    return val;
}
// Read an unsigned Exp-Golomb coded value (H.264 ue(v)): count leading
// zero bits up to the terminating 1 (capped at 32, and stopping at end
// of buffer), then read that many suffix bits.
static inline int bits_read_ue(bitbuf_t *bb )
{
    int ii = 0;

    while( bits_get( bb, 1 ) == 0 && !bits_eob( bb ) && ii < 32 )
    {
        ii++;
    }

    return( ( 1 << ii) - 1 + bits_get( bb, ii ) );
}
// Discard the next 'bits' bits (any non-negative count), consuming in
// 32-bit chunks since bits_get() handles at most 32 bits per call.
// Always returns 0.
static inline int bits_skip(bitbuf_t *bb, int bits)
{
    if (bits <= 0)
    {
        return 0;
    }
    for ( ; bits > 32; bits -= 32)
    {
        bits_get(bb, 32);
    }
    bits_get(bb, bits);
    return 0;
}
// extract what useful information we can from the elementary stream
// descriptor list at 'dp' and add it to the stream at 'esindx'.
// Descriptors with info we don't currently use are ignored.
// The descriptor list & descriptor item formats are defined in
// ISO 13818-1 (2000E) section 2.6 (pg. 62).
static void decode_element_descriptors(
    hb_stream_t *stream,
    int         pes_idx,
    bitbuf_t    *bb)
{
    int ii;

    // each descriptor is at least tag + length (2 bytes)
    while( bits_bytes_left( bb ) > 2 )
    {
        uint8_t tag = bits_get(bb, 8);
        uint8_t len = bits_get(bb, 8);

        switch ( tag )
        {
            case 5:    // Registration descriptor
                stream->pes.list[pes_idx].format_id = bits_get(bb, 32);
                bits_skip(bb, 8 * (len - 4));
                break;

            case 10:   // ISO_639_language descriptor
            {
                char code[3];
                for (ii = 0; ii < 3; ii++)
                {
                    code[ii] = bits_get(bb, 8);
                }
                stream->pes.list[pes_idx].lang_code =
                                    lang_to_code(lang_for_code2(code));
                bits_skip(bb, 8 * (len - 3));
            } break;

            case 0x56: // DVB Teletext descriptor
            {
                // We don't currently process teletext from
                // TS or PS streams. Set stream 'kind' to N
                stream->pes.list[pes_idx].stream_type = 0x00;
                stream->pes.list[pes_idx].stream_kind = N;
                strncpy(stream->pes.list[pes_idx].codec_name,
                        "DVB Teletext", 80);
                bits_skip(bb, 8 * len);
            } break;

            case 0x59: // DVB Subtitleing descriptor
            {
                // We don't currently process subtitles from
                // TS or PS streams. Set stream 'kind' to N
                stream->pes.list[pes_idx].stream_type = 0x00;
                stream->pes.list[pes_idx].stream_kind = N;
                strncpy(stream->pes.list[pes_idx].codec_name,
                        "DVB Subtitling", 80);
                bits_skip(bb, 8 * len);
            } break;

            case 0x6a: // DVB AC-3 descriptor
            {
                // remap to the ATSC AC-3 stream type so the rest of the
                // code recognizes it
                stream->pes.list[pes_idx].stream_type = 0x81;
                update_pes_kind( stream, pes_idx );
                bits_skip(bb, 8 * len);
            } break;

            case 0x7a: // DVB EAC-3 descriptor
            {
                // remap to the ATSC E-AC-3 stream type
                stream->pes.list[pes_idx].stream_type = 0x87;
                update_pes_kind( stream, pes_idx );
                bits_skip(bb, 8 * len);
            } break;

            default:
                bits_skip(bb, 8 * len);
                break;
        }
    }
}
// Parse an assembled PMT section (stream->pmt_info.tablebuf) and record
// the PCR PID, the program registration descriptor, and every
// elementary stream (PID, stream type, ES descriptors) it lists.
// Always returns 1.
int decode_program_map(hb_stream_t* stream)
{
    bitbuf_t bb;
    bits_init(&bb, stream->pmt_info.tablebuf, stream->pmt_info.tablepos, 0);

    // fixed PMT section header (ISO 13818-1 table 2-28)
    bits_get(&bb, 8);  // table_id
    bits_get(&bb, 4);
    unsigned int section_length = bits_get(&bb, 12);
    bits_get(&bb, 16); // program number
    bits_get(&bb, 2);
    bits_get(&bb, 5);  // version_number
    bits_get(&bb, 1);
    bits_get(&bb, 8);  // section_number
    bits_get(&bb, 8);  // last_section_number
    bits_get(&bb, 3);
    stream->pmt_info.PCR_PID = bits_get(&bb, 13);
    bits_get(&bb, 4);
    int program_info_length = bits_get(&bb, 12);

    // walk the program-level descriptor loop looking for a
    // registration descriptor (tag 5)
    int i;
    for (i = 0; i < program_info_length - 2; )
    {
        uint8_t tag, len;
        tag = bits_get(&bb, 8);
        len = bits_get(&bb, 8);
        i += 2;
        if ( i + len > program_info_length )
        {
            break;
        }
        if (tag == 0x05 && len >= 4)
        {
            // registration descriptor
            stream->reg_desc = bits_get(&bb, 32);
            i += 4;
            len -= 4;
        }
        int j;
        for ( j = 0; j < len; j++ )
        {
            bits_get(&bb, 8);
        }
        i += len;
    }
    // skip any remaining descriptor-loop bytes
    for ( ; i < program_info_length; i++ )
    {
        bits_get(&bb, 8);
    }

    int cur_pos = 9 /* data after the section length field*/ + program_info_length;
    int done_reading_stream_types = 0;
    int ii = 0;
    while (!done_reading_stream_types)
    {
        unsigned char stream_type = bits_get(&bb, 8);
        bits_get(&bb, 3);
        unsigned int elementary_PID = bits_get(&bb, 13);
        bits_get(&bb, 4);
        unsigned int info_len = bits_get(&bb, 12);
        // Defined audio stream types are 0x81 for AC-3/A52 audio
        // and 0x03 for mpeg audio. But content producers seem to
        // use other values (0x04 and 0x06 have both been observed)
        // so at this point we say everything that isn't a video
        // pid is audio then at the end of hb_stream_title_scan
        // we'll figure out which are really audio by looking at
        // the PES headers.
        int pes_idx;
        update_ts_streams( stream, elementary_PID, 0,
                           stream_type, -1, &pes_idx );
        if ( pes_idx >= 0 )
            stream->pes.list[pes_idx].map_idx = ii;
        if (info_len > 0)
        {
            // parse the ES descriptor loop through a length-bounded
            // clone so a bad descriptor can't run past this ES entry
            bitbuf_t bb_desc;
            bits_clone( &bb_desc, &bb, info_len );
            if ( pes_idx >= 0 )
                decode_element_descriptors( stream, pes_idx, &bb_desc );
            bits_skip(&bb, 8 * info_len);
        }
        cur_pos += 5 /* stream header */ + info_len;
        if (cur_pos >= section_length - 4 /* stop before the CRC */)
            done_reading_stream_types = 1;
        ii++;
    }
    return 1;
}
// Accumulate PMT section bytes from the TS packet at 'buf'.  Returns 1
// once a complete PMT section has been assembled and successfully
// decoded, 0 otherwise (still accumulating, out-of-sequence packet, or
// a malformed packet).
static int build_program_map(const uint8_t *buf, hb_stream_t *stream)
{
    // Get adaption header info
    int adapt_len = 0;
    int adaption = (buf[3] & 0x30) >> 4;
    if (adaption == 0)
        return 0;
    else if (adaption == 0x2)
        adapt_len = 184;
    else if (adaption == 0x3)
        adapt_len = buf[4] + 1;
    if (adapt_len > 184)
        return 0;

    // Get payload start indicator
    int start;
    start = (buf[1] & 0x40) != 0;

    // Get pointer length - only valid in packets with a start flag
    int pointer_len = 0;
    if (start)
    {
        // a new section begins: restart accumulation
        pointer_len = buf[4 + adapt_len] + 1;
        stream->pmt_info.tablepos = 0;
    }
    // Get Continuity Counter
    int continuity_counter = buf[3] & 0x0f;
    // NOTE(review): this comparison is not taken mod 16, so a counter
    // wrapping 15 -> 0 is reported as out of sequence -- confirm intent
    if (!start && (stream->pmt_info.current_continuity_counter + 1 != continuity_counter))
    {
        hb_log("build_program_map - Continuity Counter %d out of sequence - expected %d", continuity_counter, stream->pmt_info.current_continuity_counter+1);
        return 0;
    }
    stream->pmt_info.current_continuity_counter = continuity_counter;
    stream->pmt_info.reading |= start;

    // Add the payload for this packet to the current buffer
    int amount_to_copy = 184 - adapt_len - pointer_len;
    if (stream->pmt_info.reading && (amount_to_copy > 0))
    {
        stream->pmt_info.tablebuf = realloc(stream->pmt_info.tablebuf, stream->pmt_info.tablepos + amount_to_copy);

        memcpy(stream->pmt_info.tablebuf + stream->pmt_info.tablepos, buf + 4 + adapt_len + pointer_len, amount_to_copy);
        stream->pmt_info.tablepos += amount_to_copy;
    }
    if (stream->pmt_info.tablepos > 3)
    {
        // We have enough to check the section length
        int length;
        length = ((stream->pmt_info.tablebuf[1] << 8) +
                  stream->pmt_info.tablebuf[2]) & 0xFFF;
        if (stream->pmt_info.tablepos > length + 1)
        {
            // We just finished a bunch of packets - parse the program map details
            int decode_ok = 0;
            if (stream->pmt_info.tablebuf[0] == 0x02)   // PMT table_id
                decode_ok = decode_program_map(stream);
            free(stream->pmt_info.tablebuf);
            stream->pmt_info.tablebuf = NULL;
            stream->pmt_info.tablepos = 0;
            stream->pmt_info.reading = 0;
            if (decode_ok)
                return decode_ok;
        }
    }
    return 0;
}
// Decode a Program Association Table from the TS packet at 'buf',
// filling stream->pat_info[] with (program number, PMT PID) pairs and
// setting stream->ts_number_pat_entries.  Returns 1 when a section was
// parsed, 0 on a malformed packet.
static int decode_PAT(const uint8_t *buf, hb_stream_t *stream)
{
    unsigned char tablebuf[1024];
    unsigned int tablepos = 0;
    int reading = 0;

    // Get adaption header info
    int adapt_len = 0;
    int adaption = (buf[3] & 0x30) >> 4;
    if (adaption == 0)
        return 0;
    else if (adaption == 0x2)
        adapt_len = 184;
    else if (adaption == 0x3)
        adapt_len = buf[4] + 1;
    if (adapt_len > 184)
        return 0;

    // Get pointer length
    int pointer_len = buf[4 + adapt_len] + 1;

    // Get payload start indicator
    int start;
    start = (buf[1] & 0x40) != 0;

    if (start)
        reading = 1;

    // Add the payload for this packet to the current buffer
    if (reading && (184 - adapt_len) > 0)
    {
        if (tablepos + 184 - adapt_len - pointer_len > 1024)
        {
            hb_log("decode_PAT - Bad program section length (> 1024)");
            return 0;
        }
        memcpy(tablebuf + tablepos, buf + 4 + adapt_len + pointer_len, 184 - adapt_len - pointer_len);
        tablepos += 184 - adapt_len - pointer_len;
    }

    if (start && reading)
    {
        // copy the bytes between the pointer field and the section
        // start (tail of a previous section)
        memcpy(tablebuf + tablepos, buf + 4 + adapt_len + 1, pointer_len - 1);

        unsigned int pos = 0;
        //while (pos < tablepos)
        {
            bitbuf_t bb;
            bits_init(&bb, tablebuf + pos, tablepos - pos, 0);

            // generic PSI section header
            unsigned char section_id = bits_get(&bb, 8);
            bits_get(&bb, 4);
            unsigned int section_len = bits_get(&bb, 12);
            bits_get(&bb, 16); // transport_id
            bits_get(&bb, 2);
            bits_get(&bb, 5);  // version_num
            bits_get(&bb, 1);  // current_next
            bits_get(&bb, 8);  // section_num
            bits_get(&bb, 8);  // last_section

            switch (section_id)
            {
              case 0x00:
                {
                    // Program Association Section
                    section_len -= 5;    // Already read transport stream ID, version num, section num, and last section num
                    section_len -= 4;   // Ignore the CRC
                    int curr_pos = 0;
                    stream->ts_number_pat_entries = 0;
                    // each entry is 4 bytes: program number + PID
                    while ((curr_pos < section_len) && (stream->ts_number_pat_entries < kMaxNumberPMTStreams))
                    {
                        unsigned int pkt_program_num = bits_get(&bb, 16);
                        stream->pat_info[stream->ts_number_pat_entries].program_number = pkt_program_num;

                        bits_get(&bb, 3);  // Reserved

                        if (pkt_program_num == 0)
                        {
                            // program 0 carries the network PID, not a PMT
                            bits_get(&bb, 13); // pkt_network_id
                        }
                        else
                        {
                            unsigned int pkt_program_map_PID = bits_get(&bb, 13);
                            stream->pat_info[stream->ts_number_pat_entries].program_map_PID = pkt_program_map_PID;
                        }
                        curr_pos += 4;
                        stream->ts_number_pat_entries++;
                    }
                }
                break;

              case 0xC7:
                {
                    break;
                }
              case 0xC8:
                {
                    break;
                }
            }

            pos += 3 + section_len;
        }

        tablepos = 0;
    }
    return 1;
}
// Convert a PES PTS or DTS field to an int64.
// The 33-bit timestamp is split into 3/15/15-bit groups, each followed by
// a marker bit.
//
// Bug fix: the original built the value in a single expression
// (bits_get() << 30) + bits_skip() + (bits_get() << 15) + ... — but the
// order in which function calls inside one expression are evaluated is
// unspecified in C, so the field assignment only came out right when the
// compiler happened to evaluate left-to-right. Sequence the reads as
// separate statements so the order is guaranteed.
static int64_t parse_pes_timestamp( bitbuf_t *bb )
{
    int64_t ts;
    ts  = (int64_t)bits_get(bb, 3) << 30;   // bits 32..30
    bits_skip(bb, 1);                       // marker bit
    ts |= (int64_t)bits_get(bb, 15) << 15;  // bits 29..15
    bits_skip(bb, 1);                       // marker bit
    ts |= bits_get(bb, 15);                 // bits 14..0
    bits_skip(bb, 1);                       // marker bit
    return ts;
}
// Parse a PES packet header (MPEG-1 or MPEG-2 form) starting just after
// the 4-byte start code. Fills pes_info with packet_len, header_len,
// pts/dts, stream_id_ext and (for program streams) the private-stream-1
// substream id. Returns 1 on success, 0 if the header is malformed or
// truncated. The caller passes a clone positioned at the packet start,
// so bb->pos below is an absolute offset from the start code.
static int parse_pes_header(
    hb_stream_t *stream,
    bitbuf_t *bb,
    hb_pes_info_t *pes_info )
{
    if ( bits_bytes_left(bb) < 6 )
    {
        return 0;
    }
    // Skip the 00 00 01 <stream_id> start code (already recorded by caller).
    bits_skip(bb, 8 * 4);
    pes_info->packet_len = bits_get(bb, 16);
    /*
     * This would normally be an error. But the decoders can generally
     * recover well from missing data. So let the packet pass.
    if ( bits_bytes_left(bb) < pes_info->packet_len )
    {
        return 0;
    }
    */
    // MPEG-2 PES headers start with the bit pattern '10'.
    int mark = bits_peek(bb, 2);
    if ( mark == 0x02 )
    {
        // mpeg2 pes
        if ( bits_bytes_left(bb) < 3 )
        {
            return 0;
        }
        /*
        bits_skip(bb, 2);
        bits_get(bb, 2); // scrambling
        bits_get(bb, 1); // priority
        bits_get(bb, 1); // alignment
        bits_get(bb, 1); // copyright
        bits_get(bb, 1); // original
        */
        bits_get(bb, 8); // skip all of the above
        // PTS/DTS flags: 0x2 = PTS only, 0x3 = PTS + DTS.
        int has_pts = bits_get(bb, 2);
        int has_escr = bits_get(bb, 1);
        int has_esrate = bits_get(bb, 1);
        int has_dsm = bits_get(bb, 1);
        int has_copy_info = bits_get(bb, 1);
        int has_crc = bits_get(bb, 1);
        int has_ext = bits_get(bb, 1);
        int hdr_len = pes_info->header_len = bits_get(bb, 8);
        // header_len becomes absolute: optional-field bytes + fixed header.
        pes_info->header_len += bb->pos >> 3;
        bitbuf_t bb_hdr;
        // Parse the optional fields from a bounded clone so overruns in
        // the optional area can't walk past the declared header length.
        bits_clone(&bb_hdr, bb, hdr_len);
        if ( bits_bytes_left(&bb_hdr) < hdr_len )
        {
            return 0;
        }
        // Minimum bytes the flagged optional fields require:
        // PTS is 5 bytes, DTS (low bit of has_pts) another 5.
        int expect = (!!has_pts) * 5 + (has_pts & 0x01) * 5 + has_escr * 6 +
                     has_esrate * 3 + has_dsm + has_copy_info + has_crc * 2 +
                     has_ext;
        if ( bits_bytes_left(&bb_hdr) < expect )
        {
            return 0;
        }
        if( has_pts )
        {
            if ( bits_bytes_left(&bb_hdr) < 5 )
            {
                return 0;
            }
            bits_skip(&bb_hdr, 4); // '0010'/'0011' prefix nibble
            pes_info->pts = parse_pes_timestamp( &bb_hdr );
            if ( has_pts & 1 )
            {
                if ( bits_bytes_left(&bb_hdr) < 5 )
                {
                    return 0;
                }
                bits_skip(&bb_hdr, 4); // '0001' prefix nibble
                pes_info->dts = parse_pes_timestamp( &bb_hdr );
            }
            else
            {
                // No explicit DTS: decode time equals presentation time.
                pes_info->dts = pes_info->pts;
            }
        }
        // A user encountered a stream that has garbage DTS timestamps.
        // DTS should never be > PTS. Such broken timestamps leads to
        // HandBrake computing negative buffer start times.
        if (pes_info->dts > pes_info->pts)
        {
            pes_info->dts = pes_info->pts;
        }
        if ( has_escr )
            bits_skip(&bb_hdr, 8 * 6);
        if ( has_esrate )
            bits_skip(&bb_hdr, 8 * 3);
        if ( has_dsm )
            bits_skip(&bb_hdr, 8);
        if ( has_copy_info )
            bits_skip(&bb_hdr, 8);
        if ( has_crc )
            bits_skip(&bb_hdr, 8 * 2);
        if ( has_ext )
        {
            // PES extension flags
            int has_private = bits_get(&bb_hdr, 1);
            int has_pack = bits_get(&bb_hdr, 1);
            int has_counter = bits_get(&bb_hdr, 1);
            int has_pstd = bits_get(&bb_hdr, 1);
            bits_skip(&bb_hdr, 3); // reserved bits
            int has_ext2 = bits_get(&bb_hdr, 1);
            expect = (has_private) * 16 + has_pack + has_counter * 2 +
                      has_pstd * 2 + has_ext2 * 2;
            if ( bits_bytes_left(&bb_hdr) < expect )
            {
                return 0;
            }
            if ( has_private )
            {
                bits_skip(&bb_hdr, 8 * 16);
                // NOTE(review): 16 bytes were just consumed but 'expect'
                // is only reduced by 2 — looks inconsistent; confirm
                // against the pack-header-length check below.
                expect -= 2;
            }
            if ( has_pack )
            {
                int len = bits_get(&bb_hdr, 8);
                expect -= 1;
                if ( bits_bytes_left(&bb_hdr) < len + expect )
                {
                    return 0;
                }
                bits_skip(&bb_hdr, 8 * len);
            }
            if ( has_counter )
                bits_skip(&bb_hdr, 8 * 2);
            if ( has_pstd )
                bits_skip(&bb_hdr, 8 * 2);
            if ( has_ext2 )
            {
                bits_skip(&bb_hdr, 1); // marker
                bits_get(&bb_hdr, 7); // extension length
                // NOTE(review): a 0 bit here indicates the stream_id
                // extension is present, hence the negation — confirm.
                pes_info->has_stream_id_ext = !bits_get(&bb_hdr, 1);
                if ( pes_info->has_stream_id_ext )
                    pes_info->stream_id_ext = bits_get(&bb_hdr, 7);
            }
        }
        // eat header stuffing
        bits_skip(bb, 8 * hdr_len);
    }
    else
    {
        // mpeg1 pes
        // Skip stuffing
        while ( bits_peek(bb, 1) && bits_bytes_left(bb) )
            bits_get(bb, 8);
        if ( !bits_bytes_left(bb) )
            return 0;
        // Skip std buffer info
        int mark = bits_get(bb, 2);
        if ( mark == 0x01 )
        {
            if ( bits_bytes_left(bb) < 2 )
                return 0;
            bits_skip(bb, 8 * 2);
        }
        // In MPEG-1 the PTS/DTS prefix doubles as the presence flag.
        int has_pts = bits_get(bb, 2);
        if( has_pts == 0x02 )
        {
            pes_info->pts = parse_pes_timestamp( bb );
            pes_info->dts = pes_info->pts;
        }
        else if( has_pts == 0x03 )
        {
            pes_info->pts = parse_pes_timestamp( bb );
            bits_skip(bb, 4);
            pes_info->dts = parse_pes_timestamp( bb );
        }
        else
        {
            bits_skip(bb, 8); // 0x0f flag
        }
        if ( bits_bytes_left(bb) < 0 )
            return 0;
        pes_info->header_len = bb->pos >> 3;
    }
    // private stream 1 in a program stream carries a substream id byte
    // (and for some codecs extra header bytes) after the PES header.
    if ( pes_info->stream_id == 0xbd && stream->hb_stream_type == program )
    {
        if ( bits_bytes_left(bb) < 4 )
        {
            return 0;
        }
        int ssid = bits_peek(bb, 8);
        if( ( ssid >= 0xa0 && ssid <= 0xaf ) ||
            ( ssid >= 0x20 && ssid <= 0x2f ) )
        {
            // DVD LPCM or DVD SPU (subtitles)
            pes_info->bd_substream_id = bits_get(bb, 8);
            pes_info->header_len += 1;
        }
        else if ( ssid >= 0xb0 && ssid <= 0xbf )
        {
            // HD-DVD TrueHD has a 4 byte header
            pes_info->bd_substream_id = bits_get(bb, 8);
            bits_skip(bb, 8 * 4);
            pes_info->header_len += 5;
        }
        else if( ( ssid >= 0x80 && ssid <= 0x9f ) ||
                 ( ssid >= 0xc0 && ssid <= 0xcf ) )
        {
            // AC3, E-AC3, DTS, and DTS-HD has 3 byte header
            pes_info->bd_substream_id = bits_get(bb, 8);
            bits_skip(bb, 8 * 3);
            pes_info->header_len += 4;
        }
    }
    return 1;
}
// Parse a program stream pack header (stream id 0xba), extracting the
// system clock reference into pes_info->scr and computing header_len.
// Handles both the MPEG-1 (12-byte) and MPEG-2 (14-byte + stuffing) forms.
// Returns 1 on success, 0 if the buffer is too short.
static int parse_pack_header(
    hb_stream_t *stream,
    bitbuf_t *bb,
    hb_pes_info_t *pes_info )
{
    if ( bits_bytes_left(bb) < 12)
    {
        return 0;
    }
    // Skip the 00 00 01 ba start code.
    bits_skip(bb, 8 * 4);
    // MPEG-2 packs start with '01'; MPEG-1 packs start with '0010'.
    int mark = bits_get(bb, 2);
    if ( mark == 0x00 )
    {
        // mpeg1 pack
        bits_skip(bb, 2); // marker
    }
    pes_info->scr = parse_pes_timestamp( bb );
    if ( mark == 0x00 )
    {
        // MPEG-1: skip mux rate + marker bits; header length is fixed.
        bits_skip(bb, 24);
        pes_info->header_len = (bb->pos >> 3);
    }
    else
    {
        // MPEG-2: skip SCR extension, mux rate and reserved bits, then
        // account for the variable number of trailing stuffing bytes.
        bits_skip(bb, 39);
        int stuffing = bits_get(bb, 3);
        pes_info->header_len = stuffing;
        pes_info->header_len += (bb->pos >> 3);
    }
    return 1;
}
// Parse one program stream packet and fill 'pes_info' (timestamps default
// to AV_NOPTS_VALUE). Dispatches on the stream id: end code, pack header,
// PES packets, or the simple length-only system packets.
// Returns 1 on success, 0 on a malformed packet.
static int hb_parse_ps(
    hb_stream_t *stream,
    uint8_t *buf,
    int len,
    hb_pes_info_t *pes_info )
{
    memset( pes_info, 0, sizeof( hb_pes_info_t ) );
    pes_info->pts = AV_NOPTS_VALUE;
    pes_info->dts = AV_NOPTS_VALUE;

    bitbuf_t bb, cc;
    bits_init(&bb, buf, len, 0);
    bits_clone(&cc, &bb, len);

    if ( bits_bytes_left(&bb) < 4 )
        return 0;

    // Every packet must begin with the 00 00 01 start code prefix.
    if ( bits_get(&bb, 8 * 3) != 0x000001 )
        return 0;

    pes_info->stream_id = bits_get(&bb, 8);

    switch ( pes_info->stream_id )
    {
        case 0xb9:
            // Program stream end code
            return 1;

        case 0xba:
            // Pack header; 'cc' is positioned at the start code.
            return parse_pack_header( stream, &cc, pes_info );

        // Ids >= 0xbd are PES packets *except* these, which carry only
        // a 16-bit packet length after the start code:
        // padding (0xbe), private_stream_2 (0xbf), ECM/EMM/streams
        // (0xf0-0xf2), DSMCC (0xf8) and directory (0xff).
        case 0xbe: case 0xbf:
        case 0xf0: case 0xf1: case 0xf2:
        case 0xf8: case 0xff:
            break;

        default:
            if ( pes_info->stream_id >= 0xbd )
                return parse_pes_header( stream, &cc, pes_info );
            break;
    }

    // Simple packet: just a length field.
    if ( bits_bytes_left(&bb) < 2 )
        return 0;
    pes_info->packet_len = bits_get(&bb, 16);
    pes_info->header_len = bb.pos >> 3;
    return 1;
}
// Read one start-code delimited program stream packet from the file and
// append it to 'b' (b->size is the append offset on entry). Returns the
// number of bytes added; 0 means EOF or no further packet.
//
// Bug fix: the three pack-header fread() calls ignored their return
// values, so a truncated file left uninitialized bytes in the buffer that
// were then parsed as header data. Short reads now truncate the packet
// cleanly via 'goto done'.
static int hb_ps_read_packet( hb_stream_t * stream, hb_buffer_t *b )
{
    // Appends to buffer if size != 0
    int start_code = -1;
    int pos = b->size;
    int stream_id = -1;
    int c;

#define cp (b->data)
    flockfile( stream->file_handle );
    // Scan forward for the 00 00 01 start code prefix.
    while ( ( c = getc_unlocked( stream->file_handle ) ) != EOF )
    {
        start_code = ( start_code << 8 ) | c;
        if ( ( start_code >> 8 )== 0x000001 )
            // we found the start of the next start
            break;
    }
    if ( c == EOF )
        goto done;

    if ( pos + 4 > b->alloc )
    {
        // need to expand the buffer
        hb_buffer_realloc( b, b->alloc * 2 );
    }
    // Store the 4-byte start code (prefix + stream id).
    cp[pos++] = ( start_code >> 24 ) & 0xff;
    cp[pos++] = ( start_code >> 16 ) & 0xff;
    cp[pos++] = ( start_code >> 8 ) & 0xff;
    cp[pos++] = ( start_code ) & 0xff;
    stream_id = start_code & 0xff;

    if ( stream_id == 0xba )
    {
        int start = pos - 4;
        // Read pack header
        if ( pos + 21 >= b->alloc )
        {
            // need to expand the buffer
            hb_buffer_realloc( b, b->alloc * 2 );
        }
        // There are at least 8 bytes. More if this is mpeg2 pack.
        if ( fread( cp+pos, 1, 8, stream->file_handle ) != 8 )
            goto done;
        int mark = cp[pos] >> 4;
        pos += 8;

        if ( mark != 0x02 )
        {
            // mpeg-2 pack: 2 more fixed bytes plus 0-7 stuffing bytes.
            if ( fread( cp+pos, 1, 2, stream->file_handle ) != 2 )
                goto done;
            pos += 2;
            int len = cp[start+13] & 0x7;
            // Advance only by what was actually read so a short read
            // can't expose uninitialized buffer bytes.
            len = fread( cp+pos, 1, len, stream->file_handle );
            pos += len;
        }
    }
    // Non-video streams can emulate start codes, so we need
    // to inspect PES packets and skip over their data
    // sections to avoid mis-detection of the next pack or pes start code
    else if ( stream_id >= 0xbb )
    {
        int len = 0;
        c = getc_unlocked( stream->file_handle );
        if ( c == EOF )
            goto done;
        len = c << 8;
        c = getc_unlocked( stream->file_handle );
        if ( c == EOF )
            goto done;
        len |= c;
        if ( pos + len + 2 > b->alloc )
        {
            if ( b->alloc * 2 > pos + len + 2 )
                hb_buffer_realloc( b, b->alloc * 2 );
            else
                hb_buffer_realloc( b, b->alloc * 2 + len + 2 );
        }
        cp[pos++] = len >> 8;
        cp[pos++] = len & 0xff;
        if ( len )
        {
            // Length is non-zero, read the packet all at once
            len = fread( cp+pos, 1, len, stream->file_handle );
            pos += len;
        }
        else
        {
            // Length is zero, read bytes till we find a start code.
            // Only video PES packets are allowed to have zero length.
            start_code = -1;
            while ( ( c = getc_unlocked( stream->file_handle ) ) != EOF )
            {
                start_code = ( start_code << 8 ) | c;
                if ( pos >= b->alloc )
                {
                    // need to expand the buffer
                    hb_buffer_realloc( b, b->alloc * 2 );
                }
                cp[pos++] = c;
                if ( ( start_code >> 8 ) == 0x000001 &&
                     ( start_code & 0xff ) >= 0xb9 )
                {
                    // we found the start of the next start
                    break;
                }
            }
            if ( c == EOF )
                goto done;
            // Push the next packet's start code back for the next call.
            pos -= 4;
            fseeko( stream->file_handle, -4, SEEK_CUR );
        }
    }
    else
    {
        // Unknown, find next start code
        start_code = -1;
        while ( ( c = getc_unlocked( stream->file_handle ) ) != EOF )
        {
            start_code = ( start_code << 8 ) | c;
            if ( pos >= b->alloc )
            {
                // need to expand the buffer
                hb_buffer_realloc( b, b->alloc * 2 );
            }
            cp[pos++] = c;
            if ( ( start_code >> 8 ) == 0x000001 &&
                 ( start_code & 0xff ) >= 0xb9 )
                // we found the start of the next start
                break;
        }
        if ( c == EOF )
            goto done;
        // Push the next packet's start code back for the next call.
        pos -= 4;
        fseeko( stream->file_handle, -4, SEEK_CUR );
    }

done:
    funlockfile( stream->file_handle );
    int len = pos - b->size;
    b->size = pos;
#undef cp
    return len;
}
// Read program stream packets until one belonging to a stream we care
// about is found, then return it as an hb_buffer_t with id, timestamps
// and PCR filled in. Returns NULL at end of file.
static hb_buffer_t * hb_ps_stream_decode( hb_stream_t *stream )
{
    hb_pes_info_t pes_info;
    hb_buffer_t *buf = hb_buffer_init(HB_DVD_READ_BUFFER_SIZE);

    while (1)
    {
        buf->size = 0;
        int len = hb_ps_read_packet( stream, buf );
        if ( len == 0 )
        {
            // End of file
            // hb_buffer_close() NULLs the pointer, so this returns NULL.
            hb_buffer_close( &buf );
            return buf;
        }
        if ( !hb_parse_ps( stream, buf->data, buf->size, &pes_info ) )
        {
            ++stream->errors;
            continue;
        }
        // pack header
        if ( pes_info.stream_id == 0xba )
        {
            // Remember the clock reference; consumed by the next data
            // packet and reset after use (see below).
            stream->pes.found_scr = 1;
            stream->ts_flags |= TS_HAS_PCR;
            stream->pes.scr = pes_info.scr;
            continue;
        }

        // If we don't have a SCR yet but the stream has SCRs just loop
        // so we don't process anything until we have a clock reference.
        if ( !stream->pes.found_scr && ( stream->ts_flags & TS_HAS_PCR ) )
        {
            continue;
        }

        // system header
        if ( pes_info.stream_id == 0xbb )
            continue;

        int idx;
        if ( pes_info.stream_id == 0xbd )
        {
            // private stream 1: substream id distinguishes the payloads
            idx = index_of_ps_stream( stream, pes_info.stream_id,
                                      pes_info.bd_substream_id );
        }
        else
        {
            idx = index_of_ps_stream( stream, pes_info.stream_id,
                                      pes_info.stream_id_ext );
        }

        // Is this a stream carrying data that we care about?
        if ( idx < 0 )
            continue;

        switch (stream->pes.list[idx].stream_kind)
        {
            case A:
                buf->s.type = AUDIO_BUF;
                break;

            case V:
                buf->s.type = VIDEO_BUF;
                break;

            default:
                buf->s.type = OTHER_BUF;
                break;
        }

        if ( stream->need_keyframe )
        {
            // we're looking for the first video frame because we're
            // doing random access during 'scan'
            if ( buf->s.type != VIDEO_BUF ||
                 !isIframe( stream, buf->data, buf->size ) )
            {
                // not the video stream or didn't find an I frame
                // but we'll only wait 600 video frames for an I frame.
                // (need_keyframe doubles as the wait counter here.)
                if ( buf->s.type != VIDEO_BUF || ++stream->need_keyframe < 600 )
                {
                    continue;
                }
            }
            stream->need_keyframe = 0;
        }
        if ( buf->s.type == VIDEO_BUF )
            ++stream->frames;

        buf->s.id = get_id( &stream->pes.list[idx] );
        buf->s.pcr = stream->pes.scr;
        buf->s.start = pes_info.pts;
        buf->s.renderOffset = pes_info.dts;
        // Strip the PES header so only the payload is returned.
        memmove( buf->data, buf->data + pes_info.header_len,
                 buf->size - pes_info.header_len );
        buf->size -= pes_info.header_len;
        if ( buf->size == 0 )
            continue;
        // The SCR is delivered once per pack; invalidate until the next one.
        stream->pes.scr = AV_NOPTS_VALUE;
        return buf;
    }
}
// Add or update an entry in the PES stream list for (stream_id,
// stream_id_ext). 'in_kind' == -1 means derive the kind from the
// stream_type lookup table. Handles three special cases: unknown streams
// that have not yet been assigned a stream_id_ext, upgrading an entry's
// kind/type in place, and demoting duplicate video streams to the end of
// the list. Returns the index of the (new or existing) entry.
static int update_ps_streams( hb_stream_t * stream, int stream_id, int stream_id_ext, int stream_type, int in_kind )
{
    int ii;
    int same_stream = -1;
    kind_t kind = in_kind == -1 ? st2codec[stream_type].kind : in_kind;

    for ( ii = 0; ii < stream->pes.count; ii++ )
    {
        // Remember any entry with the same base stream_id; used to seed
        // a brand new entry below.
        if ( stream->pes.list[ii].stream_id == stream_id )
            same_stream = ii;

        if ( stream->pes.list[ii].stream_id == stream_id &&
             stream->pes.list[ii].stream_id_ext == 0 &&
             stream->pes.list[ii].stream_kind == U )
        {
            // This is an unknown stream type that hasn't been
            // given a stream_id_ext. So match only to stream_id
            //
            // is the stream_id_ext being updated?
            if ( stream_id_ext != 0 )
                break;

            // If stream is already in the list and the new 'kind' is
            // PCR, Unknown, or same as before, just return the index
            // to the entry found.
            if ( kind == P || kind == U || kind == stream->pes.list[ii].stream_kind )
                return ii;

            // Update stream_type and kind
            break;
        }
        if ( stream_id == stream->pes.list[ii].stream_id &&
             stream_id_ext == stream->pes.list[ii].stream_id_ext )
        {
            // If stream is already in the list and the new 'kind' is
            // PCR and the old 'kind' is unknown, set the new 'kind'
            if ( kind == P && stream->pes.list[ii].stream_kind == U )
                break;

            // If stream is already in the list and the new 'kind' is
            // PCR, Unknown, or same as before, just return the index
            // to the entry found.
            if ( kind == P || kind == U || kind == stream->pes.list[ii].stream_kind )
                return ii;

            // Replace unknown 'kind' with known 'kind'
            break;
        }
        // Resolve multiple videos
        if ( kind == V && stream->pes.list[ii].stream_kind == V )
        {
            if ( stream_id <= stream->pes.list[ii].stream_id &&
                 stream_id_ext <= stream->pes.list[ii].stream_id_ext )
            {
                // Assume primary video stream has the smallest stream id
                // and only use the primary. move the current item
                // to the end of the list. we want to keep it for
                // debug and informational purposes.
                int jj = new_pes( stream );
                memcpy( &stream->pes.list[jj], &stream->pes.list[ii],
                        sizeof( hb_pes_stream_t ) );
                break;
            }
        }
    }
    if ( ii == stream->pes.count )
    {
        // No match found: create a fresh entry, copying metadata from a
        // sibling with the same base stream_id if one exists.
        ii = new_pes( stream );
        if ( same_stream >= 0 )
        {
            memcpy( &stream->pes.list[ii], &stream->pes.list[same_stream],
                    sizeof( hb_pes_stream_t ) );
        }
        else
        {
            stream->pes.list[ii].map_idx = -1;
        }
    }

    // (Re)write the entry's identity and kind.
    stream->pes.list[ii].stream_id = stream_id;
    stream->pes.list[ii].stream_id_ext = stream_id_ext;
    stream->pes.list[ii].stream_type = stream_type;
    stream->pes.list[ii].stream_kind = kind;
    return ii;
}
// Refresh a PES list entry's 'kind' from the stream_type lookup table.
// A table result of Unknown or Not-handled never overwrites the entry.
static void update_pes_kind( hb_stream_t * stream, int idx )
{
    const kind_t table_kind =
        st2codec[stream->pes.list[idx].stream_type].kind;
    if ( table_kind == U || table_kind == N )
        return;
    stream->pes.list[idx].stream_kind = table_kind;
}
// Append pes_idx to the singly linked list of PES streams hanging off a
// TS PID entry (links live in stream->pes.list[].next, -1 terminates).
// Does nothing if pes_idx is already on the list.
static void ts_pes_list_add( hb_stream_t *stream, int ts_idx, int pes_idx )
{
    int head = stream->ts.list[ts_idx].pes_list;
    if ( head == -1 )
    {
        // Empty list: new index becomes the head.
        stream->ts.list[ts_idx].pes_list = pes_idx;
        return;
    }

    // Walk to the tail, bailing out on a duplicate.
    int cur = head;
    int tail = head;
    while ( cur != -1 )
    {
        if ( cur == pes_idx )
            return; // Already in list
        tail = cur;
        cur = stream->pes.list[cur].next;
    }
    stream->pes.list[tail].next = pes_idx;
}
// Add or update a transport stream PID entry and its associated PES
// stream entry. Optionally returns the PES index via out_pes_idx.
// Returns the TS list index, or -1 if the PES entry could not be made.
static int update_ts_streams( hb_stream_t * stream, int pid, int stream_id_ext, int stream_type, int in_kind, int *out_pes_idx )
{
    int ii;
    // For TS, the PID doubles as the PES 'stream_id'.
    int pes_idx = update_ps_streams( stream, pid, stream_id_ext,
                                     stream_type, in_kind );
    if ( out_pes_idx )
        *out_pes_idx = pes_idx;
    if ( pes_idx < 0 )
        return -1;

    kind_t kind = stream->pes.list[pes_idx].stream_kind;
    for ( ii = 0; ii < stream->ts.count; ii++ )
    {
        if ( pid == stream->ts.list[ii].pid )
        {
            break;
        }
        // Resolve multiple videos
        if ( kind == V && ts_stream_kind( stream, ii ) == V &&
             pes_idx < stream->ts.list[ii].pes_list )
        {
            // We have a new candidate for the primary video. Move
            // the current video to the end of the list. And put the
            // new video in this slot
            int jj = new_pid( stream );
            memcpy( &stream->ts.list[jj], &stream->ts.list[ii],
                    sizeof( hb_ts_stream_t ) );
            break;
        }
    }
    // Not found (or demoted): claim a new slot.
    if ( ii == stream->ts.count )
        ii = new_pid( stream );

    stream->ts.list[ii].pid = pid;
    ts_pes_list_add( stream, ii, pes_idx );
    if ( in_kind == P )
        stream->ts.list[ii].is_pcr = 1;

    return ii;
}
// Parse a program stream map (stream id 0xbc): the registration
// descriptor, then the elementary stream map entries, registering each
// stream via update_ps_streams() and decoding its descriptors.
// Returns 1 on success, 0 on a truncated/malformed map.
static int decode_ps_map( hb_stream_t * stream, uint8_t *buf, int len )
{
    int retval = 1;
    bitbuf_t bb;
    bits_init(&bb, buf, len, 0);

    if ( bits_bytes_left(&bb) < 10 )
        return 0;

    // Skip stuff not needed
    bits_skip(&bb, 8 * 8);
    int info_len = bits_get(&bb, 16);
    if ( bits_bytes_left(&bb) < info_len )
        return 0;

    if ( info_len )
    {
        // Program stream info: scan descriptors for the registration
        // descriptor (tag 0x05), which identifies the stream's origin.
        bitbuf_t cc;
        bits_clone( &cc, &bb, info_len );
        while ( bits_bytes_left(&cc) >= 2 )
        {
            // NOTE: this 'len' intentionally shadows the parameter.
            uint8_t tag, len;
            tag = bits_get(&cc, 8);
            len = bits_get(&cc, 8);
            if ( bits_bytes_left(&cc) < len )
                return 0;
            if (tag == 0x05 && len >= 4)
            {
                // registration descriptor
                stream->reg_desc = bits_get(&cc, 32);
                bits_skip(&cc, 8 * (len - 4));
            }
            else
            {
                bits_skip(&cc, 8 * len);
            }
        }
        bits_skip(&bb, 8 * info_len);
    }

    int map_len = bits_get(&bb, 16);
    if ( bits_bytes_left(&bb) < map_len )
        return 0;

    // Process the map
    int ii = 0;
    while ( bits_bytes_left(&bb) >= 8 )
    {
        int pes_idx;
        int stream_type = bits_get(&bb, 8);
        int stream_id = bits_get(&bb, 8);
        info_len = bits_get(&bb, 16);
        if ( info_len > bits_bytes_left(&bb) )
            return 0;

        int substream_id = 0;
        switch ( stream_type )
        {
            case 0x81: // ac3
            case 0x82: // dts
            case 0x83: // lpcm
            case 0x87: // eac3
                // If the stream_id isn't one of the standard mpeg
                // stream ids, assume it is an private stream 1 substream id.
                // This is how most PS streams specify this type of audio.
                //
                // TiVo sets the stream id to 0xbd and does not
                // give a substream id. This limits them to one audio
                // stream and differs from how everyone else specifies
                // this type of audio.
                if ( stream_id < 0xb9 )
                {
                    substream_id = stream_id;
                    stream_id = 0xbd;
                }
                break;
            default:
                break;
        }

        pes_idx = update_ps_streams( stream, stream_id, substream_id,
                                     stream_type, -1 );
        if ( pes_idx >= 0 )
            stream->pes.list[pes_idx].map_idx = ii;
        if ( info_len > 0 )
        {
            // Per-stream descriptors (language, codec hints, ...)
            bitbuf_t bb_desc;
            bits_clone( &bb_desc, &bb, info_len );
            if ( pes_idx >= 0 )
                decode_element_descriptors( stream, pes_idx, &bb_desc );
            bits_skip(&bb, 8 * info_len);
        }
        ii++;
    }
    // skip CRC 32

    return retval;
}
// Scan a program stream to discover its elementary streams. Reads up to
// MAX_PS_PROBE_SIZE bytes from the file start, then repeats the scan at
// the 20% mark (some VOBs have no audio at the very beginning). Streams
// are registered via update_ps_streams(); types that can't be deduced
// from stream/substream ids are marked for probing (stream_type 0).
static void hb_ps_stream_find_streams(hb_stream_t *stream)
{
    int ii, jj;
    hb_buffer_t *buf = hb_buffer_init(HB_DVD_READ_BUFFER_SIZE);

    fseeko( stream->file_handle, 0, SEEK_SET );
    // Scan beginning of file, then if no program stream map is found
    // seek to 20% and scan again since there's occasionally no
    // audio at the beginning (particularly for vobs).
    for ( ii = 0; ii < 2; ii++ )
    {
        for ( jj = 0; jj < MAX_PS_PROBE_SIZE; jj += buf->size )
        {
            int stream_type;
            int len;

            hb_pes_info_t pes_info;
            buf->size = 0;
            len = hb_ps_read_packet( stream, buf );
            if ( len == 0 )
            {
                // Must have reached EOF
                break;
            }
            if ( !hb_parse_ps( stream, buf->data, buf->size, &pes_info ) )
            {
                hb_deep_log( 2, "hb_ps_stream_find_streams: Error parsing PS packet");
                continue;
            }
            if ( pes_info.stream_id == 0xba )
            {
                // Pack header seen: the stream carries clock references.
                stream->ts_flags |= TS_HAS_PCR;
            }
            else if ( pes_info.stream_id == 0xbc )
            {
                // program stream map
                // Note that if there is a program map, any
                // extrapolation that is made below based on
                // stream id may be overridden by entry in the map.
                if ( decode_ps_map( stream, buf->data, buf->size ) )
                {
                    hb_log("Found program stream map");
                    // Normally, we could quit here since the program
                    // stream map *should* map all streams. But once
                    // again Tivo breaks things by not always creating
                    // complete maps. So continue processing...
                }
                else
                {
                    hb_error("Error parsing program stream map");
                }
            }
            else if ( ( pes_info.stream_id & 0xe0 ) == 0xc0 )
            {
                // MPeg audio (c0 - df)
                stream_type = 0x04;
                update_ps_streams( stream, pes_info.stream_id,
                                   pes_info.stream_id_ext, stream_type, -1 );
            }
            else if ( pes_info.stream_id == 0xbd )
            {
                // private stream 1: identify payload by substream id
                int ssid = pes_info.bd_substream_id;
                // Add a potentail audio stream
                // Check dvd substream id
                if ( ssid >= 0x20 && ssid <= 0x37 )
                {
                    int idx = update_ps_streams( stream, pes_info.stream_id,
                                            pes_info.bd_substream_id, 0, -1 );
                    stream->pes.list[idx].stream_kind = S;
                    stream->pes.list[idx].codec = WORK_DECVOBSUB;
                    strncpy(stream->pes.list[idx].codec_name,
                            "DVD Subtitle", 80);
                    continue;
                }
                if ( ssid >= 0x80 && ssid <= 0x87 )
                {
                    stream_type = 0x81; // ac3
                }
                else if ( ( ssid >= 0x88 && ssid <= 0x8f ) ||
                          ( ssid >= 0x98 && ssid <= 0x9f ) )
                {
                    // Could be either dts or dts-hd
                    // will have to probe to resolve
                    int idx = update_ps_streams( stream, pes_info.stream_id,
                                            pes_info.bd_substream_id, 0, U );
                    stream->pes.list[idx].codec = HB_ACODEC_DCA_HD;
                    stream->pes.list[idx].codec_param = AV_CODEC_ID_DTS;
                    continue;
                }
                else if ( ssid >= 0xa0 && ssid <= 0xaf )
                {
                    stream_type = 0x83; // lpcm
                    // This is flagged as an unknown stream type in
                    // st2codec because it can be either LPCM or
                    // BD TrueHD. In this case it is LPCM.
                    update_ps_streams( stream, pes_info.stream_id,
                                    pes_info.bd_substream_id, stream_type, A );
                    continue;
                }
                else if ( ssid >= 0xb0 && ssid <= 0xbf )
                {
                    // HD-DVD TrueHD
                    int idx = update_ps_streams( stream, pes_info.stream_id,
                                            pes_info.bd_substream_id, 0, A );
                    stream->pes.list[idx].codec = HB_ACODEC_FFMPEG;
                    stream->pes.list[idx].codec_param = AV_CODEC_ID_TRUEHD;
                    continue;
                }
                else if ( ssid >= 0xc0 && ssid <= 0xcf )
                {
                    // HD-DVD uses this for both ac3 and eac3.
                    // Check ac3 bitstream_id to distinguish between them.
                    bitbuf_t bb;
                    bits_init(&bb, buf->data + pes_info.header_len,
                              buf->size - pes_info.header_len, 0);
                    int sync = bits_get(&bb, 16);
                    if ( sync == 0x0b77 )
                    {
                        // AC-3 sync word found; bsid <= 10 means plain AC-3
                        bits_skip(&bb, 24);
                        int bsid = bits_get(&bb, 5);
                        if ( bsid <= 10 )
                        {
                            // ac3
                            stream_type = 0x81; // ac3
                        }
                        else
                        {
                            // eac3
                            stream_type = 0x87; // eac3
                        }
                    }
                    else
                    {
                        // Doesn't look like an ac3 stream. Probe it.
                        stream_type = 0x00;
                    }
                }
                else
                {
                    // Unknown. Probe it.
                    stream_type = 0x00;
                }
                update_ps_streams( stream, pes_info.stream_id,
                                pes_info.bd_substream_id, stream_type, -1 );
            }
            else if ( ( pes_info.stream_id & 0xf0 ) == 0xe0 )
            {
                // Normally this is MPEG video, but MPEG-1 PS streams
                // (which do not have a program stream map) may use
                // this for other types of video.
                //
                // Also, the hddvd tards decided to use 0xe2 and 0xe3 for
                // h.264 video :( and the twits decided not to put a
                // program stream map in the stream :'(
                //
                // So set this to an unknown stream type and probe.
                stream_type = 0x00;
                update_ps_streams( stream, pes_info.stream_id,
                                   pes_info.stream_id_ext, stream_type, -1 );
            }
            else if ( pes_info.stream_id == 0xfd )
            {
                // extended stream id: inspect stream_id_ext
                if ( pes_info.stream_id_ext == 0x55 ||
                     pes_info.stream_id_ext == 0x56 )
                {
                    // hddvd uses this for vc-1.
                    stream_type = 0xea;
                }
                else
                {
                    // mark as unknown and probe.
                    stream_type = 0x00;
                }
                update_ps_streams( stream, pes_info.stream_id,
                                   pes_info.stream_id_ext, stream_type, -1 );
            }
        }
        hb_stream_seek( stream, 0.2 );
    }
    hb_buffer_close( &buf );
}
// Probe the accumulated buffer of a DTS/DTS-HD candidate stream to
// resolve its exact profile, updating pes->codec / stream_type /
// stream_kind and recording a human-readable profile name.
// Returns 1 when the profile was resolved, 0 to keep probing.
//
// Bug fix: when bsinfo() failed, the original fell through and read
// 'info.profile' from the uninitialized 'info' struct. Return 0 (keep
// probing) instead — the same outcome the default switch case produces.
static int probe_dts_profile( hb_pes_stream_t *pes )
{
    hb_work_info_t info;
    hb_work_object_t *w = hb_codec_decoder( pes->codec );
    w->codec_param = pes->codec_param;
    int ret = w->bsinfo( w, pes->probe_buf, &info );
    if ( ret < 0 )
    {
        hb_log( "probe_dts_profile: no info type %d/0x%x for id 0x%x",
                pes->codec, pes->codec_param, pes->stream_id );
        // 'info' is not valid on failure; don't inspect it.
        return 0;
    }
    switch (info.profile)
    {
        case FF_PROFILE_DTS:
        case FF_PROFILE_DTS_ES:
        case FF_PROFILE_DTS_96_24:
            // Plain DTS core: decode with the DCA decoder.
            pes->codec = HB_ACODEC_DCA;
            pes->stream_type = 0x82;
            pes->stream_kind = A;
            break;

        case FF_PROFILE_DTS_HD_HRA:
        case FF_PROFILE_DTS_HD_MA:
            // DTS-HD: keep the HD decoder; no fixed stream type.
            pes->stream_type = 0;
            pes->stream_kind = A;
            break;

        default:
            // Unrecognized profile: keep probing.
            return 0;
    }
    // Record the profile name for logging (NUL-terminate explicitly).
    const char *profile_name;
    AVCodec *codec = avcodec_find_decoder( pes->codec_param );
    profile_name = av_get_profile_name( codec, info.profile );
    if ( profile_name )
    {
        strncpy(pes->codec_name, profile_name, 80);
        pes->codec_name[79] = 0;
    }
    return 1;
}
// Accumulate packets from an unidentified stream into a probe buffer and
// try to identify the codec with libav's format prober. Returns 1 when a
// verdict has been reached (stream identified, or given up and marked N),
// 0 when more data is needed.
static int do_probe( hb_pes_stream_t *pes, hb_buffer_t *buf )
{
    // Check upper limit of per stream data to probe
    if ( pes->probe_buf == NULL )
    {
        pes->probe_buf = hb_buffer_init( 0 );
    }
    if ( pes->probe_buf->size > HB_MAX_PROBE_SIZE )
    {
        // Give up: mark the stream as not-handled.
        pes->stream_kind = N;
        hb_buffer_close( &pes->probe_buf );
        return 1;
    }

    // Add this stream buffer to probe buffer and perform probe
    AVInputFormat *fmt = NULL;
    int score = 0;
    AVProbeData pd = {0,};
    int size = pes->probe_buf->size + buf->size;
    // Extra padding is required by av_probe_input_format2().
    hb_buffer_realloc(pes->probe_buf, size + AVPROBE_PADDING_SIZE );
    memcpy( pes->probe_buf->data + pes->probe_buf->size, buf->data, buf->size );
    pes->probe_buf->size = size;

    if ( pes->codec == HB_ACODEC_DCA_HD )
    {
        // We need to probe for the profile of DTS audio in this stream.
        return probe_dts_profile( pes );
    }

    // Probing is slow, so we don't want to re-probe the probe
    // buffer for every packet we add to it. Grow the buffer
    // by a factor of 2 before probing again.
    if ( pes->probe_buf->size < pes->probe_next_size )
        return 0;
    pes->probe_next_size = pes->probe_buf->size * 2;

    pd.buf = pes->probe_buf->data;
    pd.buf_size = pes->probe_buf->size;
    fmt = av_probe_input_format2( &pd, 1, &score );
    if ( fmt && score > AVPROBE_SCORE_MAX / 2 )
    {
        // Many raw elementary formats share a name with their codec;
        // when the direct lookup fails, map format name -> codec id.
        AVCodec *codec = avcodec_find_decoder_by_name( fmt->name );
        if( !codec )
        {
            int i;
            static const struct
            {
                const char *name;
                enum AVCodecID id;
            }
            fmt_id_type[] =
            {
                { "g722"     , AV_CODEC_ID_ADPCM_G722 },
                { "mlp"      , AV_CODEC_ID_MLP        },
                { "truehd"   , AV_CODEC_ID_TRUEHD     },
                { "shn"      , AV_CODEC_ID_SHORTEN    },
                { "aac"      , AV_CODEC_ID_AAC        },
                { "ac3"      , AV_CODEC_ID_AC3        },
                { "dts"      , AV_CODEC_ID_DTS        },
                { "eac3"     , AV_CODEC_ID_EAC3       },
                { "h264"     , AV_CODEC_ID_H264       },
                { "m4v"      , AV_CODEC_ID_MPEG4      },
                { "mp3"      , AV_CODEC_ID_MP3        },
                { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO },
                { "cavsvideo", AV_CODEC_ID_CAVS       },
                { "dnxhd"    , AV_CODEC_ID_DNXHD      },
                { "h261"     , AV_CODEC_ID_H261       },
                { "h263"     , AV_CODEC_ID_H263       },
                { "mjpeg"    , AV_CODEC_ID_MJPEG      },
                { "vc1"      , AV_CODEC_ID_VC1        },
                { 0 },
            };

            for( i = 0; fmt_id_type[i].name; i++ )
            {
                if( !strcmp(fmt->name, fmt_id_type[i].name ) )
                {
                    codec = avcodec_find_decoder( fmt_id_type[i].id );
                    break;
                }
            }
        }
        if( codec )
        {
            pes->codec_param = codec->id;
            if ( codec->type == AVMEDIA_TYPE_VIDEO )
            {
                pes->stream_kind = V;
                // Known video codecs get their MPEG stream_type recorded.
                switch ( codec->id )
                {
                    case AV_CODEC_ID_MPEG1VIDEO:
                        pes->codec = WORK_DECAVCODECV;
                        pes->stream_type = 0x01;
                        break;

                    case AV_CODEC_ID_MPEG2VIDEO:
                        pes->codec = WORK_DECAVCODECV;
                        pes->stream_type = 0x02;
                        break;

                    case AV_CODEC_ID_H264:
                        pes->codec = WORK_DECAVCODECV;
                        pes->stream_type = 0x1b;
                        break;

                    case AV_CODEC_ID_VC1:
                        pes->codec = WORK_DECAVCODECV;
                        pes->stream_type = 0xea;
                        break;

                    default:
                        pes->codec = WORK_DECAVCODECV;
                }
            }
            else if ( codec->type == AVMEDIA_TYPE_AUDIO )
            {
                pes->stream_kind = A;
                switch ( codec->id )
                {
                    case AV_CODEC_ID_AC3:
                        pes->codec = HB_ACODEC_AC3;
                        break;
                    default:
                        pes->codec = HB_ACODEC_FFMPEG;
                }
            }
            else
            {
                // Not audio or video: unsupported.
                pes->stream_kind = N;
            }
            strncpy(pes->codec_name, codec->name, 79);
            pes->codec_name[79] = 0;
        }
        else
        {
            pes->stream_kind = N;
        }
        hb_buffer_close( &pes->probe_buf );
        return 1;
    }
    return 0;
}
// Resolve the codec for each discovered transport stream PID. Handles
// blu-ray's overloaded stream types (identified by the "HDMV"
// registration descriptor), including PIDs that interleave two codecs
// distinguished only by the PES extended stream id. PIDs that remain
// unknown afterwards are identified by probing their payload.
static void hb_ts_resolve_pid_types(hb_stream_t *stream)
{
    int ii, probe = 0;

    for ( ii = 0; ii < stream->ts.count; ii++ )
    {
        int pid = stream->ts.list[ii].pid;
        int stype = ts_stream_type( stream, ii );
        int pes_idx;

        if ( stype == 0x80 &&
             stream->reg_desc == STR4_TO_UINT32("HDMV") )
        {
            // LPCM audio in bluray have an stype of 0x80
            // 0x80 is used for other DigiCipher normally
            // To distinguish, Bluray streams have a reg_desc of HDMV
            update_ts_streams( stream, pid, 0, stype, A, &pes_idx );
            stream->pes.list[pes_idx].codec = HB_ACODEC_FFMPEG;
            stream->pes.list[pes_idx].codec_param = AV_CODEC_ID_PCM_BLURAY;
            continue;
        }

        // The blu ray consortium apparently forgot to read the portion
        // of the MPEG spec that says one PID should map to one media
        // stream and multiplexed multiple types of audio into one PID
        // using the extended stream identifier of the PES header to
        // distinguish them. So we have to check if that's happening and
        // if so tell the runtime what esid we want.
        if ( stype == 0x83 &&
             stream->reg_desc == STR4_TO_UINT32("HDMV") )
        {
            // This is an interleaved TrueHD/AC-3 stream and the esid of
            // the AC-3 is 0x76
            update_ts_streams( stream, pid, HB_SUBSTREAM_BD_AC3,
                               stype, A, &pes_idx );
            stream->pes.list[pes_idx].codec = HB_ACODEC_AC3;
            stream->pes.list[pes_idx].codec_param = AV_CODEC_ID_AC3;

            update_ts_streams( stream, pid, HB_SUBSTREAM_BD_TRUEHD,
                               stype, A, &pes_idx );
            stream->pes.list[pes_idx].codec = HB_ACODEC_FFMPEG;
            stream->pes.list[pes_idx].codec_param = AV_CODEC_ID_TRUEHD;
            continue;
        }
        if ( ( stype == 0x84 || stype == 0xa1 ) &&
             stream->reg_desc == STR4_TO_UINT32("HDMV") )
        {
            // EAC3 audio in bluray has an stype of 0x84
            // which conflicts with SDDS
            // To distinguish, Bluray streams have a reg_desc of HDMV
            update_ts_streams( stream, pid, 0, stype, A, &pes_idx );
            stream->pes.list[pes_idx].codec = HB_ACODEC_FFMPEG;
            stream->pes.list[pes_idx].codec_param = AV_CODEC_ID_EAC3;
            continue;
        }
        // 0xa2 is DTS-HD LBR used in HD-DVD and bluray for
        // secondary audio streams. Libav can not decode yet.
        // Having it in the audio list causes delays during scan
        // while we try to get stream parameters. So skip
        // this type for now.
        if ( stype == 0x85 &&
             stream->reg_desc == STR4_TO_UINT32("HDMV") )
        {
            // DTS-HD HRA audio in bluray has an stype of 0x85
            // which conflicts with ATSC Program ID
            // To distinguish, Bluray streams have a reg_desc of HDMV
            // This is an interleaved DTS-HD HRA/DTS stream and the
            // esid of the DTS is 0x71
            update_ts_streams( stream, pid, HB_SUBSTREAM_BD_DTS,
                               stype, A, &pes_idx );
            stream->pes.list[pes_idx].codec = HB_ACODEC_DCA;
            stream->pes.list[pes_idx].codec_param = AV_CODEC_ID_DTS;

            update_ts_streams( stream, pid, 0, stype, A, &pes_idx );
            stream->pes.list[pes_idx].codec = HB_ACODEC_DCA_HD;
            stream->pes.list[pes_idx].codec_param = AV_CODEC_ID_DTS;
            continue;
        }
        if ( stype == 0x86 &&
             stream->reg_desc == STR4_TO_UINT32("HDMV") )
        {
            // This is an interleaved DTS-HD MA/DTS stream and the
            // esid of the DTS is 0x71
            update_ts_streams( stream, pid, HB_SUBSTREAM_BD_DTS,
                               stype, A, &pes_idx );
            stream->pes.list[pes_idx].codec = HB_ACODEC_DCA;
            stream->pes.list[pes_idx].codec_param = AV_CODEC_ID_DTS;

            update_ts_streams( stream, pid, 0, stype, A, &pes_idx );
            stream->pes.list[pes_idx].codec = HB_ACODEC_DCA_HD;
            stream->pes.list[pes_idx].codec_param = AV_CODEC_ID_DTS;
            continue;
        }

        // stype == 0 indicates a type not in st2codec table
        if ( stype != 0 &&
             ( ts_stream_kind( stream, ii ) == A ||
               ts_stream_kind( stream, ii ) == S ||
               ts_stream_kind( stream, ii ) == V ) )
        {
            // Assuming there are no substreams.
            // This should be true before probing.
            // This function is only called before
            // probing.
            pes_idx = stream->ts.list[ii].pes_list;
            stream->pes.list[pes_idx].codec = st2codec[stype].codec;
            stream->pes.list[pes_idx].codec_param = st2codec[stype].codec_param;
            continue;
        }

        if ( ts_stream_kind( stream, ii ) == U )
        {
            probe++;
        }
    }

    // Probe remaining unknown streams for stream types
    hb_stream_seek( stream, 0.0 );
    stream->need_keyframe = 0;

    int total_size = 0;
    hb_buffer_t *buf;

    if ( probe )
        hb_log("Probing %d unknown stream%s", probe, probe > 1 ? "s" : "" );

    while ( probe && ( buf = hb_ts_stream_decode( stream ) ) != NULL )
    {
        // Check upper limit of total data to probe
        total_size += buf->size;

        if ( total_size > HB_MAX_PROBE_SIZE * 2 )
        {
            hb_buffer_close(&buf);
            break;
        }

        int idx;
        idx = index_of_id( stream, buf->s.id );

        // Skip buffers from streams that are already identified.
        if (idx < 0 || stream->pes.list[idx].stream_kind != U )
        {
            hb_buffer_close(&buf);
            continue;
        }

        hb_pes_stream_t *pes = &stream->pes.list[idx];

        if ( do_probe( pes, buf ) )
        {
            // A verdict was reached for this stream.
            probe--;
            if ( pes->stream_kind != N )
            {
                hb_log("    Probe: Found stream %s. stream id 0x%x-0x%x",
                        pes->codec_name, pes->stream_id, pes->stream_id_ext);
            }
            else
            {
                hb_log("    Probe: Unsupported stream %s. stream id 0x%x-0x%x",
                        pes->codec_name, pes->stream_id, pes->stream_id_ext);
            }
        }
        hb_buffer_close(&buf);
    }

    // Clean up any probe buffers and set all remaining unknown
    // streams to 'kind' N
    for ( ii = 0; ii < stream->pes.count; ii++ )
    {
        if ( stream->pes.list[ii].stream_kind == U )
            stream->pes.list[ii].stream_kind = N;
        hb_buffer_close( &stream->pes.list[ii].probe_buf );
        stream->pes.list[ii].probe_next_size = 0;
    }
}
/* Assign codecs to the program stream's elementary streams.
 *
 * Streams whose stream_type is present in the st2codec table get their
 * codec assigned directly; any remaining unknown streams are identified
 * by decoding data from the file and probing it (do_probe).
 *
 * Mirrors hb_ts_resolve_pids() for transport streams.
 */
static void hb_ps_resolve_stream_types(hb_stream_t *stream)
{
    int ii, probe = 0;

    for ( ii = 0; ii < stream->pes.count; ii++ )
    {
        int stype = stream->pes.list[ii].stream_type;

        // stype == 0 indicates a type not in st2codec table
        if ( stype != 0 &&
             ( stream->pes.list[ii].stream_kind == A ||
               stream->pes.list[ii].stream_kind == S ||
               stream->pes.list[ii].stream_kind == V ) )
        {
            stream->pes.list[ii].codec = st2codec[stype].codec;
            stream->pes.list[ii].codec_param = st2codec[stype].codec_param;
            continue;
        }

        if ( stream->pes.list[ii].stream_kind == U )
        {
            probe++;
        }
    }

    // Probe remaining unknown streams for stream types
    hb_stream_seek( stream, 0.0 );
    stream->need_keyframe = 0;

    int total_size = 0;
    hb_buffer_t *buf;

    if ( probe )
        hb_log("Probing %d unknown stream%s", probe, probe > 1 ? "s" : "" );

    while ( probe && ( buf = hb_ps_stream_decode( stream ) ) != NULL )
    {
        // Check upper limit of total data to probe
        total_size += buf->size;

        if ( total_size > HB_MAX_PROBE_SIZE * 2 )
        {
            // Bug fix: buf was previously leaked on this early exit
            hb_buffer_close(&buf);
            break;
        }

        int idx;
        idx = index_of_id( stream, buf->s.id );

        if (idx < 0 || stream->pes.list[idx].stream_kind != U )
        {
            // Bug fix: buf was previously leaked on this path
            hb_buffer_close(&buf);
            continue;
        }

        hb_pes_stream_t *pes = &stream->pes.list[idx];

        if ( do_probe( pes, buf ) )
        {
            probe--;
            if ( pes->stream_kind != N )
            {
                hb_log("    Probe: Found stream %s. stream id 0x%x-0x%x",
                        pes->codec_name, pes->stream_id, pes->stream_id_ext);
            }
            else
            {
                hb_log("    Probe: Unsupported stream %s. stream id 0x%x-0x%x",
                        pes->codec_name, pes->stream_id, pes->stream_id_ext);
            }
        }
        // do_probe() copies what it needs out of buf; close the buffer here
        // (as hb_ts_resolve_pids does) instead of leaking it every iteration.
        hb_buffer_close(&buf);
    }

    // Clean up any probe buffers and set all remaining unknown
    // streams to 'kind' N
    for ( ii = 0; ii < stream->pes.count; ii++ )
    {
        if ( stream->pes.list[ii].stream_kind == U )
            stream->pes.list[ii].stream_kind = N;
        hb_buffer_close( &stream->pes.list[ii].probe_buf );
        stream->pes.list[ii].probe_next_size = 0;
    }
}
/* Locate the audio/video PIDs of a transport stream by reading TS packets
 * from the middle of the file, decoding the PAT and then the PMT.
 * Returns 0 on success, -1 when no video PID could be found.
 */
static int hb_ts_stream_find_pids(hb_stream_t *stream)
{
    // To be different from every other broadcaster in the world, New Zealand TV
    // changes PMTs (and thus video & audio PIDs) when 'programs' change. Since
    // we may have the tail of the previous program at the beginning of this
    // file, take our PMT from the middle of the file.
    fseeko(stream->file_handle, 0, SEEK_END);
    uint64_t fsize = ftello(stream->file_handle);
    fseeko(stream->file_handle, fsize >> 1, SEEK_SET);
    align_to_next_packet(stream);

    // Read the Transport Stream Packets (188 bytes each) looking at first for PID 0 (the PAT PID), then decode that
    // to find the program map PID and then decode that to get the list of audio and video PIDs

    for (;;)
    {
        const uint8_t *buf = next_packet( stream );

        if ( buf == NULL )
        {
            hb_log("hb_ts_stream_find_pids - end of file");
            break;
        }
        // Get pid (13-bit field spanning bytes 1-2 of the TS header)
        int pid = (((buf[1] & 0x1F) << 8) | buf[2]) & 0x1FFF;

        // PID 0 carries the PAT; decode it once.
        if ((pid == 0x0000) && (stream->ts_number_pat_entries == 0))
        {
            decode_PAT(buf, stream);
            continue;
        }

        int pat_index = 0;
        for (pat_index = 0; pat_index < stream->ts_number_pat_entries; pat_index++)
        {
            // There are some streams where the PAT table has multiple
            // entries as if their are multiple programs in the same
            // transport stream, and yet there's actually only one
            // program really in the stream. This seems to be true for
            // transport streams that originate in the HDHomeRun but have
            // been output by EyeTV's export utility. What I think is
            // happening is that the HDHomeRun is sending the entire
            // transport stream as broadcast, but the EyeTV is only
            // recording a single (selected) program number and not
            // rewriting the PAT info on export to match what's actually
            // on the stream. Until we have a way of handling multiple
            // programs per transport stream elegantly we'll match on the
            // first pat entry for which we find a matching program map PID.
            // The ideal solution would be to build a title choice popup
            // from the PAT program number details and then select from
            // their - but right now the API's not capable of that.
            if (stream->pat_info[pat_index].program_number != 0 &&
                pid == stream->pat_info[pat_index].program_map_PID)
            {
                if (build_program_map(buf, stream) > 0)
                {
                    break;
                }
            }
        }
        // Keep going until we have a complete set of PIDs
        if ( ts_index_of_video( stream ) >= 0 )
            break;
    }
    if ( ts_index_of_video( stream ) < 0 )
        return -1;
    // Register the PCR PID so clock references are tracked during decode.
    update_ts_streams( stream, stream->pmt_info.PCR_PID, 0, -1, P, NULL );
    return 0;
}
/* Convert the 5-byte encoded PES PTS/DTS field to an int64.
 *
 * The 33-bit timestamp is split across three fields with marker bits
 * in between: bits 32..30 live in buf[0], bits 29..15 in buf[1..2],
 * and bits 14..0 in buf[3..4].
 */
static int64_t pes_timestamp( const uint8_t *buf )
{
    uint64_t top = (uint64_t)( buf[0] & 0x0e ) << 29;                       // bits 32..30
    uint64_t mid = ( ( (uint64_t)buf[1] << 7 ) | ( buf[2] >> 1 ) ) << 15;   // bits 29..15
    uint64_t low = ( (uint64_t)buf[3] << 7 ) | ( buf[4] >> 1 );             // bits 14..0
    return (int64_t)( top | mid | low );
}
/* Turn the PES packet accumulated for TS stream 'curstream' into one or
 * more output hb_buffers (one per matching substream) and reset the
 * accumulation buffer. Returns the head of the buffer list, or NULL if
 * the packet was dropped (parse failure, empty payload, or still waiting
 * for a keyframe during scan).
 */
static hb_buffer_t * generate_output_data(hb_stream_t *stream, int curstream)
{
    hb_buffer_t *buf = NULL, *first = NULL;
    hb_pes_info_t pes_info;
    hb_buffer_t * b = stream->ts.list[curstream].buf;
    if ( !hb_parse_ps( stream, b->data, b->size, &pes_info ) )
    {
        b->size = 0;
        return NULL;
    }

    // Payload starts after the PES header.
    uint8_t *tdat = b->data + pes_info.header_len;
    int size = b->size - pes_info.header_len;

    if ( size <= 0 )
    {
        b->size = 0;
        return NULL;
    }

    int pes_idx;
    pes_idx = stream->ts.list[curstream].pes_list;
    if( stream->need_keyframe )
    {
        // we're looking for the first video frame because we're
        // doing random access during 'scan'
        int kind = stream->pes.list[pes_idx].stream_kind;
        if( kind != V || !isIframe( stream, tdat, size ) )
        {
            // not the video stream or didn't find an I frame
            // but we'll only wait 512 video frames for an I frame.
            if ( kind != V || ++stream->need_keyframe < 512 )
            {
                b->size = 0;
                return NULL;
            }
        }
        stream->need_keyframe = 0;
    }

    // Check all substreams to see if this packet matches
    for ( pes_idx = stream->ts.list[curstream].pes_list; pes_idx != -1;
          pes_idx = stream->pes.list[pes_idx].next )
    {
        if ( stream->pes.list[pes_idx].stream_id_ext != pes_info.stream_id_ext &&
             stream->pes.list[pes_idx].stream_id_ext != 0 )
        {
            continue;
        }
        // The substreams match.
        // Note that when stream->pes.list[pes_idx].stream_id_ext == 0,
        // we want the whole TS stream including all substreams.
        // DTS-HD is an example of this.

        // Append a buffer for this substream to the output list.
        if ( first == NULL )
            first = buf = hb_buffer_init( size );
        else
        {
            hb_buffer_t *tmp = hb_buffer_init( size );
            buf->next = tmp;
            buf = tmp;
        }

        buf->s.id = get_id( &stream->pes.list[pes_idx] );
        switch (stream->pes.list[pes_idx].stream_kind)
        {
            case A:
                buf->s.type = AUDIO_BUF;
                break;

            case V:
                buf->s.type = VIDEO_BUF;
                break;

            default:
                buf->s.type = OTHER_BUF;
                break;
        }

        if( b->sequence > stream->ts.pcr_out )
        {
            // we have a new pcr
            stream->ts.pcr_out = b->sequence;
            buf->s.pcr = b->s.pcr;
            if( b->sequence >= stream->ts.pcr_discontinuity )
                stream->ts.pcr_current = stream->ts.pcr_discontinuity;
        }
        else
        {
            buf->s.pcr = AV_NOPTS_VALUE;
        }

        // check if this packet was referenced to an older pcr and if that
        // pcr was prior to a discontinuity.
        if( b->sequence < stream->ts.pcr_current )
        {
            // we've sent up a new pcr but have a packet referenced to an
            // old pcr and the difference was enough to trigger a discontinuity
            // correction. smash the timestamps or we'll mess up the correction.
            buf->s.start = AV_NOPTS_VALUE;
            buf->s.renderOffset = AV_NOPTS_VALUE;
            buf->s.stop = AV_NOPTS_VALUE;
            buf->s.pcr = AV_NOPTS_VALUE;
        }
        else
        {
            // put the PTS & possible DTS into 'start' & 'renderOffset'
            // then strip off the PES header.
            buf->s.start = pes_info.pts;
            buf->s.renderOffset = pes_info.dts;
        }
        memcpy( buf->data, tdat, size );
    }

    // Accumulation buffer consumed; start fresh for the next PES packet.
    b->size = 0;
    return first;
}
/* Append 'len' payload bytes to the accumulation buffer of TS stream 'idx',
 * growing the buffer (at least doubling it) when it is too small. */
static void hb_ts_stream_append_pkt(hb_stream_t *stream, int idx, const uint8_t *buf, int len)
{
    hb_buffer_t *b = stream->ts.list[idx].buf;
    int needed = b->size + len;

    if (needed > b->alloc)
    {
        hb_buffer_realloc(b, MAX(b->alloc * 2, needed));
    }
    memcpy(b->data + b->size, buf, len);
    b->size = needed;
}
/***********************************************************************
* hb_ts_stream_decode
***********************************************************************
*
**********************************************************************/
/* Process one 188-byte transport stream packet.
 *
 * Validates the TS header (PID, error bit, adaptation field, continuity
 * counter), tracks PCR clock references and discontinuities, and
 * accumulates PES payload per stream. When a PES packet completes, it is
 * converted to output buffers via generate_output_data().
 *
 * Returns a buffer list when a complete PES packet was produced, or NULL
 * when the packet was consumed (accumulated, duplicate, PCR-only, or
 * invalid).
 */
hb_buffer_t * hb_ts_decode_pkt( hb_stream_t *stream, const uint8_t * pkt )
{
    /*
     * stash the output buffer pointer in our stream so we don't have to
     * pass it & its original value to everything we call.
     */
    int video_index = ts_index_of_video(stream);
    int curstream;
    hb_buffer_t *buf;

    /* This next section validates the packet */

    // Get pid and use it to find stream state.
    int pid = ((pkt[1] & 0x1F) << 8) | pkt[2];
    if ( ( curstream = index_of_pid( stream, pid ) ) < 0 )
    {
        // Not a PID we're demuxing; ignore.
        return NULL;
    }

    // Get error
    int errorbit = (pkt[1] & 0x80) != 0;
    if (errorbit)
    {
        ts_err( stream, curstream, "packet error bit set");
        return NULL;
    }

    // Get adaption header info
    int adaption = (pkt[3] & 0x30) >> 4;
    int adapt_len = 0;
    if (adaption == 0)
    {
        ts_err( stream, curstream, "adaptation code 0");
        return NULL;
    }
    else if (adaption == 0x2)
        adapt_len = 184;    // adaptation field only, no payload
    else if (adaption == 0x3)
    {
        adapt_len = pkt[4] + 1;
        if (adapt_len > 184)
        {
            ts_err( stream, curstream, "invalid adapt len %d", adapt_len);
            return NULL;
        }
    }

    if ( adapt_len > 0 )
    {
        if ( pkt[5] & 0x40 )
        {
            // found a random access point
        }
        // if there's an adaptation header & PCR_flag is set
        // get the PCR (Program Clock Reference)
        //
        // JAS: I have a badly mastered BD that does adaptation field
        // stuffing incorrectly which results in invalid PCRs.  Test
        // for all 0xff to guard against this.
        if ( adapt_len > 7 && ( pkt[5] & 0x10 ) != 0 &&
             !(pkt[5] == 0xff && pkt[6] == 0xff && pkt[7] == 0xff &&
               pkt[8] == 0xff && pkt[9] == 0xff && pkt[10] == 0xff))
        {
            // Assemble the 33-bit PCR base from bytes 6..10.
            int64_t pcr;
            pcr = ( (uint64_t)pkt[6] << (33 - 8) ) |
                  ( (uint64_t)pkt[7] << (33 - 16) ) |
                  ( (uint64_t)pkt[8] << (33 - 24) ) |
                  ( (uint64_t)pkt[9] << (33 - 32) ) |
                  ( pkt[10] >> 7 );
            ++stream->ts.pcr_in;
            stream->ts.found_pcr = 1;
            stream->ts_flags |= TS_HAS_PCR;
            // Check for a pcr discontinuity.
            // The reason for the uint cast on the pcr difference is that the
            // difference is significant if it advanced by more than 200ms or
            // if it went backwards by any amount. The negative numbers look
            // like huge unsigned ints so the cast allows both conditions to
            // be checked at once.
            if ( (uint64_t)( pcr - stream->ts.pcr ) > 200*90LL )
            {
                stream->ts.pcr_discontinuity = stream->ts.pcr_in;
            }
            stream->ts.pcr = pcr;
        }
    }

    // If we don't have a PCR yet but the stream has PCRs just loop
    // so we don't process anything until we have a clock reference.
    // Unfortunately the HD Home Run appears to null out the PCR so if
    // we didn't detect a PCR during scan keep going and we'll use
    // the video stream DTS for the PCR.
    if ( !stream->ts.found_pcr && ( stream->ts_flags & TS_HAS_PCR ) )
    {
        return NULL;
    }

    // Get continuity
    // Continuity only increments for adaption values of 0x3 or 0x01
    // and is not checked for start packets.
    int start = (pkt[1] & 0x40) != 0;
    if ( (adaption & 0x01) != 0 )
    {
        int continuity = (pkt[3] & 0xF);
        if ( continuity == stream->ts.list[curstream].continuity )
        {
            // Spliced transport streams can have duplicate
            // continuity counts at the splice boundary.
            // Test to see if the packet is really a duplicate
            // by comparing packet summaries to see if they
            // match.
            uint8_t summary[8];

            summary[0] = adaption;
            summary[1] = adapt_len;
            if (adapt_len + 4 + 6 + 9 <= 188)
            {
                memcpy(&summary[2], pkt+4+adapt_len+9, 6);
            }
            else
            {
                memset(&summary[2], 0, 6);
            }
            if ( memcmp( summary, stream->ts.list[curstream].pkt_summary, 8 ) == 0 )
            {
                // we got a duplicate packet (usually used to introduce
                // a PCR when one is needed). The only thing that can
                // change in the dup is the PCR which we grabbed above
                // so ignore the rest.
                return NULL;
            }
        }
        if ( !start && (stream->ts.list[curstream].continuity != -1) &&
             !stream->ts.list[curstream].skipbad &&
             (continuity != ( (stream->ts.list[curstream].continuity + 1) & 0xf ) ) )
        {
            ts_err( stream, curstream, "continuity error: got %d expected %d",
                    (int)continuity,
                    (stream->ts.list[curstream].continuity + 1) & 0xf );
            stream->ts.list[curstream].continuity = continuity;
            return NULL;
        }
        stream->ts.list[curstream].continuity = continuity;

        // Save a summary of this packet for later duplicate
        // testing.  The summary includes some header information
        // and payload bytes.  Should be enough to detect
        // non-duplicates.
        stream->ts.list[curstream].pkt_summary[0] = adaption;
        stream->ts.list[curstream].pkt_summary[1] = adapt_len;
        if (adapt_len + 4 + 6 + 9 <= 188)
        {
            memcpy(&stream->ts.list[curstream].pkt_summary[2],
                    pkt+4+adapt_len+9, 6);
        }
        else
        {
            memset(&stream->ts.list[curstream].pkt_summary[2], 0, 6);
        }
    }

    if ( ts_stream_kind( stream, curstream ) == P )
    {
        // This is a stream that only contains PCRs.  No need to process
        // the remainder of the packet.
        //
        // I ran across a poorly mastered BD that does not properly pad
        // the adaptation field and causes parsing errors below if we
        // do not exit early here.
        return NULL;
    }

    /* If we get here the packet is valid - process its data */

    if ( start )
    {
        // Found a random access point or we have finished generating a PES
        // and must start a new one.

        // PES must begin with an mpeg start code
        const uint8_t *pes = pkt + adapt_len + 4;
        if ( pes[0] != 0x00 || pes[1] != 0x00 || pes[2] != 0x01 )
        {
            ts_err( stream, curstream, "missing start code" );
            stream->ts.list[curstream].skipbad = 1;
            return NULL;
        }
        // If we were skipping a bad packet, start fresh on this new PES packet
        if (stream->ts.list[curstream].skipbad == 1)
        {
            stream->ts.list[curstream].skipbad = 0;
        }

        if ( curstream == video_index )
        {
            ++stream->frames;

            // if we don't have a pcr yet use the dts from this frame
            // to attempt to detect discontinuities
            if ( !stream->ts.found_pcr )
            {
                // PES must begin with an mpeg start code & contain
                // a DTS or PTS.
                const uint8_t *pes = pkt + adapt_len + 4;
                if ( pes[0] != 0x00 || pes[1] != 0x00 || pes[2] != 0x01 ||
                     ( pes[7] >> 6 ) == 0 )
                {
                    return NULL;
                }
                // if we have a dts use it otherwise use the pts
                // (DTS, when present, starts at offset 14; PTS at 9)
                int64_t timestamp;
                timestamp = pes_timestamp( pes + ( pes[7] & 0x40?14:9 ) );

                // Treat a jump of more than 600ms in either direction
                // as a discontinuity.
                if( stream->ts.last_timestamp < 0 ||
                    timestamp - stream->ts.last_timestamp > 90 * 600 ||
                    stream->ts.last_timestamp - timestamp > 90 * 600 )
                {
                    stream->ts.pcr = timestamp;
                    ++stream->ts.pcr_in;
                    stream->ts.pcr_discontinuity = stream->ts.pcr_in;
                }
                stream->ts.last_timestamp = timestamp;
            }
        }

        // If we have some data already on this stream, turn it into
        // a program stream packet. Then add the payload for this
        // packet to the current pid's buffer.
        if ( stream->ts.list[curstream].buf->size )
        {
            // we have to ship the old packet before updating the pcr
            // since the packet we've been accumulating is referenced
            // to the old pcr.
            buf = generate_output_data(stream, curstream);

            if ( buf )
            {
                // Output data is ready.
                // remember the pcr that was in effect when we started
                // this packet.
                stream->ts.list[curstream].buf->sequence = stream->ts.pcr_in;
                stream->ts.list[curstream].buf->s.pcr = stream->ts.pcr;
                hb_ts_stream_append_pkt(stream, curstream, pkt + 4 + adapt_len,
                                        184 - adapt_len);
                return buf;
            }
        }
        // remember the pcr that was in effect when we started this packet.
        stream->ts.list[curstream].buf->sequence = stream->ts.pcr_in;
        stream->ts.list[curstream].buf->s.pcr = stream->ts.pcr;
    }

    // Add the payload for this packet to the current buffer
    if (!stream->ts.list[curstream].skipbad && (184 - adapt_len) > 0)
    {
        hb_ts_stream_append_pkt(stream, curstream, pkt + 4 + adapt_len,
                                184 - adapt_len);
        // see if we've hit the end of this PES packet
        // (PES_packet_length at bytes 4-5, plus the 6-byte PES prefix)
        const uint8_t *pes = stream->ts.list[curstream].buf->data;
        int len = ( pes[4] << 8 ) + pes[5] + 6;
        if ( len > 6 && stream->ts.list[curstream].buf->size == len &&
             pes[0] == 0x00 && pes[1] == 0x00 && pes[2] == 0x01 )
        {
            buf = generate_output_data(stream, curstream);
            if ( buf )
                return buf;
        }
    }

    return NULL;
}
/* Read TS packets until one of them completes an output buffer, or
 * until the input is exhausted. Returns NULL at end of file. */
static hb_buffer_t * hb_ts_stream_decode( hb_stream_t *stream )
{
    const uint8_t *pkt;

    while ( ( pkt = next_packet( stream ) ) != NULL )
    {
        hb_buffer_t *out = hb_ts_decode_pkt( stream, pkt );
        if ( out != NULL )
        {
            return out;
        }
    }

    // end of file - we didn't finish filling our ps write buffer
    // so just discard the remainder (the partial buffer is useless)
    hb_log("hb_ts_stream_decode - eof");
    return NULL;
}
/* Record whether decoding should hold output until a keyframe is seen. */
void hb_stream_set_need_keyframe(hb_stream_t *stream, int need_keyframe)
{
    int is_mpeg = ( stream->hb_stream_type == transport ||
                    stream->hb_stream_type == program );

    if ( is_mpeg )
    {
        // Only wait for a keyframe if the stream is known to have IDRs
        stream->need_keyframe = ( need_keyframe != 0 ) && ( stream->has_IDRs != 0 );
    }
    else
    {
        stream->need_keyframe = need_keyframe;
    }
}
/* Return the TS demux state to its just-opened condition: discard any
 * partially accumulated PES data and clear the clock and error
 * bookkeeping. Used after seeks. */
void hb_ts_stream_reset(hb_stream_t *stream)
{
    int idx;

    for ( idx = 0; idx < stream->ts.count; idx++ )
    {
        if ( stream->ts.list[idx].buf != NULL )
        {
            stream->ts.list[idx].buf->size = 0;
        }
        if ( stream->ts.list[idx].extra_buf != NULL )
        {
            stream->ts.list[idx].extra_buf->size = 0;
        }
        stream->ts.list[idx].skipbad = 1;
        stream->ts.list[idx].continuity = -1;
    }

    stream->need_keyframe = 1;

    // clock recovery state
    stream->ts.found_pcr = 0;
    stream->ts.pcr_out = 0;
    stream->ts.pcr_in = 0;
    stream->ts.pcr = AV_NOPTS_VALUE;
    stream->ts.pcr_current = -1;
    stream->ts.last_timestamp = AV_NOPTS_VALUE;

    // frame and error accounting
    stream->frames = 0;
    stream->errors = 0;
    stream->last_error_frame = -10000;
    stream->last_error_count = 0;
}
/* Return the PS demux state to its just-opened condition. */
void hb_ps_stream_reset(hb_stream_t *stream)
{
    stream->need_keyframe = 1;

    // clock recovery state
    stream->pes.found_scr = 0;
    stream->pes.scr = AV_NOPTS_VALUE;

    // frame and error accounting
    stream->frames = 0;
    stream->errors = 0;
}
// ------------------------------------------------------------------
// Support for reading media files via the ffmpeg libraries.
static int ffmpeg_open( hb_stream_t *stream, hb_title_t *title, int scan )
{
AVFormatContext *info_ic = NULL;
av_log_set_level( AV_LOG_ERROR );
// Increase probe buffer size
// The default (5MB) is not big enough to successfully scan
// some files with large PNGs
AVDictionary * av_opts = NULL;
av_dict_set( &av_opts, "probesize", "15000000", 0 );
// FFMpeg has issues with seeking. After av_find_stream_info, the
// streams are left in an indeterminate position. So a seek is
// necessary to force things back to the beginning of the stream.
// But then the seek fails for some stream types. So the safest thing
// to do seems to be to open 2 AVFormatContext. One for probing info
// and the other for reading.
if ( avformat_open_input( &info_ic, stream->path, NULL, &av_opts ) < 0 )
{
return 0;
}
// libav populates av_opts with the things it didn't recognize.
AVDictionaryEntry *t = NULL;
while ((t = av_dict_get(av_opts, "", t, AV_DICT_IGNORE_SUFFIX)) != NULL)
{
hb_log("ffmpeg_open: unknown option '%s'", t->key);
}
av_dict_free( &av_opts );
if ( avformat_find_stream_info( info_ic, NULL ) < 0 )
goto fail;
title->opaque_priv = (void*)info_ic;
stream->ffmpeg_ic = info_ic;
stream->hb_stream_type = ffmpeg;
stream->ffmpeg_pkt = malloc(sizeof(*stream->ffmpeg_pkt));
av_init_packet( stream->ffmpeg_pkt );
stream->chapter_end = INT64_MAX;
if ( !scan )
{
// we're opening for read. scan passed out codec params that
// indexed its stream so we need to remap them so they point
// to this stream.
stream->ffmpeg_video_id = title->video_id;
av_log_set_level( AV_LOG_ERROR );
}
else
{
// we're opening for scan. let ffmpeg put some info into the
// log about what we've got.
stream->ffmpeg_video_id = title->video_id;
av_log_set_level( AV_LOG_INFO );
av_dump_format( info_ic, 0, stream->path, 0 );
av_log_set_level( AV_LOG_ERROR );
// accept this file if it has at least one video stream we can decode
int i;
for (i = 0; i < info_ic->nb_streams; ++i )
{
if ( info_ic->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
{
break;
}
}
if ( i >= info_ic->nb_streams )
goto fail;
}
return 1;
fail:
if ( info_ic ) avformat_close_input( &info_ic );
return 0;
}
/* Tear down the libavformat context and release the scratch packet. */
static void ffmpeg_close( hb_stream_t *d )
{
    avformat_close_input( &d->ffmpeg_ic );
    free( d->ffmpeg_pkt );      // free(NULL) is a no-op
    d->ffmpeg_pkt = NULL;
}
/* Create an hb_audio_t for ffmpeg stream 'id' and append it to the
 * title's audio list, selecting a passthru-capable codec where the
 * source codec supports it. */
static void add_ffmpeg_audio(hb_title_t *title, hb_stream_t *stream, int id)
{
    AVStream *st = stream->ffmpeg_ic->streams[id];
    AVCodecContext *codec = st->codec;
    AVDictionaryEntry *lang_tag = av_dict_get(st->metadata, "language", NULL, 0);

    hb_audio_t *audio = calloc(1, sizeof(*audio));
    audio->id = id;
    audio->config.in.track = id;
    audio->config.in.codec = HB_ACODEC_FFMPEG;
    audio->config.in.codec_param = codec->codec_id;
    // set the bitrate to 0; decavcodecaBSInfo will be called and fill the rest
    audio->config.in.bitrate = 0;

    // set the input codec and extradata for Passthru
    if (codec->codec_id == AV_CODEC_ID_AAC)
    {
        int len = MIN(codec->extradata_size, HB_CONFIG_MAX_SIZE);
        memcpy(audio->priv.config.extradata.bytes, codec->extradata, len);
        audio->priv.config.extradata.length = len;
        audio->config.in.codec = HB_ACODEC_FFAAC;
    }
    else if (codec->codec_id == AV_CODEC_ID_AC3)
    {
        audio->config.in.codec = HB_ACODEC_AC3;
    }
    else if (codec->codec_id == AV_CODEC_ID_DTS)
    {
        // Core DTS profiles take the DCA path; lossless / high-res
        // profiles need the DCA-HD path. Other profiles keep the
        // default ffmpeg decoder.
        switch (codec->profile)
        {
            case FF_PROFILE_DTS:
            case FF_PROFILE_DTS_ES:
            case FF_PROFILE_DTS_96_24:
                audio->config.in.codec = HB_ACODEC_DCA;
                break;

            case FF_PROFILE_DTS_HD_MA:
            case FF_PROFILE_DTS_HD_HRA:
                audio->config.in.codec = HB_ACODEC_DCA_HD;
                break;

            default:
                break;
        }
    }
    else if (codec->codec_id == AV_CODEC_ID_MP3)
    {
        audio->config.in.codec = HB_ACODEC_MP3;
    }

    set_audio_description(audio,
                          lang_for_code2(lang_tag != NULL ? lang_tag->value : "und"));
    hb_list_add(title->list_audio, audio);
}
/*
* Format:
* MkvVobSubtitlePrivateData = ( Line )*
* Line = FieldName ':' ' ' FieldValue '\n'
* FieldName = [^:]+
* FieldValue = [^\n]+
*
* The line of interest is:
* PaletteLine = "palette" ':' ' ' RRGGBB ( ',' ' ' RRGGBB )*
*
* More information on the format at:
* http://www.matroska.org/technical/specs/subtitles/images.html
*/
/* Parse Matroska VOBSUB private data (text lines, see format comment
 * above) for the palette and frame size, storing them in 'subtitle'.
 * Returns 0 on success, 1 on allocation failure or missing palette.
 */
static int ffmpeg_parse_vobsub_extradata_mkv( AVCodecContext *codec, hb_subtitle_t *subtitle )
{
    // lines = (string) codec->extradata;
    // NUL-terminate a private copy since extradata is not a C string.
    char *lines = malloc( codec->extradata_size + 1 );
    if ( lines == NULL )
        return 1;
    memcpy( lines, codec->extradata, codec->extradata_size );
    lines[codec->extradata_size] = '\0';

    uint32_t rgb[16];
    int gotPalette = 0;
    int gotDimensions = 0;

    char *curLine, *curLine_parserData;
    for ( curLine = strtok_r( lines, "\n", &curLine_parserData );
          curLine;
          curLine = strtok_r( NULL, "\n", &curLine_parserData ) )
    {
        if (!gotPalette)
        {
            // "palette: RRGGBB, RRGGBB, ..." — exactly 16 entries expected.
            int numElementsRead = sscanf(curLine, "palette: "
                "%06x, %06x, %06x, %06x, "
                "%06x, %06x, %06x, %06x, "
                "%06x, %06x, %06x, %06x, "
                "%06x, %06x, %06x, %06x",
                &rgb[0],  &rgb[1],  &rgb[2],  &rgb[3],
                &rgb[4],  &rgb[5],  &rgb[6],  &rgb[7],
                &rgb[8],  &rgb[9],  &rgb[10], &rgb[11],
                &rgb[12], &rgb[13], &rgb[14], &rgb[15]);

            if (numElementsRead == 16) {
                gotPalette = 1;
            }
        }
        if (!gotDimensions)
        {
            int numElementsRead = sscanf(curLine, "size: %dx%d",
                                         &subtitle->width, &subtitle->height);
            if (numElementsRead == 2) {
                gotDimensions = 1;
            }
        }
        if (gotPalette && gotDimensions)
            break;
    }

    // Fall back to standard NTSC DVD dimensions when no "size:" line found.
    if (subtitle->width == 0 || subtitle->height == 0)
    {
        subtitle->width = 720;
        subtitle->height = 480;
    }

    free( lines );

    if ( gotPalette )
    {
        int i;
        for (i=0; i<16; i++)
            subtitle->palette[i] = hb_rgb2yuv(rgb[i]);
        subtitle->palette_set = 1;
        return 0;
    }
    else
    {
        return 1;
    }
}
/*
* Format: 8-bit {0,Y,Cb,Cr} x 16
*/
static int ffmpeg_parse_vobsub_extradata_mp4( AVCodecContext *codec, hb_subtitle_t *subtitle )
{
if ( codec->extradata_size != 4*16 )
return 1;
int i, j;
for ( i=0, j=0; i<16; i++, j+=4 )
{
subtitle->palette[i] =
codec->extradata[j+1] << 16 | // Y
codec->extradata[j+2] << 8 | // Cb
codec->extradata[j+3] << 0; // Cr
subtitle->palette_set = 1;
}
if (codec->width <= 0 || codec->height <= 0)
{
subtitle->width = 720;
subtitle->height = 480;
}
else
{
subtitle->width = codec->width;
subtitle->height = codec->height;
}
return 0;
}
/*
* Parses the 'subtitle->palette' information from the specific VOB subtitle track's private data.
* Returns 0 if successful or 1 if parsing failed or was incomplete.
*/
static int ffmpeg_parse_vobsub_extradata( AVCodecContext *codec, hb_subtitle_t *subtitle )
{
// XXX: Better if we actually chose the correct parser based on the input container
return
ffmpeg_parse_vobsub_extradata_mkv( codec, subtitle ) &&
ffmpeg_parse_vobsub_extradata_mp4( codec, subtitle );
}
/* Create an hb_subtitle_t for ffmpeg stream 'id' and append it to the
 * title's subtitle list. Unrecognized subtitle codecs are skipped.
 */
static void add_ffmpeg_subtitle( hb_title_t *title, hb_stream_t *stream, int id )
{
    AVStream *st = stream->ffmpeg_ic->streams[id];
    AVCodecContext *codec = st->codec;

    // NOTE(review): calloc/malloc results below are not checked, in
    // keeping with the surrounding file's style.
    hb_subtitle_t *subtitle = calloc( 1, sizeof(*subtitle) );

    subtitle->id = id;

    switch ( codec->codec_id )
    {
        case AV_CODEC_ID_DVD_SUBTITLE:
            subtitle->format = PICTURESUB;
            subtitle->source = VOBSUB;
            subtitle->config.dest = RENDERSUB;  // By default render (burn-in) the VOBSUB.
            subtitle->codec = WORK_DECVOBSUB;
            if ( ffmpeg_parse_vobsub_extradata( codec, subtitle ) )
                hb_log( "add_ffmpeg_subtitle: malformed extradata for VOB subtitle track; "
                        "subtitle colors likely to be wrong" );
            break;
        case AV_CODEC_ID_TEXT:
            subtitle->format = TEXTSUB;
            subtitle->source = UTF8SUB;
            subtitle->config.dest = PASSTHRUSUB;
            subtitle->codec = WORK_DECUTF8SUB;
            break;
        case AV_CODEC_ID_MOV_TEXT: // TX3G
            subtitle->format = TEXTSUB;
            subtitle->source = TX3GSUB;
            subtitle->config.dest = PASSTHRUSUB;
            subtitle->codec = WORK_DECTX3GSUB;
            break;
        case AV_CODEC_ID_SSA:
            subtitle->format = TEXTSUB;
            subtitle->source = SSASUB;
            subtitle->config.dest = PASSTHRUSUB;
            subtitle->codec = WORK_DECSSASUB;
            break;
        case AV_CODEC_ID_HDMV_PGS_SUBTITLE:
            subtitle->format = PICTURESUB;
            subtitle->source = PGSSUB;
            subtitle->config.dest = RENDERSUB;
            subtitle->codec = WORK_DECPGSSUB;
            break;
        default:
            hb_log( "add_ffmpeg_subtitle: unknown subtitle stream type: 0x%x", (int) codec->codec_id );
            free(subtitle);
            return;
    }

    // Set the ISO 639-2 language code from the stream metadata,
    // defaulting to "und" (undetermined) when absent.
    AVDictionaryEntry *tag;
    iso639_lang_t *language;

    tag = av_dict_get( st->metadata, "language", NULL, 0 );
    language = lang_for_code2( tag ? tag->value : "und" );
    strcpy( subtitle->lang, language->eng_name );
    strncpy( subtitle->iso639_2, language->iso639_2, 4 );

    // Copy the extradata for the subtitle track
    if (codec->extradata != NULL)
    {
        subtitle->extradata = malloc( codec->extradata_size );
        memcpy( subtitle->extradata, codec->extradata, codec->extradata_size );
        subtitle->extradata_size = codec->extradata_size;
    }

    subtitle->track = id;
    hb_list_add(title->list_subtitle, subtitle);
}
/* Linear scan of an AVDictionary for an exact (case-sensitive) key match.
 * Returns the stored value, or NULL when the key is absent. */
static char *get_ffmpeg_metadata_value( AVDictionary *m, char *key )
{
    AVDictionaryEntry *entry = NULL;

    for (;;)
    {
        entry = av_dict_get( m, "", entry, AV_DICT_IGNORE_SUFFIX );
        if ( entry == NULL )
        {
            return NULL;
        }
        if ( strcmp( key, entry->key ) == 0 )
        {
            return entry->value;
        }
    }
}
/* Create an hb_attachment_t for ffmpeg stream 'id' (currently only
 * TrueType fonts are recognized, by mime type or file extension) and
 * append it to the title's attachment list.
 */
static void add_ffmpeg_attachment( hb_title_t *title, hb_stream_t *stream, int id )
{
    AVStream *st = stream->ffmpeg_ic->streams[id];
    AVCodecContext *codec = st->codec;
    enum attachtype type;
    const char *name = get_ffmpeg_metadata_value( st->metadata, "filename" );
    switch ( codec->codec_id )
    {
        case AV_CODEC_ID_TTF:
            // Libav sets codec ID based on mime type of the attachment
            type = FONT_TTF_ATTACH;
            break;
        default:
        {
            int len = name ? strlen( name ) : 0;
            if( len >= 4 &&
                ( !strcmp( name + len - 4, ".ttc" ) ||
                  !strcmp( name + len - 4, ".TTC" ) ||
                  !strcmp( name + len - 4, ".ttf" ) ||
                  !strcmp( name + len - 4, ".TTF" ) ) )
            {
                // Some attachments don't have the right mime type.
                // So also trigger on file name extension.
                type = FONT_TTF_ATTACH;
                break;
            }
            // Ignore unrecognized attachment type
            return;
        }
    }

    hb_attachment_t *attachment = calloc( 1, sizeof(*attachment) );

    // Copy the attachment name and data.
    // Bug fix: a font identified by mime type (AV_CODEC_ID_TTF) may have
    // no "filename" metadata, making name NULL; strdup(NULL) is
    // undefined behavior, so substitute an empty name.
    attachment->type = type;
    attachment->name = strdup( name != NULL ? name : "" );
    attachment->data = malloc( codec->extradata_size );
    memcpy( attachment->data, codec->extradata, codec->extradata_size );
    attachment->size = codec->extradata_size;

    hb_list_add(title->list_attachment, attachment);
}
/* Copy recognized metadata tags from an AVDictionary into the title's
 * metadata. Returns nonzero when at least one tag was imported. */
static int ffmpeg_decmetadata( AVDictionary *m, hb_title_t *title )
{
    int found = 0;
    AVDictionaryEntry *entry = NULL;

    while ( (entry = av_dict_get( m, "", entry, AV_DICT_IGNORE_SUFFIX )) )
    {
        char *key = entry->key;
        char *val = entry->value;

        if ( strcasecmp( key, "TITLE" ) == 0 )
        {
            hb_metadata_set_name( title->metadata, val );
            found = 1;
        }
        else if ( strcasecmp( key, "ARTIST" ) == 0 )
        {
            hb_metadata_set_artist( title->metadata, val );
            found = 1;
        }
        else if ( strcasecmp( key, "DIRECTOR" ) == 0 ||
                  strcasecmp( key, "album_artist" ) == 0 )
        {
            hb_metadata_set_album_artist( title->metadata, val );
            found = 1;
        }
        else if ( strcasecmp( key, "COMPOSER" ) == 0 )
        {
            hb_metadata_set_composer( title->metadata, val );
            found = 1;
        }
        else if ( strcasecmp( key, "DATE_RELEASED" ) == 0 ||
                  strcasecmp( key, "date" ) == 0 )
        {
            hb_metadata_set_release_date( title->metadata, val );
            found = 1;
        }
        else if ( strcasecmp( key, "SUMMARY" ) == 0 ||
                  strcasecmp( key, "comment" ) == 0 )
        {
            hb_metadata_set_comment( title->metadata, val );
            found = 1;
        }
        else if ( strcasecmp( key, "GENRE" ) == 0 )
        {
            hb_metadata_set_genre( title->metadata, val );
            found = 1;
        }
        else if ( strcasecmp( key, "DESCRIPTION" ) == 0 )
        {
            hb_metadata_set_description( title->metadata, val );
            found = 1;
        }
        else if ( strcasecmp( key, "SYNOPSIS" ) == 0 )
        {
            hb_metadata_set_long_description( title->metadata, val );
            found = 1;
        }
    }
    return found;
}
/* Populate 'title' from the opened libavformat context: name, duration,
 * the first decodable video stream, audio/subtitle/attachment lists,
 * chapters, and metadata. Returns the populated title.
 */
static hb_title_t *ffmpeg_title_scan( hb_stream_t *stream, hb_title_t *title )
{
    AVFormatContext *ic = stream->ffmpeg_ic;

    // 'Barebones Title'
    title->type = HB_FF_STREAM_TYPE;
    title->index = 1;

    // Copy part of the stream path to the title name
    // (basename with the extension stripped)
    char *sep = hb_strr_dir_sep(stream->path);
    if (sep)
        strcpy(title->name, sep+1);
    char *dot_term = strrchr(title->name, '.');
    if (dot_term)
        *dot_term = '\0';

    // Convert libav duration (AV_TIME_BASE units) to 90kHz ticks.
    uint64_t dur = ic->duration * 90000 / AV_TIME_BASE;
    title->duration = dur;
    dur /= 90000;
    title->hours    = dur / 3600;
    title->minutes  = ( dur % 3600 ) / 60;
    title->seconds  = dur % 60;

    // set the title to decode the first video stream in the file
    title->demuxer = HB_NULL_DEMUXER;
    title->video_codec = 0;
    int i;
    int pix_fmt = -1;
    for (i = 0; i < ic->nb_streams; ++i )
    {
        if ( ic->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
           !(ic->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC) &&
             avcodec_find_decoder( ic->streams[i]->codec->codec_id ) &&
             title->video_codec == 0 )
        {
            AVCodecContext *context = ic->streams[i]->codec;
            pix_fmt = context->pix_fmt;
            // Skip color spaces swscale can't convert to YUV420P.
            if ( context->pix_fmt != AV_PIX_FMT_YUV420P &&
                 !sws_isSupportedInput( context->pix_fmt ) )
            {
                hb_log( "ffmpeg_title_scan: Unsupported color space" );
                continue;
            }
            title->video_id = i;
            stream->ffmpeg_video_id = i;
            if ( ic->streams[i]->sample_aspect_ratio.num &&
                 ic->streams[i]->sample_aspect_ratio.den )
            {
                title->pixel_aspect_width = ic->streams[i]->sample_aspect_ratio.num;
                title->pixel_aspect_height = ic->streams[i]->sample_aspect_ratio.den;
            }
            title->video_codec = WORK_DECAVCODECV;
            title->video_codec_param = context->codec_id;
        }
        else if ( ic->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
                  avcodec_find_decoder( ic->streams[i]->codec->codec_id ) )
        {
            add_ffmpeg_audio( title, stream, i );
        }
        else if ( ic->streams[i]->codec->codec_type == AVMEDIA_TYPE_SUBTITLE )
        {
            add_ffmpeg_subtitle( title, stream, i );
        }
        else if ( ic->streams[i]->codec->codec_type == AVMEDIA_TYPE_ATTACHMENT )
        {
            add_ffmpeg_attachment( title, stream, i );
        }
    }

    title->container_name = strdup( ic->iformat->name );
    title->data_rate = ic->bit_rate;

    hb_deep_log( 2, "Found ffmpeg %d chapters, container=%s", ic->nb_chapters, ic->iformat->name );

    if( ic->nb_chapters != 0 )
    {
        AVChapter *m;
        uint64_t duration_sum = 0;
        for( i = 0; i < ic->nb_chapters; i++ )
            if( ( m = ic->chapters[i] ) != NULL )
            {
                AVDictionaryEntry *tag;
                hb_chapter_t * chapter;
                chapter = calloc( sizeof( hb_chapter_t ), 1 );
                chapter->index    = i+1;
                // Chapter duration = this chapter's end (converted from its
                // time base to 90kHz) minus the end of the previous chapter.
                chapter->duration = ( m->end / ( (double) m->time_base.num * m->time_base.den ) ) * 90000 - duration_sum;
                duration_sum     += chapter->duration;

                int seconds      = ( chapter->duration + 45000 ) / 90000;
                chapter->hours   = ( seconds / 3600 );
                chapter->minutes = ( seconds % 3600 ) / 60;
                chapter->seconds = ( seconds % 60 );

                tag = av_dict_get( m->metadata, "title", NULL, 0 );
                /* Ignore generic chapter names set by MakeMKV
                 * ("Chapter 00" etc.).
                 * Our default chapter names are better. */
                if( tag && tag->value &&
                    ( strncmp( "Chapter ", tag->value, 8 ) ||
                      strlen( tag->value ) > 11 ) )
                {
                    hb_chapter_set_title( chapter, tag->value );
                }
                else
                {
                    char chapter_title[80];
                    sprintf( chapter_title, "Chapter %d", chapter->index );
                    hb_chapter_set_title( chapter, chapter_title );
                }

                hb_deep_log( 2, "Added chapter %i, name='%s', dur=%"PRIu64", (%02i:%02i:%02i)",
                             chapter->index, chapter->title, chapter->duration,
                             chapter->hours, chapter->minutes, chapter->seconds );

                hb_list_add( title->list_chapter, chapter );
            }
    }

    /*
     * Fill the metadata.
     */
    ffmpeg_decmetadata( ic->metadata, title );

    if( hb_list_count( title->list_chapter ) == 0 )
    {
        // Need at least one chapter
        hb_chapter_t * chapter;
        chapter = calloc( sizeof( hb_chapter_t ), 1 );
        chapter->index = 1;
        chapter->duration = title->duration;
        chapter->hours = title->hours;
        chapter->minutes = title->minutes;
        chapter->seconds = title->seconds;
        hb_list_add( title->list_chapter, chapter );
    }

#ifdef USE_HWD
    // Probe whether DXVA2 hardware decode is available for this codec
    // and pixel format.
    hb_va_dxva2_t * dxva2 = NULL;
    dxva2 = hb_va_create_dxva2( dxva2, title->video_codec_param );
    if (dxva2)
    {
        title->hwd_support = 1;
        hb_va_close(dxva2);
        dxva2 = NULL;
    }
    else
        title->hwd_support = 0;
    if ( hb_check_hwd_fmt(pix_fmt) == 0)
        title->hwd_support = 0;
#else
    // Eliminate compiler warning "pix_fmt set but not used"
    (void)pix_fmt;
    title->hwd_support = 0;
#endif

    return title;
}
/* Convert a timestamp from an ffmpeg stream timebase to HandBrake's 90kHz
 * clock. conv_factor is precomputed as 90000 * timebase.num / timebase.den.
 * AV_NOPTS_VALUE is passed through untouched so "no timestamp" survives
 * the conversion. */
static int64_t av_to_hb_pts( int64_t pts, double conv_factor )
{
    if ( pts != AV_NOPTS_VALUE )
    {
        return (int64_t)( conv_factor * (double)pts );
    }
    return AV_NOPTS_VALUE;
}
/*
 * Decide whether the packet currently held in stream->ffmpeg_pkt is a key
 * frame. Most codecs are covered by the AV_PKT_FLAG_KEY packet flag, but
 * ffmpeg's VC-1 and WMV3 decoders don't mark key frames, so for those we
 * parse the frame header bits ourselves.
 *
 * NOTE(review): assumes at least 4 bytes of packet data and 4 bytes of
 * codec extradata are present for the VC1/WMV3 cases — confirm the demuxer
 * guarantees this.
 */
static int ffmpeg_is_keyframe( hb_stream_t *stream )
{
    uint8_t *pkt;

    switch ( stream->ffmpeg_ic->streams[stream->ffmpeg_video_id]->codec->codec_id )
    {
        case AV_CODEC_ID_VC1:
            // XXX the VC1 codec doesn't mark key frames so to get previews
            // we do it ourselves here. The decoder gets messed up if it
            // doesn't get a SEQ header first so we consider that to be a key frame.
            pkt = stream->ffmpeg_pkt->data;
            // 00 00 01 0F is the VC-1 sequence header start code.
            if ( !pkt[0] && !pkt[1] && pkt[2] == 1 && pkt[3] == 0x0f )
                return 1;

            return 0;

        case AV_CODEC_ID_WMV3:
            // XXX the ffmpeg WMV3 codec doesn't mark key frames.
            // Only M$ could make I-frame detection this complicated: there
            // are two to four bits of unused junk ahead of the frame type
            // so we have to look at the sequence header to find out how much
            // to skip. Then there are three different ways of coding the type
            // depending on whether it's main or advanced profile then whether
            // there are bframes or not so we have to look at the sequence
            // header to get that.
            pkt = stream->ffmpeg_pkt->data;
            uint8_t *seqhdr = stream->ffmpeg_ic->streams[stream->ffmpeg_video_id]->codec->extradata;
            // pshift = number of junk bits to skip before the frame type.
            int pshift = 2;
            if ( ( seqhdr[3] & 0x02 ) == 0 )
                // no FINTERPFLAG
                ++pshift;
            if ( ( seqhdr[3] & 0x80 ) == 0 )
                // no RANGEREDUCTION
                ++pshift;
            if ( seqhdr[3] & 0x70 )
                // stream has b-frames
                return ( ( pkt[0] >> pshift ) & 0x3 ) == 0x01;

            return ( ( pkt[0] >> pshift ) & 0x2 ) == 0;

        default:
            break;
    }
    // Everything else: trust the demuxer's keyframe flag.
    return ( stream->ffmpeg_pkt->flags & AV_PKT_FLAG_KEY );
}
/*
 * Read the next demuxed packet from the ffmpeg container and wrap it in an
 * hb_buffer_t. Handles: EAGAIN retries from av_read_frame, keyframe hunting
 * after a seek, M$ packed-b-frame 'null frames', oversized-packet rejection,
 * palette side data, pts/dts conversion to HB's 90kHz clock, subtitle
 * durations, and chapter-boundary marking on video packets.
 * Returns NULL at end of stream (or unrecoverable read error).
 */
hb_buffer_t * hb_ffmpeg_read( hb_stream_t *stream )
{
    int err;
    hb_buffer_t * buf;

  again:
    if ( ( err = av_read_frame( stream->ffmpeg_ic, stream->ffmpeg_pkt )) < 0 )
    {
        // av_read_frame can return EAGAIN. In this case, it expects
        // to be called again to get more data.
        if ( err == AVERROR(EAGAIN) )
        {
            goto again;
        }
        // XXX the following conditional is to handle avi files that
        // use M$ 'packed b-frames' and occasionally have negative
        // sizes for the null frames these require.
        if ( err != AVERROR(ENOMEM) || stream->ffmpeg_pkt->size >= 0 )
            // eof
            return NULL;
    }
    if ( stream->ffmpeg_pkt->stream_index == stream->ffmpeg_video_id )
    {
        if ( stream->need_keyframe )
        {
            // we've just done a seek (generally for scan or live preview) and
            // want to start at a keyframe. Some ffmpeg codecs seek to a key
            // frame but most don't. So we spin until we either get a keyframe
            // or we've looked through 50 video frames without finding one.
            if ( ! ffmpeg_is_keyframe( stream ) && ++stream->need_keyframe < 50 )
            {
                av_free_packet( stream->ffmpeg_pkt );
                goto again;
            }
            stream->need_keyframe = 0;
        }
        ++stream->frames;
    }
    if ( stream->ffmpeg_pkt->size <= 0 )
    {
        // M$ "invalid and inefficient" packed b-frames require 'null frames'
        // following them to preserve the timing (since the packing puts two
        // or more frames in what looks like one avi frame). The contents and
        // size of these null frames are ignored by the ff_h263_decode_frame
        // as long as they're < 20 bytes. We need a positive size so we use
        // one byte if we're given a zero or negative size. We don't know
        // if the pkt data points anywhere reasonable so we just stick a
        // byte of zero in our outbound buf.
        buf = hb_buffer_init( 1 );
        *buf->data = 0;
    }
    else
    {
        // sometimes we get absurd sizes from ffmpeg
        if ( stream->ffmpeg_pkt->size >= (1 << 25) )
        {
            hb_log( "ffmpeg_read: pkt too big: %d bytes", stream->ffmpeg_pkt->size );
            av_free_packet( stream->ffmpeg_pkt );
            // skip this packet and try the next one
            return hb_ffmpeg_read( stream );
        }
        buf = hb_buffer_init( stream->ffmpeg_pkt->size );
        memcpy( buf->data, stream->ffmpeg_pkt->data, stream->ffmpeg_pkt->size );

        // Preserve any palette side data so the decoder can use it later.
        const uint8_t *palette;
        int size;
        palette = av_packet_get_side_data(stream->ffmpeg_pkt,
                                          AV_PKT_DATA_PALETTE, &size);
        if (palette != NULL)
        {
            buf->palette = hb_buffer_init( size );
            memcpy( buf->palette->data, palette, size );
        }
    }
    buf->s.id = stream->ffmpeg_pkt->stream_index;

    // compute a conversion factor to go from the ffmpeg
    // timebase for the stream to HB's 90kHz timebase.
    AVStream *s = stream->ffmpeg_ic->streams[stream->ffmpeg_pkt->stream_index];
    double tsconv = 90000. * (double)s->time_base.num / (double)s->time_base.den;

    buf->s.start = av_to_hb_pts( stream->ffmpeg_pkt->pts, tsconv );
    buf->s.renderOffset = av_to_hb_pts( stream->ffmpeg_pkt->dts, tsconv );
    // If only one of pts/dts is valid, reuse it for the other.
    if ( buf->s.renderOffset >= 0 && buf->s.start == AV_NOPTS_VALUE )
    {
        buf->s.start = buf->s.renderOffset;
    }
    else if ( buf->s.renderOffset == AV_NOPTS_VALUE && buf->s.start >= 0 )
    {
        buf->s.renderOffset = buf->s.start;
    }

    /*
     * Fill out buf->s.stop for subtitle packets
     *
     * libavcodec's MKV demuxer stores the duration of UTF-8 subtitles (AV_CODEC_ID_TEXT)
     * in the 'convergence_duration' field for some reason.
     *
     * Other subtitles' durations are stored in the 'duration' field.
     *
     * VOB subtitles (AV_CODEC_ID_DVD_SUBTITLE) do not have their duration stored in
     * either field. This is not a problem because the VOB decoder can extract this
     * information from the packet payload itself.
     *
     * SSA subtitles (AV_CODEC_ID_SSA) do not have their duration stored in
     * either field. This is not a problem because the SSA decoder can extract this
     * information from the packet payload itself.
     */
    enum AVCodecID ffmpeg_pkt_codec;
    enum AVMediaType codec_type;
    ffmpeg_pkt_codec = stream->ffmpeg_ic->streams[stream->ffmpeg_pkt->stream_index]->codec->codec_id;
    codec_type = stream->ffmpeg_ic->streams[stream->ffmpeg_pkt->stream_index]->codec->codec_type;
    switch ( codec_type )
    {
        case AVMEDIA_TYPE_VIDEO:
            buf->s.type = VIDEO_BUF;
            /*
             * libav avcodec_decode_video2() needs AVPacket flagged with AV_PKT_FLAG_KEY
             * for some codecs. For example, sequence of PNG in a mov container.
             */
            if ( stream->ffmpeg_pkt->flags & AV_PKT_FLAG_KEY )
            {
                buf->s.frametype |= HB_FRAME_KEY;
            }
            break;

        case AVMEDIA_TYPE_AUDIO:
            buf->s.type = AUDIO_BUF;
            break;

        case AVMEDIA_TYPE_SUBTITLE:
            buf->s.type = SUBTITLE_BUF;
            break;

        default:
            buf->s.type = OTHER_BUF;
            break;
    }
    if ( ffmpeg_pkt_codec == AV_CODEC_ID_TEXT ) {
        int64_t ffmpeg_pkt_duration = stream->ffmpeg_pkt->convergence_duration;
        int64_t buf_duration = av_to_hb_pts( ffmpeg_pkt_duration, tsconv );
        buf->s.stop = buf->s.start + buf_duration;
    }
    if ( ffmpeg_pkt_codec == AV_CODEC_ID_MOV_TEXT ) {
        int64_t ffmpeg_pkt_duration = stream->ffmpeg_pkt->duration;
        int64_t buf_duration = av_to_hb_pts( ffmpeg_pkt_duration, tsconv );
        buf->s.stop = buf->s.start + buf_duration;
    }

    /*
     * Check to see whether this buffer is on a chapter
     * boundary, if so mark it as such in the buffer then advance
     * chapter_end to the end of the next chapter.
     * If there are no chapters, chapter_end is always initialized to INT64_MAX
     * (roughly 3 million years at our 90KHz clock rate) so the test
     * below handles both the chapters & no chapters case.
     */
    if ( stream->ffmpeg_pkt->stream_index == stream->ffmpeg_video_id &&
         buf->s.start >= stream->chapter_end )
    {
        hb_chapter_t *chapter = hb_list_item( stream->title->list_chapter,
                                              stream->chapter+1 );
        if( chapter )
        {
            stream->chapter++;
            stream->chapter_end += chapter->duration;
            buf->s.new_chap = stream->chapter + 1;
            hb_deep_log( 2, "ffmpeg_read starting chapter %i at %"PRId64,
                         stream->chapter + 1, buf->s.start);
        } else {
            // Must have run out of chapters, stop looking.
            stream->chapter_end = INT64_MAX;
            buf->s.new_chap = 0;
        }
    } else {
        buf->s.new_chap = 0;
    }
    av_free_packet( stream->ffmpeg_pkt );
    return buf;
}
/*
 * Seek to fraction 'frac' (0..1) of the container's duration.
 * Seeks backward to the nearest seek point and flags the reader to hunt
 * for the next keyframe. Always returns 1.
 *
 * Fix: the two branches previously duplicated the identical
 * avformat_seek_file() call and error handling; only the target position
 * computation differs, so compute pos per-branch and seek once.
 */
static int ffmpeg_seek( hb_stream_t *stream, float frac )
{
    AVFormatContext *ic = stream->ffmpeg_ic;
    int64_t pos;
    int res;

    if ( frac > 0. )
    {
        // start time plus frac of the total duration (AV_TIME_BASE units)
        pos = (double)ic->duration * (double)frac +
              ffmpeg_initial_timestamp( stream );
    }
    else
    {
        pos = ffmpeg_initial_timestamp( stream );
    }
    res = avformat_seek_file( ic, -1, 0, pos, pos, AVSEEK_FLAG_BACKWARD );
    if ( res < 0 )
    {
        hb_error( "avformat_seek_file failed" );
    }
    stream->need_keyframe = 1;
    return 1;
}
// Assumes that we are always seeking forward.
// 'ts' is a 90kHz-clock timestamp; returns the avformat_seek_file result
// (>= 0 on success).
static int ffmpeg_seek_ts( hb_stream_t *stream, int64_t ts )
{
    AVFormatContext * ic = stream->ffmpeg_ic;
    AVStream * st = ic->streams[stream->ffmpeg_video_id];
    int64_t target;

    // 90kHz ticks -> AV_TIME_BASE units, offset by the container start time.
    target = ts * AV_TIME_BASE / 90000 + ffmpeg_initial_timestamp( stream );
    // timebase must be adjusted to match timebase of stream we are
    // using for seeking.
    target = av_rescale( target, st->time_base.den,
                         AV_TIME_BASE * (int64_t)st->time_base.num );
    stream->need_keyframe = 1;
    // Seek to the nearest timestamp before that requested where
    // there is an I-frame
    return avformat_seek_file( ic, stream->ffmpeg_video_id, 0,
                               target, target, 0 );
}
HandBrake-0.10.2/libhb/internal.h 0000664 0001752 0001752 00000041063 12464460407 017117 0 ustar handbrake handbrake /* internal.h
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hbffmpeg.h"
#include "extras/cl.h"
/***********************************************************************
* common.c
**********************************************************************/
void hb_log( char * log, ... ) HB_WPRINTF(1,2);
extern int global_verbosity_level; // Global variable for hb_deep_log
typedef enum hb_debug_level_s
{
HB_SUPPORT_LOG = 1, // helpful in tech support
HB_HOUSEKEEPING_LOG = 2, // stuff we hate scrolling through
HB_GRANULAR_LOG = 3 // sample-by-sample
} hb_debug_level_t;
void hb_valog( hb_debug_level_t level, const char * prefix, const char * log, va_list args) HB_WPRINTF(3,0);
void hb_deep_log( hb_debug_level_t level, char * log, ... ) HB_WPRINTF(2,3);
void hb_error( char * fmt, ...) HB_WPRINTF(1,2);
void hb_hexdump( hb_debug_level_t level, const char * label, const uint8_t * data, int len );
int hb_list_bytes( hb_list_t * );
void hb_list_seebytes( hb_list_t * l, uint8_t * dst, int size );
void hb_list_getbytes( hb_list_t * l, uint8_t * dst, int size,
uint64_t * pts, uint64_t * pos );
void hb_list_empty( hb_list_t ** );
hb_title_t * hb_title_init( char * dvd, int index );
void hb_title_close( hb_title_t ** );
/***********************************************************************
* hb.c
**********************************************************************/
int hb_get_pid( hb_handle_t * );
void hb_set_state( hb_handle_t *, hb_state_t * );
/***********************************************************************
* fifo.c
**********************************************************************/
/*
* Holds a packet of data that is moving through the transcoding process.
*
* May have metadata associated with it via extra fields
* that are conditionally used depending on the type of packet.
*/
// Per-packet metadata carried alongside the payload of an hb_buffer_t.
struct hb_buffer_settings_s
{
    enum { AUDIO_BUF, VIDEO_BUF, SUBTITLE_BUF, FRAME_BUF, OTHER_BUF } type;
    int           id;           // ID of the track that the packet comes from
    int64_t       start;        // start time of frame
    double        duration;     // Actual duration, may be fractional ticks
    int64_t       stop;         // stop time of frame
    int64_t       renderOffset; // DTS used by b-frame offsets in muxmp4
    int64_t       pcr;          // NOTE(review): presumably MPEG program clock reference — confirm at producers
    uint8_t       discontinuity;
    int           new_chap;     // Video packets: if non-zero, is the index of the chapter whose boundary was crossed

    // Frame-type bits stored in 'frametype' below.
#define HB_FRAME_IDR      0x01
#define HB_FRAME_I        0x02
#define HB_FRAME_AUDIO    0x04
#define HB_FRAME_SUBTITLE 0x08
#define HB_FRAME_P        0x10
#define HB_FRAME_B        0x20
#define HB_FRAME_BREF     0x40
#define HB_FRAME_KEY      0x0F  // mask: any of IDR/I/AUDIO/SUBTITLE
#define HB_FRAME_REF      0xF0  // mask: any reference frame type
    uint8_t       frametype;
    uint16_t      flags;
};
// Geometry and pixel-format descriptor for a frame (hb_buffer_t.f).
struct hb_image_format_s
{
    int           x;        // NOTE(review): presumably position/crop offsets — confirm at call sites
    int           y;
    int           width;
    int           height;
    int           fmt;      // pixel format (AV_PIX_FMT_*), see hb_frame_buffer_init()
};
// A packet of data moving through the transcode pipeline (see fifo.c).
struct hb_buffer_s
{
    int           size;     // size of this packet
    int           alloc;    // used internally by the packet allocator (hb_buffer_init)
    uint8_t *     data;     // packet data
    int           offset;   // used internally by packet lists (hb_list_t)

    /*
     * Corresponds to the order that this packet was read from the demuxer.
     *
     * It is important that video decoder work-objects pass this value through
     * from their input packets to the output packets they generate. Otherwise
     * RENDERSUB subtitles (especially VOB subtitles) will break.
     *
     * Subtitle decoder work-objects that output a renderable subtitle
     * format (ex: PICTURESUB) must also be careful to pass the sequence number
     * through for the same reason.
     */
    int64_t       sequence;

    hb_buffer_settings_t s;   // per-packet metadata (timestamps, type, flags)
    hb_image_format_t f;      // frame geometry / pixel format

    // Per-plane view into 'data' for uncompressed video frames.
    struct buffer_plane
    {
        uint8_t     * data;
        int           stride;
        int           width;
        int           height;
        int           height_stride;
        int           size;
    } plane[4]; // 3 Color components + alpha

    struct qsv
    {
        void          * qsv_atom;
        void          * filter_details;
    } qsv_details;

    /* OpenCL */
    struct cl_data
    {
        cl_mem buffer;
        cl_event last_event;
        enum { HOST, DEVICE } buffer_location;
    } cl;

    // libav may attach AV_PKT_DATA_PALETTE side data to some AVPackets
    // Store this data here when read and pass to decoder.
    hb_buffer_t * palette;

    // PICTURESUB subtitle packets:

    // Video packets (after processing by the hb_sync_video work-object):
    //   A (copy of a) PICTURESUB subtitle packet that needs to be burned into
    //   this video packet by the vobsub renderer filter
    //
    //   Subtitles that are simply passed thru are NOT attached to the
    //   associated video packets.
    hb_buffer_t * sub;

    // Packets in a list:
    //   the next packet in the list
    hb_buffer_t * next;
};
void hb_buffer_pool_init( void );
void hb_buffer_pool_free( void );
hb_buffer_t * hb_buffer_init( int size );
hb_buffer_t * hb_frame_buffer_init( int pix_fmt, int w, int h);
void hb_buffer_init_planes( hb_buffer_t * b );
void hb_buffer_realloc( hb_buffer_t *, int size );
void hb_video_buffer_realloc( hb_buffer_t * b, int w, int h );
void hb_buffer_reduce( hb_buffer_t * b, int size );
void hb_buffer_close( hb_buffer_t ** );
hb_buffer_t * hb_buffer_dup( const hb_buffer_t * src );
int hb_buffer_copy( hb_buffer_t * dst, const hb_buffer_t * src );
void hb_buffer_swap_copy( hb_buffer_t *src, hb_buffer_t *dst );
void hb_buffer_move_subs( hb_buffer_t * dst, hb_buffer_t * src );
hb_image_t * hb_buffer_to_image(hb_buffer_t *buf);
hb_fifo_t * hb_fifo_init( int capacity, int thresh );
int hb_fifo_size( hb_fifo_t * );
int hb_fifo_size_bytes( hb_fifo_t * );
int hb_fifo_is_full( hb_fifo_t * );
float hb_fifo_percent_full( hb_fifo_t * f );
hb_buffer_t * hb_fifo_get( hb_fifo_t * );
hb_buffer_t * hb_fifo_get_wait( hb_fifo_t * );
hb_buffer_t * hb_fifo_see( hb_fifo_t * );
hb_buffer_t * hb_fifo_see_wait( hb_fifo_t * );
hb_buffer_t * hb_fifo_see2( hb_fifo_t * );
void hb_fifo_push( hb_fifo_t *, hb_buffer_t * );
void hb_fifo_push_wait( hb_fifo_t *, hb_buffer_t * );
int hb_fifo_full_wait( hb_fifo_t * f );
void hb_fifo_push_head( hb_fifo_t *, hb_buffer_t * );
void hb_fifo_push_list_element( hb_fifo_t *fifo, hb_buffer_t *buffer_list );
hb_buffer_t * hb_fifo_get_list_element( hb_fifo_t *fifo );
void hb_fifo_close( hb_fifo_t ** );
void hb_fifo_flush( hb_fifo_t * f );
// Stride (in bytes) for one plane of a frame, rounded up to a 32-byte
// multiple so buffers stay SIMD friendly.
// Decomb requires stride aligned to 32 bytes.
// TODO: eliminate extra buffer copies in decomb
static inline int hb_image_stride( int pix_fmt, int width, int plane )
{
    int stride = av_image_get_linesize( pix_fmt, width, plane );
    return MULTIPLE_MOD_UP( stride, 32 );
}
// Width in pixels of one plane: chroma planes (1 and 2) are divided by the
// horizontal subsampling factor, rounded up.
static inline int hb_image_width(int pix_fmt, int width, int plane)
{
    if (plane == 1 || plane == 2)
    {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
        if (desc != NULL)
        {
            // Negate-shift-negate divides rounding up instead of down.
            return -((-width) >> desc->log2_chroma_w);
        }
    }
    return width;
}
// Allocated height (rows) of one plane.
// Decomb requires 6 extra lines and stride aligned to 32 bytes.
static inline int hb_image_height_stride(int pix_fmt, int height, int plane)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    int rows = MULTIPLE_MOD_UP(height + 6, 32);
    if (desc != NULL && (plane == 1 || plane == 2))
    {
        // Chroma planes are vertically subsampled.
        rows >>= desc->log2_chroma_h;
    }
    return rows;
}
// Visible height in rows of one plane: chroma planes (1 and 2) are divided
// by the vertical subsampling factor, rounded up.
static inline int hb_image_height(int pix_fmt, int height, int plane)
{
    if (plane == 1 || plane == 2)
    {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
        if (desc != NULL)
        {
            // Negate-shift-negate divides rounding up instead of down.
            return -((-height) >> desc->log2_chroma_h);
        }
    }
    return height;
}
// this routine gets a buffer for an uncompressed YUV420 video frame
// with dimensions width x height.
// Convenience wrapper around hb_frame_buffer_init() pinned to 4:2:0.
static inline hb_buffer_t * hb_video_buffer_init( int width, int height )
{
    return hb_frame_buffer_init( AV_PIX_FMT_YUV420P, width, height );
}
/***********************************************************************
* Threads: update.c, scan.c, work.c, reader.c, muxcommon.c
**********************************************************************/
hb_thread_t * hb_update_init( int * build, char * version );
hb_thread_t * hb_scan_init( hb_handle_t *, volatile int * die,
const char * path, int title_index,
hb_title_set_t * title_set, int preview_count,
int store_previews, uint64_t min_duration );
hb_thread_t * hb_work_init( hb_list_t * jobs,
volatile int * die, hb_error_code * error, hb_job_t ** job );
void ReadLoop( void * _w );
hb_work_object_t * hb_muxer_init( hb_job_t * );
hb_work_object_t * hb_get_work( int );
hb_work_object_t * hb_codec_decoder( int );
hb_work_object_t * hb_codec_encoder( int );
/***********************************************************************
* sync.c
**********************************************************************/
hb_work_object_t * hb_sync_init( hb_job_t * job );
/***********************************************************************
* mpegdemux.c
**********************************************************************/
// State carried across calls by the MPEG PS/TS demuxers (mpegdemux.c):
// tracks the system clock reference so timestamp discontinuities can be
// detected and compensated.
typedef struct {
    int64_t last_scr;        /* unadjusted SCR from most recent pack */
    int64_t scr_delta;       /* correction applied after an SCR discontinuity */
    int64_t last_pts;        /* last pts we saw */
    int     scr_changes;     /* number of SCR discontinuities */
    int     dts_drops;       /* number of drops because DTS too far from SCR */
    int     new_chap;
} hb_psdemux_t;
typedef void (*hb_muxer_t)(hb_buffer_t *, hb_list_t *, hb_psdemux_t*);
void hb_demux_ps( hb_buffer_t * ps_buf, hb_list_t * es_list, hb_psdemux_t * );
void hb_demux_ts( hb_buffer_t * ps_buf, hb_list_t * es_list, hb_psdemux_t * );
void hb_demux_null( hb_buffer_t * ps_buf, hb_list_t * es_list, hb_psdemux_t * );
extern const hb_muxer_t hb_demux[];
/***********************************************************************
* batch.c
**********************************************************************/
typedef struct hb_batch_s hb_batch_t;
hb_batch_t * hb_batch_init( char * path );
void hb_batch_close( hb_batch_t ** _d );
int hb_batch_title_count( hb_batch_t * d );
hb_title_t * hb_batch_title_scan( hb_batch_t * d, int t );
/***********************************************************************
* dvd.c
**********************************************************************/
typedef struct hb_bd_s hb_bd_t;
typedef union hb_dvd_s hb_dvd_t;
typedef struct hb_stream_s hb_stream_t;
hb_dvd_t * hb_dvd_init( char * path );
int hb_dvd_title_count( hb_dvd_t * );
hb_title_t * hb_dvd_title_scan( hb_dvd_t *, int title, uint64_t min_duration );
int hb_dvd_start( hb_dvd_t *, hb_title_t *title, int chapter );
void hb_dvd_stop( hb_dvd_t * );
int hb_dvd_seek( hb_dvd_t *, float );
hb_buffer_t * hb_dvd_read( hb_dvd_t * );
int hb_dvd_chapter( hb_dvd_t * );
int hb_dvd_is_break( hb_dvd_t * d );
void hb_dvd_close( hb_dvd_t ** );
int hb_dvd_angle_count( hb_dvd_t * d );
void hb_dvd_set_angle( hb_dvd_t * d, int angle );
int hb_dvd_main_feature( hb_dvd_t * d, hb_list_t * list_title );
hb_bd_t * hb_bd_init( char * path );
int hb_bd_title_count( hb_bd_t * d );
hb_title_t * hb_bd_title_scan( hb_bd_t * d, int t, uint64_t min_duration );
int hb_bd_start( hb_bd_t * d, hb_title_t *title );
void hb_bd_stop( hb_bd_t * d );
int hb_bd_seek( hb_bd_t * d, float f );
int hb_bd_seek_pts( hb_bd_t * d, uint64_t pts );
int hb_bd_seek_chapter( hb_bd_t * d, int chapter );
hb_buffer_t * hb_bd_read( hb_bd_t * d );
int hb_bd_chapter( hb_bd_t * d );
void hb_bd_close( hb_bd_t ** _d );
void hb_bd_set_angle( hb_bd_t * d, int angle );
int hb_bd_main_feature( hb_bd_t * d, hb_list_t * list_title );
hb_stream_t * hb_bd_stream_open( hb_title_t *title );
void hb_ts_stream_reset(hb_stream_t *stream);
hb_stream_t * hb_stream_open( char * path, hb_title_t *title, int scan );
void hb_stream_close( hb_stream_t ** );
hb_title_t * hb_stream_title_scan( hb_stream_t *, hb_title_t *);
hb_buffer_t * hb_stream_read( hb_stream_t * );
int hb_stream_seek( hb_stream_t *, float );
int hb_stream_seek_ts( hb_stream_t * stream, int64_t ts );
int hb_stream_seek_chapter( hb_stream_t *, int );
int hb_stream_chapter( hb_stream_t * );
hb_buffer_t * hb_ts_decode_pkt( hb_stream_t *stream, const uint8_t * pkt );
void hb_stream_set_need_keyframe( hb_stream_t *stream, int need_keyframe );
#define STR4_TO_UINT32(p) \
((((const uint8_t*)(p))[0] << 24) | \
(((const uint8_t*)(p))[1] << 16) | \
(((const uint8_t*)(p))[2] << 8) | \
((const uint8_t*)(p))[3])
/***********************************************************************
* Work objects
**********************************************************************/
#define HB_CONFIG_MAX_SIZE (2*8192)
// Per-codec elementary-stream setup data.
// NOTE(review): presumably filled by the encoder work objects and consumed
// by the muxers — confirm at the producers/consumers.
union hb_esconfig_u
{
    struct
    {
        uint8_t bytes[HB_CONFIG_MAX_SIZE];
        int     length;
    } mpeg4;

    struct
    {
        uint8_t  sps[HB_CONFIG_MAX_SIZE];   // sequence parameter set
        int      sps_length;
        uint8_t  pps[HB_CONFIG_MAX_SIZE];   // picture parameter set
        int      pps_length;
        int      init_delay;
    } h264;

    struct
    {
        uint8_t headers[HB_CONFIG_MAX_SIZE];
        int headers_length;
    } h265;

    struct
    {
        uint8_t headers[3][HB_CONFIG_MAX_SIZE];
    } theora;

    struct
    {
        uint8_t bytes[HB_CONFIG_MAX_SIZE];
        int     length;
    } extradata;

    struct
    {
        uint8_t headers[3][HB_CONFIG_MAX_SIZE];
        char *language;
    } vorbis;
};
// Work-object identifiers: one per decoder/encoder/sync/mux stage.
// Used by hb_get_work() to look up the matching hb_work_object_t.
enum
{
    WORK_NONE = 0,
    WORK_SYNC_VIDEO,
    WORK_SYNC_AUDIO,
    WORK_DECCC608,
    WORK_DECVOBSUB,
    WORK_DECSRTSUB,
    WORK_DECUTF8SUB,
    WORK_DECTX3GSUB,
    WORK_DECSSASUB,
    WORK_ENCVOBSUB,
    WORK_RENDER,
    WORK_ENCAVCODEC,
    WORK_ENCQSV,
    WORK_ENCX264,
    WORK_ENCX265,
    WORK_ENCTHEORA,
    WORK_DECAVCODEC,
    WORK_DECAVCODECV,
    WORK_DECLPCM,
    WORK_ENCLAME,
    WORK_ENCVORBIS,
    WORK_ENC_CA_AAC,
    WORK_ENC_CA_HAAC,
    WORK_ENCAVCODEC_AUDIO,
    WORK_MUX,
    WORK_READER,
    WORK_DECPGSSUB
};
extern hb_filter_object_t hb_filter_detelecine;
extern hb_filter_object_t hb_filter_deinterlace;
extern hb_filter_object_t hb_filter_deblock;
extern hb_filter_object_t hb_filter_denoise;
extern hb_filter_object_t hb_filter_nlmeans;
extern hb_filter_object_t hb_filter_decomb;
extern hb_filter_object_t hb_filter_rotate;
extern hb_filter_object_t hb_filter_crop_scale;
extern hb_filter_object_t hb_filter_render_sub;
extern hb_filter_object_t hb_filter_vfr;
#ifdef USE_QSV
extern hb_filter_object_t hb_filter_qsv;
extern hb_filter_object_t hb_filter_qsv_pre;
extern hb_filter_object_t hb_filter_qsv_post;
#endif
// Picture flags used by filters
#ifndef PIC_FLAG_REPEAT_FIRST_FIELD
#define PIC_FLAG_REPEAT_FIRST_FIELD 256
#endif
#ifndef PIC_FLAG_TOP_FIELD_FIRST
#define PIC_FLAG_TOP_FIELD_FIRST 8
#endif
#ifndef PIC_FLAG_PROGRESSIVE_FRAME
#define PIC_FLAG_PROGRESSIVE_FRAME 16
#endif
#define PIC_FLAG_REPEAT_FRAME 512
extern hb_work_object_t * hb_objects;
#define HB_WORK_IDLE 0
#define HB_WORK_OK 1
#define HB_WORK_ERROR 2
#define HB_WORK_DONE 3
/***********************************************************************
* Muxers
**********************************************************************/
typedef struct hb_mux_object_s hb_mux_object_t;
typedef struct hb_mux_data_s hb_mux_data_t;
#define HB_MUX_COMMON \
int (*init) ( hb_mux_object_t * ); \
int (*mux) ( hb_mux_object_t *, hb_mux_data_t *, \
hb_buffer_t * ); \
int (*end) ( hb_mux_object_t * );
#define DECLARE_MUX( a ) \
hb_mux_object_t * hb_mux_##a##_init( hb_job_t * );
DECLARE_MUX( mp4 );
DECLARE_MUX( mkv );
DECLARE_MUX( avformat );
void hb_muxmp4_process_subtitle_style( uint8_t *input,
uint8_t *output,
uint8_t *style, uint16_t *stylesize );
void hb_deinterlace(hb_buffer_t *dst, hb_buffer_t *src);
HandBrake-0.10.2/libhb/oclnv12toyuv.c 0000664 0001752 0001752 00000024064 12463330511 017662 0 ustar handbrake handbrake /* oclnv12toyuv.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
Authors: Peng Gao
Li Cao
*/
#ifdef USE_HWD
#include "opencl.h"
#include "vadxva2.h"
#include "oclnv12toyuv.h"
/**
* It creates are opencl bufs w is input frame width, h is input frame height
*/
static int hb_nv12toyuv_create_cl_buf( KernelEnv *kenv, int w, int h, hb_va_dxva2_t *dxva2 );
/**
* It creates are opencl kernel. kernel name is nv12toyuv
*/
static int hb_nv12toyuv_create_cl_kernel( KernelEnv *kenv, hb_va_dxva2_t *dxva2 );
/**
* It set opencl arg, input data,output data, input width, output height
*/
static int hb_nv12toyuv_setkernelarg( KernelEnv *kenv, int w, int h, hb_va_dxva2_t *dxva2 );
/**
* It initialize nv12 to yuv kernel.
*/
static int hb_init_nv12toyuv_ocl( KernelEnv *kenv, int w, int h, hb_va_dxva2_t *dxva2 );
/**
* Run nv12 to yuv kernel.
*/
static int hb_nv12toyuv( void **userdata, KernelEnv *kenv );
/**
* register nv12 to yuv kernel.
*/
static int hb_nv12toyuv_reg_kernel( void );
/**
 * Create the OpenCL input (NV12) and output (planar YUV) buffers for a
 * frame of w x h pixels (w*h*3/2 bytes each for 4:2:0 data).
 * Returns 0 on success, non-zero on failure.
 */
static int hb_nv12toyuv_create_cl_buf( KernelEnv *kenv, int w, int h, hb_va_dxva2_t *dxva2 )
{
    if (hb_ocl == NULL)
    {
        // Fix: error message previously named hb_nv12toyuv_create_cl_kernel.
        hb_error("hb_nv12toyuv_create_cl_buf: OpenCL support not available");
        return 1;
    }

    // 'status' is used by the HB_OCL_BUF_CREATE error-checking macro.
    cl_int status = CL_SUCCESS;
    int in_bytes = w*h*3/2;
    HB_OCL_BUF_CREATE(hb_ocl, dxva2->cl_mem_nv12, CL_MEM_READ_ONLY|CL_MEM_ALLOC_HOST_PTR, in_bytes);
    HB_OCL_BUF_CREATE(hb_ocl, dxva2->cl_mem_yuv,  CL_MEM_READ_WRITE|CL_MEM_ALLOC_HOST_PTR, in_bytes);
    return 0;
}
/**
 * Build the "nv12toyuv" kernel from the compiled OpenCL program.
 * Returns the status from clCreateKernel (CL_SUCCESS == 0 on success),
 * or 1 when OpenCL support is unavailable.
 */
static int hb_nv12toyuv_create_cl_kernel( KernelEnv *kenv, hb_va_dxva2_t *dxva2 )
{
    if (hb_ocl == NULL)
    {
        hb_error("hb_nv12toyuv_create_cl_kernel: OpenCL support not available");
        return 1;
    }

    int status;
    dxva2->nv12toyuv = hb_ocl->clCreateKernel(kenv->program, "nv12toyuv", &status);
    return status;
}
/**
 * Bind the kernel arguments: input NV12 buffer, output YUV buffer,
 * frame width and frame height.
 * Returns 0 on success, non-zero on failure.
 */
static int hb_nv12toyuv_setkernelarg( KernelEnv *kenv, int w, int h, hb_va_dxva2_t *dxva2 )
{
    // 'status' is used by the HB_OCL_CHECK error-checking macro.
    int status;
    int argn = 0;

    kenv->kernel = dxva2->nv12toyuv;
    if (hb_ocl == NULL)
    {
        hb_error("hb_nv12toyuv_setkernelarg: OpenCL support not available");
        return 1;
    }

    HB_OCL_CHECK(hb_ocl->clSetKernelArg, kenv->kernel, argn++, sizeof(cl_mem), &dxva2->cl_mem_nv12);
    HB_OCL_CHECK(hb_ocl->clSetKernelArg, kenv->kernel, argn++, sizeof(cl_mem), &dxva2->cl_mem_yuv);
    HB_OCL_CHECK(hb_ocl->clSetKernelArg, kenv->kernel, argn++, sizeof(int),    &w);
    HB_OCL_CHECK(hb_ocl->clSetKernelArg, kenv->kernel, argn++, sizeof(int),    &h);
    return 0;
}
/**
 * Lazily initialize the nv12-to-yuv OpenCL state: device buffers, the
 * kernel object, and two host-side staging buffers of w*h*3/2 bytes.
 * Initialization runs only once (guarded by dxva2->nv12toyuv).
 * Returns 0 on success, -1 on failure.
 *
 * Fix: the two malloc() results were previously used unchecked.
 */
static int hb_init_nv12toyuv_ocl( KernelEnv *kenv, int w, int h, hb_va_dxva2_t *dxva2 )
{
    if( !dxva2->nv12toyuv )
    {
        if( hb_nv12toyuv_create_cl_buf( kenv, w, h, dxva2 ) )
        {
            hb_log( "OpenCL: nv12toyuv_create_cl_buf fail" );
            return -1;
        }
        if (!dxva2->nv12toyuv_tmp_in)
        {
            dxva2->nv12toyuv_tmp_in = malloc (w*h*3/2);
            if (!dxva2->nv12toyuv_tmp_in)
            {
                hb_log( "OpenCL: nv12toyuv staging buffer allocation failed" );
                return -1;
            }
        }
        if (!dxva2->nv12toyuv_tmp_out)
        {
            dxva2->nv12toyuv_tmp_out = malloc (w*h*3/2);
            if (!dxva2->nv12toyuv_tmp_out)
            {
                hb_log( "OpenCL: nv12toyuv staging buffer allocation failed" );
                return -1;
            }
        }
        hb_nv12toyuv_create_cl_kernel( kenv, dxva2 );
    }
    return 0;
}
/**
 * Copy one video plane row by row between buffers with possibly
 * different strides.
 * @param dst      destination base pointer
 * @param src      source base pointer
 * @param dstride  destination stride in bytes
 * @param sstride  source stride in bytes
 * @param h        number of rows to copy
 * @return pointer just past the last destination row written
 *
 * Each row copies min(dstride, sstride) bytes; when the strides match the
 * whole plane is copied in a single memcpy.
 */
static uint8_t *copy_plane( uint8_t *dst, uint8_t* src, int dstride, int sstride,
                            int h )
{
    if ( dstride == sstride )
    {
        // Rows are contiguous in both buffers: one bulk copy.
        memcpy( dst, src, dstride * h );
        return dst + dstride * h;
    }

    int row_bytes = ( dstride <= sstride ) ? dstride : sstride;
    int row;
    for ( row = 0; row < h; row++ )
    {
        memcpy( dst, src, row_bytes );
        src += sstride;
        dst += dstride;
    }
    return dst;
}
/**
 * Run nv12 to yuv kernel.
 *
 * userdata layout (packed by hb_ocl_nv12toyuv):
 *   [0] frame width (int)        [1] frame height (int)
 *   [2] luma plane pointer       [3] crop[4] (top,bottom,left,right order
 *                                    per usage below — TODO confirm)
 *   [4] dxva2 state              [5] interleaved chroma plane pointer
 *   [6] input line pitch (int)   [7] decomb flag   [8] detelecine flag
 *
 * Uploads the NV12 frame, runs the conversion kernel, and — when cropping
 * is requested and no decomb/detelecine will run — reads the result back,
 * crops it on the CPU, and re-uploads the cropped frame.
 * Returns 0 on success, -1 on failure.
 */
static int hb_nv12toyuv( void **userdata, KernelEnv *kenv )
{
    // 'status' is used by the HB_OCL_CHECK error-checking macro.
    int status;

    // Unpack the argument array (ints were smuggled through the void*s).
    int w = (int)userdata[0];
    int h = (int)userdata[1];
    uint8_t *bufi1 = userdata[2];
    int *crop = userdata[3];
    hb_va_dxva2_t *dxva2 = userdata[4];
    uint8_t *bufi2 = userdata[5];
    int p = (int)userdata[6];
    int decomb = (int)userdata[7];
    int detelecine = (int)userdata[8];
    int i;

    if( hb_init_nv12toyuv_ocl( kenv, w, h, dxva2 ) )
    {
        return -1;
    }

    if( hb_nv12toyuv_setkernelarg( kenv, w, h, dxva2 ) )
    {
        return -1;
    }

    if (hb_ocl == NULL)
    {
        hb_error("hb_nv12toyuv: OpenCL support not available");
        return -1;
    }

    int in_bytes = w*h*3/2;
    if( kenv->isAMD )
    {
        // AMD path: map the CL buffer and copy the planes straight in.
        void *data = hb_ocl->clEnqueueMapBuffer(kenv->command_queue,
                                                dxva2->cl_mem_nv12,
                                                CL_MAP_WRITE_INVALIDATE_REGION,
                                                CL_TRUE, 0, in_bytes, 0, NULL, NULL, NULL);
        for ( i = 0; i < dxva2->height; i++ )
        {
            memcpy( data + i * dxva2->width, bufi1 + i * p, dxva2->width );
            if ( i < dxva2->height >> 1 )
            {
                // Chroma plane is half height, appended after the luma.
                memcpy( data + ( dxva2->width * dxva2->height ) + i * dxva2->width, bufi2 + i * p, dxva2->width );
            }
        }
        hb_ocl->clEnqueueUnmapMemObject(kenv->command_queue, dxva2->cl_mem_nv12,
                                        data, 0, NULL, NULL);
    }
    else
    {
        // Generic path: pack into a temp buffer, then a blocking write.
        uint8_t *tmp = (uint8_t*)malloc( dxva2->width * dxva2->height * 3 / 2 );
        for( i = 0; i < dxva2->height; i++ )
        {
            memcpy( tmp + i * dxva2->width, bufi1 + i * p, dxva2->width );
            if( i < dxva2->height >> 1 )
            {
                memcpy( tmp + (dxva2->width * dxva2->height) + i * dxva2->width, bufi2 + i * p, dxva2->width );
            }
        }
        HB_OCL_CHECK(hb_ocl->clEnqueueWriteBuffer, kenv->command_queue,
                     dxva2->cl_mem_nv12, CL_TRUE, 0, in_bytes, tmp, 0, NULL, NULL);
        free( tmp );
    }

    // One work item per 2x2 pixel block.
    size_t gdim[2] = {w>>1, h>>1};
    HB_OCL_CHECK(hb_ocl->clEnqueueNDRangeKernel, kenv->command_queue,
                 kenv->kernel, 2, NULL, gdim, NULL, 0, NULL, NULL );

    if( (crop[0] || crop[1] || crop[2] || crop[3]) && (decomb == 0) && (detelecine == 0) )
    {
        // Crop on the CPU: read the converted frame back, crop via
        // av_picture_crop, then re-upload the cropped planes.
        AVPicture pic_in;
        AVPicture pic_crop;
        hb_ocl->clEnqueueReadBuffer(kenv->command_queue, dxva2->cl_mem_yuv,
                                    CL_TRUE, 0, in_bytes, dxva2->nv12toyuv_tmp_out,
                                    0, NULL, NULL);
        hb_buffer_t *in = hb_video_buffer_init( w, h );
        int wmp = in->plane[0].stride;
        int hmp = in->plane[0].height;
        copy_plane( in->plane[0].data, dxva2->nv12toyuv_tmp_out, wmp, w, hmp );
        wmp = in->plane[1].stride;
        hmp = in->plane[1].height;
        copy_plane( in->plane[1].data, dxva2->nv12toyuv_tmp_out + w * h, wmp, w>>1, hmp );
        wmp = in->plane[2].stride;
        hmp = in->plane[2].height;
        copy_plane( in->plane[2].data, dxva2->nv12toyuv_tmp_out + w * h +( ( w * h )>>2 ), wmp, w>>1, hmp );
        hb_avpicture_fill( &pic_in, in );
        av_picture_crop( &pic_crop, &pic_in, in->f.fmt, crop[0], crop[2] );
        int i, ww = w - ( crop[2] + crop[3] ), hh = h - ( crop[0] + crop[1] );
        // Repack the cropped picture as contiguous planar YUV, two luma
        // rows plus one chroma row of each plane per iteration.
        for( i = 0; i< hh >> 1; i++ )
        {
            memcpy( dxva2->nv12toyuv_tmp_in + ( ( i << 1 ) + 0 ) * ww, pic_crop.data[0]+ ( ( i << 1 ) + 0 ) * pic_crop.linesize[0], ww );
            memcpy( dxva2->nv12toyuv_tmp_in + ( ( i << 1 ) + 1 ) * ww, pic_crop.data[0]+ ( ( i << 1 ) + 1 ) * pic_crop.linesize[0], ww );
            memcpy( dxva2->nv12toyuv_tmp_in + ( ww * hh ) + i * ( ww >> 1 ), pic_crop.data[1] + i * pic_crop.linesize[1], ww >> 1 );
            memcpy( dxva2->nv12toyuv_tmp_in + ( ww * hh ) + ( ( ww * hh )>>2 ) + i * ( ww >> 1 ), pic_crop.data[2] + i * pic_crop.linesize[2], ww >> 1 );
        }

        if( kenv->isAMD )
        {
            void *data = hb_ocl->clEnqueueMapBuffer(kenv->command_queue,
                                                    dxva2->cl_mem_yuv,
                                                    CL_MAP_WRITE_INVALIDATE_REGION,
                                                    CL_TRUE, 0, ww * hh * 3 / 2, 0,
                                                    NULL, NULL, NULL);
            memcpy( data, dxva2->nv12toyuv_tmp_in, ww * hh * 3 / 2 );
            hb_ocl->clEnqueueUnmapMemObject(kenv->command_queue,
                                            dxva2->cl_mem_yuv, data, 0, NULL, NULL);
        }
        else
        {
            // NOTE(review): writes in_bytes (full-frame size) rather than the
            // cropped size ww*hh*3/2 used on the AMD path — confirm intended.
            HB_OCL_CHECK(hb_ocl->clEnqueueWriteBuffer, kenv->command_queue,
                         dxva2->cl_mem_yuv, CL_TRUE, 0, in_bytes,
                         dxva2->nv12toyuv_tmp_in, 0, NULL, NULL);
        }
        hb_buffer_close( &in );
    }
    return 0;
}
/**
* register nv12 to yuv kernel.
*/
static int hb_nv12toyuv_reg_kernel( void )
{
    // Register the "nv12toyuv" OpenCL kernel wrapper; 0 on success, -1 on failure.
    if (hb_register_kernel_wrapper("nv12toyuv", hb_nv12toyuv))
    {
        return 0;
    }
    hb_log("OpenCL: register kernel[%s] failed", "nv12toyuv");
    return -1;
}
/**
* nv12 to yuv interface
* bufi is input frame of nv12, w is input frame width, h is input frame height
*/
int hb_ocl_nv12toyuv( uint8_t *bufi[], int p, int w, int h, int *crop, hb_va_dxva2_t *dxva2, int decomb, int detelecine )
{
    /*
     * Pack the kernel arguments into an opaque userdata array and run the
     * nv12->yuv conversion kernel. Scalar ints are smuggled through void*
     * slots (matching what the kernel wrapper unpacks on the other side).
     *
     * Fix: decomb/detelecine were assigned to void* without a cast, which
     * violates C's assignment constraints; cast them like w/h/p are.
     */
    void *userdata[9];
    userdata[0] = (void*)w;
    userdata[1] = (void*)h;
    userdata[2] = bufi[0];         // luma (Y) plane
    userdata[3] = crop;
    userdata[4] = dxva2;
    userdata[5] = bufi[1];         // interleaved chroma (UV) plane
    userdata[6] = (void*)p;        // input stride (pitch)
    userdata[7] = (void*)decomb;
    userdata[8] = (void*)detelecine;
    if( hb_nv12toyuv_reg_kernel() )
    {
        return -1;
    }
    if( hb_run_kernel( "nv12toyuv", userdata ) )
    {
        hb_log( "OpenCL: run kernel[nv12toyuv] failed" );
        return -1;
    }
    return 0;
}
#endif // USE_HWD
HandBrake-0.10.2/libhb/lang.c 0000664 0001752 0001752 00000021511 12463330511 016202 0 ustar handbrake handbrake /* lang.h
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "lang.h"
#include <string.h>  /* strcmp, strncmp */
#include <ctype.h>   /* tolower */
/* ISO 639 language table. Each entry is:
 * { English name, native name, ISO 639-1, ISO 639-2/T, ISO 639-2/B (only when
 *   it differs from the T code) }.
 * The list is terminated by an all-NULL sentinel, which lang_count includes.
 * Fix: the Malay entry repeated the T code "msa" in the B slot; the correct
 * ISO 639-2/B code for Malay is "may". */
static const iso639_lang_t languages[] =
{ { "Unknown", "", "", "und" },
{ "Afar", "", "aa", "aar" },
{ "Abkhazian", "", "ab", "abk" },
{ "Afrikaans", "", "af", "afr" },
{ "Akan", "", "ak", "aka" },
{ "Albanian", "", "sq", "sqi", "alb" },
{ "Amharic", "", "am", "amh" },
{ "Arabic", "", "ar", "ara" },
{ "Aragonese", "", "an", "arg" },
{ "Armenian", "", "hy", "hye", "arm" },
{ "Assamese", "", "as", "asm" },
{ "Avaric", "", "av", "ava" },
{ "Avestan", "", "ae", "ave" },
{ "Aymara", "", "ay", "aym" },
{ "Azerbaijani", "", "az", "aze" },
{ "Bashkir", "", "ba", "bak" },
{ "Bambara", "", "bm", "bam" },
{ "Basque", "", "eu", "eus", "baq" },
{ "Belarusian", "", "be", "bel" },
{ "Bengali", "", "bn", "ben" },
{ "Bihari", "", "bh", "bih" },
{ "Bislama", "", "bi", "bis" },
{ "Bosnian", "", "bs", "bos" },
{ "Breton", "", "br", "bre" },
{ "Bulgarian", "", "bg", "bul" },
{ "Burmese", "", "my", "mya", "bur" },
{ "Catalan", "", "ca", "cat" },
{ "Chamorro", "", "ch", "cha" },
{ "Chechen", "", "ce", "che" },
{ "Chinese", "", "zh", "zho", "chi" },
{ "Church Slavic", "", "cu", "chu" },
{ "Chuvash", "", "cv", "chv" },
{ "Cornish", "", "kw", "cor" },
{ "Corsican", "", "co", "cos" },
{ "Cree", "", "cr", "cre" },
{ "Czech", "", "cs", "ces", "cze" },
{ "Danish", "Dansk", "da", "dan" },
{ "Divehi", "", "dv", "div" },
{ "Dutch", "Nederlands", "nl", "nld", "dut" },
{ "Dzongkha", "", "dz", "dzo" },
{ "English", "English", "en", "eng" },
{ "Esperanto", "", "eo", "epo" },
{ "Estonian", "", "et", "est" },
{ "Ewe", "", "ee", "ewe" },
{ "Faroese", "", "fo", "fao" },
{ "Fijian", "", "fj", "fij" },
{ "Finnish", "Suomi", "fi", "fin" },
{ "French", "Francais", "fr", "fra", "fre" },
{ "Western Frisian", "", "fy", "fry" },
{ "Fulah", "", "ff", "ful" },
{ "Georgian", "", "ka", "kat", "geo" },
{ "German", "Deutsch", "de", "deu", "ger" },
{ "Gaelic (Scots)", "", "gd", "gla" },
{ "Irish", "", "ga", "gle" },
{ "Galician", "", "gl", "glg" },
{ "Manx", "", "gv", "glv" },
{ "Greek, Modern", "", "el", "ell", "gre" },
{ "Guarani", "", "gn", "grn" },
{ "Gujarati", "", "gu", "guj" },
{ "Haitian", "", "ht", "hat" },
{ "Hausa", "", "ha", "hau" },
{ "Hebrew", "", "he", "heb" },
{ "Herero", "", "hz", "her" },
{ "Hindi", "", "hi", "hin" },
{ "Hiri Motu", "", "ho", "hmo" },
{ "Hungarian", "Magyar", "hu", "hun" },
{ "Igbo", "", "ig", "ibo" },
{ "Icelandic", "Islenska", "is", "isl", "ice" },
{ "Ido", "", "io", "ido" },
{ "Sichuan Yi", "", "ii", "iii" },
{ "Inuktitut", "", "iu", "iku" },
{ "Interlingue", "", "ie", "ile" },
{ "Interlingua", "", "ia", "ina" },
{ "Indonesian", "", "id", "ind" },
{ "Inupiaq", "", "ik", "ipk" },
{ "Italian", "Italiano", "it", "ita" },
{ "Javanese", "", "jv", "jav" },
{ "Japanese", "", "ja", "jpn" },
{ "Kalaallisut (Greenlandic)", "", "kl", "kal" },
{ "Kannada", "", "kn", "kan" },
{ "Kashmiri", "", "ks", "kas" },
{ "Kanuri", "", "kr", "kau" },
{ "Kazakh", "", "kk", "kaz" },
{ "Central Khmer", "", "km", "khm" },
{ "Kikuyu", "", "ki", "kik" },
{ "Kinyarwanda", "", "rw", "kin" },
{ "Kirghiz", "", "ky", "kir" },
{ "Komi", "", "kv", "kom" },
{ "Kongo", "", "kg", "kon" },
{ "Korean", "", "ko", "kor" },
{ "Kuanyama", "", "kj", "kua" },
{ "Kurdish", "", "ku", "kur" },
{ "Lao", "", "lo", "lao" },
{ "Latin", "", "la", "lat" },
{ "Latvian", "", "lv", "lav" },
{ "Limburgan", "", "li", "lim" },
{ "Lingala", "", "ln", "lin" },
{ "Lithuanian", "", "lt", "lit" },
{ "Luxembourgish", "", "lb", "ltz" },
{ "Luba-Katanga", "", "lu", "lub" },
{ "Ganda", "", "lg", "lug" },
{ "Macedonian", "", "mk", "mkd", "mac" },
{ "Marshallese", "", "mh", "mah" },
{ "Malayalam", "", "ml", "mal" },
{ "Maori", "", "mi", "mri", "mao" },
{ "Marathi", "", "mr", "mar" },
{ "Malay", "", "ms", "msa", "may" },
{ "Malagasy", "", "mg", "mlg" },
{ "Maltese", "", "mt", "mlt" },
{ "Moldavian", "", "mo", "mol" },
{ "Mongolian", "", "mn", "mon" },
{ "Nauru", "", "na", "nau" },
{ "Navajo", "", "nv", "nav" },
{ "Ndebele, South", "", "nr", "nbl" },
{ "Ndebele, North", "", "nd", "nde" },
{ "Ndonga", "", "ng", "ndo" },
{ "Nepali", "", "ne", "nep" },
{ "Norwegian Nynorsk", "", "nn", "nno" },
{ "Norwegian Bokmål", "", "nb", "nob" },
{ "Norwegian", "Norsk", "no", "nor" },
{ "Chichewa; Nyanja", "", "ny", "nya" },
{ "Occitan (post 1500); Provençal", "", "oc", "oci" },
{ "Ojibwa", "", "oj", "oji" },
{ "Oriya", "", "or", "ori" },
{ "Oromo", "", "om", "orm" },
{ "Ossetian; Ossetic", "", "os", "oss" },
{ "Panjabi", "", "pa", "pan" },
{ "Persian", "", "fa", "fas", "per" },
{ "Pali", "", "pi", "pli" },
{ "Polish", "", "pl", "pol" },
{ "Portuguese", "Portugues", "pt", "por" },
{ "Pushto", "", "ps", "pus" },
{ "Quechua", "", "qu", "que" },
{ "Romansh", "", "rm", "roh" },
{ "Romanian", "", "ro", "ron", "rum" },
{ "Rundi", "", "rn", "run" },
{ "Russian", "", "ru", "rus" },
{ "Sango", "", "sg", "sag" },
{ "Sanskrit", "", "sa", "san" },
{ "Serbian", "", "sr", "srp", "scc" },
{ "Croatian", "Hrvatski", "hr", "hrv", "scr" },
{ "Sinhala", "", "si", "sin" },
{ "Slovak", "", "sk", "slk", "slo" },
{ "Slovenian", "", "sl", "slv" },
{ "Northern Sami", "", "se", "sme" },
{ "Samoan", "", "sm", "smo" },
{ "Shona", "", "sn", "sna" },
{ "Sindhi", "", "sd", "snd" },
{ "Somali", "", "so", "som" },
{ "Sotho, Southern", "", "st", "sot" },
{ "Spanish", "Espanol", "es", "spa" },
{ "Sardinian", "", "sc", "srd" },
{ "Swati", "", "ss", "ssw" },
{ "Sundanese", "", "su", "sun" },
{ "Swahili", "", "sw", "swa" },
{ "Swedish", "Svenska", "sv", "swe" },
{ "Tahitian", "", "ty", "tah" },
{ "Tamil", "", "ta", "tam" },
{ "Tatar", "", "tt", "tat" },
{ "Telugu", "", "te", "tel" },
{ "Tajik", "", "tg", "tgk" },
{ "Tagalog", "", "tl", "tgl" },
{ "Thai", "", "th", "tha" },
{ "Tibetan", "", "bo", "bod", "tib" },
{ "Tigrinya", "", "ti", "tir" },
{ "Tonga (Tonga Islands)", "", "to", "ton" },
{ "Tswana", "", "tn", "tsn" },
{ "Tsonga", "", "ts", "tso" },
{ "Turkmen", "", "tk", "tuk" },
{ "Turkish", "", "tr", "tur" },
{ "Twi", "", "tw", "twi" },
{ "Uighur", "", "ug", "uig" },
{ "Ukrainian", "", "uk", "ukr" },
{ "Urdu", "", "ur", "urd" },
{ "Uzbek", "", "uz", "uzb" },
{ "Venda", "", "ve", "ven" },
{ "Vietnamese", "", "vi", "vie" },
{ "Volapük", "", "vo", "vol" },
{ "Welsh", "", "cy", "cym", "wel" },
{ "Walloon", "", "wa", "wln" },
{ "Wolof", "", "wo", "wol" },
{ "Xhosa", "", "xh", "xho" },
{ "Yiddish", "", "yi", "yid" },
{ "Yoruba", "", "yo", "yor" },
{ "Zhuang", "", "za", "zha" },
{ "Zulu", "", "zu", "zul" },
{ NULL, NULL, NULL } };   // sentinel (remaining fields zero-initialized)
// Number of table slots, INCLUDING the NULL sentinel (lang_get_next relies on this).
static const int lang_count = sizeof(languages) / sizeof(languages[0]);
/*
 * Look up a language by its two-character ISO 639-1 code packed into an int
 * (high byte first). Returns the "Unknown" entry (table head) when no
 * entry matches.
 */
iso639_lang_t * lang_for_code( int code )
{
    iso639_lang_t *lang;
    char pair[2];

    pair[0] = tolower((code >> 8) & 0xFF);
    pair[1] = tolower(code & 0xFF);

    for (lang = (iso639_lang_t*)languages; lang->eng_name != NULL; lang++)
    {
        if (strncmp(lang->iso639_1, pair, 2) == 0)
        {
            return lang;
        }
    }
    return (iso639_lang_t*)languages;
}
/*
 * Look up a language by its three-character ISO 639-2 code (either the T
 * or the B variant). `code` must supply at least three characters.
 * Returns the "Unknown" entry (table head) when no entry matches.
 */
iso639_lang_t * lang_for_code2( const char *code )
{
    iso639_lang_t *lang;
    char key[4];

    key[0] = tolower(code[0]);
    key[1] = tolower(code[1]);
    key[2] = tolower(code[2]);
    key[3] = '\0';

    for (lang = (iso639_lang_t*)languages; lang->eng_name != NULL; lang++)
    {
        if (strcmp(lang->iso639_2, key) == 0 ||
            (lang->iso639_2b != NULL && strcmp(lang->iso639_2b, key) == 0))
        {
            return lang;
        }
    }
    return (iso639_lang_t*)languages;
}
int lang_to_code(const iso639_lang_t *lang)
{
int code = 0;
if (lang)
code = (lang->iso639_1[0] << 8) | lang->iso639_1[1];
return code;
}
/*
 * Look up a language by its exact English name. Returns the "Unknown"
 * entry (table head) when no entry matches.
 */
iso639_lang_t * lang_for_english( const char * english )
{
    iso639_lang_t *lang = (iso639_lang_t*)languages;

    for ( ; lang->eng_name != NULL; lang++)
    {
        if (strcmp(english, lang->eng_name) == 0)
        {
            return lang;
        }
    }
    return (iso639_lang_t*)languages;
}
/*
 * Iterate over the language table: pass NULL to get the first entry, then
 * the previous return value to advance. Returns NULL after the last real
 * language (the NULL sentinel is never handed out) or for an out-of-range
 * pointer.
 */
const iso639_lang_t* lang_get_next(const iso639_lang_t *last)
{
    if (last == NULL)
    {
        return languages;                        // start of table
    }
    if (last <  languages ||                     // out of bounds
        last >= languages + lang_count - 2)      // already at last valid entry
    {
        return NULL;
    }
    return last + 1;
}
HandBrake-0.10.2/libhb/encx264.h 0000664 0001752 0001752 00000006021 12463330511 016456 0 ustar handbrake handbrake /* encx264.h
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "x264.h"
#include "h264_common.h"
/* x264 preferred option names (left) and synonyms (right).
* The "preferred" names match names used in x264's param2string function more
* closely than their corresponding synonyms, or are just shorter. */
static const char * const hb_x264_encopt_synonyms[][2] =
{
    { "deterministic",  "n-deterministic", },
    { "level",          "level-idc",       },
    { "ref",            "frameref",        },
    { "keyint-min",     "min-keyint",      },
    { "no-deblock",     "nf",              },
    { "deblock",        "filter",          },
    { "cqm",            "cqmfile",         },
    { "analyse",        "partitions",      },
    { "weightb",        "weight-b",        },
    { "direct",         "direct-pred",     },
    { "merange",        "me-range",        },
    { "mvrange",        "mv-range",        },
    { "mvrange-thread", "mv-range-thread", },
    { "subme",          "subq",            },
    { "qp",             "qp_constant",     },
    { "qpmin",          "qp-min",          },
    { "qpmax",          "qp-max",          },
    { "qpstep",         "qp-step",         },
    { "ipratio",        "ip-factor",       },
    { "pbratio",        "pb-factor",       },
    { "cplxblur",       "cplx-blur",       },
    { NULL,             NULL,              },  // sentinel terminates the table
};
/*
* Check whether a valid h264_level is compatible with the given framerate,
* resolution and interlaced compression/flags combination.
*
* width, height, fps_num and fps_den should be greater than zero.
*
* interlacing parameters can be set to zero when the information is
* unavailable, as hb_apply_h264_level() will disable interlacing if necessary.
*
* Returns 0 if the level is valid and compatible, 1 otherwise.
*/
int hb_check_h264_level(const char *h264_level, int width, int height,
int fps_num, int fps_den, int interlaced,
int fake_interlaced);
/*
* Applies the restrictions of the requested H.264 level to an x264_param_t.
*
* Returns -1 if an invalid level (or no level) is specified. GUIs should be
* capable of always providing a valid level.
*
* Does not modify resolution/framerate but warns when they exceed level limits.
*
* Based on a x264_param_apply_level() draft and other x264 code.
*/
int hb_apply_h264_level(x264_param_t *param, const char *h264_level,
const char *x264_profile, int verbose);
/*
* Applies the restrictions of the requested H.264 profile to an x264_param_t.
*
* x264_param_apply_profile wrapper designed to always succeed when a valid
* H.264 profile is specified (unlike x264's function).
*/
int hb_apply_h264_profile(x264_param_t *param, const char *h264_profile,
int verbose);
HandBrake-0.10.2/libhb/decavcodec.c 0000664 0001752 0001752 00000226574 12464263311 017365 0 ustar handbrake handbrake /* decavcodec.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
/* This module is Handbrake's interface to the ffmpeg decoder library
(libavcodec & small parts of libavformat). It contains four Handbrake
"work objects":
decavcodeca connects HB to an ffmpeg audio decoder
decavcodecv connects HB to an ffmpeg video decoder
(Two different routines are needed because the ffmpeg library
has different decoder calling conventions for audio & video.
These work objects are self-contained & follow all
of HB's conventions for a decoder module. They can be used like
any other HB decoder
These decoders handle 2 kinds of input. Streams that are demuxed
by HandBrake and streams that are demuxed by libavformat. In the
case of streams that are demuxed by HandBrake, there is an extra
parse step required that happens in decodeVideo and decavcodecaWork.
In the case of streams that are demuxed by libavformat, there is context
information that we need from the libavformat. This information is
propagated from hb_stream_open to these decoders through title->opaque_priv.
A consequence of the above is that the streams that are demuxed by HandBrake
*can't* use information from the AVStream because there isn't one - they
get their data from either the dvd reader or the mpeg reader, not the ffmpeg
stream reader. That means that they have to make up for deficiencies in the
AVCodecContext info by using stuff kept in the HB "title" struct. It
also means that ffmpeg codecs that randomly scatter state needed by
the decoder across both the AVCodecContext & the AVStream (e.g., the
VC1 decoder) can't easily be used by the HB mpeg stream reader.
*/
#include "hb.h"
#include "hbffmpeg.h"
#include "audio_resample.h"
#ifdef USE_HWD
#include "opencl.h"
#include "vadxva2.h"
#endif
#ifdef USE_QSV
#include "qsv_common.h"
#endif
static void compute_frame_duration( hb_work_private_t *pv );
static void flushDelayQueue( hb_work_private_t *pv );
static int decavcodecaInit( hb_work_object_t *, hb_job_t * );
static int decavcodecaWork( hb_work_object_t *, hb_buffer_t **, hb_buffer_t ** );
static void decavcodecClose( hb_work_object_t * );
static int decavcodecaInfo( hb_work_object_t *, hb_work_info_t * );
static int decavcodecaBSInfo( hb_work_object_t *, const hb_buffer_t *, hb_work_info_t * );
// Work-object descriptor wiring HandBrake's pipeline to the libavcodec
// audio decoder; the function pointers are defined later in this file.
hb_work_object_t hb_decavcodeca =
{
    .id     = WORK_DECAVCODEC,
    .name   = "Audio decoder (libavcodec)",
    .init   = decavcodecaInit,
    .work   = decavcodecaWork,
    .close  = decavcodecClose,
    .info   = decavcodecaInfo,
    .bsinfo = decavcodecaBSInfo
};
#define HEAP_SIZE 8
// Fixed-size min-heap of timestamps (heap_push orders so h[1] is the smallest).
typedef struct {
    // there are nheap items on the heap indexed 1..nheap (i.e., top of
    // heap is 1). The 0th slot is unused - a marker is put there to check
    // for overwrite errs.
    int64_t h[HEAP_SIZE+1];
    int     nheap;   // current number of items (0 == empty)
} pts_heap_t;
// Per-instance state shared by the audio and video libavcodec decoders.
struct hb_work_private_s
{
    hb_job_t             *job;
    hb_title_t           *title;
    AVCodecContext       *context;           // libavcodec decode context
    AVCodecParserContext *parser;            // only set when HB does its own demuxing
    AVFrame              *frame;             // reusable decode output frame
    hb_buffer_t          *palette;
    int                   threads;
    int                   video_codec_opened;
    hb_list_t            *list;              // accumulated output buffers (drained by link_buf_list)
    double                duration;          // frame duration (for video)
    double                field_duration;    // field duration (for video)
    int                   frame_duration_set;// Indicates valid timing was found in stream
    double                pts_next;          // next pts we expect to generate
    int64_t               chap_time;         // time of next chap mark (if new_chap != 0)
    int                   new_chap;          // output chapter mark pending
    uint32_t              nframes;           // decoded-frame counter (logged at close)
    uint32_t              ndrops;            // dropped-frame counter (logged at close)
    uint32_t              decode_errors;     // decode-error counter (logged at close)
    int64_t               prev_pts;
    int                   brokenTS;          // video stream may contain packed b-frames
    hb_buffer_t          *delayq[HEAP_SIZE]; // frame reorder queue, paired with pts_heap
    int                   queue_primed;
    pts_heap_t            pts_heap;          // min-heap of pending output timestamps
    void                 *buffer;
    struct SwsContext    *sws_context;       // if we have to rescale or convert color space
    int                   sws_width;
    int                   sws_height;
    int                   sws_pix_fmt;
    int                   cadence[12];
    int                   wait_for_keyframe;
#ifdef USE_HWD
    hb_va_dxva2_t        *dxva2;
    uint8_t              *dst_frame;         // hardware-decode scratch frame (freed in decavcodecClose)
    hb_oclscale_t        *opencl_scale;
#endif
    hb_audio_resample_t  *resample;
#ifdef USE_QSV
    // QSV-specific settings
    struct
    {
        int             decode;
        av_qsv_config   config;
        const char     *codec_name;
#define USE_QSV_PTS_WORKAROUND // work around out-of-order output timestamps
#ifdef USE_QSV_PTS_WORKAROUND
        hb_list_t      *pts_list;            // sorted list of in-flight pts values
#endif
    } qsv;
#endif
    hb_list_t            *list_subtitle;
};
#ifdef USE_QSV_PTS_WORKAROUND
// save/restore PTS if the decoder may not attach the right PTS to the frame
// Insert a pts into the sorted list, keeping chronological order and
// dropping exact duplicates. AV_NOPTS_VALUE and a NULL list are ignored.
static void hb_av_add_new_pts(hb_list_t *list, int64_t new_pts)
{
    if (list == NULL || new_pts == AV_NOPTS_VALUE)
    {
        return;
    }
    int64_t *entry = malloc(sizeof(int64_t));
    if (entry == NULL)
    {
        return;
    }
    *entry = new_pts;

    // find the insertion point (list is kept sorted ascending)
    int pos;
    for (pos = 0; pos < hb_list_count(list); pos++)
    {
        int64_t *existing = hb_list_item(list, pos);
        if (existing == NULL)
        {
            continue;
        }
        if (*existing == new_pts)
        {
            // already present - keep the list duplicate-free
            free(entry);
            return;
        }
        if (*existing > new_pts)
        {
            break;
        }
    }
    hb_list_insert(list, pos, entry);
}
// Remove and return the earliest pts from the sorted list, or
// AV_NOPTS_VALUE when the list is NULL or empty.
static int64_t hb_av_pop_next_pts(hb_list_t *list)
{
    int64_t result = AV_NOPTS_VALUE;

    if (list == NULL || hb_list_count(list) == 0)
    {
        return result;
    }
    int64_t *head = hb_list_item(list, 0);
    if (head != NULL)
    {
        result = *head;
        hb_list_rem(list, head);
        free(head);
    }
    return result;
}
#endif
static void decodeAudio( hb_audio_t * audio, hb_work_private_t *pv, uint8_t *data, int size, int64_t pts );
static hb_buffer_t *link_buf_list( hb_work_private_t *pv );
// Pop the smallest timestamp off the min-heap (root at h[1]).
// Returns AV_NOPTS_VALUE when the heap is empty.
static int64_t heap_pop( pts_heap_t *heap )
{
    int64_t result;

    if ( heap->nheap <= 0 )
    {
        return AV_NOPTS_VALUE;
    }

    // return the top of the heap then put the bottom element on top,
    // decrease the heap size by one & rebalence the heap.
    result = heap->h[1];

    int64_t v = heap->h[heap->nheap--];
    int parent = 1;
    int child = parent << 1;
    while ( child <= heap->nheap )
    {
        // find the smallest of the two children of parent
        if (child < heap->nheap && heap->h[child] > heap->h[child+1] )
            ++child;

        if (v <= heap->h[child])
            // new item is smaller than either child so it's the new parent.
            break;

        // smallest child is smaller than new item so move it up then
        // check its children.
        int64_t hp = heap->h[child];
        heap->h[parent] = hp;
        parent = child;
        child = parent << 1;
    }
    // settle the relocated bottom element into its final slot
    heap->h[parent] = v;
    return result;
}
// Push a timestamp onto the min-heap. When the heap is already full
// (nheap == HEAP_SIZE) the size is not grown: the bottom slot is simply
// reused, overwriting whatever value was there.
static void heap_push( pts_heap_t *heap, int64_t v )
{
    if ( heap->nheap < HEAP_SIZE )
    {
        ++heap->nheap;
    }

    // stick the new value on the bottom of the heap then bubble it
    // up to its correct spot.
    int child = heap->nheap;
    while (child > 1) {
        int parent = child >> 1;
        if (heap->h[parent] <= v)
            break;
        // move parent down
        int64_t hp = heap->h[parent];
        heap->h[child] = hp;
        child = parent;
    }
    heap->h[child] = v;
}
/***********************************************************************
* hb_work_decavcodec_init
***********************************************************************
*
**********************************************************************/
static int decavcodecaInit( hb_work_object_t * w, hb_job_t * job )
{
    // Set up the libavcodec audio decoder for this work object:
    // allocate private state, pick the codec, configure downmix /
    // channel-layout requests and DRC, then open the codec.
    // Returns 0 on success, 1 on failure.
    AVCodec * codec;

    // NOTE(review): calloc/avcodec_alloc_context3 results are not checked
    // before use here - confirm allocation failure is acceptable to crash on.
    hb_work_private_t * pv = calloc( 1, sizeof( hb_work_private_t ) );
    w->private_data = pv;

    pv->job = job;
    if (job)
        pv->title = job->title;
    else
        pv->title = w->title;
    pv->list  = hb_list_init();

    codec       = avcodec_find_decoder(w->codec_param);
    pv->context = avcodec_alloc_context3(codec);

    if (pv->title->opaque_priv != NULL)
    {
        // stream was demuxed by libavformat: copy its codec parameters
        AVFormatContext *ic = (AVFormatContext*)pv->title->opaque_priv;
        avcodec_copy_context(pv->context, ic->streams[w->audio->id]->codec);
        // libav's eac3 parser toggles the codec_id in the context as
        // it reads eac3 data between AV_CODEC_ID_AC3 and AV_CODEC_ID_EAC3.
        // It detects an AC3 sync pattern sometimes in ac3_sync() which
        // causes it to eventually set avctx->codec_id to AV_CODEC_ID_AC3
        // in ff_aac_ac3_parse(). Since we are parsing some data before
        // we get here, the codec_id may have flipped. This will cause an
        // error in hb_avcodec_open(). So flip it back!
        pv->context->codec_id = w->codec_param;
    }
    else
    {
        // stream is demuxed by HandBrake itself: we need our own parser
        pv->parser = av_parser_init(w->codec_param);
    }
    hb_ff_set_sample_fmt(pv->context, codec, AV_SAMPLE_FMT_FLT);

    /* Downmixing & sample_fmt conversion */
    if (!(w->audio->config.out.codec & HB_ACODEC_PASS_FLAG))
    {
        pv->resample =
            hb_audio_resample_init(AV_SAMPLE_FMT_FLT,
                                   w->audio->config.out.mixdown,
                                   w->audio->config.out.normalize_mix_level);
        if (pv->resample == NULL)
        {
            hb_error("decavcodecaInit: hb_audio_resample_init() failed");
            return 1;
        }
        /*
         * Some audio decoders can downmix using embedded coefficients,
         * or dedicated audio substreams for a specific channel layout.
         *
         * But some will e.g. use normalized mix coefficients unconditionally,
         * so we need to make sure this matches what the user actually requested.
         */
        int avcodec_downmix = 0;
        switch (w->codec_param)
        {
            case AV_CODEC_ID_AC3:
            case AV_CODEC_ID_EAC3:
                avcodec_downmix = w->audio->config.out.normalize_mix_level != 0;
                break;
            case AV_CODEC_ID_DTS:
                avcodec_downmix = w->audio->config.out.normalize_mix_level == 0;
                break;
            case AV_CODEC_ID_TRUEHD:
                avcodec_downmix = (w->audio->config.out.normalize_mix_level == 0     ||
                                   w->audio->config.out.mixdown == HB_AMIXDOWN_MONO  ||
                                   w->audio->config.out.mixdown == HB_AMIXDOWN_DOLBY ||
                                   w->audio->config.out.mixdown == HB_AMIXDOWN_DOLBYPLII);
                break;
            default:
                break;
        }
        if (avcodec_downmix)
        {
            switch (w->audio->config.out.mixdown)
            {
                case HB_AMIXDOWN_MONO:
                    if (w->codec_param == AV_CODEC_ID_TRUEHD)
                    {
                        // libavcodec can't decode TrueHD Mono (bug #356)
                        // work around it by requesting Stereo and downmixing
                        pv->context->request_channel_layout = AV_CH_LAYOUT_STEREO;
                        break;
                    }
                    pv->context->request_channel_layout = AV_CH_LAYOUT_MONO;
                    break;
                // request 5.1 before downmixing to dpl1/dpl2
                case HB_AMIXDOWN_DOLBY:
                case HB_AMIXDOWN_DOLBYPLII:
                    pv->context->request_channel_layout = AV_CH_LAYOUT_5POINT1;
                    break;
                // request the layout corresponding to the selected mixdown
                default:
                    pv->context->request_channel_layout =
                        hb_ff_mixdown_xlat(w->audio->config.out.mixdown, NULL);
                    break;
            }
        }
    }

    // Set decoder opts...
    AVDictionary * av_opts = NULL;
    av_dict_set( &av_opts, "refcounted_frames", "1", 0 );

    // Dynamic Range Compression
    if (w->audio->config.out.dynamic_range_compression >= 0.0f &&
        hb_audio_can_apply_drc(w->audio->config.in.codec,
                               w->audio->config.in.codec_param, 0))
    {
        float drc_scale_max = 1.0f;
        /*
         * avcodec_open will fail if the value for any of the options is out of
         * range, so assume a conservative maximum of 1 and try to determine the
         * option's actual upper limit.
         */
        if (codec != NULL && codec->priv_class != NULL)
        {
            const AVOption *opt;
            opt = av_opt_find2((void*)&codec->priv_class, "drc_scale", NULL,
                               AV_OPT_FLAG_DECODING_PARAM|AV_OPT_FLAG_AUDIO_PARAM,
                               AV_OPT_SEARCH_FAKE_OBJ, NULL);
            if (opt != NULL)
            {
                drc_scale_max = opt->max;
            }
        }
        if (w->audio->config.out.dynamic_range_compression > drc_scale_max)
        {
            hb_log("decavcodecaInit: track %d, sanitizing out-of-range DRC %.2f to %.2f",
                   w->audio->config.out.track,
                   w->audio->config.out.dynamic_range_compression, drc_scale_max);
            w->audio->config.out.dynamic_range_compression = drc_scale_max;
        }

        char drc_scale[5]; // "?.??\n"
        snprintf(drc_scale, sizeof(drc_scale), "%.2f",
                 w->audio->config.out.dynamic_range_compression);
        av_dict_set(&av_opts, "drc_scale", drc_scale, 0);
    }

    if (hb_avcodec_open(pv->context, codec, &av_opts, 0))
    {
        av_dict_free( &av_opts );
        hb_log("decavcodecaInit: avcodec_open failed");
        return 1;
    }
    // avcodec_open populates av_opts with the things it didn't recognize.
    AVDictionaryEntry *t = NULL;
    while ((t = av_dict_get(av_opts, "", t, AV_DICT_IGNORE_SUFFIX)) != NULL)
    {
        hb_log("decavcodecaInit: unknown option '%s'", t->key);
    }
    av_dict_free( &av_opts );

    pv->frame = av_frame_alloc();
    if (pv->frame == NULL)
    {
        hb_log("decavcodecaInit: av_frame_alloc failed");
        return 1;
    }

    return 0;
}
/***********************************************************************
* Close
***********************************************************************
*
**********************************************************************/
// Tear down a decoder's private state: flush queued frames, close the
// codec/parser/scaler, release QSV and hardware-decode resources, then
// free the struct and NULL the caller's pointer.
static void closePrivData( hb_work_private_t ** ppv )
{
    hb_work_private_t * pv = *ppv;

    if ( pv )
    {
        // drain any frames still waiting in the reorder queue / output list
        flushDelayQueue( pv );
        hb_buffer_t *buf = link_buf_list( pv );
        hb_buffer_close( &buf );

        if ( pv->job && pv->context && pv->context->codec )
        {
            hb_log( "%s-decoder done: %u frames, %u decoder errors, %u drops",
                    pv->context->codec->name, pv->nframes, pv->decode_errors,
                    pv->ndrops );
        }
        av_frame_free(&pv->frame);
        if ( pv->sws_context )
        {
            sws_freeContext( pv->sws_context );
        }
        if ( pv->parser )
        {
            av_parser_close(pv->parser);
        }
        if ( pv->context && pv->context->codec )
        {
#ifdef USE_QSV
            /*
             * FIXME: knowingly leaked.
             *
             * If we're using our Libav QSV wrapper, qsv_decode_end() will call
             * MFXClose() on the QSV session. Even if decoding is complete, we
             * still need that session for QSV filtering and/or encoding, so we
             * we can't close the context here until we implement a proper fix.
             */
            if (!pv->qsv.decode)
#endif
            {
                hb_avcodec_close(pv->context);
            }
        }
        if ( pv->context )
        {
            av_freep( &pv->context->extradata );
            av_freep( &pv->context );
        }
        if ( pv->list )
        {
            hb_list_empty( &pv->list );
        }
        hb_audio_resample_free(pv->resample);
#ifdef USE_HWD
        if (pv->opencl_scale != NULL)
        {
            free(pv->opencl_scale);
        }
        if (pv->dxva2 != NULL)
        {
            if (hb_ocl != NULL)
            {
                HB_OCL_BUF_FREE(hb_ocl, pv->dxva2->cl_mem_nv12);
            }
            hb_va_close(pv->dxva2);
        }
#endif
#ifdef USE_QSV_PTS_WORKAROUND
        // release any timestamps still tracked by the QSV pts workaround
        if (pv->qsv.decode && pv->qsv.pts_list != NULL)
        {
            while (hb_list_count(pv->qsv.pts_list) > 0)
            {
                int64_t *item = hb_list_item(pv->qsv.pts_list, 0);
                hb_list_rem(pv->qsv.pts_list, item);
                free(item);
            }
            hb_list_close(&pv->qsv.pts_list);
        }
#endif
        free(pv);
    }
    *ppv = NULL;
}
/*
 * Work-object close callback: free the hardware-decode scratch frame and
 * the private state.
 *
 * Fix: the original dereferenced pv (pv->dst_frame) BEFORE the `if (pv)`
 * NULL check, crashing when private_data is NULL; the free now happens
 * inside the guarded block. (free(NULL) is a no-op, so no inner check is
 * needed.)
 */
static void decavcodecClose( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;

    if ( pv )
    {
#ifdef USE_HWD
        free( pv->dst_frame );
#endif
        closePrivData( &pv );
        w->private_data = NULL;
    }
}
/***********************************************************************
* Work
***********************************************************************
*
**********************************************************************/
// Work-object callback: feed an input buffer through the (optional)
// parser and decoder, emitting any decoded audio on *buf_out.
// A zero/negative-size input buffer signals EOF and is passed downstream.
static int decavcodecaWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                            hb_buffer_t ** buf_out )
{
    hb_work_private_t * pv = w->private_data;
    hb_buffer_t * in = *buf_in;

    if ( in->size <= 0 )
    {
        /* EOF on input stream - send it downstream & say that we're done */
        *buf_out = in;
        *buf_in = NULL;
        return HB_WORK_DONE;
    }

    *buf_out = NULL;

    if ( in->s.start < 0 && pv->pts_next <= 0 )
    {
        // discard buffers that start before video time 0
        return HB_WORK_OK;
    }

    int pos, len;
    for ( pos = 0; pos < in->size; pos += len )
    {
        uint8_t *pout;
        int pout_len;
        int64_t cur;

        cur = in->s.start;

        if ( pv->parser != NULL )
        {
            // HB-demuxed stream: let libav's parser split out whole frames;
            // the parser supplies the pts for the frame it returns.
            len = av_parser_parse2( pv->parser, pv->context, &pout, &pout_len,
                                    in->data + pos, in->size - pos, cur, cur, 0 );
            cur = pv->parser->pts;
        }
        else
        {
            // libavformat-demuxed stream: buffer is already one frame
            pout = in->data;
            len = pout_len = in->size;
        }
        if (pout != NULL && pout_len > 0)
        {
            decodeAudio( w->audio, pv, pout, pout_len, cur );
        }
    }
    // hand the accumulated decoded buffers downstream as one chain
    *buf_out = link_buf_list( pv );
    return HB_WORK_OK;
}
// Report stream info from an already-opened decoder context.
// Returns 1 and fills *info when a context exists, otherwise 0
// (info is always zeroed first).
static int decavcodecaInfo( hb_work_object_t *w, hb_work_info_t *info )
{
    hb_work_private_t *pv = w->private_data;

    memset(info, 0, sizeof(*info));

    if (pv == NULL || pv->context == NULL)
    {
        return 0;
    }

    AVCodecContext *ctx = pv->context;
    info->bitrate   = ctx->bit_rate;
    info->rate      = ctx->time_base.num;
    info->rate_base = ctx->time_base.den;
    info->profile   = ctx->profile;
    info->level     = ctx->level;
    return 1;
}
static int decavcodecaBSInfo( hb_work_object_t *w, const hb_buffer_t *buf,
hb_work_info_t *info )
{
hb_work_private_t *pv = w->private_data;
int ret = 0;
hb_audio_t *audio = w->audio;
memset( info, 0, sizeof(*info) );
if ( pv && pv->context )
{
return decavcodecaInfo( w, info );
}
AVCodec *codec = avcodec_find_decoder( w->codec_param );
if ( ! codec )
{
// there's no ffmpeg codec for this audio type - give up
return -1;
}
static char codec_name[64];
info->name = strncpy( codec_name, codec->name, sizeof(codec_name)-1 );
AVCodecContext *context = avcodec_alloc_context3(codec);
AVCodecParserContext *parser = NULL;
if (w->title && w->title->opaque_priv != NULL)
{
AVFormatContext *ic = (AVFormatContext*)w->title->opaque_priv;
avcodec_copy_context(context, ic->streams[audio->id]->codec);
// libav's eac3 parser toggles the codec_id in the context as
// it reads eac3 data between AV_CODEC_ID_AC3 and AV_CODEC_ID_EAC3.
// It detects an AC3 sync pattern sometimes in ac3_sync() which
// causes it to eventually set avctx->codec_id to AV_CODEC_ID_AC3
// in ff_aac_ac3_parse(). Since we are parsing some data before
// we get here, the codec_id may have flipped. This will cause an
// error in hb_avcodec_open(). So flip it back!
context->codec_id = w->codec_param;
}
else
{
parser = av_parser_init(codec->id);
}
hb_ff_set_sample_fmt( context, codec, AV_SAMPLE_FMT_FLT );
AVDictionary * av_opts = NULL;
av_dict_set( &av_opts, "err_detect", "crccheck+explode", 0 );
if ( hb_avcodec_open( context, codec, &av_opts, 0 ) )
{
av_dict_free( &av_opts );
return -1;
}
av_dict_free( &av_opts );
unsigned char *parse_buffer;
int parse_pos, dec_pos, parse_buffer_size;
while (buf != NULL && !ret)
{
parse_pos = 0;
while (parse_pos < buf->size)
{
int parse_len, truehd_mono = 0;
if (parser != NULL)
{
parse_len = av_parser_parse2(parser, context,
&parse_buffer, &parse_buffer_size,
buf->data + parse_pos, buf->size - parse_pos,
buf->s.start, buf->s.start, 0);
}
else
{
parse_buffer = buf->data + parse_pos;
parse_len = parse_buffer_size = buf->size - parse_pos;
}
// libavcodec can't decode TrueHD Mono (bug #356)
// work around it by requesting Stereo before decoding
if (context->codec_id == AV_CODEC_ID_TRUEHD &&
context->channel_layout == AV_CH_LAYOUT_MONO)
{
truehd_mono = 1;
context->request_channel_layout = AV_CH_LAYOUT_STEREO;
}
else
{
context->request_channel_layout = 0;
}
dec_pos = 0;
while (dec_pos < parse_buffer_size)
{
int dec_len;
int got_frame;
AVFrame *frame = av_frame_alloc();
AVPacket avp;
av_init_packet(&avp);
avp.data = parse_buffer + dec_pos;
avp.size = parse_buffer_size - dec_pos;
dec_len = avcodec_decode_audio4(context, frame, &got_frame, &avp);
if (dec_len < 0)
{
av_frame_free(&frame);
break;
}
if (dec_len > 0 && got_frame)
{
info->rate_base = 1;
// libavcoded doesn't consistently set frame->sample_rate
if (frame->sample_rate != 0)
{
info->rate = frame->sample_rate;
}
else
{
info->rate = context->sample_rate;
hb_log("decavcodecaBSInfo: warning: invalid frame sample_rate! Using context sample_rate.");
}
info->samples_per_frame = frame->nb_samples;
int bps = av_get_bits_per_sample(context->codec_id);
int channels = av_get_channel_layout_nb_channels(frame->channel_layout);
if (bps > 0)
{
info->bitrate = (bps * channels * info->rate);
}
else if (context->bit_rate > 0)
{
info->bitrate = context->bit_rate;
}
else
{
info->bitrate = 1;
}
if (truehd_mono)
{
info->channel_layout = AV_CH_LAYOUT_MONO;
info->matrix_encoding = AV_MATRIX_ENCODING_NONE;
}
else
{
AVFrameSideData *side_data;
if ((side_data =
av_frame_get_side_data(frame,
AV_FRAME_DATA_MATRIXENCODING)) != NULL)
{
info->matrix_encoding = *side_data->data;
}
else
{
info->matrix_encoding = AV_MATRIX_ENCODING_NONE;
}
if (info->matrix_encoding == AV_MATRIX_ENCODING_DOLBY ||
info->matrix_encoding == AV_MATRIX_ENCODING_DPLII)
{
info->channel_layout = AV_CH_LAYOUT_STEREO_DOWNMIX;
}
else
{
info->channel_layout = frame->channel_layout;
}
}
if (context->codec_id == AV_CODEC_ID_AC3 ||
context->codec_id == AV_CODEC_ID_EAC3)
{
if (context->audio_service_type == AV_AUDIO_SERVICE_TYPE_KARAOKE)
{
info->mode = 7;
}
else
{
info->mode = context->audio_service_type;
}
}
else if (context->codec_id == AV_CODEC_ID_AAC &&
context->extradata_size == 0)
{
// Parse ADTS AAC streams for AudioSpecificConfig.
// This data is required in order to write
// proper headers in MP4 and MKV files.
AVBitStreamFilterContext* aac_adtstoasc;
aac_adtstoasc = av_bitstream_filter_init("aac_adtstoasc");
if (aac_adtstoasc)
{
int ret, size;
uint8_t *data;
ret = av_bitstream_filter_filter(aac_adtstoasc, context,
NULL, &data, &size, avp.data, avp.size, 0);
if (ret >= 0 &&
context->extradata_size > 0 &&
audio->priv.config.extradata.length == 0)
{
int len;
len = MIN(context->extradata_size, HB_CONFIG_MAX_SIZE);
memcpy(audio->priv.config.extradata.bytes,
context->extradata, len);
audio->priv.config.extradata.length = len;
}
av_bitstream_filter_close(aac_adtstoasc);
}
}
ret = 1;
av_frame_free(&frame);
break;
}
dec_pos += dec_len;
av_frame_free(&frame);
}
parse_pos += parse_len;
}
buf = buf->next;
}
info->profile = context->profile;
info->level = context->level;
info->channel_map = &hb_libav_chan_map;
if ( parser != NULL )
av_parser_close( parser );
hb_avcodec_close( context );
av_freep( &context->extradata );
av_freep( &context );
return ret;
}
/* -------------------------------------------------------------
* General purpose video decoder using libavcodec
*/
// Copy one image plane from 'src' into 'dst'.
// 'dstride'/'sstride' are the destination/source line strides in bytes and
// 'h' is the number of rows. Returns a pointer just past the last byte
// written to 'dst'.
static uint8_t *copy_plane( uint8_t *dst, uint8_t* src, int dstride, int sstride,
                            int h )
{
    // Fast path: identical strides mean both planes are contiguous,
    // so the whole plane moves with a single memcpy.
    if ( sstride == dstride )
    {
        memcpy( dst, src, dstride * h );
        return dst + dstride * h;
    }
    // Strides differ: copy row by row, moving only the narrower line
    // width so neither buffer is overrun.
    int row_bytes = ( sstride < dstride ) ? sstride : dstride;
    int row;
    for ( row = 0; row < h; row++ )
    {
        memcpy( dst, src, row_bytes );
        dst += dstride;
        src += sstride;
    }
    return dst;
}
// copy one video frame into an HB buf. If the frame isn't in our color space
// or at least one of its dimensions is odd, use sws_scale to convert/rescale it.
// Otherwise just copy the bits.
// Copy the decoder's current frame (pv->frame) into a new hb_buffer_t.
// During a scan (no job) output dimensions are the stream dimensions
// rounded down to even; during an encode they come from the job's title.
// Depending on build options the pixels come from the DXVA2 surface,
// stay in QSV opaque memory, go through sws_scale (when the pixel format
// or dimensions differ from our yuv420 pipeline), or are plane-copied.
static hb_buffer_t *copy_frame( hb_work_private_t *pv )
{
    AVCodecContext *context = pv->context;
    int w, h;
    if ( ! pv->job )
    {
        // HandBrake's video pipeline uses yuv420 color. This means all
        // dimensions must be even. So we must adjust the dimensions
        // of incoming video if not even.
        w = context->width & ~1;
        h = context->height & ~1;
    }
    else
    {
        w = pv->job->title->width;
        h = pv->job->title->height;
    }
#ifdef USE_HWD
    if (pv->dxva2 && pv->job)
    {
        hb_buffer_t *buf;
        int ww, hh;
        buf = hb_video_buffer_init( w, h );
        ww = w;
        hh = h;
        // Lazily allocate the staging buffer the hardware surface is
        // extracted into (yuv420: ww*hh luma + ww*hh/2 chroma).
        if( !pv->dst_frame )
        {
            pv->dst_frame = malloc( ww * hh * 3 / 2 );
        }
        if( hb_va_extract( pv->dxva2, pv->dst_frame, pv->frame, pv->job->width, pv->job->height, pv->job->title->crop, pv->opencl_scale, pv->job->use_opencl, pv->job->use_decomb, pv->job->use_detelecine ) == HB_WORK_ERROR )
        {
            hb_log( "hb_va_Extract failed!!!!!!" );
        }
        // Copy Y, then U, then V out of the packed staging buffer into
        // the (possibly padded) buffer planes.
        w = buf->plane[0].stride;
        h = buf->plane[0].height;
        uint8_t *dst = buf->plane[0].data;
        copy_plane( dst, pv->dst_frame, w, ww, h );
        w = buf->plane[1].stride;
        h = buf->plane[1].height;
        dst = buf->plane[1].data;
        copy_plane( dst, pv->dst_frame + ww * hh, w, ww >> 1, h );
        w = buf->plane[2].stride;
        h = buf->plane[2].height;
        dst = buf->plane[2].data;
        copy_plane( dst, pv->dst_frame + ww * hh +( ( ww * hh ) >> 2 ), w, ww >> 1, h );
        return buf;
    }
    else
#endif
    {
        hb_buffer_t *buf = hb_video_buffer_init( w, h );

#ifdef USE_QSV
        // no need to copy the frame data when decoding with QSV to opaque memory
        if (pv->qsv.decode &&
            pv->qsv.config.io_pattern == MFX_IOPATTERN_OUT_OPAQUE_MEMORY)
        {
            buf->qsv_details.qsv_atom = pv->frame->data[2];
            return buf;
        }
#endif

        uint8_t *dst = buf->data;

        if (context->pix_fmt != AV_PIX_FMT_YUV420P || w != context->width ||
            h != context->height)
        {
            // have to convert to our internal color space and/or rescale
            AVPicture dstpic;
            hb_avpicture_fill(&dstpic, buf);

            // (Re)build the scaler context whenever the source geometry
            // or pixel format changes mid-stream.
            if (pv->sws_context == NULL            ||
                pv->sws_width   != context->width  ||
                pv->sws_height  != context->height ||
                pv->sws_pix_fmt != context->pix_fmt)
            {
                if (pv->sws_context != NULL)
                    sws_freeContext(pv->sws_context);
                pv->sws_context = hb_sws_get_context(context->width,
                                                     context->height,
                                                     context->pix_fmt,
                                                     w, h, AV_PIX_FMT_YUV420P,
                                                     SWS_LANCZOS|SWS_ACCURATE_RND);
                pv->sws_width   = context->width;
                pv->sws_height  = context->height;
                pv->sws_pix_fmt = context->pix_fmt;
            }
            sws_scale(pv->sws_context,
                      (const uint8_t* const *)pv->frame->data,
                      pv->frame->linesize,
                      0, context->height, dstpic.data, dstpic.linesize);
        }
        else
        {
            // Already yuv420 at the right size: straight plane copies.
            w = buf->plane[0].stride;
            h = buf->plane[0].height;
            dst = buf->plane[0].data;
            copy_plane( dst, pv->frame->data[0], w, pv->frame->linesize[0], h );
            w = buf->plane[1].stride;
            h = buf->plane[1].height;
            dst = buf->plane[1].data;
            copy_plane( dst, pv->frame->data[1], w, pv->frame->linesize[1], h );
            w = buf->plane[2].stride;
            h = buf->plane[2].height;
            dst = buf->plane[2].data;
            copy_plane( dst, pv->frame->data[2], w, pv->frame->linesize[2], h );
        }
        return buf;
    }
}
#ifdef USE_HWD
// get_buffer callback installed when DXVA2 hardware decoding is active.
// Tries to hand out a hardware surface via hb_va_get_frame_buf(); falls
// back to libavcodec's default software allocation when the private data
// or the DXVA2 context is missing, or when surface allocation fails.
// Returns 0 on success, otherwise the default allocator's result.
//
// Fix: the original redeclared 'pv' inside the if-body, shadowing the
// outer 'pv' that had already been null-checked; the redundant local and
// the dead 'result' initialization are removed.
static int get_frame_buf_hwd( AVCodecContext *context, AVFrame *frame )
{
    hb_work_private_t *pv = (hb_work_private_t*)context->opaque;
    if ( pv != NULL && pv->dxva2 )
    {
        if ( hb_va_get_frame_buf( pv->dxva2, context, frame ) == HB_WORK_ERROR )
            return avcodec_default_get_buffer( context, frame );
        return 0;
    }
    return avcodec_default_get_buffer( context, frame );
}
// release_buffer counterpart to get_frame_buf_hwd(): returns the surface
// to DXVA2 when hardware decoding owns it, otherwise lets libavcodec free
// its own internal buffer. Always clears the frame's plane pointers so
// released memory can't be referenced afterwards.
static void hb_ffmpeg_release_frame_buf( struct AVCodecContext *p_context, AVFrame *frame )
{
    hb_work_private_t *p_dec = (hb_work_private_t*)p_context->opaque;
    if( p_dec->dxva2 )
    {
        hb_va_release( p_dec->dxva2, frame );
    }
    else if( !frame->opaque && frame->type == FF_BUFFER_TYPE_INTERNAL )
    {
        avcodec_default_release_buffer( p_context, frame );
    }
    int plane;
    for( plane = 0; plane < 4; plane++ )
    {
        frame->data[plane] = NULL;
    }
}
#endif
// Log the chapter boundary just crossed by the decoder, using the chapter
// title when one exists. No-op during a scan (no job => no chapter list).
static void log_chapter( hb_work_private_t *pv, int chap_num, int64_t pts )
{
    if ( !pv->job )
        return;

    hb_chapter_t *chapter = hb_list_item( pv->job->list_chapter, chap_num - 1 );
    if ( chapter != NULL && chapter->title != NULL )
    {
        hb_log( "%s: \"%s\" (%d) at frame %u time %"PRId64,
                pv->context->codec->name, chapter->title, chap_num,
                pv->nframes, pts );
    }
    else
    {
        hb_log( "%s: Chapter %d at frame %u time %"PRId64,
                pv->context->codec->name, chap_num, pv->nframes, pts );
    }
}
// Drain every frame still sitting in the timestamp-reordering delay queue,
// stamping each with the smallest remaining pts from the heap as it leaves,
// and append them to pv->list in queue order.
static void flushDelayQueue( hb_work_private_t *pv )
{
    int slot = pv->queue_primed ? pv->nframes & (HEAP_SIZE-1) : 0;
    hb_buffer_t *buf = pv->delayq[slot];
    while ( buf != NULL )
    {
        buf->s.start = heap_pop( &pv->pts_heap );
        hb_list_add( pv->list, buf );
        pv->delayq[slot] = NULL;
        slot = ( slot + 1 ) & (HEAP_SIZE-1);
        buf = pv->delayq[slot];
    }
}
// Short aliases for the MPEG picture flags tested by checkCadence() below.
#define TOP_FIRST PIC_FLAG_TOP_FIELD_FIRST
#define PROGRESSIVE PIC_FLAG_PROGRESSIVE_FRAME
#define REPEAT_FIRST PIC_FLAG_REPEAT_FIRST_FIELD
// Cadence classification codes recorded in the cadence[] history by
// checkCadence(), one per observed field pattern (TB = top/bottom,
// BT = bottom/top; *_PROG variants are progressive frames, the 3-letter
// ones display 3 fields via the repeat-first-field flag).
#define TB 8
#define BT 16
#define BT_PROG 32
#define BTB_PROG 64
#define TB_PROG 128
#define TBT_PROG 256
// Track the field-order cadence of the incoming pictures and log when the
// stream transitions between video-style and film-style cadence.
// 'cadence' is a 12-entry history (newest at index 0); 'flags' are the
// PIC_FLAG_* bits for the current picture; 'start' is its pts (90KHz).
static void checkCadence( int * cadence, uint16_t flags, int64_t start )
{
    /* Shift the history down one slot to make room for the new entry. */
    int i;
    for ( i = 11; i > 0; i-- )
    {
        cadence[i] = cadence[i-1];
    }

    /* Classify this picture from its interlacing flags. For interlaced
       pictures only the field order matters; progressive pictures are
       further split by the repeat-first-field flag (2 vs 3 fields
       displayed). This covers all flag combinations, matching the
       original if/else chain exactly. */
    if ( !( flags & PROGRESSIVE ) )
    {
        cadence[0] = ( flags & TOP_FIRST ) ? TB : BT;
    }
    else if ( flags & TOP_FIRST )
    {
        cadence[0] = ( flags & REPEAT_FIRST ) ? TBT_PROG : TB_PROG;
    }
    else
    {
        cadence[0] = ( flags & REPEAT_FIRST ) ? BTB_PROG : BT_PROG;
    }

    /* Once the history is primed (cadence[11] nonzero), report cadence
       transitions in either direction. */
    if ( (cadence[2] <= TB) && (cadence[1] <= TB) && (cadence[0] > TB) && (cadence[11]) )
        hb_log("%fs: Video -> Film", (float)start / 90000);
    if ( (cadence[2] > TB) && (cadence[1] <= TB) && (cadence[0] <= TB) && (cadence[11]) )
        hb_log("%fs: Film -> Video", (float)start / 90000);
}
// send cc_buf to the CC decoder(s)
// Hand a closed-caption buffer to every CC decoder attached to this
// stream. Each decoder except the last receives its own copy; the last
// one takes ownership of 'buf' itself.
static void cc_send_to_decoder(hb_work_private_t *pv, hb_buffer_t *buf)
{
    if (buf == NULL)
        return;

    hb_subtitle_t *subtitle;
    int idx = 0;
    int remaining = hb_list_count(pv->list_subtitle);
    for ( ; remaining > 1; remaining--)
    {
        subtitle = hb_list_item(pv->list_subtitle, idx++);
        hb_fifo_push(subtitle->fifo_in, hb_buffer_dup(buf));
    }
    subtitle = hb_list_item(pv->list_subtitle, idx);
    hb_fifo_push( subtitle->fifo_in, buf );
}
// Pack the CC1 (cc_type 0) byte pairs from an A53 caption blob into a new
// hb_buffer stamped with the frame's pts. 'cc' holds 3-byte triplets:
// a header byte (valid bit 0x04, cc_type in the low 2 bits) followed by
// two payload bytes. Returns NULL when no usable CC1 data is present.
static hb_buffer_t * cc_fill_buffer(hb_work_private_t *pv, uint8_t *cc, int size, int64_t pts)
{
    hb_buffer_t *buf = NULL;
    int pairs[4] = {0,};
    int pos;

    // First pass: count valid, non-stuffing pairs per cc_type so the
    // output buffer can be sized exactly.
    for (pos = 0; pos < size; pos += 3)
    {
        if ((cc[pos] & 0x04) == 0) // not valid
            continue;
        if ((cc[pos+1] & 0x7f) == 0 && (cc[pos+2] & 0x7f) == 0) // stuffing
            continue;
        pairs[cc[pos] & 0x03]++;
    }

    // Only handles CC1 for now.
    if (pairs[0] > 0)
    {
        buf = hb_buffer_init(pairs[0] * 2);
        buf->s.start = pts;
        int out = 0;
        // Second pass: copy the CC1 payload bytes.
        for (pos = 0; pos < size; pos += 3)
        {
            if ((cc[pos] & 0x04) == 0) // not valid
                continue;
            if ((cc[pos+1] & 0x7f) == 0 && (cc[pos+2] & 0x7f) == 0) // stuffing
                continue;
            if ((cc[pos] & 0x03) == 0)
            {
                buf->data[out++] = cc[pos+1];
                buf->data[out++] = cc[pos+2];
            }
        }
    }
    return buf;
}
// Translate a libavcodec picture type into HandBrake's frame-type
// constants; anything other than I/P/B maps to 0.
static int get_frame_type(int type)
{
    if (type == AV_PICTURE_TYPE_I)
        return HB_FRAME_I;
    if (type == AV_PICTURE_TYPE_P)
        return HB_FRAME_P;
    if (type == AV_PICTURE_TYPE_B)
        return HB_FRAME_B;
    return 0;
}
/*
* Decodes a video frame from the specified raw packet data
* ('data', 'size', 'sequence').
* The output of this function is stored in 'pv->list', which contains a list
* of zero or more decoded packets.
*
* The returned packets are guaranteed to have their timestamps in the correct
* order, even if the original packets decoded by libavcodec have misordered
* timestamps, due to the use of 'packed B-frames'.
*
* Internally the set of decoded packets may be buffered in 'pv->delayq'
* until enough packets have been decoded so that the timestamps can be
* correctly rewritten, if this is necessary.
*/
// Decode one video packet and append any resulting frame(s) to pv->list,
// reordering timestamps through the delay queue when the stream may carry
// M$ packed B-frames. Returns got_picture (nonzero when a frame came out).
//
// Fixes: the DXVA2 timestamp-selection line was corrupted (the '<'
// comparison between input_pts[0] and input_pts[1] was missing, leaving
// a syntax error in USE_HWD builds); the palette branch's local 'size'
// shadowed the function's 'size' parameter and is renamed.
static int decodeFrame( hb_work_object_t *w, uint8_t *data, int size, int sequence, int64_t pts, int64_t dts, uint8_t frametype )
{
    hb_work_private_t *pv = w->private_data;
    int got_picture, oldlevel = 0;
    AVPacket avp;

    // Silence libavcodec's own logging unless we're running verbose.
    if ( global_verbosity_level <= 1 )
    {
        oldlevel = av_log_get_level();
        av_log_set_level( AV_LOG_QUIET );
    }

    av_init_packet(&avp);
    avp.data = data;
    avp.size = size;
    avp.pts = pts;
    avp.dts = dts;

    // If a palette arrived with the input, attach it as packet side data
    // and release our copy.
    if (pv->palette != NULL)
    {
        uint8_t * palette;
        int palette_size;
        palette = av_packet_new_side_data(&avp, AV_PKT_DATA_PALETTE,
                                          AVPALETTE_SIZE);
        palette_size = MIN(pv->palette->size, AVPALETTE_SIZE);
        memcpy(palette, pv->palette->data, palette_size);
        hb_buffer_close(&pv->palette);
    }

    /*
     * libav avcodec_decode_video2() needs AVPacket flagged with AV_PKT_FLAG_KEY
     * for some codecs. For example, sequence of PNG in a mov container.
     */
    if ( frametype & HB_FRAME_KEY )
    {
        avp.flags |= AV_PKT_FLAG_KEY;
    }

#ifdef USE_QSV_PTS_WORKAROUND
    /*
     * The MediaSDK decoder will return decoded frames in the correct order,
     * but *sometimes* with the incorrect timestamp assigned to them.
     *
     * We work around it by saving the input timestamps (in chronological order)
     * and restoring them after decoding.
     */
    if (pv->qsv.decode && avp.data != NULL)
    {
        hb_av_add_new_pts(pv->qsv.pts_list, avp.pts);
    }
#endif

    if ( avcodec_decode_video2( pv->context, pv->frame, &got_picture, &avp ) < 0 )
    {
        ++pv->decode_errors;
    }

#ifdef USE_QSV
    if (pv->qsv.decode && pv->job->qsv.ctx == NULL && pv->video_codec_opened > 0)
    {
        // this is quite late, but we can't be certain that the QSV context is
        // available until after we call avcodec_decode_video2() at least once
        pv->job->qsv.ctx = pv->context->priv_data;
    }
#endif

#ifdef USE_QSV_PTS_WORKAROUND
    if (pv->qsv.decode && got_picture)
    {
        // we got a decoded frame, restore the lowest available PTS
        pv->frame->pkt_pts = hb_av_pop_next_pts(pv->qsv.pts_list);
    }
#endif

    if ( global_verbosity_level <= 1 )
    {
        av_log_set_level( oldlevel );
    }
    if( got_picture )
    {
        uint16_t flags = 0;

        // ffmpeg makes it hard to attach a pts to a frame. if the MPEG ES
        // packet had a pts we handed it to av_parser_parse (if the packet had
        // no pts we set it to AV_NOPTS_VALUE, but before the parse we can't
        // distinguish between the start of a video frame with no pts & an
        // intermediate packet of some frame which never has a pts). we hope
        // that when parse returns the frame to us the pts we originally
        // handed it will be in parser->pts. we put this pts into avp.pts so
        // that when avcodec_decode_video finally gets around to allocating an
        // AVFrame to hold the decoded frame, avcodec_default_get_buffer can
        // stuff that pts into the it. if all of these relays worked at this
        // point frame.pts should hold the frame's pts from the original data
        // stream or AV_NOPTS_VALUE if it didn't have one. in the latter case
        // we generate the next pts in sequence for it.
        if ( !pv->frame_duration_set )
            compute_frame_duration( pv );

        double pts;
        double frame_dur = pv->duration;
        if ( pv->frame->repeat_pict )
        {
            frame_dur += pv->frame->repeat_pict * pv->field_duration;
        }
#ifdef USE_HWD
        if( pv->dxva2 && pv->dxva2->do_job == HB_WORK_OK )
        {
            if( avp.pts>0 )
            {
                if( pv->dxva2->input_pts[0] != 0 && pv->dxva2->input_pts[1] == 0 )
                    pv->frame->pkt_pts = pv->dxva2->input_pts[0];
                else
                    // take the smaller (earlier) of the two queued
                    // input timestamps
                    pv->frame->pkt_pts =
                        pv->dxva2->input_pts[0] < pv->dxva2->input_pts[1] ?
                        pv->dxva2->input_pts[0] : pv->dxva2->input_pts[1];
            }
        }
#endif
        // If there was no pts for this frame, assume constant frame rate
        // video & estimate the next frame time from the last & duration.
        if (pv->frame->pkt_pts == AV_NOPTS_VALUE || hb_gui_use_hwd_flag == 1)
        {
            pts = pv->pts_next;
        }
        else
        {
            pts = pv->frame->pkt_pts;
            // Detect streams with broken out of order timestamps
            if (!pv->brokenTS && pv->frame->pkt_pts < pv->prev_pts)
            {
                hb_log("Broken timestamps detected. Reordering.");
                pv->brokenTS = 1;
            }
            pv->prev_pts = pv->frame->pkt_pts;
        }
        pv->pts_next = pts + frame_dur;

        if ( pv->frame->top_field_first )
        {
            flags |= PIC_FLAG_TOP_FIELD_FIRST;
        }
        if ( !pv->frame->interlaced_frame )
        {
            flags |= PIC_FLAG_PROGRESSIVE_FRAME;
        }
        if ( pv->frame->repeat_pict == 1 )
        {
            flags |= PIC_FLAG_REPEAT_FIRST_FIELD;
        }
        if ( pv->frame->repeat_pict == 2 )
        {
            flags |= PIC_FLAG_REPEAT_FRAME;
        }
        int frametype = get_frame_type(pv->frame->pict_type);

        // Check for CC data
        AVFrameSideData *sd;
        sd = av_frame_get_side_data(pv->frame, AV_FRAME_DATA_A53_CC);
        if (sd != NULL)
        {
            // During a scan, register a synthetic CC608 subtitle track on
            // the title the first time caption data is seen.
            if (!pv->job && pv->title && sd->size > 0)
            {
                hb_subtitle_t *subtitle;
                int i = 0;

                while ((subtitle = hb_list_item(pv->title->list_subtitle, i++)))
                {
                    /*
                     * Let's call them 608 subs for now even if they aren't,
                     * since they are the only types we grok.
                     */
                    if (subtitle->source == CC608SUB)
                    {
                        break;
                    }
                }
                if (subtitle == NULL)
                {
                    subtitle = calloc(sizeof( hb_subtitle_t ), 1);
                    subtitle->track = 0;
                    subtitle->id = 0;
                    subtitle->format = TEXTSUB;
                    subtitle->source = CC608SUB;
                    subtitle->config.dest = PASSTHRUSUB;
                    subtitle->codec = WORK_DECCC608;
                    subtitle->type = 5;
                    snprintf(subtitle->lang, sizeof( subtitle->lang ),
                             "Closed Captions");
                    /*
                     * The language of the subtitles will be the same as the
                     * first audio track, i.e. the same as the video.
                     */
                    hb_audio_t *audio = hb_list_item(pv->title->list_audio, 0);
                    if (audio != NULL)
                    {
                        snprintf(subtitle->iso639_2, sizeof(subtitle->iso639_2),
                                 "%s", audio->config.lang.iso639_2);
                    } else {
                        snprintf(subtitle->iso639_2, sizeof(subtitle->iso639_2),
                                 "und");
                    }
                    hb_list_add(pv->title->list_subtitle, subtitle);
                }
            }
            // During an encode, forward the caption bytes to the attached
            // CC decoders.
            if (pv->list_subtitle != NULL && sd->size > 0)
            {
                hb_buffer_t *cc_buf;
                cc_buf = cc_fill_buffer(pv, sd->data, sd->size, pts);
                cc_send_to_decoder(pv, cc_buf);
            }
        }

        hb_buffer_t *buf;

        // if we're doing a scan or this content couldn't have been broken
        // by Microsoft we don't worry about timestamp reordering
        if ( ! pv->job || ! pv->brokenTS )
        {
            buf = copy_frame( pv );
            av_frame_unref(pv->frame);
            buf->s.start = pts;
            buf->sequence = sequence;

            buf->s.flags = flags;
            buf->s.frametype = frametype;

            if ( pv->new_chap && buf->s.start >= pv->chap_time )
            {
                buf->s.new_chap = pv->new_chap;
                log_chapter( pv, pv->new_chap, buf->s.start );
                pv->new_chap = 0;
                pv->chap_time = 0;
            }
            else if ( pv->nframes == 0 && pv->job )
            {
                log_chapter( pv, pv->job->chapter_start, buf->s.start );
            }
            checkCadence( pv->cadence, flags, buf->s.start );
            hb_list_add( pv->list, buf );
            ++pv->nframes;
            return got_picture;
        }

        // XXX This following probably addresses a libavcodec bug but I don't
        //     see an easy fix so we workaround it here.
        //
        // The M$ 'packed B-frames' atrocity results in decoded frames with
        // the wrong timestamp. E.g., if there are 2 b-frames the timestamps
        // we see here will be "2 3 1 5 6 4 ..." instead of "1 2 3 4 5 6".
        // The frames are actually delivered in the right order but with
        // the wrong timestamp. To get the correct timestamp attached to
        // each frame we have a delay queue (longer than the max number of
        // b-frames) & a sorting heap for the timestamps. As each frame
        // comes out of the decoder the oldest frame in the queue is removed
        // and associated with the smallest timestamp. Then the new frame is
        // added to the queue & its timestamp is pushed on the heap.
        // This does nothing if the timestamps are correct (i.e., the video
        // uses a codec that Micro$oft hasn't broken yet) but the frames
        // get timestamped correctly even when M$ has munged them.

        // remove the oldest picture from the frame queue (if any) &
        // give it the smallest timestamp from our heap. The queue size
        // is a power of two so we get the slot of the oldest by masking
        // the frame count & this will become the slot of the newest
        // once we've removed & processed the oldest.
        int slot = pv->nframes & (HEAP_SIZE-1);
        if ( ( buf = pv->delayq[slot] ) != NULL )
        {
            pv->queue_primed = 1;
            buf->s.start = heap_pop( &pv->pts_heap );

            if ( pv->new_chap && buf->s.start >= pv->chap_time )
            {
                buf->s.new_chap = pv->new_chap;
                log_chapter( pv, pv->new_chap, buf->s.start );
                pv->new_chap = 0;
                pv->chap_time = 0;
            }
            else if ( pv->nframes == 0 && pv->job )
            {
                log_chapter( pv, pv->job->chapter_start, buf->s.start );
            }
            checkCadence( pv->cadence, buf->s.flags, buf->s.start );
            hb_list_add( pv->list, buf );
        }

        // add the new frame to the delayq & push its timestamp on the heap
        buf = copy_frame( pv );
        av_frame_unref(pv->frame);
        buf->sequence = sequence;
        /* Store picture flags for later use by filters */
        buf->s.flags = flags;
        buf->s.frametype = frametype;
        pv->delayq[slot] = buf;
        heap_push( &pv->pts_heap, pts );

        ++pv->nframes;
    }

    return got_picture;
}
// Feed raw packet bytes through the (optional) parser and into
// decodeFrame(). A size of 0 signals end-of-stream: the parser and then
// the decoder are flushed, the delay queue is drained, and the CC
// decoders get an empty (EOF) buffer.
static void decodeVideo( hb_work_object_t *w, uint8_t *data, int size, int sequence, int64_t pts, int64_t dts, uint8_t frametype )
{
    hb_work_private_t *pv = w->private_data;

    /*
     * The following loop is a do..while because we need to handle both
     * data & the flush at the end (signaled by size=0). At the end there's
     * generally a frame in the parser & one or more frames in the decoder
     * (depending on the bframes setting).
     */
    int pos = 0;
    do {
        uint8_t *pout;
        int pout_len, len;
        int64_t parser_pts, parser_dts;
        if ( pv->parser )
        {
            // The parser reassembles complete frames and carries the
            // pts/dts that belong to the frame it emits.
            len = av_parser_parse2( pv->parser, pv->context, &pout, &pout_len,
                                    data + pos, size - pos, pts, dts, 0 );
            parser_pts = pv->parser->pts;
            parser_dts = pv->parser->dts;
        }
        else
        {
            // No parser: the input buffer is already one complete frame.
            pout = data;
            len = pout_len = size;
            parser_pts = pts;
            parser_dts = dts;
        }
        pos += len;

        if ( pout_len > 0 )
        {
            decodeFrame( w, pout, pout_len, sequence, parser_pts, parser_dts, frametype );
        }
    } while ( pos < size );

    /* the stuff above flushed the parser, now flush the decoder */
    if (size <= 0)
    {
        // Keep draining until the decoder stops producing pictures.
        while (decodeFrame(w, NULL, 0, sequence, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0))
        {
            continue;
        }
#ifdef USE_QSV
        if (pv->qsv.decode)
        {
            // flush a second time
            while (decodeFrame(w, NULL, 0, sequence, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0))
            {
                continue;
            }
        }
#endif
        flushDelayQueue(pv);
        // An empty buffer tells the CC decoders the stream has ended.
        if (pv->list_subtitle != NULL)
            cc_send_to_decoder(pv, hb_buffer_init(0));
    }
}
/*
* Removes all packets from 'pv->list', links them together into
* a linked-list, and returns the first packet in the list.
*/
/*
 * Detach every buffer from 'pv->list', chain them together through their
 * 'next' pointers in list order, and return the first one (NULL when the
 * list is empty).
 */
static hb_buffer_t *link_buf_list( hb_work_private_t *pv )
{
    hb_buffer_t *head = hb_list_item( pv->list, 0 );
    if ( head == NULL )
        return NULL;

    hb_list_rem( pv->list, head );
    hb_buffer_t *tail = head;
    hb_buffer_t *next;
    while ( ( next = hb_list_item( pv->list, 0 ) ) != NULL )
    {
        hb_list_rem( pv->list, next );
        tail->next = next;
        tail = next;
    }
    return head;
}
// Initialize the libavcodec video decoder work object. When the title
// was demuxed by libavformat (opaque_priv set) the codec is opened here
// from the demuxer's stream parameters; otherwise a parser is created and
// the codec open is deferred to decavcodecvWork() (needed for VC-1, see
// that function). Returns 0 on success, 1 on failure.
static int decavcodecvInit( hb_work_object_t * w, hb_job_t * job )
{
    hb_work_private_t *pv = calloc( 1, sizeof( hb_work_private_t ) );

    w->private_data = pv;
    pv->wait_for_keyframe = 60;
    pv->job = job;
    if ( job )
        pv->title = job->title;
    else
        pv->title = w->title;
    pv->list = hb_list_init();

#ifdef USE_QSV
    if (hb_qsv_decode_is_enabled(job))
    {
        // determine which encoder we're using
        hb_qsv_info_t *info = hb_qsv_info_get(job->vcodec);
        pv->qsv.decode = info != NULL;
        if (pv->qsv.decode)
        {
            // setup the QSV configuration
            pv->qsv.config.io_pattern = MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
            pv->qsv.config.impl_requested = info->implementation;
            pv->qsv.config.async_depth = job->qsv.async_depth;
            pv->qsv.config.sync_need = 0;
            pv->qsv.config.usage_threaded = 1;
            pv->qsv.config.additional_buffers = 64; // FIFO_LARGE
            if (info->capabilities & HB_QSV_CAP_RATECONTROL_LA)
            {
                // more surfaces may be needed for the lookahead
                pv->qsv.config.additional_buffers = 160;
            }
            pv->qsv.codec_name = hb_qsv_decode_get_codec_name(w->codec_param);
        }
    }
    else
    {
        pv->qsv.decode = 0;
    }
#endif

    // NOTE(review): threaded decode is only enabled when the title has no
    // mid-stream resolution change — presumably the threaded path can't
    // cope with one; confirm before changing.
    if( pv->job && pv->job->title && !pv->job->title->has_resolution_change )
    {
        pv->threads = HB_FFMPEG_THREADS_AUTO;
    }

    AVCodec *codec = NULL;

#ifdef USE_QSV
    if (pv->qsv.decode)
    {
        codec = avcodec_find_decoder_by_name(pv->qsv.codec_name);
    }
    else
#endif
    {
        codec = avcodec_find_decoder(w->codec_param);
    }
    if ( codec == NULL )
    {
        hb_log( "decavcodecvInit: failed to find codec for id (%d)", w->codec_param );
        return 1;
    }

    if ( pv->title->opaque_priv )
    {
        // libavformat demuxed this title: clone the stream's codec
        // context and open the decoder right away.
        AVFormatContext *ic = (AVFormatContext*)pv->title->opaque_priv;

        pv->context = avcodec_alloc_context3(codec);
        avcodec_copy_context( pv->context, ic->streams[pv->title->video_id]->codec);
        pv->context->workaround_bugs = FF_BUG_AUTODETECT;
        pv->context->err_recognition = AV_EF_CRCCHECK;
        pv->context->error_concealment = FF_EC_GUESS_MVS|FF_EC_DEBLOCK;

#ifdef USE_HWD
        // QSV decoding is faster, so prefer it to DXVA2
        if (pv->job != NULL && !pv->qsv.decode && pv->job->use_hwd &&
            hb_use_dxva(pv->title))
        {
            pv->dxva2 = hb_va_create_dxva2( pv->dxva2, w->codec_param );
            if( pv->dxva2 && pv->dxva2->do_job == HB_WORK_OK )
            {
                // install the hardware get/release buffer callbacks and
                // force single-threaded decode for the hardware path
                hb_va_new_dxva2( pv->dxva2, pv->context );
                pv->context->slice_flags |= SLICE_FLAG_ALLOW_FIELD;
                pv->context->opaque = pv;
                pv->context->get_buffer = get_frame_buf_hwd;
                pv->context->release_buffer = hb_ffmpeg_release_frame_buf;
                pv->context->get_format = hb_ffmpeg_get_format;
                pv->opencl_scale = ( hb_oclscale_t * )malloc( sizeof( hb_oclscale_t ) );
                memset( pv->opencl_scale, 0, sizeof( hb_oclscale_t ) );
                pv->threads = 1;
            }
        }
#endif

#ifdef USE_QSV
        if (pv->qsv.decode)
        {
#ifdef USE_QSV_PTS_WORKAROUND
            pv->qsv.pts_list = hb_list_init();
#endif
            // set the QSV configuration before opening the decoder
            pv->context->hwaccel_context = &pv->qsv.config;
        }
#endif

        // Set encoder opts...
        AVDictionary * av_opts = NULL;
        av_dict_set( &av_opts, "refcounted_frames", "1", 0 );
        if (pv->title->flags & HBTF_NO_IDR)
        {
            av_dict_set( &av_opts, "flags", "output_corrupt", 0 );
        }

        if ( hb_avcodec_open( pv->context, codec, &av_opts, pv->threads ) )
        {
            av_dict_free( &av_opts );
            hb_log( "decavcodecvInit: avcodec_open failed" );
            return 1;
        }
        av_dict_free( &av_opts );
        pv->video_codec_opened = 1;
        // avi, mkv and possibly mp4 containers can contain the M$ VFW packed
        // b-frames abortion that messes up frame ordering and timestamps.
        // XXX ffmpeg knows which streams are broken but doesn't expose the
        // info externally. We should patch ffmpeg to add a flag to the
        // codec context for this but until then we mark all ffmpeg streams
        // as suspicious.
        pv->brokenTS = 1;
    }
    else
    {
        // Not demuxed by libavformat: parse the raw ES ourselves and open
        // the codec lazily in decavcodecvWork().
        pv->parser = av_parser_init( w->codec_param );
    }

    pv->frame = av_frame_alloc();
    if (pv->frame == NULL)
    {
        hb_log("decavcodecvInit: av_frame_alloc failed");
        return 1;
    }

    /*
     * If not scanning, then are we supposed to extract Closed Captions
     * and send them to the decoder?
     */
    if (job != NULL && hb_list_count(job->list_subtitle) > 0)
    {
        hb_subtitle_t *subtitle;
        int i = 0;

        while ((subtitle = hb_list_item(job->list_subtitle, i++)) != NULL)
        {
            if (subtitle->source == CC608SUB)
            {
                if (pv->list_subtitle == NULL)
                {
                    pv->list_subtitle = hb_list_init();
                }
                hb_list_add(pv->list_subtitle, subtitle);
            }
        }
    }
    return 0;
}
// Populate pv->context->extradata from the first input buffer(s) using the
// parser's 'split' callback. Returns 0 when extradata is present or not
// obtainable (no parser/split — nothing to do), 1 when the caller should
// wait for more data (or allocation failed).
//
// Fix: the original null check read `pv->parser == NULL || pv->parser ==
// NULL` (the same test twice) and then dereferenced pv->parser->parser
// without checking it; the second test now guards pv->parser->parser.
static int setup_extradata( hb_work_object_t *w, hb_buffer_t *in )
{
    hb_work_private_t *pv = w->private_data;

    // we can't call the avstream funcs but the read_header func in the
    // AVInputFormat may set up some state in the AVContext. In particular
    // vc1t_read_header allocates 'extradata' to deal with header issues
    // related to Microsoft's bizarre engineering notions. We alloc a chunk
    // of space to make vc1 work then associate the codec with the context.
    if (pv->context->extradata == NULL)
    {
        if (pv->parser == NULL || pv->parser->parser == NULL ||
            pv->parser->parser->split == NULL)
        {
            // No split callback available: extradata can't be extracted,
            // which is fine for codecs that don't need it.
            return 0;
        }
        else
        {
            int size;
            size = pv->parser->parser->split(pv->context, in->data, in->size);
            if (size > 0)
            {
                pv->context->extradata_size = size;
                pv->context->extradata =
                                av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
                if (pv->context->extradata == NULL)
                    return 1;
                memcpy(pv->context->extradata, in->data, size);
                return 0;
            }
        }
        // No header found in this buffer yet; ask the caller to retry
        // with later data.
        return 1;
    }
    return 0;
}
// Work-loop entry point: decode one input buffer's worth of video.
// Takes ownership of *buf_in, emits zero or more decoded frames through
// *buf_out, and opens the codec lazily on the first data buffer when
// decavcodecvInit() deferred it (raw ES path). Returns HB_WORK_OK, or
// HB_WORK_DONE on end-of-stream or unrecoverable codec errors.
static int decavcodecvWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                            hb_buffer_t ** buf_out )
{
    hb_work_private_t *pv = w->private_data;
    hb_buffer_t *in = *buf_in;
    int64_t pts = AV_NOPTS_VALUE;
    int64_t dts = pts;

    *buf_in = NULL;
    *buf_out = NULL;

    /* if we got an empty buffer signaling end-of-stream send it downstream */
    if ( in->size == 0 )
    {
        // Flush the decoder (size 0), then append the EOF buffer itself
        // so downstream sees the end-of-stream marker.
        if (pv->context != NULL && pv->context->codec != NULL)
        {
            decodeVideo( w, in->data, in->size, in->sequence, pts, dts, in->s.frametype );
        }
        hb_list_add( pv->list, in );
        *buf_out = link_buf_list( pv );
        return HB_WORK_DONE;
    }

    // if this is the first frame open the codec (we have to wait for the
    // first frame because of M$ VC1 braindamage).
    if ( !pv->video_codec_opened )
    {
        AVCodec *codec = NULL;
#ifdef USE_QSV
        if (pv->qsv.decode)
        {
            codec = avcodec_find_decoder_by_name(pv->qsv.codec_name);
        }
        else
#endif
        {
            codec = avcodec_find_decoder(w->codec_param);
        }
        if ( codec == NULL )
        {
            hb_log( "decavcodecvWork: failed to find codec for id (%d)", w->codec_param );
            *buf_out = hb_buffer_init( 0 );;
            return HB_WORK_DONE;
        }

        pv->context = avcodec_alloc_context3( codec );
        pv->context->workaround_bugs = FF_BUG_AUTODETECT;
        pv->context->err_recognition = AV_EF_CRCCHECK;
        pv->context->error_concealment = FF_EC_GUESS_MVS|FF_EC_DEBLOCK;

        if ( setup_extradata( w, in ) )
        {
            // we didn't find the headers needed to set up extradata.
            // the codec will abort if we open it so just free the buf
            // and hope we eventually get the info we need.
            hb_buffer_close( &in );
            return HB_WORK_OK;
        }

#ifdef USE_QSV
        if (pv->qsv.decode)
        {
#ifdef USE_QSV_PTS_WORKAROUND
            pv->qsv.pts_list = hb_list_init();
#endif
            // set the QSV configuration before opening the decoder
            pv->context->hwaccel_context = &pv->qsv.config;
        }
#endif

        AVDictionary * av_opts = NULL;
        av_dict_set( &av_opts, "refcounted_frames", "1", 0 );
        if (pv->title->flags & HBTF_NO_IDR)
        {
            av_dict_set( &av_opts, "flags", "output_corrupt", 0 );
        }

        // disable threaded decoding for scan, can cause crashes
        if ( hb_avcodec_open( pv->context, codec, &av_opts, pv->threads ) )
        {
            av_dict_free( &av_opts );
            hb_log( "decavcodecvWork: avcodec_open failed" );
            *buf_out = hb_buffer_init( 0 );;
            return HB_WORK_DONE;
        }
        av_dict_free( &av_opts );
        pv->video_codec_opened = 1;
    }

    // Pick up the buffer's timestamps (negative start means "unknown").
    if( in->s.start >= 0 )
    {
        pts = in->s.start;
        dts = in->s.renderOffset;
    }
    // Remember a pending chapter mark; decodeFrame() applies it to the
    // first output frame at or past chap_time.
    if ( in->s.new_chap )
    {
        pv->new_chap = in->s.new_chap;
        pv->chap_time = pts >= 0? pts : pv->pts_next;
    }
#ifdef USE_HWD
    if( pv->dxva2 && pv->dxva2->do_job == HB_WORK_OK )
    {
        // Queue the input pts for the DXVA2 path; decodeFrame() reads
        // these back when stamping output frames.
        if( pv->dxva2->input_pts[0] <= pv->dxva2->input_pts[1] )
            pv->dxva2->input_pts[0] = pts;
        else if( pv->dxva2->input_pts[0] > pv->dxva2->input_pts[1] )
            pv->dxva2->input_pts[1] = pts;
        pv->dxva2->input_dts = dts;
    }
#endif
    // Stash an attached palette; decodeFrame() forwards it as packet
    // side data on the next decode call.
    if (in->palette != NULL)
    {
        pv->palette = in->palette;
        in->palette = NULL;
    }
    decodeVideo( w, in->data, in->size, in->sequence, pts, dts, in->s.frametype );
    hb_buffer_close( &in );
    *buf_out = link_buf_list( pv );
    return HB_WORK_OK;
}
// Estimate the frame duration (and per-field duration) in 90KHz ticks,
// storing the results in pv->duration / pv->field_duration and setting
// pv->frame_duration_set when a trustworthy value was found. Sources are
// tried in order of reliability: libavformat's frame count + duration,
// the stream's avg_frame_rate / time_base (only accepted in the 8-64 fps
// range), then the codec context's time_base, and finally a hard-coded
// NTSC-film fallback.
static void compute_frame_duration( hb_work_private_t *pv )
{
    double duration = 0.;
    int64_t max_fps = 64L;

    // context->time_base may be in fields, so set the max *fields* per second
    if ( pv->context->ticks_per_frame > 1 )
        max_fps *= pv->context->ticks_per_frame;

    if ( pv->title->opaque_priv )
    {
        // If ffmpeg is demuxing for us, it collects some additional
        // information about framerates that is often more accurate
        // than context->time_base.
        AVFormatContext *ic = (AVFormatContext*)pv->title->opaque_priv;
        AVStream *st = ic->streams[pv->title->video_id];
        if ( st->nb_frames && st->duration )
        {
            // compute the average frame duration from the total number
            // of frames & the total duration.
            duration = ( (double)st->duration * (double)st->time_base.num ) /
                       ( (double)st->nb_frames * (double)st->time_base.den );
        }
        // Raw demuxers set a default fps of 25 and do not parse
        // a value from the container. So use the codec time_base
        // for raw demuxers.
        else if (ic->iformat->raw_codec_id == AV_CODEC_ID_NONE)
        {
            // XXX We don't have a frame count or duration so try to use the
            // far less reliable time base info in the stream.
            // Because the time bases are so screwed up, we only take values
            // in the range 8fps - 64fps.
            AVRational *tb = NULL;
            if ( st->avg_frame_rate.den * 64L > st->avg_frame_rate.num &&
                 st->avg_frame_rate.num > st->avg_frame_rate.den * 8L )
            {
                tb = &(st->avg_frame_rate);
                duration = (double)tb->den / (double)tb->num;
            }
            else if ( st->time_base.num * 64L > st->time_base.den &&
                      st->time_base.den > st->time_base.num * 8L )
            {
                tb = &(st->time_base);
                duration = (double)tb->num / (double)tb->den;
            }
        }
        if ( !duration &&
             pv->context->time_base.num * max_fps > pv->context->time_base.den &&
             pv->context->time_base.den > pv->context->time_base.num * 8L )
        {
            duration = (double)pv->context->time_base.num /
                       (double)pv->context->time_base.den;
            if ( pv->context->ticks_per_frame > 1 )
            {
                // for ffmpeg 0.5 & later, the H.264 & MPEG-2 time base is
                // field rate rather than frame rate so convert back to frames.
                duration *= pv->context->ticks_per_frame;
            }
        }
    }
    else
    {
        // No demuxer info available: fall back directly to the codec
        // context's time base (same 8-64 fps sanity window).
        if ( pv->context->time_base.num * max_fps > pv->context->time_base.den &&
             pv->context->time_base.den > pv->context->time_base.num * 8L )
        {
            duration = (double)pv->context->time_base.num /
                       (double)pv->context->time_base.den;
            if ( pv->context->ticks_per_frame > 1 )
            {
                // for ffmpeg 0.5 & later, the H.264 & MPEG-2 time base is
                // field rate rather than frame rate so convert back to frames.
                duration *= pv->context->ticks_per_frame;
            }
        }
    }
    if ( duration == 0 )
    {
        // No valid timing info found in the stream, so pick some value
        duration = 1001. / 24000.;
    }
    else
    {
        pv->frame_duration_set = 1;
    }
    pv->duration = duration * 90000.;
    pv->field_duration = pv->duration;
    if ( pv->context->ticks_per_frame > 1 )
    {
        pv->field_duration /= pv->context->ticks_per_frame;
    }
}
/* decavcodecvInfo: report stream properties (dimensions, frame rate, color
 * description, decode-support flags) gathered by the decoder.
 * Returns 1 when the codec context is open and info was filled, 0 otherwise.
 */
static int decavcodecvInfo( hb_work_object_t *w, hb_work_info_t *info )
{
    hb_work_private_t *pv = w->private_data;

    memset( info, 0, sizeof(*info) );

    if (pv->context == NULL)
        return 0;

    info->bitrate = pv->context->bit_rate;
    // HandBrake's video pipeline uses yuv420 color.  This means all
    // dimensions must be even.  So we must adjust the dimensions
    // of incoming video if not even.
    info->width = pv->context->width & ~1;
    info->height = pv->context->height & ~1;

    info->pixel_aspect_width = pv->context->sample_aspect_ratio.num;
    info->pixel_aspect_height = pv->context->sample_aspect_ratio.den;

    compute_frame_duration( pv );
    // Express the frame rate against a 27MHz clock: rate_base is the frame
    // duration converted from 90kHz ticks (x300).
    info->rate = 27000000;
    info->rate_base = pv->duration * 300.;

    info->profile = pv->context->profile;
    info->level = pv->context->level;
    info->name = pv->context->codec->name;

    switch( pv->context->color_primaries )
    {
        case AVCOL_PRI_BT709:
            info->color_prim = HB_COLR_PRI_BT709;
            break;
        case AVCOL_PRI_BT470BG:
            info->color_prim = HB_COLR_PRI_EBUTECH;
            break;
        case AVCOL_PRI_BT470M:
        case AVCOL_PRI_SMPTE170M:
        case AVCOL_PRI_SMPTE240M:
            info->color_prim = HB_COLR_PRI_SMPTEC;
            break;
        default:
        {
            // Unknown primaries: guess from the picture size / frame rate.
            if( ( info->width >= 1280 || info->height >= 720 ) ||
                ( info->width >   720 && info->height >  576 ) )
                // ITU BT.709 HD content
                info->color_prim = HB_COLR_PRI_BT709;
            else if( info->rate_base == 1080000 )
                // ITU BT.601 DVD or SD TV content (PAL)
                info->color_prim = HB_COLR_PRI_EBUTECH;
            else
                // ITU BT.601 DVD or SD TV content (NTSC)
                info->color_prim = HB_COLR_PRI_SMPTEC;
            break;
        }
    }

    switch( pv->context->color_trc )
    {
        case AVCOL_TRC_SMPTE240M:
            info->color_transfer = HB_COLR_TRA_SMPTE240M;
            break;
        default:
            // ITU BT.601, BT.709, anything else
            info->color_transfer = HB_COLR_TRA_BT709;
            break;
    }

    switch( pv->context->colorspace )
    {
        case AVCOL_SPC_BT709:
            info->color_matrix = HB_COLR_MAT_BT709;
            break;
        case AVCOL_SPC_FCC:
        case AVCOL_SPC_BT470BG:
        case AVCOL_SPC_SMPTE170M:
        case AVCOL_SPC_RGB: // libswscale rgb2yuv
            info->color_matrix = HB_COLR_MAT_SMPTE170M;
            break;
        case AVCOL_SPC_SMPTE240M:
            info->color_matrix = HB_COLR_MAT_SMPTE240M;
            break;
        default:
        {
            // Unknown matrix: same size-based guess as for primaries.
            if( ( info->width >= 1280 || info->height >= 720 ) ||
                ( info->width >   720 && info->height >  576 ) )
                // ITU BT.709 HD content
                info->color_matrix = HB_COLR_MAT_BT709;
            else
                // ITU BT.601 DVD or SD TV content (PAL)
                // ITU BT.601 DVD or SD TV content (NTSC)
                info->color_matrix = HB_COLR_MAT_SMPTE170M;
            break;
        }
    }

    info->video_decode_support = HB_DECODE_SUPPORT_SW;
    switch (pv->context->codec_id)
    {
        case AV_CODEC_ID_H264:
            // QSV hardware decode is only offered for 4:2:0 H.264.
            if (pv->context->pix_fmt == AV_PIX_FMT_YUV420P ||
                pv->context->pix_fmt == AV_PIX_FMT_YUVJ420P)
            {
#ifdef USE_QSV
                info->video_decode_support |= HB_DECODE_SUPPORT_QSV;
#endif
            }
            break;

        default:
            break;
    }

    return 1;
}
/* decavcodecvBSInfo: bitstream probing is not implemented for this decoder.
 * Always reports failure (0) so callers fall back to other means. */
static int decavcodecvBSInfo( hb_work_object_t *w, const hb_buffer_t *buf,
                              hb_work_info_t *info )
{
    (void)w;
    (void)buf;
    (void)info;
    return 0;
}
/* decavcodecvFlush: discard all buffered/delayed output so decoding can
 * restart cleanly (e.g. after a seek).  For streams we demux ourselves the
 * codec context is torn down and the parser re-created; when ffmpeg owns the
 * demux (opaque_priv set) a simple avcodec_flush_buffers() suffices.
 */
static void decavcodecvFlush( hb_work_object_t *w )
{
    hb_work_private_t *pv = w->private_data;

    if (pv->context != NULL && pv->context->codec != NULL)
    {
        // Drop frames queued for reordering plus any linked output buffers.
        flushDelayQueue( pv );
        hb_buffer_t *buf = link_buf_list( pv );
        hb_buffer_close( &buf );
        if ( pv->title->opaque_priv == NULL )
        {
            pv->video_codec_opened = 0;
            hb_avcodec_close( pv->context );
            av_freep( &pv->context->extradata );
            av_freep( &pv->context );
            if ( pv->parser )
            {
                av_parser_close(pv->parser);
            }
            pv->parser = av_parser_init( w->codec_param );
        }
        else
        {
            avcodec_flush_buffers( pv->context );
        }
    }
    // Require a fresh keyframe before emitting frames again.
    pv->wait_for_keyframe = 60;
}
/* Work-object descriptor for the libavcodec video decoder. */
hb_work_object_t hb_decavcodecv =
{
    .id     = WORK_DECAVCODECV,
    .name   = "Video decoder (libavcodec)",
    .init   = decavcodecvInit,
    .work   = decavcodecvWork,
    .close  = decavcodecClose,
    .flush  = decavcodecvFlush,
    .info   = decavcodecvInfo,
    .bsinfo = decavcodecvBSInfo
};
/* decodeAudio: feed 'size' bytes of compressed audio at 'data' through
 * libavcodec, appending the resulting PCM (or, for passthru codecs, the raw
 * packet bytes) to pv->list.  'pts' is the timestamp of the first byte, or
 * AV_NOPTS_VALUE to continue from the running pv->pts_next.
 * Gives up after 256 consecutive calls that consume input but produce no
 * frame, or on any decode error.
 */
static void decodeAudio(hb_audio_t *audio, hb_work_private_t *pv, uint8_t *data,
                        int size, int64_t pts)
{
    AVCodecContext *context = pv->context;
    int loop_limit = 256;
    int pos = 0;

    // If we are given a pts, use it; but don't lose partial ticks.
    if (pts != AV_NOPTS_VALUE && (int64_t)pv->pts_next != pts)
        pv->pts_next = pts;
    while (pos < size)
    {
        int got_frame;
        AVPacket avp;

        av_init_packet(&avp);
        avp.data = data + pos;
        avp.size = size - pos;
        avp.pts  = pv->pts_next;
        avp.dts  = AV_NOPTS_VALUE;

        int len = avcodec_decode_audio4(context, pv->frame, &got_frame, &avp);
        if ((len < 0) || (!got_frame && !(loop_limit--)))
        {
            // Decode error, or too many no-output iterations: bail out.
            return;
        }
        else
        {
            loop_limit = 256;
        }

        pos += len;

        if (got_frame)
        {
            hb_buffer_t *out;
            int samplerate;
            // libavcodec doesn't yet consistently set frame->sample_rate
            if (pv->frame->sample_rate != 0)
            {
                samplerate = pv->frame->sample_rate;
            }
            else
            {
                samplerate = context->sample_rate;
            }
            // Frame duration in 90kHz ticks.
            double duration = (90000. * pv->frame->nb_samples /
                               (double)samplerate);

            if (audio->config.out.codec & HB_ACODEC_PASS_FLAG)
            {
                // Note that even though we are doing passthru, we had to decode
                // so that we know the stop time and the pts of the next audio
                // packet.
                out = hb_buffer_init(avp.size);
                memcpy(out->data, avp.data, avp.size);
            }
            else
            {
                AVFrameSideData *side_data;
                if ((side_data =
                     av_frame_get_side_data(pv->frame,
                                            AV_FRAME_DATA_DOWNMIX_INFO)) != NULL)
                {
                    // The stream carries explicit downmix coefficients;
                    // pick the Lt/Rt pair for Dolby-style mixdowns.
                    double surround_mix_level, center_mix_level;
                    AVDownmixInfo *downmix_info = (AVDownmixInfo*)side_data->data;
                    if (audio->config.out.mixdown == HB_AMIXDOWN_DOLBY ||
                        audio->config.out.mixdown == HB_AMIXDOWN_DOLBYPLII)
                    {
                        surround_mix_level = downmix_info->surround_mix_level_ltrt;
                        center_mix_level   = downmix_info->center_mix_level_ltrt;
                    }
                    else
                    {
                        surround_mix_level = downmix_info->surround_mix_level;
                        center_mix_level   = downmix_info->center_mix_level;
                    }
                    hb_audio_resample_set_mix_levels(pv->resample,
                                                     surround_mix_level,
                                                     center_mix_level,
                                                     downmix_info->lfe_mix_level);
                }
                hb_audio_resample_set_channel_layout(pv->resample,
                                                     pv->frame->channel_layout);
                hb_audio_resample_set_sample_fmt(pv->resample,
                                                 pv->frame->format);
                if (hb_audio_resample_update(pv->resample))
                {
                    hb_log("decavcodec: hb_audio_resample_update() failed");
                    av_frame_unref(pv->frame);
                    return;
                }
                out = hb_audio_resample(pv->resample, pv->frame->extended_data,
                                        pv->frame->nb_samples);
            }
            av_frame_unref(pv->frame);

            if (out != NULL)
            {
                // Stamp the buffer and advance the running clock.
                out->s.start    = pv->pts_next;
                out->s.duration = duration;
                out->s.stop     = duration + pv->pts_next;
                pv->pts_next    = duration + pv->pts_next;
                hb_list_add(pv->list, out);
            }
        }
    }
}
HandBrake-0.10.2/libhb/fifo.c 0000664 0001752 0001752 00000064541 12463330511 016216 0 ustar handbrake handbrake /* fifo.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
#include "openclwrapper.h"
#ifndef SYS_DARWIN
#include
#endif
#define FIFO_TIMEOUT 200
//#define HB_FIFO_DEBUG 1
//#define HB_BUFFER_DEBUG 1
/* Fifo */
/* A thread-safe FIFO of hb_buffer_t chains, also reused as a buffer pool. */
struct hb_fifo_s
{
    hb_lock_t    * lock;         // guards every field below
    hb_cond_t    * cond_full;    // signaled when a full fifo drains past thresh
    int            wait_full;    // set while a writer waits on cond_full
    hb_cond_t    * cond_empty;   // signaled when data arrives in an empty fifo
    int            wait_empty;   // set while a reader waits on cond_empty
    uint32_t       capacity;     // max number of queued buffers
    uint32_t       thresh;       // writers wake when size == capacity - thresh
    uint32_t       size;         // current number of queued buffers
    uint32_t       buffer_size;  // fixed element size when used as a pool, else 0
    hb_buffer_t  * first;        // head of the singly-linked buffer chain
    hb_buffer_t  * last;         // tail of the chain (valid when size > 0)

#if defined(HB_FIFO_DEBUG)
    // Fifo list for debugging
    hb_fifo_t    * next;
#endif
};
#if defined(HB_FIFO_DEBUG)
// Dummy head node for the global list of live fifos (debug builds only).
// Real fifos are chained off .next by fifo_list_add()/fifo_list_rem().
static hb_fifo_t fifo_list =
{
    .next = NULL
};
#endif
/* we round the requested buffer size up to the next power of 2 so there can
* be at most 32 possible pools when the size is a 32 bit int. To avoid a lot
* of slow & error-prone run-time checking we allow for all 32. */
#define MAX_BUFFER_POOLS 32
#define BUFFER_POOL_FIRST 10
#define BUFFER_POOL_LAST 25
/* the buffer pool only exists to avoid the two malloc and two free calls that
* it would otherwise take to allocate & free a buffer. but we don't want to
* tie up a lot of memory in the pool because this allocator isn't as general
* as malloc so memory tied up here puts more pressure on the malloc pool.
* A pool of 16 elements will avoid 94% of the malloc/free calls without wasting
* too much memory. */
#define BUFFER_POOL_MAX_ELEMENTS 32
/* Global buffer-pool state: one recycling fifo per power-of-two size class. */
struct hb_buffer_pools_s
{
    int64_t allocated;                  // total bytes currently handed out
    hb_lock_t *lock;                    // guards 'allocated' (and alloc_list)
    hb_fifo_t *pool[MAX_BUFFER_POOLS];  // pool[i] serves requests up to 2^i bytes
#if defined(HB_BUFFER_DEBUG)
    hb_list_t *alloc_list;              // all live buffers, for leak reports
#endif
} buffers;
/* hb_buffer_pool_init: create the global recycling pools.  Pools exist for
 * sizes 2^BUFFER_POOL_FIRST .. 2^BUFFER_POOL_LAST; larger requests fall
 * through to plain malloc, smaller ones share the 2^10 pool. */
void hb_buffer_pool_init( void )
{
    int shift;

    buffers.lock      = hb_lock_init();
    buffers.allocated = 0;

#if defined(HB_BUFFER_DEBUG)
    buffers.alloc_list = hb_list_init();
#endif

    // The smallest bucket takes every request below 2^10 as well, so give
    // it a proportionally deeper queue than the other buckets.
    buffers.pool[BUFFER_POOL_FIRST] =
        hb_fifo_init(BUFFER_POOL_MAX_ELEMENTS*10, 1);
    buffers.pool[BUFFER_POOL_FIRST]->buffer_size = 1 << 10;

    // Alias the sub-2^10 slots to that shared smallest bucket.
    for ( shift = 1; shift < BUFFER_POOL_FIRST; ++shift )
    {
        buffers.pool[shift] = buffers.pool[BUFFER_POOL_FIRST];
    }

    // One dedicated pool per remaining power-of-two size class.
    for ( shift = BUFFER_POOL_FIRST + 1; shift <= BUFFER_POOL_LAST; ++shift )
    {
        buffers.pool[shift] = hb_fifo_init(BUFFER_POOL_MAX_ELEMENTS, 1);
        buffers.pool[shift]->buffer_size = 1 << shift;
    }
}
#if defined(HB_FIFO_DEBUG)
/* dump_fifo: print every buffer in the fifo (pointer, size, alloc) to
 * stderr, followed by a blank line; silent when the fifo is empty. */
static void dump_fifo(hb_fifo_t * f)
{
    hb_buffer_t * cur = f->first;

    if (cur == NULL)
    {
        return;
    }
    do
    {
        fprintf(stderr, "%p:%d:%d\n", cur, cur->size, cur->alloc);
        cur = cur->next;
    } while (cur != NULL);
    fprintf(stderr, "\n");
}
/* fifo_list_add: push f onto the head of the global debug fifo list. */
static void fifo_list_add( hb_fifo_t * f )
{
    f->next        = fifo_list.next;
    fifo_list.next = f;
}
/* fifo_list_rem: unlink f from the global debug fifo list; no-op when f
 * is not on the list. */
static void fifo_list_rem( hb_fifo_t * f )
{
    hb_fifo_t *prev = &fifo_list;

    // Walk until prev->next is either f or the end of the list.
    while ( prev->next != NULL && prev->next != f )
    {
        prev = prev->next;
    }
    if ( prev->next == f )
    {
        prev->next = f->next;
    }
}
// These routines are useful for finding and debugging problems
// with the fifos and buffer pools
/* buffer_pool_validate: debug check that every buffer queued in pool fifo f
 * still carries the pool's element size.  On mismatch it dumps the fifo and
 * deliberately writes through a null pointer to crash on the spot. */
static void buffer_pool_validate( hb_fifo_t * f )
{
    hb_buffer_t *b;

    hb_lock( f->lock );
    b = f->first;
    while (b)
    {
        if (b->alloc != f->buffer_size)
        {
            fprintf(stderr, "Invalid buffer pool size! buf %p size %d pool size %d\n", b, b->alloc, f->buffer_size);
            dump_fifo( f );
            // Intentional null write: halt immediately in a debugger at the
            // point of corruption (debug builds only).
            *(char*)0 = 1;
        }
        b = b->next;
    }
    hb_unlock( f->lock );
}
/* buffer_pools_validate: run buffer_pool_validate() over every dedicated
 * pool bucket. */
static void buffer_pools_validate( void )
{
    int idx;

    for ( idx = BUFFER_POOL_FIRST; idx <= BUFFER_POOL_LAST; ++idx )
    {
        buffer_pool_validate( buffers.pool[idx] );
    }
}
/* fifo_list_validate: exhaustive debug consistency check over every live
 * fifo: verifies no buffer appears twice within a fifo or across fifos, and
 * that each fifo's 'size' matches its actual chain length.  Any violation
 * dumps the offending fifo and crashes via a deliberate null write. */
void fifo_list_validate( void )
{
    hb_fifo_t *next = fifo_list.next;
    hb_fifo_t *m;
    hb_buffer_t *b, *c;
    int count;

    buffer_pools_validate();
    while ( next )
    {
        count = 0;
        hb_lock( next->lock );
        b = next->first;

        // Count the number of entries in this fifo
        while (b)
        {
            c = b->next;
            // check that the current buffer is not duplicated in this fifo
            while (c)
            {
                if (c == b)
                {
                    fprintf(stderr, "Duplicate buffer in fifo!\n");
                    dump_fifo(next);
                    // Intentional crash for debugging (see above).
                    *(char*)0 = 1;
                }
                c = c->next;
            }

            // check that the current buffer is not duplicated in another fifo
            m = next->next;
            while (m)
            {
                hb_lock( m->lock );
                c = m->first;
                while (c)
                {
                    if (c == b)
                    {
                        fprintf(stderr, "Duplicate buffer in another fifo!\n");
                        dump_fifo(next);
                        // Intentional crash for debugging (see above).
                        *(char*)0 = 1;
                    }
                    c = c->next;
                }
                hb_unlock( m->lock );
                m = m->next;
            }

            count++;
            b = b->next;
        }

        if ( count != next->size )
        {
            fprintf(stderr, "Invalid fifo size! count %d size %d\n", count, next->size);
            dump_fifo(next);
            // Intentional crash for debugging (see above).
            *(char*)0 = 1;
        }
        hb_unlock( next->lock );

        next = next->next;
    }
}
#endif
/* hb_buffer_pool_free: drain every pool, freeing each pooled buffer (via the
 * OpenCL unmapper when the buffer is CL-mapped, plain free() otherwise), log
 * allocation/leak statistics, and reset the allocation counter.
 * NOTE(review): inside the HB_BUFFER_DEBUG block a second 'b' shadows the
 * outer declaration — harmless but worth knowing when reading this code. */
void hb_buffer_pool_free( void )
{
    int i;
    int count;
    int64_t freed = 0;
    hb_buffer_t *b;

    hb_lock(buffers.lock);

#if defined(HB_BUFFER_DEBUG)
    hb_deep_log(2, "leaked %d buffers", hb_list_count(buffers.alloc_list));
    for (i = 0; i < hb_list_count(buffers.alloc_list); i++)
    {
        hb_buffer_t *b = hb_list_item(buffers.alloc_list, i);
        hb_deep_log(2, "leaked buffer %p type %d size %d alloc %d",
                    b, b->s.type, b->size, b->alloc);
    }
#endif

    for( i = BUFFER_POOL_FIRST; i <= BUFFER_POOL_LAST; ++i)
    {
        count = 0;
        while( ( b = hb_fifo_get(buffers.pool[i]) ) )
        {
            if( b->data )
            {
                freed += b->alloc;
                if (b->cl.buffer != NULL)
                {
                    /* OpenCL */
                    if (hb_cl_free_mapped_buffer(b->cl.buffer, b->data) == 0)
                    {
                        hb_log("hb_buffer_pool_free: bad free: %p -> buffer %p map %p",
                               b, b->cl.buffer, b->data);
                    }
                }
                else
                {
                    free(b->data);
                }
            }
            free( b );
            count++;
        }
        if ( count )
        {
            hb_deep_log( 2, "Freed %d buffers of size %d", count,
                         buffers.pool[i]->buffer_size);
        }
    }

    hb_deep_log( 2, "Allocated %"PRId64" bytes of buffers on this pass and Freed %"PRId64" bytes, "
                 "%"PRId64" bytes leaked", buffers.allocated, freed, buffers.allocated - freed);
    buffers.allocated = 0;
    hb_unlock(buffers.lock);
}
/* size_to_pool: map a byte count to the smallest pool bucket that can hold
 * it, or NULL when the request exceeds the largest pool (caller must then
 * use plain malloc/free). */
static hb_fifo_t *size_to_pool( int size )
{
    int shift;

    for ( shift = BUFFER_POOL_FIRST; shift <= BUFFER_POOL_LAST; ++shift )
    {
        if ( (1 << shift) >= size )
        {
            return buffers.pool[shift];
        }
    }
    return NULL;
}
/* hb_buffer_init_internal: allocate (or recycle from the pools) a buffer of
 * at least 'size' bytes.  When needsMapped is set the storage must be an
 * OpenCL-mapped buffer; a recycled non-mapped buffer is discarded rather
 * than returned.  Returns NULL on allocation failure. */
hb_buffer_t * hb_buffer_init_internal( int size , int needsMapped )
{
    hb_buffer_t * b;
    // Certain libraries (hrm ffmpeg) expect buffers passed to them to
    // end on certain alignments (ffmpeg is 8). So allocate some extra bytes.
    // Note that we can't simply align the end of our buffer because
    // sometimes we feed data to these libraries starting from arbitrary
    // points within the buffer.
    int alloc = size + 16;
    hb_fifo_t *buffer_pool = size_to_pool( alloc );

    if( buffer_pool )
    {
        b = hb_fifo_get( buffer_pool );

        /* OpenCL */
        if (b != NULL && needsMapped && b->cl.buffer == NULL)
        {
            // We need a mapped OpenCL buffer and that is not
            // what we got out of the pool.
            // Ditch it; it will get replaced with what we need.
            if (b->data != NULL)
            {
                free(b->data);
            }
            free(b);
            b = NULL;
        }

        if( b )
        {
            /*
             * Zero the contents of the buffer, would be nice if we
             * didn't have to do this.
             */
            uint8_t *data = b->data;

            /* OpenCL: preserve the CL bookkeeping across the memset */
            cl_mem buffer         = b->cl.buffer;
            cl_event last_event   = b->cl.last_event;
            int loc               = b->cl.buffer_location;

            memset( b, 0, sizeof(hb_buffer_t) );
            b->alloc  = buffer_pool->buffer_size;
            b->size   = size;
            b->data   = data;
            b->s.start = AV_NOPTS_VALUE;
            b->s.stop  = AV_NOPTS_VALUE;
            b->s.renderOffset = AV_NOPTS_VALUE;

            /* OpenCL */
            b->cl.buffer          = buffer;
            b->cl.last_event      = last_event;
            b->cl.buffer_location = loc;

#if defined(HB_BUFFER_DEBUG)
            hb_lock(buffers.lock);
            hb_list_add(buffers.alloc_list, b);
            hb_unlock(buffers.lock);
#endif
            return( b );
        }
    }

    /*
     * No existing buffers, create a new one
     */
    if( !( b = calloc( sizeof( hb_buffer_t ), 1 ) ) )
    {
        hb_log( "out of memory" );
        return NULL;
    }

    b->size  = size;
    b->alloc = buffer_pool ? buffer_pool->buffer_size : alloc;

    if (size)
    {
        /* OpenCL */
        b->cl.last_event      = NULL;
        b->cl.buffer_location = HOST;

        /* OpenCL */
        if (needsMapped)
        {
            int status = hb_cl_create_mapped_buffer(&b->cl.buffer, &b->data, b->alloc);
            if (!status)
            {
                hb_error("Failed to map CL buffer");
                free(b);
                return NULL;
            }
        }
        else
        {
            b->cl.buffer = NULL;

// 16-byte alignment for SIMD consumers where the platform allows it
#if defined( SYS_DARWIN ) || defined( SYS_FREEBSD ) || defined( SYS_MINGW )
            b->data = malloc( b->alloc );
#elif defined( SYS_CYGWIN )
            /* FIXME */
            b->data = malloc( b->alloc + 17 );
#else
            b->data = memalign( 16, b->alloc );
#endif
        }

        if( !b->data )
        {
            hb_log( "out of memory" );
            free( b );
            return NULL;
        }

        hb_lock(buffers.lock);
        buffers.allocated += b->alloc;
        hb_unlock(buffers.lock);
    }
    b->s.start = AV_NOPTS_VALUE;
    b->s.stop  = AV_NOPTS_VALUE;
    b->s.renderOffset = AV_NOPTS_VALUE;
#if defined(HB_BUFFER_DEBUG)
    hb_lock(buffers.lock);
    hb_list_add(buffers.alloc_list, b);
    hb_unlock(buffers.lock);
#endif
    return b;
}
/* hb_buffer_init: allocate a plain (non-OpenCL-mapped) buffer of 'size'
 * bytes from the pool allocator. */
hb_buffer_t * hb_buffer_init( int size )
{
    const int needs_mapped = 0;
    return hb_buffer_init_internal( size, needs_mapped );
}
/* hb_buffer_realloc: grow b's storage to at least 'size' bytes (no-op when
 * it is already big enough and allocated).  The request is rounded up to the
 * containing pool bucket when one exists; requests beyond the largest pool
 * use the size as given.  On allocation failure the buffer is left intact.
 *
 * Fixes vs. the original: size_to_pool() returns NULL for requests larger
 * than the biggest pool, which was dereferenced unconditionally; and the
 * realloc() result was assigned without a check, leaking the old block and
 * storing NULL on failure. */
void hb_buffer_realloc( hb_buffer_t * b, int size )
{
    if ( size > b->alloc || b->data == NULL )
    {
        uint32_t orig = b->data != NULL ? b->alloc : 0;
        hb_fifo_t * pool = size_to_pool( size );

        // Round up to the pool's bucket size so the buffer can be recycled.
        if ( pool != NULL )
        {
            size = pool->buffer_size;
        }

        uint8_t * data = realloc( b->data, size );
        if ( data == NULL )
        {
            // Out of memory: keep the original (still valid) storage.
            return;
        }
        b->data  = data;
        b->alloc = size;

        hb_lock(buffers.lock);
        buffers.allocated += size - orig;
        hb_unlock(buffers.lock);
    }
}
/* hb_buffer_reduce: shrink a drastically oversized buffer (payload smaller
 * than 1/8 of its allocation) by copying the first 'size' bytes into a
 * right-sized buffer and swapping storage.  On allocation failure the
 * original (oversized but valid) buffer is kept — the original code would
 * have dereferenced the NULL result of hb_buffer_init(). */
void hb_buffer_reduce( hb_buffer_t * b, int size )
{
    if ( size < b->alloc / 8 || b->data == NULL )
    {
        hb_buffer_t * tmp = hb_buffer_init( size );
        if ( tmp == NULL )
        {
            // Out of memory: leave b untouched rather than crash.
            return;
        }

        // Swap storage so b owns the small allocation, then copy the
        // payload from the old (now tmp-owned) storage.
        hb_buffer_swap_copy( b, tmp );
        memcpy( b->data, tmp->data, size );
        tmp->next = NULL;
        hb_buffer_close( &tmp );
    }
}
/* hb_buffer_dup: deep-copy a buffer's payload and metadata (frame-buffer
 * plane pointers are rebuilt for the copy).  Returns NULL when src is NULL
 * or allocation fails.
 *
 * Fix vs. the original: the USE_QSV memcpy sat outside the 'if ( buf )'
 * guard, dereferencing NULL whenever hb_buffer_init() failed. */
hb_buffer_t * hb_buffer_dup( const hb_buffer_t * src )
{
    hb_buffer_t * buf;

    if ( src == NULL )
        return NULL;

    buf = hb_buffer_init( src->size );
    if ( buf )
    {
        memcpy( buf->data, src->data, src->size );
        buf->s = src->s;
        buf->f = src->f;
        if ( buf->s.type == FRAME_BUF )
            hb_buffer_init_planes( buf );
#ifdef USE_QSV
        // Only copy QSV side data once the allocation is known to succeed.
        memcpy(&buf->qsv_details, &src->qsv_details, sizeof(src->qsv_details));
#endif
    }

    return buf;
}
/* hb_buffer_copy: copy src's payload and metadata into dst.
 * Returns 0 on success, -1 when either buffer is missing or dst's payload
 * area is too small to hold src's data. */
int hb_buffer_copy(hb_buffer_t * dst, const hb_buffer_t * src)
{
    // Single combined guard: both buffers present and dst large enough.
    if (src == NULL || dst == NULL || dst->size < src->size)
        return -1;

    memcpy(dst->data, src->data, src->size);
    dst->s = src->s;
    dst->f = src->f;
    // Frame buffers carry per-plane pointers that must be rebuilt for dst.
    if (dst->s.type == FRAME_BUF)
    {
        hb_buffer_init_planes(dst);
    }
    return 0;
}
/* hb_buffer_init_planes_internal: lay the planes flagged in has_plane[]
 * contiguously over b->data, filling in each plane's pointer, strides,
 * dimensions and size. */
static void hb_buffer_init_planes_internal( hb_buffer_t * b, uint8_t * has_plane )
{
    uint8_t * next = b->data;
    int i;

    for( i = 0; i < 4; i++ )
    {
        if( !has_plane[i] )
        {
            continue;
        }
        b->plane[i].data          = next;
        b->plane[i].stride        = hb_image_stride( b->f.fmt, b->f.width, i );
        b->plane[i].height_stride = hb_image_height_stride( b->f.fmt, b->f.height, i );
        b->plane[i].width         = hb_image_width( b->f.fmt, b->f.width, i );
        b->plane[i].height        = hb_image_height( b->f.fmt, b->f.height, i );
        b->plane[i].size          = b->plane[i].stride * b->plane[i].height_stride;
        next += b->plane[i].size;
    }
}
/* hb_buffer_init_planes: rebuild b's plane table from its pixel format. */
void hb_buffer_init_planes( hb_buffer_t * b )
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(b->f.fmt);
    uint8_t present[4] = {0,};
    int comp;

    // Mark every plane referenced by any component of this pixel format.
    for( comp = 0; comp < 4; comp++ )
    {
        present[desc->comp[comp].plane] = 1;
    }
    hb_buffer_init_planes_internal( b, present );
}
// this routine gets a buffer for an uncompressed picture
// with pixel format pix_fmt and dimensions width x height.
hb_buffer_t * hb_frame_buffer_init( int pix_fmt, int width, int height )
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
hb_buffer_t * buf;
int p;
uint8_t has_plane[4] = {0,};
for( p = 0; p < 4; p++ )
{
has_plane[desc->comp[p].plane] = 1;
}
int size = 0;
for( p = 0; p < 4; p++ )
{
if ( has_plane[p] )
{
size += hb_image_stride( pix_fmt, width, p ) *
hb_image_height_stride( pix_fmt, height, p );
}
}
/* OpenCL */
buf = hb_buffer_init_internal(size , hb_use_buffers());
if( buf == NULL )
return NULL;
buf->s.type = FRAME_BUF;
buf->f.width = width;
buf->f.height = height;
buf->f.fmt = pix_fmt;
hb_buffer_init_planes_internal( buf, has_plane );
return buf;
}
// this routine reallocs a buffer for an uncompressed YUV420 video frame
// with dimensions width x height.
void hb_video_buffer_realloc( hb_buffer_t * buf, int width, int height )
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(buf->f.fmt);
int p;
uint8_t has_plane[4] = {0,};
for( p = 0; p < 4; p++ )
{
has_plane[desc->comp[p].plane] = 1;
}
int size = 0;
for( p = 0; p < 4; p++ )
{
if ( has_plane[p] )
{
size += hb_image_stride( buf->f.fmt, width, p ) *
hb_image_height_stride( buf->f.fmt, height, p );
}
}
hb_buffer_realloc(buf, size );
buf->f.width = width;
buf->f.height = height;
buf->size = size;
hb_buffer_init_planes_internal( buf, has_plane );
}
// this routine 'moves' data from src to dst by interchanging 'data',
// 'size' & 'alloc' between them and copying the rest of the fields
// from src to dst.
void hb_buffer_swap_copy( hb_buffer_t *src, hb_buffer_t *dst )
{
uint8_t *data = dst->data;
int size = dst->size;
int alloc = dst->alloc;
/* OpenCL */
cl_mem buffer = dst->cl.buffer;
cl_event last_event = dst->cl.last_event;
int loc = dst->cl.buffer_location;
*dst = *src;
src->data = data;
src->size = size;
src->alloc = alloc;
/* OpenCL */
src->cl.buffer = buffer;
src->cl.last_event = last_event;
src->cl.buffer_location = loc;
}
// Frees the specified buffer list.
void hb_buffer_close( hb_buffer_t ** _b )
{
hb_buffer_t * b = *_b;
while( b )
{
hb_buffer_t * next = b->next;
hb_fifo_t *buffer_pool = size_to_pool( b->alloc );
b->next = NULL;
#if defined(HB_BUFFER_DEBUG)
hb_lock(buffers.lock);
hb_list_rem(buffers.alloc_list, b);
hb_unlock(buffers.lock);
#endif
// Close any attached subtitle buffers
hb_buffer_close( &b->sub );
if( buffer_pool && b->data && !hb_fifo_is_full( buffer_pool ) )
{
hb_fifo_push_head( buffer_pool, b );
b = next;
continue;
}
// either the pool is full or this size doesn't use a pool
// free the buf
if( b->data )
{
if (b->cl.buffer != NULL)
{
/* OpenCL */
if (hb_cl_free_mapped_buffer(b->cl.buffer, b->data) == 0)
{
hb_log("hb_buffer_pool_free: bad free %p -> buffer %p map %p",
b, b->cl.buffer, b->data);
}
}
else
{
free(b->data);
}
hb_lock(buffers.lock);
buffers.allocated -= b->alloc;
hb_unlock(buffers.lock);
}
free( b );
b = next;
}
*_b = NULL;
}
/* hb_buffer_move_subs: transfer ownership of src's subtitle chain (and,
 * with QSV builds, its QSV side data) to dst. */
void hb_buffer_move_subs( hb_buffer_t * dst, hb_buffer_t * src )
{
#ifdef USE_QSV
    // QSV sidecar details accompany the subtitle hand-off.
    memcpy(&dst->qsv_details, &src->qsv_details, sizeof(src->qsv_details));
#endif
    // dst now owns the subtitle chain; src must not reference it anymore.
    dst->sub = src->sub;
    src->sub = NULL;
}
/* hb_buffer_to_image: copy a frame buffer's pixel data and plane layout
 * into a standalone hb_image_t.  Returns NULL on allocation failure.
 *
 * Fix vs. the original: the calloc() result was used unchecked, so an OOM
 * dereferenced NULL at 'image->data = ...'. */
hb_image_t * hb_buffer_to_image(hb_buffer_t *buf)
{
    hb_image_t *image = calloc(1, sizeof(hb_image_t));
    if (image == NULL)
    {
        return NULL;
    }

// 16-byte alignment where the platform allows it (matches buffer alloc)
#if defined( SYS_DARWIN ) || defined( SYS_FREEBSD ) || defined( SYS_MINGW )
    image->data  = malloc( buf->size );
#elif defined( SYS_CYGWIN )
    /* FIXME */
    image->data  = malloc( buf->size + 17 );
#else
    image->data  = memalign( 16, buf->size );
#endif
    if (image->data == NULL)
    {
        free(image);
        return NULL;
    }

    image->format = buf->f.fmt;
    image->width = buf->f.width;
    image->height = buf->f.height;
    memcpy(image->data, buf->data, buf->size);

    // Rebuild the plane table pointing into the image's own storage,
    // mirroring the source buffer's layout.
    int p;
    uint8_t *data = image->data;
    for (p = 0; p < 4; p++)
    {
        image->plane[p].data = data;
        image->plane[p].width = buf->plane[p].width;
        image->plane[p].height = buf->plane[p].height;
        image->plane[p].stride = buf->plane[p].stride;
        image->plane[p].height_stride = buf->plane[p].height_stride;
        image->plane[p].size = buf->plane[p].size;
        data += image->plane[p].size;
    }
    return image;
}
/* hb_image_close: free an image created by hb_buffer_to_image() and NULL
 * the caller's pointer; safe on NULL handle or NULL image. */
void hb_image_close(hb_image_t **_image)
{
    if (_image == NULL || *_image == NULL)
    {
        return;
    }

    hb_image_t * image = *_image;
    free(image->data);
    free(image);
    *_image = NULL;
}
/* hb_fifo_init: create a fifo holding at most 'capacity' buffers; writers
 * blocked on a full fifo are woken once it drains to capacity - thresh.
 * Returns NULL on allocation failure (the original dereferenced the
 * unchecked calloc() result). */
hb_fifo_t * hb_fifo_init( int capacity, int thresh )
{
    hb_fifo_t * f;

    f = calloc( sizeof( hb_fifo_t ), 1 );
    if ( f == NULL )
    {
        return NULL;
    }

    f->lock       = hb_lock_init();
    f->cond_full  = hb_cond_init();
    f->cond_empty = hb_cond_init();
    f->capacity   = capacity;
    f->thresh     = thresh;
    f->buffer_size = 0;

#if defined(HB_FIFO_DEBUG)
    // Add the fifo to the global fifo list
    fifo_list_add( f );
#endif
    return f;
}
/* hb_fifo_size_bytes: total payload bytes across all queued buffers. */
int hb_fifo_size_bytes( hb_fifo_t * f )
{
    int total = 0;
    hb_buffer_t * cur;

    hb_lock( f->lock );
    for ( cur = f->first; cur != NULL; cur = cur->next )
    {
        total += cur->size;
    }
    hb_unlock( f->lock );

    return total;
}
/* hb_fifo_size: number of buffers currently queued. */
int hb_fifo_size( hb_fifo_t * f )
{
    hb_lock( f->lock );
    int count = f->size;
    hb_unlock( f->lock );

    return count;
}
/* hb_fifo_is_full: nonzero when the fifo holds capacity or more buffers. */
int hb_fifo_is_full( hb_fifo_t * f )
{
    hb_lock( f->lock );
    int full = !( f->size < f->capacity );
    hb_unlock( f->lock );

    return full;
}
/* hb_fifo_percent_full: fraction of capacity in use, in [0, 1].
 * Fix vs. the original: size and capacity are both uint32_t, so
 * 'f->size / f->capacity' performed integer division and always yielded
 * 0 (or 1 when full) before the float assignment.  Cast before dividing. */
float hb_fifo_percent_full( hb_fifo_t * f )
{
    float ret;

    hb_lock( f->lock );
    ret = (float)f->size / (float)f->capacity;
    hb_unlock( f->lock );

    return ret;
}
// Pulls the first packet out of this FIFO, blocking until such a packet is available.
// Returns NULL if this FIFO has been closed or flushed.
hb_buffer_t * hb_fifo_get_wait( hb_fifo_t * f )
{
hb_buffer_t * b;
hb_lock( f->lock );
if( f->size < 1 )
{
f->wait_empty = 1;
hb_cond_timedwait( f->cond_empty, f->lock, FIFO_TIMEOUT );
if( f->size < 1 )
{
hb_unlock( f->lock );
return NULL;
}
}
b = f->first;
f->first = b->next;
b->next = NULL;
f->size -= 1;
if( f->wait_full && f->size == f->capacity - f->thresh )
{
f->wait_full = 0;
hb_cond_signal( f->cond_full );
}
hb_unlock( f->lock );
return b;
}
// Pulls a packet out of this FIFO, or returns NULL if no packet is available.
hb_buffer_t * hb_fifo_get( hb_fifo_t * f )
{
hb_buffer_t * b;
hb_lock( f->lock );
if( f->size < 1 )
{
hb_unlock( f->lock );
return NULL;
}
b = f->first;
f->first = b->next;
b->next = NULL;
f->size -= 1;
if( f->wait_full && f->size == f->capacity - f->thresh )
{
f->wait_full = 0;
hb_cond_signal( f->cond_full );
}
hb_unlock( f->lock );
return b;
}
// Returns the first packet in the FIFO without removing it, waiting up to
// FIFO_TIMEOUT ms for one to arrive; NULL when the fifo stays empty.
hb_buffer_t * hb_fifo_see_wait( hb_fifo_t * f )
{
    hb_buffer_t * b;

    hb_lock( f->lock );
    if( f->size < 1 )
    {
        f->wait_empty = 1;
        hb_cond_timedwait( f->cond_empty, f->lock, FIFO_TIMEOUT );
        if( f->size < 1 )
        {
            // Still empty after the timed wait: give up.
            hb_unlock( f->lock );
            return NULL;
        }
    }
    // Peek only — the buffer stays queued.
    b = f->first;
    hb_unlock( f->lock );

    return b;
}
// Returns the first packet in the specified FIFO.
// If the FIFO is empty, returns NULL.
hb_buffer_t * hb_fifo_see( hb_fifo_t * f )
{
hb_buffer_t * b;
hb_lock( f->lock );
if( f->size < 1 )
{
hb_unlock( f->lock );
return NULL;
}
b = f->first;
hb_unlock( f->lock );
return b;
}
// Peeks at the second packet in the FIFO without removing it; NULL when
// fewer than two packets are queued.
hb_buffer_t * hb_fifo_see2( hb_fifo_t * f )
{
    hb_buffer_t * second = NULL;

    hb_lock( f->lock );
    if( f->size >= 2 )
    {
        second = f->first->next;
    }
    hb_unlock( f->lock );

    return second;
}
// Waits until the specified FIFO is no longer full or until FIFO_TIMEOUT milliseconds have elapsed.
// Returns whether the FIFO is non-full upon return.
int hb_fifo_full_wait( hb_fifo_t * f )
{
int result;
hb_lock( f->lock );
if( f->size >= f->capacity )
{
f->wait_full = 1;
hb_cond_timedwait( f->cond_full, f->lock, FIFO_TIMEOUT );
}
result = ( f->size < f->capacity );
hb_unlock( f->lock );
return result;
}
// Pushes the specified buffer onto the specified FIFO,
// blocking until the FIFO has space available.
void hb_fifo_push_wait( hb_fifo_t * f, hb_buffer_t * b )
{
if( !b )
{
return;
}
hb_lock( f->lock );
if( f->size >= f->capacity )
{
f->wait_full = 1;
hb_cond_timedwait( f->cond_full, f->lock, FIFO_TIMEOUT );
}
if( f->size > 0 )
{
f->last->next = b;
}
else
{
f->first = b;
}
f->last = b;
f->size += 1;
while( f->last->next )
{
f->size += 1;
f->last = f->last->next;
}
if( f->wait_empty && f->size >= 1 )
{
f->wait_empty = 0;
hb_cond_signal( f->cond_empty );
}
hb_unlock( f->lock );
}
// Appends the specified packet list to the end of the specified FIFO.
void hb_fifo_push( hb_fifo_t * f, hb_buffer_t * b )
{
if( !b )
{
return;
}
hb_lock( f->lock );
if( f->size > 0 )
{
f->last->next = b;
}
else
{
f->first = b;
}
f->last = b;
f->size += 1;
while( f->last->next )
{
f->size += 1;
f->last = f->last->next;
}
if( f->wait_empty && f->size >= 1 )
{
f->wait_empty = 0;
hb_cond_signal( f->cond_empty );
}
hb_unlock( f->lock );
}
// Prepends the specified packet list to the start of the specified FIFO.
void hb_fifo_push_head( hb_fifo_t * f, hb_buffer_t * b )
{
hb_buffer_t * tmp;
uint32_t size = 0;
if( !b )
{
return;
}
hb_lock( f->lock );
/*
* If there are a chain of buffers prepend the lot
*/
tmp = b;
while( tmp->next )
{
tmp = tmp->next;
size += 1;
}
if( f->size > 0 )
{
tmp->next = f->first;
}
else
{
f->last = tmp;
}
f->first = b;
f->size += ( size + 1 );
hb_unlock( f->lock );
}
// Pushes a list of packets onto the specified FIFO as a single element.
void hb_fifo_push_list_element( hb_fifo_t *fifo, hb_buffer_t *buffer_list )
{
hb_buffer_t *container = hb_buffer_init( 0 );
// XXX: Using an arbitrary hb_buffer_t pointer (other than 'next')
// to carry the list inside a single "container" buffer
container->sub = buffer_list;
hb_fifo_push( fifo, container );
}
// Removes a list of packets from the specified FIFO that were stored as a single element.
hb_buffer_t *hb_fifo_get_list_element( hb_fifo_t *fifo )
{
hb_buffer_t *container = hb_fifo_get( fifo );
// XXX: Using an arbitrary hb_buffer_t pointer (other than 'next')
// to carry the list inside a single "container" buffer
hb_buffer_t *buffer_list = container->sub;
hb_buffer_close( &container );
return buffer_list;
}
/* hb_fifo_close: destroy a fifo, freeing any buffers still queued, and
 * NULL the caller's handle; safe on a NULL fifo. */
void hb_fifo_close( hb_fifo_t ** _f )
{
    hb_fifo_t   * f = *_f;
    hb_buffer_t * b;

    if ( f == NULL )
    {
        return;
    }

    hb_deep_log( 2, "fifo_close: trashing %d buffer(s)", hb_fifo_size( f ) );
    // Drain and free everything still queued.
    while( ( b = hb_fifo_get( f ) ) != NULL )
    {
        hb_buffer_close( &b );
    }

    hb_cond_close( &f->cond_full );
    hb_cond_close( &f->cond_empty );
    hb_lock_close( &f->lock );

#if defined(HB_FIFO_DEBUG)
    // Remove the fifo from the global fifo list
    fifo_list_rem( f );
#endif

    free( f );
    *_f = NULL;
}
/* hb_fifo_flush: discard all queued buffers, then kick both condition
 * variables so any blocked reader/writer re-checks state promptly. */
void hb_fifo_flush( hb_fifo_t * f )
{
    hb_buffer_t * b;

    while( ( b = hb_fifo_get( f ) ) != NULL )
    {
        hb_buffer_close( &b );
    }

    hb_lock( f->lock );
    hb_cond_signal( f->cond_empty );
    hb_cond_signal( f->cond_full );
    hb_unlock( f->lock );
}
HandBrake-0.10.2/libhb/enclame.c 0000664 0001752 0001752 00000015050 12463330511 016666 0 ustar handbrake handbrake /* enclame.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
#include "lame/lame.h"
int enclameInit( hb_work_object_t *, hb_job_t * );
int enclameWork( hb_work_object_t *, hb_buffer_t **, hb_buffer_t ** );
void enclameClose( hb_work_object_t * );
hb_work_object_t hb_enclame =
{
WORK_ENCLAME,
"MP3 encoder (libmp3lame)",
enclameInit,
enclameWork,
enclameClose
};
/* Private state for the libmp3lame encoder work object. */
struct hb_work_private_s
{
    hb_job_t           * job;   // owning job (stored at init)

    /* LAME handle */
    lame_global_flags * lame;

    int             out_discrete_channels; // channel count after mixdown
    unsigned long   input_samples;  // samples consumed per MP3 frame (1152 * channels)
    unsigned long   output_bytes;   // worst-case encoded frame size (LAME_MAXMP3BUFFER)
    uint8_t       * buf;            // staging buffer for one frame of interleaved floats

    hb_list_t     * list;           // queue of incoming, not-yet-encoded audio
    int64_t         pts;            // stop time of the last emitted frame
};
/* enclameInit: open and configure a libmp3lame encoder for this job's audio
 * settings (ABR when a bitrate is given, VBR when a quality is given) and
 * allocate the per-frame staging state.  Returns 0. */
int enclameInit( hb_work_object_t * w, hb_job_t * job )
{
    hb_work_private_t * pv = calloc( 1, sizeof( hb_work_private_t ) );
    hb_audio_t * audio = w->audio;

    w->private_data = pv;
    pv->job = job;

    hb_log( "enclame: opening libmp3lame" );

    pv->lame = lame_init();
    // use ABR
    // input samples are floats in [-1,1]; scale up to 16-bit range for lame
    lame_set_scale( pv->lame, 32768.0 );
    if( audio->config.out.compression_level >= 0 )
    {
        lame_set_quality( pv->lame, audio->config.out.compression_level );
    }
    if( audio->config.out.bitrate > 0 )
    {
        lame_set_VBR( pv->lame, vbr_abr );
        lame_set_VBR_mean_bitrate_kbps( pv->lame, audio->config.out.bitrate );
    }
    else if( audio->config.out.quality >= 0 )
    {
        lame_set_brate( pv->lame, 0 );
        lame_set_VBR( pv->lame, vbr_default );
        lame_set_VBR_quality( pv->lame, audio->config.out.quality );
    }
    lame_set_in_samplerate( pv->lame, audio->config.out.samplerate );
    lame_set_out_samplerate( pv->lame, audio->config.out.samplerate );

    pv->out_discrete_channels = hb_mixdown_get_discrete_channel_count( audio->config.out.mixdown );

    // Lame's default encoding mode is JOINT_STEREO.  This subtracts signal
    // that is "common" to left and right (within some threshold) and encodes
    // it separately.  This improves quality at low bitrates, but hurts
    // imaging (channel separation) at higher bitrates.  So if the bitrate
    // is sufficiently high, use regular STEREO mode.
    if ( pv->out_discrete_channels == 1 )
    {
        lame_set_mode( pv->lame, MONO );
        lame_set_num_channels( pv->lame, 1 );
    }
    else if ( audio->config.out.bitrate >= 128 )
    {
        lame_set_mode( pv->lame, STEREO );
    }
    lame_init_params( pv->lame );

    // One MP3 frame is 1152 samples per channel.
    pv->input_samples = 1152 * pv->out_discrete_channels;
    pv->output_bytes = LAME_MAXMP3BUFFER;
    pv->buf = malloc( pv->input_samples * sizeof( float ) );
    audio->config.out.samples_per_frame = 1152;

    pv->list = hb_list_init();
    pv->pts = AV_NOPTS_VALUE;

    return 0;
}
/***********************************************************************
* Close
***********************************************************************
*
**********************************************************************/
/* enclameClose: release the LAME handle and all per-job encoder state.
 * A NULL-guard is added so a double close (or close after a failed init)
 * no longer dereferences NULL private data. */
void enclameClose( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;

    if ( pv == NULL )
    {
        // Already closed (or never initialized): nothing to do.
        return;
    }

    lame_close( pv->lame );
    hb_list_empty( &pv->list );
    free( pv->buf );
    free( pv );
    w->private_data = NULL;
}
/***********************************************************************
* Encode
***********************************************************************
*
**********************************************************************/
/* Encode: pull one MP3 frame's worth of interleaved float samples (1152 per
 * channel) off pv->list, de-interleave into per-channel arrays, and encode
 * with lame.  Returns the encoded buffer, recurses when lame buffered the
 * frame without emitting data, and returns NULL when there isn't enough
 * input queued or encoding fails. */
static hb_buffer_t * Encode( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;
    hb_audio_t * audio = w->audio;
    hb_buffer_t * buf;
    float samples[2][1152];
    uint64_t pts, pos;
    int i, j;

    // Not enough queued input for a full 1152-sample frame yet.
    if( hb_list_bytes( pv->list ) < pv->input_samples * sizeof( float ) )
    {
        return NULL;
    }

    hb_list_getbytes( pv->list, pv->buf, pv->input_samples * sizeof( float ),
                      &pts, &pos);

    // De-interleave: pv->buf holds channel-interleaved floats.
    for( i = 0; i < 1152; i++ )
    {
        for( j = 0; j < pv->out_discrete_channels; j++ )
        {
            samples[j][i] = ((float *) pv->buf)[(pv->out_discrete_channels * i + j)];
        }
    }

    buf = hb_buffer_init( pv->output_bytes );
    // Start time = chunk pts plus the offset of 'pos' bytes converted to
    // 90kHz ticks via channels, sample size and sample rate.
    buf->s.start = pts + 90000 * pos / pv->out_discrete_channels / sizeof( float ) / audio->config.out.samplerate;
    buf->s.duration = (double)90000 * 1152 / audio->config.out.samplerate;
    buf->s.stop  = buf->s.start + buf->s.duration;
    pv->pts = buf->s.stop;
    buf->size = lame_encode_buffer_float(
            pv->lame, samples[0], samples[1],
            1152, buf->data, LAME_MAXMP3BUFFER );

    buf->s.type = AUDIO_BUF;
    buf->s.frametype = HB_FRAME_AUDIO;

    if( !buf->size )
    {
        /* Encoding was successful but we got no data. Try to encode
           more */
        hb_buffer_close( &buf );
        return Encode( w );
    }
    else if( buf->size < 0 )
    {
        hb_log( "enclame: lame_encode_buffer failed" );
        hb_buffer_close( &buf );
        return NULL;
    }
    return buf;
}
/***********************************************************************
* Work
***********************************************************************
*
**********************************************************************/
/*
 * Work-object entry point: queue the incoming audio buffer and emit as
 * many encoded MP3 buffers as are available.
 *
 * A zero-size input buffer signals EOF: LAME is flushed, the flushed
 * data (if any) is sent downstream with the EOF buffer chained after
 * it, and HB_WORK_DONE is returned.
 */
int enclameWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                 hb_buffer_t ** buf_out )
{
    hb_work_private_t * pv = w->private_data;
    hb_audio_t * audio = w->audio;
    hb_buffer_t * in = *buf_in;
    hb_buffer_t * buf;

    if ( (*buf_in)->size <= 0 )
    {
        /* EOF on input - send it downstream & say we're done */
        buf = hb_buffer_init( pv->output_bytes );
        buf->size = lame_encode_flush( pv->lame, buf->data, LAME_MAXMP3BUFFER );
        /* Timestamp the flushed frame right after the last encoded one. */
        buf->s.start = pv->pts;
        buf->s.stop = buf->s.start + 90000 * 1152 / audio->config.out.samplerate;

        buf->s.type = AUDIO_BUF;
        buf->s.frametype = HB_FRAME_AUDIO;
        if( buf->size <= 0 )
        {
            /* Nothing left in the encoder; drop the empty buffer. */
            hb_buffer_close( &buf );
        }

        // Add the flushed data
        *buf_out = buf;

        // Add the eof
        if ( buf )
        {
            buf->next = in;
        }
        else
        {
            *buf_out = in;
        }
        /* Ownership of 'in' transferred downstream. */
        *buf_in = NULL;
        return HB_WORK_DONE;
    }

    /* Normal path: fifo takes ownership of the input buffer, then emit
     * every complete frame currently available as a linked list. */
    hb_list_add( pv->list, *buf_in );
    *buf_in = NULL;

    *buf_out = buf = Encode( w );

    while( buf )
    {
        buf->next = Encode( w );
        buf = buf->next;
    }

    return HB_WORK_OK;
}
HandBrake-0.10.2/libhb/muxavformat.c 0000664 0001752 0001752 00000131224 12531124076 017640 0 ustar handbrake handbrake /* muxavformat.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: <http://handbrake.fr/>.
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include <ogg/ogg.h>

#include "libavformat/avformat.h"
#include "libavutil/avstring.h"
#include "libavutil/intreadwrite.h"

#include "hb.h"
#include "lang.h"
/* Per-track muxer state: one instance per video/audio/subtitle track. */
struct hb_mux_data_s
{
    enum
    {
        MUX_TYPE_VIDEO,
        MUX_TYPE_AUDIO,
        MUX_TYPE_SUBTITLE
    } type;

    AVStream    *st;              // libavformat stream for this track
    int64_t     duration;         // stop time (stream time_base) of the last packet written

    hb_buffer_t * delay_buf;      // one-frame delay queue; lets the video
                                  // path compute dts-based durations (MP4)

    /* chapter mark bookkeeping (used on the video track) */
    int64_t     prev_chapter_tc;  // timestamp of the previous chapter mark
    int16_t     current_chapter;  // chapter number currently being written

    AVBitStreamFilterContext* bitstream_filter;  // e.g. aac_adtstoasc for AAC pass-through
};
/* Muxer object: wraps a libavformat output context plus per-track state. */
struct hb_mux_object_s
{
    HB_MUX_COMMON;

    hb_job_t          * job;      // owning job (not owned by the muxer)

    AVFormatContext   * oc;       // libavformat output context
    AVRational          time_base;// container time base (1/90000 MP4, 1/1000 MKV)

    int                 ntracks;  // number of entries used in 'tracks'
    hb_mux_data_t    ** tracks;   // per-track state, video first

    int64_t             chapter_delay; // bias applied to chapter start times
                                       // to match delayed video timestamps
};
/* Row index into metadata_keys: which metadata item. */
enum
{
    META_TITLE,
    META_ARTIST,
    META_DIRECTOR,
    META_COMPOSER,
    META_RELEASE_DATE,
    META_COMMENT,
    META_ALBUM,
    META_GENRE,
    META_DESCRIPTION,
    META_SYNOPSIS,
    META_LAST
};

/* Column index into metadata_keys: which container format. */
enum
{
    META_MUX_MP4,
    META_MUX_MKV,
    META_MUX_LAST
};

/* Container-specific metadata key names; NULL means the item is not
 * supported by that container (e.g. no "album" key for MKV). */
const char *metadata_keys[META_LAST][META_MUX_LAST] =
{
    {"title",        "TITLE"},
    {"artist",       "ARTIST"},
    {"album_artist", "DIRECTOR"},
    {"composer",     "COMPOSER"},
    {"date",         "DATE_RELEASED"},
    {"comment",      "SUMMARY"},
    {"album",        NULL},
    {"genre",        "GENRE"},
    {"description",  "DESCRIPTION"},
    {"synopsis",     "SYNOPSIS"}
};
/* Map an ISO-639-2 code to the language code the chosen container
 * expects.  Returns NULL for an unknown mux type. */
static char* lookup_lang_code(int mux, char *iso639_2)
{
    iso639_lang_t *lang;
    char *result = NULL;

    if (mux == HB_MUX_AV_MP4)
    {
        // MP4 takes the ISO-639-2 code as-is.
        result = iso639_2;
    }
    else if (mux == HB_MUX_AV_MKV)
    {
        // MKV lang codes should be ISO-639-2B if it exists,
        // else ISO-639-2
        lang = lang_for_code2( iso639_2 );
        result = lang->iso639_2b ? lang->iso639_2b : lang->iso639_2;
    }
    return result;
}
/**********************************************************************
* avformatInit
**********************************************************************
* Allocates hb_mux_data_t structures, create file and write headers
*********************************************************************/
static int avformatInit( hb_mux_object_t * m )
{
    hb_job_t * job = m->job;
    hb_audio_t * audio;
    hb_mux_data_t * track;
    int meta_mux;
    int max_tracks;
    int ii, ret;

    const char *muxer_name = NULL;

    uint8_t default_track_flag = 1;
    uint8_t need_fonts = 0;
    char *lang;

    /* One video track plus every audio and subtitle track. */
    max_tracks = 1 + hb_list_count( job->list_audio ) +
                     hb_list_count( job->list_subtitle );

    m->tracks = calloc(max_tracks, sizeof(hb_mux_data_t*));

    m->oc = avformat_alloc_context();
    if (m->oc == NULL)
    {
        hb_error( "Could not initialize avformat context." );
        goto error;
    }

    /* Pick the container, its time base, and container-specific options. */
    AVDictionary * av_opts = NULL;
    switch (job->mux)
    {
        case HB_MUX_AV_MP4:
            m->time_base.num = 1;
            m->time_base.den = 90000;
            if( job->ipod_atom )
                muxer_name = "ipod";
            else
                muxer_name = "mp4";
            meta_mux = META_MUX_MP4;

            av_dict_set(&av_opts, "brand", "mp42", 0);
            if (job->mp4_optimize)
                av_dict_set(&av_opts, "movflags", "faststart+disable_chpl", 0);
            else
                av_dict_set(&av_opts, "movflags", "+disable_chpl", 0);
            break;

        case HB_MUX_AV_MKV:
            // libavformat is essentially hard coded such that it only
            // works with a timebase of 1/1000
            m->time_base.num = 1;
            m->time_base.den = 1000;
            muxer_name = "matroska";
            meta_mux = META_MUX_MKV;
            break;

        default:
        {
            hb_error("Invalid Mux %x", job->mux);
            goto error;
        }
    }

    m->oc->oformat = av_guess_format(muxer_name, NULL, NULL);
    if(m->oc->oformat == NULL)
    {
        hb_error("Could not guess output format %s", muxer_name);
        goto error;
    }
    av_strlcpy(m->oc->filename, job->file, sizeof(m->oc->filename));
    ret = avio_open2(&m->oc->pb, job->file, AVIO_FLAG_WRITE,
                     &m->oc->interrupt_callback, NULL);
    if( ret < 0 )
    {
        hb_error( "avio_open2 failed, errno %d", ret);
        goto error;
    }

    /* Video track */
    track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) );
    job->mux_data = track;

    track->type = MUX_TYPE_VIDEO;
    track->prev_chapter_tc = AV_NOPTS_VALUE;
    track->st = avformat_new_stream(m->oc, NULL);
    if (track->st == NULL)
    {
        hb_error("Could not initialize video stream");
        goto error;
    }
    track->st->time_base = m->time_base;
    avcodec_get_context_defaults3(track->st->codec, NULL);

    track->st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

    /* Build codec extradata (decoder configuration) for the video codec. */
    uint8_t *priv_data = NULL;
    int priv_size = 0;
    switch (job->vcodec)
    {
        case HB_VCODEC_X264:
        case HB_VCODEC_QSV_H264:
            track->st->codec->codec_id = AV_CODEC_ID_H264;

            /* Taken from x264 muxers.c: build an avcC box by hand from
             * the SPS/PPS the encoder produced. */
            priv_size = 5 + 1 + 2 + job->config.h264.sps_length + 1 + 2 +
                        job->config.h264.pps_length;
            priv_data = av_malloc(priv_size);
            if (priv_data == NULL)
            {
                hb_error("H.264 extradata: malloc failure");
                goto error;
            }

            priv_data[0] = 1;
            priv_data[1] = job->config.h264.sps[1]; /* AVCProfileIndication */
            priv_data[2] = job->config.h264.sps[2]; /* profile_compat */
            priv_data[3] = job->config.h264.sps[3]; /* AVCLevelIndication */
            priv_data[4] = 0xff; // nalu size length is four bytes
            priv_data[5] = 0xe1; // one sps

            priv_data[6] = job->config.h264.sps_length >> 8;
            priv_data[7] = job->config.h264.sps_length;

            memcpy(priv_data+8, job->config.h264.sps,
                   job->config.h264.sps_length);

            priv_data[8+job->config.h264.sps_length] = 1; // one pps
            priv_data[9+job->config.h264.sps_length] =
                                        job->config.h264.pps_length >> 8;
            priv_data[10+job->config.h264.sps_length] =
                                        job->config.h264.pps_length;

            memcpy(priv_data+11+job->config.h264.sps_length,
                   job->config.h264.pps, job->config.h264.pps_length );
            break;

        case HB_VCODEC_FFMPEG_MPEG4:
            track->st->codec->codec_id = AV_CODEC_ID_MPEG4;

            if (job->config.mpeg4.length != 0)
            {
                priv_size = job->config.mpeg4.length;
                priv_data = av_malloc(priv_size);
                if (priv_data == NULL)
                {
                    hb_error("MPEG4 extradata: malloc failure");
                    goto error;
                }
                memcpy(priv_data, job->config.mpeg4.bytes, priv_size);
            }
            break;

        case HB_VCODEC_FFMPEG_MPEG2:
            track->st->codec->codec_id = AV_CODEC_ID_MPEG2VIDEO;

            if (job->config.mpeg4.length != 0)
            {
                priv_size = job->config.mpeg4.length;
                priv_data = av_malloc(priv_size);
                if (priv_data == NULL)
                {
                    hb_error("MPEG2 extradata: malloc failure");
                    goto error;
                }
                memcpy(priv_data, job->config.mpeg4.bytes, priv_size);
            }
            break;

        case HB_VCODEC_FFMPEG_VP8:
            track->st->codec->codec_id = AV_CODEC_ID_VP8;
            priv_data = NULL;
            priv_size = 0;
            break;

        case HB_VCODEC_THEORA:
        {
            track->st->codec->codec_id = AV_CODEC_ID_THEORA;

            /* Concatenate the three Theora header packets, each prefixed
             * with a 16-bit big-endian length. */
            int size = 0;
            ogg_packet *ogg_headers[3];

            for (ii = 0; ii < 3; ii++)
            {
                ogg_headers[ii] = (ogg_packet *)job->config.theora.headers[ii];
                size += ogg_headers[ii]->bytes + 2;
            }

            priv_size = size;
            priv_data = av_malloc(priv_size);
            if (priv_data == NULL)
            {
                hb_error("Theora extradata: malloc failure");
                goto error;
            }

            size = 0;
            for(ii = 0; ii < 3; ii++)
            {
                AV_WB16(priv_data + size, ogg_headers[ii]->bytes);
                size += 2;
                memcpy(priv_data+size, ogg_headers[ii]->packet,
                                       ogg_headers[ii]->bytes);
                size += ogg_headers[ii]->bytes;
            }
        } break;

        case HB_VCODEC_X265:
            track->st->codec->codec_id = AV_CODEC_ID_HEVC;

            if (job->config.h265.headers_length > 0)
            {
                priv_size = job->config.h265.headers_length;
                priv_data = av_malloc(priv_size);
                if (priv_data == NULL)
                {
                    hb_error("H.265 extradata: malloc failure");
                    goto error;
                }
                memcpy(priv_data, job->config.h265.headers, priv_size);
            }
            break;

        default:
            hb_error("muxavformat: Unknown video codec: %x", job->vcodec);
            goto error;
    }
    track->st->codec->extradata = priv_data;
    track->st->codec->extradata_size = priv_size;

    /* Propagate the pixel aspect ratio (1:1 when not anamorphic). */
    if (job->anamorphic.mode > 0)
    {
        track->st->sample_aspect_ratio.num        = job->anamorphic.par_width;
        track->st->sample_aspect_ratio.den        = job->anamorphic.par_height;
        track->st->codec->sample_aspect_ratio.num = job->anamorphic.par_width;
        track->st->codec->sample_aspect_ratio.den = job->anamorphic.par_height;
    }
    else
    {
        track->st->sample_aspect_ratio.num        = 1;
        track->st->sample_aspect_ratio.den        = 1;
        track->st->codec->sample_aspect_ratio.num = 1;
        track->st->codec->sample_aspect_ratio.den = 1;
    }
    track->st->codec->width = job->width;
    track->st->codec->height = job->height;
    track->st->disposition |= AV_DISPOSITION_DEFAULT;

    /* On a 2nd pass, use the frame rate measured during the 1st pass. */
    int vrate_base, vrate;
    if( job->pass == 2 )
    {
        hb_interjob_t * interjob = hb_interjob_get( job->h );
        vrate_base = interjob->vrate_base;
        vrate = interjob->vrate;
    }
    else
    {
        vrate_base = job->vrate_base;
        vrate = job->vrate;
    }

    // If the vrate is 27000000, there's a good chance this is
    // a standard rate that we have in our hb_video_rates table.
    // Because of rounding errors and approximations made while
    // measuring framerate, the actual value may not be exact.  So
    // we look for rates that are "close" and make an adjustment
    // to fps.den.
    if (vrate == 27000000)
    {
        const hb_rate_t *video_framerate = NULL;
        while ((video_framerate = hb_video_framerate_get_next(video_framerate)) != NULL)
        {
            if (abs(vrate_base - video_framerate->rate) < 10)
            {
                vrate_base = video_framerate->rate;
                break;
            }
        }
    }
    hb_reduce(&vrate_base, &vrate, vrate_base, vrate);
    if (job->mux == HB_MUX_AV_MP4)
    {
        // libavformat mp4 muxer requires that the codec time_base have the
        // same denominator as the stream time_base, it uses it for the
        // mdhd timescale.
        double scale = (double)track->st->time_base.den / vrate;
        track->st->codec->time_base.den = track->st->time_base.den;
        track->st->codec->time_base.num = vrate_base * scale;
    }
    else
    {
        track->st->codec->time_base.num = vrate_base;
        track->st->codec->time_base.den = vrate;
    }

    /* add the audio tracks */
    for(ii = 0; ii < hb_list_count( job->list_audio ); ii++ )
    {
        audio = hb_list_item( job->list_audio, ii );
        track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) );
        audio->priv.mux_data = track;

        track->type = MUX_TYPE_AUDIO;

        track->st = avformat_new_stream(m->oc, NULL);
        if (track->st == NULL)
        {
            hb_error("Could not initialize audio stream");
            goto error;
        }
        avcodec_get_context_defaults3(track->st->codec, NULL);

        track->st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
        if (job->mux == HB_MUX_AV_MP4)
        {
            /* MP4 audio is timestamped in samples. */
            track->st->codec->time_base.num = audio->config.out.samples_per_frame;
            track->st->codec->time_base.den = audio->config.out.samplerate;
            track->st->time_base.num = 1;
            track->st->time_base.den = audio->config.out.samplerate;
        }
        else
        {
            track->st->codec->time_base = m->time_base;
        }

        /* Per-codec extradata for the audio stream. */
        priv_data = NULL;
        priv_size = 0;
        switch (audio->config.out.codec & HB_ACODEC_MASK)
        {
            case HB_ACODEC_DCA:
            case HB_ACODEC_DCA_HD:
                track->st->codec->codec_id = AV_CODEC_ID_DTS;
                break;
            case HB_ACODEC_AC3:
                track->st->codec->codec_id = AV_CODEC_ID_AC3;
                break;
            case HB_ACODEC_LAME:
            case HB_ACODEC_MP3:
                track->st->codec->codec_id = AV_CODEC_ID_MP3;
                break;
            case HB_ACODEC_VORBIS:
            {
                track->st->codec->codec_id = AV_CODEC_ID_VORBIS;

                /* Same length-prefixed header concatenation as Theora. */
                int jj, size = 0;
                ogg_packet *ogg_headers[3];

                for (jj = 0; jj < 3; jj++)
                {
                    ogg_headers[jj] = (ogg_packet *)audio->priv.config.vorbis.headers[jj];
                    size += ogg_headers[jj]->bytes + 2;
                }

                priv_size = size;
                priv_data = av_malloc(priv_size);
                if (priv_data == NULL)
                {
                    hb_error("Vorbis extradata: malloc failure");
                    goto error;
                }

                size = 0;
                for(jj = 0; jj < 3; jj++)
                {
                    AV_WB16(priv_data + size, ogg_headers[jj]->bytes);
                    size += 2;
                    memcpy(priv_data+size, ogg_headers[jj]->packet,
                                           ogg_headers[jj]->bytes);
                    size += ogg_headers[jj]->bytes;
                }
            } break;
            case HB_ACODEC_FFFLAC:
            case HB_ACODEC_FFFLAC24:
                track->st->codec->codec_id = AV_CODEC_ID_FLAC;

                if (audio->priv.config.extradata.length)
                {
                    priv_size = audio->priv.config.extradata.length;
                    priv_data = av_malloc(priv_size);
                    if (priv_data == NULL)
                    {
                        hb_error("FLAC extradata: malloc failure");
                        goto error;
                    }
                    memcpy(priv_data,
                           audio->priv.config.extradata.bytes,
                           audio->priv.config.extradata.length);
                }
                break;
            case HB_ACODEC_FFAAC:
            case HB_ACODEC_CA_AAC:
            case HB_ACODEC_CA_HAAC:
            case HB_ACODEC_FDK_AAC:
            case HB_ACODEC_FDK_HAAC:
                track->st->codec->codec_id = AV_CODEC_ID_AAC;

                // TODO: fix AAC in TS parsing.  We need to fill
                // extradata with AAC config. Some players will play
                // an AAC stream that is missing extradata and some
                // will not.
                //
                // libav mkv muxer expects there to be extradata for
                // AAC and will crash if it is NULL.  So allocate extra
                // byte so that av_malloc does not return NULL when length
                // is 0.
                priv_size = audio->priv.config.extradata.length;
                priv_data = av_malloc(priv_size + 1);
                if (priv_data == NULL)
                {
                    hb_error("AAC extradata: malloc failure");
                    goto error;
                }
                memcpy(priv_data,
                       audio->priv.config.extradata.bytes,
                       audio->priv.config.extradata.length);

                // AAC from pass-through source may be ADTS.
                // Therefore inserting "aac_adtstoasc" bitstream filter is
                // preferred.
                // The filter does nothing for non-ADTS bitstream.
                if (audio->config.out.codec == HB_ACODEC_AAC_PASS)
                {
                    track->bitstream_filter = av_bitstream_filter_init("aac_adtstoasc");
                }
                break;
            default:
                hb_error("muxavformat: Unknown audio codec: %x",
                         audio->config.out.codec);
                goto error;
        }
        track->st->codec->extradata = priv_data;
        track->st->codec->extradata_size = priv_size;

        /* Only the first audio track gets the default-track flag. */
        if( default_track_flag )
        {
            track->st->disposition |= AV_DISPOSITION_DEFAULT;
            default_track_flag = 0;
        }

        lang = lookup_lang_code(job->mux, audio->config.lang.iso639_2 );
        if (lang != NULL)
        {
            av_dict_set(&track->st->metadata, "language", lang, 0);
        }
        track->st->codec->sample_rate = audio->config.out.samplerate;
        if (audio->config.out.codec & HB_ACODEC_PASS_FLAG)
        {
            /* Pass-through keeps the source channel layout. */
            track->st->codec->channels = av_get_channel_layout_nb_channels(audio->config.in.channel_layout);
            track->st->codec->channel_layout = audio->config.in.channel_layout;
        }
        else
        {
            track->st->codec->channels = hb_mixdown_get_discrete_channel_count(audio->config.out.mixdown);
            track->st->codec->channel_layout = hb_ff_mixdown_xlat(audio->config.out.mixdown, NULL);
        }

        /* Default the track name from the channel count when unset. */
        char *name;
        if (audio->config.out.name == NULL)
        {
            switch (track->st->codec->channels)
            {
                case 1:
                    name = "Mono";
                    break;

                case 2:
                    name = "Stereo";
                    break;

                default:
                    name = "Surround";
                    break;
            }
        }
        else
        {
            name = audio->config.out.name;
        }
        // Set audio track title
        av_dict_set(&track->st->metadata, "title", name, 0);
        if (job->mux == HB_MUX_AV_MP4)
        {
            // Some software (MPC, mediainfo) use hdlr description
            // for track title
            av_dict_set(&track->st->metadata, "handler", name, 0);
        }
    }

    /* VOBSUB .idx header template; filled in per subtitle track below. */
    char * subidx_fmt =
        "size: %dx%d\n"
        "org: %d, %d\n"
        "scale: 100%%, 100%%\n"
        "alpha: 100%%\n"
        "smooth: OFF\n"
        "fadein/out: 50, 50\n"
        "align: OFF at LEFT TOP\n"
        "time offset: 0\n"
        "forced subs: %s\n"
        "palette: %06x, %06x, %06x, %06x, %06x, %06x, "
        "%06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x, %06x\n"
        "custom colors: OFF, tridx: 0000, "
        "colors: 000000, 000000, 000000, 000000\n";

    int subtitle_default = -1;
    for( ii = 0; ii < hb_list_count( job->list_subtitle ); ii++ )
    {
        hb_subtitle_t *subtitle = hb_list_item( job->list_subtitle, ii );

        if( subtitle->config.dest == PASSTHRUSUB )
        {
            if ( subtitle->config.default_track )
                subtitle_default = ii;
        }
    }
    // Quicktime requires that at least one subtitle is enabled,
    // else it doesn't show any of the subtitles.
    // So check to see if any of the subtitles are flagged to be
    // the default.  The default will be the enabled track, else
    // enable the first track.
    if (job->mux == HB_MUX_AV_MP4 && subtitle_default == -1)
    {
        subtitle_default = 0;
    }

    for( ii = 0; ii < hb_list_count( job->list_subtitle ); ii++ )
    {
        hb_subtitle_t * subtitle;
        uint32_t        rgb[16];
        char            subidx[2048];
        int             len;

        subtitle = hb_list_item( job->list_subtitle, ii );
        if (subtitle->config.dest != PASSTHRUSUB)
            continue;

        track = m->tracks[m->ntracks++] = calloc(1, sizeof( hb_mux_data_t ) );
        subtitle->mux_data = track;

        track->type = MUX_TYPE_SUBTITLE;
        track->st = avformat_new_stream(m->oc, NULL);
        if (track->st == NULL)
        {
            hb_error("Could not initialize subtitle stream");
            goto error;
        }
        avcodec_get_context_defaults3(track->st->codec, NULL);

        track->st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
        track->st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
        track->st->time_base = m->time_base;
        track->st->codec->time_base = m->time_base;
        track->st->codec->width = subtitle->width;
        track->st->codec->height = subtitle->height;

        priv_data = NULL;
        priv_size = 0;
        switch (subtitle->source)
        {
            case VOBSUB:
            {
                int jj;
                track->st->codec->codec_id = AV_CODEC_ID_DVD_SUBTITLE;

                /* Build the .idx-style extradata: size, origin and the
                 * 16-entry palette converted from YUV to RGB. */
                for (jj = 0; jj < 16; jj++)
                    rgb[jj] = hb_yuv2rgb(subtitle->palette[jj]);
                len = snprintf(subidx, 2048, subidx_fmt,
                               subtitle->width, subtitle->height,
                               0, 0, "OFF",
                               rgb[0],  rgb[1],  rgb[2],  rgb[3],
                               rgb[4],  rgb[5],  rgb[6],  rgb[7],
                               rgb[8],  rgb[9],  rgb[10], rgb[11],
                               rgb[12], rgb[13], rgb[14], rgb[15]);

                priv_size = len + 1;
                priv_data = av_malloc(priv_size);
                if (priv_data == NULL)
                {
                    hb_error("VOBSUB extradata: malloc failure");
                    goto error;
                }
                memcpy(priv_data, subidx, priv_size);
            } break;

            case PGSSUB:
            {
                track->st->codec->codec_id = AV_CODEC_ID_HDMV_PGS_SUBTITLE;
            } break;

            case CC608SUB:
            case CC708SUB:
            case TX3GSUB:
            case SRTSUB:
            case UTF8SUB:
            case SSASUB:
            {
                if (job->mux == HB_MUX_AV_MP4)
                {
                    track->st->codec->codec_id = AV_CODEC_ID_MOV_TEXT;
                }
                else
                {
                    track->st->codec->codec_id = AV_CODEC_ID_SSA;
                    need_fonts = 1;

                    if (subtitle->extradata_size)
                    {
                        priv_size = subtitle->extradata_size;
                        priv_data = av_malloc(priv_size);
                        if (priv_data == NULL)
                        {
                            hb_error("SSA extradata: malloc failure");
                            goto error;
                        }
                        memcpy(priv_data, subtitle->extradata, priv_size);
                    }
                }
            } break;

            default:
                continue;
        }
        if (track->st->codec->codec_id == AV_CODEC_ID_MOV_TEXT)
        {
            // Build codec extradata for tx3g.
            // If we were using a libav codec to generate this data
            // this would (or should) be done for us.
            uint8_t properties[] = {
                0x00, 0x00, 0x00, 0x00,     // Display Flags
                0x01,                       // Horiz. Justification
                0xff,                       // Vert. Justification
                0x00, 0x00, 0x00, 0xff,     // Bg color
                0x00, 0x00, 0x00, 0x00,     // Default text box
                0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0x00, 0x00,     // Reserved
                0x00, 0x01,                 // Font ID
                0x00,                       // Font face
                0x18,                       // Font size
                0xff, 0xff, 0xff, 0xff,     // Fg color
                // Font table:
                0x00, 0x00, 0x00, 0x12,     // Font table size
                'f','t','a','b',            // Tag
                0x00, 0x01,                 // Count
                0x00, 0x01,                 // Font ID
                0x05,                       // Font name length
                'A','r','i','a','l'         // Font name
            };

            /* Size the default text box to the display width and patch
             * it into the properties blob (big-endian 16-bit fields). */
            int width, height = 60;
            if (job->anamorphic.mode)
                width = job->width * ((float)job->anamorphic.par_width / job->anamorphic.par_height);
            else
                width = job->width;
            track->st->codec->width = width;
            track->st->codec->height = height;
            properties[14] = height >> 8;
            properties[15] = height & 0xff;
            properties[16] = width >> 8;
            properties[17] = width & 0xff;

            priv_size = sizeof(properties);
            priv_data = av_malloc(priv_size);
            if (priv_data == NULL)
            {
                hb_error("TX3G extradata: malloc failure");
                goto error;
            }
            memcpy(priv_data, properties, priv_size);
        }
        track->st->codec->extradata = priv_data;
        track->st->codec->extradata_size = priv_size;

        if ( ii == subtitle_default )
        {
            track->st->disposition |= AV_DISPOSITION_DEFAULT;
        }

        lang = lookup_lang_code(job->mux, subtitle->iso639_2 );
        if (lang != NULL)
        {
            av_dict_set(&track->st->metadata, "language", lang, 0);
        }
    }

    /* SSA subtitles may reference embedded fonts; attach them (MKV). */
    if (need_fonts)
    {
        hb_list_t * list_attachment = job->list_attachment;
        int i;
        for ( i = 0; i < hb_list_count(list_attachment); i++ )
        {
            hb_attachment_t * attachment = hb_list_item( list_attachment, i );

            if (attachment->type == FONT_TTF_ATTACH &&
                attachment->size > 0)
            {
                AVStream *st = avformat_new_stream(m->oc, NULL);
                if (st == NULL)
                {
                    hb_error("Could not initialize attachment stream");
                    goto error;
                }
                avcodec_get_context_defaults3(st->codec, NULL);

                st->codec->codec_type = AVMEDIA_TYPE_ATTACHMENT;
                st->codec->codec_id = AV_CODEC_ID_TTF;

                priv_size = attachment->size;
                priv_data = av_malloc(priv_size);
                if (priv_data == NULL)
                {
                    hb_error("Font extradata: malloc failure");
                    goto error;
                }
                memcpy(priv_data, attachment->data, priv_size);

                st->codec->extradata = priv_data;
                st->codec->extradata_size = priv_size;

                av_dict_set(&st->metadata, "filename", attachment->name, 0);
            }
        }
    }

    /* Copy job metadata into the container, using the key names the
     * chosen container understands (see metadata_keys). */
    if( job->metadata )
    {
        hb_metadata_t *md = job->metadata;

        hb_deep_log(2, "Writing Metadata to output file...");
        if (md->name &&
            metadata_keys[META_TITLE][meta_mux] != NULL)
        {
            av_dict_set(&m->oc->metadata,
                        metadata_keys[META_TITLE][meta_mux], md->name, 0);
        }
        if (md->artist &&
            metadata_keys[META_ARTIST][meta_mux] != NULL)
        {
            av_dict_set(&m->oc->metadata,
                        metadata_keys[META_ARTIST][meta_mux], md->artist, 0);
        }
        if (md->album_artist &&
            metadata_keys[META_DIRECTOR][meta_mux] != NULL)
        {
            av_dict_set(&m->oc->metadata,
                        metadata_keys[META_DIRECTOR][meta_mux],
                        md->album_artist, 0);
        }
        if (md->composer &&
            metadata_keys[META_COMPOSER][meta_mux] != NULL)
        {
            av_dict_set(&m->oc->metadata,
                        metadata_keys[META_COMPOSER][meta_mux],
                        md->composer, 0);
        }
        if (md->release_date &&
            metadata_keys[META_RELEASE_DATE][meta_mux] != NULL)
        {
            av_dict_set(&m->oc->metadata,
                        metadata_keys[META_RELEASE_DATE][meta_mux],
                        md->release_date, 0);
        }
        if (md->comment &&
            metadata_keys[META_COMMENT][meta_mux] != NULL)
        {
            av_dict_set(&m->oc->metadata,
                        metadata_keys[META_COMMENT][meta_mux], md->comment, 0);
        }
        if (!md->name && md->album &&
            metadata_keys[META_ALBUM][meta_mux] != NULL)
        {
            av_dict_set(&m->oc->metadata,
                        metadata_keys[META_ALBUM][meta_mux], md->album, 0);
        }
        if (md->genre &&
            metadata_keys[META_GENRE][meta_mux] != NULL)
        {
            av_dict_set(&m->oc->metadata,
                        metadata_keys[META_GENRE][meta_mux], md->genre, 0);
        }
        if (md->description &&
            metadata_keys[META_DESCRIPTION][meta_mux] != NULL)
        {
            av_dict_set(&m->oc->metadata,
                        metadata_keys[META_DESCRIPTION][meta_mux],
                        md->description, 0);
        }
        if (md->long_description &&
            metadata_keys[META_SYNOPSIS][meta_mux] != NULL)
        {
            av_dict_set(&m->oc->metadata,
                        metadata_keys[META_SYNOPSIS][meta_mux],
                        md->long_description, 0);
        }
    }

    /* Stamp the encoder name and creation time (ISO-8601 UTC). */
    char tool_string[80];
    snprintf(tool_string, sizeof(tool_string), "HandBrake %s %i",
             HB_PROJECT_VERSION, HB_PROJECT_BUILD);
    av_dict_set(&m->oc->metadata, "encoding_tool", tool_string, 0);
    time_t now = time(NULL);
    struct tm * now_utc = gmtime(&now);
    char now_8601[24];
    strftime(now_8601, sizeof(now_8601), "%FT%TZ", now_utc);
    av_dict_set(&m->oc->metadata, "creation_time", now_8601, 0);

    ret = avformat_write_header(m->oc, &av_opts);
    if( ret < 0 )
    {
        av_dict_free( &av_opts );
        hb_error( "muxavformat: avformat_write_header failed!");
        goto error;
    }

    /* Options not consumed by the muxer are left in the dictionary. */
    AVDictionaryEntry *t = NULL;
    while( ( t = av_dict_get( av_opts, "", t, AV_DICT_IGNORE_SUFFIX ) ) )
    {
        hb_log( "muxavformat: Unknown option %s", t->key );
    }
    av_dict_free( &av_opts );

    return 0;

error:
    /* Signal a fatal init error to the job and abort the encode. */
    free(job->mux_data);
    job->mux_data = NULL;
    avformat_free_context(m->oc);
    *job->done_error = HB_ERROR_INIT;
    *job->die = 1;
    return -1;
}
/*
 * Append a chapter entry to the output context.
 *
 * start/end are in the muxer's container time base (m->time_base);
 * start is biased by m->chapter_delay to match the delayed video
 * timestamps (see avformatMux).  Returns 0 on success, -1 on
 * allocation failure.
 */
static int add_chapter(hb_mux_object_t *m, int64_t start, int64_t end, char * title)
{
    AVChapter *chap;
    AVChapter **chapters;
    int nchap = m->oc->nb_chapters;

    nchap++;
    chapters = av_realloc(m->oc->chapters, nchap * sizeof(AVChapter*));
    if (chapters == NULL)
    {
        hb_error("chapter array: malloc failure");
        return -1;
    }
    // Commit the (possibly moved) array immediately.  Previously this
    // was deferred until after the AVChapter allocation, which left
    // m->oc->chapters dangling if av_realloc() relocated the block and
    // the subsequent av_mallocz() failed.
    m->oc->chapters = chapters;

    chap = av_mallocz(sizeof(AVChapter));
    if (chap == NULL)
    {
        hb_error("chapter: malloc failure");
        return -1;
    }

    m->oc->chapters[nchap-1] = chap;
    m->oc->nb_chapters = nchap;

    chap->id = nchap;
    chap->time_base = m->time_base;
    // libav does not currently have a good way to deal with chapters and
    // delayed stream timestamps.  It makes no corrections to the chapter
    // track.  A patch to libav would touch a lot of things, so for now,
    // work around the issue here.
    chap->start = start + m->chapter_delay;
    chap->end = end;
    av_dict_set(&chap->metadata, "title", title, 0);

    return 0;
}
/*
 * Write one buffer to the given track.
 *
 * Handles: one-frame video delay for MP4 dts-duration computation,
 * chapter mark emission on the video track, empty "gap" samples and
 * tx3g/SSA repackaging for subtitles, and optional bitstream filtering
 * (AAC ADTS->ASC).  Passing buf == NULL flushes the track's delayed
 * frame.  Takes ownership of buf.  Returns 0 on success, -1 on write
 * failure (job is flagged to die).
 */
static int avformatMux(hb_mux_object_t *m, hb_mux_data_t *track, hb_buffer_t *buf)
{
    AVPacket pkt;
    int64_t dts, pts, duration = AV_NOPTS_VALUE;
    hb_job_t *job = m->job;
    uint8_t sub_out[2048];

    if (track->type == MUX_TYPE_VIDEO &&
        track->prev_chapter_tc == AV_NOPTS_VALUE)
    {
        // Chapter timestamps are biased the same as video timestamps.
        // This needs to be reflected in the initial chapter timestamp.
        //
        // TODO: Don't assume the first chapter is at 0.  Pass the first
        // chapter through the pipeline instead of dropping it as we
        // currently do.
        m->chapter_delay = av_rescale_q(m->job->config.h264.init_delay,
                                        (AVRational){1,90000},
                                        track->st->time_base);
        track->prev_chapter_tc = -m->chapter_delay;
    }

    // We only compute dts duration for MP4 files
    if (track->type == MUX_TYPE_VIDEO && (job->mux & HB_MUX_MASK_MP4))
    {
        hb_buffer_t * tmp;

        // delay by one frame so that we can compute duration properly.
        tmp = track->delay_buf;
        track->delay_buf = buf;
        buf = tmp;
    }
    if (buf == NULL)
        return 0;

    /* Convert 90kHz pipeline timestamps into the stream time base. */
    if (buf->s.renderOffset == AV_NOPTS_VALUE)
    {
        dts = av_rescale_q(buf->s.start, (AVRational){1,90000},
                           track->st->time_base);
    }
    else
    {
        dts = av_rescale_q(buf->s.renderOffset, (AVRational){1,90000},
                           track->st->time_base);
    }

    pts = av_rescale_q(buf->s.start, (AVRational){1,90000},
                       track->st->time_base);

    /* Prefer dts delta to the next frame; fall back to the buffer's own
     * duration; finally synthesize one for subtitles (see below). */
    if (track->type == MUX_TYPE_VIDEO && track->delay_buf != NULL)
    {
        int64_t delayed_dts;
        delayed_dts = av_rescale_q(track->delay_buf->s.renderOffset,
                                   (AVRational){1,90000},
                                   track->st->time_base);
        duration = delayed_dts - dts;
    }
    if (duration < 0 && buf->s.duration > 0)
    {
        duration = av_rescale_q(buf->s.duration, (AVRational){1,90000},
                                track->st->time_base);
    }
    if (duration < 0)
    {
        // There is a possiblility that some subtitles get through the pipeline
        // without ever discovering their true duration.  Make the duration
        // 10 seconds in this case. Unless they are PGS subs which should
        // have zero duration.
        if (track->type == MUX_TYPE_SUBTITLE &&
            track->st->codec->codec_id != AV_CODEC_ID_HDMV_PGS_SUBTITLE)
            duration = av_rescale_q(10, (AVRational){1,1},
                                    track->st->time_base);
        else
            duration = 0;
    }

    // Theora can generate 0 length output for duplicate frames.
    // Since we use 0 length buffers to indicate end of stream, we
    // can't allow 0 lenth buffers.
    //
    // As a work-around, always allocate an extra byte for theora buffers.
    // Remove this extra byte here.
    //
    // This is fixed correctly in svn trunk by using a end of stream flag
    // instead of 0 length buffer.
    if (track->type == MUX_TYPE_VIDEO && job->vcodec == HB_VCODEC_THEORA)
    {
        buf->size--;
    }

    av_init_packet(&pkt);
    pkt.data = buf->data;
    pkt.size = buf->size;
    pkt.dts = dts;
    pkt.pts = pts;
    pkt.duration = duration;

    if (track->type == MUX_TYPE_VIDEO && ((job->vcodec & HB_VCODEC_H264_MASK) ||
                                          (job->vcodec & HB_VCODEC_FFMPEG_MASK)))
    {
        if (buf->s.frametype == HB_FRAME_IDR)
            pkt.flags |= AV_PKT_FLAG_KEY;
    }
    else if (buf->s.frametype & HB_FRAME_KEY)
    {
        pkt.flags |= AV_PKT_FLAG_KEY;
    }

    switch (track->type)
    {
        case MUX_TYPE_VIDEO:
        {
            if (job->chapter_markers && buf->s.new_chap)
            {
                hb_chapter_t *chapter;

                // reached chapter N, write marker for chapter N-1
                // we don't know the end time of chapter N-1 till we receive
                // chapter N.  So we are always writing the previous chapter
                // mark.
                track->current_chapter = buf->s.new_chap - 1;

                // chapter numbers start at 1, but the list starts at 0
                chapter = hb_list_item(job->list_chapter,
                                       track->current_chapter - 1);

                // make sure we're not writing a chapter that has 0 length
                if (chapter != NULL && track->prev_chapter_tc < pkt.pts)
                {
                    char title[1024];
                    if (chapter->title != NULL)
                    {
                        snprintf(title, 1023, "%s", chapter->title);
                    }
                    else
                    {
                        snprintf(title, 1023, "Chapter %d",
                                 track->current_chapter);
                    }
                    add_chapter(m, track->prev_chapter_tc, pkt.pts, title);
                }
                track->prev_chapter_tc = pkt.pts;
            }
        } break;

        case MUX_TYPE_SUBTITLE:
        {
            if (job->mux == HB_MUX_AV_MP4)
            {
                /* Write an empty sample to fill the gap since the end of
                 * the previous subtitle (track->duration). */
                if ( track->duration < pts )
                {
                    AVPacket empty_pkt;
                    uint8_t empty[2] = {0,0};

                    av_init_packet(&empty_pkt);
                    empty_pkt.data = empty;
                    empty_pkt.size = 2;
                    empty_pkt.dts = track->duration;
                    empty_pkt.pts = track->duration;
                    // BUGFIX: the gap runs from track->duration to pts,
                    // so its duration is (pts - track->duration); the
                    // old code used the *current* packet's duration,
                    // which is unrelated to the gap length.
                    empty_pkt.duration = pts - track->duration;
                    empty_pkt.convergence_duration = empty_pkt.duration;
                    empty_pkt.stream_index = track->st->index;
                    int ret = av_interleaved_write_frame(m->oc, &empty_pkt);
                    if (ret < 0)
                    {
                        char errstr[64];
                        av_strerror(ret, errstr, sizeof(errstr));
                        hb_error("avformatMux: track %d, av_interleaved_write_frame failed with error '%s' (empty_pkt)",
                                 track->st->index, errstr);
                        *job->done_error = HB_ERROR_UNKNOWN;
                        *job->die = 1;
                        return -1;
                    }
                }
                if (track->st->codec->codec_id == AV_CODEC_ID_MOV_TEXT)
                {
                    uint8_t styleatom[2048];
                    uint16_t stylesize = 0;
                    uint8_t buffer[2048];
                    uint16_t buffersize = 0;

                    *buffer = '\0';

                    /*
                     * Copy the subtitle into buffer stripping markup and creating
                     * style atoms for them.
                     */
                    hb_muxmp4_process_subtitle_style( buf->data,
                                                      buffer,
                                                      styleatom, &stylesize );

                    buffersize = strlen((char*)buffer);

                    /* Write the subtitle sample: 16-bit length prefix,
                     * text, then the style atom. */
                    memcpy( sub_out + 2, buffer, buffersize );
                    memcpy( sub_out + 2 + buffersize, styleatom, stylesize);
                    sub_out[0] = ( buffersize >> 8 ) & 0xff;
                    sub_out[1] = buffersize & 0xff;
                    pkt.data = sub_out;
                    pkt.size = buffersize + stylesize + 2;
                }
            }
            if (track->st->codec->codec_id == AV_CODEC_ID_SSA &&
                job->mux == HB_MUX_AV_MKV)
            {
                // avformat requires the this additional information
                // which it parses and then strips away
                int start_hh, start_mm, start_ss, start_ms;
                int stop_hh, stop_mm, stop_ss, stop_ms, layer;
                char *ssa;

                start_hh = buf->s.start / (90000 * 60 * 60);
                start_mm = (buf->s.start / (90000 * 60)) % 60;
                start_ss = (buf->s.start / 90000) % 60;
                start_ms = (buf->s.start / 900) % 100;
                stop_hh = buf->s.stop / (90000 * 60 * 60);
                stop_mm = (buf->s.stop / (90000 * 60)) % 60;
                stop_ss = (buf->s.stop / 90000) % 60;
                stop_ms = (buf->s.stop / 900) % 100;

                // Skip the read-order field
                ssa = strchr((char*)buf->data, ',');
                if (ssa != NULL)
                    ssa++;
                // Skip the layer field
                layer = strtol(ssa, NULL, 10);
                ssa = strchr(ssa, ',');
                if (ssa != NULL)
                    ssa++;
                sprintf((char*)sub_out,
                    "Dialogue: %d,%d:%02d:%02d.%02d,%d:%02d:%02d.%02d,%s",
                    layer,
                    start_hh, start_mm, start_ss, start_ms,
                    stop_hh, stop_mm, stop_ss, stop_ms, ssa);
                pkt.data = sub_out;
                pkt.size = strlen((char*)sub_out) + 1;
            }
            pkt.convergence_duration = pkt.duration;

        } break;
        case MUX_TYPE_AUDIO:
        default:
            break;
    }
    track->duration = pts + pkt.duration;

    if (track->bitstream_filter)
    {
        av_bitstream_filter_filter(track->bitstream_filter, track->st->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
    }

    pkt.stream_index = track->st->index;
    int ret = av_interleaved_write_frame(m->oc, &pkt);
    // Many avformat muxer functions do not check the error status
    // of the AVIOContext.  So we need to check it ourselves to detect
    // write errors (like disk full condition).
    if (ret < 0 || m->oc->pb->error != 0)
    {
        char errstr[64];
        av_strerror(ret < 0 ? ret : m->oc->pb->error, errstr, sizeof(errstr));
        hb_error("avformatMux: track %d, av_interleaved_write_frame failed with error '%s'",
                 track->st->index, errstr);
        *job->done_error = HB_ERROR_UNKNOWN;
        *job->die = 1;
        return -1;
    }

    hb_buffer_close( &buf );
    return 0;
}
/* avformatEnd - finalize the output container.
 *
 * Flushes frames still delayed inside the muxer, closes any bitstream
 * filters, writes the final chapter marker (if it lasts at least 1.5
 * seconds), refreshes codec extradata that can change during encoding
 * (FLAC rewrites its header when the encoder is flushed), then writes
 * the trailer and frees all libavformat resources.
 *
 * Safe to call when avformatInit failed (job->mux_data is NULL).
 * Always returns 0.
 */
static int avformatEnd(hb_mux_object_t *m)
{
    hb_job_t *job = m->job;
    hb_mux_data_t *track = job->mux_data;

    if (!job->mux_data)
    {
        /*
         * We must have failed to create the file in the first place.
         */
        return 0;
    }

    // Flush any delayed frames
    int ii;
    for (ii = 0; ii < m->ntracks; ii++)
    {
        avformatMux(m, m->tracks[ii], NULL);

        if (m->tracks[ii]->bitstream_filter)
        {
            av_bitstream_filter_close(m->tracks[ii]->bitstream_filter);
        }
    }

    if (job->chapter_markers)
    {
        hb_chapter_t *chapter;

        // get the last chapter
        chapter = hb_list_item(job->list_chapter, track->current_chapter++);

        // only write the last chapter marker if it lasts at least 1.5 second
        if (chapter != NULL && chapter->duration > 135000LL)
        {
            char title[1024];
            if (chapter->title != NULL)
            {
                // use sizeof(title) so snprintf can use the whole buffer
                // (it always NUL-terminates); the old hard-coded 1023
                // wasted one byte and risked drifting from the array size
                snprintf(title, sizeof(title), "%s", chapter->title);
            }
            else
            {
                snprintf(title, sizeof(title), "Chapter %d",
                         track->current_chapter);
            }
            add_chapter(m, track->prev_chapter_tc, track->duration, title);
        }
    }

    // Update any track private data that can change during the encode.
    for (ii = 0; ii < hb_list_count(job->list_audio); ii++)
    {
        AVStream *st;
        hb_audio_t *audio;

        audio = hb_list_item(job->list_audio, ii);
        st = audio->priv.mux_data->st;

        switch (audio->config.out.codec & HB_ACODEC_MASK)
        {
            case HB_ACODEC_FFFLAC:
            case HB_ACODEC_FFFLAC24:
                if (audio->priv.config.extradata.length)
                {
                    uint8_t *priv_data;
                    int priv_size;

                    priv_size = audio->priv.config.extradata.length;
                    priv_data = av_realloc(st->codec->extradata, priv_size);
                    if (priv_data == NULL)
                    {
                        // out of memory: keep the stale extradata rather
                        // than crash; the file is still finalized below
                        break;
                    }
                    memcpy(priv_data,
                           audio->priv.config.extradata.bytes,
                           audio->priv.config.extradata.length);
                    st->codec->extradata = priv_data;
                    st->codec->extradata_size = priv_size;
                }
                break;
            default:
                break;
        }
    }

    av_write_trailer(m->oc);
    avio_close(m->oc->pb);
    avformat_free_context(m->oc);
    free(m->tracks);
    m->tracks = NULL;   // avoid a dangling pointer if 'm' outlives the file
    m->oc = NULL;

    return 0;
}
/* Allocate and initialize an avformat-based muxer object for 'job'.
 * The caller owns the returned object; its end() hook releases the
 * libavformat state. */
hb_mux_object_t * hb_mux_avformat_init( hb_job_t * job )
{
    hb_mux_object_t *mux = calloc( 1, sizeof( *mux ) );

    mux->init = avformatInit;
    mux->mux  = avformatMux;
    mux->end  = avformatEnd;
    mux->job  = job;

    return mux;
}
HandBrake-0.10.2/libhb/demuxmpeg.c 0000664 0001752 0001752 00000032712 12463330511 017261 0 ustar handbrake handbrake /* demuxmpeg.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
/*
 * Track the MPEG System Clock Reference and detect clock
 * discontinuities, per the "System Target Decoder" timing model of
 * ISO 13818-1 (sections 2.4.2, 2.5.2 & Annex D).
 *
 * The standard guarantees an SCR at least every 700ms in program
 * streams (100ms in transport streams), so a forward gap larger than
 * 'tol' milliseconds must be a discontinuity; a backward jump of more
 * than 10ms is non-physical and also counts as one.  On a
 * discontinuity we bump the change counter and forget the last PTS so
 * downstream timestamp correction re-anchors on the new clock.
 */
static inline void check_mpeg_scr( hb_psdemux_t *state, int64_t scr, int tol )
{
    int64_t gap = scr - state->last_scr;

    int discontinuity = ( state->last_scr == AV_NOPTS_VALUE ) ||
                        ( gap > 90 * tol ) ||
                        ( gap < -90 * 10 );
    if ( discontinuity )
    {
        ++state->scr_changes;
        state->last_pts = AV_NOPTS_VALUE;
    }
    state->last_scr = scr;
}
/* Stash a pending chapter mark from 'buf' into the demux state and
 * clear it on the buffer, so it can be re-attached to the right ES
 * buffer later (see restore_chap).  No-op when 'state' is NULL. */
static inline void save_chap( hb_psdemux_t *state, hb_buffer_t *buf )
{
    if ( state != NULL && buf->s.new_chap != 0 )
    {
        state->new_chap = buf->s.new_chap;
        buf->s.new_chap = 0;
    }
}
/* Move the saved chapter mark (if any) from the demux state onto
 * 'buf', clearing the saved value.  No-op when 'state' is NULL. */
static inline void restore_chap( hb_psdemux_t *state, hb_buffer_t *buf )
{
    if ( state != NULL )
    {
        buf->s.new_chap = state->new_chap;
        state->new_chap = 0;
    }
}
/* Basic MPEG program-stream demuxer.
 *
 * Walks a chain of PS packs ('buf' may be a linked list), extracts the
 * SCR from each pack header for clock tracking, then splits each PES
 * packet into an ES buffer (video 0xE0, audio 0xC0-0xDF, and private
 * stream 1 0xBD sub-streams: A52, SPU, LPCM) which is appended to
 * 'list_es'.  Input buffers are consumed (closed) as they are parsed.
 *
 * NOTE(review): 'pos' is declared outside the while(buf) loop and is
 * never reset when advancing to the next chained buffer (including the
 * bad-packet error path).  This appears to rely on callers passing one
 * PS pack per buffer -- confirm before feeding multi-pack chains.
 */
void hb_demux_dvd_ps( hb_buffer_t * buf, hb_list_t * list_es, hb_psdemux_t* state )
{
    hb_buffer_t * buf_es;
    int pos = 0;

    while ( buf )
    {
        save_chap( state, buf );
#define d (buf->data)
        /* pack_header */
        if( d[pos] != 0 || d[pos+1] != 0 ||
            d[pos+2] != 0x1 || d[pos+3] != 0xBA )
        {
            // not positioned at a pack start code: drop this buffer
            hb_log( "hb_demux_ps: not a PS packet (%02x%02x%02x%02x)",
                    d[pos], d[pos+1], d[pos+2], d[pos+3] );
            hb_buffer_t *tmp = buf->next;
            buf->next = NULL;
            hb_buffer_close( &buf );
            buf = tmp;
            continue;
        }
        pos += 4;                    /* pack_start_code */

        if ( state )
        {
            /* extract the system clock reference (scr) from the
             * 33-bit scr_base split across d[pos]..d[pos+4]
             * (MPEG-2 pack header layout, ISO 13818-1 2.5.3.4) */
            int64_t scr = ((uint64_t)(d[pos] & 0x38) << 27) |
                          ((uint64_t)(d[pos] & 0x03) << 28) |
                          ((uint64_t)(d[pos+1]) << 20) |
                          ((uint64_t)(d[pos+2] >> 3) << 15) |
                          ((uint64_t)(d[pos+2] & 3) << 13) |
                          ((uint64_t)(d[pos+3]) << 5) |
                          (d[pos+4] >> 3);
            check_mpeg_scr( state, scr, 700 );
        }
        pos += 9;                    /* pack_header */
        pos += 1 + ( d[pos] & 0x7 ); /* stuffing bytes */

        /* system_header */
        if( d[pos] == 0 && d[pos+1] == 0 &&
            d[pos+2] == 0x1 && d[pos+3] == 0xBB )
        {
            int header_length;

            pos += 4; /* system_header_start_code */
            header_length = ( d[pos] << 8 ) + d[pos+1];
            pos += 2 + header_length;
        }

        /* pes: iterate over every start code in the rest of the pack */
        while( pos + 6 < buf->size &&
               d[pos] == 0 && d[pos+1] == 0 && d[pos+2] == 0x1 )
        {
            int id;
            int pes_packet_length;
            int pes_packet_end;
            int pes_header_d_length;
            int pes_header_end;
            int has_pts;
            int64_t pts = AV_NOPTS_VALUE, dts = AV_NOPTS_VALUE;

            pos += 3; /* packet_start_code_prefix */
            id = d[pos];
            pos += 1;

            /* pack_header */
            if( id == 0xBA)
            {
                pos += 10 + (d[pos+9] & 7);
                continue;
            }

            /* system_header */
            if( id == 0xBB )
            {
                int header_length;

                header_length = ( d[pos] << 8 ) + d[pos+1];
                pos += 2 + header_length;
                continue;
            }

            pes_packet_length = ( d[pos] << 8 ) + d[pos+1];
            pos += 2; /* pes_packet_length */
            pes_packet_end = pos + pes_packet_length;

            // keep only video (0xE0), private stream 1 (0xBD) and
            // MPEG audio (0xC0-0xDF/0xE?; (id & 0xC0) == 0xC0)
            if( id != 0xE0 && id != 0xBD &&
                ( id & 0xC0 ) != 0xC0 )
            {
                /* Not interesting */
                pos = pes_packet_end;
                continue;
            }

            // PTS_DTS_flags: 2 = PTS only, 3 = PTS and DTS
            has_pts = d[pos+1] >> 6;
            pos += 2;               /* Required headers */

            pes_header_d_length = d[pos];
            pos += 1;
            pes_header_end = pos + pes_header_d_length;

            if( has_pts )
            {
                // reassemble the 33-bit PTS from its three split fields
                pts = ( (uint64_t)(d[pos] & 0xe ) << 29 ) +
                      ( d[pos+1] << 22 ) +
                      ( ( d[pos+2] >> 1 ) << 15 ) +
                      ( d[pos+3] << 7 ) +
                      ( d[pos+4] >> 1 );
                if ( has_pts & 1 )
                {
                    // DTS present in the following five bytes
                    dts = ( (uint64_t)(d[pos+5] & 0xe ) << 29 ) +
                          ( d[pos+6] << 22 ) +
                          ( ( d[pos+7] >> 1 ) << 15 ) +
                          ( d[pos+8] << 7 ) +
                          ( d[pos+9] >> 1 );
                }
                else
                {
                    dts = pts;
                }
            }

            pos = pes_header_end;

            if( id == 0xBD )
            {
                // private stream 1: sub-stream id follows the header;
                // skip the per-codec preamble bytes
                id |= ( d[pos] << 8 );
                if( ( id & 0xF0FF ) == 0x80BD ) /* A52 */
                {
                    pos += 4;
                }
                else if( ( id & 0xE0FF ) == 0x20BD || /* SPU */
                         ( id & 0xF0FF ) == 0xA0BD )  /* LPCM */
                {
                    pos += 1;
                }
            }

            /* Sanity check */
            if( pos >= pes_packet_end )
            {
                pos = pes_packet_end;
                continue;
            }

            /* Here we hit we ES payload */
            buf_es = hb_buffer_init( pes_packet_end - pos );

            buf_es->s.id = id;
            buf_es->s.start = pts;
            buf_es->s.renderOffset = dts;
            buf_es->s.stop  = AV_NOPTS_VALUE;
            if ( state && id == 0xE0)
            {
                // Consume a chapter break, and apply it to the ES.
                restore_chap( state, buf_es );
            }
            memcpy( buf_es->data, d + pos, pes_packet_end - pos );

            hb_list_add( list_es, buf_es );

            pos = pes_packet_end;
        }

        hb_buffer_t *tmp = buf->next;
        buf->next = NULL;
        hb_buffer_close( &buf );
        buf = tmp;
    }
#undef d
}
// mpeg transport stream demuxer. the elementary stream headers have been
// stripped off and buf has all the info gleaned from them: id is set,
// start contains the pts (if any), renderOffset contains the dts (if any)
// and stop contains the pcr (if it changed).
//
// When 'state' is non-NULL this also performs clock recovery: it folds
// PCR updates into the reference clock (check_mpeg_scr), filters out a
// constant PCR/PTS offset via scr_delta, drops packets whose timestamp
// is more than 300s from the clock reference, and re-anchors the clock
// when audio/video PTS jumps more than 5s without a PCR update.
// Buffers are moved (not copied) onto list_es; dropped buffers are
// closed and counted in state->dts_drops.
void hb_demux_mpeg(hb_buffer_t *buf, hb_list_t *list_es,
                   hb_psdemux_t *state, int pcr_tolerance)
{
    while ( buf )
    {
        save_chap( state, buf );
        if ( state )
        {
            if ( buf->s.discontinuity )
            {
                // Buffer has been flagged as a discontinuity.  This happens
                // when a Blu-ray changes clips.
                ++state->scr_changes;
                state->last_scr = buf->s.start;
                state->scr_delta = 0;
            }

            // we're keeping track of timing (i.e., not in scan)
            // check if there's a new pcr in this packet
            if ( buf->s.pcr >= 0 )
            {
                // we have a new pcr
                check_mpeg_scr( state, buf->s.pcr, pcr_tolerance );
                buf->s.pcr = AV_NOPTS_VALUE;
                // Some streams have consistently bad PCRs or SCRs
                // So filter out the offset
                if ( buf->s.start >= 0 )
                    state->scr_delta = buf->s.start - state->last_scr;
            }
            if ( buf->s.start >= 0 )
            {
                // Program streams have an SCR in every PACK header so they
                // can't lose their clock reference. But the PCR in Transport
                // streams is typically on <.1% of the packets. If a PCR
                // packet gets lost and it marks a clock discontinuity then
                // the data following it will be referenced to the wrong
                // clock & introduce huge gaps or throw our A/V sync off.
                // We try to protect against that here by sanity checking
                // timestamps against the current reference clock and discarding
                // packets where the DTS is "too far" from its clock.
                int64_t fdelta = buf->s.start - state->last_scr - state->scr_delta;
                if ( fdelta < -300 * 90000LL || fdelta > 300 * 90000LL )
                {
                    // packet too far behind or ahead of its clock reference
                    ++state->dts_drops;
                    hb_buffer_t *tmp = buf->next;
                    buf->next = NULL;
                    hb_buffer_close( &buf );
                    buf = tmp;
                    continue;
                }
                else
                {
                    // Some streams have no PCRs. In these cases, we
                    // will only get an "PCR" update if a large change
                    // in DTS or PTS is detected. So we need to update
                    // our scr_delta with each valid timestamp so that
                    // fdelta does not continually grow.
                    state->scr_delta = buf->s.start - state->last_scr;
                }
                if (buf->s.type == AUDIO_BUF || buf->s.type == VIDEO_BUF)
                {
                    if ( state->last_pts >= 0 )
                    {
                        fdelta = buf->s.start - state->last_pts;
                        if ( fdelta < -5 * 90000LL || fdelta > 5 * 90000LL )
                        {
                            // Packet too far from last. This may be a NZ TV broadcast
                            // as they like to change the PCR without sending a PCR
                            // update. Since it may be a while until they actually tell
                            // us the new PCR use the PTS as the PCR.
                            ++state->scr_changes;
                            state->last_scr = buf->s.start;
                            state->scr_delta = 0;
                        }
                    }
                    state->last_pts = buf->s.start;
                }
            }
            if ( buf->s.type == VIDEO_BUF )
            {
                // re-attach any chapter break saved by save_chap above
                restore_chap( state, buf );
            }
        }

        hb_buffer_t *tmp = buf->next;
        buf->next = NULL;
        hb_list_add( list_es, buf );
        buf = tmp;
    }
}
/* Transport-stream entry point: hb_demux_mpeg with a TS-appropriate
 * PCR discontinuity tolerance. */
void hb_demux_ts(hb_buffer_t *buf, hb_list_t *list_es, hb_psdemux_t *state)
{
    // Distance between PCRs in TS is up to 100ms, but we have seen
    // streams that exceed this, so allow up to 300ms.
    hb_demux_mpeg(buf, list_es, state, 300);
}
/* Program-stream entry point: hb_demux_mpeg with the PS SCR spacing
 * guaranteed by ISO 13818-1. */
void hb_demux_ps(hb_buffer_t *buf, hb_list_t *list_es, hb_psdemux_t *state)
{
    // Distance between SCRs in PS is up to 700ms
    hb_demux_mpeg(buf, list_es, state, 700);
}
// "null" demuxer (makes a copy of input buf & returns it in list)
// used when the reader for some format includes its own demuxer.
// for example, ffmpeg.
void hb_demux_null( hb_buffer_t * buf, hb_list_t * list_es, hb_psdemux_t* state )
{
while ( buf )
{
save_chap( state, buf );
if ( state )
{
// if we don't have a time offset yet,
// use this timestamp as the offset.
if (state->scr_changes == 0 &&
(buf->s.start != AV_NOPTS_VALUE ||
buf->s.renderOffset != AV_NOPTS_VALUE))
{
++state->scr_changes;
state->last_scr = buf->s.start >= 0 ? buf->s.start : buf->s.renderOffset;
}
if ( buf->s.type == VIDEO_BUF )
{
restore_chap( state, buf );
}
}
hb_buffer_t *tmp = buf->next;
buf->next = NULL;
hb_list_add( list_es, buf );
buf = tmp;
}
}
// Demuxer dispatch table.  NOTE(review): the order (dvd_ps, ts, ps,
// null) presumably matches the demuxer index used by the readers --
// confirm against the enum before reordering.
const hb_muxer_t hb_demux[] = { hb_demux_dvd_ps, hb_demux_ts, hb_demux_ps, hb_demux_null };
HandBrake-0.10.2/libhb/oclscale.c 0000664 0001752 0001752 00000027054 12463330511 017056 0 ustar handbrake handbrake /* oclscale.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
Authors: Peng Gao
Li Cao
*/
#include
#include "common.h"
#include "opencl.h"
#include "openclwrapper.h"
#define FILTER_LEN 4
#define _A -0.5f
/* Bicubic interpolation kernel with A = -0.5 (see _A above).
 * Returns the filter weight for a tap at distance 'x' from the sample
 * point; symmetric in x and zero for |x| >= 2. */
cl_float cubic(cl_float x)
{
    if (x < 0)
    {
        x = -x;                       // kernel is symmetric
    }

    if (x >= 2)
    {
        return 0;
    }
    if (x >= 1)
    {
        /* 1 <= |x| < 2 */
        return (_A) * (x * x * x) - (5.0f * _A) * (x * x)
               + (8.0f * _A) * x - (4.0f * _A);
    }
    /* |x| < 1 */
    return (_A + 2.0f) * (x * x * x) - (_A + 3.0f) * (x * x) + 1;
}
/* Build a table of bicubic filter weights: FILTER_LEN (4) taps for
 * each of 'length' output positions at the given scale factor.
 *
 * Returns a malloc'd array of length*4 cl_floats (caller frees), or
 * NULL if the allocation fails (the original dereferenced the
 * unchecked malloc result). */
cl_float *hb_bicubic_weights(cl_float scale, int length)
{
    cl_float *weights = malloc(length * sizeof(cl_float) * 4);
    if (weights == NULL)
    {
        return NULL;
    }

    int i;
    cl_float *out = weights;
    for (i = 0; i < length; ++i)
    {
        // source position for output sample i, and its fractional part
        cl_float x = i / scale;
        cl_float dx = x - (int)x;
        // four taps centered on the source position
        *out++ = cubic(-dx - 1.0f);
        *out++ = cubic(-dx);
        *out++ = cubic(-dx + 1.0f);
        *out++ = cubic(-dx + 2.0f);
    }
    return weights;
}
int setupScaleWeights(cl_float xscale, cl_float yscale, int width, int height, hb_oclscale_t *os, KernelEnv *kenv);
/**
 * executive scale using opencl
 * get filter args
 * create output buffer
 * create horizontal filter buffer
 * create vertical filter buffer
 * create kernels
 *
 * data[] layout (set up by hb_ocl_scale): in/out cl buffers, crop
 * top/bottom/left/right, input w/h, output w/h, hb_oclscale_t*, and
 * the input/output hb_buffer_t*.  Returns 1 on completion, 0 when
 * OpenCL support is unavailable.
 */
int hb_ocl_scale_func( void **data, KernelEnv *kenv )
{
    cl_int status;

    cl_mem in_buf = data[0];
    cl_mem out_buf = data[1];
    int crop_top = (intptr_t)data[2];
    int crop_bottom = (intptr_t)data[3];
    int crop_left = (intptr_t)data[4];
    int crop_right = (intptr_t)data[5];
    cl_int in_frame_w = (intptr_t)data[6];
    cl_int in_frame_h = (intptr_t)data[7];
    cl_int out_frame_w = (intptr_t)data[8];
    cl_int out_frame_h = (intptr_t)data[9];
    hb_oclscale_t *os = data[10];
    hb_buffer_t *in = data[11];
    hb_buffer_t *out = data[12];

    if (hb_ocl == NULL)
    {
        hb_error("hb_ocl_scale_func: OpenCL support not available");
        return 0;
    }

    // one-time kernel creation, cached in the scaler state
    if (os->initialized == 0)
    {
        hb_log( "Scaling With OpenCL" );
        if (kenv->isAMD != 0)
            hb_log( "Using Zero Copy");
        // create the block kernel
        cl_int status;  // NOTE(review): shadows the outer 'status' and the
                        // clCreateKernel result is never checked
        os->m_kernel = hb_ocl->clCreateKernel(kenv->program, "frame_scale", &status);

        os->initialized = 1;
    }

    {
        // Use the new kernel
        cl_event events[5];
        int eventCount = 0;

        // non-AMD path: buffers are mapped for host access between
        // frames; unmap before the kernel touches them
        if (kenv->isAMD == 0) {
            status = hb_ocl->clEnqueueUnmapMemObject(kenv->command_queue,
                                                     in->cl.buffer, in->data, 0,
                                                     NULL, &events[eventCount++]);
            status = hb_ocl->clEnqueueUnmapMemObject(kenv->command_queue,
                                                     out->cl.buffer, out->data, 0,
                                                     NULL, &events[eventCount++]);
        }

        // plane offsets/strides are passed as scalar kernel args so the
        // kernel can address all three planes within one cl buffer
        cl_int srcPlaneOffset0 = in->plane[0].data - in->data;
        cl_int srcPlaneOffset1 = in->plane[1].data - in->data;
        cl_int srcPlaneOffset2 = in->plane[2].data - in->data;
        cl_int srcRowWords0 = in->plane[0].stride;
        cl_int srcRowWords1 = in->plane[1].stride;
        cl_int srcRowWords2 = in->plane[2].stride;
        cl_int dstPlaneOffset0 = out->plane[0].data - out->data;
        cl_int dstPlaneOffset1 = out->plane[1].data - out->data;
        cl_int dstPlaneOffset2 = out->plane[2].data - out->data;
        cl_int dstRowWords0 = out->plane[0].stride;
        cl_int dstRowWords1 = out->plane[1].stride;
        cl_int dstRowWords2 = out->plane[2].stride;

        // apply cropping by offsetting the source plane origins
        // (chroma planes are assumed subsampled by 2 in each dimension)
        if (crop_top != 0 || crop_bottom != 0 || crop_left != 0 || crop_right != 0) {
            srcPlaneOffset0 += crop_left + crop_top * srcRowWords0;
            srcPlaneOffset1 += crop_left / 2 + (crop_top / 2) * srcRowWords1;
            srcPlaneOffset2 += crop_left / 2 + (crop_top / 2) * srcRowWords2;
            in_frame_w = in_frame_w - crop_right - crop_left;
            in_frame_h = in_frame_h - crop_bottom - crop_top;
        }
        cl_float xscale = (out_frame_w * 1.0f) / in_frame_w;
        cl_float yscale = (out_frame_h * 1.0f) / in_frame_h;
        setupScaleWeights(xscale, yscale, out_frame_w, out_frame_h, os, kenv);

        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 0, sizeof(cl_mem), &out_buf);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 1, sizeof(cl_mem), &in_buf);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 2, sizeof(cl_float), &xscale);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 3, sizeof(cl_float), &yscale);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 4, sizeof(cl_int), &srcPlaneOffset0);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 5, sizeof(cl_int), &srcPlaneOffset1);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 6, sizeof(cl_int), &srcPlaneOffset2);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 7, sizeof(cl_int), &dstPlaneOffset0);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 8, sizeof(cl_int), &dstPlaneOffset1);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 9, sizeof(cl_int), &dstPlaneOffset2);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 10, sizeof(cl_int), &srcRowWords0);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 11, sizeof(cl_int), &srcRowWords1);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 12, sizeof(cl_int), &srcRowWords2);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 13, sizeof(cl_int), &dstRowWords0);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 14, sizeof(cl_int), &dstRowWords1);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 15, sizeof(cl_int), &dstRowWords2);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 16, sizeof(cl_int), &in_frame_w);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 17, sizeof(cl_int), &in_frame_h);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 18, sizeof(cl_int), &out_frame_w);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 19, sizeof(cl_int), &out_frame_h);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 20, sizeof(cl_mem), &os->bicubic_x_weights);
        HB_OCL_CHECK(hb_ocl->clSetKernelArg, os->m_kernel, 21, sizeof(cl_mem), &os->bicubic_y_weights);

        // one work-item per output pixel per plane: 64-wide groups in x,
        // 'ygroups' rows, 3 planes in the z dimension
        size_t workOffset[] = { 0, 0, 0 };
        size_t globalWorkSize[] = { 1, 1, 1 };
        size_t localWorkSize[] = { 1, 1, 1 };

        int xgroups = (out_frame_w + 63) / 64;
        int ygroups = (out_frame_h + 15) / 16;

        localWorkSize[0] = 64;
        localWorkSize[1] = 1;
        localWorkSize[2] = 1;
        globalWorkSize[0] = xgroups * 64;
        globalWorkSize[1] = ygroups;
        globalWorkSize[2] = 3;

        HB_OCL_CHECK(hb_ocl->clEnqueueNDRangeKernel, kenv->command_queue,
                     os->m_kernel, 3, workOffset, globalWorkSize, localWorkSize,
                     eventCount, eventCount == 0 ? NULL : &events[0], &events[eventCount]);
        ++eventCount;

        // re-map the buffers for host access, chained after the kernel
        if (kenv->isAMD == 0) {
            in->data = hb_ocl->clEnqueueMapBuffer(kenv->command_queue, in->cl.buffer,
                                                  CL_FALSE, CL_MAP_READ|CL_MAP_WRITE,
                                                  0, in->alloc,
                                                  eventCount ? 1 : 0,
                                                  eventCount ? &events[eventCount - 1] : NULL,
                                                  &events[eventCount], &status);
            out->data = hb_ocl->clEnqueueMapBuffer(kenv->command_queue, out->cl.buffer,
                                                   CL_FALSE, CL_MAP_READ|CL_MAP_WRITE,
                                                   0, out->alloc,
                                                   eventCount ? 1 : 0,
                                                   eventCount ? &events[eventCount - 1] : NULL,
                                                   &events[eventCount + 1], &status);
            eventCount += 2;
        }

        // wait for the whole chain, then release every event we created
        hb_ocl->clFlush(kenv->command_queue);
        hb_ocl->clWaitForEvents(eventCount, &events[0]);
        int i;
        for (i = 0; i < eventCount; ++i)
        {
            hb_ocl->clReleaseEvent(events[i]);
        }
    }

    return 1;
}
/* (Re)build the horizontal and vertical bicubic weight tables on the
 * OpenCL device.  A table is rebuilt only when its scale factor
 * changes or the cached table is smaller than needed; the results are
 * cached in 'os'.  Returns 0 on success, 1 on error. */
int setupScaleWeights(cl_float xscale, cl_float yscale, int width, int height, hb_oclscale_t *os, KernelEnv *kenv)
{
    cl_int status;

    if (hb_ocl == NULL)
    {
        hb_error("setupScaleWeights: OpenCL support not available");
        return 1;
    }

    if (os->xscale != xscale || os->width < width)
    {
        cl_float *xweights = hb_bicubic_weights(xscale, width);
        if (xweights == NULL)
        {
            // weight allocation failed; keep the previous table intact
            hb_error("setupScaleWeights: x weight allocation failed");
            return 1;
        }
        HB_OCL_BUF_FREE (hb_ocl, os->bicubic_x_weights);
        HB_OCL_BUF_CREATE(hb_ocl, os->bicubic_x_weights, CL_MEM_READ_ONLY,
                          sizeof(cl_float) * width * 4);
        HB_OCL_CHECK(hb_ocl->clEnqueueWriteBuffer, kenv->command_queue, os->bicubic_x_weights,
                     CL_TRUE, 0, sizeof(cl_float) * width * 4, xweights, 0, NULL, NULL);
        os->width = width;
        os->xscale = xscale;
        free(xweights);
    }

    if ((os->yscale != yscale) || (os->height < height))
    {
        cl_float *yweights = hb_bicubic_weights(yscale, height);
        if (yweights == NULL)
        {
            hb_error("setupScaleWeights: y weight allocation failed");
            return 1;
        }
        HB_OCL_BUF_FREE (hb_ocl, os->bicubic_y_weights);
        HB_OCL_BUF_CREATE(hb_ocl, os->bicubic_y_weights, CL_MEM_READ_ONLY,
                          sizeof(cl_float) * height * 4);
        HB_OCL_CHECK(hb_ocl->clEnqueueWriteBuffer, kenv->command_queue, os->bicubic_y_weights,
                     CL_TRUE, 0, sizeof(cl_float) * height * 4, yweights, 0, NULL, NULL);
        os->height = height;
        os->yscale = yscale;
        free(yweights);
    }
    return 0;
}
/**
* function describe: this function is used to scaling video frame. it uses the gausi scaling algorithm
* parameter:
* inputFrameBuffer: the source video frame opencl buffer
* outputdata: the destination video frame buffer
* inputWidth: the width of the source video frame
* inputHeight: the height of the source video frame
* outputWidth: the width of destination video frame
* outputHeight: the height of destination video frame
*/
static int s_scale_init_flag = 0;
/* Register the "frame_scale" OpenCL kernel wrapper exactly once.
 * Returns 1 when registered (or already registered), 0 on failure. */
int do_scale_init()
{
    if ( s_scale_init_flag != 0 )
    {
        return 1;   // already registered
    }

    if ( !hb_register_kernel_wrapper( "frame_scale", hb_ocl_scale_func ) )
    {
        hb_log( "register kernel[%s] failed", "frame_scale" );
        return 0;
    }
    s_scale_init_flag++;
    return 1;
}
/* Scale (and crop) 'in' into 'out' on the GPU via the "frame_scale"
 * kernel.  'crop' is {top, bottom, left, right} in pixels.  Kernel
 * failures are only logged; always returns 0. */
int hb_ocl_scale(hb_buffer_t *in, hb_buffer_t *out, int *crop, hb_oclscale_t *os)
{
    if (do_scale_init() == 0)
    {
        return 0;
    }

    // argument layout consumed by hb_ocl_scale_func
    void *args[13] = {
        in->cl.buffer,
        out->cl.buffer,
        (void*)(intptr_t)(crop[0]),
        (void*)(intptr_t)(crop[1]),
        (void*)(intptr_t)(crop[2]),
        (void*)(intptr_t)(crop[3]),
        (void*)(intptr_t)(in->f.width),
        (void*)(intptr_t)(in->f.height),
        (void*)(intptr_t)(out->f.width),
        (void*)(intptr_t)(out->f.height),
        os,
        in,
        out,
    };

    if ( !hb_run_kernel( "frame_scale", args ) )
    {
        hb_log( "run kernel[%s] failed", "frame_scale" );
    }
    return 0;
}
HandBrake-0.10.2/libhb/dxva2api.c 0000664 0001752 0001752 00000002047 12463330511 017002 0 ustar handbrake handbrake /* dxva2api.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
Authors: Peng Gao
Li Cao
*/
#ifdef USE_HWD
#include "dxva2api.h"
/* Convert a DXVA2 16.16 fixed-point value to float
 * (integer part plus fraction/65536). */
__inline float hb_dx_fixedtofloat( const DXVA2_Fixed32 _fixed_ )
{
    return (FLOAT)_fixed_.Value + (FLOAT)_fixed_.Fraction / 0x10000;
}
/* Return the DXVA2_Fixed32 used for opaque alpha.
 * NOTE(review): this zeroes Value/Fraction and then writes 1 to the
 * .ll union member, mirroring the platform dxva2api.h helper it was
 * copied from.  Opaque alpha as a 16.16 value would normally be
 * Value = 1 (ll = 0x10000) -- confirm against the Windows SDK header
 * before changing. */
__inline const DXVA2_Fixed32 hb_dx_fixed32_opaque_alpha()
{
    DXVA2_Fixed32 _fixed_;
    _fixed_.Fraction = 0;
    _fixed_.Value = 0;
    _fixed_.ll = 1;
    return _fixed_;
}
/* Convert a float to DXVA2 16.16 fixed point: low word becomes the
 * fraction, high word the integer part. */
__inline DXVA2_Fixed32 hb_dx_floattofixed( const float _float_ )
{
    DXVA2_Fixed32 _fixed_;
    _fixed_.Fraction = LOWORD( _float_ * 0x10000 );
    _fixed_.Value = HIWORD( _float_ * 0x10000 );
    return _fixed_;
}
#endif
HandBrake-0.10.2/libhb/taskset.h 0000664 0001752 0001752 00000003543 12463330511 016751 0 ustar handbrake handbrake /* taskset.h
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#ifndef HB_TASKSET_H
#define HB_TASKSET_H
#define TASKSET_POSIX_COMPLIANT 1
#include "bits.h"
// State shared by a fixed pool of worker threads.  Each bitmap holds
// one bit per thread; the condition variables coordinate releasing the
// workers and waiting for them to finish a cycle.
typedef struct hb_taskset_s {
    int thread_count;
    int arg_size;           // bytes of per-thread argument storage
                            // (indexed in taskset_thread_args)
    int bitmap_elements;    // uint32_t words in each bitmap below
    hb_thread_t ** task_threads;
    uint8_t * task_threads_args;
    uint32_t * task_begin_bitmap;    // Threads can begin
    uint32_t * task_complete_bitmap; // Threads have completed
    uint32_t * task_stop_bitmap;     // Threads should exit
    hb_lock_t * task_cond_lock;      // Held during condition tests
    hb_cond_t * task_begin;          // Threads can begin work
    hb_cond_t * task_complete;       // Threads have finished work.
} taskset_t;
int taskset_init( taskset_t *, int /*thread_count*/, size_t /*user_arg_size*/ );
void taskset_cycle( taskset_t * );
void taskset_fini( taskset_t * );
int taskset_thread_spawn( taskset_t *, int /*thr_idx*/, const char * /*descr*/,
thread_func_t *, int /*priority*/ );
void taskset_thread_wait4start( taskset_t *, int );
void taskset_thread_complete( taskset_t *, int );
static inline void *taskset_thread_args( taskset_t *, int );
static inline int taskset_thread_stop( taskset_t *, int );
/* Return a pointer to worker thr_idx's slice of the shared argument
 * block (arg_size bytes per worker). */
static inline void *
taskset_thread_args( taskset_t *ts, int thr_idx )
{
    uint8_t *base = ts->task_threads_args;
    return base + ts->arg_size * thr_idx;
}
/* Nonzero when worker thr_idx's bit is set in the stop bitmap,
 * i.e. the thread has been asked to exit. */
static inline int
taskset_thread_stop( taskset_t *ts, int thr_idx )
{
    return bit_is_set( ts->task_stop_bitmap, thr_idx );
}
#endif /* HB_TASKSET_H */
HandBrake-0.10.2/libhb/muxcommon.c 0000664 0001752 0001752 00000063577 12463330511 017325 0 ustar handbrake handbrake /* muxcommon.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
#include "decssasub.h"
#define MIN_BUFFERING (1024*1024*10)
#define MAX_BUFFERING (1024*1024*50)
struct hb_mux_object_s
{
    HB_MUX_COMMON;   // common muxer interface (init/mux/end hooks, job)
};

// Growable bit vector; one bit per muxed track in practice.
typedef struct
{
    int size;        // Size in bits
    uint32_t * vec;  // storage, (size+31)/32 words
} hb_bitvec_t;

// Ring buffer of media buffers for one track.  'in' and 'out' are
// free-running counters; the array index is (counter & (flen-1)), so
// flen must be a power of two (see mf_push/mf_pull).
typedef struct
{
    hb_buffer_t **fifo;
    uint32_t in;     // number of bufs put into fifo
    uint32_t out;    // number of bufs taken out of fifo
    uint32_t flen;   // fifo length (must be power of two)
} mux_fifo_t;

// Per-track muxing state.
typedef struct
{
    hb_mux_data_t * mux_data;  // container-specific track handle
    uint64_t frames;           // frames written so far
    uint64_t bytes;            // bytes written so far
    mux_fifo_t mf;             // buffered input for this track
    int buffered_size;         // bytes currently queued in mf
} hb_track_t;

// Shared muxer state for all tracks.
typedef struct
{
    hb_lock_t * mutex;
    int ref;
    int done;
    hb_mux_object_t * m;
    double pts;             // end time of next muxing chunk
    double interleave;      // size in 90KHz ticks of media chunks we mux
    uint32_t max_tracks;    // total number of tracks allocated
    uint32_t ntracks;       // total number of tracks we're muxing
    hb_bitvec_t * eof;      // bitmask of track with eof
    hb_bitvec_t * rdy;      // bitmask of tracks ready to output
    hb_bitvec_t * allEof;   // valid bits in eof (all tracks)
    hb_bitvec_t * allRdy;   // valid bits in rdy (audio & video tracks)
    hb_track_t ** track;    // tracks to mux 'max_tracks' elements
    int buffered_size;      // total bytes buffered across all tracks
} hb_mux_t;

// Per-work-object private data: which track this work object feeds.
struct hb_work_private_s
{
    hb_job_t * job;
    int track;      // index into mux->track[]
    hb_mux_t * mux;
};
/* Grow 'bv' by 'bits' additional bits.  Newly added storage words are
 * zeroed and existing bits are preserved.  Returns 0 on success, -1 if
 * the storage could not be reallocated (bv is left unchanged). */
static int hb_bitvec_add_bits(hb_bitvec_t *bv, int bits)
{
    int words_cur = (bv->size + 31) >> 5;
    int words_new = (bv->size + bits + 31) >> 5;

    if (words_new > words_cur)
    {
        uint32_t *grown = realloc(bv->vec, words_new * sizeof(uint32_t));
        if (grown == NULL)
        {
            return -1;
        }
        int ii;
        for (ii = words_cur; ii < words_new; ii++)
        {
            grown[ii] = 0;
        }
        bv->vec = grown;
    }
    bv->size += bits;
    return 0;
}
/* Allocate a bit vector with 'size' bits, all clear.
 * Returns NULL on allocation failure (the original dereferenced an
 * unchecked calloc result inside hb_bitvec_add_bits and ignored its
 * error return). */
static hb_bitvec_t* hb_bitvec_new(int size)
{
    hb_bitvec_t *bv = calloc(sizeof(hb_bitvec_t), 1);
    if (bv == NULL)
    {
        return NULL;
    }
    if (hb_bitvec_add_bits(bv, size) < 0)
    {
        free(bv);
        return NULL;
    }
    return bv;
}
/* Free a bit vector created by hb_bitvec_new and NULL the caller's
 * pointer.  Safe when *_bv is already NULL (the original dereferenced
 * it unconditionally). */
static void hb_bitvec_free(hb_bitvec_t **_bv)
{
    hb_bitvec_t *bv = *_bv;
    if (bv == NULL)
    {
        return;
    }
    free(bv->vec);
    free(bv);
    *_bv = NULL;
}
/* Set bit n.  Out-of-range n is ignored (should never happen). */
static void hb_bitvec_set(hb_bitvec_t *bv, int n)
{
    if (n >= bv->size)
        return; // Error. Should never happen.

    bv->vec[n >> 5] |= (uint32_t)1 << (n & 0x1F);
}
/* Clear bit n.  Out-of-range n is ignored (should never happen). */
static void hb_bitvec_clr(hb_bitvec_t *bv, int n)
{
    if (n >= bv->size)
        return; // Error. Should never happen.

    bv->vec[n >> 5] &= ~((uint32_t)1 << (n & 0x1F));
}
/* Clear every bit in the vector. */
static void hb_bitvec_zero(hb_bitvec_t *bv)
{
    memset(bv->vec, 0, ((bv->size + 31) >> 5) * sizeof(uint32_t));
}
/* Return 1 if bit n is set, 0 when clear or out of range. */
static int hb_bitvec_bit(hb_bitvec_t *bv, int n)
{
    if (n >= bv->size)
        return 0; // Error. Should never happen.

    return (bv->vec[n >> 5] >> (n & 0x1F)) & 1;
}
/* Return 1 if any bit in the vector is set, else 0. */
static int hb_bitvec_any(hb_bitvec_t *bv)
{
    int words = (bv->size + 31) >> 5;
    int ii;
    for (ii = 0; ii < words; ii++)
    {
        if (bv->vec[ii] != 0)
            return 1;
    }
    return 0;
}
/* Return 1 when bv1 and bv2 have identical size and contents. */
static int hb_bitvec_cmp(hb_bitvec_t *bv1, hb_bitvec_t *bv2)
{
    if (bv1->size != bv2->size)
        return 0;

    int words = (bv1->size + 31) >> 5;
    return memcmp(bv1->vec, bv2->vec, words * sizeof(uint32_t)) == 0;
}
/* Return 1 when (bv1 AND bv2) == bv3 word-for-word.
 * All three vectors must be the same size; returns 0 otherwise.
 * (The original only validated bv1 against bv2, so a shorter bv3
 * could be read out of bounds.) */
static int hb_bitvec_and_cmp(hb_bitvec_t *bv1, hb_bitvec_t *bv2, hb_bitvec_t *bv3)
{
    if (bv1->size != bv2->size || bv1->size != bv3->size)
        return 0;

    int ii;
    int words = (bv1->size + 31) >> 5;
    for (ii = 0; ii < words; ii++)
        if ((bv1->vec[ii] & bv2->vec[ii]) != bv3->vec[ii])
            return 0;
    return 1;
}
/* Copy bv2's bits into bv1, growing bv1 if needed.  Returns 0 on
 * success, or a negative value if growing bv1 failed.
 * The copy length is sized by bv2 (the original sized it by bv1,
 * reading past bv2->vec whenever bv1 was larger); any remaining high
 * words of bv1 are zeroed so the result equals bv2. */
static int hb_bitvec_cpy(hb_bitvec_t *bv1, hb_bitvec_t *bv2)
{
    if (bv1->size < bv2->size)
    {
        int result = hb_bitvec_add_bits(bv1, bv2->size - bv1->size);
        if (result < 0)
            return result;
    }

    int words1 = (bv1->size + 31) >> 5;
    int words2 = (bv2->size + 31) >> 5;
    memcpy(bv1->vec, bv2->vec, words2 * sizeof(uint32_t));
    if (words1 > words2)
    {
        // bv1 has extra high words: clear them
        memset(bv1->vec + words2, 0, (words1 - words2) * sizeof(uint32_t));
    }
    return 0;
}
// The muxer handles two different kinds of media: Video and audio tracks
// are continuous: once they start they generate continuous, consecutive
// sequence of bufs until they end. The muxer will time align all continuous
// media tracks so that their data will be well interleaved in the output file.
// (Smooth, low latency playback with minimal player buffering requires that
// data that's going to be presented close together in time also be close
// together in the output file). Since HB's audio and video encoders run at
// different speeds, the time-aligning involves buffering *all* the continuous
// media tracks until a frame with a timestamp beyond the current alignment
// point arrives on the slowest fifo (usually the video encoder).
//
// The other kind of media, subtitles, close-captions, vobsubs and
// similar tracks, are intermittent. They generate frames sporadically or on
// human time scales (seconds) rather than near the video frame rate (milliseconds).
// If intermittent sources were treated like continuous sources huge sections of
// audio and video would get buffered waiting for the next subtitle to show up.
// To keep this from happening the muxer doesn't wait for intermittent tracks
// (essentially it assumes that they will always go through the HB processing
// pipeline faster than the associated video). They are still time aligned and
// interleaved at the appropriate point in the output file.
// This routine adds another track for the muxer to process. The media input
// stream will be read from HandBrake fifo 'fifo'. Buffers read from that
// stream will be time-aligned with all the other media streams then passed
// to the container-specific 'mux' routine with argument 'mux_data' (see
// routine OutputTrackChunk). 'is_continuous' must be 1 for an audio or video
// track and 0 otherwise (see above).
static void add_mux_track( hb_mux_t *mux, hb_mux_data_t *mux_data,
                           int is_continuous )
{
    // grow the track array (doubling, starting at 32) when it's full
    if ( mux->ntracks + 1 > mux->max_tracks )
    {
        int max_tracks = mux->max_tracks ? mux->max_tracks * 2 : 32;
        hb_track_t **tmp;
        tmp = realloc(mux->track, max_tracks * sizeof(hb_track_t*));
        if (tmp == NULL)
        {
            // realloc failed: log and drop the track rather than crash
            hb_error("add_mux_track: realloc failed, too many tracks (>%d)",
                     max_tracks);
            return;
        }
        mux->track = tmp;
        mux->max_tracks = max_tracks;
    }

    hb_track_t *track = calloc( sizeof( hb_track_t ), 1 );
    track->mux_data = mux_data;
    // start with a small power-of-two fifo; mf_push doubles it on demand
    track->mf.flen = 8;
    track->mf.fifo = calloc( sizeof(track->mf.fifo[0]), track->mf.flen );

    int t = mux->ntracks++;
    mux->track[t] = track;
    // every track participates in the EOF mask; only continuous
    // (audio/video) tracks participate in the interleave "ready" mask
    hb_bitvec_set(mux->allEof, t);
    if (is_continuous)
        hb_bitvec_set(mux->allRdy, t);
}
/* Nonzero when this track has buffered more than MAX_BUFFERING bytes. */
static int mf_full( hb_track_t * track )
{
    return track->buffered_size > MAX_BUFFERING;
}
/* Append 'buf' to track tk's internal fifo, expanding the fifo when
 * full and updating the per-track and global buffered byte counts.
 * When the track exceeds MAX_BUFFERING, all continuous tracks are
 * marked ready so the muxer starts draining. */
static void mf_push( hb_mux_t * mux, int tk, hb_buffer_t *buf )
{
    hb_track_t * track = mux->track[tk];
    uint32_t mask = track->mf.flen - 1;
    uint32_t in = track->mf.in;

    // NOTE(review): hb_buffer_reduce presumably trims the buffer's
    // allocation down to its payload size to cut queue memory -- confirm.
    hb_buffer_reduce( buf, buf->size );
    if ( track->buffered_size > MAX_BUFFERING )
    {
        // over the buffering limit: force every continuous track ready
        hb_bitvec_cpy(mux->rdy, mux->allRdy);
    }
    if ( ( ( in + 1 ) & mask ) == ( track->mf.out & mask ) )
    {
        // fifo is full - expand it to double the current size.
        // This is a bit tricky because when we change the size
        // it changes the modulus (mask) used to convert the in
        // and out counters to fifo indices.  Since existing items
        // will be referenced at a new location after the expand
        // we can't just realloc the fifo.  If there were
        // hundreds of fifo entries it would be worth it to have code
        // for each of the four possible before/after configurations
        // but these fifos are small so we just allocate a new chunk
        // of memory then do element by element copies using the old &
        // new masks then free the old fifo's memory..
        track->mf.flen *= 2;
        uint32_t nmask = track->mf.flen - 1;
        hb_buffer_t **nfifo = malloc( track->mf.flen * sizeof(*nfifo) );
        int indx = track->mf.out;
        while ( indx != track->mf.in )
        {
            nfifo[indx & nmask] = track->mf.fifo[indx & mask];
            ++indx;
        }
        free( track->mf.fifo );
        track->mf.fifo = nfifo;
        mask = nmask;
    }
    track->mf.fifo[in & mask] = buf;
    track->mf.in = in + 1;
    track->buffered_size += buf->size;
    mux->buffered_size += buf->size;
}
/* Remove and return the oldest buffer on track tk's internal fifo, or
 * NULL when the fifo is empty.  Adjusts the buffered-byte accounting. */
static hb_buffer_t *mf_pull( hb_mux_t * mux, int tk )
{
    hb_track_t  *track = mux->track[tk];
    hb_buffer_t *out = NULL;

    if ( track->mf.out != track->mf.in )
    {
        // fifo has data; pop the entry at the 'out' cursor
        uint32_t mask = track->mf.flen - 1;
        out = track->mf.fifo[track->mf.out & mask];
        track->mf.out++;
        track->buffered_size -= out->size;
        mux->buffered_size   -= out->size;
    }
    return out;
}
/* Return the oldest buffer on the track's fifo without removing it,
 * or NULL when the fifo is empty. */
static hb_buffer_t *mf_peek( hb_track_t *track )
{
    if ( track->mf.out == track->mf.in )
    {
        return NULL;
    }
    return track->mf.fifo[track->mf.out & (track->mf.flen - 1)];
}
/* Stage an incoming buffer on track tk's internal fifo and mark the
 * track ready when it has data at or past the next interleave point. */
static void MoveToInternalFifos( int tk, hb_mux_t *mux, hb_buffer_t * buf )
{
    // Buffers are staged internally so that (a) we never deadlock the
    // reader and (b) we control how data from multiple tracks is
    // interleaved in the output file.
    mf_push( mux, tk, buf );

    // A buffer starting at or after the interleave point means this
    // track has enough data for the next round of output.
    if ( buf->s.start >= mux->pts )
    {
        hb_bitvec_set(mux->rdy, tk);
    }
}
/* Drain track tk into the container: emit every queued buffer whose
 * start time precedes the current interleave point (mux->pts), updating
 * the track's frame/byte statistics as we go. */
static void OutputTrackChunk( hb_mux_t *mux, int tk, hb_mux_object_t *m )
{
    hb_track_t  *track = mux->track[tk];
    hb_buffer_t *b;

    for ( b = mf_peek( track ); b != NULL && b->s.start < mux->pts;
          b = mf_peek( track ) )
    {
        b = mf_pull( mux, tk );
        track->frames += 1;
        track->bytes  += b->size;
        m->mux( m, track->mux_data, b );
    }
}
/* Work-object entry point shared by every mux thread (one per track).
 * Each call moves one buffer from the thread's input fifo onto the
 * track's internal fifo, then -- under the shared mutex -- runs the
 * interleaver whenever all tracks are ready (or all at EOF), emitting
 * data in mux->interleave-sized time slices.
 *
 * Returns HB_WORK_OK while muxing continues, HB_WORK_DONE once the
 * muxer has been shut down or all fifos have been drained at EOF.
 * buf_out is unused; buffers are consumed here (ownership transfers). */
static int muxWork( hb_work_object_t * w, hb_buffer_t ** buf_in,
                    hb_buffer_t ** buf_out )
{
    hb_work_private_t * pv = w->private_data;
    hb_job_t * job = pv->job;
    hb_mux_t * mux = pv->mux;
    hb_track_t * track;
    int i;
    hb_buffer_t * buf = *buf_in;

    hb_lock( mux->mutex );
    if ( mux->done )
    {
        hb_unlock( mux->mutex );
        return HB_WORK_DONE;
    }

    if ( buf->size <= 0 )
    {
        // EOF - mark this track as done
        hb_buffer_close( &buf );
        hb_bitvec_set(mux->eof, pv->track);
        hb_bitvec_set(mux->rdy, pv->track);
    }
    else if ((job->pass != 0 && job->pass != 2) ||
             hb_bitvec_bit(mux->eof, pv->track))
    {
        // non-output pass of a multi-pass encode, or data arriving
        // after this track already hit EOF -- discard it
        hb_buffer_close( &buf );
    }
    else
    {
        MoveToInternalFifos( pv->track, mux, buf );
    }
    *buf_in = NULL;

    // nothing to do until every track is ready or every track is at EOF
    if (!hb_bitvec_and_cmp(mux->rdy, mux->allRdy, mux->allRdy) &&
        !hb_bitvec_and_cmp(mux->eof, mux->allEof, mux->allEof))
    {
        hb_unlock( mux->mutex );
        return HB_WORK_OK;
    }

    hb_bitvec_t *more;
    more = hb_bitvec_new(0);
    hb_bitvec_cpy(more, mux->rdy);

    // all tracks have at least 'interleave' ticks of data. Output
    // all that we can in 'interleave' size chunks.
    while ((hb_bitvec_and_cmp(mux->rdy, mux->allRdy, mux->allRdy) &&
            hb_bitvec_any(more) && mux->buffered_size > MIN_BUFFERING ) ||
           (hb_bitvec_cmp(mux->eof, mux->allEof)))
    {
        hb_bitvec_zero(more);
        for ( i = 0; i < mux->ntracks; ++i )
        {
            track = mux->track[i];
            OutputTrackChunk( mux, i, mux->m );
            if ( mf_full( track ) )
            {
                // If the track's fifo is still full, advance
                // the current interleave point and try again.
                hb_bitvec_cpy(mux->rdy, mux->allRdy);
                break;
            }
            // if the track is at eof or still has data that's past
            // our next interleave point then leave it marked as rdy.
            // Otherwise clear rdy.
            if (hb_bitvec_bit(mux->eof, i) &&
                (track->mf.out == track->mf.in ||
                 track->mf.fifo[(track->mf.in-1) & (track->mf.flen-1)]->s.start
                     < mux->pts + mux->interleave))
            {
                hb_bitvec_clr(mux->rdy, i);
            }
            if ( track->mf.out != track->mf.in )
            {
                hb_bitvec_set(more, i);
            }
        }

        // if all the tracks are at eof we're just purging their
        // remaining data -- keep going until all internal fifos are empty.
        if (hb_bitvec_cmp(mux->eof, mux->allEof))
        {
            for ( i = 0; i < mux->ntracks; ++i )
            {
                if ( mux->track[i]->mf.out != mux->track[i]->mf.in )
                {
                    break;
                }
            }
            if ( i >= mux->ntracks )
            {
                // every fifo drained -- muxing is finished
                mux->done = 1;
                hb_unlock( mux->mutex );
                hb_bitvec_free(&more);
                return HB_WORK_DONE;
            }
        }
        mux->pts += mux->interleave;
    }
    hb_bitvec_free(&more);
    hb_unlock( mux->mutex );
    return HB_WORK_OK;
}
/* Close one mux work object.  The shared hb_mux_t is reference-counted;
 * the last closer finalizes the container (mux->m->end), logs per-track
 * statistics, drains and frees every track fifo, and destroys the shared
 * state (mutex, bit vectors, track array). */
void muxClose( hb_work_object_t * w )
{
    hb_work_private_t * pv = w->private_data;
    hb_mux_t * mux = pv->mux;
    hb_job_t * job = pv->job;
    hb_track_t * track;
    int i;

    hb_lock( mux->mutex );
    if ( --mux->ref == 0 )
    {
        // Update state before closing muxer. Closing the muxer
        // may initiate optimization which can take a while and
        // we want the muxing state to be visible while this is
        // happening.
        if( job->pass == 0 || job->pass == 2 )
        {
            /* Update the UI */
            hb_state_t state;
            state.state = HB_STATE_MUXING;
            state.param.muxing.progress = 0;
            hb_set_state( job->h, &state );
        }
        if( mux->m )
        {
            mux->m->end( mux->m );
            free( mux->m );
        }
        // we're all done muxing -- print final stats and cleanup.
        if( job->pass == 0 || job->pass == 2 )
        {
            hb_stat_t sb;
            uint64_t bytes_total, frames_total;

            if (!hb_stat(job->file, &sb))
            {
                hb_deep_log( 2, "mux: file size, %"PRId64" bytes", (uint64_t) sb.st_size );

                bytes_total  = 0;
                frames_total = 0;
                for( i = 0; i < mux->ntracks; ++i )
                {
                    track = mux->track[i];
                    // kbps = bytes / seconds / 125, with mux->pts (90kHz
                    // ticks) giving total muxed duration
                    hb_log( "mux: track %d, %"PRId64" frames, %"PRId64" bytes, %.2f kbps, fifo %d",
                            i, track->frames, track->bytes,
                            90000.0 * track->bytes / mux->pts / 125,
                            track->mf.flen );
                    if( !i && job->vquality < 0 )
                    {
                        /* Video track in bitrate mode: report deviation
                         * from the requested bitrate */
                        hb_deep_log( 2, "mux: video bitrate error, %+"PRId64" bytes",
                                (int64_t)(track->bytes - mux->pts * job->vbitrate * 125 / 90000) );
                    }
                    bytes_total  += track->bytes;
                    frames_total += track->frames;
                }

                if( bytes_total && frames_total )
                {
                    // container overhead = file size minus payload bytes
                    hb_deep_log( 2, "mux: overhead, %.2f bytes per frame",
                            (float) ( sb.st_size - bytes_total ) /
                            frames_total );
                }
            }
        }

        // free any buffers still queued plus each track's fifo storage
        for( i = 0; i < mux->ntracks; ++i )
        {
            hb_buffer_t * b;
            track = mux->track[i];
            while ( (b = mf_pull( mux, i )) != NULL )
            {
                hb_buffer_close( &b );
            }
            if( track->mux_data )
            {
                free( track->mux_data );
                free( track->mf.fifo );
            }
            free( track );
        }
        free(mux->track);
        hb_unlock( mux->mutex );
        hb_lock_close( &mux->mutex );
        hb_bitvec_free(&mux->eof);
        hb_bitvec_free(&mux->rdy);
        hb_bitvec_free(&mux->allEof);
        hb_bitvec_free(&mux->allRdy);
        free( mux );
    }
    else
    {
        hb_unlock( mux->mutex );
    }
    free( pv );
    w->private_data = NULL;
}
/* Thread body for each mux work object: pull buffers from the input
 * fifo and feed them to muxWork() until the job dies or muxing is done.
 * Any buffer not consumed by muxWork() is closed here.
 *
 * BUGFIX: the original leaked the buffer just pulled from the fifo when
 * it broke out of the loop on pv->mux->done -- close it before exiting. */
static void mux_loop( void * _w )
{
    hb_work_object_t  * w  = _w;
    hb_work_private_t * pv = w->private_data;
    hb_job_t          * job = pv->job;
    hb_buffer_t       * buf_in;

    while ( !*job->die && w->status != HB_WORK_DONE )
    {
        buf_in = hb_fifo_get_wait( w->fifo_in );
        if ( pv->mux->done )
        {
            // another mux thread finished the job; don't leak the
            // buffer we may have just taken off the fifo
            if ( buf_in )
            {
                hb_buffer_close( &buf_in );
            }
            break;
        }
        if ( buf_in == NULL )
            continue;
        if ( *job->die )
        {
            if( buf_in )
            {
                hb_buffer_close( &buf_in );
            }
            break;
        }

        w->status = w->work( w, &buf_in, NULL );
        if( buf_in )
        {
            hb_buffer_close( &buf_in );
        }
    }
}
/* Create the shared muxer state for a job and one mux work object per
 * track (video, audio, subtitle-passthru).  The returned work object is
 * the video-track muxer; the audio/subtitle muxers get their own threads
 * and are added to job->list_work.
 *
 * Returns NULL on failure (allocation failure or unsupported container),
 * after setting *job->done_error and *job->die.
 *
 * BUGFIX: the error paths previously leaked 'mux', its four bit vectors
 * and its mutex; they now release everything allocated before failing.
 * Also added a calloc() NULL check before the first dereference. */
hb_work_object_t * hb_muxer_init( hb_job_t * job )
{
    int i;
    hb_work_object_t * w;
    hb_work_object_t * muxer;

    hb_mux_t * mux = calloc( sizeof( hb_mux_t ), 1 );
    if ( mux == NULL )
    {
        hb_error( "hb_muxer_init: calloc failed" );
        *job->done_error = HB_ERROR_INIT;
        *job->die = 1;
        return NULL;
    }

    // The bit vectors must be allocated before hb_thread_init for the
    // audio and subtitle muxer jobs below.
    int bit_vec_size = 1 + hb_list_count(job->list_audio) +
                           hb_list_count(job->list_subtitle);
    mux->rdy = hb_bitvec_new(bit_vec_size);
    mux->eof = hb_bitvec_new(bit_vec_size);
    mux->allRdy = hb_bitvec_new(bit_vec_size);
    mux->allEof = hb_bitvec_new(bit_vec_size);

    mux->mutex = hb_lock_init();

    // set up to interleave track data in blocks of 1 video frame time.
    // (the best case for buffering and playout latency). The container-
    // specific muxers can reblock this into bigger chunks if necessary.
    mux->interleave = 90000. * (double)job->vrate_base / (double)job->vrate;
    mux->pts = mux->interleave;

    /* Get a real muxer */
    if( job->pass == 0 || job->pass == 2)
    {
        switch( job->mux )
        {
            case HB_MUX_AV_MP4:
            case HB_MUX_AV_MKV:
                mux->m = hb_mux_avformat_init( job );
                break;
            default:
                hb_error( "No muxer selected, exiting" );
                *job->done_error = HB_ERROR_INIT;
                *job->die = 1;
                // release everything allocated above; callers treat a
                // NULL return as fatal and never see 'mux'
                hb_bitvec_free(&mux->rdy);
                hb_bitvec_free(&mux->eof);
                hb_bitvec_free(&mux->allRdy);
                hb_bitvec_free(&mux->allEof);
                hb_lock_close( &mux->mutex );
                free( mux );
                return NULL;
        }
        /* Create file, write headers */
        if( mux->m )
        {
            mux->m->init( mux->m );
        }
    }

    /* Initialize the work objects that will receive fifo data */
    muxer = hb_get_work( WORK_MUX );
    muxer->private_data = calloc( sizeof( hb_work_private_t ), 1 );
    muxer->private_data->job = job;
    muxer->private_data->mux = mux;
    mux->ref++;
    muxer->private_data->track = mux->ntracks;
    muxer->fifo_in = job->fifo_mpeg4;
    add_mux_track( mux, job->mux_data, 1 );
    muxer->done = &muxer->private_data->mux->done;

    for( i = 0; i < hb_list_count( job->list_audio ); i++ )
    {
        hb_audio_t *audio = hb_list_item( job->list_audio, i );

        w = hb_get_work( WORK_MUX );
        w->private_data = calloc( sizeof( hb_work_private_t ), 1 );
        w->private_data->job = job;
        w->private_data->mux = mux;
        mux->ref++;
        w->private_data->track = mux->ntracks;
        w->fifo_in = audio->priv.fifo_out;
        add_mux_track( mux, audio->priv.mux_data, 1 );
        w->done = &job->done;
        hb_list_add( job->list_work, w );
        w->thread = hb_thread_init( w->name, mux_loop, w, HB_NORMAL_PRIORITY );
    }

    for( i = 0; i < hb_list_count( job->list_subtitle ); i++ )
    {
        hb_subtitle_t *subtitle = hb_list_item( job->list_subtitle, i );

        // only passthru subtitles reach the muxer; rendered subs are
        // burned into the video upstream
        if (subtitle->config.dest != PASSTHRUSUB)
            continue;

        w = hb_get_work( WORK_MUX );
        w->private_data = calloc( sizeof( hb_work_private_t ), 1 );
        w->private_data->job = job;
        w->private_data->mux = mux;
        mux->ref++;
        w->private_data->track = mux->ntracks;
        w->fifo_in = subtitle->fifo_out;
        add_mux_track( mux, subtitle->mux_data, 0 );
        w->done = &job->done;
        hb_list_add( job->list_work, w );
        w->thread = hb_thread_init( w->name, mux_loop, w, HB_NORMAL_PRIORITY );
    }
    return muxer;
}
// muxInit does nothing because the muxer has a special initializer
// that takes care of initializing all muxer work objects
static int muxInit( hb_work_object_t * w, hb_job_t * job )
{
    // Intentionally empty: all real setup happens in hb_muxer_init().
    return 0;
}
/* Work-object descriptor registered for the muxer stage. */
hb_work_object_t hb_muxer =
{
    WORK_MUX,   // work object id
    "Muxer",    // display name (also used as the thread name)
    muxInit,    // per-object init (no-op; see hb_muxer_init)
    muxWork,    // per-buffer work function
    muxClose    // teardown
};
/* Style flags that map onto the tx3g face-style-flags field. */
#define TX3G_STYLES (HB_STYLE_FLAG_BOLD   |\
                     HB_STYLE_FLAG_ITALIC |\
                     HB_STYLE_FLAG_UNDERLINE)

/* Running state while converting SSA markup to a tx3g 'styl' atom. */
typedef struct style_context_s
{
    uint8_t * style_atoms;            // output buffer for the 'styl' atom
    int style_atom_count;             // number of 12-byte style records written
    hb_subtitle_style_t current_style; // style of the run being accumulated
    int style_start;                  // character index where that run began
} style_context_t;
/* Append one 12-byte tx3g StyleRecord covering characters
 * [ctx->style_start, stop] to the style atom buffer.  The record layout
 * follows the buffer math here: records start at offset 10 (4-byte atom
 * size + 4-byte 'styl' tag + 2-byte count) and are big-endian. */
static void update_style_atoms(style_context_t *ctx, int stop)
{
    uint8_t *rec  = ctx->style_atoms + 10 + 12 * ctx->style_atom_count;
    uint8_t  face = 0;

    if (ctx->current_style.flags & HB_STYLE_FLAG_BOLD)
    {
        face |= 1;
    }
    if (ctx->current_style.flags & HB_STYLE_FLAG_ITALIC)
    {
        face |= 2;
    }
    if (ctx->current_style.flags & HB_STYLE_FLAG_UNDERLINE)
    {
        face |= 4;
    }

    rec[0]  = (ctx->style_start >> 8) & 0xff;           // startChar (hi)
    rec[1]  =  ctx->style_start       & 0xff;           // startChar (lo)
    rec[2]  = (stop >> 8) & 0xff;                       // endChar (hi)
    rec[3]  =  stop       & 0xff;                       // endChar (lo)
    rec[4]  = 0;                                        // font-ID msb
    rec[5]  = 1;                                        // font-ID lsb
    rec[6]  = face;                                     // face-style-flags
    rec[7]  = 24;                                       // font-size
    rec[8]  = (ctx->current_style.fg_rgb >> 16) & 0xff; // text color r
    rec[9]  = (ctx->current_style.fg_rgb >>  8) & 0xff; // g
    rec[10] =  ctx->current_style.fg_rgb        & 0xff; // b
    rec[11] =  ctx->current_style.fg_alpha;             // a

    ctx->style_atom_count++;
}
/* Note a (possibly unchanged) style taking effect at character 'pos'.
 * When the style differs from the run in progress, the previous run is
 * flushed as a style atom; either way the new style/position become the
 * current run. */
static void update_style(style_context_t *ctx,
                         hb_subtitle_style_t *style, int pos)
{
    if (ctx->style_start >= pos)
    {
        // no text was emitted under the previous style; just adopt the
        // new one starting here
        ctx->current_style = *style;
        ctx->style_start   = pos;
        return;
    }

    int changed = ((ctx->current_style.flags ^ style->flags) & TX3G_STYLES) ||
                  ctx->current_style.fg_rgb   != style->fg_rgb ||
                  ctx->current_style.fg_alpha != style->fg_alpha;
    if (changed)
    {
        // close out the run that ended at pos - 1, then start a new one
        update_style_atoms(ctx, pos - 1);
        ctx->current_style = *style;
        ctx->style_start   = pos;
    }
}
/* Reset a style context for a new subtitle; style_start = INT_MAX marks
 * "no run started yet" so the first update_style() just adopts its style. */
static void style_context_init(style_context_t *ctx, uint8_t *style_atoms)
{
    memset(ctx, 0, sizeof *ctx);
    ctx->style_atoms = style_atoms;
    ctx->style_start = INT_MAX;
}
/*
* Copy the input to output removing markup and adding markup to the style
* atom where appropriate.
*/
/*
 * Copy the input to output removing markup and adding markup to the style
 * atom where appropriate.
 *
 * input:       NUL-terminated SSA event line (preamble fields then text)
 * output:      receives the plain UTF-8 text, NUL-terminated
 * style_atoms: receives a complete tx3g 'styl' atom when any style runs
 *              were emitted
 * stylesize:   set to the byte size of the 'styl' atom, 0 if none
 *
 * Character positions passed to update_style() are (out_pos - utf8_count),
 * i.e. byte offsets minus the count of UTF-8 continuation bytes seen so
 * far, which yields character indices as tx3g requires.
 */
void hb_muxmp4_process_subtitle_style(uint8_t *input,
                                      uint8_t *output,
                                      uint8_t *style_atoms, uint16_t *stylesize)
{
    uint16_t utf8_count = 0;         // utf8 count from start of subtitle
    int consumed, in_pos = 0, out_pos = 0, len, ii, lines;
    style_context_t ctx;
    hb_subtitle_style_t style;
    char *text, *tmp;

    *stylesize = 0;
    style_context_init(&ctx, style_atoms);

    hb_ssa_style_init(&style);

    // Skip past the SSA preamble (the first 8 comma-separated fields)
    text = (char*)input;
    for (ii = 0; ii < 8; ii++)
    {
        tmp = strchr(text, ',');
        if (tmp == NULL)
            break;
        text = tmp + 1;
    }
    in_pos = text - (char*)input;

    while (input[in_pos] != '\0')
    {
        lines = 1;
        // convert the next run of SSA-tagged text to plain text,
        // updating 'style' with any tags encountered
        text = hb_ssa_to_text((char*)input + in_pos, &consumed, &style);
        if (text == NULL)
            break;

        // count UTF8 characters, and get length of text
        len = 0;
        for (ii = 0; text[ii] != '\0'; ii++)
        {
            // 10xxxxxx bytes are UTF-8 continuation bytes: count them so
            // byte offsets can be converted to character offsets
            if ((text[ii] & 0xc0) == 0x80)
            {
                utf8_count++;
                hb_deep_log( 3, "mux: Counted %d UTF-8 chrs within subtitle",
                                 utf8_count);
            }
            // By default tx3g only supports 2 lines of text
            // To support more lines, we must enable the vertical placement
            // flag in the tx3g atom and add tbox atoms to the sample
            // data to set the vertical placement for each subtitle.
            // Although tbox defines a rectangle, the QT spec says
            // that only the vertical placement is honored (bummer).
            if (text[ii] == '\n')
            {
                lines++;
                if (lines > 2)
                    text[ii] = ' ';
            }
            len++;
        }
        strcpy((char*)output+out_pos, text);
        free(text);
        out_pos += len;
        in_pos += consumed;
        update_style(&ctx, &style, out_pos - utf8_count);
    }
    // Return to default style at end of line, flushes any pending
    // style changes
    hb_ssa_style_init(&style);
    update_style(&ctx, &style, out_pos - utf8_count);

    // null terminate output string
    output[out_pos] = 0;

    if (ctx.style_atom_count > 0)
    {
        // finalize the 'styl' atom header: 32-bit size, 'styl' tag,
        // 16-bit record count (all big-endian)
        *stylesize = 10 + (ctx.style_atom_count * 12);

        memcpy(style_atoms + 4, "styl", 4);

        style_atoms[0] = 0;
        style_atoms[1] = 0;
        style_atoms[2] = (*stylesize >> 8) & 0xff;
        style_atoms[3] = *stylesize & 0xff;
        style_atoms[8] = (ctx.style_atom_count >> 8) & 0xff;
        style_atoms[9] = ctx.style_atom_count & 0xff;
    }
}
HandBrake-0.10.2/libhb/common.h 0000664 0001752 0001752 00000131325 12463330511 016563 0 ustar handbrake handbrake /* common.h
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
   Homepage: <http://handbrake.fr/>.
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#ifndef HB_COMMON_H
#define HB_COMMON_H
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
/*
* It seems WinXP doesn't align the stack of new threads to 16 bytes.
 * To prevent crashes in SSE functions, we need to force stack alignment
* of new threads.
*/
#if defined( __GNUC__ ) && (defined( _WIN32 ) || defined( __MINGW32__ ))
# define attribute_align_thread __attribute__((force_align_arg_pointer))
#else
# define attribute_align_thread
#endif
#if defined( __GNUC__ ) && !(defined( _WIN32 ) || defined( __MINGW32__ ))
# define HB_WPRINTF(s,v) __attribute__((format(printf,s,v)))
#else
# define HB_WPRINTF(s,v)
#endif
#if defined( SYS_MINGW )
# define fseek fseeko64
# define ftell ftello64
# undef fseeko
# define fseeko fseeko64
# undef ftello
# define ftello ftello64
# define flockfile(...)
# define funlockfile(...)
# define getc_unlocked getc
# undef off_t
# define off_t off64_t
#endif
#ifndef MIN
#define MIN( a, b ) ( (a) > (b) ? (b) : (a) )
#endif
#ifndef MAX
#define MAX( a, b ) ( (a) > (b) ? (a) : (b) )
#endif
#ifndef HB_DEBUG_ASSERT
#define HB_DEBUG_ASSERT(x, y) { if ((x)) { hb_error("ASSERT: %s", y); exit(1); } }
#endif
#define EVEN( a ) ((a) + ((a) & 1))
#define MULTIPLE_MOD(a, b) (((b) * (int)(((a) + ((b) / 2)) / (b))))
#define MULTIPLE_MOD_UP(a, b) (((b) * (int)(((a) + ((b) - 1)) / (b))))
#define MULTIPLE_MOD_DOWN(a, b) (((b) * (int)((a) / (b))))
#define HB_DVD_READ_BUFFER_SIZE 2048
typedef struct hb_handle_s hb_handle_t;
typedef struct hb_list_s hb_list_t;
typedef struct hb_rate_s hb_rate_t;
typedef struct hb_dither_s hb_dither_t;
typedef struct hb_mixdown_s hb_mixdown_t;
typedef struct hb_encoder_s hb_encoder_t;
typedef struct hb_container_s hb_container_t;
typedef struct hb_rational_s hb_rational_t;
typedef struct hb_geometry_s hb_geometry_t;
typedef struct hb_ui_geometry_s hb_ui_geometry_t;
typedef struct hb_image_s hb_image_t;
typedef struct hb_job_s hb_job_t;
typedef struct hb_title_set_s hb_title_set_t;
typedef struct hb_title_s hb_title_t;
typedef struct hb_chapter_s hb_chapter_t;
typedef struct hb_audio_s hb_audio_t;
typedef struct hb_audio_config_s hb_audio_config_t;
typedef struct hb_subtitle_s hb_subtitle_t;
typedef struct hb_subtitle_config_s hb_subtitle_config_t;
typedef struct hb_attachment_s hb_attachment_t;
typedef struct hb_metadata_s hb_metadata_t;
typedef struct hb_coverart_s hb_coverart_t;
typedef struct hb_state_s hb_state_t;
typedef union hb_esconfig_u hb_esconfig_t;
typedef struct hb_work_private_s hb_work_private_t;
typedef struct hb_work_object_s hb_work_object_t;
typedef struct hb_filter_private_s hb_filter_private_t;
typedef struct hb_filter_object_s hb_filter_object_t;
typedef struct hb_buffer_s hb_buffer_t;
typedef struct hb_buffer_settings_s hb_buffer_settings_t;
typedef struct hb_image_format_s hb_image_format_t;
typedef struct hb_fifo_s hb_fifo_t;
typedef struct hb_lock_s hb_lock_t;
typedef enum
{
HB_ERROR_NONE = 0,
HB_ERROR_CANCELED ,
HB_ERROR_WRONG_INPUT,
HB_ERROR_INIT ,
HB_ERROR_UNKNOWN
} hb_error_code;
#include "ports.h"
#ifdef __LIBHB__
#include "internal.h"
#define PRIVATE
#else
#define PRIVATE const
#endif
#include "audio_remap.h"
#include "libavutil/channel_layout.h"
#ifdef USE_QSV
#include "libavcodec/qsv.h"
#endif
hb_list_t * hb_list_init();
int hb_list_count( const hb_list_t * );
void hb_list_add( hb_list_t *, void * );
void hb_list_insert( hb_list_t * l, int pos, void * p );
void hb_list_rem( hb_list_t *, void * );
void * hb_list_item( const hb_list_t *, int );
void hb_list_close( hb_list_t ** );
void hb_reduce( int *x, int *y, int num, int den );
void hb_reduce64( int64_t *x, int64_t *y, int64_t num, int64_t den );
void hb_limit_rational64( int64_t *x, int64_t *y, int64_t num, int64_t den, int64_t limit );
#define HB_KEEP_WIDTH 0x01
#define HB_KEEP_HEIGHT 0x02
#define HB_KEEP_DISPLAY_ASPECT 0x04
void hb_job_set_encoder_preset (hb_job_t *job, const char *preset);
void hb_job_set_encoder_tune (hb_job_t *job, const char *tune);
void hb_job_set_encoder_options(hb_job_t *job, const char *options);
void hb_job_set_encoder_profile(hb_job_t *job, const char *profile);
void hb_job_set_encoder_level (hb_job_t *job, const char *level);
void hb_job_set_file (hb_job_t *job, const char *file);
hb_audio_t *hb_audio_copy(const hb_audio_t *src);
hb_list_t *hb_audio_list_copy(const hb_list_t *src);
void hb_audio_close(hb_audio_t **audio);
void hb_audio_config_init(hb_audio_config_t * audiocfg);
int hb_audio_add(const hb_job_t * job, const hb_audio_config_t * audiocfg);
hb_audio_config_t * hb_list_audio_config_item(hb_list_t * list, int i);
int hb_subtitle_add_ssa_header(hb_subtitle_t *subtitle, int width, int height);
hb_subtitle_t *hb_subtitle_copy(const hb_subtitle_t *src);
hb_list_t *hb_subtitle_list_copy(const hb_list_t *src);
void hb_subtitle_close( hb_subtitle_t **sub );
int hb_subtitle_add(const hb_job_t * job, const hb_subtitle_config_t * subtitlecfg, int track);
int hb_srt_add(const hb_job_t * job, const hb_subtitle_config_t * subtitlecfg,
const char *lang);
int hb_subtitle_can_force( int source );
int hb_subtitle_can_burn( int source );
int hb_subtitle_can_pass( int source, int mux );
int hb_audio_can_apply_drc(uint32_t codec, uint32_t codec_param, int encoder);
hb_attachment_t *hb_attachment_copy(const hb_attachment_t *src);
hb_list_t *hb_attachment_list_copy(const hb_list_t *src);
void hb_attachment_close(hb_attachment_t **attachment);
hb_metadata_t * hb_metadata_init();
hb_metadata_t * hb_metadata_copy(const hb_metadata_t *src);
void hb_metadata_close(hb_metadata_t **metadata);
void hb_metadata_set_name( hb_metadata_t *metadata, const char *name );
void hb_metadata_set_artist( hb_metadata_t *metadata, const char *artist );
void hb_metadata_set_composer( hb_metadata_t *metadata, const char *composer );
void hb_metadata_set_release_date( hb_metadata_t *metadata, const char *release_date );
void hb_metadata_set_comment( hb_metadata_t *metadata, const char *comment );
void hb_metadata_set_genre( hb_metadata_t *metadata, const char *genre );
void hb_metadata_set_album( hb_metadata_t *metadata, const char *album );
void hb_metadata_set_album_artist( hb_metadata_t *metadata, const char *album_artist );
void hb_metadata_set_description( hb_metadata_t *metadata, const char *description );
void hb_metadata_set_long_description( hb_metadata_t *metadata, const char *long_description );
void hb_metadata_add_coverart( hb_metadata_t *metadata, const uint8_t *data, int size, int type );
void hb_metadata_rem_coverart( hb_metadata_t *metadata, int ii );
hb_chapter_t *hb_chapter_copy(const hb_chapter_t *src);
hb_list_t *hb_chapter_list_copy(const hb_list_t *src);
void hb_chapter_close(hb_chapter_t **chapter);
void hb_chapter_set_title(hb_chapter_t *chapter, const char *title);
// Update win/CS/HandBrake.Interop/HandBrakeInterop/HbLib/hb_rate_s.cs when changing this struct
struct hb_rate_s
{
    const char *name;  // display name for the rate
    int         rate;  // numeric value; units depend on the list it
                       // appears in (video vs. audio) -- TODO confirm
};

struct hb_dither_s
{
    const char *description; // human-readable description
    const char *short_name;  // CLI/short identifier
    int         method;      // dither method id
};

// Update win/CS/HandBrake.Interop/HandBrakeInterop/HbLib/hb_mixdown_s.cs when changing this struct
struct hb_mixdown_s
{
    const char *name;       // display name
    const char *short_name; // CLI/short identifier
    int         amixdown;   // mixdown id
};

// Update win/CS/HandBrake.Interop/HandBrakeInterop/HbLib/hb_encoder_s.cs when changing this struct
struct hb_encoder_s
{
    const char *name;       // note: used in presets
    const char *short_name; // note: used in CLI
    const char *long_name;  // used in log
    int         codec;      // HB_*CODEC_* define
    int         muxers;     // supported muxers
};

// Update win/CS/HandBrake.Interop/HandBrakeInterop/HbLib/hb_container_s.cs when changing this struct
struct hb_container_s
{
    const char *name;              // display name
    const char *short_name;        // CLI/short identifier
    const char *long_name;         // descriptive name (for logs)
    const char *default_extension; // default output file extension
    int         format;            // HB_MUX_* define
};

/* Exact rational number (e.g. a pixel aspect ratio). */
struct hb_rational_s
{
    int num; // numerator
    int den; // denominator
};

/* Storage dimensions plus pixel aspect ratio. */
struct hb_geometry_s
{
    int width;          // storage width in pixels
    int height;         // storage height in pixels
    hb_rational_t par;  // pixel aspect ratio
};
/* Geometry settings as specified by the UI, before sanitization. */
struct hb_ui_geometry_s
{
    int mode;               // Anamorphic mode, see job struct anamorphic
    int keep;               // Specifies settings that shouldn't be changed
    int itu_par;            // use dvd dimensions to determine PAR
    int modulus;            // pixel alignment for loose anamorphic
    int crop[4];            // Pixels cropped from source before scaling
    int width;              // destination storage width
    int height;             // destination storage height
    int maxWidth;           // max destination storage width
    int maxHeight;          // max destination storage height
    hb_rational_t par;      // Pixel aspect used in custom anamorphic
    hb_rational_t dar;      // Display aspect used in custom anamorphic
};

/* A decoded image with up to 4 planes (e.g. Y, U, V). */
struct hb_image_s
{
    int format;             // pixel format id
    int width;              // image width in pixels
    int height;             // image height in pixels
    uint8_t *data;          // backing allocation for all planes

    struct image_plane
    {
        uint8_t *data;      // pointer into the backing allocation
        int width;          // plane width in pixels
        int height;         // plane height in pixels
        int stride;         // bytes per row (may exceed width)
        int height_stride;  // allocated rows (may exceed height)
        int size;           // total plane size in bytes
    } plane[4];
};

void hb_image_close(hb_image_t **_image);
// Update win/CS/HandBrake.Interop/HandBrakeInterop/HbLib/hb_subtitle_config_s.cs when changing this struct
struct hb_subtitle_config_s
{
    enum subdest { RENDERSUB, PASSTHRUSUB } dest; // burn-in vs. pass through to the mux
    int  force;          // only show forced subtitles
    int  default_track;  // mark this as the default subtitle track

    /* SRT subtitle tracks only */
    char     src_filename[256]; // path to the SRT file
    char     src_codeset[40];   // character encoding of the SRT file
    int64_t  offset;            // time offset applied to the track -- units
                                // not shown here, TODO confirm (likely ms)
};
/*******************************************************************************
* Lists of rates, mixdowns, encoders etc.
*******************************************************************************
*
* Use hb_*_get_next() to get the next list item (use NULL to get the first).
*
* Use hb_*_get_from_name() to get the value corresponding to a name.
* The name can be either the short or full name.
* Legacy names are sanitized to currently-supported values whenever possible.
* Returns 0 or -1 if no value could be found.
*
* Use hb_*_get_name() and hb_*_get_short_name() to get the corresponding value.
* Returns NULL if the value is invalid.
*
* Use hb_*_get_long_name() when the name is not descriptive enough for you.
*
* hb_*_sanitize_name() are convenience functions for use when dealing
* with full names (e.g. to translate legacy values while loading a preset).
*
* Names are case-insensitive; libhb will ensure that the lists do not contain
* more than one entry with the same name.
*
* Use hb_*_get_limits() to get the minimum/maximum for lists with numerically
* ordered values.
*
* Use hb_*_get_best() to sanitize a value based on other relevant parameters.
*
* Use hb_*_get_default() to get the default based on other relevant parameters.
*
*/
void hb_common_global_init();
int hb_video_framerate_get_from_name(const char *name);
const char* hb_video_framerate_get_name(int framerate);
const char* hb_video_framerate_sanitize_name(const char *name);
const hb_rate_t* hb_video_framerate_get_next(const hb_rate_t *last);
int hb_audio_samplerate_get_best(uint32_t codec, int samplerate, int *sr_shift);
int hb_audio_samplerate_get_from_name(const char *name);
const char* hb_audio_samplerate_get_name(int samplerate);
const hb_rate_t* hb_audio_samplerate_get_next(const hb_rate_t *last);
int hb_audio_bitrate_get_best(uint32_t codec, int bitrate, int samplerate, int mixdown);
int hb_audio_bitrate_get_default(uint32_t codec, int samplerate, int mixdown);
void hb_audio_bitrate_get_limits(uint32_t codec, int samplerate, int mixdown, int *low, int *high);
const hb_rate_t* hb_audio_bitrate_get_next(const hb_rate_t *last);
void hb_video_quality_get_limits(uint32_t codec, float *low, float *high, float *granularity, int *direction);
const char* hb_video_quality_get_name(uint32_t codec);
const char* const* hb_video_encoder_get_presets (int encoder);
const char* const* hb_video_encoder_get_tunes (int encoder);
const char* const* hb_video_encoder_get_profiles(int encoder);
const char* const* hb_video_encoder_get_levels (int encoder);
void hb_audio_quality_get_limits(uint32_t codec, float *low, float *high, float *granularity, int *direction);
float hb_audio_quality_get_best(uint32_t codec, float quality);
float hb_audio_quality_get_default(uint32_t codec);
void hb_audio_compression_get_limits(uint32_t codec, float *low, float *high, float *granularity, int *direction);
float hb_audio_compression_get_best(uint32_t codec, float compression);
float hb_audio_compression_get_default(uint32_t codec);
int hb_audio_dither_get_default();
int hb_audio_dither_get_default_method(); // default method, if enabled && supported
int hb_audio_dither_is_supported(uint32_t codec);
int hb_audio_dither_get_from_name(const char *name);
const char* hb_audio_dither_get_description(int method);
const hb_dither_t* hb_audio_dither_get_next(const hb_dither_t *last);
int hb_mixdown_is_supported(int mixdown, uint32_t codec, uint64_t layout);
int hb_mixdown_has_codec_support(int mixdown, uint32_t codec);
int hb_mixdown_has_remix_support(int mixdown, uint64_t layout);
int hb_mixdown_get_discrete_channel_count(int mixdown);
int hb_mixdown_get_low_freq_channel_count(int mixdown);
int hb_mixdown_get_best(uint32_t codec, uint64_t layout, int mixdown);
int hb_mixdown_get_default(uint32_t codec, uint64_t layout);
int hb_mixdown_get_from_name(const char *name);
const char* hb_mixdown_get_name(int mixdown);
const char* hb_mixdown_get_short_name(int mixdown);
const char* hb_mixdown_sanitize_name(const char *name);
const hb_mixdown_t* hb_mixdown_get_next(const hb_mixdown_t *last);
int hb_video_encoder_get_default(int muxer);
int hb_video_encoder_get_from_name(const char *name);
const char* hb_video_encoder_get_name(int encoder);
const char* hb_video_encoder_get_short_name(int encoder);
const char* hb_video_encoder_get_long_name(int encoder);
const char* hb_video_encoder_sanitize_name(const char *name);
const hb_encoder_t* hb_video_encoder_get_next(const hb_encoder_t *last);
/*
* hb_audio_encoder_get_fallback_for_passthru() will sanitize a passthru codec
* to the matching audio encoder (if any is available).
*
* hb_audio_encoder_get_from_name(), hb_audio_encoder_sanitize_name() will
* sanitize legacy encoder names, but won't convert passthru to an encoder.
*/
int hb_audio_encoder_get_fallback_for_passthru(int passthru);
int hb_audio_encoder_get_default(int muxer);
int hb_audio_encoder_get_from_name(const char *name);
const char* hb_audio_encoder_get_name(int encoder);
const char* hb_audio_encoder_get_short_name(int encoder);
const char* hb_audio_encoder_get_long_name(int encoder);
const char* hb_audio_encoder_sanitize_name(const char *name);
const hb_encoder_t* hb_audio_encoder_get_next(const hb_encoder_t *last);
/*
* Not typically used by the UIs
* (set hb_job_t.acodec_copy_mask, hb_job_t.acodec_fallback instead).
*/
void hb_autopassthru_apply_settings(hb_job_t *job);
void hb_autopassthru_print_settings(hb_job_t *job);
int hb_autopassthru_get_encoder(int in_codec, int copy_mask, int fallback, int muxer);
int hb_container_get_from_name(const char *name);
int hb_container_get_from_extension(const char *extension); // not really a container name
const char* hb_container_get_name(int format);
const char* hb_container_get_short_name(int format);
const char* hb_container_get_long_name(int format);
const char* hb_container_get_default_extension(int format);
const char* hb_container_sanitize_name(const char *name);
const hb_container_t* hb_container_get_next(const hb_container_t *last);
// Update win/CS/HandBrake.Interop/HandBrakeInterop/HbLib/hb_title_set_s.cs when changing this struct
/* The set of titles found by a scan. */
struct hb_title_set_s
{
    hb_list_t   * list_title; // list of hb_title_t
    int           feature;    // Detected DVD feature title
};

extern int hb_gui_use_hwd_flag;

/* Anamorphic (pixel-aspect) handling modes. */
typedef enum
{
    HB_ANAMORPHIC_NONE,   // square pixels; ignore source PAR
    HB_ANAMORPHIC_STRICT, // preserve source dimensions and PAR
    HB_ANAMORPHIC_LOOSE,  // adjust to modulus while keeping display aspect
    HB_ANAMORPHIC_CUSTOM  // user-specified PAR/DAR
} hb_anamorphic_mode_t;
/******************************************************************************
* hb_job_t: settings to be filled by the UI
* Update win/CS/HandBrake.Interop/HandBrakeInterop/HbLib/hb_job_s.cs when changing this struct
*****************************************************************************/
struct hb_job_s
{
/* ID assigned by UI so it can groups job passes together */
int sequence_id;
/* Pointer to the title to be ripped */
hb_title_t * title;
int feature; // Detected DVD feature title
/* Chapter selection */
int chapter_start;
int chapter_end;
/* Include chapter marker track in mp4? */
int chapter_markers;
/* Picture settings:
crop: must be multiples of 2 (top/bottom/left/right)
deinterlace: 0 or 1
width: must be a multiple of 2
height: must be a multiple of 2
grayscale: black and white encoding
pixel_ratio: store pixel aspect ratio in the video
pixel_aspect_width: numerator for pixel aspect ratio
pixel_aspect_height: denominator for pixel aspect ratio
modulus: set a number for dimensions to be multiples of
maxWidth: keep width below this
maxHeight: keep height below this */
int crop[4];
int deinterlace;
hb_list_t * list_filter;
int width;
int height;
int grayscale;
struct
{
hb_anamorphic_mode_t mode;
int itu_par;
int par_width;
int par_height;
int dar_width; // 0 if normal
int dar_height; // 0 if normal
int keep_display_aspect;
} anamorphic;
int modulus;
int maxWidth;
int maxHeight;
/* Video settings:
vcodec: output codec
vquality: output quality (if < 0.0, bitrate is used instead)
vbitrate: output bitrate (Kbps)
vrate, vrate_base: output framerate is vrate / vrate_base
cfr: 0 (vfr), 1 (cfr), 2 (pfr) [see render.c]
pass: 0, 1 or 2 (or -1 for scan)
areBframes: boolean to note if b-frames are used */
#define HB_VCODEC_MASK 0x0000FFF
#define HB_VCODEC_X264 0x0000001
#define HB_VCODEC_THEORA 0x0000002
#define HB_VCODEC_X265 0x0000004
#define HB_VCODEC_FFMPEG_MPEG4 0x0000010
#define HB_VCODEC_FFMPEG_MPEG2 0x0000020
#define HB_VCODEC_FFMPEG_VP8 0x0000040
#define HB_VCODEC_FFMPEG_MASK 0x00000F0
#define HB_VCODEC_QSV_H264 0x0000100
#define HB_VCODEC_QSV_MASK 0x0000F00
#define HB_VCODEC_H264_MASK (HB_VCODEC_X264|HB_VCODEC_QSV_H264)
int vcodec;
float vquality;
int vbitrate;
int vrate;
int vrate_base;
int cfr;
int pass;
int fastfirstpass;
char *encoder_preset;
char *encoder_tune;
char *encoder_options;
char *encoder_profile;
char *encoder_level;
int areBframes;
int color_matrix_code;
int color_prim;
int color_transfer;
int color_matrix;
// see https://developer.apple.com/quicktime/icefloe/dispatch019.html#colr
#define HB_COLR_PRI_BT709 1
#define HB_COLR_PRI_UNDEF 2
#define HB_COLR_PRI_EBUTECH 5 // use for bt470bg
#define HB_COLR_PRI_SMPTEC 6 // smpte170m; also use for bt470m and smpte240m
// 0, 3-4, 7-65535: reserved
#define HB_COLR_TRA_BT709 1 // also use for bt470m, bt470bg and smpte170m
#define HB_COLR_TRA_UNDEF 2
#define HB_COLR_TRA_SMPTE240M 7
// 0, 3-6, 8-65535: reserved
#define HB_COLR_MAT_BT709 1
#define HB_COLR_MAT_UNDEF 2
#define HB_COLR_MAT_SMPTE170M 6 // also use for fcc and bt470bg
#define HB_COLR_MAT_SMPTE240M 7
// 0, 3-5, 8-65535: reserved
hb_list_t * list_chapter;
/* List of audio settings. */
hb_list_t * list_audio;
int acodec_copy_mask; // Auto Passthru allowed codecs
int acodec_fallback; // Auto Passthru fallback encoder
/* Subtitles */
hb_list_t * list_subtitle;
hb_list_t * list_attachment;
hb_metadata_t * metadata;
/*
* Muxer settings
* mux: output file format
* file: file path
*/
#define HB_MUX_MASK 0xFF0001
#define HB_MUX_MP4V2 0x010000
#define HB_MUX_AV_MP4 0x020000
#define HB_MUX_MASK_MP4 0x030000
#define HB_MUX_LIBMKV 0x100000
#define HB_MUX_AV_MKV 0x200000
#define HB_MUX_MASK_MKV 0x300000
#define HB_MUX_MASK_AV 0x220000
/* default muxer for each container */
#define HB_MUX_MP4 HB_MUX_AV_MP4
#define HB_MUX_MKV HB_MUX_AV_MKV
int mux;
char * file;
/* Allow MP4 files > 4 gigs */
int largeFileSize;
int mp4_optimize;
int ipod_atom;
int indepth_scan;
hb_subtitle_config_t select_subtitle_config;
int angle; // dvd angle to encode
int frame_to_start; // declare eof when we hit this frame
int64_t pts_to_start; // drop frames until we pass this pts
// in the time-linearized input stream
int frame_to_stop; // declare eof when we hit this frame
int64_t pts_to_stop; // declare eof when we pass this pts in
// the time-linearized input stream
int start_at_preview; // if non-zero, encoding will start
// at the position of preview n
int seek_points; // out of N previews
uint32_t frames_to_skip; // decode but discard this many frames
// initially (for frame accurate positioning
// to non-I frames).
int use_opencl;
int use_hwd;
int use_decomb;
int use_detelecine;
#ifdef USE_QSV
// QSV-specific settings
struct
{
int decode;
int async_depth;
av_qsv_context *ctx;
// shared encoding parameters
// initialized by the QSV encoder, then used upstream (e.g. by filters)
// to configure their output so that it matches what the encoder expects
struct
{
int pic_struct;
int align_width;
int align_height;
int is_init_done;
} enc_info;
} qsv;
#endif
#ifdef __LIBHB__
/* Internal data */
hb_handle_t * h;
hb_lock_t * pause;
volatile hb_error_code * done_error;
volatile int * die;
volatile int done;
uint64_t st_pause_date;
uint64_t st_paused;
hb_fifo_t * fifo_mpeg2; /* MPEG-2 video ES */
hb_fifo_t * fifo_raw; /* Raw pictures */
hb_fifo_t * fifo_sync; /* Raw pictures, framerate corrected */
hb_fifo_t * fifo_render; /* Raw pictures, scaled */
hb_fifo_t * fifo_mpeg4; /* MPEG-4 video ES */
hb_list_t * list_work;
hb_esconfig_t config;
hb_mux_data_t * mux_data;
#endif
};
/* Audio starts here */
/* Audio Codecs: Update win/CS/HandBrake.Interop/HandBrakeInterop/HbLib/NativeConstants.cs when changing these consts */
#define HB_ACODEC_MASK 0x00FFFF00
#define HB_ACODEC_LAME 0x00000200
#define HB_ACODEC_VORBIS 0x00000400
#define HB_ACODEC_AC3 0x00000800
#define HB_ACODEC_LPCM 0x00001000
#define HB_ACODEC_DCA 0x00002000
#define HB_ACODEC_CA_AAC 0x00004000
#define HB_ACODEC_CA_HAAC 0x00008000
#define HB_ACODEC_FFAAC 0x00010000
#define HB_ACODEC_FFMPEG 0x00020000
#define HB_ACODEC_DCA_HD 0x00040000
#define HB_ACODEC_MP3 0x00080000
#define HB_ACODEC_FFFLAC 0x00100000
#define HB_ACODEC_FFFLAC24 0x00200000
#define HB_ACODEC_FDK_AAC 0x00400000
#define HB_ACODEC_FDK_HAAC 0x00800000
#define HB_ACODEC_FF_MASK 0x00FF2800
#define HB_ACODEC_PASS_FLAG 0x40000000
#define HB_ACODEC_PASS_MASK (HB_ACODEC_MP3 | HB_ACODEC_FFAAC | HB_ACODEC_DCA_HD | HB_ACODEC_AC3 | HB_ACODEC_DCA)
#define HB_ACODEC_AUTO_PASS (HB_ACODEC_PASS_MASK | HB_ACODEC_PASS_FLAG)
#define HB_ACODEC_MP3_PASS (HB_ACODEC_MP3 | HB_ACODEC_PASS_FLAG)
#define HB_ACODEC_AAC_PASS (HB_ACODEC_FFAAC | HB_ACODEC_PASS_FLAG)
#define HB_ACODEC_AC3_PASS (HB_ACODEC_AC3 | HB_ACODEC_PASS_FLAG)
#define HB_ACODEC_DCA_PASS (HB_ACODEC_DCA | HB_ACODEC_PASS_FLAG)
#define HB_ACODEC_DCA_HD_PASS (HB_ACODEC_DCA_HD | HB_ACODEC_PASS_FLAG)
#define HB_ACODEC_ANY (HB_ACODEC_MASK | HB_ACODEC_PASS_FLAG)
#define HB_SUBSTREAM_BD_TRUEHD 0x72
#define HB_SUBSTREAM_BD_AC3 0x76
#define HB_SUBSTREAM_BD_DTSHD 0x72
#define HB_SUBSTREAM_BD_DTS 0x71
/* define an invalid VBR quality compatible with all VBR-capable codecs */
#define HB_INVALID_AUDIO_QUALITY (-3.)
// Update win/CS/HandBrake.Interop/HandBrakeInterop/HbLib/hb_audio_config_s.cs when changing this struct
struct hb_audio_config_s
{
    /* Output: settings for the encoded audio track HandBrake will produce */
    struct
    {
        enum
        {
            // make sure audio->config.out.mixdown isn't treated as unsigned
            HB_INVALID_AMIXDOWN = -1,
            HB_AMIXDOWN_NONE = 0,
            HB_AMIXDOWN_MONO,
            HB_AMIXDOWN_LEFT,
            HB_AMIXDOWN_RIGHT,
            HB_AMIXDOWN_STEREO,
            HB_AMIXDOWN_DOLBY,
            HB_AMIXDOWN_DOLBYPLII,
            HB_AMIXDOWN_5POINT1,
            HB_AMIXDOWN_6POINT1,
            HB_AMIXDOWN_7POINT1,
            HB_AMIXDOWN_5_2_LFE,
        } mixdown; /* Audio mixdown */
        int track; /* Output track number */
        uint32_t codec; /* Output audio codec */
        int samplerate; /* Output sample rate (Hz) */
        int samples_per_frame; /* Number of samples per frame */
        int bitrate; /* Output bitrate (Kbps) */
        float quality; /* Output quality (encoder-specific) */
        float compression_level; /* Output compression level (encoder-specific) */
        double dynamic_range_compression; /* Amount of DRC applied to this track */
        double gain; /* Gain (in dB), negative is quieter */
        int normalize_mix_level; /* mix level normalization (boolean) */
        int dither_method; /* dither algorithm */
        char * name; /* Output track name */
        int delay; /* output delay; units not stated here -- TODO confirm against callers */
    } out;

    /* Input: properties of the source audio track (filled in by the
     * demuxer and/or audio decoder during scan) */
    struct
    {
        int track; /* Input track number */
        PRIVATE uint32_t codec; /* Input audio codec */
        PRIVATE uint32_t codec_param; /* Per-codec config info */
        PRIVATE uint32_t reg_desc; /* Registration descriptor of source */
        PRIVATE uint32_t stream_type; /* Stream type from source stream */
        PRIVATE uint32_t substream_type; /* Substream type for multiplexed streams */
        PRIVATE uint32_t version; /* Bitstream version */
        PRIVATE uint32_t flags; /* Bitstream flags, codec-specific */
        PRIVATE uint32_t mode; /* Bitstream mode, codec-specific */
        PRIVATE int samplerate; /* Input sample rate (Hz) */
        PRIVATE int samples_per_frame; /* Number of samples per frame */
        PRIVATE int bitrate; /* Input bitrate (bps) */
        PRIVATE int matrix_encoding; /* Source matrix encoding mode, set by the audio decoder */
        PRIVATE uint64_t channel_layout; /* Source channel layout, set by the audio decoder */
        PRIVATE hb_chan_map_t * channel_map; /* Source channel map, set by the audio decoder */
    } in;

    /* Language / description metadata for the track */
    struct
    {
        PRIVATE char description[1024];
        PRIVATE char simple[1024];
        PRIVATE char iso639_2[4]; /* ISO 639-2 language code (3 chars + NUL) */
        PRIVATE uint8_t type; /* normal, visually impaired, director's commentary */
    } lang;
};
#ifdef __LIBHB__
// Update win/CS/HandBrake.Interop/HandBrakeInterop/HbLib/hb_audio_s.cs when changing this struct
struct hb_audio_s
{
    int id;                   // track id (used to route demuxed packets to this track)

    hb_audio_config_t config; // input/output settings for the track

    /* Internal (libhb-only) per-track pipeline state */
    struct {
        hb_fifo_t * fifo_in;   /* AC3/MPEG/LPCM ES */
        hb_fifo_t * fifo_raw;  /* Raw audio */
        hb_fifo_t * fifo_sync; /* Resampled, synced raw audio */
        hb_fifo_t * fifo_out;  /* MP3/AAC/Vorbis ES */

        hb_esconfig_t config;
        hb_mux_data_t * mux_data;
        hb_fifo_t     * scan_cache; // buffers accumulated during scan; flushed/closed in scan.c
    } priv;
};
#endif
// Update win/CS/HandBrake.Interop/HandBrakeInterop/HbLib/hb_chapter_s.cs when changing this struct
/* One chapter of a title. */
struct hb_chapter_s
{
    int      index;         // chapter number within the title

    /* DVD navigation info (program chain / program / cells) */
    int      pgcn;
    int      pgn;
    int      cell_start;
    int      cell_end;
    uint64_t block_start;
    uint64_t block_end;
    uint64_t block_count;

    /* Visual-friendly duration */
    int      hours;
    int      minutes;
    int      seconds;

    /* Exact duration (in 1/90000s) */
    uint64_t duration;

    /* Optional chapter title */
    char     *title;
};
/*
* A subtitle track.
*
* Required fields when a demuxer creates a subtitle track are:
* > id
* - ID of this track
* - must be unique for all tracks within a single job,
* since it is used to look up the appropriate in-FIFO with GetFifoForId()
* > format
* - format of the packets the subtitle decoder work-object sends to sub->fifo_raw
* - for passthru subtitles, is also the format of the final packets sent to sub->fifo_out
* - PICTURESUB for banded 8-bit YAUV pixels; see decvobsub.c documentation for more info
* - TEXTSUB for UTF-8 text marked up with , , or
* - read by the muxers, and by the subtitle burn-in logic in the hb_sync_video work-object
* > source
* - used to create the appropriate subtitle decoder work-object in do_job()
* > config.dest
* - whether to render the subtitle on the video track (RENDERSUB) or
* to pass it through its own subtitle track in the output container (PASSTHRUSUB)
* - all newly created non-VOBSUB tracks should default to PASSTHRUSUB
* - all newly created VOBSUB tracks should default to RENDERSUB, for legacy compatibility
* > lang
* - user-readable description of the subtitle track
* - may correspond to the language of the track (see the 'iso639_2' field)
* - may correspond to the type of track (see the 'type' field; ex: "Closed Captions")
* > iso639_2
* - language code for the subtitle, or "und" if unknown
*
* Update win/CS/HandBrake.Interop/HandBrakeInterop/HbLib/hb_subtitle_s.cs when changing this struct
*/
struct hb_subtitle_s
{
int id;
int track;
int out_track;
hb_subtitle_config_t config;
enum subtype { PICTURESUB, TEXTSUB } format;
enum subsource { VOBSUB, SRTSUB, CC608SUB, /*unused*/CC708SUB, UTF8SUB, TX3GSUB, SSASUB, PGSSUB } source;
char lang[1024];
char iso639_2[4];
uint8_t type; /* Closed Caption, Childrens, Directors etc */
// Color lookup table for VOB subtitle tracks. Each entry is in YCbCr format.
// Must be filled out by the demuxer for VOB subtitle tracks.
uint32_t palette[16];
uint8_t palette_set;
int width;
int height;
// Codec private data for subtitles originating from FFMPEG sources
uint8_t * extradata;
int extradata_size;
int hits; /* How many hits/occurrences of this subtitle */
int forced_hits; /* How many forced hits in this subtitle */
#ifdef __LIBHB__
/* Internal data */
PRIVATE uint32_t codec; /* Input "codec" */
PRIVATE uint32_t reg_desc; /* registration descriptor of source */
PRIVATE uint32_t stream_type; /* stream type from source stream */
PRIVATE uint32_t substream_type;/* substream for multiplexed streams */
hb_fifo_t * fifo_in; /* SPU ES */
hb_fifo_t * fifo_raw; /* Decoded SPU */
hb_fifo_t * fifo_sync;/* Synced */
hb_fifo_t * fifo_out; /* Correct Timestamps, ready to be muxed */
hb_mux_data_t * mux_data;
#endif
};
/*
* An attachment.
*
* These are usually used for attaching embedded fonts to movies containing SSA subtitles.
*/
struct hb_attachment_s
{
    enum attachtype { FONT_TTF_ATTACH, HB_ART_ATTACH } type;
    char * name; // attachment name/identifier
    char * data; // raw attachment bytes; length given by 'size'
    int    size; // byte count of 'data'
};
/* A single cover-art image (stored in hb_metadata_s.list_coverart). */
struct hb_coverart_s
{
    uint8_t  *data;   // raw image bytes
    uint32_t  size;   // byte count of 'data'
    enum arttype {    // image format of 'data'
        HB_ART_UNDEFINED,
        HB_ART_BMP,
        HB_ART_GIF,
        HB_ART_PNG,
        HB_ART_JPEG
    } type;
};
/* Container-level metadata carried from source to output file. */
struct hb_metadata_s
{
    char *name;
    char *artist;          // Actors
    char *composer;
    char *release_date;
    char *comment;
    char *album;           // DVD
    char *album_artist;    // Director
    char *genre;
    char *description;
    char *long_description;
    hb_list_t * list_coverart;  // list of cover-art images (see hb_coverart_s)
};
// Update win/CS/HandBrake.Interop/HandBrakeInterop/HbLib/hb_title_s.cs when changing this struct
struct hb_title_s
{
enum { HB_DVD_TYPE, HB_BD_TYPE, HB_STREAM_TYPE, HB_FF_STREAM_TYPE } type;
uint32_t reg_desc;
char path[1024];
char name[1024];
int index;
int playlist;
int vts;
int ttn;
int cell_start;
int cell_end;
uint64_t block_start;
uint64_t block_end;
uint64_t block_count;
int angle_count;
void *opaque_priv;
/* Visual-friendly duration */
int hours;
int minutes;
int seconds;
/* Exact duration (in 1/90000s) */
uint64_t duration;
double aspect; // aspect ratio for the title's video
double container_aspect; // aspect ratio from container (0 if none)
int has_resolution_change;
int width;
int height;
int pixel_aspect_width;
int pixel_aspect_height;
int color_prim;
int color_transfer;
int color_matrix;
int rate;
int rate_base;
int crop[4];
enum {HB_DVD_DEMUXER, HB_TS_DEMUXER, HB_PS_DEMUXER, HB_NULL_DEMUXER} demuxer;
int detected_interlacing;
int pcr_pid; /* PCR PID for TS streams */
int video_id; /* demuxer stream id for video */
int video_codec; /* worker object id of video codec */
uint32_t video_stream_type; /* stream type from source stream */
int video_codec_param; /* codec specific config */
char *video_codec_name;
int video_bitrate;
char *container_name;
int data_rate;
// additional supported video decoders (e.g. HW-accelerated implementations)
int video_decode_support;
#define HB_DECODE_SUPPORT_SW 0x01 // software (libavcodec or mpeg2dec)
#define HB_DECODE_SUPPORT_QSV 0x02 // Intel Quick Sync Video
hb_metadata_t *metadata;
hb_list_t * list_chapter;
hb_list_t * list_audio;
hb_list_t * list_subtitle;
hb_list_t * list_attachment;
#define HB_TITLE_JOBS
#if defined(HB_TITLE_JOBS)
hb_job_t * job;
#endif
uint32_t flags;
// set if video stream doesn't have IDR frames
#define HBTF_NO_IDR (1 << 0)
#define HBTF_SCAN_COMPLETE (1 << 1)
// whether OpenCL scaling is supported for this source
int opencl_support;
int hwd_support; // TODO: merge to video_decode_support
};
// Update win/CS/HandBrake.Interop/HandBrakeInterop/HbLib/hb_state_s.cs when changing this struct
struct hb_state_s
{
#define HB_STATE_IDLE 1
#define HB_STATE_SCANNING 2
#define HB_STATE_SCANDONE 4
#define HB_STATE_WORKING 8
#define HB_STATE_PAUSED 16
#define HB_STATE_WORKDONE 32
#define HB_STATE_MUXING 64
#define HB_STATE_SEARCHING 128
int state;
union
{
struct
{
/* HB_STATE_SCANNING */
float progress;
int preview_cur;
int preview_count;
int title_cur;
int title_count;
} scanning;
struct
{
/* HB_STATE_WORKING */
float progress;
int job_cur;
int job_count;
float rate_cur;
float rate_avg;
int hours;
int minutes;
int seconds;
int sequence_id;
} working;
struct
{
/* HB_STATE_WORKDONE */
hb_error_code error;
} workdone;
struct
{
/* HB_STATE_MUXING */
float progress;
} muxing;
} param;
};
/* Bitstream information reported by a work object's info/bsinfo entry
 * points (see hb_work_object_s); used by scan to characterize streams. */
typedef struct hb_work_info_s
{
    const char * name;      // codec/decoder name
    int          profile;
    int          level;
    int          bitrate;
    int          rate;      // frame/sample rate expressed as rate / rate_base
    int          rate_base;
    uint32_t     version;
    uint32_t     flags;
    uint32_t     mode;
    union
    {
        struct
        {    // info only valid for video decoders
            int width;
            int height;
            int pixel_aspect_width;
            int pixel_aspect_height;
            int color_prim;
            int color_transfer;
            int color_matrix;
            int video_decode_support;
        };
        struct
        {    // info only valid for audio decoders
            uint64_t channel_layout;
            hb_chan_map_t * channel_map;
            int samples_per_frame;
            int matrix_encoding;
        };
    };
} hb_work_info_t;
struct hb_work_object_s
{
int id;
char * name;
#ifdef __LIBHB__
int (* init) ( hb_work_object_t *, hb_job_t * );
int (* work) ( hb_work_object_t *, hb_buffer_t **,
hb_buffer_t ** );
void (* close) ( hb_work_object_t * );
/* the info entry point is used by scan to get bitstream information
* during a decode (i.e., it should only be called after at least one
* call to the 'work' entry point). currently it's only called for
* video streams & can be null for other work objects. */
int (* info) ( hb_work_object_t *, hb_work_info_t * );
/* the bitstream info entry point is used by scan to get bitstream
* information from a buffer. it doesn't have to be called during a
* decode (it can be called even if init & work haven't been).
* currently it's only called for audio streams & can be null for
* other work objects. */
int (* bsinfo) ( hb_work_object_t *, const hb_buffer_t *,
hb_work_info_t * );
void (* flush) ( hb_work_object_t * );
hb_fifo_t * fifo_in;
hb_fifo_t * fifo_out;
hb_esconfig_t * config;
/* Pointer hb_audio_t so we have access to the info in the audio worker threads. */
hb_audio_t * audio;
/* Pointer hb_subtitle_t so we have access to the info in the subtitle worker threads. */
hb_subtitle_t * subtitle;
hb_work_private_t * private_data;
hb_thread_t * thread;
volatile int * done;
int status;
int codec_param;
hb_title_t * title;
hb_work_object_t * next;
int thread_sleep_interval;
#endif
};
extern hb_work_object_t hb_sync_video;
extern hb_work_object_t hb_sync_audio;
extern hb_work_object_t hb_decvobsub;
extern hb_work_object_t hb_encvobsub;
extern hb_work_object_t hb_deccc608;
extern hb_work_object_t hb_decsrtsub;
extern hb_work_object_t hb_decutf8sub;
extern hb_work_object_t hb_dectx3gsub;
extern hb_work_object_t hb_decssasub;
extern hb_work_object_t hb_decpgssub;
extern hb_work_object_t hb_encavcodec;
extern hb_work_object_t hb_encqsv;
extern hb_work_object_t hb_encx264;
extern hb_work_object_t hb_enctheora;
extern hb_work_object_t hb_encx265;
extern hb_work_object_t hb_decavcodeca;
extern hb_work_object_t hb_decavcodecv;
extern hb_work_object_t hb_declpcm;
extern hb_work_object_t hb_enclame;
extern hb_work_object_t hb_encvorbis;
extern hb_work_object_t hb_muxer;
extern hb_work_object_t hb_encca_aac;
extern hb_work_object_t hb_encca_haac;
extern hb_work_object_t hb_encavcodeca;
extern hb_work_object_t hb_reader;
#define HB_FILTER_OK 0
#define HB_FILTER_DELAY 1
#define HB_FILTER_FAILED 2
#define HB_FILTER_DROP 3
#define HB_FILTER_DONE 4
int hb_use_dxva(hb_title_t *title);
/* Frame-format description passed into a filter's init() and reported
 * back (via hb_filter_info_t.out) as the format the filter emits. */
typedef struct hb_filter_init_s
{
    hb_job_t * job;
    int        pix_fmt;
    int        width;
    int        height;
    int        par_width;    // pixel aspect ratio numerator
    int        par_height;   // pixel aspect ratio denominator
    int        crop[4];      // crop values; presumably top/bottom/left/right -- TODO confirm
    int        vrate_base;   // framerate is vrate / vrate_base
    int        vrate;
    int        cfr;          // framerate mode (see hb_job_s.cfr: 0 vfr, 1 cfr, 2 pfr)
    int        use_dxva;     // nonzero when DXVA hardware decode is in use
} hb_filter_init_t;
/* Returned by a filter's info() entry point. */
typedef struct hb_filter_info_s
{
    char             human_readable_desc[128]; // description for logging/UI
    hb_filter_init_t out;                      // format of the frames this filter outputs
} hb_filter_info_t;
struct hb_filter_object_s
{
    int            id;            // HB_FILTER_* identifier (see enum below)
    int            enforce_order; // nonzero: keep this filter at its canonical position
    char         * name;
    char         * settings;      // filter-specific settings string

#ifdef __LIBHB__
    /* Entry points invoked by the filter pipeline */
    int         (* init)  ( hb_filter_object_t *, hb_filter_init_t * );

    int         (* work)  ( hb_filter_object_t *,
                            hb_buffer_t **, hb_buffer_t ** );

    void        (* close) ( hb_filter_object_t * );
    int         (* info)  ( hb_filter_object_t *, hb_filter_info_t * );

    hb_fifo_t    * fifo_in;
    hb_fifo_t    * fifo_out;

    hb_subtitle_t * subtitle;     // subtitle track, when the filter needs one -- TODO confirm which filters

    hb_filter_private_t * private_data;

    hb_thread_t  * thread;
    volatile int * done;
    int            status;

    // Filters can drop frames and thus chapter marks
    // These are used to bridge the chapter to the next buffer
    int            chapter_val;
    int64_t        chapter_time;
#endif
};
// Update win/CS/HandBrake.Interop/HandBrakeInterop/HbLib/hb_filter_ids.cs when changing this enum
enum
{
// for QSV - important to have before other filters
HB_FILTER_QSV_PRE = 1,
// First, filters that may change the framerate (drop or dup frames)
HB_FILTER_DETELECINE,
HB_FILTER_DECOMB,
HB_FILTER_DEINTERLACE,
HB_FILTER_VFR,
// Filters that must operate on the original source image are next
HB_FILTER_DEBLOCK,
HB_FILTER_DENOISE,
HB_FILTER_HQDN3D = HB_FILTER_DENOISE,
HB_FILTER_NLMEANS,
HB_FILTER_RENDER_SUB,
HB_FILTER_CROP_SCALE,
// Finally filters that don't care what order they are in,
// except that they must be after the above filters
HB_FILTER_ROTATE,
// for QSV - important to have as a last one
HB_FILTER_QSV_POST,
// default MSDK VPP filter
HB_FILTER_QSV,
};
hb_filter_object_t * hb_filter_init( int filter_id );
hb_filter_object_t * hb_filter_copy( hb_filter_object_t * filter );
hb_list_t *hb_filter_list_copy(const hb_list_t *src);
void hb_filter_close( hb_filter_object_t ** );
char * hb_generate_filter_settings(int filter_id, const char *preset,
const char *tune);
int hb_validate_filter_settings(int filter_id, const char *filter_param);
int hb_validate_param_string(const char *regex_pattern, const char *param_string);
typedef void hb_error_handler_t( const char *errmsg );
extern void hb_register_error_handler( hb_error_handler_t * handler );
char * hb_strdup_printf(const char *fmt, ...) HB_WPRINTF(1, 2);
char * hb_strncat_dup( const char * s1, const char * s2, size_t n );
int hb_yuv2rgb(int yuv);
int hb_rgb2yuv(int rgb);
const char * hb_subsource_name( int source );
// unparse a set of x264 settings to an HB encopts string
char * hb_x264_param_unparse(const char *x264_preset, const char *x264_tune,
const char *x264_encopts, const char *h264_profile,
const char *h264_level, int width, int height);
#define HB_API_OLD_PRESET_GETTERS
#ifdef HB_API_OLD_PRESET_GETTERS
// x264 preset/tune, qsv preset & h264 profile/level helpers
const char * const * hb_x264_presets();
const char * const * hb_x264_tunes();
#ifdef USE_QSV
const char * const * hb_qsv_presets();
#endif
const char * const * hb_h264_profiles();
const char * const * hb_h264_levels();
#endif
// x264 option name/synonym helper
const char * hb_x264_encopt_name( const char * name );
#ifdef USE_X265
// x265 option name/synonym helper
const char * hb_x265_encopt_name( const char * name );
#endif
#endif
HandBrake-0.10.2/libhb/scan.c 0000664 0001752 0001752 00000130565 12524417313 016223 0 ustar handbrake handbrake /* scan.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include "hb.h"
#include "opencl.h"
#include "hbffmpeg.h"
/* State for one scan thread. Owned (and freed) by ScanFunc. */
typedef struct
{
    hb_handle_t     * h;
    volatile int    * die;          // set externally to abort the scan
    char            * path;         // source path (strdup'd copy; freed by ScanFunc)
    int               title_index;  // scan only this title; 0 = scan all titles
    hb_title_set_t  * title_set;    // scan results are appended to title_set->list_title

    /* at most one of these is non-NULL once the source has been opened */
    hb_bd_t         * bd;
    hb_dvd_t        * dvd;
    hb_stream_t     * stream;
    hb_batch_t      * batch;

    int               preview_count;  // number of previews to decode per title
    int               store_previews; // nonzero: keep decoded previews -- TODO confirm storage target
    uint64_t          min_title_duration; // skip titles shorter than this (same clock as title durations)
} hb_scan_t;
#define PREVIEW_READ_THRESH (1024 * 1024 * 300)
static void ScanFunc( void * );
static int DecodePreviews( hb_scan_t *, hb_title_t * title, int flush );
static void LookForAudio( hb_title_t * title, hb_buffer_t * b );
static int AllAudioOK( hb_title_t * title );
static void UpdateState1(hb_scan_t *scan, int title);
static void UpdateState2(hb_scan_t *scan, int title);
static void UpdateState3(hb_scan_t *scan, int preview);
/***********************************************************************
 * aspect_to_string
 ***********************************************************************
 * Return a human-readable aspect ratio: "4:3", "16:9", or a generic
 * "x.xx:1" / "1:x.xx" form.
 * NOTE: returns a pointer to a static buffer, so the result is only
 * valid until the next call and the function is not thread-safe.
 **********************************************************************/
static const char *aspect_to_string( double aspect )
{
    switch ( (int)(aspect * 9.) )
    {
        case 9 * 4 / 3:  return "4:3";
        case 9 * 16 / 9: return "16:9";
    }
    static char arstr[32];
    // bug fix: use snprintf instead of sprintf -- a pathological aspect
    // value (very large magnitude) could otherwise overflow the 32-byte
    // static buffer
    snprintf( arstr, sizeof(arstr),
              aspect >= 1. ? "%.2f:1" : "1:%.2f", aspect );
    return arstr;
}
/*
 * Package the scan parameters into an hb_scan_t and launch the scan
 * thread. The thread (ScanFunc) takes ownership of the context and of
 * the duplicated path string, and frees both when the scan finishes.
 */
hb_thread_t * hb_scan_init( hb_handle_t * handle, volatile int * die,
                            const char * path, int title_index,
                            hb_title_set_t * title_set, int preview_count,
                            int store_previews, uint64_t min_duration )
{
    hb_scan_t * data = calloc( 1, sizeof( hb_scan_t ) );

    data->h                  = handle;
    data->die                = die;
    data->path               = strdup( path );
    data->title_index        = title_index;
    data->title_set          = title_set;
    data->preview_count      = preview_count;
    data->store_previews     = store_previews;
    data->min_title_duration = min_duration;

    return hb_thread_init( "scan", ScanFunc, data, HB_NORMAL_PRIORITY );
}
/***********************************************************************
 * ScanFunc
 ***********************************************************************
 * Scan thread main routine. Opens data->path as a BD, DVD, batch
 * directory or single stream (tried in that order), builds the title
 * list, decodes previews for each title to fill in video/audio details,
 * then releases the source and the hb_scan_t context it owns.
 **********************************************************************/
static void ScanFunc( void * _data )
{
    hb_scan_t  * data = (hb_scan_t *) _data;
    hb_title_t * title;
    int          i;
    int          feature = 0;

    data->bd = NULL;
    data->dvd = NULL;
    data->stream = NULL;

    /* Try to open the path as a DVD. If it fails, try as a file */
    if( ( data->bd = hb_bd_init( data->path ) ) )
    {
        hb_log( "scan: BD has %d title(s)",
                hb_bd_title_count( data->bd ) );
        if( data->title_index )
        {
            /* Scan this title only */
            hb_list_add( data->title_set->list_title,
                         hb_bd_title_scan( data->bd,
                                           data->title_index, 0 ) );
        }
        else
        {
            /* Scan all titles */
            for( i = 0; i < hb_bd_title_count( data->bd ); i++ )
            {
                UpdateState1(data, i + 1);
                hb_list_add( data->title_set->list_title,
                             hb_bd_title_scan( data->bd,
                                               i + 1, data->min_title_duration ) );
            }
            feature = hb_bd_main_feature( data->bd,
                                          data->title_set->list_title );
        }
    }
    else if( ( data->dvd = hb_dvd_init( data->path ) ) )
    {
        hb_log( "scan: DVD has %d title(s)",
                hb_dvd_title_count( data->dvd ) );
        if( data->title_index )
        {
            /* Scan this title only */
            hb_list_add( data->title_set->list_title,
                         hb_dvd_title_scan( data->dvd,
                                            data->title_index, 0 ) );
        }
        else
        {
            /* Scan all titles */
            for( i = 0; i < hb_dvd_title_count( data->dvd ); i++ )
            {
                UpdateState1(data, i + 1);
                hb_list_add( data->title_set->list_title,
                             hb_dvd_title_scan( data->dvd,
                                                i + 1, data->min_title_duration ) );
            }
            feature = hb_dvd_main_feature( data->dvd,
                                           data->title_set->list_title );
        }
    }
    else if ( ( data->batch = hb_batch_init( data->path ) ) )
    {
        if( data->title_index )
        {
            /* Scan this title only */
            title = hb_batch_title_scan( data->batch, data->title_index );
            if ( title )
            {
                hb_list_add( data->title_set->list_title, title );
            }
        }
        else
        {
            /* Scan all titles */
            for( i = 0; i < hb_batch_title_count( data->batch ); i++ )
            {
                hb_title_t * title;

                UpdateState1(data, i + 1);
                title = hb_batch_title_scan( data->batch, i + 1 );
                if ( title != NULL )
                {
                    hb_list_add( data->title_set->list_title, title );
                }
            }
        }
    }
    else
    {
        /* Not an optical source or batch dir: treat as a single stream */
        data->title_index = 1;
        hb_title_t * title = hb_title_init( data->path, data->title_index );
        if ( (data->stream = hb_stream_open( data->path, title, 1 ) ) != NULL )
        {
            title = hb_stream_title_scan( data->stream, title );
            if ( title )
                hb_list_add( data->title_set->list_title, title );
        }
        else
        {
            hb_title_close( &title );
            hb_log( "scan: unrecognized file type" );
            /* bug fix: the old early 'return' here leaked data->path and
             * data and skipped hb_buffer_pool_free(). All source handles
             * (bd/dvd/stream/batch) are NULL at this point, so the common
             * cleanup below is safe. */
            goto finish;
        }
    }

    for( i = 0; i < hb_list_count( data->title_set->list_title ); )
    {
        int j, npreviews;
        hb_audio_t * audio;

        if ( *data->die )
        {
            goto finish;
        }
        title = hb_list_item( data->title_set->list_title, i );

        UpdateState2(data, i + 1);

        /* Decode previews */
        /* this will also detect more AC3 / DTS information */
        npreviews = DecodePreviews( data, title, 1 );
        if (npreviews < 2)
        {
            /* retry with the lower-quality (flush=0) path */
            npreviews = DecodePreviews( data, title, 0 );
        }
        if (npreviews == 0)
        {
            /* TODO: free things */
            hb_list_rem( data->title_set->list_title, title );
            for( j = 0; j < hb_list_count( title->list_audio ); j++)
            {
                audio = hb_list_item( title->list_audio, j );
                if ( audio->priv.scan_cache )
                {
                    hb_fifo_flush( audio->priv.scan_cache );
                    hb_fifo_close( &audio->priv.scan_cache );
                }
            }
            hb_title_close( &title );
            continue;
        }

        /* Make sure we found audio rates and bitrates */
        for( j = 0; j < hb_list_count( title->list_audio ); )
        {
            audio = hb_list_item( title->list_audio, j );
            if ( audio->priv.scan_cache )
            {
                hb_fifo_flush( audio->priv.scan_cache );
                hb_fifo_close( &audio->priv.scan_cache );
            }
            if( !audio->config.in.bitrate )
            {
                hb_log( "scan: removing audio 0x%x because no bitrate found",
                        audio->id );
                hb_list_rem( title->list_audio, audio );
                free( audio );
                continue;
            }
            j++;
        }

        if ( data->dvd || data->bd )
        {
            // The subtitle width and height needs to be set to the
            // title width and height for DVDs. title width and
            // height don't get set until we decode previews, so
            // we can't set subtitle width/height till we get here.
            for( j = 0; j < hb_list_count( title->list_subtitle ); j++ )
            {
                hb_subtitle_t *subtitle = hb_list_item( title->list_subtitle, j );
                if ( subtitle->source == VOBSUB || subtitle->source == PGSSUB )
                {
                    subtitle->width = title->width;
                    subtitle->height = title->height;
                }
            }
        }
        i++;
    }

    data->title_set->feature = feature;

    /* Mark title scan complete and init jobs */
    for( i = 0; i < hb_list_count( data->title_set->list_title ); i++ )
    {
        title = hb_list_item( data->title_set->list_title, i );
        title->flags |= HBTF_SCAN_COMPLETE;
#if defined(HB_TITLE_JOBS)
        title->job = hb_job_init( title );
#endif
    }

finish:
    /* Common cleanup: close whichever source was opened and release the
     * scan context (this thread owns data and data->path). */
    if( data->bd )
    {
        hb_bd_close( &data->bd );
    }
    if( data->dvd )
    {
        hb_dvd_close( &data->dvd );
    }
    if (data->stream)
    {
        hb_stream_close(&data->stream);
    }
    if( data->batch )
    {
        hb_batch_close( &data->batch );
    }
    free( data->path );
    free( data );
    hb_buffer_pool_free();
}
// -----------------------------------------------
// stuff related to cropping
#define DARK 32
// Absolute difference of two ints.
static inline int absdiff( int x, int y )
{
    if ( x > y )
    {
        return x - y;
    }
    return y - x;
}
// Clamp a luma sample to video 'black': anything below 16 is treated as 16.
static inline int clampBlack( int x )
{
    return ( x >= 16 ) ? x : 16;
}
/*
 * Return 1 if the given luma row qualifies as a "dark border" row:
 * its average clamped luma is below DARK and every pixel sits within
 * +-16 of that average (so quantization noise near black doesn't
 * defeat the smooth-border test). Return 0 otherwise.
 */
static int row_all_dark( hb_buffer_t* buf, int row )
{
    int      width  = buf->plane[0].width;
    int      stride = buf->plane[0].stride;
    uint8_t *luma   = buf->plane[0].data + stride * row;

    // first pass: mean clamped luma of the row
    int sum = 0;
    for ( int i = 0; i < width; ++i )
    {
        sum += clampBlack( luma[i] );
    }
    int avg = sum / width;
    if ( avg >= DARK )
    {
        return 0;
    }

    // second pass: reject the row if any pixel strays more than 16
    // from the mean (coarse range, but luma near black is noisy)
    for ( int i = 0; i < width; ++i )
    {
        if ( absdiff( avg, clampBlack( luma[i] ) ) > 16 )
        {
            return 0;
        }
    }
    return 1;
}
/*
 * Column analogue of row_all_dark(): considering only the rows between
 * 'top' and 'bottom', return 1 if the column's average clamped luma is
 * below DARK and every pixel is within +-16 of that average.
 */
static int column_all_dark( hb_buffer_t* buf, int top, int bottom, int col )
{
    int      stride = buf->plane[0].stride;
    int      height = buf->plane[0].height - top - bottom;
    uint8_t *luma   = buf->plane[0].data + stride * top + col;

    // first pass: mean clamped luma of the column
    int sum = 0;
    int off = 0;
    for ( int i = 0; i < height; ++i, off += stride )
    {
        sum += clampBlack( luma[off] );
    }
    int avg = sum / height;
    if ( avg >= DARK )
    {
        return 0;
    }

    // second pass: all pixels must be within +-16 of the mean
    off = 0;
    for ( int i = 0; i < height; ++i, off += stride )
    {
        if ( absdiff( avg, clampBlack( luma[off] ) ) > 16 )
        {
            return 0;
        }
    }
    return 1;
}
#undef DARK
/* Per-preview crop measurements gathered while decoding previews. */
typedef struct {
    int n;   // number of entries recorded so far
    int *t;  // top crop of each preview (one entry per preview)
    int *b;  // bottom crop
    int *l;  // left crop
    int *r;  // right crop
} crop_record_t;
/*
 * Allocate a crop_record_t able to hold up to max_previews entries.
 * Returns NULL if any allocation fails (bug fix: the calloc results
 * were previously unchecked, so an OOM would have caused a NULL
 * dereference on first use). The caller should check for NULL and
 * must release the result with crop_record_free().
 */
static crop_record_t * crop_record_init( int max_previews )
{
    crop_record_t *crops = calloc( 1, sizeof(*crops) );
    if ( crops == NULL )
    {
        return NULL;
    }
    crops->t = calloc( max_previews, sizeof(int) );
    crops->b = calloc( max_previews, sizeof(int) );
    crops->l = calloc( max_previews, sizeof(int) );
    crops->r = calloc( max_previews, sizeof(int) );
    if ( !crops->t || !crops->b || !crops->l || !crops->r )
    {
        // release whatever did get allocated (free(NULL) is a no-op)
        free( crops->t );
        free( crops->b );
        free( crops->l );
        free( crops->r );
        free( crops );
        return NULL;
    }
    return crops;
}
/*
 * Release a crop_record_t and its arrays.
 * Robustness fix: safe to call with NULL (e.g. when crop_record_init
 * failed), matching the usual free() contract.
 */
static void crop_record_free( crop_record_t *crops )
{
    if ( crops == NULL )
    {
        return;
    }
    free( crops->t );
    free( crops->b );
    free( crops->l );
    free( crops->r );
    free( crops );
}
/*
 * Append one preview's detected crop values (top/bottom/left/right).
 * No bounds check: the caller must not record more entries than the
 * max_previews passed to crop_record_init().
 */
static void record_crop( crop_record_t *crops, int t, int b, int l, int r )
{
    int n = crops->n;
    crops->t[n] = t;
    crops->b[n] = b;
    crops->l[n] = l;
    crops->r[n] = r;
    crops->n = n + 1;
}
/*
 * qsort() comparator for ints. Uses comparisons rather than the
 * subtraction idiom, which can overflow (undefined behavior) for
 * operands of opposite sign and large magnitude.
 */
static int compare_int( const void *a, const void *b )
{
    int x = *(const int *)a;
    int y = *(const int *)b;
    return (x > y) - (x < y);
}
/*
 * Sort each crop array ascending so callers can take order statistics
 * (e.g. a percentile) of the per-preview crop measurements.
 * Consistency fix: each qsort now uses its own array's element size
 * instead of the copy-pasted sizeof(crops->t[0]) (harmless since all
 * four arrays are int[], but misleading).
 */
static void sort_crops( crop_record_t *crops )
{
    qsort( crops->t, crops->n, sizeof(crops->t[0]), compare_int );
    qsort( crops->b, crops->n, sizeof(crops->b[0]), compare_int );
    qsort( crops->l, crops->n, sizeof(crops->l[0]), compare_int );
    qsort( crops->r, crops->n, sizeof(crops->r[0]), compare_int );
}
// -----------------------------------------------
// stuff related to title width/height/aspect info
/* Tally of distinct hb_work_info_t values seen across previews.
 * The list is terminated by an entry whose count is 0. */
typedef struct {
    int count;            /* number of times we've seen this info entry */
    hb_work_info_t info;  /* copy of info entry */
} info_list_t;
/*
 * Record 'info' in the tally list: bump the count of an existing
 * identical entry, or append a new entry with count 1. The list is
 * terminated by a zero-count slot and is assumed to have one free
 * slot per preview, so it can never overflow.
 * NOTE(review): the match is a byte-wise memcmp, so it also compares
 * struct padding -- assumes the info structs are zero-initialized;
 * confirm at the call site.
 */
static void remember_info( info_list_t *info_list, hb_work_info_t *info )
{
    info_list_t *slot = info_list;
    while ( slot->count != 0 )
    {
        if ( memcmp( &slot->info, info, sizeof(*info) ) == 0 )
        {
            // we found a match - bump its count
            ++slot->count;
            return;
        }
        ++slot;
    }
    // no match found - take the first free slot
    slot->count = 1;
    slot->info  = *info;
}
/* Copy out the info entry that was remembered most often during
 * preview decoding (ties go to the earliest entry). */
static void most_common_info( info_list_t *info_list, hb_work_info_t *info )
{
    int best = 0;
    for ( int i = 1; info_list[i].count != 0; ++i )
    {
        if ( info_list[i].count > info_list[best].count )
        {
            best = i;
        }
    }
    *info = info_list[best].info;
}
/* Return 1 if any remembered info entry has a different frame size
 * than the first one; 0 otherwise (including an empty list). */
static int has_resolution_change( info_list_t *info_list )
{
    if ( !info_list[0].count )
    {
        return 0;
    }
    const int w = info_list[0].info.width;
    const int h = info_list[0].info.height;
    for ( int i = 1; info_list[i].count; ++i )
    {
        if ( info_list[i].info.width != w || info_list[i].info.height != h )
        {
            return 1;
        }
    }
    return 0;
}
/* Return nonzero when val is strictly within thresh of target. */
static int is_close_to( int val, int target, int thresh )
{
    int delta = val - target;
    if ( delta < 0 )
    {
        delta = -delta;
    }
    return delta < thresh;
}
/***********************************************************************
* DecodePreviews
***********************************************************************
* Decode 10 pictures for the given title.
* It assumes that data->reader and data->vts have successfully been
* DVDOpen()ed and ifoOpen()ed.
**********************************************************************/
static int DecodePreviews( hb_scan_t * data, hb_title_t * title, int flush )
{
    int i, npreviews = 0, abort = 0;
    hb_buffer_t * buf, * buf_es;
    hb_list_t * list_es;
    int progressive_count = 0;
    int pulldown_count = 0;
    int doubled_frame_count = 0;
    int interlaced_preview_count = 0;
    int frame_wait = 0;
    int cc_wait = 10;
    int frames;
    hb_stream_t * stream = NULL;
    // Per-preview frame info and crop measurements, aggregated after the loop.
    info_list_t * info_list = calloc( data->preview_count+1, sizeof(*info_list) );
    crop_record_t *crops = crop_record_init( data->preview_count );

    list_es = hb_list_init();

    if( data->batch )
    {
        hb_log( "scan: decoding previews for title %d (%s)", title->index, title->path );
    }
    else
    {
        hb_log( "scan: decoding previews for title %d", title->index );
    }

    // Open the appropriate input source (Blu-ray, DVD, batch file, or stream).
    if (data->bd)
    {
        hb_bd_start( data->bd, title );
        hb_log( "scan: title angle(s) %d", title->angle_count );
    }
    else if (data->dvd)
    {
        hb_dvd_start( data->dvd, title, 1 );
        title->angle_count = hb_dvd_angle_count( data->dvd );
        hb_log( "scan: title angle(s) %d", title->angle_count );
    }
    else if (data->batch)
    {
        stream = hb_stream_open( title->path, title, 0 );
    }
    else if (data->stream)
    {
        stream = hb_stream_open( data->path, title, 0 );
    }

    if (title->video_codec == WORK_NONE)
    {
        hb_error("No video decoder set!");
        return 0;
    }
    hb_work_object_t *vid_decoder = hb_get_work(title->video_codec);
    vid_decoder->codec_param = title->video_codec_param;
    vid_decoder->title = title;
    vid_decoder->init( vid_decoder, NULL );

    for( i = 0; i < data->preview_count; i++ )
    {
        int j;

        UpdateState3(data, i + 1);

        if ( *data->die )
        {
            // NOTE(review): vid_decoder and stream are not released on this
            // early-exit path -- confirm upstream whether this leak matters
            // during shutdown.
            free( info_list );
            crop_record_free( crops );
            return 0;
        }
        // Seek proportionally into the title for this preview.
        if (data->bd)
        {
            if( !hb_bd_seek( data->bd, (float) ( i + 1 ) / ( data->preview_count + 1.0 ) ) )
            {
                continue;
            }
        }
        // NOTE(review): plain 'if' (not 'else if') -- bd and dvd appear to be
        // mutually exclusive so this is safe; confirm.
        if (data->dvd)
        {
            if( !hb_dvd_seek( data->dvd, (float) ( i + 1 ) / ( data->preview_count + 1.0 ) ) )
            {
                continue;
            }
        }
        else if (stream)
        {
            /* we start reading streams at zero rather than 1/11 because
             * short streams may have only one sequence header in the entire
             * file and we need it to decode any previews.
             *
             * Also, seeking to position 0 loses the palette of avi files
             * so skip initial seek */
            if (i != 0)
            {
                if (!hb_stream_seek(stream,
                                    (float)i / (data->preview_count + 1.0)))
                {
                    continue;
                }
            }
            else
            {
                hb_stream_set_need_keyframe(stream, 1);
            }
        }

        hb_deep_log( 2, "scan: preview %d", i + 1 );

        if (flush && vid_decoder->flush)
            vid_decoder->flush( vid_decoder );
        // Decide how many initial frames to discard before trusting one.
        if (title->flags & HBTF_NO_IDR)
        {
            if (!flush)
            {
                // If we are doing the first previews decode attempt,
                // set this threshold high so that we get the best
                // quality frames possible.
                frame_wait = 100;
            }
            else
            {
                // If we failed to get enough valid frames in the first
                // previews decode attempt, lower the threshold to improve
                // our chances of getting something to work with.
                frame_wait = 10;
            }
        }
        else
        {
            // For certain mpeg-2 streams, libav is delivering a
            // dummy first frame that is all black. So always skip
            // one frame
            frame_wait = 1;
        }
        frames = 0;

        hb_buffer_t * vid_buf = NULL;

        int total_read = 0, packets = 0;
        // Read packets until we have both a decoded picture and audio info,
        // or we exceed the read/packet thresholds.
        while (total_read < PREVIEW_READ_THRESH ||
              (!AllAudioOK(title) && packets < 10000))
        {
            if (data->bd)
            {
                if( (buf = hb_bd_read( data->bd )) == NULL )
                {
                    if ( vid_buf )
                    {
                        break;
                    }
                    hb_log( "Warning: Could not read data for preview %d, skipped", i + 1 );
                    abort = 1;
                    goto skip_preview;
                }
            }
            else if (data->dvd)
            {
                if( (buf = hb_dvd_read( data->dvd )) == NULL )
                {
                    if ( vid_buf )
                    {
                        break;
                    }
                    hb_log( "Warning: Could not read data for preview %d, skipped", i + 1 );
                    abort = 1;
                    goto skip_preview;
                }
            }
            else if (stream)
            {
                if ( (buf = hb_stream_read(stream)) == NULL )
                {
                    if ( vid_buf )
                    {
                        break;
                    }
                    hb_log( "Warning: Could not read data for preview %d, skipped", i + 1 );
                    abort = 1;
                    goto skip_preview;
                }
            }
            else
            {
                // Silence compiler warning
                buf = NULL;
                hb_error( "Error: This can't happen!" );
                abort = 1;
                goto skip_preview;
            }

            if (buf->size <= 0)
            {
                hb_log( "Warning: Could not read data for preview %d, skipped", i + 1 );
                abort = 1;
                goto skip_preview;
            }
            total_read += buf->size;
            packets++;

            // Demux the raw packet into elementary-stream buffers.
            (hb_demux[title->demuxer])(buf, list_es, 0 );

            while( ( buf_es = hb_list_item( list_es, 0 ) ) )
            {
                hb_list_rem( list_es, buf_es );
                if( buf_es->s.id == title->video_id && vid_buf == NULL )
                {
                    vid_decoder->work( vid_decoder, &buf_es, &vid_buf );
                    // There are 2 conditions we decode additional
                    // video frames for during scan.
                    // 1. We did not detect IDR frames, so the initial video
                    //    frames may be corrupt. We decode extra frames to
                    //    increase the probability of a complete preview frame
                    // 2. Some frames do not contain CC data, even though
                    //    CCs are present in the stream. So we need to decode
                    //    additional frames to find the CCs.
                    if (vid_buf != NULL && (frame_wait || cc_wait))
                    {
                        if (frames > 0 && vid_buf->s.frametype == HB_FRAME_I)
                            frame_wait = 0;
                        if (frame_wait || cc_wait)
                        {
                            hb_buffer_close(&vid_buf);
                            if (frame_wait) frame_wait--;
                            if (cc_wait) cc_wait--;
                        }
                        frames++;
                    }
                }
                else if( ! AllAudioOK( title ) )
                {
                    // LookForAudio takes ownership of buf_es.
                    LookForAudio( title, buf_es );
                    buf_es = NULL;
                }
                if ( buf_es )
                    hb_buffer_close( &buf_es );
            }

            if( vid_buf && AllAudioOK( title ) )
                break;
        }

        if( ! vid_buf )
        {
            hb_log( "scan: could not get a decoded picture" );
            continue;
        }

        /* Get size and rate infos */

        hb_work_info_t vid_info;
        if( !vid_decoder->info( vid_decoder, &vid_info ) )
        {
            /*
             * Could not fill vid_info, don't continue and try to use vid_info
             * in this case.
             */
            if (vid_buf)
            {
                hb_buffer_close( &vid_buf );
            }
            hb_log( "scan: could not get a video information" );
            continue;
        }

        remember_info( info_list, &vid_info );

        // Tally frame-rate hints: soft telecine, repeated frames, NTSC film.
        if( is_close_to( vid_info.rate_base, 900900, 100 ) &&
            ( vid_buf->s.flags & PIC_FLAG_REPEAT_FIRST_FIELD ) )
        {
            /* Potentially soft telecine material */
            pulldown_count++;
        }

        if( vid_buf->s.flags & PIC_FLAG_REPEAT_FRAME )
        {
            // AVCHD-Lite specifies that all streams are
            // 50 or 60 fps. To produce 25 or 30 fps, camera
            // makers are repeating all frames.
            doubled_frame_count++;
        }

        if( is_close_to( vid_info.rate_base, 1126125, 100 ) )
        {
            // Frame FPS is 23.976 (meaning it's progressive), so start keeping
            // track of how many are reporting at that speed. When enough
            // show up that way, we want to make that the overall title FPS.
            progressive_count++;
        }

        // Drain any leftover elementary-stream buffers for this preview.
        while( ( buf_es = hb_list_item( list_es, 0 ) ) )
        {
            hb_list_rem( list_es, buf_es );
            hb_buffer_close( &buf_es );
        }

        /* Check preview for interlacing artifacts */
        if( hb_detect_comb( vid_buf, 10, 30, 9, 10, 30, 9 ) )
        {
            hb_deep_log( 2, "Interlacing detected in preview frame %i", i+1);
            interlaced_preview_count++;
        }

        if( data->store_previews )
        {
            hb_save_preview( data->h, title->index, i, vid_buf );
        }

        /* Detect black borders */

        int top, bottom, left, right;
        int h4 = vid_info.height / 4, w4 = vid_info.width / 4;

        // When widescreen content is matted to 16:9 or 4:3 there's sometimes
        // a thin border on the outer edge of the matte. On TV content it can be
        // "line 21" VBI data that's normally hidden in the overscan. For HD
        // content it can just be a diagnostic added in post production so that
        // the frame borders are visible. We try to ignore these borders so
        // we can crop the matte. The border width depends on the resolution
        // (12 pixels on 1080i looks visually the same as 4 pixels on 480i)
        // so we allow the border to be up to 1% of the frame height.
        const int border = vid_info.height / 100;

        for ( top = border; top < h4; ++top )
        {
            if ( ! row_all_dark( vid_buf, top ) )
                break;
        }
        if ( top <= border )
        {
            // we never made it past the border region - see if the rows we
            // didn't check are dark or if we shouldn't crop at all.
            for ( top = 0; top < border; ++top )
            {
                if ( ! row_all_dark( vid_buf, top ) )
                    break;
            }
            if ( top >= border )
            {
                top = 0;
            }
        }
        for ( bottom = border; bottom < h4; ++bottom )
        {
            if ( ! row_all_dark( vid_buf, vid_info.height - 1 - bottom ) )
                break;
        }
        if ( bottom <= border )
        {
            for ( bottom = 0; bottom < border; ++bottom )
            {
                if ( ! row_all_dark( vid_buf, vid_info.height - 1 - bottom ) )
                    break;
            }
            if ( bottom >= border )
            {
                bottom = 0;
            }
        }
        for ( left = 0; left < w4; ++left )
        {
            if ( ! column_all_dark( vid_buf, top, bottom, left ) )
                break;
        }
        for ( right = 0; right < w4; ++right )
        {
            if ( ! column_all_dark( vid_buf, top, bottom, vid_info.width - 1 - right ) )
                break;
        }

        // only record the result if all the crops are less than a quarter of
        // the frame otherwise we can get fooled by frames with a lot of black
        // like titles, credits & fade-thru-black transitions.
        if ( top < h4 && bottom < h4 && left < w4 && right < w4 )
        {
            record_crop( crops, top, bottom, left, right );
        }
        ++npreviews;

skip_preview:
        /* Make sure we found audio rates and bitrates */
        for( j = 0; j < hb_list_count( title->list_audio ); j++ )
        {
            hb_audio_t * audio = hb_list_item( title->list_audio, j );
            if ( audio->priv.scan_cache )
            {
                hb_fifo_flush( audio->priv.scan_cache );
            }
        }
        if (vid_buf)
        {
            hb_buffer_close( &vid_buf );
        }
        if (abort)
        {
            break;
        }
    }
    UpdateState3(data, i);

    vid_decoder->close( vid_decoder );
    free( vid_decoder );

    if (stream != NULL)
    {
        hb_stream_close(&stream);
    }

    if ( npreviews )
    {
        // use the most common frame info for our final title dimensions
        hb_work_info_t vid_info;
        most_common_info( info_list, &vid_info );

        title->has_resolution_change = has_resolution_change( info_list );
        if ( title->video_codec_name == NULL )
        {
            title->video_codec_name = strdup( vid_info.name );
        }
        title->width = vid_info.width;
        title->height = vid_info.height;
        if ( vid_info.rate && vid_info.rate_base )
        {
            // if the frame rate is very close to one of our "common" framerates,
            // assume it actually is said frame rate; e.g. some 24000/1001 sources
            // may have a rate_base of 1126124 (instead of 1126125)
            const hb_rate_t *video_framerate = NULL;
            while ((video_framerate = hb_video_framerate_get_next(video_framerate)) != NULL)
            {
                if (is_close_to(vid_info.rate_base, video_framerate->rate, 100))
                {
                    vid_info.rate_base = video_framerate->rate;
                    break;
                }
            }
            title->rate = vid_info.rate;
            title->rate_base = vid_info.rate_base;
            if( vid_info.rate_base == 900900 )
            {
                if( npreviews >= 4 && pulldown_count >= npreviews / 4 )
                {
                    title->rate_base = 1126125;
                    hb_deep_log( 2, "Pulldown detected, setting fps to 23.976" );
                }
                if( npreviews >= 2 && progressive_count >= npreviews / 2 )
                {
                    // We've already deduced that the frame rate is 23.976,
                    // so set it back again.
                    title->rate_base = 1126125;
                    hb_deep_log( 2, "Title's mostly NTSC Film, setting fps to 23.976" );
                }
            }
            if( npreviews >= 2 && doubled_frame_count >= 3 * npreviews / 4 )
            {
                // We've detected that a significant number of the frames
                // have been doubled in duration by repeat flags.
                title->rate_base = 2 * vid_info.rate_base;
                hb_deep_log( 2, "Repeat frames detected, setting fps to %.3f", (float)title->rate / title->rate_base );
            }
        }
        title->video_bitrate = vid_info.bitrate;

        if( vid_info.pixel_aspect_width && vid_info.pixel_aspect_height )
        {
            title->pixel_aspect_width = vid_info.pixel_aspect_width;
            title->pixel_aspect_height = vid_info.pixel_aspect_height;
        }
        title->color_prim = vid_info.color_prim;
        title->color_transfer = vid_info.color_transfer;
        title->color_matrix = vid_info.color_matrix;

        title->video_decode_support = vid_info.video_decode_support;

        // TODO: check video dimensions
        title->opencl_support = !!hb_opencl_available();

        // compute the aspect ratio based on the storage dimensions and the
        // pixel aspect ratio (if supplied) or just storage dimensions if no PAR.
        title->aspect = (double)title->width / (double)title->height;
        title->aspect *= (double)title->pixel_aspect_width /
                         (double)title->pixel_aspect_height;

        // For unknown reasons some French PAL DVDs put the original
        // content's aspect ratio into the mpeg PAR even though it's
        // the wrong PAR for the DVD. Apparently they rely on the fact
        // that DVD players ignore the content PAR and just use the
        // aspect ratio from the DVD metadata. So, if the aspect computed
        // from the PAR is different from the container's aspect we use
        // the container's aspect & recompute the PAR from it.
        if( title->container_aspect && (int)(title->aspect * 9) != (int)(title->container_aspect * 9) )
        {
            hb_log("scan: content PAR gives wrong aspect %.2f; "
                   "using container aspect %.2f", title->aspect,
                   title->container_aspect );
            title->aspect = title->container_aspect;
            hb_reduce( &title->pixel_aspect_width, &title->pixel_aspect_height,
                       (int)(title->aspect * title->height + 0.5), title->width );
        }

        // don't try to crop unless we got at least 3 previews
        if ( crops->n > 2 )
        {
            sort_crops( crops );
            // The next line selects median cropping - at least
            // 50% of the frames will have their borders removed.
            // Other possible choices are loose cropping (i = 0) where
            // no non-black pixels will be cropped from any frame and a
            // tight cropping (i = crops->n - (crops->n >> 2)) where at
            // least 75% of the frames will have their borders removed.
            i = crops->n >> 1;
            title->crop[0] = EVEN( crops->t[i] );
            title->crop[1] = EVEN( crops->b[i] );
            title->crop[2] = EVEN( crops->l[i] );
            title->crop[3] = EVEN( crops->r[i] );
        }

        hb_log( "scan: %d previews, %dx%d, %.3f fps, autocrop = %d/%d/%d/%d, "
                "aspect %s, PAR %d:%d",
                npreviews, title->width, title->height, (float) title->rate /
                (float) title->rate_base,
                title->crop[0], title->crop[1], title->crop[2], title->crop[3],
                aspect_to_string( title->aspect ), title->pixel_aspect_width,
                title->pixel_aspect_height );

        if( interlaced_preview_count >= ( npreviews / 2 ) )
        {
            hb_log("Title is likely interlaced or telecined (%i out of %i previews). You should do something about that.",
                   interlaced_preview_count, npreviews);
            title->detected_interlacing = 1;
        }
        else
        {
            title->detected_interlacing = 0;
        }
    }
    crop_record_free( crops );
    free( info_list );

    while( ( buf_es = hb_list_item( list_es, 0 ) ) )
    {
        hb_list_rem( list_es, buf_es );
        hb_buffer_close( &buf_es );
    }
    hb_list_close( &list_es );

    if (data->bd)
        hb_bd_stop( data->bd );
    if (data->dvd)
        hb_dvd_stop( data->dvd );

    return npreviews;
}
/*
* This routine is called for every frame from a non-video elementary stream.
* These are a mix of audio & subtitle streams, some of which we want & some
* we're ignoring. This routine checks the frame against all our audio streams
* to see if it's one we want and haven't identified yet. If yes, it passes the
* frame to a codec-specific id routine which is responsible for filling in
* the sample rate, bit rate, channels & other audio parameters.
*
* Since a sample rate is essential for further audio processing, any audio
* stream which isn't successfully id'd by is deleted at the end of the scan.
* This is necessary to avoid ambiguities where things that might be audio
* aren't (e.g., some European DVD Teletext streams use the same IDs as US ATSC
* AC-3 audio).
*/
static void LookForAudio( hb_title_t * title, hb_buffer_t * b )
{
    int i;

    hb_audio_t * audio = NULL;
    // Find the title's audio stream (if any) whose ES id matches this buffer.
    for( i = 0; i < hb_list_count( title->list_audio ); i++ )
    {
        audio = hb_list_item( title->list_audio, i );
        /* check if this elementary stream is one we want */
        if ( audio->id == b->s.id )
        {
            break;
        }
        else
        {
            audio = NULL;
        }
    }
    if( !audio || audio->config.in.bitrate != 0 )
    {
        /* not found or already done */
        hb_buffer_close( &b );
        return;
    }

    if ( audio->priv.scan_cache == NULL )
        audio->priv.scan_cache = hb_fifo_init( 16, 16 );

    if ( hb_fifo_size_bytes( audio->priv.scan_cache ) >= 16384 )
    {
        // Cache is full: drop the oldest buffer to make room for this one.
        hb_buffer_t * tmp;
        tmp = hb_fifo_get( audio->priv.scan_cache );
        hb_buffer_close( &tmp );
    }
    // The fifo takes ownership of b from here on.
    hb_fifo_push( audio->priv.scan_cache, b );

    hb_work_object_t *w = hb_codec_decoder( audio->config.in.codec );

    if ( w == NULL || w->bsinfo == NULL )
    {
        hb_log( "Internal error in scan: unhandled audio type %d for id 0x%x",
                audio->config.in.codec, audio->id );
        goto drop_audio;
    }

    hb_work_info_t info;
    w->title = title;
    w->audio = audio;
    w->codec_param = audio->config.in.codec_param;
    // Peek (don't pop) the oldest cached buffer and ask the codec's
    // bitstream-info proc to parse it.
    b = hb_fifo_see( audio->priv.scan_cache );
    int ret = w->bsinfo( w, b, &info );
    if ( ret < 0 )
    {
        hb_log( "no info on audio type %d/0x%x for id 0x%x",
                audio->config.in.codec, audio->config.in.codec_param,
                audio->id );
        goto drop_audio;
    }
    if ( !info.bitrate )
    {
        /* didn't find any info */
        // Keep the cached data; a later packet may complete the picture.
        free( w );
        return;
    }
    // Success: the cache is no longer needed.
    hb_fifo_flush( audio->priv.scan_cache );
    hb_fifo_close( &audio->priv.scan_cache );

    audio->config.in.samplerate = info.rate;
    audio->config.in.samples_per_frame = info.samples_per_frame;
    audio->config.in.bitrate = info.bitrate;
    audio->config.in.matrix_encoding = info.matrix_encoding;
    audio->config.in.channel_layout = info.channel_layout;
    audio->config.in.channel_map = info.channel_map;
    audio->config.in.version = info.version;
    audio->config.in.flags = info.flags;
    audio->config.in.mode = info.mode;

    // now that we have all the info, set the audio description
    const char *codec_name = NULL;
    if (audio->config.in.codec & HB_ACODEC_FF_MASK)
    {
        AVCodec *codec = avcodec_find_decoder(audio->config.in.codec_param);
        if (codec != NULL)
        {
            // Prefer the libav profile name (e.g. AAC profiles) when known.
            if (info.profile != FF_PROFILE_UNKNOWN)
            {
                codec_name = av_get_profile_name(codec, info.profile);
            }
            if (codec_name == NULL)
            {
                // use our own capitalization for the most common codecs
                switch (audio->config.in.codec_param)
                {
                    case AV_CODEC_ID_AAC:
                        codec_name = "AAC";
                        break;
                    case AV_CODEC_ID_AC3:
                        codec_name = "AC3";
                        break;
                    case AV_CODEC_ID_EAC3:
                        codec_name = "E-AC3";
                        break;
                    case AV_CODEC_ID_TRUEHD:
                        codec_name = "TrueHD";
                        break;
                    case AV_CODEC_ID_DTS:
                        codec_name = audio->config.in.codec == HB_ACODEC_DCA_HD ? "DTS-HD" : "DTS";
                        break;
                    case AV_CODEC_ID_FLAC:
                        codec_name = "FLAC";
                        break;
                    case AV_CODEC_ID_MP2:
                        codec_name = "MPEG";
                        break;
                    case AV_CODEC_ID_MP3:
                        codec_name = "MP3";
                        break;
                    case AV_CODEC_ID_PCM_BLURAY:
                        codec_name = "BD LPCM";
                        break;
                    case AV_CODEC_ID_OPUS:
                        codec_name = "Opus";
                        break;
                    case AV_CODEC_ID_VORBIS:
                        codec_name = "Vorbis";
                        break;
                    default:
                        codec_name = codec->name;
                        break;
                }
            }
        }
        else
        {
            // No libav decoder found; fall back on HandBrake's codec id.
            switch (audio->config.in.codec)
            {
                case HB_ACODEC_DCA:
                    codec_name = "DTS";
                    break;
                case HB_ACODEC_DCA_HD:
                    codec_name = "DTS-HD";
                    break;
                case HB_ACODEC_FFAAC:
                    codec_name = "AAC";
                    break;
                case HB_ACODEC_MP3:
                    codec_name = "MP3";
                    break;
                default:
                    codec_name = "Unknown (libav)";
                    break;
            }
        }
    }
    else
    {
        switch (audio->config.in.codec)
        {
            case HB_ACODEC_AC3:
                codec_name = "AC3";
                break;
            case HB_ACODEC_LPCM:
                codec_name = "LPCM";
                break;
            default:
                codec_name = "Unknown";
                break;
        }
    }
    // NOTE(review): description is a fixed-size buffer; the sprintf/strcat
    // calls below assume the language name plus suffixes always fit --
    // confirm the buffer size bounds these strings.
    sprintf(audio->config.lang.description, "%s (%s)",
            audio->config.lang.simple, codec_name);

    switch (audio->config.lang.type)
    {
        case 2:
            strcat(audio->config.lang.description, " (Visually Impaired)");
            break;
        case 3:
            strcat(audio->config.lang.description, " (Director's Commentary 1)");
            break;
        case 4:
            strcat(audio->config.lang.description, " (Director's Commentary 2)");
            break;
        default:
            break;
    }

    if (audio->config.in.channel_layout)
    {
        // Describe channels as "N.M" where M counts LFE channels.
        int lfes     = (!!(audio->config.in.channel_layout & AV_CH_LOW_FREQUENCY) +
                        !!(audio->config.in.channel_layout & AV_CH_LOW_FREQUENCY_2));
        int channels = av_get_channel_layout_nb_channels(audio->config.in.channel_layout);
        char *desc   = audio->config.lang.description +
                        strlen(audio->config.lang.description);
        sprintf(desc, " (%d.%d ch)", channels - lfes, lfes);

        // describe the matrix encoding mode, if any
        switch (audio->config.in.matrix_encoding)
        {
            case AV_MATRIX_ENCODING_DOLBY:
                if (audio->config.in.codec       == HB_ACODEC_AC3    ||
                    audio->config.in.codec_param == AV_CODEC_ID_AC3  ||
                    audio->config.in.codec_param == AV_CODEC_ID_EAC3 ||
                    audio->config.in.codec_param == AV_CODEC_ID_TRUEHD)
                {
                    strcat(audio->config.lang.description, " (Dolby Surround)");
                    break;
                }
                strcat(audio->config.lang.description, " (Lt/Rt)");
                break;
            case AV_MATRIX_ENCODING_DPLII:
                strcat(audio->config.lang.description, " (Dolby Pro Logic II)");
                break;
            case AV_MATRIX_ENCODING_DPLIIX:
                strcat(audio->config.lang.description, " (Dolby Pro Logic IIx)");
                break;
            case AV_MATRIX_ENCODING_DPLIIZ:
                strcat(audio->config.lang.description, " (Dolby Pro Logic IIz)");
                break;
            case AV_MATRIX_ENCODING_DOLBYEX:
                strcat(audio->config.lang.description, " (Dolby Digital EX)");
                break;
            case AV_MATRIX_ENCODING_DOLBYHEADPHONE:
                strcat(audio->config.lang.description, " (Dolby Headphone)");
                break;
            default:
                break;
        }
    }

    hb_log( "scan: audio 0x%x: %s, rate=%dHz, bitrate=%d %s", audio->id,
            info.name, audio->config.in.samplerate, audio->config.in.bitrate,
            audio->config.lang.description );

    free( w );
    return;

    // We get here if there's no hope of finding info on an audio bitstream,
    // either because we don't have a decoder (or a decoder with a bitstream
    // info proc) or because the decoder's info proc said that the stream
    // wasn't something it could handle. Delete the item from the title's
    // audio list so we won't keep reading packets while trying to get its
    // bitstream info.
drop_audio:
    if ( w )
        free( w );

    hb_fifo_flush( audio->priv.scan_cache );
    hb_fifo_close( &audio->priv.scan_cache );
    hb_list_rem( title->list_audio, audio );
    return;
}
/*
* This routine checks to see if we've ID'd all the audio streams associated
* with a title. It returns 0 if there are more to ID & 1 if all are done.
*/
/* Returns 1 when every audio stream on the title has been identified
 * (a stream is "done" once its input bitrate is known), 0 otherwise. */
static int AllAudioOK( hb_title_t * title )
{
    int idx;
    int count = hb_list_count( title->list_audio );

    for( idx = 0; idx < count; idx++ )
    {
        hb_audio_t *audio = hb_list_item( title->list_audio, idx );
        if( audio->config.in.bitrate == 0 )
            return 0;
    }
    return 1;
}
/* Report first-pass (title enumeration) scan progress to the UI. */
static void UpdateState1(hb_scan_t *scan, int title)
{
    hb_state_t state;

    state.state = HB_STATE_SCANNING;
    state.param.scanning.title_cur = title;
    if (scan->dvd)
        state.param.scanning.title_count = hb_dvd_title_count( scan->dvd );
    else if (scan->bd)
        state.param.scanning.title_count = hb_bd_title_count( scan->bd );
    else if (scan->batch)
        state.param.scanning.title_count = hb_batch_title_count( scan->batch );
    else
        state.param.scanning.title_count = hb_list_count(scan->title_set->list_title);
    state.param.scanning.preview_cur = 0;
    state.param.scanning.preview_count = 1;
    // First pass covers the first half of the overall progress range.
    state.param.scanning.progress = 0.5 *
        ((float)state.param.scanning.title_cur +
         ((float)state.param.scanning.preview_cur /
          state.param.scanning.preview_count)) /
        state.param.scanning.title_count;

    hb_set_state(scan->h, &state);
}
/* Report the start of preview decoding for a title to the UI. */
static void UpdateState2(hb_scan_t *scan, int title)
{
    hb_state_t state;

    state.state = HB_STATE_SCANNING;
    state.param.scanning.title_cur = title;
    state.param.scanning.title_count = hb_list_count( scan->title_set->list_title );
    state.param.scanning.preview_cur = 1;
    state.param.scanning.preview_count = scan->preview_count;
    // Single-title scans use the whole progress range; full scans reserve
    // the first half for title enumeration.
    if (scan->title_index)
        state.param.scanning.progress =
            (float)(state.param.scanning.title_cur - 1) /
            state.param.scanning.title_count;
    else
        state.param.scanning.progress = 0.5 + 0.5 *
            (float)(state.param.scanning.title_cur - 1) /
            state.param.scanning.title_count;

    hb_set_state(scan->h, &state);
}
/* Report per-preview scan progress to the UI. Refreshes the current
 * state so title_cur/title_count set earlier are preserved. */
static void UpdateState3(hb_scan_t *scan, int preview)
{
    hb_state_t state;

    hb_get_state2(scan->h, &state);
    state.param.scanning.preview_cur = preview;
    state.param.scanning.preview_count = scan->preview_count;
    if (scan->title_index)
        state.param.scanning.progress =
            ((float)state.param.scanning.title_cur - 1 +
             ((float)state.param.scanning.preview_cur /
              state.param.scanning.preview_count)) /
            state.param.scanning.title_count;
    else
        state.param.scanning.progress = 0.5 + 0.5 *
            ((float)state.param.scanning.title_cur - 1 +
             ((float)state.param.scanning.preview_cur /
              state.param.scanning.preview_count)) /
            state.param.scanning.title_count;
    hb_set_state(scan->h, &state);
}
HandBrake-0.10.2/libhb/nal_units.h 0000664 0001752 0001752 00000003615 12463330511 017267 0 ustar handbrake handbrake /* nal_units.h
*
* Copyright (c) 2003-2015 HandBrake Team
* This file is part of the HandBrake source code.
* Homepage: <http://handbrake.fr/>.
* It may be used under the terms of the GNU General Public License v2.
* For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#ifndef HB_NAL_UNITS_H
#define HB_NAL_UNITS_H
#include <stdint.h>
#include "common.h"
/*
* Write a NAL unit of the specified size to the provided
* output buffer, using the requested output format.
* Returns the amount (in bytes) of data written to the buffer.
*
* The provided NAL unit must start with the NAL unit header.
*
* Note: the buffer is assumed to be large enough to hold the NAL unit
* as well as any additional data the function may prepend/append to it.
*
* The caller may check the minimum required buffer size by passing a
* NULL buffer to the function and checking the returned size value.
*/
size_t hb_nal_unit_write_annexb(uint8_t *buf, const uint8_t *nal_unit, const size_t nal_unit_size);
size_t hb_nal_unit_write_isomp4(uint8_t *buf, const uint8_t *nal_unit, const size_t nal_unit_size);
/*
* Search the provided data buffer for NAL units in Annex B format.
*
* Returns a pointer to the start (start code prefix excluded) of the
* first NAL unit found, or NULL if no NAL units were found in the buffer.
*
* On input, size holds the length of the provided data buffer.
* On output, size holds the length of the returned NAL unit.
*/
uint8_t* hb_annexb_find_next_nalu(const uint8_t *start, size_t *size);
/*
* Returns a newly-allocated buffer holding a copy of the provided
* NAL unit bitstream data, converted to the requested format.
*/
hb_buffer_t* hb_nal_bitstream_annexb_to_mp4(const uint8_t *data, const size_t size);
hb_buffer_t* hb_nal_bitstream_mp4_to_annexb(const uint8_t *data, const size_t size, const uint8_t nal_length_size);
#endif // HB_NAL_UNITS_H
HandBrake-0.10.2/libhb/eedi2.c 0000664 0001752 0001752 00000212152 12463330511 016254 0 ustar handbrake handbrake /* eedi2.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
The EEDI2 interpolator was created by tritical:
http://web.missouri.edu/~kes25c/
*/
#include "hb.h"
#include "eedi2.h"
/**
* EEDI2 directional limit lookup table
*
* These values are used to limit the range of edge direction searches and filtering.
*/
const int eedi2_limlut[33] __attribute__ ((aligned (16))) = {
6, 6, 7, 7, 8, 8, 9, 9, 9, 10,
10, 11, 11, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, -1, -1 };
/**
 * Analog of _aligned_malloc
 * @param size Size of memory being pointed to
 * @param align_size Size of memory chunks to align to (must be power of 2)
 */
void *eedi2_aligned_malloc( size_t size, size_t align_size )
{
    /* Over-allocate, round the returned pointer up to the requested
       alignment, and stash the offset back to the raw malloc() pointer
       in the int just before the aligned block so eedi2_aligned_free
       can recover it. */
    const size_t mask = align_size - 1;
    char *raw = malloc( size + align_size + sizeof( int ) );

    if( raw == NULL )
        return NULL;

    char *candidate = raw + sizeof( int );
    char *aligned = candidate + ( align_size - ( (size_t)candidate & mask ) );
    *( (int *)aligned - 1 ) = (int)( aligned - raw );

    return aligned;
}
/**
 * Analog of _aligned_free
 * @param ptr The aligned pointer, created with eedi2_aligned_malloc, to be freed
 */
void eedi2_aligned_free( void *ptr )
{
    /* The int stored immediately before the aligned block holds the
       offset back to the pointer originally returned by malloc(). */
    int * ptr2 = (int *)ptr - 1;
    /* Cast to char* before subtracting: arithmetic on void* is a GNU
       extension, not valid ISO C. */
    free( (char *)ptr - *ptr2 );
}
/**
 * Sorts metrics for median filtering
 * @param order Pointer to the table of values to sort
 * @param length Length of the order array
 *
 * Plain insertion sort; the arrays involved are tiny.
 */
void eedi2_sort_metrics( int *order, const int length )
{
    int i, j;
    for( i = 1; i < length; i++ )
    {
        const int key = order[i];
        j = i - 1;
        while( j >= 0 && order[j] > key )
        {
            order[j+1] = order[j];
            j--;
        }
        order[j+1] = key;
    }
}
/**
* Bitblits an image plane (overwrites one bitmap with another)
* @param dtsp Pointer to destination bitmap
* @param dst_pitch Stride of destination bitmap
* @param srcp Pointer to source bitmap
* @param src_pitch Stride of destination bitmap
* @param row_size Width of the bitmap being copied
* @param height Height of the source bitmap
*
* When row_size, dst_pitch, and src_pitch are equal, eedi2_bit_blit can work more quickly by copying the whole plane at once instead of individual lines.
*/
void eedi2_bit_blit( uint8_t * dstp, int dst_pitch,
                     const uint8_t * srcp, int src_pitch,
                     int row_size, int height )
{
    int y;

    if( !height || !row_size )
        return;

    /* When both planes are densely packed (pitch == row width) the whole
       plane is one contiguous region and can be copied in a single call. */
    if( height == 1 || ( dst_pitch == src_pitch && src_pitch == row_size ) )
    {
        memcpy( dstp, srcp, row_size * height );
        return;
    }

    /* Otherwise copy row by row, stepping by each plane's own pitch. */
    for( y = 0; y < height; y++ )
    {
        memcpy( dstp, srcp, row_size );
        dstp += dst_pitch;
        srcp += src_pitch;
    }
}
/**
* A specialized variant of bit_blit, just for setting up the initial, field-sized bitmap planes that EEDI2 interpolates from.
* @param src Pointer to source bitmap plane being copied from
* @param dst Pointer to the destination bitmap plane being copied to
* @param pitch Stride of both bitmaps
* @param height Height of the original, full-size src plane being copied from
*/
void eedi2_fill_half_height_buffer_plane( uint8_t * src, uint8_t * dst, int pitch, int height )
{
    /* Copy every other line of the full-height source into the
       half-height destination — one field's worth of lines.
       When TFF we want to start at line 0 (the top field);
       when BFF the caller passes a src already offset to line 1. */
    int remaining;
    for( remaining = height; remaining > 0; remaining -= 2 )
    {
        memcpy( dst, src, pitch );
        src += 2 * pitch;
        dst += pitch;
    }
}
/**
* A specialized variant of bit_blit, just for resizing the field-height maps EEDI2 generates to frame-height...a simple line doubler
* @param srcp Pointer to source bitmap plane being copied from
* @param dstp Pointer to the destination bitmap plane being copied to
* @param height Height of the input, half-size src plane being copied from
* @param pitch Stride of both bitmaps
*/
void eedi2_upscale_by_2( uint8_t * srcp, uint8_t * dstp, int height, int pitch )
{
    /* Simple line doubler: each source row is written into two
       consecutive destination rows. */
    int row;
    for( row = 0; row < height; row++ )
    {
        memcpy( dstp, srcp, pitch );
        memcpy( dstp + pitch, srcp, pitch );
        srcp += pitch;
        dstp += 2 * pitch;
    }
}
/**
* Finds places where vertically adjacent pixels abruptly change in intensity, i.e., sharp edges.
* @param dstp Pointer to the destination bitmap
* @param dst_pitch Stride of dstp
* @param srcp Pointer to the source bitmap
* @param src_pitch Stride of srcp
* @param mthresh Magnitude threshold, ensures it doesn't mark edges on pixels that are too similar (10 is a good default value)
* @param vthresh Variance threshold, ensures it doesn't look for edges in highly random pixel blocks (20 is a good default value)
* @param lthresh Laplacian threshold, ensures edges are still prominent in the 2nd spatial derivative of the srcp plane (20 is a good default value)
* @param height Height of half-height single-field frame
* @param width Width of srcp bitmap rows, as opposed to the padded stride in src_pitch
*/
void eedi2_build_edge_mask( uint8_t * dstp, int dst_pitch, uint8_t *srcp, int src_pitch,
                            int mthresh, int lthresh, int vthresh, int height, int width )
{
    int x, y;

    // Scale thresholds to the units the tests below operate in:
    // mthresh compares against squared gradient magnitude, vthresh
    // against 9*sumsq - sum*sum over a 3x3 window.
    mthresh = mthresh * 10;
    vthresh = vthresh * 81;

    // NOTE(review): only (height/2) rows of the destination are zeroed here,
    // while the loop below visits rows 1..height-2; non-edge pixels in the
    // lower half are never written. Confirm callers pre-clear the plane or
    // that this matches the intended plane size.
    memset( dstp, 0, ( height / 2 ) * dst_pitch );

    srcp += src_pitch;
    dstp += dst_pitch;
    unsigned char *srcpp = srcp-src_pitch;   // row above
    unsigned char *srcpn = srcp+src_pitch;   // row below

    for( y = 1; y < height - 1; ++y )
    {
        for( x = 1; x < width-1; ++x )
        {
            // Skip flat areas: the pixel (or both its horizontal
            // neighbors) varies vertically by less than 10.
            if( ( abs( srcpp[x] - srcp[x] ) < 10 &&
                  abs( srcp[x] - srcpn[x] ) < 10 &&
                  abs( srcpp[x] - srcpn[x] ) < 10 )
                ||
                ( abs( srcpp[x-1] - srcp[x-1] ) < 10 &&
                  abs( srcp[x-1] - srcpn[x-1] ) < 10 &&
                  abs( srcpp[x-1] - srcpn[x-1] ) < 10 &&
                  abs( srcpp[x+1] - srcp[x+1] ) < 10 &&
                  abs( srcp[x+1] - srcpn[x+1] ) < 10 &&
                  abs( srcpp[x+1] - srcpn[x+1] ) < 10) )
                continue;

            // 3x3 window sum and sum of squares for the variance test.
            const int sum = srcpp[x-1] + srcpp[x] + srcpp[x+1] +
                            srcp[x-1] + srcp[x]+ srcp[x+1] +
                            srcpn[x-1] + srcpn[x] + srcpn[x+1];
            const int sumsq = srcpp[x-1] * srcpp[x-1] +
                              srcpp[x] * srcpp[x] +
                              srcpp[x+1] * srcpp[x+1] +
                              srcp[x-1] * srcp[x-1] +
                              srcp[x] * srcp[x] +
                              srcp[x+1] * srcp[x+1] +
                              srcpn[x-1] * srcpn[x-1] +
                              srcpn[x] * srcpn[x] +
                              srcpn[x+1] * srcpn[x+1];
            // Variance test: reject highly random (noisy) blocks.
            if( 9 * sumsq-sum * sum < vthresh )
                continue;

            // Gradient magnitude test (Ix: horizontal, Iy: vertical).
            const int Ix = srcp[x+1] - srcp[x-1];
            const int Iy = MAX( MAX( abs( srcpp[x] - srcpn[x] ),
                                     abs( srcpp[x] - srcp[x] ) ),
                                abs( srcp[x] - srcpn[x] ) );
            if( Ix * Ix + Iy * Iy >= mthresh )
            {
                dstp[x] = 255;
                continue;
            }

            // Laplacian test: edge still prominent in the 2nd derivative.
            const int Ixx = srcp[x-1] - 2 * srcp[x] + srcp[x+1];
            const int Iyy = srcpp[x] - 2 * srcp[x] + srcpn[x];
            if( abs( Ixx ) + abs( Iyy ) >= lthresh )
                dstp[x] = 255;
        }
        dstp += dst_pitch;
        srcpp += src_pitch;
        srcp += src_pitch;
        srcpn += src_pitch;
    }
}
/**
* Expands and smooths out the edge mask
* @param mskp Pointer to the source edge mask being read from
* @param msk_pitch Stride of mskp
* @param dstp Pointer to the destination to store the dilated edge mask
* @param dst_pitch Stride of dstp
* @param dstr Dilation threshold, ensures a pixel is only retained as an edge in dstp if this number of adjacent pixels or greater are also edges in mskp (4 is a good default value)
* @param height Height of half-height field-sized frame
* @param width Width of mskp bitmap rows, as opposed to the pdded stride in msk_pitch
*/
void eedi2_dilate_edge_mask( uint8_t *mskp, int msk_pitch, uint8_t *dstp, int dst_pitch,
                             int dstr, int height, int width )
{
    int x, y;

    /* Start from a copy of the input mask, then grow edges into
       non-edge pixels that have enough marked neighbors. */
    eedi2_bit_blit( dstp, dst_pitch, mskp, msk_pitch, width, height );

    mskp += msk_pitch;
    dstp += dst_pitch;

    for( y = 1; y < height - 1; ++y )
    {
        unsigned char *above = mskp - msk_pitch;
        unsigned char *below = mskp + msk_pitch;

        for( x = 1; x < width - 1; ++x )
        {
            /* Only non-edge pixels can be promoted to edges. */
            if( mskp[x] != 0 )
                continue;

            /* Count the 8-connected neighbors that are edge pixels. */
            int neighbors =
                ( above[x-1] == 0xFF ) + ( above[x] == 0xFF ) + ( above[x+1] == 0xFF ) +
                ( mskp[x-1]  == 0xFF ) +                        ( mskp[x+1]  == 0xFF ) +
                ( below[x-1] == 0xFF ) + ( below[x] == 0xFF ) + ( below[x+1] == 0xFF );

            if( neighbors >= dstr )
                dstp[x] = 0xFF;
        }

        mskp += msk_pitch;
        dstp += dst_pitch;
    }
}
/**
* Contracts the edge mask
* @param mskp Pointer to the source edge mask being read from
* @param msk_pitch Stride of mskp
* @param dstp Pointer to the destination to store the eroded edge mask
* @param dst_pitch Stride of dstp
* @param estr Erosion threshold, ensures a pixel isn't retained as an edge in dstp if fewer than this number of adjacent pixels are also edges in mskp (2 is a good default value)
* @param height Height of half-height field-sized frame
* @param width Width of mskp bitmap rows, as opposed to the pdded stride in msk_pitch
*/
void eedi2_erode_edge_mask( uint8_t *mskp, int msk_pitch, uint8_t *dstp, int dst_pitch,
                            int estr, int height, int width )
{
    int x, y;

    /* Start from a copy of the input mask, then drop edge pixels with
       too few marked neighbors. */
    eedi2_bit_blit( dstp, dst_pitch, mskp, msk_pitch, width, height );

    mskp += msk_pitch;
    dstp += dst_pitch;

    for( y = 1; y < height - 1; ++y )
    {
        unsigned char *above = mskp - msk_pitch;
        unsigned char *below = mskp + msk_pitch;

        for( x = 1; x < width - 1; ++x )
        {
            /* Only edge pixels can be demoted. */
            if( mskp[x] != 0xFF )
                continue;

            /* Count the 8-connected neighbors that are edge pixels. */
            int neighbors =
                ( above[x-1] == 0xFF ) + ( above[x] == 0xFF ) + ( above[x+1] == 0xFF ) +
                ( mskp[x-1]  == 0xFF ) +                        ( mskp[x+1]  == 0xFF ) +
                ( below[x-1] == 0xFF ) + ( below[x] == 0xFF ) + ( below[x+1] == 0xFF );

            if( neighbors < estr )
                dstp[x] = 0;
        }

        mskp += msk_pitch;
        dstp += dst_pitch;
    }
}
/**
 * Smooths out horizontally aligned holes in the mask
 *
 * If none of the 6 horizontally adjacent pixels are edges, mark the current pixel as not edged.
 * If at least 1 of the 3 on either side are edges, mark the current pixel as an edge.
 *
 * @param mskp Pointer to the source edge mask being read from
 * @param msk_pitch Stride of mskp
 * @param dstp Pointer to the destination to store the smoothed edge mask
 * @param dst_pitch Stride of dstp
 * @param height Height of half-height field-sized frame
 * @param width Width of mskp bitmap rows, as opposed to the padded stride in msk_pitch
 */
void eedi2_remove_small_gaps( uint8_t * mskp, int msk_pitch, uint8_t * dstp, int dst_pitch,
                              int height, int width )
{
    int x, y;

    /* Start from a copy of the source mask, then patch/clear isolated spots. */
    eedi2_bit_blit( dstp, dst_pitch, mskp, msk_pitch, width, height );

    mskp += msk_pitch;
    dstp += dst_pitch;

    for( y = 1; y < height - 1; ++y )
    {
        for( x = 3; x < width - 3; ++x )
        {
            if( mskp[x] )
            {
                /* Clear isolated edge pixels: no other edge within 3 px on either side. */
                const int has_neighbor =
                    mskp[x-3] || mskp[x-2] || mskp[x-1] ||
                    mskp[x+1] || mskp[x+2] || mskp[x+3];
                if( !has_neighbor )
                    dstp[x] = 0;
            }
            else
            {
                /* Bridge small gaps: edges close in on this pixel from both sides. */
                const int bridged =
                    ( mskp[x+1] && ( mskp[x-1] || mskp[x-2] || mskp[x-3] ) ) ||
                    ( mskp[x+2] && ( mskp[x-1] || mskp[x-2] ) ) ||
                    ( mskp[x+3] && mskp[x-1] );
                if( bridged )
                    dstp[x] = 0xFF;
            }
        }
        mskp += msk_pitch;
        dstp += dst_pitch;
    }
}
/**
 * Calculates spatial direction vectors for the edges. This is EEDI2's timesink, and can be thought of as YADIF_CHECK on steroids, as both try to discern which angle a given edge follows
 * @param plane The plane of the image being processed, to know to reduce maxd for chroma planes (HandBrake only works with YUV420 video so it is assumed they are half-height)
 * @param mskp Pointer to the source edge mask being read from
 * @param msk_pitch Stride of mskp
 * @param srcp Pointer to the source image being filtered
 * @param src_pitch Stride of srcp
 * @param dstp Pointer to the destination to store the direction mask (255 = no direction, otherwise 128 + 4*direction)
 * @param dst_pitch Stride of dstp
 * @param maxd Maximum pixel distance to search (24 is a good default value)
 * @param nt Noise threshold (50 is a good default value)
 * @param height Height of half-height field-sized frame
 * @param width Width of srcp bitmap rows, as opposed to the padded stride in src_pitch
 */
void eedi2_calc_directions( const int plane, uint8_t * mskp, int msk_pitch, uint8_t * srcp, int src_pitch,
                            uint8_t * dstp, int dst_pitch, int maxd, int nt, int height, int width )
{
    int x, y, u, i;
    /* 255 marks "no direction found" everywhere until proven otherwise. */
    memset( dstp, 255, dst_pitch * height );
    mskp += msk_pitch;
    dstp += dst_pitch;
    srcp += src_pitch;
    /* Row pointers two above / one above / one below / two below the current row. */
    unsigned char *src2p = srcp - src_pitch * 2;
    unsigned char *srcpp = srcp - src_pitch;
    unsigned char *srcpn = srcp + src_pitch;
    unsigned char *src2n = srcp + src_pitch * 2;
    unsigned char *mskpp = mskp - msk_pitch;
    unsigned char *mskpn = mskp + msk_pitch;
    /* Chroma planes are half-width, so halve the search distance there. */
    const int maxdt = plane == 0 ? maxd : ( maxd >> 1 );
    for( y = 1; y < height - 1; ++y )
    {
        for( x = 1; x < width - 1; ++x )
        {
            /* Only process edge pixels that have at least one horizontal edge neighbour. */
            if( mskp[x] != 0xFF || ( mskp[x-1] != 0xFF && mskp[x+1] != 0xFF ) )
                continue;
            /* Clamp the candidate-shift window [startu, stopu] to the row bounds. */
            const int startu = MAX( -x + 1, -maxdt );
            const int stopu = MIN( width - 2 - x, maxdt );
            /* Initial cost ceilings, scaled by the local vertical contrast and
               capped by multiples of the noise threshold. */
            int minb = MIN( 13 * nt,
                            ( abs( srcp[x] - srcpn[x] ) +
                              abs( srcp[x] - srcpp[x] ) ) * 6 );
            int mina = MIN( 19 * nt,
                            ( abs( srcp[x] - srcpn[x] ) +
                              abs( srcp[x] - srcpp[x] ) ) * 9 );
            int minc = mina;
            int mind = minb;
            int mine = minb;
            /* Best shift found under each of the five cost metrics (a..e);
               -5000 is the "nothing found" sentinel. */
            int dira = -5000, dirb = -5000, dirc = -5000, dird = -5000, dire = -5000;
            for( u = startu; u <= stopu; ++u )
            {
                /* Require edge support above the pixel (or be on the first usable row)... */
                if( y == 1 ||
                    mskpp[x-1+u] == 0xFF || mskpp[x+u] == 0xFF || mskpp[x+1+u] == 0xFF )
                {
                    /* ...and below it (or be on the last usable row), along the shift. */
                    if( y == height - 2 ||
                        mskpn[x-1-u] == 0xFF || mskpn[x-u] == 0xFF || mskpn[x+1-u] == 0xFF )
                    {
                        /* 3-tap SADs between the current row and the shifted rows
                           above/below, in both shift orientations. */
                        const int diffsn = abs( srcp[x-1] - srcpn[x-1-u] ) +
                                           abs( srcp[x] - srcpn[x-u] ) +
                                           abs( srcp[x+1] - srcpn[x+1-u] );
                        const int diffsp = abs( srcp[x-1] - srcpp[x-1+u] ) +
                                           abs( srcp[x] - srcpp[x+u] ) +
                                           abs( srcp[x+1] - srcpp[x+1+u] );
                        const int diffps = abs( srcpp[x-1] - srcp[x-1-u] ) +
                                           abs( srcpp[x] - srcp[x-u] ) +
                                           abs( srcpp[x+1] - srcp[x+1-u] );
                        const int diffns = abs( srcpn[x-1] - srcp[x-1+u] ) +
                                           abs( srcpn[x] - srcp[x+u] ) +
                                           abs( srcpn[x+1] - srcp[x+1+u] );
                        const int diff = diffsn + diffsp + diffps + diffns;
                        int diffd = diffsp + diffns;
                        int diffe = diffsn + diffps;
                        if( diff < minb )
                        {
                            dirb = u;
                            minb = diff;
                        }
                        /* Metric a additionally looks two rows up (hinted likely). */
                        if( __builtin_expect( y > 1, 1) )
                        {
                            const int diff2pp = abs( src2p[x-1] - srcpp[x-1-u] ) +
                                                abs( src2p[x] - srcpp[x-u] ) +
                                                abs( src2p[x+1] - srcpp[x+1-u] );
                            const int diffp2p = abs( srcpp[x-1] - src2p[x-1+u] ) +
                                                abs( srcpp[x] - src2p[x+u] ) +
                                                abs( srcpp[x+1] - src2p[x+1+u] );
                            const int diffa = diff + diff2pp + diffp2p;
                            diffd += diffp2p;
                            diffe += diff2pp;
                            if( diffa < mina )
                            {
                                dira = u;
                                mina = diffa;
                            }
                        }
                        /* Metric c additionally looks two rows down. */
                        if( __builtin_expect( y < height-2, 1) )
                        {
                            const int diff2nn = abs( src2n[x-1] - srcpn[x-1+u] ) +
                                                abs( src2n[x] - srcpn[x+u] ) +
                                                abs( src2n[x+1] - srcpn[x+1+u] );
                            const int diffn2n = abs( srcpn[x-1] - src2n[x-1-u] ) +
                                                abs( srcpn[x] - src2n[x-u] ) +
                                                abs( srcpn[x+1] - src2n[x+1-u] );
                            const int diffc = diff + diff2nn + diffn2n;
                            diffd += diff2nn;
                            diffe += diffn2n;
                            if( diffc < minc )
                            {
                                dirc = u;
                                minc = diffc;
                            }
                        }
                        if( diffd < mind )
                        {
                            dird = u;
                            mind = diffd;
                        }
                        if( diffe < mine )
                        {
                            dire = u;
                            mine = diffe;
                        }
                    }
                }
            }
            /* Collect whichever metrics found a direction and vote on them. */
            int order[5], k=0;
            if( dira != -5000 ) order[k++] = dira;
            if( dirb != -5000 ) order[k++] = dirb;
            if( dirc != -5000 ) order[k++] = dirc;
            if( dird != -5000 ) order[k++] = dird;
            if( dire != -5000 ) order[k++] = dire;
            if( k > 1 )
            {
                eedi2_sort_metrics( order, k );
                /* Rounded median of the surviving candidates. */
                const int mid = ( k & 1 ) ?
                                order[k>>1] :
                                ( order[(k-1)>>1] + order[k>>1] + 1 ) >> 1;
                const int tlim = MAX( eedi2_limlut[abs(mid)] >> 2, 2 );
                int sum = 0, count = 0;
                /* Average only the candidates that agree with the median within tlim. */
                for( i = 0; i < k; ++i )
                {
                    if( abs( order[i] - mid ) <= tlim )
                    {
                        ++count;
                        sum += order[i];
                    }
                }
                /* Encode the direction as 128 + 4*dir; 128 means "vertical". */
                if( count > 1 )
                    dstp[x] = 128 + ( (int)( (float)sum / (float)count ) * 4 );
                else
                    dstp[x] = 128;
            }
            else dstp[x] = 128;
        }
        mskpp += msk_pitch;
        mskp += msk_pitch;
        mskpn += msk_pitch;
        src2p += src_pitch;
        srcpp += src_pitch;
        srcp += src_pitch;
        srcpn += src_pitch;
        src2n += src_pitch;
        dstp += dst_pitch;
    }
}
/**
 * Filters the edge mask
 * @param mskp Pointer to the source edge mask being read from
 * @param msk_pitch Stride of mskp
 * @param dmskp Pointer to the edge direction mask
 * @param dmsk_pitch Stride of dmskp
 * @param dstp Pointer to the destination to store the filtered edge mask
 * @param dst_pitch Stride of dstp
 * @param height Height of half-height field-sized frame
 * @param width Width of mskp bitmap rows, as opposed to the padded stride in msk_pitch
 */
void eedi2_filter_map( uint8_t * mskp, int msk_pitch, uint8_t * dmskp, int dmsk_pitch,
                       uint8_t * dstp, int dst_pitch, int height, int width )
{
    int x, y, j;
    /* Start from a copy of the direction mask, then invalidate inconsistent pixels. */
    eedi2_bit_blit( dstp, dst_pitch, dmskp, dmsk_pitch, width, height );
    mskp += msk_pitch;
    dmskp += dmsk_pitch;
    dstp += dst_pitch;
    unsigned char *dmskpp = dmskp - dmsk_pitch;
    unsigned char *dmskpn = dmskp + dmsk_pitch;
    for( y = 1; y < height - 1; ++y )
    {
        for( x = 1; x < width - 1; ++x )
        {
            /* Only check edge pixels that carry a valid direction (0xFF = none). */
            if( dmskp[x] == 0xFF || mskp[x] != 0xFF )
                continue;
            /* Decode the direction (stored as 128 + 4*dir) and derive a tolerance. */
            const int dir = ( dmskp[x] - 128 ) >> 2;
            const int lim = MAX( abs( dir ) * 2, 12 );
            /* ict/icb: inconsistency found walking towards the top / bottom row. */
            int ict = 0, icb = 0;
            if( dir < 0 )
            {
                /* Walk leftwards along the direction on the current and upper rows. */
                const int dirt = MAX( -x, dir );
                for( j = dirt; j <= 0; ++j )
                {
                    if( ( abs( dmskpp[x+j] - dmskp[x] ) > lim && dmskpp[x+j] != 0xFF ) ||
                        ( dmskp[x+j] == 0xFF && dmskpp[x+j] == 0xFF ) ||
                        ( abs( dmskp[x+j] - dmskp[x] ) > lim && dmskp[x+j] != 0xFF ) )
                    {
                        ict = 1;
                        break;
                    }
                }
            }
            else
            {
                /* Walk rightwards along the direction on the current and upper rows. */
                const int dirt = MIN( width - x - 1, dir );
                for( j = 0; j <= dirt; ++j )
                {
                    if( ( abs( dmskpp[x+j] - dmskp[x] ) > lim && dmskpp[x+j] != 0xFF ) ||
                        ( dmskp[x+j] == 0xFF && dmskpp[x+j] == 0xFF ) ||
                        ( abs( dmskp[x+j] - dmskp[x] ) > lim && dmskp[x+j] != 0xFF ) )
                    {
                        ict = 1;
                        break;
                    }
                }
            }
            /* If the top side was inconsistent, repeat the check towards the bottom. */
            if( ict )
            {
                if( dir < 0 )
                {
                    const int dirt = MIN( width - x - 1, abs( dir ) );
                    for( j = 0; j <= dirt; ++j )
                    {
                        if( ( abs( dmskpn[x+j] - dmskp[x] ) > lim && dmskpn[x+j] != 0xFF ) ||
                            ( dmskpn[x+j] == 0xFF && dmskp[x+j] == 0xFF ) ||
                            ( abs( dmskp[x+j] - dmskp[x] ) > lim && dmskp[x+j] != 0xFF ) )
                        {
                            icb = 1;
                            break;
                        }
                    }
                }
                else
                {
                    const int dirt = MAX( -x, -dir );
                    for( j = dirt; j <= 0; ++j )
                    {
                        if( ( abs( dmskpn[x+j] - dmskp[x] ) > lim && dmskpn[x+j] != 0xFF ) ||
                            ( dmskpn[x+j] == 0xFF && dmskp[x+j] == 0xFF ) ||
                            ( abs( dmskp[x+j] - dmskp[x] ) > lim && dmskp[x+j] != 0xFF ) )
                        {
                            icb = 1;
                            break;
                        }
                    }
                }
                /* Inconsistent in both directions: invalidate this pixel's direction. */
                if( icb )
                    dstp[x] = 255;
            }
        }
        mskp += msk_pitch;
        dmskpp += dmsk_pitch;
        dmskp += dmsk_pitch;
        dmskpn += dmsk_pitch;
        dstp += dst_pitch;
    }
}
/**
 * Filters the edge direction mask
 * @param mskp Pointer to the edge mask
 * @param msk_pitch Stride of mskp
 * @param dmskp Pointer to the edge direction mask being read from
 * @param dmsk_pitch Stride of dmskp
 * @param dstp Pointer to the destination to store the filtered edge direction mask
 * @param dst_pitch Stride of dstp
 * @param height Height of half-height field-sized frame
 * @param width Width of dmskp bitmap rows, as opposed to the padded stride in dmsk_pitch
 */
void eedi2_filter_dir_map( uint8_t * mskp, int msk_pitch, uint8_t * dmskp, int dmsk_pitch,
                           uint8_t * dstp, int dst_pitch, int height, int width )
{
    int x, y, i;
    /* Start from a copy of the direction mask, then smooth it per edge pixel. */
    eedi2_bit_blit( dstp, dst_pitch, dmskp, dmsk_pitch, width, height );
    dmskp += dmsk_pitch;
    unsigned char *dmskpp = dmskp - dmsk_pitch;
    unsigned char *dmskpn = dmskp + dmsk_pitch;
    dstp += dst_pitch;
    mskp += msk_pitch;
    for( y = 1; y < height - 1; ++y )
    {
        for( x = 1; x < width - 1; ++x )
        {
            if( mskp[x] != 0xFF ) continue;
            /* Gather the valid (non-0xFF) directions from the 3x3 neighbourhood. */
            int u = 0, order[9];
            if( dmskpp[x-1] != 0xFF ) order[u++] = dmskpp[x-1];
            if( dmskpp[x] != 0xFF ) order[u++] = dmskpp[x];
            if( dmskpp[x+1] != 0xFF ) order[u++] = dmskpp[x+1];
            if( dmskp[x-1] != 0xFF ) order[u++] = dmskp[x-1];
            if( dmskp[x] != 0xFF ) order[u++] = dmskp[x];
            if( dmskp[x+1] != 0xFF ) order[u++] = dmskp[x+1];
            if( dmskpn[x-1] != 0xFF ) order[u++] = dmskpn[x-1];
            if( dmskpn[x] != 0xFF ) order[u++] = dmskpn[x];
            if( dmskpn[x+1] != 0xFF ) order[u++] = dmskpn[x+1];
            /* Too few valid neighbours: mark this direction as invalid. */
            if( u < 4 )
            {
                dstp[x] = 255;
                continue;
            }
            eedi2_sort_metrics( order, u );
            /* Rounded median of the gathered directions. */
            const int mid = ( u & 1 ) ?
                            order[u>>1] : ( order[(u-1)>>1] + order[u>>1] + 1 ) >> 1;
            int sum = 0, count = 0;
            /* Average only neighbours that agree with the median within the LUT limit. */
            const int lim = eedi2_limlut[abs(mid-128)>>2];
            for( i = 0; i < u; ++i )
            {
                if( abs( order[i] - mid ) <= lim )
                {
                    ++count;
                    sum += order[i];
                }
            }
            /* Not enough agreement (stricter if the centre itself is invalid): drop it. */
            if( count < 4 || ( count < 5 && dmskp[x] == 0xFF ) )
            {
                dstp[x] = 255;
                continue;
            }
            /* Write the rounded mean, biased by the median (weight count+1). */
            dstp[x] = (int)( ( (float)( sum + mid ) / (float)( count + 1 ) ) + 0.5f );
        }
        dmskpp += dmsk_pitch;
        dmskp += dmsk_pitch;
        dmskpn += dmsk_pitch;
        dstp += dst_pitch;
        mskp += msk_pitch;
    }
}
/**
 * Smooths out the edge direction map by filling invalid entries from their neighbours
 * @param mskp Pointer to the edge mask
 * @param msk_pitch Stride of mskp
 * @param dmskp Pointer to the edge direction mask being read from
 * @param dmsk_pitch Stride of dmskp
 * @param dstp Pointer to the destination to store the expanded edge direction mask
 * @param dst_pitch Stride of dstp
 * @param height Height of half-height field-sized frame
 * @param width Width of dmskp bitmap rows, as opposed to the padded stride in dmsk_pitch
 */
void eedi2_expand_dir_map( uint8_t * mskp, int msk_pitch, uint8_t * dmskp, int dmsk_pitch,
                           uint8_t * dstp, int dst_pitch, int height, int width )
{
    int x, y, i;
    /* Start from a copy of the direction mask, then fill in missing directions. */
    eedi2_bit_blit( dstp, dst_pitch, dmskp, dmsk_pitch, width, height );
    dmskp += dmsk_pitch;
    unsigned char *dmskpp = dmskp - dmsk_pitch;
    unsigned char *dmskpn = dmskp + dmsk_pitch;
    dstp += dst_pitch;
    mskp += msk_pitch;
    for( y = 1; y < height - 1; ++y )
    {
        for( x = 1; x < width - 1; ++x )
        {
            /* Only fill edge pixels whose direction is currently invalid (0xFF). */
            if( dmskp[x] != 0xFF || mskp[x] != 0xFF ) continue;
            /* Gather valid directions from the 8 neighbours (centre is invalid). */
            int u = 0, order[9];
            if( dmskpp[x-1] != 0xFF ) order[u++] = dmskpp[x-1];
            if( dmskpp[x] != 0xFF ) order[u++] = dmskpp[x];
            if( dmskpp[x+1] != 0xFF ) order[u++] = dmskpp[x+1];
            if( dmskp[x-1] != 0xFF ) order[u++] = dmskp[x-1];
            if( dmskp[x+1] != 0xFF ) order[u++] = dmskp[x+1];
            if( dmskpn[x-1] != 0xFF ) order[u++] = dmskpn[x-1];
            if( dmskpn[x] != 0xFF ) order[u++] = dmskpn[x];
            if( dmskpn[x+1] != 0xFF ) order[u++] = dmskpn[x+1];
            if( u < 5 ) continue;
            eedi2_sort_metrics( order, u );
            /* Rounded median of the neighbour directions. */
            const int mid = ( u & 1 ) ?
                            order[u>>1] : ( order[(u-1)>>1] + order[u>>1] + 1 ) >> 1;
            int sum = 0, count = 0;
            /* Average only neighbours agreeing with the median within the LUT limit. */
            const int lim = eedi2_limlut[abs(mid-128)>>2];
            for( i = 0; i < u; ++i )
            {
                if( abs( order[i] - mid ) <= lim )
                {
                    ++count;
                    sum += order[i];
                }
            }
            if( count < 5 ) continue;
            /* Write the rounded mean, biased by the median (weight count+1). */
            dstp[x] = (int)( ( (float)( sum + mid ) / (float)( count + 1 ) ) + 0.5f );
        }
        dmskpp += dmsk_pitch;
        dmskp += dmsk_pitch;
        dmskpn += dmsk_pitch;
        dstp += dst_pitch;
        mskp += msk_pitch;
    }
}
/**
 * Re-draws a clearer, less blocky frame-height edge direction mask
 * @param mskp Pointer to the edge mask
 * @param msk_pitch Stride of mskp
 * @param dmskp Pointer to the edge direction mask being read from
 * @param dmsk_pitch Stride of dmskp
 * @param dstp Pointer to the destination to store the redrawn direction mask
 * @param dst_pitch Stride of dstp
 * @param tff Whether or not the frame parity is Top Field First
 * @param height Height of the full-frame output
 * @param width Width of dmskp bitmap rows, as opposed to the padded stride in dmsk_pitch
 */
void eedi2_mark_directions_2x( uint8_t * mskp, int msk_pitch, uint8_t * dmskp, int dmsk_pitch,
                               uint8_t * dstp, int dst_pitch, int tff, int height, int width )
{
    int x, y, i;
    /* 255 marks "no direction" everywhere until proven otherwise. */
    memset( dstp, 255, dst_pitch * height );
    /* dstp walks the missing field lines; dmskp/mskp walk the known field lines
       bracketing each one. */
    dstp += dst_pitch * ( 2 - tff );
    dmskp += dmsk_pitch * ( 1 - tff );
    mskp += msk_pitch * ( 1 - tff );
    unsigned char *dmskpn = dmskp + dmsk_pitch * 2;
    unsigned char *mskpn = mskp + msk_pitch * 2;
    for( y = 2 - tff; y < height - 1; y += 2 )
    {
        for( x = 1; x < width - 1; ++x )
        {
            /* Only process pixels where either bracketing field line is an edge. */
            if( mskp[x] != 0xFF && mskpn[x] != 0xFF ) continue;
            /* Gather valid directions from the field lines above and below. */
            int v = 0, order[6];
            if( dmskp[x-1] != 0xFF ) order[v++] = dmskp[x-1];
            if( dmskp[x] != 0xFF ) order[v++] = dmskp[x];
            if( dmskp[x+1] != 0xFF ) order[v++] = dmskp[x+1];
            if( dmskpn[x-1] != 0xFF ) order[v++] = dmskpn[x-1];
            if( dmskpn[x] != 0xFF ) order[v++] = dmskpn[x];
            if( dmskpn[x+1] != 0xFF ) order[v++] = dmskpn[x+1];
            if( v < 3 ) continue;
            else
            {
                eedi2_sort_metrics( order, v );
                /* Rounded median of the gathered directions. */
                const int mid = ( v & 1 ) ? order[v>>1] : ( order[(v-1)>>1] + order[v>>1]+1) >> 1;
                const int lim = eedi2_limlut[abs(mid-128)>>2];
                /* Count the columns (x-1, x, x+1) whose directions above and below
                   agree within lim (or are invalid on either side). */
                int u = 0;
                if( abs( dmskp[x-1] - dmskpn[x-1] ) <= lim ||
                    dmskp[x-1] == 0xFF || dmskpn[x-1] == 0xFF )
                    ++u;
                if( abs( dmskp[x] - dmskpn[x] ) <= lim ||
                    dmskp[x] == 0xFF || dmskpn[x] == 0xFF )
                    ++u;
                /* Bug fix: this compared dmskp[x+1] against dmskpn[x-1]; the x+1
                   column must be checked against dmskpn[x+1], as the 0xFF
                   fallbacks below (and the x-1 / x checks above) already do. */
                if( abs( dmskp[x+1] - dmskpn[x+1] ) <= lim ||
                    dmskp[x+1] == 0xFF || dmskpn[x+1] == 0xFF)
                    ++u;
                if( u < 2 ) continue;
                /* Average only the candidates agreeing with the median within lim. */
                int count = 0, sum = 0;
                for( i = 0; i < v; ++i )
                {
                    if( abs( order[i] - mid ) <= lim )
                    {
                        ++count;
                        sum += order[i];
                    }
                }
                if( count < v - 2 || count < 2 ) continue;
                /* Write the rounded mean, biased by the median (weight count+1). */
                dstp[x] = (int)( ( (float)( sum + mid ) / (float)( count + 1 ) ) + 0.5f );
            }
        }
        mskp += msk_pitch * 2;
        mskpn += msk_pitch * 2;
        dstp += dst_pitch * 2;
        dmskp += dmsk_pitch * 2;
        dmskpn += dmsk_pitch * 2;
    }
}
/**
 * Filters the frame-height edge direction mask
 * @param mskp Pointer to the edge mask
 * @param msk_pitch Stride of mskp
 * @param dmskp Pointer to the edge direction mask being read from
 * @param dmsk_pitch Stride of dmskp
 * @param dstp Pointer to the destination to store the filtered direction mask
 * @param dst_pitch Stride of dstp
 * @param field Field to filter
 * @param height Height of the full-frame output
 * @param width Width of dmskp bitmap rows, as opposed to the padded stride in dmsk_pitch
 */
void eedi2_filter_dir_map_2x( uint8_t * mskp, int msk_pitch, uint8_t * dmskp, int dmsk_pitch,
                              uint8_t * dstp, int dst_pitch, int field, int height, int width )
{
    int x, y, i;
    /* Start from a copy of the direction mask, then smooth the target field lines. */
    eedi2_bit_blit( dstp, dst_pitch, dmskp, dmsk_pitch, width, height );
    /* The direction rows step by 2 because only one field carries directions. */
    dmskp += dmsk_pitch * ( 2 - field );
    unsigned char *dmskpp = dmskp - dmsk_pitch * 2;
    unsigned char *dmskpn = dmskp + dmsk_pitch * 2;
    mskp += msk_pitch * ( 1 - field );
    unsigned char *mskpn = mskp + msk_pitch * 2;
    dstp += dst_pitch * ( 2 - field );
    for( y = 2 - field; y < height - 1; y += 2 )
    {
        for( x = 1; x < width - 1; ++x )
        {
            /* Only process pixels where either bracketing field line is an edge. */
            if( mskp[x] != 0xFF && mskpn[x] != 0xFF ) continue;
            /* Gather valid directions from the 3x3 same-field neighbourhood,
               guarding the frame's top and bottom rows. */
            int u = 0, order[9];
            if( y > 1 )
            {
                if( dmskpp[x-1] != 0xFF ) order[u++] = dmskpp[x-1];
                if( dmskpp[x] != 0xFF ) order[u++] = dmskpp[x];
                if( dmskpp[x+1] != 0xFF ) order[u++] = dmskpp[x+1];
            }
            if( dmskp[x-1] != 0xFF ) order[u++] = dmskp[x-1];
            if( dmskp[x] != 0xFF ) order[u++] = dmskp[x];
            if( dmskp[x+1] != 0xFF ) order[u++] = dmskp[x+1];
            if( y < height - 2 )
            {
                if( dmskpn[x-1] != 0xFF ) order[u++] = dmskpn[x-1];
                if( dmskpn[x] != 0xFF ) order[u++] = dmskpn[x];
                if( dmskpn[x+1] != 0xFF ) order[u++] = dmskpn[x+1];
            }
            /* Too few valid neighbours: mark the direction invalid. */
            if( u < 4 )
            {
                dstp[x] = 255;
                continue;
            }
            eedi2_sort_metrics( order, u );
            /* Rounded median of the gathered directions. */
            const int mid = ( u & 1 ) ? order[u>>1] : (order[(u-1)>>1] + order[u>>1] + 1 ) >> 1;
            int sum = 0, count = 0;
            /* Average only neighbours agreeing with the median within the LUT limit. */
            const int lim = eedi2_limlut[abs(mid-128)>>2];
            for( i = 0; i < u; ++i )
            {
                if( abs( order[i] - mid ) <= lim )
                {
                    ++count;
                    sum += order[i];
                }
            }
            /* Not enough agreement (stricter if the centre itself is invalid): drop it. */
            if( count < 4 || ( count < 5 && dmskp[x] == 0xFF ) )
            {
                dstp[x] = 255;
                continue;
            }
            /* Write the rounded mean, biased by the median (weight count+1). */
            dstp[x] = (int)( ( (float)( sum + mid ) / (float)( count + 1 ) ) + 0.5f );
        }
        mskp += msk_pitch * 2;
        mskpn += msk_pitch * 2;
        dmskpp += dmsk_pitch * 2;
        dmskp += dmsk_pitch * 2;
        dmskpn += dmsk_pitch * 2;
        dstp += dst_pitch * 2;
    }
}
/**
 * Smooths out the frame-height edge direction mask by filling invalid entries
 * @param mskp Pointer to the edge mask
 * @param msk_pitch Stride of mskp
 * @param dmskp Pointer to the edge direction mask being read from
 * @param dmsk_pitch Stride of dmskp
 * @param dstp Pointer to the destination to store the expanded direction mask
 * @param dst_pitch Stride of dstp
 * @param field Field to filter
 * @param height Height of the full-frame output
 * @param width Width of dmskp bitmap rows, as opposed to the padded stride in dmsk_pitch
 */
void eedi2_expand_dir_map_2x( uint8_t * mskp, int msk_pitch, uint8_t * dmskp, int dmsk_pitch,
                              uint8_t * dstp, int dst_pitch, int field, int height, int width )
{
    int x, y, i;
    /* Start from a copy of the direction mask, then fill in missing directions. */
    eedi2_bit_blit( dstp, dst_pitch, dmskp, dmsk_pitch, width, height );
    /* The direction rows step by 2 because only one field carries directions. */
    dmskp += dmsk_pitch * ( 2 - field );
    unsigned char *dmskpp = dmskp - dmsk_pitch * 2;
    unsigned char *dmskpn = dmskp + dmsk_pitch * 2;
    mskp += msk_pitch * ( 1 - field );
    unsigned char *mskpn = mskp + msk_pitch * 2;
    dstp += dst_pitch * ( 2 - field );
    for( y = 2 - field; y < height - 1; y += 2)
    {
        for( x = 1; x < width - 1; ++x )
        {
            /* Only fill pixels whose direction is invalid and that sit on an edge. */
            if( dmskp[x] != 0xFF || ( mskp[x] != 0xFF && mskpn[x] != 0xFF ) ) continue;
            /* Gather valid directions from the same-field neighbourhood,
               guarding the frame's top and bottom rows. */
            int u = 0, order[9];
            if( y > 1 )
            {
                if( dmskpp[x-1] != 0xFF ) order[u++] = dmskpp[x-1];
                if( dmskpp[x] != 0xFF ) order[u++] = dmskpp[x];
                if( dmskpp[x+1] != 0xFF ) order[u++] = dmskpp[x+1];
            }
            if( dmskp[x-1] != 0xFF ) order[u++] = dmskp[x-1];
            if( dmskp[x+1] != 0xFF ) order[u++] = dmskp[x+1];
            if( y < height - 2 )
            {
                if( dmskpn[x-1] != 0xFF) order[u++] = dmskpn[x-1];
                if( dmskpn[x] != 0xFF) order[u++] = dmskpn[x];
                if( dmskpn[x+1] != 0xFF) order[u++] = dmskpn[x+1];
            }
            if( u < 5 ) continue;
            eedi2_sort_metrics( order, u );
            /* Rounded median of the neighbour directions. */
            const int mid = ( u & 1 ) ? order[u>>1] : ( order[(u-1)>>1] + order[u>>1] + 1 ) >> 1;
            int sum = 0, count = 0;
            /* Average only neighbours agreeing with the median within the LUT limit. */
            const int lim = eedi2_limlut[abs(mid-128)>>2];
            for( i = 0; i < u; ++i )
            {
                if( abs( order[i] - mid ) <= lim )
                {
                    ++count;
                    sum += order[i];
                }
            }
            if( count < 5 ) continue;
            /* Write the rounded mean, biased by the median (weight count+1). */
            dstp[x] = (int)( ( (float)( sum + mid ) / (float)( count + 1 ) ) + 0.5f );
        }
        mskp += msk_pitch * 2;
        mskpn += msk_pitch * 2;
        dmskpp += dmsk_pitch * 2;
        dmskp += dmsk_pitch * 2;
        dmskpn += dmsk_pitch * 2;
        dstp += dst_pitch * 2;
    }
}
/**
 * Like the name suggests, this function fills in gaps in the frame-height edge direction mask
 * @param mskp Pointer to the edge mask
 * @param msk_pitch Stride of mskp
 * @param dmskp Pointer to the edge direction mask being read from
 * @param dmsk_pitch Stride of dmskp
 * @param dstp Pointer to the destination to store the filled-in direction mask
 * @param dst_pitch Stride of dstp
 * @param field Field to filter
 * @param height Height of the full-frame output
 * @param width Width of dmskp bitmap rows, as opposed to the padded stride in dmsk_pitch
 */
void eedi2_fill_gaps_2x( uint8_t *mskp, int msk_pitch, uint8_t * dmskp, int dmsk_pitch,
                         uint8_t * dstp, int dst_pitch, int field, int height, int width )
{
    int x, y, j;
    /* Start from a copy of the direction mask, then interpolate across gaps. */
    eedi2_bit_blit( dstp, dst_pitch, dmskp, dmsk_pitch, width, height );
    dmskp += dmsk_pitch * ( 2 - field );
    unsigned char *dmskpp = dmskp - dmsk_pitch * 2;
    unsigned char *dmskpn = dmskp + dmsk_pitch * 2;
    mskp += msk_pitch * ( 1 - field );
    unsigned char *mskpp = mskp - msk_pitch * 2;
    unsigned char *mskpn = mskp + msk_pitch * 2;
    unsigned char *mskpnn = mskpn + msk_pitch * 2;
    dstp += dst_pitch * ( 2 - field );
    for( y = 2 - field; y < height - 1; y += 2 )
    {
        for( x = 1; x < width - 1; ++x )
        {
            /* Only fill pixels with no direction that sit on an edge. */
            if( dmskp[x] != 0xFF ||
                ( mskp[x] != 0xFF && mskpn[x] != 0xFF ) ) continue;
            /* Scan left for the nearest known direction (back); stop at non-edges.
               500 / -500 are sentinels meaning "not found". */
            int u = x - 1, back = 500, forward = -500;
            while( u )
            {
                if( dmskp[u] != 0xFF )
                {
                    back = dmskp[u];
                    break;
                }
                if( mskp[u] != 0xFF && mskpn[u] != 0xFF ) break;
                --u;
            }
            /* Scan right for the nearest known direction (forward). */
            int v = x + 1;
            while( v < width )
            {
                if( dmskp[v] != 0xFF )
                {
                    forward = dmskp[v];
                    break;
                }
                if( mskp[v] != 0xFF && mskpn[v] != 0xFF ) break;
                ++v;
            }
            /* Track the min/max direction on the field rows above (t) and below (b)
               across the gap; tc/bc go to 0 once that side becomes unusable. */
            int tc = 1, bc = 1;
            int mint = 500, maxt = -20;
            int minb = 500, maxb = -20;
            for( j = u; j <= v; ++j )
            {
                if( tc )
                {
                    if( y <= 2 || dmskpp[j] == 0xFF || ( mskpp[j] != 0xFF && mskp[j] != 0xFF ) )
                    {
                        tc = 0;
                        mint = maxt = 20;
                    }
                    else
                    {
                        if( dmskpp[j] < mint ) mint = dmskpp[j];
                        if( dmskpp[j] > maxt ) maxt = dmskpp[j];
                    }
                }
                if( bc )
                {
                    if( y >= height - 3 || dmskpn[j] == 0xFF || ( mskpn[j] != 0xFF && mskpnn[j] != 0xFF ) )
                    {
                        bc = 0;
                        minb = maxb = 20;
                    }
                    else
                    {
                        if( dmskpn[j] < minb ) minb = dmskpn[j];
                        if( dmskpn[j] > maxb ) maxb = dmskpn[j];
                    }
                }
            }
            /* No samples collected on a side: neutralise its spread. */
            if( maxt == -20 ) maxt = mint = 20;
            if( maxb == -20 ) maxb = minb = 20;
            /* Allowed mismatch between the two endpoint directions, and the
               maximum gap length eligible for filling. */
            int thresh = MAX(
                MAX( MAX( abs( forward - 128 ), abs( back - 128 ) ) >> 2, 8 ),
                MAX( abs( mint - maxt ), abs( minb - maxb ) ) );
            const int flim = MIN(
                MAX( abs( forward - 128 ), abs( back - 128 ) ) >> 2,
                6 );
            if( abs( forward - back ) <= thresh && ( v - u - 1 <= flim || tc || bc ) )
            {
                /* Linearly interpolate the direction across the gap. */
                double step = (double)( forward - back ) / (double)( v - u );
                for( j = 0; j < v - u - 1; ++j )
                    dstp[u+j+1] = back + (int)( j * step + 0.5 );
            }
        }
        mskpp += msk_pitch * 2;
        mskp += msk_pitch * 2;
        mskpn += msk_pitch * 2;
        mskpnn += msk_pitch * 2;
        dmskpp += dmsk_pitch * 2;
        dmskp += dmsk_pitch * 2;
        dmskpn += dmsk_pitch * 2;
        dstp += dst_pitch * 2;
    }
}
/**
 * Actually renders the output frame, based on the edge and edge direction masks
 * @param plane The plane of the image being processed, to know to reduce a search distance for chroma planes (HandBrake only works with YUV420 video so it is assumed they are half-height)
 * @param dmskp Pointer to the edge direction mask being read from (and refined in place)
 * @param dmsk_pitch Stride of dmskp
 * @param dstp Pointer to the line-doubled source field being filtered in place
 * @param dst_pitch Stride of dstp
 * @param omskp Pointer to the destination to store the output edge mask used for post-processing
 * @param omsk_pitch Stride of omskp
 * @param field Field to filter
 * @param nt Noise threshold (50 is a good default value)
 * @param height Height of the full-frame output
 * @param width Width of dstp bitmap rows, as opposed to the padded stride in dst_pitch
 */
void eedi2_interpolate_lattice( const int plane, uint8_t * dmskp, int dmsk_pitch, uint8_t * dstp,
                                int dst_pitch, uint8_t * omskp, int omsk_pitch, int field, int nt,
                                int height, int width )
{
    int x, y, u;
    /* Duplicate the outermost known line so every missing line has a row
       above and below to interpolate between. */
    if( field == 1 )
    {
        eedi2_bit_blit( dstp + ( height - 1 ) * dst_pitch,
                        dst_pitch,
                        dstp + ( height - 2 ) * dst_pitch,
                        dst_pitch,
                        width,
                        1 );
    }
    else
    {
        eedi2_bit_blit( dstp,
                        dst_pitch,
                        dstp + dst_pitch,
                        dst_pitch,
                        width,
                        1 );
    }
    dstp += dst_pitch * ( 1 - field );
    omskp += omsk_pitch * ( 1 - field );
    /* dstpn is the missing line being synthesized; dstp/dstpnn bracket it. */
    unsigned char *dstpn = dstp + dst_pitch;
    unsigned char *dstpnn = dstp + dst_pitch * 2;
    unsigned char *omskn = omskp + omsk_pitch * 2;
    dmskp += dmsk_pitch * ( 2 - field );
    for( y = 2 - field; y < height - 1; y += 2 )
    {
        /* NOTE(review): at x == 0 and x == width-1 the dmskp[x-1]/dmskp[x+1]
           and dstp[x-1]/dstp[x+1] reads below fall one pixel outside the row;
           presumably tolerated because the planes have padded pitches — confirm. */
        for( x = 0; x < width; ++x )
        {
            int dir = dmskp[x];
            const int lim = eedi2_limlut[abs(dir-128)>>2];
            /* No usable direction, or the direction disagrees with both
               horizontal neighbours: plain vertical average. */
            if( dir == 255 ||
                ( abs( dmskp[x] - dmskp[x-1] ) > lim &&
                  abs( dmskp[x] - dmskp[x+1] ) > lim ) )
            {
                dstpn[x] = ( dstp[x] + dstpnn[x] + 1 ) >> 1;
                if( dir != 255 ) dmskp[x] = 128;
                continue;
            }
            if( lim < 9 )
            {
                /* Near-vertical direction in a flat 3x2 neighbourhood
                   (variance test): vertical average and invalidate the direction. */
                const int sum = dstp[x-1] + dstp[x] + dstp[x+1] +
                                dstpnn[x-1] + dstpnn[x] + dstpnn[x+1];
                const int sumsq = dstp[x-1] * dstp[x-1] +
                                  dstp[x] * dstp[x] +
                                  dstp[x+1] * dstp[x+1] +
                                  dstpnn[x-1] * dstpnn[x-1] +
                                  dstpnn[x] * dstpnn[x] +
                                  dstpnn[x+1] * dstpnn[x+1];
                if( 6 * sumsq - sum * sum < 576 )
                {
                    dstpn[x] = ( dstp[x] + dstpnn[x] + 1 ) >> 1;
                    dmskp[x] = 255;
                    continue;
                }
            }
            /* Local minimum/maximum in both bracketing rows: treat as detail,
               use the vertical average and reset the direction. */
            if( x > 1 && x < width - 2 &&
                ( ( dstp[x] < MAX( dstp[x-2], dstp[x-1] ) - 3 &&
                    dstp[x] < MAX( dstp[x+2], dstp[x+1] ) - 3 &&
                    dstpnn[x] < MAX( dstpnn[x-2], dstpnn[x-1] ) - 3 &&
                    dstpnn[x] < MAX( dstpnn[x+2], dstpnn[x+1] ) - 3 )
                  ||
                  ( dstp[x] > MIN( dstp[x-2], dstp[x-1] ) + 3 &&
                    dstp[x] > MIN( dstp[x+2], dstp[x+1] ) + 3 &&
                    dstpnn[x] > MIN( dstpnn[x-2], dstpnn[x-1] ) + 3 &&
                    dstpnn[x] > MIN( dstpnn[x+2], dstpnn[x+1] ) + 3 ) ) )
            {
                dstpn[x] = ( dstp[x] + dstpnn[x] + 1 ) >> 1;
                dmskp[x] = 128;
                continue;
            }
            /* Decode the direction and refine it within +/-2, clamped to the row. */
            dir = ( dir - 128 + 2 ) >> 2;
            int val = ( dstp[x] + dstpnn[x] + 1 ) >> 1;
            const int startu = ( dir - 2 < 0 ) ?
                               MAX( -x + 1, MAX( dir - 2, -width + 2 + x ) )
                               :
                               MIN( x - 1, MIN( dir - 2, width - 2 - x ) );
            const int stopu = ( dir + 2 < 0 ) ?
                              MAX( -x + 1, MAX( dir + 2, -width + 2 + x ) )
                              :
                              MIN( x - 1, MIN( dir + 2, width - 2 - x ) );
            int min = 8 * nt;
            for( u = startu; u <= stopu; ++u )
            {
                /* 3-tap SAD between the bracketing rows along the candidate shift. */
                const int diff =
                    abs( dstp[x-1] - dstpnn[x-u-1] ) +
                    abs( dstp[x] - dstpnn[x-u] ) +
                    abs( dstp[x+1] - dstpnn[x-u+1] ) +
                    abs( dstpnn[x-1] - dstp[x+u-1] ) +
                    abs( dstpnn[x] - dstp[x+u] ) +
                    abs( dstpnn[x+1] - dstp[x+u+1] );
                /* The candidate must be supported by compatible directions in the
                   output edge mask on both bracketing rows. */
                if( diff < min &&
                    ( ( omskp[x-1+u] != 0xFF && abs( omskp[x-1+u] - dmskp[x] ) <= lim ) ||
                      ( omskp[x+u] != 0xFF && abs( omskp[x+u] - dmskp[x]) <= lim ) ||
                      ( omskp[x+1+u] != 0xFF && abs( omskp[x+1+u] - dmskp[x]) <= lim ) ) &&
                    ( ( omskn[x-1-u] != 0xFF && abs( omskn[x-1-u] - dmskp[x]) <= lim ) ||
                      ( omskn[x-u] != 0xFF && abs( omskn[x-u] - dmskp[x]) <= lim ) ||
                      ( omskn[x+1-u] != 0xFF && abs( omskn[x+1-u] - dmskp[x]) <= lim ) ) )
                {
                    /* Secondary check at half the shift, to reject crossings. */
                    const int diff2 =
                        abs( dstp[x+(u>>1)-1] - dstpnn[x-(u>>1)-1] ) +
                        abs( dstp[x+(u>>1)] - dstpnn[x-(u>>1)] ) +
                        abs( dstp[x+(u>>1)+1] - dstpnn[x-(u>>1)+1] );
                    if( diff2 < 4 * nt &&
                        ( ( ( abs( omskp[x+(u>>1)] - omskn[x-(u>>1)] ) <= lim ||
                              abs( omskp[x+(u>>1)] - omskn[x-((u+1)>>1)] ) <= lim ) &&
                            omskp[x+(u>>1)] != 0xFF )
                          ||
                          ( ( abs( omskp[x+((u+1)>>1)] - omskn[x-(u>>1)] ) <= lim ||
                              abs( omskp[x+((u+1)>>1)] - omskn[x-((u+1)>>1)] ) <= lim ) &&
                            omskp[x+((u+1)>>1)] != 0xFF ) ) )
                    {
                        if( ( abs( dmskp[x] - omskp[x+(u>>1)] ) <= lim ||
                              abs( dmskp[x] - omskp[x+((u+1)>>1)] ) <= lim ) &&
                            ( abs( dmskp[x] - omskn[x-(u>>1)] ) <= lim ||
                              abs( dmskp[x] - omskn[x-((u+1)>>1)] ) <= lim ) )
                        {
                            /* Average the four half-shift samples from the two rows. */
                            val = ( dstp[x+(u>>1)] + dstp[x+((u+1)>>1)] +
                                    dstpnn[x-(u>>1)] + dstpnn[x-((u+1)>>1)] + 2 ) >> 2;
                            min = diff;
                            dir = u;
                        }
                    }
                }
            }
            if( min != 8 * nt )
            {
                /* A refined direction was found: commit it (re-encoded). */
                dstpn[x] = val;
                dmskp[x] = 128 + dir * 4;
            }
            else
            {
                /* Fallback: small unconstrained search (smaller on chroma),
                   accepting only values between the two vertical neighbours. */
                const int minm = MIN( dstp[x], dstpnn[x] );
                const int maxm = MAX( dstp[x], dstpnn[x] );
                const int d = plane == 0 ? 4 : 2;
                const int startu = MAX( -x + 1, -d );
                const int stopu = MIN( width - 2 - x, d );
                min = 7 * nt;
                for( u = startu; u <= stopu; ++u )
                {
                    const int p1 = dstp[x+(u>>1)] + dstp[x+((u+1)>>1)];
                    const int p2 = dstpnn[x-(u>>1)] + dstpnn[x-((u+1)>>1)];
                    const int diff =
                        abs( dstp[x-1] - dstpnn[x-u-1] ) +
                        abs( dstp[x] - dstpnn[x-u] ) +
                        abs( dstp[x+1] - dstpnn[x-u+1] ) +
                        abs( dstpnn[x-1] - dstp[x+u-1] ) +
                        abs( dstpnn[x] - dstp[x+u] ) +
                        abs( dstpnn[x+1] - dstp[x+u+1] ) +
                        abs( p1 - p2 );
                    if( diff < min )
                    {
                        const int valt = ( p1 + p2 + 2 ) >> 2;
                        if( valt >= minm && valt <= maxm )
                        {
                            val = valt;
                            min = diff;
                            dir = u;
                        }
                    }
                }
                dstpn[x] = val;
                if( min == 7*nt ) dmskp[x] = 128;
                else dmskp[x] = 128 + dir * 4;
            }
        }
        dstp += dst_pitch * 2;
        dstpn += dst_pitch * 2;
        dstpnn += dst_pitch * 2;
        dmskp += dmsk_pitch * 2;
        omskp += omsk_pitch * 2;
        omskn += omsk_pitch * 2;
    }
}
/**
 * Applies some extra filtering to smooth the edge direction mask
 * @param nmskp Pointer to the newly-filtered edge direction mask being read from
 * @param nmsk_pitch Stride of nmskp
 * @param omskp Pointer to the old unfiltered edge direction mask being read from
 * @param omsk_pitch Stride of omskp
 * @param dstp Pointer to the output image being filtered in place
 * @param src_pitch Stride of dstp
 * @param field Field to filter
 * @param height Height of the full-frame output
 * @param width Width of dstp bitmap rows, as opposed to the padded stride in src_pitch
 */
void eedi2_post_process( uint8_t * nmskp, int nmsk_pitch, uint8_t * omskp, int omsk_pitch,
                         uint8_t * dstp, int src_pitch, int field, int height, int width )
{
    int x, y;

    /* Step over the known field lines; only synthesized lines are touched. */
    nmskp += ( 2 - field ) * nmsk_pitch;
    omskp += ( 2 - field ) * omsk_pitch;
    dstp += ( 2 - field ) * src_pitch;
    unsigned char *above = dstp - src_pitch;
    unsigned char *below = dstp + src_pitch;

    for( y = 2 - field; y < height - 1; y += 2 )
    {
        for( x = 0; x < width; ++x )
        {
            const int tolerance = eedi2_limlut[abs(nmskp[x]-128)>>2];
            const int disagrees = abs( nmskp[x] - omskp[x] ) > tolerance;
            /* When the old and new directions disagree beyond the tolerance
               (and the old one was a real direction), fall back to a plain
               vertical average of the bracketing known lines. */
            if( disagrees && omskp[x] != 255 && omskp[x] != 128 )
                dstp[x] = ( above[x] + below[x] + 1 ) >> 1;
        }
        nmskp += nmsk_pitch * 2;
        omskp += omsk_pitch * 2;
        above += src_pitch * 2;
        below += src_pitch * 2;
        dstp  += src_pitch * 2;
    }
}
/**
* Blurs the source field plane
* @param src Pointer to the half-height source field plane
* @param src_pitch Stride of src
* @param tmp Pointer to a temporary buffer for juggling bitmaps
* @param tmp_pitch Stride of tmp
* @param dst Pointer to the destination to store the blurred field plane
* @param dst_pitch Stride of dst
* @param height Height of the half-height field-sized frame
* @param width Width of dstp bitmap rows, as opposed to the padded stride in dst_pitch
*/
void eedi2_gaussian_blur1( uint8_t * src, int src_pitch, uint8_t * tmp, int tmp_pitch, uint8_t * dst, int dst_pitch, int height, int width )
{
uint8_t * srcp = src;
uint8_t * dstp = tmp;
int x, y;
for( y = 0; y < height; ++y )
{
dstp[0] = ( srcp[3] * 582 + srcp[2] * 7078 + srcp[1] * 31724 +
srcp[0] * 26152 + 32768 ) >> 16;
dstp[1] = ( srcp[4] * 582 + srcp[3] * 7078 +
( srcp[0] + srcp[2] ) * 15862 +
srcp[1] * 26152 + 32768 ) >> 16;
dstp[2] = ( srcp[5] * 582 + ( srcp[0] + srcp[4] ) * 3539 +
( srcp[1] + srcp[3] ) * 15862 +
srcp[2]*26152 + 32768 ) >> 16;
for( x = 3; x < width - 3; ++x )
{
dstp[x] = ( ( srcp[x-3] + srcp[x+3] ) * 291 +
( srcp[x-2] + srcp[x+2] ) * 3539 +
( srcp[x-1] + srcp[x+1] ) * 15862 +
srcp[x] * 26152 + 32768 ) >> 16;
}
dstp[x] = ( srcp[x-3] * 582 + ( srcp[x-2] + srcp[x+2] ) * 3539 +
( srcp[x-1] + srcp[x+1] ) * 15862 +
srcp[x] * 26152 + 32768 ) >> 16;
++x;
dstp[x] = ( srcp[x-3] * 582 + srcp[x-2] * 7078 +
( srcp[x-1] + srcp[x+1] ) * 15862 +
srcp[x] * 26152 + 32768 ) >> 16;
++x;
dstp[x] = ( srcp[x-3] * 582 + srcp[x-2] * 7078 +
srcp[x-1] * 31724 + srcp[x] * 26152 + 32768 ) >> 16;
srcp += src_pitch;
dstp += tmp_pitch;
}
srcp = tmp;
dstp = dst;
unsigned char *src3p = srcp - tmp_pitch * 3;
unsigned char *src2p = srcp - tmp_pitch * 2;
unsigned char *srcpp = srcp - tmp_pitch;
unsigned char *srcpn = srcp + tmp_pitch;
unsigned char *src2n = srcp + tmp_pitch * 2;
unsigned char *src3n = srcp + tmp_pitch * 3;
for( x = 0; x < width; ++x )
{
dstp[x] = ( src3n[x] * 582 + src2n[x] * 7078 + srcpn[x] * 31724 +
srcp[x] * 26152 + 32768 ) >> 16;
}
src3p += tmp_pitch;
src2p += tmp_pitch;
srcpp += tmp_pitch;
srcp += tmp_pitch;
srcpn += tmp_pitch;
src2n += tmp_pitch;
src3n += tmp_pitch;
dstp += dst_pitch;
for( x = 0; x < width; ++x )
{
dstp[x] = ( src3n[x] * 582 + src2n[x] * 7078 +
( srcpp[x] + srcpn[x] ) * 15862 +
srcp[x] * 26152 + 32768 ) >> 16;
}
src3p += tmp_pitch;
src2p += tmp_pitch;
srcpp += tmp_pitch;
srcp += tmp_pitch;
srcpn += tmp_pitch;
src2n += tmp_pitch;
src3n += tmp_pitch;
dstp += dst_pitch;
for( x = 0; x < width; ++x )
{
dstp[x] = ( src3n[x] * 582 + ( src2p[x] + src2n[x] ) * 3539 +
( srcpp[x] + srcpn[x] ) * 15862 +
srcp[x] * 26152 + 32768 ) >> 16;
}
src3p += src_pitch;
src2p += src_pitch;
srcpp += src_pitch;
srcp += src_pitch;
srcpn += src_pitch;
src2n += src_pitch;
src3n += src_pitch;
dstp += dst_pitch;
for( y = 3; y < height - 3; ++y )
{
for( x = 0; x < width; ++x )
{
dstp[x] = ( ( src3p[x] + src3n[x] ) * 291 +
( src2p[x] + src2n[x] ) * 3539 +
( srcpp[x] + srcpn[x] ) * 15862 +
srcp[x] * 26152 + 32768 ) >> 16;
}
src3p += tmp_pitch;
src2p += tmp_pitch;
srcpp += tmp_pitch;
srcp += tmp_pitch;
srcpn += tmp_pitch;
src2n += tmp_pitch;
src3n += tmp_pitch;
dstp += dst_pitch;
}
for( x = 0; x < width; ++x )
{
dstp[x] = ( src3p[x] * 582 + ( src2p[x] + src2n[x] ) *3539 +
( srcpp[x] + srcpn[x] ) * 15862 +
srcp[x] * 26152 + 32768 ) >> 16;
}
src3p += tmp_pitch;
src2p += tmp_pitch;
srcpp += tmp_pitch;
srcp += tmp_pitch;
srcpn += tmp_pitch;
src2n += tmp_pitch;
src3n += tmp_pitch;
dstp += dst_pitch;
for( x = 0; x < width; ++x )
{
dstp[x] = ( src3p[x] * 582 + src2p[x] * 7078 +
( srcpp[x] + srcpn[x] ) * 15862 +
srcp[x] * 26152 + 32768 ) >> 16;
}
src3p += tmp_pitch;
src2p += tmp_pitch;
srcpp += tmp_pitch;
srcp += tmp_pitch;
srcpn += tmp_pitch;
src2n += tmp_pitch;
src3n += tmp_pitch;
dstp += dst_pitch;
for( x = 0; x < width; ++x )
{
dstp[x] = ( src3p[x] * 582 + src2p[x] * 7078 +
srcpp[x] * 31724 + srcp[x] * 26152 + 32768 ) >> 16;
}
}
/**
 * Blurs the spatial derivatives of the source field plane with a separable
 * 9-tap kernel (taps 339, 1951, 6809, 14415, 18508, mirrored; sum 65536).
 * The horizontal pass normalizes with ">> 16"; the vertical pass uses
 * ">> 18", i.e. it additionally scales the result down by 4.
 * @param src Pointer to the derivative array to filter
 * @param tmp Pointer to a temporary storage for the derivative array while it's being filtered
 * @param dst Pointer to the destination to store the filtered output derivative array
 * @param pitch Stride of the bitmap from which the src array is derived
 * @param height Height of the half-height field-sized frame from which the src array derivs were taken
 * @param width Width of the bitmap from which the src array is derived, as opposed to the padded stride in pitch
 */
void eedi2_gaussian_blur_sqrt2( int *src, int *tmp, int *dst, const int pitch, int height, const int width )
{
    int * srcp = src;
    int * dstp = tmp;
    int x, y;

    /* Horizontal pass: src -> tmp. The four columns at each edge use
     * shortened kernels that mirror each other. */
    for( y = 0; y < height; ++y )
    {
        x = 0;
        dstp[x] = ( srcp[x+4] * 678 + srcp[x+3] * 3902 + srcp[x+2] * 13618 +
                    srcp[x+1] * 28830 + srcp[x] * 18508 + 32768 ) >> 16;
        ++x;
        dstp[x] = ( srcp[x+4] * 678 + srcp[x+3] * 3902 + srcp[x+2] * 13618 +
                    ( srcp[x-1] + srcp[x+1] ) *14415 +
                    srcp[x] * 18508 + 32768 ) >> 16;
        ++x;
        dstp[x] = ( srcp[x+4] * 678 + srcp[x+3] * 3902 +
                    ( srcp[x-2] + srcp[x+2] ) * 6809 +
                    ( srcp[x-1] + srcp[x+1] ) * 14415 +
                    srcp[x] * 18508 + 32768 ) >> 16;
        ++x;
        dstp[x] = ( srcp[x+4] * 678 + ( srcp[x-3] + srcp[x+3] ) * 1951 +
                    ( srcp[x-2] + srcp[x+2] ) * 6809 +
                    ( srcp[x-1] + srcp[x+1] ) * 14415 +
                    srcp[x] * 18508 + 32768 ) >> 16;
        for( x = 4; x < width - 4; ++x )
        {
            dstp[x] = ( ( srcp[x-4] + srcp[x+4] ) * 339 +
                        ( srcp[x-3] + srcp[x+3] ) * 1951 +
                        ( srcp[x-2] + srcp[x+2] ) * 6809 +
                        ( srcp[x-1] + srcp[x+1] ) * 14415 +
                        srcp[x] * 18508 + 32768 ) >> 16;
        }
        dstp[x] = ( srcp[x-4] * 678 + ( srcp[x-3] + srcp[x+3] ) * 1951 +
                    ( srcp[x-2] + srcp[x+2] ) * 6809 +
                    ( srcp[x-1] + srcp[x+1] ) * 14415 +
                    srcp[x] * 18508 + 32768 ) >> 16;
        ++x;
        dstp[x] = ( srcp[x-4] * 678 + srcp[x-3] * 3902 +
                    ( srcp[x-2] + srcp[x+2] ) * 6809 +
                    ( srcp[x-1] + srcp[x+1] ) * 14415 +
                    srcp[x] * 18508 + 32768 ) >> 16;
        ++x;
        /* Bug fix: this tap mirrors the x == 1 case, so it must read
         * srcp[x-3]; the original read srcp[x+3], which is past the end
         * of the row (index width + 1) and weights the wrong sample. */
        dstp[x] = ( srcp[x-4] * 678 + srcp[x-3] * 3902 + srcp[x-2] * 13618 +
                    ( srcp[x-1] + srcp[x+1] ) * 14415 +
                    srcp[x] * 18508 + 32768 ) >> 16;
        ++x;
        dstp[x] = ( srcp[x-4] * 678 + srcp[x-3] * 3902 + srcp[x-2] * 13618 +
                    srcp[x-1] * 28830 +
                    srcp[x] * 18508 + 32768 ) >> 16;
        srcp += pitch;
        dstp += pitch;
    }

    /* Vertical pass: tmp -> dst */
    dstp = dst;
    srcp = tmp;
    int * src4p = srcp - pitch * 4;
    int * src3p = srcp - pitch * 3;
    int * src2p = srcp - pitch * 2;
    int * srcpp = srcp - pitch;
    int * srcpn = srcp + pitch;
    int * src2n = srcp + pitch * 2;
    int * src3n = srcp + pitch * 3;
    int * src4n = srcp + pitch * 4;
    for( x = 0; x < width; ++x )
    {
        dstp[x] = ( src4n[x] * 678 + src3n[x] * 3902 +
                    src2n[x] * 13618 + srcpn[x] * 28830 +
                    srcp[x] * 18508 + 32768 ) >> 18;
    }
    src4p += pitch;
    src3p += pitch;
    src2p += pitch;
    srcpp += pitch;
    srcp += pitch;
    srcpn += pitch;
    src2n += pitch;
    src3n += pitch;
    src4n += pitch;
    dstp += pitch;
    for( x = 0; x < width; ++x )
    {
        dstp[x] = ( src4n[x] * 678 + src3n[x] * 3902 + src2n[x] * 13618 +
                    ( srcpp[x] + srcpn[x] ) * 14415 +
                    srcp[x] * 18508 + 32768 ) >> 18;
    }
    src4p += pitch;
    src3p += pitch;
    src2p += pitch;
    srcpp += pitch;
    srcp += pitch;
    srcpn += pitch;
    src2n += pitch;
    src3n += pitch;
    src4n += pitch;
    dstp += pitch;
    for( x = 0; x < width; ++x )
    {
        dstp[x] = ( src4n[x] * 678 + src3n[x] * 3902 +
                    ( src2p[x] + src2n[x] ) * 6809 +
                    ( srcpp[x] + srcpn[x] ) * 14415 +
                    srcp[x] * 18508 + 32768 ) >> 18;
    }
    src4p += pitch;
    src3p += pitch;
    src2p += pitch;
    srcpp += pitch;
    srcp += pitch;
    srcpn += pitch;
    src2n += pitch;
    src3n += pitch;
    src4n += pitch;
    dstp += pitch;
    for( x = 0; x < width; ++x )
    {
        dstp[x] = ( src4n[x] * 678 + ( src3p[x] + src3n[x] ) * 1951 +
                    ( src2p[x] + src2n[x] ) * 6809 +
                    ( srcpp[x] + srcpn[x] ) * 14415 +
                    srcp[x] * 18508 + 32768 ) >> 18;
    }
    src4p += pitch;
    src3p += pitch;
    src2p += pitch;
    srcpp += pitch;
    srcp += pitch;
    srcpn += pitch;
    src2n += pitch;
    src3n += pitch;
    src4n += pitch;
    dstp += pitch;
    for( y = 4; y < height - 4; ++y )
    {
        for( x = 0; x < width; ++x )
        {
            dstp[x] = ( ( src4p[x] + src4n[x] ) * 339 +
                        ( src3p[x] + src3n[x] ) * 1951 +
                        ( src2p[x] + src2n[x] ) * 6809 +
                        ( srcpp[x] + srcpn[x] ) * 14415 +
                        srcp[x] * 18508 + 32768 ) >> 18;
        }
        src4p += pitch;
        src3p += pitch;
        src2p += pitch;
        srcpp += pitch;
        srcp += pitch;
        srcpn += pitch;
        src2n += pitch;
        src3n += pitch;
        src4n += pitch;
        dstp += pitch;
    }
    for( x = 0; x < width; ++x )
    {
        dstp[x] = ( src4p[x] * 678 +
                    ( src3p[x] + src3n[x] ) * 1951 +
                    ( src2p[x] + src2n[x] ) * 6809 +
                    ( srcpp[x] + srcpn[x] ) * 14415 +
                    srcp[x] * 18508 + 32768 ) >> 18;
    }
    src4p += pitch;
    src3p += pitch;
    src2p += pitch;
    srcpp += pitch;
    srcp += pitch;
    srcpn += pitch;
    src2n += pitch;
    src3n += pitch;
    src4n += pitch;
    dstp += pitch;
    for( x = 0; x < width; ++x )
    {
        dstp[x] = ( src4p[x] * 678 + src3p[x] * 3902 +
                    ( src2p[x] + src2n[x] ) * 6809 +
                    ( srcpp[x] + srcpn[x] ) * 14415 +
                    srcp[x] * 18508 + 32768 ) >> 18;
    }
    src4p += pitch;
    src3p += pitch;
    src2p += pitch;
    srcpp += pitch;
    srcp += pitch;
    srcpn += pitch;
    src2n += pitch;
    src3n += pitch;
    src4n += pitch;
    dstp += pitch;
    for( x = 0; x < width; ++x )
    {
        dstp[x] = ( src4p[x] * 678 + src3p[x] * 3902 + src2p[x] * 13618 +
                    ( srcpp[x] + srcpn[x] ) * 14415 +
                    srcp[x] * 18508 + 32768 ) >> 18;
    }
    src4p += pitch;
    src3p += pitch;
    src2p += pitch;
    srcpp += pitch;
    srcp += pitch;
    srcpn += pitch;
    src2n += pitch;
    src3n += pitch;
    src4n += pitch;
    dstp += pitch;
    for( x = 0; x < width; ++x )
    {
        dstp[x] = ( src4p[x] * 678 + src3p[x] * 3902 +
                    src2p[x] * 13618 + srcpp[x] * 28830 +
                    srcp[x] * 18508 + 32768 ) >> 18;
    }
}
/* Computes one row of squared/cross derivatives. Horizontal neighbours are
 * clamped at the left/right edges; the caller supplies the rows used for
 * the vertical difference (clamped at the top/bottom of the frame). */
static void eedi2_calc_derivatives_row( const unsigned char *row,
                                        const unsigned char *above,
                                        const unsigned char *below,
                                        int width, int *x2, int *y2, int *xy )
{
    int x;
    for( x = 0; x < width; ++x )
    {
        const int xl = x > 0         ? x - 1 : 0;
        const int xr = x < width - 1 ? x + 1 : x;
        const int Ix = row[xr] - row[xl];
        const int Iy = above[x] - below[x];
        x2[x] = ( Ix * Ix ) >> 1;
        y2[x] = ( Iy * Iy ) >> 1;
        xy[x] = ( Ix * Iy ) >> 1;
    }
}
/**
 * Finds spatial derivatives for a source field plane
 * @param srcp Pointer to the plane to derive
 * @param src_pitch Stride of srcp
 * @param height Height of the half-height field-sized frame
 * @param width Width of srcp bitmap rows, as opposed to the padded stride in src_pitch
 * @param x2 Pointer to the array to store the x/x derivatives
 * @param y2 Pointer to the array to store the y/y derivatives
 * @param xy Pointer to the array to store the x/y derivatives
 */
void eedi2_calc_derivatives( uint8_t *srcp, int src_pitch, int height, int width, int *x2, int *y2, int *xy)
{
    int y;
    for( y = 0; y < height; ++y )
    {
        /* at the top edge the row itself stands in for the row above,
         * and at the bottom edge for the row below */
        const unsigned char *above = y > 0          ? srcp - src_pitch : srcp;
        const unsigned char *below = y < height - 1 ? srcp + src_pitch : srcp;
        eedi2_calc_derivatives_row( srcp, above, below, width, x2, y2, xy );
        srcp += src_pitch;
        x2 += src_pitch;
        y2 += src_pitch;
        xy += src_pitch;
    }
}
/**
 * Filters junctions and corners for the output image
 * @param x2 Pointer to the x/x derivatives
 * @param y2 Pointer to the y/y derivatives
 * @param xy Pointer to the x/y derivatives
 * @param pitch Stride of the source field plane from which the derivatives were calculated
 * @param mskp Pointer to the edge direction mask
 * @param msk_pitch Stride of mskp
 * @param dstp Pointer to the output image being filtered in place
 * @param dst_pitch Stride of dstp
 * @param height Height of the full-frame output plane
 * @param width Width of dstp bitmap rows, as opposed to the padded stride in dst_pitch
 * @param field Field to filter
 */
void eedi2_post_process_corner( int *x2, int *y2, int *xy, const int pitch, uint8_t * mskp, int msk_pitch, uint8_t * dstp, int dst_pitch, int height, int width, int field )
{
    int x, y;

    /* start on the first processed line of this field */
    mskp += ( 8 - field ) * msk_pitch;
    dstp += ( 8 - field ) * dst_pitch;
    unsigned char * above = dstp - dst_pitch;
    unsigned char * below = dstp + dst_pitch;

    /* derivative planes are field-sized; skip their first three rows */
    x2 += pitch * 3;
    y2 += pitch * 3;
    xy += pitch * 3;
    int *x2_next = x2 + pitch;
    int *y2_next = y2 + pitch;
    int *xy_next = xy + pitch;

    for( y = 8 - field; y < height - 7; y += 2 )
    {
        for( x = 4; x < width - 4; ++x )
        {
            /* skip pixels the mask marks as 255 or 128 */
            if( mskp[x] != 255 && mskp[x] != 128 )
            {
                const int s0 = x2[x] + y2[x];
                const int s1 = x2_next[x] + y2_next[x];
                /* corner-strength responses (det - 0.09 * trace^2) for
                 * this derivative row and the next one */
                const int c1 = (int)( x2[x] * y2[x] - xy[x] * xy[x] -
                                      0.09 * s0 * s0 );
                const int c2 = (int)( x2_next[x] * y2_next[x] -
                                      xy_next[x] * xy_next[x] -
                                      0.09 * s1 * s1 );
                if( c1 > 775 || c2 > 775 )
                {
                    /* strong corner: replace with the rounded average of
                     * the lines above and below */
                    dstp[x] = ( above[x] + below[x] + 1 ) >> 1;
                }
            }
        }
        mskp += msk_pitch * 2;
        above += dst_pitch * 2;
        dstp += dst_pitch * 2;
        below += dst_pitch * 2;
        x2 += pitch;
        x2_next += pitch;
        y2 += pitch;
        y2_next += pitch;
        xy += pitch;
        xy_next += pitch;
    }
}
HandBrake-0.10.2/libhb/dvd.h 0000664 0001752 0001752 00000004634 12463330511 016052 0 ustar handbrake handbrake /* dvd.h
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#ifndef HB_DVD_H
#define HB_DVD_H
#include "dvdnav/dvdnav.h"
#include "dvdread/ifo_read.h"
#include "dvdread/nav_read.h"
/* State for the libdvdread-based DVD input backend.
 * NOTE(review): field meanings are inferred from names and the dvdread
 * API types used here — confirm against libhb/dvd.c. */
struct hb_dvdread_s
{
    char         * path;                /* device node or image path */

    dvd_reader_t * reader;
    ifo_handle_t * vmg;                 /* video manager (VMG) IFO */

    int            vts;                 /* currently open video title set */
    int            ttn;                 /* title number within that VTS */
    ifo_handle_t * ifo;                 /* IFO of the open VTS */
    dvd_file_t   * file;

    pgc_t        * pgc;                 /* current program chain */
    int            cell_start;          /* first/last cell of the title */
    int            cell_end;
    int            title_start;         /* first/last sector of the title */
    int            title_end;
    int            title_block_count;

    int            cell_cur;            /* cell being read */
    int            cell_next;           /* cell to read next */
    int            cell_overlap;
    int            block;               /* current sector */
    int            pack_len;
    int            next_vobu;
    int            in_cell;
    int            in_sync;
    uint16_t       cur_vob_id;
    uint8_t        cur_cell_id;
};
/* State for the libdvdnav-based DVD input backend.
 * NOTE(review): field meanings inferred from names — confirm in dvdnav.c. */
struct hb_dvdnav_s
{
    char         * path;                /* device node or image path */

    dvdnav_t     * dvdnav;              /* dvdnav library handle */
    dvd_reader_t * reader;
    ifo_handle_t * vmg;                 /* video manager (VMG) IFO */
    int            title;               /* title currently being read */
    int            title_block_count;
    int            chapter;             /* current chapter */
    int            cell;                /* current cell */
    hb_list_t    * list_chapter;        /* chapters of the current title */
    int            stopped;             /* reading was stopped/aborted */
};
typedef struct hb_dvdnav_s hb_dvdnav_t;
typedef struct hb_dvdread_s hb_dvdread_t;
union hb_dvd_s
{
hb_dvdread_t dvdread;
hb_dvdnav_t dvdnav;
};
/* Virtual method table shared by the dvdread and dvdnav backends; each
 * backend fills this in (see hb_dvdnav_methods / hb_dvdread_methods). */
struct hb_dvd_func_s
{
    hb_dvd_t *    (* init)         ( char * );                     /* open a device/image */
    void          (* close)        ( hb_dvd_t ** );
    char        * (* name)         ( char * );                     /* volume name */
    int           (* title_count)  ( hb_dvd_t * );
    hb_title_t  * (* title_scan)   ( hb_dvd_t *, int, uint64_t );
    int           (* start)        ( hb_dvd_t *, hb_title_t *, int );
    void          (* stop)         ( hb_dvd_t * );
    int           (* seek)         ( hb_dvd_t *, float );          /* fraction 0..1, presumably */
    hb_buffer_t * (* read)         ( hb_dvd_t * );
    int           (* chapter)      ( hb_dvd_t * );                 /* current chapter */
    int           (* angle_count)  ( hb_dvd_t * );
    void          (* set_angle)    ( hb_dvd_t *, int );
    int           (* main_feature) ( hb_dvd_t *, hb_list_t * );
};
hb_dvd_func_t * hb_dvdnav_methods( void );
hb_dvd_func_t * hb_dvdread_methods( void );
#endif // HB_DVD_H
HandBrake-0.10.2/libhb/qsv_common.c 0000664 0001752 0001752 00000153367 12463330511 017461 0 ustar handbrake handbrake /* qsv_common.c
*
* Copyright (c) 2003-2015 HandBrake Team
* This file is part of the HandBrake source code.
* Homepage: .
* It may be used under the terms of the GNU General Public License v2.
* For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#ifdef USE_QSV
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include "hb.h"
#include "ports.h"
#include "common.h"
#include "hb_dict.h"
#include "qsv_common.h"
#include "h264_common.h"
// QSV info for each codec
static hb_qsv_info_t *hb_qsv_info_avc = NULL;
static hb_qsv_info_t *hb_qsv_info_hevc = NULL;
// API versions
static mfxVersion qsv_software_version = { .Version = 0, };
static mfxVersion qsv_hardware_version = { .Version = 0, };
// AVC implementations
static hb_qsv_info_t qsv_software_info_avc = { .available = 0, .codec_id = MFX_CODEC_AVC, .implementation = MFX_IMPL_SOFTWARE, };
static hb_qsv_info_t qsv_hardware_info_avc = { .available = 0, .codec_id = MFX_CODEC_AVC, .implementation = MFX_IMPL_HARDWARE_ANY|MFX_IMPL_VIA_ANY, };
// HEVC implementations
static mfxPluginUID qsv_encode_plugin_hevc = { .Data = { 0x2F, 0xCA, 0x99, 0x74, 0x9F, 0xDB, 0x49, 0xAE, 0xB1, 0x21, 0xA5, 0xB6, 0x3E, 0xF5, 0x68, 0xF7 } };
static hb_qsv_info_t qsv_software_info_hevc = { .available = 0, .codec_id = MFX_CODEC_HEVC, .implementation = MFX_IMPL_SOFTWARE, };
static hb_qsv_info_t qsv_hardware_info_hevc = { .available = 0, .codec_id = MFX_CODEC_HEVC, .implementation = MFX_IMPL_HARDWARE_ANY|MFX_IMPL_VIA_ANY, };
// check available Intel Media SDK version against a minimum
#define HB_CHECK_MFX_VERSION(MFX_VERSION, MAJOR, MINOR) \
(MFX_VERSION.Major == MAJOR && MFX_VERSION.Minor >= MINOR)
/*
* Determine the "generation" of QSV hardware based on the CPU microarchitecture.
* Anything unknown is assumed to be more recent than the latest known generation.
* This avoids having to order the hb_cpu_platform enum depending on QSV hardware.
*/
enum
{
QSV_G0, // third party hardware
QSV_G1, // Sandy Bridge or equivalent
QSV_G2, // Ivy Bridge or equivalent
QSV_G3, // Haswell or equivalent
};
/* Map a CPU microarchitecture to a QSV hardware generation; anything
 * unknown is treated as the most recent known generation (see the
 * comment above the QSV_G* enum). */
static int qsv_hardware_generation(int cpu_platform)
{
    if (cpu_platform == HB_CPU_PLATFORM_INTEL_BNL)
    {
        return QSV_G0;
    }
    if (cpu_platform == HB_CPU_PLATFORM_INTEL_SNB)
    {
        return QSV_G1;
    }
    if (cpu_platform == HB_CPU_PLATFORM_INTEL_IVB ||
        cpu_platform == HB_CPU_PLATFORM_INTEL_SLM)
    {
        return QSV_G2;
    }
    /* HB_CPU_PLATFORM_INTEL_HSW and anything unrecognized */
    return QSV_G3;
}
/*
 * Determine whether a given mfxIMPL is hardware-accelerated,
 * i.e. its base type is anything other than MFX_IMPL_SOFTWARE.
 */
static int qsv_implementation_is_hardware(mfxIMPL implementation)
{
    return MFX_IMPL_BASETYPE(implementation) != MFX_IMPL_SOFTWARE;
}
/* Global QSV availability: true when the QSV H.264 encoder is usable,
 * which is the minimum requirement for any QSV support. */
int hb_qsv_available()
{
    return hb_qsv_video_encoder_is_enabled(HB_VCODEC_QSV_H264);
}
/* Whether the given HandBrake video encoder is QSV-backed and usable:
 * only QSV H.264, and only if a working implementation was detected. */
int hb_qsv_video_encoder_is_enabled(int encoder)
{
    if (encoder != HB_VCODEC_QSV_H264)
    {
        return 0;
    }
    return hb_qsv_info_avc != NULL && hb_qsv_info_avc->available;
}
/* Whether the given HandBrake audio encoder is QSV-backed.
 * No QSV audio encoders exist yet, so this is always false. */
int hb_qsv_audio_encoder_is_enabled(int encoder)
{
    (void)encoder; /* unused until QSV audio encoding is supported */
    return 0;
}
/*
 * Fill videoParam with the baseline parameter set used for capability
 * queries (1080p25 VBR AVC in system memory); not used for actual jobs.
 * No-op when videoParam is NULL.
 */
static void init_video_param(mfxVideoParam *videoParam)
{
    if (videoParam == NULL)
    {
        return;
    }
    memset(videoParam, 0, sizeof(mfxVideoParam));
    videoParam->mfx.CodecId                 = MFX_CODEC_AVC;
    videoParam->mfx.CodecLevel              = MFX_LEVEL_UNKNOWN;
    videoParam->mfx.CodecProfile            = MFX_PROFILE_UNKNOWN;
    videoParam->mfx.RateControlMethod       = MFX_RATECONTROL_VBR;
    videoParam->mfx.TargetUsage             = MFX_TARGETUSAGE_BALANCED;
    videoParam->mfx.TargetKbps              = 5000;
    videoParam->mfx.GopOptFlag              = MFX_GOP_CLOSED;
    videoParam->mfx.FrameInfo.FourCC        = MFX_FOURCC_NV12;
    videoParam->mfx.FrameInfo.ChromaFormat  = MFX_CHROMAFORMAT_YUV420;
    videoParam->mfx.FrameInfo.PicStruct     = MFX_PICSTRUCT_PROGRESSIVE;
    videoParam->mfx.FrameInfo.FrameRateExtN = 25;
    videoParam->mfx.FrameInfo.FrameRateExtD = 1;
    /* 1088 is 1080 rounded up to a multiple of the 16-pixel macroblock */
    videoParam->mfx.FrameInfo.Width         = 1920;
    videoParam->mfx.FrameInfo.CropW         = 1920;
    videoParam->mfx.FrameInfo.AspectRatioW  = 1;
    videoParam->mfx.FrameInfo.Height        = 1088;
    videoParam->mfx.FrameInfo.CropH         = 1080;
    videoParam->mfx.FrameInfo.AspectRatioH  = 1;
    videoParam->AsyncDepth                  = AV_QSV_ASYNC_DEPTH_DEFAULT;
    videoParam->IOPattern                   = MFX_IOPATTERN_IN_SYSTEM_MEMORY;
}
/*
 * Fill extCodingOption2 with every optional feature enabled, so that a
 * subsequent MFXVideoENCODE_Query can zero out the unsupported ones.
 * No-op when extCodingOption2 is NULL.
 */
static void init_ext_coding_option2(mfxExtCodingOption2 *extCodingOption2)
{
    if (extCodingOption2 == NULL)
    {
        return;
    }
    memset(extCodingOption2, 0, sizeof(mfxExtCodingOption2));
    extCodingOption2->Header.BufferId = MFX_EXTBUFF_CODING_OPTION2;
    extCodingOption2->Header.BufferSz = sizeof(mfxExtCodingOption2);
    extCodingOption2->MBBRC           = MFX_CODINGOPTION_ON;
    extCodingOption2->ExtBRC          = MFX_CODINGOPTION_ON;
    extCodingOption2->Trellis         = MFX_TRELLIS_I|MFX_TRELLIS_P|MFX_TRELLIS_B;
    extCodingOption2->RepeatPPS       = MFX_CODINGOPTION_ON;
    extCodingOption2->BRefType        = MFX_B_REF_PYRAMID;
    extCodingOption2->AdaptiveI       = MFX_CODINGOPTION_ON;
    extCodingOption2->AdaptiveB       = MFX_CODINGOPTION_ON;
    extCodingOption2->LookAheadDS     = MFX_LOOKAHEAD_DS_4x;
    extCodingOption2->NumMbPerSlice   = 2040; // 1920x1088/4
}
/*
 * Probe one (codec ID, implementation) pair on an open MFX session and
 * fill in info->available plus the HB_QSV_CAP_* bits in
 * info->capabilities. Loads/unloads the codec plug-in (HEVC) as needed.
 * Always returns 0; callers consume only the side effects on *info.
 */
static int query_capabilities(mfxSession session, mfxVersion version, hb_qsv_info_t *info)
{
    /*
     * MFXVideoENCODE_Query(mfxSession, mfxVideoParam *in, mfxVideoParam *out);
     *
     * Mode 1:
     * - in is NULL
     * - out has the parameters we want to query set to 1
     * - out->mfx.CodecId field has to be set (mandatory)
     * - MFXVideoENCODE_Query should zero out all unsupported parameters
     *
     * Mode 2:
     * - the paramaters we want to query are set for in
     * - in ->mfx.CodecId field has to be set (mandatory)
     * - out->mfx.CodecId field has to be set (mandatory)
     * - MFXVideoENCODE_Query should sanitize all unsupported parameters
     */
    mfxStatus     status;
    mfxPluginUID *pluginUID;
    mfxExtBuffer *videoExtParam[1];
    mfxVideoParam videoParam, inputParam;
    mfxExtCodingOption2 extCodingOption2;
    /* Reset capabilities before querying */
    info->capabilities = 0;
    /* Load optional codec plug-ins */
    switch (info->codec_id)
    {
        case MFX_CODEC_HEVC:
            pluginUID = &qsv_encode_plugin_hevc;
            break;
        default:
            pluginUID = NULL;
            break;
    }
    if (pluginUID != NULL && HB_CHECK_MFX_VERSION(version, 1, 8) &&
        MFXVideoUSER_Load(session, pluginUID, 0) < MFX_ERR_NONE)
    {
        // couldn't load plugin successfully
        return 0;
    }
    /*
     * First of all, check availability of an encoder for
     * this combination of a codec ID and implementation.
     *
     * Note: the query can error out rather than sanitizing
     * unsupported codec IDs, so don't log errors.
     */
    if (HB_CHECK_MFX_VERSION(version, HB_QSV_MINVERSION_MAJOR, HB_QSV_MINVERSION_MINOR))
    {
        if (info->implementation & MFX_IMPL_AUDIO)
        {
            /* Not yet supported */
            return 0;
        }
        else
        {
            init_video_param(&inputParam);
            inputParam.mfx.CodecId = info->codec_id;
            memset(&videoParam, 0, sizeof(mfxVideoParam));
            videoParam.mfx.CodecId = inputParam.mfx.CodecId;
            if (MFXVideoENCODE_Query(session, &inputParam, &videoParam) >= MFX_ERR_NONE &&
                videoParam.mfx.CodecId == info->codec_id)
            {
                info->available = 1;
            }
        }
    }
    if (!info->available)
    {
        /* Don't check capabilities for unavailable encoders */
        return 0;
    }
    if (info->implementation & MFX_IMPL_AUDIO)
    {
        /* We don't have any audio capability checks yet */
        return 0;
    }
    else
    {
        /* Implementation-specific features that can't be queried */
        if (qsv_implementation_is_hardware(info->implementation))
        {
            if (qsv_hardware_generation(hb_get_cpu_platform()) >= QSV_G3)
            {
                info->capabilities |= HB_QSV_CAP_B_REF_PYRAMID;
            }
        }
        else
        {
            if (HB_CHECK_MFX_VERSION(version, 1, 6))
            {
                info->capabilities |= HB_QSV_CAP_B_REF_PYRAMID;
            }
        }
        /* API-specific features that can't be queried */
        if (HB_CHECK_MFX_VERSION(version, 1, 6))
        {
            // API >= 1.6 (mfxBitstream::DecodeTimeStamp, mfxExtCodingOption2)
            info->capabilities |= HB_QSV_CAP_MSDK_API_1_6;
        }
        /*
         * Check availability of optional rate control methods.
         *
         * Mode 2 tends to error out, but mode 1 gives false negatives, which
         * is worse. So use mode 2 and assume an error means it's unsupported.
         *
         * Also assume that LA and ICQ combined imply LA_ICQ is
         * supported, so we don't need to check the latter too.
         */
        if (HB_CHECK_MFX_VERSION(version, 1, 7))
        {
            /* lookahead rate control (API >= 1.7) */
            init_video_param(&inputParam);
            inputParam.mfx.CodecId           = info->codec_id;
            inputParam.mfx.RateControlMethod = MFX_RATECONTROL_LA;
            memset(&videoParam, 0, sizeof(mfxVideoParam));
            videoParam.mfx.CodecId = inputParam.mfx.CodecId;
            if (MFXVideoENCODE_Query(session, &inputParam, &videoParam) >= MFX_ERR_NONE &&
                videoParam.mfx.RateControlMethod == MFX_RATECONTROL_LA)
            {
                info->capabilities |= HB_QSV_CAP_RATECONTROL_LA;
                // also check for LA + interlaced support
                init_video_param(&inputParam);
                inputParam.mfx.CodecId             = info->codec_id;
                inputParam.mfx.RateControlMethod   = MFX_RATECONTROL_LA;
                inputParam.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_FIELD_TFF;
                memset(&videoParam, 0, sizeof(mfxVideoParam));
                videoParam.mfx.CodecId = inputParam.mfx.CodecId;
                if (MFXVideoENCODE_Query(session, &inputParam, &videoParam) >= MFX_ERR_NONE &&
                    videoParam.mfx.FrameInfo.PicStruct == MFX_PICSTRUCT_FIELD_TFF    &&
                    videoParam.mfx.RateControlMethod   == MFX_RATECONTROL_LA)
                {
                    info->capabilities |= HB_QSV_CAP_RATECONTROL_LAi;
                }
            }
        }
        if (HB_CHECK_MFX_VERSION(version, 1, 8))
        {
            /* intelligent constant quality (API >= 1.8) */
            init_video_param(&inputParam);
            inputParam.mfx.CodecId           = info->codec_id;
            inputParam.mfx.RateControlMethod = MFX_RATECONTROL_ICQ;
            memset(&videoParam, 0, sizeof(mfxVideoParam));
            videoParam.mfx.CodecId = inputParam.mfx.CodecId;
            if (MFXVideoENCODE_Query(session, &inputParam, &videoParam) >= MFX_ERR_NONE &&
                videoParam.mfx.RateControlMethod == MFX_RATECONTROL_ICQ)
            {
                info->capabilities |= HB_QSV_CAP_RATECONTROL_ICQ;
            }
        }
        /*
         * Check mfxExtCodingOption2 fields.
         *
         * Mode 2 suffers from false negatives with some drivers, whereas mode 1
         * suffers from false positives instead. The latter is probably easier
         * and/or safer to sanitize for us, so use mode 1.
         */
        if (HB_CHECK_MFX_VERSION(version, 1, 6) && info->codec_id == MFX_CODEC_AVC)
        {
            init_video_param(&videoParam);
            videoParam.mfx.CodecId = info->codec_id;
            init_ext_coding_option2(&extCodingOption2);
            videoParam.ExtParam    = videoExtParam;
            videoParam.ExtParam[0] = (mfxExtBuffer*)&extCodingOption2;
            videoParam.NumExtParam = 1;
            status = MFXVideoENCODE_Query(session, NULL, &videoParam);
            if (status >= MFX_ERR_NONE)
            {
#if 0
                // testing code that could come in handy
                fprintf(stderr, "-------------------\n");
                fprintf(stderr, "MBBRC:       0x%02X\n",     extCodingOption2.MBBRC);
                fprintf(stderr, "ExtBRC:      0x%02X\n",     extCodingOption2.ExtBRC);
                fprintf(stderr, "Trellis:     0x%02X\n",     extCodingOption2.Trellis);
                fprintf(stderr, "RepeatPPS:   0x%02X\n",     extCodingOption2.RepeatPPS);
                fprintf(stderr, "BRefType:    %4"PRIu16"\n", extCodingOption2.BRefType);
                fprintf(stderr, "AdaptiveI:   0x%02X\n",     extCodingOption2.AdaptiveI);
                fprintf(stderr, "AdaptiveB:   0x%02X\n",     extCodingOption2.AdaptiveB);
                fprintf(stderr, "LookAheadDS: %4"PRIu16"\n", extCodingOption2.LookAheadDS);
                fprintf(stderr, "-------------------\n");
#endif
                /*
                 * Sanitize API 1.6 fields:
                 *
                 * - MBBRC  requires G3 hardware (Haswell or equivalent)
                 * - ExtBRC requires G2 hardware (Ivy Bridge or equivalent)
                 */
                if (qsv_implementation_is_hardware(info->implementation) &&
                    qsv_hardware_generation(hb_get_cpu_platform()) >= QSV_G3)
                {
                    if (extCodingOption2.MBBRC)
                    {
                        info->capabilities |= HB_QSV_CAP_OPTION2_MBBRC;
                    }
                }
                if (qsv_implementation_is_hardware(info->implementation) &&
                    qsv_hardware_generation(hb_get_cpu_platform()) >= QSV_G2)
                {
                    if (extCodingOption2.ExtBRC)
                    {
                        info->capabilities |= HB_QSV_CAP_OPTION2_EXTBRC;
                    }
                }
                /*
                 * Sanitize API 1.7 fields:
                 *
                 * - Trellis requires G3 hardware (Haswell or equivalent)
                 */
                if (HB_CHECK_MFX_VERSION(version, 1, 7))
                {
                    if (qsv_implementation_is_hardware(info->implementation) &&
                        qsv_hardware_generation(hb_get_cpu_platform()) >= QSV_G3)
                    {
                        if (extCodingOption2.Trellis)
                        {
                            info->capabilities |= HB_QSV_CAP_OPTION2_TRELLIS;
                        }
                    }
                }
                /*
                 * Sanitize API 1.8 fields:
                 *
                 * - BRefType    requires B-pyramid support
                 * - LookAheadDS requires lookahead support
                 * - AdaptiveI, AdaptiveB, NumMbPerSlice unknown (trust Query)
                 */
                if (HB_CHECK_MFX_VERSION(version, 1, 8))
                {
                    if (info->capabilities & HB_QSV_CAP_B_REF_PYRAMID)
                    {
                        if (extCodingOption2.BRefType)
                        {
                            info->capabilities |= HB_QSV_CAP_OPTION2_BREFTYPE;
                        }
                    }
                    if (info->capabilities & HB_QSV_CAP_RATECONTROL_LA)
                    {
                        if (extCodingOption2.LookAheadDS)
                        {
                            info->capabilities |= HB_QSV_CAP_OPTION2_LA_DOWNS;
                        }
                    }
                    if (extCodingOption2.AdaptiveI && extCodingOption2.AdaptiveB)
                    {
                        info->capabilities |= HB_QSV_CAP_OPTION2_IB_ADAPT;
                    }
                    if (extCodingOption2.NumMbPerSlice)
                    {
                        info->capabilities |= HB_QSV_CAP_OPTION2_NMBSLICE;
                    }
                }
            }
            else
            {
                fprintf(stderr,
                        "hb_qsv_info_init: mfxExtCodingOption2 check failed (0x%"PRIX32", 0x%"PRIX32", %d)\n",
                        info->codec_id, info->implementation, status);
            }
        }
    }
    /* Unload optional codec plug-ins */
    if (pluginUID != NULL && HB_CHECK_MFX_VERSION(version, 1, 8))
    {
        MFXVideoUSER_UnLoad(session, pluginUID);
    }
    return 0;
}
/*
 * One-time probe of the Intel Media SDK: detects the software and
 * hardware implementations, records their API versions, queries the
 * per-codec capabilities, and picks a preferred implementation.
 * Guarded by init_done, so repeated calls are no-ops. Always returns 0.
 */
int hb_qsv_info_init()
{
    static int init_done = 0;
    if (init_done)
        return 0;
    init_done = 1;
    /*
     * First, check for any MSDK version to determine whether one or
     * more implementations are present; then check if we can use them.
     *
     * I've had issues using a NULL version with some combinations of
     * hardware and driver, so use a low version number (1.0) instead.
     */
    mfxSession session;
    mfxVersion version = { .Major = 1, .Minor = 0, };
    // check for software fallback
    if (MFXInit(MFX_IMPL_SOFTWARE, &version, &session) == MFX_ERR_NONE)
    {
        // Media SDK software found, but check that our minimum is supported
        MFXQueryVersion(session, &qsv_software_version);
        if (HB_CHECK_MFX_VERSION(qsv_software_version,
                                 HB_QSV_MINVERSION_MAJOR,
                                 HB_QSV_MINVERSION_MINOR))
        {
            query_capabilities(session, qsv_software_version, &qsv_software_info_avc);
            query_capabilities(session, qsv_software_version, &qsv_software_info_hevc);
            // now that we know which hardware encoders are
            // available, we can set the preferred implementation
            hb_qsv_impl_set_preferred("software");
        }
        MFXClose(session);
    }
    // check for actual hardware support
    if (MFXInit(MFX_IMPL_HARDWARE_ANY, &version, &session) == MFX_ERR_NONE)
    {
        // Media SDK hardware found, but check that our minimum is supported
        //
        // Note: third-party hardware (QSV_G0) is unsupported for the time being
        MFXQueryVersion(session, &qsv_hardware_version);
        if (qsv_hardware_generation(hb_get_cpu_platform()) >= QSV_G1 &&
            HB_CHECK_MFX_VERSION(qsv_hardware_version,
                                 HB_QSV_MINVERSION_MAJOR,
                                 HB_QSV_MINVERSION_MINOR))
        {
            query_capabilities(session, qsv_hardware_version, &qsv_hardware_info_avc);
            query_capabilities(session, qsv_hardware_version, &qsv_hardware_info_hevc);
            // now that we know which hardware encoders are
            // available, we can set the preferred implementation
            hb_qsv_impl_set_preferred("hardware");
        }
        MFXClose(session);
    }
    // success
    return 0;
}
/*
 * Pretty-print a capability bitmask at the given deep-log level:
 * one short keyword per HB_QSV_CAP_* flag set in caps, after prefix.
 */
static void log_capabilities(int log_level, uint64_t caps, const char *prefix)
{
    if (!caps)
    {
        hb_deep_log(log_level, "%s none (standard feature set)", prefix);
    }
    else
    {
        hb_deep_log(log_level, "%s%s%s%s%s%s%s%s%s%s%s%s%s", prefix,
                    !(caps & HB_QSV_CAP_MSDK_API_1_6)     ? "" : " api1.6",
                    !(caps & HB_QSV_CAP_B_REF_PYRAMID)    ? "" : " bpyramid",
                    !(caps & HB_QSV_CAP_OPTION2_BREFTYPE) ? "" : " breftype",
                    !(caps & HB_QSV_CAP_RATECONTROL_LA)   ? "" : " lookahead",
                    !(caps & HB_QSV_CAP_RATECONTROL_LAi)  ? "" : " lookaheadi",
                    !(caps & HB_QSV_CAP_OPTION2_LA_DOWNS) ? "" : " lookaheadds",
                    !(caps & HB_QSV_CAP_RATECONTROL_ICQ)  ? "" : " icq",
                    !(caps & HB_QSV_CAP_OPTION2_MBBRC)    ? "" : " mbbrc",
                    !(caps & HB_QSV_CAP_OPTION2_EXTBRC)   ? "" : " extbrc",
                    !(caps & HB_QSV_CAP_OPTION2_TRELLIS)  ? "" : " trellis",
                    !(caps & HB_QSV_CAP_OPTION2_IB_ADAPT) ? "" : " adaptivei adaptiveb",
                    !(caps & HB_QSV_CAP_OPTION2_NMBSLICE) ? "" : " nummbperslice");
    }
}
/*
 * Log the QSV support summary: overall availability, hardware/software
 * API versions against the minimum, and per-encoder capability flags
 * (the detailed capabilities only appear at deep-log verbosity).
 */
void hb_qsv_info_print()
{
    // is QSV available and usable?
    hb_log("Intel Quick Sync Video support: %s",
           hb_qsv_available() ? "yes": "no");
    // also print the details
    if (qsv_hardware_version.Version)
    {
        hb_log(" - Intel Media SDK hardware: API %"PRIu16".%"PRIu16" (minimum: %"PRIu16".%"PRIu16")",
               qsv_hardware_version.Major, qsv_hardware_version.Minor,
               HB_QSV_MINVERSION_MAJOR, HB_QSV_MINVERSION_MINOR);
    }
    if (qsv_software_version.Version)
    {
        hb_log(" - Intel Media SDK software: API %"PRIu16".%"PRIu16" (minimum: %"PRIu16".%"PRIu16")",
               qsv_software_version.Major, qsv_software_version.Minor,
               HB_QSV_MINVERSION_MAJOR, HB_QSV_MINVERSION_MINOR);
    }
    if (hb_qsv_available())
    {
        if (hb_qsv_info_avc != NULL && hb_qsv_info_avc->available)
        {
            hb_log(" - H.264 encoder: yes");
            hb_log("    - preferred implementation: %s",
                   hb_qsv_impl_get_name(hb_qsv_info_avc->implementation));
            if (qsv_hardware_info_avc.available)
            {
                log_capabilities(2, qsv_hardware_info_avc.capabilities,
                                 "    - capabilities (hardware): ");
            }
            if (qsv_software_info_avc.available)
            {
                log_capabilities(2, qsv_software_info_avc.capabilities,
                                 "    - capabilities (software): ");
            }
        }
        else
        {
            hb_log(" - H.264 encoder: no");
        }
        if (hb_qsv_info_hevc != NULL && hb_qsv_info_hevc->available)
        {
            hb_log(" - H.265 encoder: yes (unsupported)");
            hb_log("    - preferred implementation: %s",
                   hb_qsv_impl_get_name(hb_qsv_info_hevc->implementation));
            if (qsv_hardware_info_hevc.available)
            {
                log_capabilities(2, qsv_hardware_info_hevc.capabilities,
                                 "    - capabilities (hardware): ");
            }
            if (qsv_software_info_hevc.available)
            {
                log_capabilities(2, qsv_software_info_hevc.capabilities,
                                 "    - capabilities (software): ");
            }
        }
        else
        {
            hb_log(" - H.265 encoder: no");
        }
    }
}
/* Look up the hb_qsv_info_t for a HandBrake encoder ID; NULL when the
 * encoder has no QSV implementation (only QSV H.264 is mapped). */
hb_qsv_info_t* hb_qsv_info_get(int encoder)
{
    return (encoder == HB_VCODEC_QSV_H264) ? hb_qsv_info_avc : NULL;
}
/* Name of the libavcodec QSV decoder for a codec ID, or NULL when no
 * QSV decoder is mapped (only H.264 is supported here). */
const char* hb_qsv_decode_get_codec_name(enum AVCodecID codec_id)
{
    if (codec_id == AV_CODEC_ID_H264)
    {
        return "h264_qsv";
    }
    return NULL;
}
/*
 * True when the job requests QSV decoding, a QSV video encoder is
 * selected, and the title's video was detected as QSV-decodable.
 */
int hb_qsv_decode_is_enabled(hb_job_t *job)
{
    return ((job != NULL && job->qsv.decode) &&
            (job->vcodec & HB_VCODEC_QSV_MASK) &&
            (job->title->video_decode_support & HB_DECODE_SUPPORT_QSV));
}
/*
 * Whether copying frames is expected to be slow for this encoder:
 * true for hardware implementations with an API older than 1.7.
 */
int hb_qsv_copyframe_is_slow(int encoder)
{
    hb_qsv_info_t *info = hb_qsv_info_get(encoder);
    if (info != NULL && qsv_implementation_is_hardware(info->implementation))
    {
        // we should really check the driver version, but since it's not
        // available, checking the API version is the best we can do :-(
        return !HB_CHECK_MFX_VERSION(qsv_hardware_version, 1, 7);
    }
    return 0;
}
/* Translate a tri-state option value (-1/0/1, plus reserved 2) into the
 * corresponding MFX_CODINGOPTION_* constant; out-of-range input clips. */
int hb_qsv_codingoption_xlat(int val)
{
    const int clipped = HB_QSV_CLIP3(-1, 2, val);
    if (clipped == 0)
    {
        return MFX_CODINGOPTION_OFF;
    }
    if (clipped == 1 || clipped == 2) // 2: MFX_CODINGOPTION_ADAPTIVE, reserved
    {
        return MFX_CODINGOPTION_ON;
    }
    return MFX_CODINGOPTION_UNKNOWN; // -1
}
/* Translate a trellis level 0..3 (off, I, I+P, I+P+B) into the
 * corresponding MFX_TRELLIS_* bitmask; out-of-range input clips. */
int hb_qsv_trellisvalue_xlat(int val)
{
    const int level = HB_QSV_CLIP3(0, 3, val);
    if (level == 1) // I-frames only
    {
        return MFX_TRELLIS_I;
    }
    if (level == 2) // I- and P-frames
    {
        return MFX_TRELLIS_I|MFX_TRELLIS_P;
    }
    if (level == 3) // all frames
    {
        return MFX_TRELLIS_I|MFX_TRELLIS_P|MFX_TRELLIS_B;
    }
    return MFX_TRELLIS_OFF; // level == 0
}
/*
 * Human-readable name for an MFX_CODINGOPTION_* value, or NULL when the
 * value is not one of the known constants.
 */
const char* hb_qsv_codingoption_get_name(int val)
{
    if (val == MFX_CODINGOPTION_ON)
    {
        return "on";
    }
    if (val == MFX_CODINGOPTION_OFF)
    {
        return "off";
    }
    if (val == MFX_CODINGOPTION_ADAPTIVE)
    {
        return "adaptive";
    }
    if (val == MFX_CODINGOPTION_UNKNOWN)
    {
        return "unknown (auto)";
    }
    return NULL;
}
/*
 * Find str in the NULL-terminated string array arr (case-insensitive).
 * Returns the matching index; when there is no match, *err is set to 1
 * and the index of the NULL terminator is returned.
 */
int hb_qsv_atoindex(const char* const *arr, const char *str, int *err)
{
    int i = 0;
    while (arr[i] != NULL && strcasecmp(arr[i], str) != 0)
    {
        i++;
    }
    *err = (arr[i] == NULL);
    return i;
}
// adapted from libx264
/*
 * Parse a boolean string ("1"/"yes"/"true" or "0"/"no"/"false",
 * case-insensitive). Unrecognized input sets *err to 1 and returns 0.
 */
int hb_qsv_atobool(const char *str, int *err)
{
    static const char * const yes_words[] = { "1", "yes", "true",  NULL };
    static const char * const no_words[]  = { "0", "no",  "false", NULL };
    int i;
    for (i = 0; yes_words[i] != NULL; i++)
    {
        if (!strcasecmp(str, yes_words[i]))
        {
            return 1;
        }
    }
    for (i = 0; no_words[i] != NULL; i++)
    {
        if (!strcasecmp(str, no_words[i]))
        {
            return 0;
        }
    }
    *err = 1;
    return 0;
}
// adapted from libx264
/*
 * Parse an integer (any base accepted by strtol, so "0x..." works).
 * Sets *err to 1 on malformed input, trailing garbage, or values that
 * don't fit in an int (the original silently truncated long to int and
 * ignored ERANGE overflow).
 */
int hb_qsv_atoi(const char *str, int *err)
{
    char *end;
    errno = 0;
    long v = strtol(str, &end, 0);
    if (end == str || end[0] != '\0' ||
        errno == ERANGE || v < INT_MIN || v > INT_MAX)
    {
        *err = 1;
    }
    return (int)v;
}
// adapted from libx264
/*
 * Parse a floating-point string. Sets *err to 1 on malformed input or
 * trailing garbage; the (possibly partial) converted value is returned
 * regardless.
 */
float hb_qsv_atof(const char *str, int *err)
{
    char *tail;
    float result = strtod(str, &tail);
    if (tail == str || *tail != '\0')
    {
        *err = 1;
    }
    return result;
}
/*
 * Parse one advanced-option key/value pair and apply it to param.
 *
 * info describes the target encoder implementation; options that the
 * implementation does not support return HB_QSV_PARAM_UNSUPPORTED.
 * Returns HB_QSV_PARAM_OK on success, otherwise an HB_QSV_PARAM_* code
 * (invalid arguments, unknown key, or a value that failed to parse).
 */
int hb_qsv_param_parse(hb_qsv_param_t *param, hb_qsv_info_t *info,
                       const char *key, const char *value)
{
    float fvalue;
    int ivalue, error = 0;
    if (param == NULL || info == NULL)
    {
        return HB_QSV_PARAM_ERROR;
    }
    // a key without a value is treated as a boolean flag being enabled
    if (value == NULL || value[0] == '\0')
    {
        value = "true";
    }
    else if (value[0] == '=')
    {
        value++;
    }
    if (key == NULL || key[0] == '\0')
    {
        return HB_QSV_PARAM_BAD_NAME;
    }
    else if (!strncasecmp(key, "no-", 3))
    {
        // "no-foo" is handled as "foo" with the boolean value inverted
        key += 3;
        value = hb_qsv_atobool(value, &error) ? "false" : "true";
        if (error)
        {
            return HB_QSV_PARAM_BAD_VALUE;
        }
    }
    if (!strcasecmp(key, "target-usage") ||
        !strcasecmp(key, "tu"))
    {
        ivalue = hb_qsv_atoi(value, &error);
        if (!error)
        {
            param->videoParam->mfx.TargetUsage = HB_QSV_CLIP3(MFX_TARGETUSAGE_1,
                                                              MFX_TARGETUSAGE_7,
                                                              ivalue);
        }
    }
    else if (!strcasecmp(key, "num-ref-frame") ||
             !strcasecmp(key, "ref"))
    {
        ivalue = hb_qsv_atoi(value, &error);
        if (!error)
        {
            param->videoParam->mfx.NumRefFrame = HB_QSV_CLIP3(0, 16, ivalue);
        }
    }
    else if (!strcasecmp(key, "gop-ref-dist"))
    {
        ivalue = hb_qsv_atoi(value, &error);
        if (!error)
        {
            param->gop.gop_ref_dist = HB_QSV_CLIP3(-1, 32, ivalue);
        }
    }
    else if (!strcasecmp(key, "gop-pic-size") ||
             !strcasecmp(key, "keyint"))
    {
        ivalue = hb_qsv_atoi(value, &error);
        if (!error)
        {
            param->gop.gop_pic_size = HB_QSV_CLIP3(-1, UINT16_MAX, ivalue);
        }
    }
    else if (!strcasecmp(key, "b-pyramid"))
    {
        if (info->capabilities & HB_QSV_CAP_B_REF_PYRAMID)
        {
            ivalue = hb_qsv_atoi(value, &error);
            if (!error)
            {
                param->gop.b_pyramid = HB_QSV_CLIP3(-1, 1, ivalue);
            }
        }
        else
        {
            return HB_QSV_PARAM_UNSUPPORTED;
        }
    }
    else if (!strcasecmp(key, "scenecut"))
    {
        // scenecut off maps to a strict (fixed) GOP structure
        ivalue = hb_qsv_atobool(value, &error);
        if (!error)
        {
            if (!ivalue)
            {
                param->videoParam->mfx.GopOptFlag |= MFX_GOP_STRICT;
            }
            else
            {
                param->videoParam->mfx.GopOptFlag &= ~MFX_GOP_STRICT;
            }
        }
    }
    else if (!strcasecmp(key, "adaptive-i") ||
             !strcasecmp(key, "i-adapt"))
    {
        if (info->capabilities & HB_QSV_CAP_OPTION2_IB_ADAPT)
        {
            ivalue = hb_qsv_atobool(value, &error);
            if (!error)
            {
                param->codingOption2.AdaptiveI = hb_qsv_codingoption_xlat(ivalue);
            }
        }
        else
        {
            return HB_QSV_PARAM_UNSUPPORTED;
        }
    }
    else if (!strcasecmp(key, "adaptive-b") ||
             !strcasecmp(key, "b-adapt"))
    {
        if (info->capabilities & HB_QSV_CAP_OPTION2_IB_ADAPT)
        {
            ivalue = hb_qsv_atobool(value, &error);
            if (!error)
            {
                param->codingOption2.AdaptiveB = hb_qsv_codingoption_xlat(ivalue);
            }
        }
        else
        {
            return HB_QSV_PARAM_UNSUPPORTED;
        }
    }
    else if (!strcasecmp(key, "force-cqp"))
    {
        // forcing CQP disables ICQ rate control
        ivalue = hb_qsv_atobool(value, &error);
        if (!error)
        {
            param->rc.icq = !ivalue;
        }
    }
    else if (!strcasecmp(key, "cqp-offset-i"))
    {
        ivalue = hb_qsv_atoi(value, &error);
        if (!error)
        {
            param->rc.cqp_offsets[0] = HB_QSV_CLIP3(INT16_MIN, INT16_MAX, ivalue);
        }
    }
    else if (!strcasecmp(key, "cqp-offset-p"))
    {
        ivalue = hb_qsv_atoi(value, &error);
        if (!error)
        {
            param->rc.cqp_offsets[1] = HB_QSV_CLIP3(INT16_MIN, INT16_MAX, ivalue);
        }
    }
    else if (!strcasecmp(key, "cqp-offset-b"))
    {
        ivalue = hb_qsv_atoi(value, &error);
        if (!error)
        {
            param->rc.cqp_offsets[2] = HB_QSV_CLIP3(INT16_MIN, INT16_MAX, ivalue);
        }
    }
    else if (!strcasecmp(key, "vbv-init"))
    {
        fvalue = hb_qsv_atof(value, &error);
        if (!error)
        {
            param->rc.vbv_buffer_init = HB_QSV_CLIP3(0, UINT16_MAX, fvalue);
        }
    }
    else if (!strcasecmp(key, "vbv-bufsize"))
    {
        ivalue = hb_qsv_atoi(value, &error);
        if (!error)
        {
            param->rc.vbv_buffer_size = HB_QSV_CLIP3(0, UINT16_MAX, ivalue);
        }
    }
    else if (!strcasecmp(key, "vbv-maxrate"))
    {
        ivalue = hb_qsv_atoi(value, &error);
        if (!error)
        {
            param->rc.vbv_max_bitrate = HB_QSV_CLIP3(0, UINT16_MAX, ivalue);
        }
    }
    else if (!strcasecmp(key, "cavlc") || !strcasecmp(key, "cabac"))
    {
        switch (info->codec_id)
        {
            case MFX_CODEC_AVC:
                ivalue = hb_qsv_atobool(value, &error);
                break;
            default:
                return HB_QSV_PARAM_UNSUPPORTED;
        }
        if (!error)
        {
            // "cabac" is simply the inverse of "cavlc"
            if (!strcasecmp(key, "cabac"))
            {
                ivalue = !ivalue;
            }
            param->codingOption.CAVLC = hb_qsv_codingoption_xlat(ivalue);
        }
    }
    else if (!strcasecmp(key, "videoformat"))
    {
        switch (info->codec_id)
        {
            case MFX_CODEC_AVC:
                ivalue = hb_qsv_atoindex(hb_h264_vidformat_names, value, &error);
                break;
            default:
                return HB_QSV_PARAM_UNSUPPORTED;
        }
        if (!error)
        {
            param->videoSignalInfo.VideoFormat = ivalue;
        }
    }
    else if (!strcasecmp(key, "fullrange"))
    {
        switch (info->codec_id)
        {
            case MFX_CODEC_AVC:
                ivalue = hb_qsv_atoindex(hb_h264_fullrange_names, value, &error);
                break;
            default:
                return HB_QSV_PARAM_UNSUPPORTED;
        }
        if (!error)
        {
            param->videoSignalInfo.VideoFullRange = ivalue;
        }
    }
    else if (!strcasecmp(key, "colorprim"))
    {
        switch (info->codec_id)
        {
            case MFX_CODEC_AVC:
                ivalue = hb_qsv_atoindex(hb_h264_colorprim_names, value, &error);
                break;
            default:
                return HB_QSV_PARAM_UNSUPPORTED;
        }
        if (!error)
        {
            param->videoSignalInfo.ColourDescriptionPresent = 1;
            param->videoSignalInfo.ColourPrimaries = ivalue;
        }
    }
    else if (!strcasecmp(key, "transfer"))
    {
        switch (info->codec_id)
        {
            case MFX_CODEC_AVC:
                ivalue = hb_qsv_atoindex(hb_h264_transfer_names, value, &error);
                break;
            default:
                return HB_QSV_PARAM_UNSUPPORTED;
        }
        if (!error)
        {
            param->videoSignalInfo.ColourDescriptionPresent = 1;
            param->videoSignalInfo.TransferCharacteristics = ivalue;
        }
    }
    else if (!strcasecmp(key, "colormatrix"))
    {
        switch (info->codec_id)
        {
            case MFX_CODEC_AVC:
                ivalue = hb_qsv_atoindex(hb_h264_colmatrix_names, value, &error);
                break;
            default:
                return HB_QSV_PARAM_UNSUPPORTED;
        }
        if (!error)
        {
            param->videoSignalInfo.ColourDescriptionPresent = 1;
            param->videoSignalInfo.MatrixCoefficients = ivalue;
        }
    }
    else if (!strcasecmp(key, "tff") ||
             !strcasecmp(key, "interlaced"))
    {
        switch (info->codec_id)
        {
            case MFX_CODEC_AVC:
                ivalue = hb_qsv_atobool(value, &error);
                break;
            default:
                return HB_QSV_PARAM_UNSUPPORTED;
        }
        if (!error)
        {
            param->videoParam->mfx.FrameInfo.PicStruct = (ivalue ?
                                                          MFX_PICSTRUCT_FIELD_TFF :
                                                          MFX_PICSTRUCT_PROGRESSIVE);
        }
    }
    else if (!strcasecmp(key, "bff"))
    {
        switch (info->codec_id)
        {
            case MFX_CODEC_AVC:
                ivalue = hb_qsv_atobool(value, &error);
                break;
            default:
                return HB_QSV_PARAM_UNSUPPORTED;
        }
        if (!error)
        {
            param->videoParam->mfx.FrameInfo.PicStruct = (ivalue ?
                                                          MFX_PICSTRUCT_FIELD_BFF :
                                                          MFX_PICSTRUCT_PROGRESSIVE);
        }
    }
    else if (!strcasecmp(key, "mbbrc"))
    {
        if (info->capabilities & HB_QSV_CAP_OPTION2_MBBRC)
        {
            ivalue = hb_qsv_atobool(value, &error);
            if (!error)
            {
                param->codingOption2.MBBRC = hb_qsv_codingoption_xlat(ivalue);
            }
        }
        else
        {
            return HB_QSV_PARAM_UNSUPPORTED;
        }
    }
    else if (!strcasecmp(key, "extbrc"))
    {
        if (info->capabilities & HB_QSV_CAP_OPTION2_EXTBRC)
        {
            ivalue = hb_qsv_atobool(value, &error);
            if (!error)
            {
                param->codingOption2.ExtBRC = hb_qsv_codingoption_xlat(ivalue);
            }
        }
        else
        {
            return HB_QSV_PARAM_UNSUPPORTED;
        }
    }
    else if (!strcasecmp(key, "lookahead") ||
             !strcasecmp(key, "la"))
    {
        if (info->capabilities & HB_QSV_CAP_RATECONTROL_LA)
        {
            ivalue = hb_qsv_atobool(value, &error);
            if (!error)
            {
                param->rc.lookahead = ivalue;
            }
        }
        else
        {
            return HB_QSV_PARAM_UNSUPPORTED;
        }
    }
    else if (!strcasecmp(key, "lookahead-depth") ||
             !strcasecmp(key, "la-depth"))
    {
        if (info->capabilities & HB_QSV_CAP_RATECONTROL_LA)
        {
            ivalue = hb_qsv_atoi(value, &error);
            if (!error)
            {
                // LookAheadDepth 10 will cause a hang with some driver versions
                param->codingOption2.LookAheadDepth = HB_QSV_CLIP3(11, 100,
                                                                   ivalue);
            }
        }
        else
        {
            return HB_QSV_PARAM_UNSUPPORTED;
        }
    }
    else if (!strcasecmp(key, "lookahead-ds") ||
             !strcasecmp(key, "la-ds"))
    {
        if (info->capabilities & HB_QSV_CAP_OPTION2_LA_DOWNS)
        {
            ivalue = hb_qsv_atoi(value, &error);
            if (!error)
            {
                param->codingOption2.LookAheadDS = HB_QSV_CLIP3(MFX_LOOKAHEAD_DS_UNKNOWN,
                                                                MFX_LOOKAHEAD_DS_4x,
                                                                ivalue);
            }
        }
        else
        {
            return HB_QSV_PARAM_UNSUPPORTED;
        }
    }
    else if (!strcasecmp(key, "trellis"))
    {
        if (info->capabilities & HB_QSV_CAP_OPTION2_TRELLIS)
        {
            ivalue = hb_qsv_atoi(value, &error);
            if (!error)
            {
                param->codingOption2.Trellis = hb_qsv_trellisvalue_xlat(ivalue);
            }
        }
        else
        {
            return HB_QSV_PARAM_UNSUPPORTED;
        }
    }
    else
    {
        /*
         * TODO:
         * - slice count (num-slice/slices, num-mb-per-slice/slice-max-mbs)
         * - open-gop
         * - fake-interlaced (mfxExtCodingOption.FramePicture???)
         * - intra-refresh
         */
        return HB_QSV_PARAM_BAD_NAME;
    }
    return error ? HB_QSV_PARAM_BAD_VALUE : HB_QSV_PARAM_OK;
}
#ifdef HB_API_OLD_PRESET_GETTERS
/* Legacy wrapper (see HB_API_OLD_PRESET_GETTERS); forwards to
 * hb_qsv_preset_get_names(). */
const char* const* hb_qsv_presets()
{
    return hb_qsv_preset_get_names();
}
#endif
/*
 * Return the QSV preset name list appropriate for this machine:
 * one list for 3rd-generation-or-newer QSV hardware, another otherwise.
 */
const char* const* hb_qsv_preset_get_names()
{
    if (qsv_hardware_generation(hb_get_cpu_platform()) >= QSV_G3)
    {
        return hb_qsv_preset_names2;
    }
    else
    {
        return hb_qsv_preset_names1;
    }
}
/*
 * Valid profile names for a QSV encoder, or NULL for unknown encoders.
 */
const char* const* hb_qsv_profile_get_names(int encoder)
{
    return (encoder == HB_VCODEC_QSV_H264) ? hb_h264_profile_names : NULL;
}
/*
 * Valid level names for a QSV encoder, or NULL for unknown encoders.
 */
const char* const* hb_qsv_level_get_names(int encoder)
{
    return (encoder == HB_VCODEC_QSV_H264) ? hb_h264_level_names : NULL;
}
/*
 * Name of the constant-quality rate control mode for a codec:
 * "ICQ" when the H.264 implementation supports it, "QP" otherwise.
 */
const char* hb_qsv_video_quality_get_name(uint32_t codec)
{
    if (codec == HB_VCODEC_QSV_H264)
    {
        uint64_t caps = (hb_qsv_info_avc != NULL) ? hb_qsv_info_avc->capabilities : 0;
        if (caps & HB_QSV_CAP_RATECONTROL_ICQ)
        {
            return "ICQ";
        }
    }
    return "QP";
}
/*
 * Report the constant-quality slider limits for a codec. Lower values
 * mean higher quality (direction == 1), with integer granularity; the
 * lower bound is 1 instead of 0 when H.264 ICQ is available.
 */
void hb_qsv_video_quality_get_limits(uint32_t codec, float *low, float *high,
                                     float *granularity, int *direction)
{
    // common defaults for every codec
    *direction   = 1;
    *granularity = 1.;
    *low         = 0.;
    *high        = 51.;
    if (codec == HB_VCODEC_QSV_H264)
    {
        uint64_t caps = (hb_qsv_info_avc != NULL) ? hb_qsv_info_avc->capabilities : 0;
        if (caps & HB_QSV_CAP_RATECONTROL_ICQ)
        {
            *low = 1.; // ICQ quality starts at 1
        }
    }
}
/*
 * Initialize param/videoParam to the libhb defaults (hb_qsv_param_default)
 * and then apply the named preset ("quality", "balanced" or "speed").
 * A NULL/empty preset leaves the defaults untouched.
 * Returns 0 on success, -1 on invalid pointers or an unknown preset name.
 */
int hb_qsv_param_default_preset(hb_qsv_param_t *param,
                                mfxVideoParam *videoParam,
                                hb_qsv_info_t *info, const char *preset)
{
    if (param != NULL && videoParam != NULL && info != NULL)
    {
        int ret = hb_qsv_param_default(param, videoParam, info);
        if (ret)
        {
            return ret;
        }
    }
    else
    {
        hb_error("hb_qsv_param_default_preset: invalid pointer(s)");
        return -1;
    }
    if (preset != NULL && preset[0] != '\0')
    {
        if (!strcasecmp(preset, "quality"))
        {
            /*
             * HSW TargetUsage: 2
             *     NumRefFrame: 0
             *     GopRefDist: 4 (CQP), 3 (VBR) -> -1 (set by encoder)
             *     GopPicSize: 32 (CQP), 1 second (VBR) -> -1 (set by encoder)
             *     BPyramid: 1 (CQP), 0 (VBR) -> -1 (set by encoder)
             *     LookAhead: 1 (on)
             *     LookAheadDepth: 40
             *
             *
             * SNB
             * IVB Preset Not Available
             *
             * Note: this preset is the libhb default (like x264's "medium").
             */
        }
        else if (!strcasecmp(preset, "balanced"))
        {
            /*
             * HSW TargetUsage: 4
             *     NumRefFrame: 1
             *     GopRefDist: 4 (CQP), 3 (VBR) -> -1 (set by encoder)
             *     GopPicSize: 32 (CQP), 1 second (VBR) -> -1 (set by encoder)
             *     BPyramid: 1 (CQP), 0 (VBR) -> -1 (set by encoder)
             *     LookAhead: 0 (off)
             *     LookAheadDepth: Not Applicable
             */
            if (qsv_hardware_generation(hb_get_cpu_platform()) >= QSV_G3)
            {
                param->rc.lookahead = 0;
                param->videoParam->mfx.NumRefFrame = 1;
                param->videoParam->mfx.TargetUsage = MFX_TARGETUSAGE_4;
            }
            else
            {
                /*
                 * SNB
                 * IVB TargetUsage: 2
                 *     NumRefFrame: 0
                 *     GopRefDist: 4 (CQP), 3 (VBR) -> -1 (set by encoder)
                 *     GopPicSize: 32 (CQP), 1 second (VBR) -> -1 (set by encoder)
                 *     BPyramid: Not Applicable
                 *     LookAhead: Not Applicable
                 *     LookAheadDepth: Not Applicable
                 *
                 * Note: this preset is not the libhb default,
                 * but the settings are the same so do nothing.
                 */
            }
        }
        else if (!strcasecmp(preset, "speed"))
        {
            if (qsv_hardware_generation(hb_get_cpu_platform()) >= QSV_G3)
            {
                /*
                 * HSW TargetUsage: 6
                 *     NumRefFrame: 0 (CQP), 1 (VBR) -> see note
                 *     GopRefDist: 4 (CQP), 3 (VBR) -> -1 (set by encoder)
                 *     GopPicSize: 32 (CQP), 1 second (VBR) -> -1 (set by encoder)
                 *     BPyramid: 1 (CQP), 0 (VBR) -> -1 (set by encoder)
                 *     LookAhead: 0 (off)
                 *     LookAheadDepth: Not Applicable
                 *
                 * Note: NumRefFrame depends on the RC method, which we don't
                 *       know here. Rather than have an additional variable and
                 *       having the encoder set it, we set it to 1 and let the
                 *       B-pyramid code sanitize it. Since BPyramid is 1 w/CQP,
                 *       the result (3) is the same as what MSDK would pick for
                 *       NumRefFrame 0 GopRefDist 4 GopPicSize 32.
                 */
                param->rc.lookahead = 0;
                param->videoParam->mfx.NumRefFrame = 1;
                param->videoParam->mfx.TargetUsage = MFX_TARGETUSAGE_6;
            }
            else
            {
                /*
                 * SNB
                 * IVB TargetUsage: 4
                 *     NumRefFrame: 0
                 *     GopRefDist: 4 (CQP), 3 (VBR) -> -1 (set by encoder)
                 *     GopPicSize: 32 (CQP), 1 second (VBR) -> -1 (set by encoder)
                 *     BPyramid: Not Applicable
                 *     LookAhead: Not Applicable
                 *     LookAheadDepth: Not Applicable
                 */
                param->videoParam->mfx.TargetUsage = MFX_TARGETUSAGE_4;
            }
        }
        else
        {
            hb_error("hb_qsv_param_default_preset: invalid preset '%s'", preset);
            return -1;
        }
    }
    return 0;
}
/*
 * Initialize param and the caller-provided mfxVideoParam with libhb's
 * default QSV encoding settings, and attach the supported mfxExtBuffer
 * structures (codingOption, videoSignalInfo, and codingOption2 when the
 * implementation supports API 1.6). Returns 0 on success, -1 when any
 * pointer is NULL.
 *
 * Fix: the "&param" expressions in the memset() calls and the ExtParam
 * attachments had been corrupted to "¶m" (mojibake for "&para" + "m"),
 * which does not compile; restored to "&param".
 */
int hb_qsv_param_default(hb_qsv_param_t *param, mfxVideoParam *videoParam,
                         hb_qsv_info_t *info)
{
    if (param != NULL && videoParam != NULL && info != NULL)
    {
        // introduced in API 1.0
        memset(&param->codingOption, 0, sizeof(mfxExtCodingOption));
        param->codingOption.Header.BufferId = MFX_EXTBUFF_CODING_OPTION;
        param->codingOption.Header.BufferSz = sizeof(mfxExtCodingOption);
        param->codingOption.MECostType = 0; // reserved, must be 0
        param->codingOption.MESearchType = 0; // reserved, must be 0
        param->codingOption.MVSearchWindow.x = 0; // reserved, must be 0
        param->codingOption.MVSearchWindow.y = 0; // reserved, must be 0
        param->codingOption.RefPicListReordering = 0; // reserved, must be 0
        param->codingOption.IntraPredBlockSize = 0; // reserved, must be 0
        param->codingOption.InterPredBlockSize = 0; // reserved, must be 0
        param->codingOption.MVPrecision = 0; // reserved, must be 0
        param->codingOption.EndOfSequence = MFX_CODINGOPTION_UNKNOWN;
        param->codingOption.RateDistortionOpt = MFX_CODINGOPTION_UNKNOWN;
        param->codingOption.ResetRefList = MFX_CODINGOPTION_UNKNOWN;
        param->codingOption.MaxDecFrameBuffering = 0; // unspecified
        param->codingOption.AUDelimiter = MFX_CODINGOPTION_OFF;
        param->codingOption.SingleSeiNalUnit = MFX_CODINGOPTION_UNKNOWN;
        param->codingOption.PicTimingSEI = MFX_CODINGOPTION_OFF;
        param->codingOption.VuiNalHrdParameters = MFX_CODINGOPTION_UNKNOWN;
        param->codingOption.FramePicture = MFX_CODINGOPTION_UNKNOWN;
        param->codingOption.CAVLC = MFX_CODINGOPTION_OFF;
        // introduced in API 1.3
        param->codingOption.RefPicMarkRep = MFX_CODINGOPTION_UNKNOWN;
        param->codingOption.FieldOutput = MFX_CODINGOPTION_UNKNOWN;
        param->codingOption.NalHrdConformance = MFX_CODINGOPTION_UNKNOWN;
        param->codingOption.SingleSeiNalUnit = MFX_CODINGOPTION_UNKNOWN;
        param->codingOption.VuiVclHrdParameters = MFX_CODINGOPTION_UNKNOWN;
        // introduced in API 1.4
        param->codingOption.ViewOutput = MFX_CODINGOPTION_UNKNOWN;
        // introduced in API 1.6
        param->codingOption.RecoveryPointSEI = MFX_CODINGOPTION_UNKNOWN;
        // introduced in API 1.3
        memset(&param->videoSignalInfo, 0, sizeof(mfxExtVideoSignalInfo));
        param->videoSignalInfo.Header.BufferId = MFX_EXTBUFF_VIDEO_SIGNAL_INFO;
        param->videoSignalInfo.Header.BufferSz = sizeof(mfxExtVideoSignalInfo);
        param->videoSignalInfo.VideoFormat = 5; // undefined
        param->videoSignalInfo.VideoFullRange = 0; // TV range
        param->videoSignalInfo.ColourDescriptionPresent = 0; // don't write to bitstream
        param->videoSignalInfo.ColourPrimaries = 2; // undefined
        param->videoSignalInfo.TransferCharacteristics = 2; // undefined
        param->videoSignalInfo.MatrixCoefficients = 2; // undefined
        // introduced in API 1.6
        memset(&param->codingOption2, 0, sizeof(mfxExtCodingOption2));
        param->codingOption2.Header.BufferId = MFX_EXTBUFF_CODING_OPTION2;
        param->codingOption2.Header.BufferSz = sizeof(mfxExtCodingOption2);
        param->codingOption2.IntRefType = 0;
        param->codingOption2.IntRefCycleSize = 2;
        param->codingOption2.IntRefQPDelta = 0;
        param->codingOption2.MaxFrameSize = 0;
        param->codingOption2.BitrateLimit = MFX_CODINGOPTION_ON;
        param->codingOption2.MBBRC = MFX_CODINGOPTION_ON;
        param->codingOption2.ExtBRC = MFX_CODINGOPTION_OFF;
        // introduced in API 1.7
        param->codingOption2.LookAheadDepth = 40;
        param->codingOption2.Trellis = MFX_TRELLIS_OFF;
        // introduced in API 1.8
        param->codingOption2.RepeatPPS = MFX_CODINGOPTION_ON;
        param->codingOption2.BRefType = MFX_B_REF_UNKNOWN; // controlled via gop.b_pyramid
        param->codingOption2.AdaptiveI = MFX_CODINGOPTION_OFF;
        param->codingOption2.AdaptiveB = MFX_CODINGOPTION_OFF;
        param->codingOption2.LookAheadDS = MFX_LOOKAHEAD_DS_OFF;
        param->codingOption2.NumMbPerSlice = 0;
        // GOP & rate control
        param->gop.b_pyramid = -1; // set automatically
        param->gop.gop_pic_size = -1; // set automatically
        param->gop.gop_ref_dist = -1; // set automatically
        param->gop.int_ref_cycle_size = -1; // set automatically
        param->rc.icq = 1; // enabled by default (if supported)
        param->rc.lookahead = 1; // enabled by default (if supported)
        param->rc.cqp_offsets[0] = 0;
        param->rc.cqp_offsets[1] = 2;
        param->rc.cqp_offsets[2] = 4;
        param->rc.vbv_max_bitrate = 0; // set automatically
        param->rc.vbv_buffer_size = 0; // set automatically
        param->rc.vbv_buffer_init = .0; // set automatically
        // introduced in API 1.0
        memset(videoParam, 0, sizeof(mfxVideoParam));
        param->videoParam = videoParam;
        param->videoParam->Protected = 0; // reserved, must be 0
        param->videoParam->NumExtParam = 0;
        param->videoParam->IOPattern = MFX_IOPATTERN_IN_SYSTEM_MEMORY;
        param->videoParam->mfx.TargetUsage = MFX_TARGETUSAGE_2;
        param->videoParam->mfx.GopOptFlag = MFX_GOP_CLOSED;
        param->videoParam->mfx.NumThread = 0; // deprecated, must be 0
        param->videoParam->mfx.EncodedOrder = 0; // input is in display order
        param->videoParam->mfx.IdrInterval = 0; // all I-frames are IDR
        param->videoParam->mfx.NumSlice = 0; // use Media SDK default
        param->videoParam->mfx.NumRefFrame = 0; // use Media SDK default
        param->videoParam->mfx.GopPicSize = 0; // use Media SDK default
        param->videoParam->mfx.GopRefDist = 0; // use Media SDK default
        // introduced in API 1.1
        param->videoParam->AsyncDepth = AV_QSV_ASYNC_DEPTH_DEFAULT;
        // introduced in API 1.3
        param->videoParam->mfx.BRCParamMultiplier = 0; // no multiplier
        // FrameInfo: set by video encoder, except PicStruct
        param->videoParam->mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_PROGRESSIVE;
        // attach supported mfxExtBuffer structures to the mfxVideoParam
        param->videoParam->NumExtParam = 0;
        param->videoParam->ExtParam = param->ExtParamArray;
        param->videoParam->ExtParam[param->videoParam->NumExtParam++] = (mfxExtBuffer*)&param->codingOption;
        param->videoParam->ExtParam[param->videoParam->NumExtParam++] = (mfxExtBuffer*)&param->videoSignalInfo;
        if (info->capabilities & HB_QSV_CAP_MSDK_API_1_6)
        {
            param->videoParam->ExtParam[param->videoParam->NumExtParam++] = (mfxExtBuffer*)&param->codingOption2;
        }
    }
    else
    {
        hb_error("hb_qsv_param_default: invalid pointer(s)");
        return -1;
    }
    return 0;
}
/*
 * Human-readable name for an MFX frame type bitmask, for logging.
 * The reference flag is appended as " (ref)" when present.
 */
const char* hb_qsv_frametype_name(uint16_t qsv_frametype)
{
    int is_ref = (qsv_frametype & MFX_FRAMETYPE_REF) != 0;
    if (qsv_frametype & MFX_FRAMETYPE_IDR)
    {
        return is_ref ? "IDR (ref)" : "IDR";
    }
    if (qsv_frametype & MFX_FRAMETYPE_I)
    {
        return is_ref ? "I (ref)" : "I";
    }
    if (qsv_frametype & MFX_FRAMETYPE_P)
    {
        return is_ref ? "P (ref)" : "P";
    }
    if (qsv_frametype & MFX_FRAMETYPE_B)
    {
        return is_ref ? "B (ref)" : "B";
    }
    return "unknown";
}
/*
 * Translate an MFX frame type bitmask into HandBrake's frame type.
 * When out_flags is non-NULL, it receives HB_FRAME_REF if the frame is
 * a reference frame, 0 otherwise. Unrecognized types map to 0.
 */
uint8_t hb_qsv_frametype_xlat(uint16_t qsv_frametype, uint16_t *out_flags)
{
    uint8_t frametype = 0;
    if (qsv_frametype & MFX_FRAMETYPE_IDR)
    {
        frametype = HB_FRAME_IDR;
    }
    else if (qsv_frametype & MFX_FRAMETYPE_I)
    {
        frametype = HB_FRAME_I;
    }
    else if (qsv_frametype & MFX_FRAMETYPE_P)
    {
        frametype = HB_FRAME_P;
    }
    else if (qsv_frametype & MFX_FRAMETYPE_B)
    {
        frametype = HB_FRAME_B;
    }
    if (out_flags != NULL)
    {
        *out_flags = (qsv_frametype & MFX_FRAMETYPE_REF) ? HB_FRAME_REF : 0;
    }
    return frametype;
}
int hb_qsv_impl_set_preferred(const char *name)
{
if (name == NULL)
{
return -1;
}
if (!strcasecmp(name, "software"))
{
if (qsv_software_info_avc.available)
{
hb_qsv_info_avc = &qsv_software_info_avc;
}
if (qsv_software_info_hevc.available)
{
hb_qsv_info_hevc = &qsv_software_info_hevc;
}
return 0;
}
if (!strcasecmp(name, "hardware"))
{
if (qsv_hardware_info_avc.available)
{
hb_qsv_info_avc = &qsv_hardware_info_avc;
}
if (qsv_hardware_info_hevc.available)
{
hb_qsv_info_hevc = &qsv_hardware_info_hevc;
}
return 0;
}
return -1;
}
/*
 * Human-readable name for an MFX implementation value (only the base
 * type bits are considered). Returns NULL for unknown values.
 */
const char* hb_qsv_impl_get_name(int impl)
{
    switch (MFX_IMPL_BASETYPE(impl))
    {
        case MFX_IMPL_SOFTWARE:
            return "software";
        case MFX_IMPL_HARDWARE:
            return "hardware (1)";
        case MFX_IMPL_HARDWARE2:
            return "hardware (2)";
        case MFX_IMPL_HARDWARE3:
            return "hardware (3)";
        case MFX_IMPL_HARDWARE4:
            return "hardware (4)";
        case MFX_IMPL_HARDWARE_ANY:
            return "hardware (any)";
        case MFX_IMPL_AUTO:
            return "automatic";
        case MFX_IMPL_AUTO_ANY:
            return "automatic (any)";
        default:
            return NULL;
    }
}
/*
 * Force workarounds by masking the API 1.6 capability bit off every
 * probed implementation, disabling the corresponding code paths.
 */
void hb_qsv_force_workarounds()
{
    qsv_software_info_avc.capabilities &= ~HB_QSV_CAP_MSDK_API_1_6;
    qsv_hardware_info_avc.capabilities &= ~HB_QSV_CAP_MSDK_API_1_6;
    qsv_software_info_hevc.capabilities &= ~HB_QSV_CAP_MSDK_API_1_6;
    qsv_hardware_info_hevc.capabilities &= ~HB_QSV_CAP_MSDK_API_1_6;
}
#endif // USE_QSV
HandBrake-0.10.2/libhb/h265_common.h 0000664 0001752 0001752 00000003263 12470166275 017342 0 ustar handbrake handbrake /* h265_common.h
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#ifndef HB_H265_COMMON_H
#define HB_H265_COMMON_H
// Shared H.265/HEVC constant tables (names are NULL-terminated).
static const char * const hb_h265_tier_names[] = { "auto", "main", "high", NULL, };
static const char * const hb_h265_profile_names[] = { "auto", "main", "mainstillpicture", NULL, };
static const char * const hb_h265_level_names[] = { "auto", "1.0", "2.0", "2.1", "3.0", "3.1", "4.0", "4.1", "5.0", "5.1", "5.2", "6.0", "6.1", "6.2", NULL, };
// Numeric level IDs (level * 30), parallel to hb_h265_level_names;
// -1 means "auto" and the trailing 0 is a terminator.
// Fix: was "static const int const" -- duplicate type qualifier.
static const int hb_h265_level_values[] = { -1, 30, 60, 63, 90, 93, 120, 123, 150, 153, 156, 180, 183, 186, 0, };
// stolen from libx265's x265.h
static const char * const hb_h265_fullrange_names[] = { "limited", "full", NULL, };
static const char * const hb_h265_vidformat_names[] = { "component", "pal", "ntsc", "secam", "mac", "undef", NULL, };
static const char * const hb_h265_colorprim_names[] = { "", "bt709", "undef", "", "bt470m", "bt470bg", "smpte170m", "smpte240m", "film", "bt2020", NULL, };
static const char * const hb_h265_transfer_names[] = { "", "bt709", "undef", "", "bt470m", "bt470bg", "smpte170m", "smpte240m", "linear", "log100", "log316", "iec61966-2-4", "bt1361e", "iec61966-2-1", "bt2020-10", "bt2020-12", NULL, };
static const char * const hb_h265_colmatrix_names[] = { "GBR", "bt709", "undef", "", "fcc", "bt470bg", "smpte170m", "smpte240m", "YCgCo", "bt2020nc", "bt2020c", NULL, };
#endif //HB_H265_COMMON_H
HandBrake-0.10.2/test/ 0000775 0001752 0001752 00000000000 12535641635 015030 5 ustar handbrake handbrake HandBrake-0.10.2/test/parsecsv.c 0000664 0001752 0001752 00000011224 12463330511 017006 0 ustar handbrake handbrake /* parsecsv.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include
#include "hb.h"
#include "parsecsv.h"
/* Internal declarations */
// character classification helpers used by the parser state machine
// (13 = CR, 11 = VT, 10 = LF)
#define is_newline(_x) ( (_x) == 13 || \
(_x) == 11 || \
(_x) == 10 )
#define is_white(_x) ( (_x) == '\t' || \
(_x) == ' ' || \
is_newline(_x) )
#define is_sep(_x) ( (_x) == ',' )
#define is_esc(_x) ( (_x) == '\\' )
// out-of-band codes returned by hb_parse_character() (above the 8-bit range)
#define CSV_CHAR_ERROR 0x8000
#define CSV_CHAR_EOF 0x4000
#define CSV_CHAR_ROWSEP 0x2000
#define CSV_CHAR_COLSEP 0x1000
// parser states: NORMAL = inside cell text, SEEK = skipping leading
// whitespace, ESC = previous character was a backslash
#define CSV_PARSE_NORMAL 0x0000
#define CSV_PARSE_SEEK 0x0001
#define CSV_PARSE_ESC 0x0002
static uint16_t hb_parse_character( hb_csv_file_t * file );
static void hb_trim_end( char *text );
/* Open a CSV File */
hb_csv_file_t *hb_open_csv_file( const char *filepath )
{
hb_csv_file_t *file = NULL;
FILE * fileref;
if( filepath == NULL )
{
return file;
}
fileref = hb_fopen(filepath, "r");
if( fileref == NULL )
{
return file;
}
file = malloc( sizeof( hb_csv_file_t ) );
if( file == NULL )
{
return file;
}
file->fileref = fileref;
file->eof = 0;
file->parse_state = CSV_PARSE_SEEK;
file->curr_col = 0;
file->curr_row = 0;
return file;
}
/*
 * Release a parser handle created by hb_open_csv_file().
 * A NULL handle is ignored.
 */
void hb_close_csv_file( hb_csv_file_t *file )
{
    if( file != NULL )
    {
        fclose( file->fileref );
        free( file );
    }
}
/* Parse CSV Cells */
/*
 * Read the next cell from the file. Returns a newly allocated cell
 * (release with hb_dispose_cell()) holding the trimmed cell text and its
 * row/column position, or NULL on bad arguments, end of file, or
 * allocation failure.
 */
hb_csv_cell_t *hb_read_next_cell( hb_csv_file_t *file )
{
    hb_csv_cell_t *cell = NULL;
    uint16_t c;
    int index;
    if( file == NULL )
    {
        return cell;
    }
    if( file->eof )
    {
        return cell;
    }
    cell = malloc( sizeof( hb_csv_cell_t ) );
    if( cell == NULL )
    {
        return cell;
    }
    cell->cell_row = file->curr_row;
    cell->cell_col = file->curr_col;
    index = 0;
    // accumulate characters until a row/column separator or EOF
    while( CSV_CHAR_EOF != (c = hb_parse_character( file ) ) )
    {
        if( c == CSV_CHAR_ROWSEP )
        {
            file->curr_row++;
            file->curr_col = 0;
            break;
        }
        else if( c == CSV_CHAR_COLSEP )
        {
            file->curr_col++;
            break;
        }
        else
        {
            // cells longer than the buffer are silently truncated
            // (index is capped at 1023, leaving room for the terminator)
            if( index < 1023 )
            {
                cell->cell_text[index] = (char)c;
                index++;
            }
        }
    }
    if( c == CSV_CHAR_EOF )
    {
        file->eof = 1;
    }
    /* Terminate the cell text */
    cell->cell_text[index] = '\0';
    hb_trim_end( cell->cell_text );
    return cell;
}
/*
 * Free a cell returned by hb_read_next_cell().
 * free(NULL) is a no-op, so no explicit NULL check is needed.
 */
void hb_dispose_cell( hb_csv_cell_t *cell )
{
    free( cell );
}
/* Raw parsing */
/*
 * Read the next logical character from the stream, driving the parser
 * state machine: leading whitespace is skipped, a backslash escapes the
 * following character, and separators are reported out-of-band.
 * Returns the character, or one of the CSV_CHAR_* codes: COLSEP/ROWSEP
 * for separators, EOF at end of file, ERROR on a stream error or NULL
 * file.
 */
static uint16_t hb_parse_character( hb_csv_file_t * file )
{
    int byte;
    uint16_t c = 0;
    int need_char = 1;
    if( file == NULL )
    {
        return CSV_CHAR_ERROR;
    }
    while( need_char )
    {
        byte = fgetc( file->fileref );
        if( feof( file->fileref ) )
        {
            return CSV_CHAR_EOF;
        }
        if( ferror( file->fileref ) )
        {
            return CSV_CHAR_ERROR;
        }
        // SEEK state: skip whitespace before cell content
        if( file->parse_state == CSV_PARSE_SEEK && is_white(byte) )
        {
            continue;
        }
        // an unescaped backslash escapes the next character
        else if( file->parse_state != CSV_PARSE_ESC && is_esc(byte) )
        {
            file->parse_state = CSV_PARSE_ESC;
            continue;
        }
        // an unescaped comma ends the current column
        else if( file->parse_state != CSV_PARSE_ESC && is_sep(byte) )
        {
            file->parse_state = CSV_PARSE_SEEK;
            need_char = 0;
            c = CSV_CHAR_COLSEP;
        }
        // escaped character: return it literally (even separators/newlines)
        else if( file->parse_state == CSV_PARSE_ESC )
        {
            file->parse_state = CSV_PARSE_NORMAL;
            need_char = 0;
            c = (uint16_t)byte;
        }
        // a newline ends the current row
        else if( is_newline(byte) )
        {
            file->parse_state = CSV_PARSE_SEEK;
            need_char = 0;
            c = CSV_CHAR_ROWSEP;
        }
        else
        {
            file->parse_state = CSV_PARSE_NORMAL;
            need_char = 0;
            c = (uint16_t)byte;
        }
    }
    return c;
}
/*
 * Strip trailing whitespace (spaces, tabs, newlines) from text in place.
 * A NULL pointer is ignored.
 *
 * Fix: removed the dead store ("int i = strlen(text) - 1;" immediately
 * overwritten by the for-loop initializer).
 */
static void hb_trim_end( char *text )
{
    if( text == NULL )
    {
        return;
    }
    int i;
    for( i = strlen(text) - 1; i >= 0 && is_white(text[i]) ; i-- )
    {
        text[i] = '\0';
    }
}
HandBrake-0.10.2/test/fakexcode.cpp 0000664 0001752 0001752 00000000041 11026035545 017447 0 ustar handbrake handbrake /*
* Force Xcode to use g++
*/
HandBrake-0.10.2/test/module.defs 0000664 0001752 0001752 00000003463 12417602031 017150 0 ustar handbrake handbrake $(eval $(call import.MODULE.defs,TEST,test,LIBHB))
$(eval $(call import.GCC,TEST))
TEST.src/ = $(SRC/)test/
TEST.build/ = $(BUILD/)test/
TEST.c = $(wildcard $(TEST.src/)*.c)
TEST.c.o = $(patsubst $(SRC/)%.c,$(BUILD/)%.o,$(TEST.c))
TEST.exe = $(BUILD/)$(call TARGET.exe,$(HB.name)CLI)
TEST.GCC.L = $(CONTRIB.build/)lib
TEST.libs = $(LIBHB.a)
TEST.GCC.l = \
ass avcodec avformat avutil avresample dvdnav dvdread \
fontconfig fribidi mp3lame ogg \
samplerate swscale vpx theoraenc theoradec vorbis vorbisenc x264 \
bluray freetype xml2 bz2 z
ifeq (1,$(FEATURE.qsv))
TEST.GCC.D += USE_QSV HAVE_THREADS=1
endif
ifeq (1,$(FEATURE.x265))
TEST.GCC.D += USE_X265
endif
TEST.GCC.l += $(foreach m,$(MODULES.NAMES),$($m.OSL.libs))
TEST.install.exe = $(DESTDIR)$(PREFIX/)bin/$(notdir $(TEST.exe))
###############################################################################
TEST.out += $(TEST.c.o)
TEST.out += $(TEST.exe)
BUILD.out += $(TEST.out)
BUILD.out += $(TEST.install.exe)
###############################################################################
TEST.GCC.I += $(LIBHB.GCC.I)
ifeq ($(BUILD.system),darwin)
TEST.GCC.f += IOKit CoreServices AudioToolbox
TEST.GCC.l += iconv
else ifeq ($(BUILD.system),linux)
TEST.GCC.l += pthread dl m
else ifeq ($(BUILD.system),kfreebsd)
TEST.GCC.l += pthread dl m
else ifeq ($(BUILD.system),solaris)
TEST.GCC.l += pthread nsl socket iconv
TEST.GCC.D += _POSIX_C_SOURCE=200112L __EXTENSIONS__
else ifeq (1-mingw,$(BUILD.cross)-$(BUILD.system))
ifeq ($(HAS.dlfcn),1)
TEST.GCC.l += dl
endif
ifeq (1,$(FEATURE.hwd))
TEST.GCC.D += USE_HWD
endif
TEST.GCC.l += pthreadGC2 iconv ws2_32 regex
TEST.GCC.D += PTW32_STATIC_LIB
TEST.GCC.args.extra.exe++ += -static
endif # (1-mingw,$(BUILD.cross)-$(BUILD.system))
HandBrake-0.10.2/test/module.rules 0000664 0001752 0001752 00000001673 11360657735 017403 0 ustar handbrake handbrake $(eval $(call import.MODULE.rules,TEST))
test.build: $(TEST.exe)
$(TEST.exe): | $(dir $(TEST.exe))
$(TEST.exe): $(TEST.c.o)
$(call TEST.GCC.EXE++,$@,$^ $(TEST.libs))
$(TEST.c.o): $(LIBHB.a)
$(TEST.c.o): | $(dir $(TEST.c.o))
$(TEST.c.o): $(BUILD/)%.o: $(SRC/)%.c
$(call TEST.GCC.C_O,$@,$<)
test.clean:
$(RM.exe) -f $(TEST.out)
###############################################################################
build: test.build
clean: test.clean
###############################################################################
## skip install/uninstall on darwin
ifneq ($(BUILD.system),darwin)
test.install-strip: | $(dir $(TEST.install.exe))
$(CP.exe) $(TEST.exe) $(TEST.install.exe)
$(STRIP.exe) $(TEST.install.exe)
test.install: | $(dir $(TEST.install.exe))
$(CP.exe) $(TEST.exe) $(TEST.install.exe)
test.uninstall:
$(RM.exe) -f $(TEST.install.exe)
install-strip: test.install-strip
install: test.install
uninstall: test.uninstall
endif
HandBrake-0.10.2/test/test.c 0000664 0001752 0001752 00000534351 12463330511 016152 0 ustar handbrake handbrake /* test.c
Copyright (c) 2003-2015 HandBrake Team
This file is part of the HandBrake source code
Homepage: .
It may be used under the terms of the GNU General Public License v2.
For full terms see the file COPYING file or visit http://www.gnu.org/licenses/gpl-2.0.html
*/
#include
#include
#include
#include
#include
#include
#include
#include
#if defined( __MINGW32__ )
#include
#include
#endif
#if defined( PTW32_STATIC_LIB )
#include
#endif
#include "hb.h"
#include "lang.h"
#include "parsecsv.h"
#include "openclwrapper.h"
#ifdef USE_QSV
#include "qsv_common.h"
#endif
#if defined( __APPLE_CC__ )
#import
#include
#include
#include
#endif
/* Options */

/*
 * File-scope CLI option state.  The defaults below are presumably
 * overridden by ParseOptions() (defined later in this file) and
 * consumed by HandleEvents() when the encode job is built -- TODO
 * confirm against the option table later in the file.
 */

/* libhb / scan behaviour */
static int debug = HB_DEBUG_ALL;  /* verbosity handed to hb_init() */
static int update = 0;            /* check for a newer HandBrake, then exit */
static int dvdnav = 1;            /* passed to hb_dvd_set_dvdnav() */
static char * input = NULL;       /* source path handed to hb_scan() */
static char * output = NULL;
static char * format = NULL;
static int titleindex = 1;        /* 0 = scan every title */
static int titlescan = 0;
static int main_feature = 0;      /* scan all titles, then encode the longest */
static char * native_language = NULL;
static int native_dub = 0;
static int twoPass = 0;

/* video filter toggles and their option strings */
static int deinterlace = 0;
static char * deinterlace_opt = 0;
static int deblock = 0;
static char * deblock_opt = 0;
static int denoise = 0;
static char * denoise_opt = 0;
static int nlmeans = 0;
static char * nlmeans_opt = NULL;
static char * nlmeans_tune_opt = NULL;
static int detelecine = 0;
static char * detelecine_opt = 0;
static int decomb = 0;
static char * decomb_opt = 0;
static int rotate = 0;
static char * rotate_opt = 0;
static int rotate_val = 0;
static int grayscale = 0;

/* video encoder selection */
static int vcodec = HB_VCODEC_FFMPEG_MPEG4;

/* audio track selection / encoding options */
static hb_list_t * audios = NULL;           /* list of hb_audio_config_t, freed in main() */
static hb_audio_config_t * audio = NULL;    /* scratch pointer, also used by PrintTitleInfo() */
static int num_audio_tracks = 0;
static int allowed_audio_copy = -1;         /* -1: let the preset build the passthru mask */
static char * mixdowns = NULL;
static char * dynamic_range_compression = NULL;
static char * audio_gain = NULL;
static char ** audio_dither = NULL;
static char ** normalize_mix_level = NULL;
static char * atracks = NULL;
static char * arates = NULL;
static char ** abitrates = NULL;
static char ** aqualities = NULL;
static char ** acompressions = NULL;
static char * acodec_fallback = NULL;
static char * acodecs = NULL;
static char ** anames = NULL;
static int audio_explicit = 0;

/* subtitle selection */
static char ** subtracks = NULL;
static char ** subforce = NULL;
static char * subburn = NULL;
static char * subdefault = NULL;
static char ** srtfile = NULL;
static char ** srtcodeset = NULL;
static char ** srtoffset = NULL;
static char ** srtlang = NULL;
static int srtdefault = -1;
static int srtburn = -1;
static int subtitle_scan = 0;

/* picture geometry */
static int width = 0;
static int height = 0;
static int crop[4] = { -1,-1,-1,-1 };   /* negative entries are replaced by the title's autocrop */
static int loose_crop = -1;

/* video rate / quality / container */
static int vrate = 0;
static float vquality = -1.0;
static int vbitrate = 0;
static int mux = 0;

/* anamorphic / aspect handling */
static int anamorphic_mode = 0;
static int modulus = 0;
static int par_height = 0;
static int par_width = 0;
static int display_width = 0;
static int keep_display_aspect = 0;
static int itu_par = 0;

/* source navigation */
static int angle = 0;
static int chapter_start = 0;
static int chapter_end = 0;
static int chapter_markers = 0;
static char * marker_file = NULL;       /* optional CSV file with chapter names */

/* x264 configuration */
static char * x264_preset = NULL;
static char * x264_tune = NULL;
static char * advanced_opts = NULL;
static char * h264_profile = NULL;
static char * h264_level = NULL;

static int maxHeight = 0;
static int maxWidth = 0;
static int turbo_opts_enabled = 0;
static int largeFileSize = 0;
static int preset = 0;                  /* non-zero: apply the built-in preset named below */
static char * preset_name = 0;
static int cfr = 0;
static int mp4_optimize = 0;
static int ipod_atom = 0;
static int color_matrix_code = 0;
static int preview_count = 10;          /* passed to hb_scan() */
static int store_previews = 0;
static int start_at_preview = 0;
static int64_t start_at_pts = 0;
static int start_at_frame = 0;
static int64_t stop_at_pts = 0;
static int stop_at_frame = 0;
static uint64_t min_title_duration = 10;    /* seconds; scaled to 90kHz ticks for hb_scan() */
static int use_opencl = 0;
static int use_hwd = 0;                 /* hardware decode; copied to hb_gui_use_hwd_flag */
#ifdef USE_QSV
static int qsv_async_depth = -1;
static int qsv_decode = 1;
#endif

/* Exit cleanly on Ctrl-C */
static volatile hb_error_code done_error = HB_ERROR_NONE;   /* becomes the process exit status */
static volatile int die = 0;                                /* set to leave the main event loop */
static void SigHandler( int );

/* Utils */
static void ShowHelp();
static void ShowPresets();
/**
 * Prints the list of interactive keyboard commands to stdout.
 */
static void ShowCommands()
{
    static const char * const commands[] =
    {
        "\nCommands:\n",
        " [h]elp Show this message\n",
        " [q]uit Exit HandBrakeCLI\n",
        " [p]ause Pause encoding\n",
        " [r]esume Resume encoding\n",
    };
    size_t i;

    for ( i = 0; i < sizeof(commands) / sizeof(commands[0]); i++ )
    {
        fputs( commands[i], stdout );
    }
}
/* Forward declarations for the CLI work functions defined later in
 * this file. */
static int ParseOptions( int argc, char ** argv );
static int CheckOptions( int argc, char ** argv );
static int HandleEvents( hb_handle_t * h );

/* String-vector helpers (definitions appear later in the file) */
static void str_vfree( char **strv );
static char** str_split( char *str, char delem );
static void print_string_list(FILE *out, const char* const *list, const char *prefix);

#ifdef __APPLE_CC__
/* macOS-only helpers that map a path to its DVD device via IOKit */
static char* bsd_name_for_path(char *path);
static int device_is_dvd(char *device);
static io_service_t get_iokit_service( char *device );
static int is_dvd_service( io_service_t service );
static int is_whole_media_service( io_service_t service );
#endif

/* Only print the "Muxing..." message once */
static int show_mux_warning = 1;
/****************************************************************************
 * hb_cli_error_handler
 *
 * libhb error callback for the CLI: writes every message to stderr with
 * an "ERROR: " prefix so it stands out, just as hb_log output always has.
 ****************************************************************************/
static void hb_cli_error_handler ( const char *errmsg )
{
    fputs( "ERROR: ", stderr );
    fputs( errmsg, stderr );
    fputc( '\n', stderr );
}
/**
 * Replaces argc/argv with a UTF-8 encoded copy of the command line on
 * Windows (MinGW builds); on every other platform it is a no-op.
 *
 * @param argc_ptr  in/out argument count; only written on success
 * @param argv_ptr  in/out argument vector; only written on success
 * @return 1 on success (always on non-Windows platforms), 0 on failure,
 *         in which case the caller's original argc/argv are untouched
 */
static int get_argv_utf8(int *argc_ptr, char ***argv_ptr)
{
#if defined( __MINGW32__ )
    int ret = 0;
    int argc;
    char **argv;

    /* Fetch the command line as UTF-16 and split it into arguments */
    wchar_t **argv_utf16 = CommandLineToArgvW(GetCommandLineW(), &argc);
    if (argv_utf16)
    {
        int i;
        /* Single allocation holding both the pointer table and the string
           data; offset starts past the (argc+1) pointers (incl. the NULL
           terminator slot). */
        int offset = (argc+1) * sizeof(char*);
        int size = offset;

        /* First pass: measure the UTF-8 size of every argument */
        for(i = 0; i < argc; i++)
            size += WideCharToMultiByte(CP_UTF8, 0, argv_utf16[i], -1, NULL, 0, NULL, NULL );
        argv = malloc(size);
        if (argv)
        {
            /* Second pass: convert each argument into the shared buffer */
            for (i = 0; i < argc; i++)
            {
                argv[i] = (char*)argv + offset;
                offset += WideCharToMultiByte(CP_UTF8, 0, argv_utf16[i], -1, argv[i], size-offset, NULL, NULL);
            }
            argv[argc] = NULL;
            ret = 1;
        }
        LocalFree(argv_utf16);
    }
    if (ret)
    {
        *argc_ptr = argc;
        *argv_ptr = argv;
    }
    return ret;
#else
    // On other systems, assume command line is already utf8
    return 1;
#endif
}
/**
 * HandBrakeCLI entry point.
 *
 * Parses the command line, initializes libhb, starts a title scan, then
 * loops dispatching interactive keyboard commands and libhb state events
 * (via HandleEvents) until the work finishes or the user quits.  Finally
 * tears libhb down and frees the option strings.
 *
 * @return 0 on success (or after --update), 1 on bad options, otherwise
 *         the hb_error_code recorded in 'done_error' by the event loop
 */
int main( int argc, char ** argv )
{
    hb_handle_t * h;
    int           build;
    char        * version;

    hb_global_init();
    audios = hb_list_init();

    // Get utf8 command line if windows
    get_argv_utf8(&argc, &argv);

    /* Parse command line */
    if( ParseOptions( argc, argv ) ||
        CheckOptions( argc, argv ) )
    {
        return 1;
    }

    /* Register our error handler */
    hb_register_error_handler(&hb_cli_error_handler);

    /* Init libhb */
    h = hb_init( debug, update );
    hb_dvd_set_dvdnav( dvdnav );

    /* Show version */
    fprintf( stderr, "%s - %s - %s\n",
             HB_PROJECT_TITLE, HB_PROJECT_BUILD_TITLE, HB_PROJECT_URL_WEBSITE );

    /* Check for update */
    if( update )
    {
        if( ( build = hb_check_update( h, &version ) ) > -1 )
        {
            fprintf( stderr, "You are using an old version of "
                     "HandBrake.\nLatest is %s (build %d).\n", version,
                     build );
        }
        else
        {
            fprintf( stderr, "Your version of HandBrake is up to "
                     "date.\n" );
        }
        hb_close( &h );
        hb_global_close();
        return 0;
    }

    /* Geeky */
    /* NOTE(review): fixed the second call, which used to pass the stray
     * argument 'h' -- hb_get_cpu_count is called without arguments on
     * this very line, so the extra argument was inconsistent and only
     * compiled because of the unspecified parameter list. */
    fprintf( stderr, "%d CPU%s detected\n", hb_get_cpu_count(),
             hb_get_cpu_count() > 1 ? "s" : "" );

    /* Exit ASAP on Ctrl-C */
    signal( SIGINT, SigHandler );

    /* Feed libhb with a DVD to scan */
    fprintf( stderr, "Opening %s...\n", input );

    if (main_feature) {
        /*
         * We need to scan for all the titles in order to find the main feature
         */
        titleindex = 0;
    }

    hb_system_sleep_prevent(h);

    hb_gui_use_hwd_flag = use_hwd;

    /* min_title_duration is in seconds; hb_scan expects 90kHz ticks */
    hb_scan( h, input, titleindex, preview_count, store_previews, min_title_duration * 90000LL );

    /* Wait... */
    while( !die )
    {
#if defined( __MINGW32__ )
        /* Windows: poll the console for single-key commands */
        if( _kbhit() ) {
            switch( _getch() )
            {
                case 0x03: /* ctrl-c */
                case 'q':
                    fprintf( stdout, "\nEncoding Quit by user command\n" );
                    done_error = HB_ERROR_CANCELED;
                    die = 1;
                    break;
                case 'p':
                    fprintf(stdout,
                            "\nEncoding Paused by user command, 'r' to resume\n");
                    hb_pause(h);
                    hb_system_sleep_allow(h);
                    break;
                case 'r':
                    hb_system_sleep_prevent(h);
                    hb_resume(h);
                    break;
                case 'h':
                    ShowCommands();
                    break;
            }
        }
        hb_snooze( 200 );
#elif !defined(SYS_BEOS)
        /* POSIX: wait up to 100ms for a full line on stdin */
        fd_set         fds;
        struct timeval tv;
        int            ret;
        char           buf[257];

        tv.tv_sec  = 0;
        tv.tv_usec = 100000;

        FD_ZERO( &fds );
        FD_SET( STDIN_FILENO, &fds );
        ret = select( STDIN_FILENO + 1, &fds, NULL, NULL, &tv );

        if( ret > 0 )
        {
            int size = 0;

            /* read up to 256 chars or until a newline */
            while( size < 256 &&
                   read( STDIN_FILENO, &buf[size], 1 ) > 0 )
            {
                if( buf[size] == '\n' )
                {
                    break;
                }
                size++;
            }

            if( size >= 256 || buf[size] == '\n' )
            {
                switch( buf[0] )
                {
                    case 'q':
                        fprintf( stdout, "\nEncoding Quit by user command\n" );
                        done_error = HB_ERROR_CANCELED;
                        die = 1;
                        break;
                    case 'p':
                        fprintf(stdout,
                                "\nEncoding Paused by user command, 'r' to resume\n");
                        hb_pause(h);
                        hb_system_sleep_allow(h);
                        break;
                    case 'r':
                        hb_system_sleep_prevent(h);
                        hb_resume(h);
                        break;
                    case 'h':
                        ShowCommands();
                        break;
                }
            }
        }
        hb_snooze( 200 );
#else
        hb_snooze( 200 );
#endif

        HandleEvents( h );
    }

    /* Clean up */
    hb_close(&h);
    hb_global_close();

    /* Free the audio track list and any per-track output names */
    if (audios != NULL)
    {
        while ((audio = hb_list_item(audios, 0)) != NULL)
        {
            hb_list_rem(audios, audio);
            if (audio->out.name != NULL)
            {
                free(audio->out.name);
            }
            free(audio);
        }
        hb_list_close(&audios);
    }

    /* Free option strings allocated during option parsing */
    str_vfree(abitrates);
    str_vfree(acompressions);
    str_vfree(aqualities);
    str_vfree(audio_dither);
    free(acodecs);
    free(arates);
    free(atracks);
    free(audio_gain);
    free(dynamic_range_compression);
    free(mixdowns);
    free(native_language);
    free(format);
    free(input);
    free(output);
    free(preset_name);
    free(x264_preset);
    free(x264_tune);
    free(advanced_opts);
    free(h264_profile);
    free(h264_level);
    free(nlmeans_opt);
    free(nlmeans_tune_opt);

    // write a carriage return to stdout
    // avoids overlap / line wrapping when stderr is redirected
    fprintf(stdout, "\n");
    fprintf(stderr, "HandBrake has exited.\n");

    return done_error;
}
/**
 * Prints a human-readable summary of one scanned title to stderr:
 * source location, duration, geometry/autocrop, OpenCL/hwd support,
 * chapters, audio tracks, subtitle tracks and interlacing detection.
 *
 * Side effect: iterating the audio list assigns the file-scope 'audio'
 * global.
 *
 * @param title    title to describe
 * @param feature  index of the main feature title; when it matches
 *                 title->index a "Main Feature" line is printed
 */
static void PrintTitleInfo( hb_title_t * title, int feature )
{
    hb_chapter_t * chapter;
    hb_subtitle_t * subtitle;
    int i;

    fprintf( stderr, "+ title %d:\n", title->index );
    if ( title->index == feature )
    {
        fprintf( stderr, " + Main Feature\n" );
    }
    /* describe the source: file stream, DVD title or Blu-ray playlist */
    if ( title->type == HB_STREAM_TYPE || title->type == HB_FF_STREAM_TYPE )
    {
        fprintf( stderr, " + stream: %s\n", title->path );
    }
    else if ( title->type == HB_DVD_TYPE )
    {
        fprintf( stderr, " + vts %d, ttn %d, cells %d->%d (%"PRIu64" blocks)\n",
                 title->vts, title->ttn, title->cell_start, title->cell_end,
                 title->block_count );
    }
    else if( title->type == HB_BD_TYPE )
    {
        fprintf( stderr, " + playlist: %05d.MPLS\n", title->playlist );
    }
    if (title->angle_count > 1)
        fprintf( stderr, " + angle(s) %d\n", title->angle_count );
    fprintf( stderr, " + duration: %02d:%02d:%02d\n",
             title->hours, title->minutes, title->seconds );
    fprintf( stderr, " + size: %dx%d, pixel aspect: %d/%d, display aspect: %.2f, %.3f fps\n",
             title->width, title->height,
             title->pixel_aspect_width,
             title->pixel_aspect_height,
             (float) title->aspect,
             (float) title->rate / title->rate_base );
    fprintf( stderr, " + autocrop: %d/%d/%d/%d\n", title->crop[0],
             title->crop[1], title->crop[2], title->crop[3] );

    fprintf(stderr, " + support opencl: %s\n", title->opencl_support ? "yes" : "no");
#ifdef USE_HWD
    fprintf(stderr, " + support hwd: %s\n", title->hwd_support ? "yes" : "no");
#else
    fprintf(stderr, " + support hwd: not built-in\n");
#endif

    fprintf( stderr, " + chapters:\n" );
    for( i = 0; i < hb_list_count( title->list_chapter ); i++ )
    {
        chapter = hb_list_item( title->list_chapter, i );
        fprintf( stderr, " + %d: cells %d->%d, %"PRIu64" blocks, duration "
                 "%02d:%02d:%02d\n", chapter->index,
                 chapter->cell_start, chapter->cell_end,
                 chapter->block_count, chapter->hours, chapter->minutes,
                 chapter->seconds );
    }
    fprintf( stderr, " + audio tracks:\n" );
    for( i = 0; i < hb_list_count( title->list_audio ); i++ )
    {
        /* NOTE: assigns the file-scope 'audio' global */
        audio = hb_list_audio_config_item( title->list_audio, i );
        /* AC3/DCA tracks have known samplerate/bitrate worth showing */
        if( ( audio->in.codec == HB_ACODEC_AC3 ) || ( audio->in.codec == HB_ACODEC_DCA) )
        {
            fprintf( stderr, " + %d, %s (iso639-2: %s), %dHz, %dbps\n",
                     i + 1,
                     audio->lang.description,
                     audio->lang.iso639_2,
                     audio->in.samplerate,
                     audio->in.bitrate );
        }
        else
        {
            fprintf( stderr, " + %d, %s (iso639-2: %s)\n",
                     i + 1,
                     audio->lang.description,
                     audio->lang.iso639_2 );
        }
    }
    fprintf( stderr, " + subtitle tracks:\n" );
    for( i = 0; i < hb_list_count( title->list_subtitle ); i++ )
    {
        subtitle = hb_list_item( title->list_subtitle, i );
        fprintf( stderr, " + %d, %s (iso639-2: %s) (%s)(%s)\n",
                 i + 1, subtitle->lang,
                 subtitle->iso639_2,
                 (subtitle->format == TEXTSUB) ? "Text" : "Bitmap",
                 hb_subsource_name(subtitle->source));
    }

    if(title->detected_interlacing)
    {
        /* Interlacing was found in half or more of the preview frames */
        fprintf( stderr, " + combing detected, may be interlaced or telecined\n");
    }
}
/**
 * Prints the scan summary for every title in the set, marking the
 * set's main feature.
 *
 * @param title_set  scanned title set from hb_get_title_set()
 */
static void PrintTitleSetInfo( hb_title_set_t * title_set )
{
    int idx;
    const int count = hb_list_count( title_set->list_title );

    for( idx = 0; idx < count; idx++ )
    {
        hb_title_t * t = hb_list_item( title_set->list_title, idx );
        PrintTitleInfo( t, title_set->feature );
    }
}
/**
 * Checks whether subtitle track number 'pos' is selected by the
 * user-supplied track list.
 *
 * @param list  NULL-terminated array of numeric strings, or NULL
 * @param pos   1-based track number to look for (0 never matches)
 * @return 1 when the track is selected, 0 otherwise
 */
static int test_sub_list( char ** list, int pos )
{
    char ** item;

    if ( list == NULL || pos == 0 )
        return 0;

    /* an empty list selects exactly the first track */
    if ( list[0] == NULL )
        return pos == 1;

    for ( item = list; *item != NULL; item++ )
    {
        int track = strtol( *item, NULL, 0 );
        if ( track == pos )
            return 1;
    }
    return 0;
}
/**
 * Case-insensitively matches a user-supplied language string against
 * every name/code form of the language identified by an iso639-2 code.
 *
 * @param lang  user string: English name, native name, or any ISO code
 * @param code  iso639-2 code used to look up the language entry
 * @return 1 on a match, 0 when no form matches or the code is unknown
 */
static int cmp_lang( char * lang, const char * code )
{
    int i;
    iso639_lang_t * iso639 = lang_for_code2( code );

    if ( iso639 == NULL )
        return 0;

    /* every textual form the user may have typed */
    const char * forms[] = { iso639->eng_name, iso639->native_name,
                             iso639->iso639_1, iso639->iso639_2,
                             iso639->iso639_2b };
    for ( i = 0; i < (int)(sizeof(forms) / sizeof(forms[0])); i++ )
    {
        if ( forms[i] && !strcasecmp( lang, forms[i] ) )
            return 1;
    }
    return 0;
}
/**
 * Adjusts a pair of crop values so the remaining dimension is a
 * multiple of 'mod'.  Prefers growing the crop by up to 'max' extra
 * pixels; otherwise shrinks it toward the previous multiple.  Tries to
 * keep the first crop value even.
 *
 * @param total  full source dimension (height or width)
 * @param v1/v2  in/out crop amounts for the two opposing edges
 * @param mod    required modulus for (total - *v1 - *v2)
 * @param max    most extra pixels we are willing to crop away
 */
static void apply_loose_crop(int total, int * v1, int * v2, int mod, int max)
{
    /* pixels still to crop to land on the next multiple of mod */
    int need = (total - *v1 - *v2) % mod;
    if (!need)
        return;

    /* pixels to give back to land on the previous multiple instead */
    int give = mod - need;

    if (need <= max || give > (*v1 + *v2))
    {
        /* grow the crop (also taken when there isn't enough crop to give back) */
        int grow1 = need / 2;
        if ((*v1 + grow1) & 1) // avoid odd crop if possible
            ++grow1;
        *v1 += grow1;
        *v2 += need - grow1;
    }
    else
    {
        /* shrink the crop instead */
        int shrink1 = give / 2;
        if (shrink1 > *v1)
            shrink1 = *v1;
        else if ((*v1 - shrink1) & 1) // avoid odd crop if possible
            ++shrink1;
        int shrink2 = give - shrink1;
        if (shrink2 > *v2)
        {
            shrink1 += (shrink2 - *v2);
            if ((*v1 - shrink1) & 1) // avoid odd crop if possible
                ++shrink1;
            shrink2 = give - shrink1;
        }
        *v1 -= shrink1;
        *v2 -= shrink2;
    }
}
static int HandleEvents( hb_handle_t * h )
{
hb_state_t s;
const hb_encoder_t *encoder;
int tmp_num_audio_tracks;
int filter_cfr, filter_vrate, filter_vrate_base;
hb_get_state( h, &s );
switch( s.state )
{
case HB_STATE_IDLE:
/* Nothing to do */
break;
#define p s.param.scanning
case HB_STATE_SCANNING:
/* Show what title is currently being scanned */
if (p.preview_cur)
{
fprintf(stderr, "\rScanning title %d of %d, preview %d, %.2f %%",
p.title_cur, p.title_count, p.preview_cur, 100 * p.progress);
}
else
{
fprintf(stderr, "\rScanning title %d of %d, %.2f %%",
p.title_cur, p.title_count, 100 * p.progress);
}
fflush(stderr);
break;
#undef p
case HB_STATE_SCANDONE:
{
hb_title_set_t * title_set;
hb_title_t * title;
hb_job_t * job;
int i;
int sub_burned = 0;
/* Audio argument string parsing variables */
int acodec = 0;
int abitrate = 0;
float aquality = 0;
float acompression = 0;
int arate = 0;
int mixdown = HB_AMIXDOWN_DOLBYPLII;
double d_r_c = 0;
double gain = 0;
/* Audio argument string parsing variables */
title_set = hb_get_title_set( h );
if( !title_set || !hb_list_count( title_set->list_title ) )
{
/* No valid title, stop right there */
fprintf( stderr, "No title found.\n" );
done_error = HB_ERROR_WRONG_INPUT;
die = 1;
break;
}
if( main_feature )
{
int i;
int main_feature_idx=0;
int main_feature_pos=-1;
int main_feature_time=0;
int title_time;
fprintf( stderr, "Searching for main feature title...\n" );
for( i = 0; i < hb_list_count( title_set->list_title ); i++ )
{
title = hb_list_item( title_set->list_title, i );
title_time = (title->hours*60*60 ) + (title->minutes *60) + (title->seconds);
fprintf( stderr, " + Title (%d) index %d has length %dsec\n",
i, title->index, title_time );
if( main_feature_time < title_time )
{
main_feature_time = title_time;
main_feature_pos = i;
main_feature_idx = title->index;
}
if( title_set->feature == title->index )
{
main_feature_time = title_time;
main_feature_pos = i;
main_feature_idx = title->index;
break;
}
}
if( main_feature_pos == -1 )
{
fprintf( stderr, "No main feature title found.\n" );
done_error = HB_ERROR_WRONG_INPUT;
die = 1;
break;
}
titleindex = main_feature_idx;
fprintf( stderr, "Found main feature title, setting title to %d\n",
main_feature_idx);
title = hb_list_item( title_set->list_title, main_feature_pos);
} else {
title = hb_list_item( title_set->list_title, 0 );
}
if( !titleindex || titlescan )
{
/* Scan-only mode, print infos and exit */
PrintTitleSetInfo( title_set );
die = 1;
break;
}
PrintTitleInfo( title, title_set->feature );
/* Set job settings */
job = hb_job_init(title);
filter_cfr = job->cfr;
filter_vrate = job->vrate;
filter_vrate_base = job->vrate_base;
if( chapter_start && chapter_end && !stop_at_pts && !start_at_preview && !stop_at_frame && !start_at_pts && !start_at_frame )
{
job->chapter_start = MAX( job->chapter_start,
chapter_start );
job->chapter_end = MIN( job->chapter_end,
chapter_end );
job->chapter_end = MAX( job->chapter_start,
job->chapter_end );
}
if ( angle )
{
job->angle = angle;
}
if (preset)
{
fprintf( stderr, "+ Using preset: %s\n", preset_name);
if (!strcasecmp(preset_name, "Universal"))
{
if( !mux )
{
mux = HB_MUX_MP4;
}
vcodec = HB_VCODEC_X264;
job->vquality = 20.0;
filter_vrate_base = 900000;
filter_cfr = 2;
if( !atracks )
{
atracks = strdup("1,1");
}
if( !acodecs )
{
acodecs = strdup("ffaac,copy:ac3");
}
if( !abitrates )
{
abitrates = str_split("160,160", ',');
}
if( !mixdowns )
{
mixdowns = strdup("dpl2,none");
}
if( !arates )
{
arates = strdup("Auto,Auto");
}
if( !dynamic_range_compression )
{
dynamic_range_compression = strdup("0.0,0.0");
}
if( allowed_audio_copy == -1 )
{
allowed_audio_copy = 0;
allowed_audio_copy |= HB_ACODEC_AAC_PASS;
allowed_audio_copy |= HB_ACODEC_AC3_PASS;
allowed_audio_copy |= HB_ACODEC_DCA_HD_PASS;
allowed_audio_copy |= HB_ACODEC_DCA_PASS;
allowed_audio_copy |= HB_ACODEC_MP3_PASS;
allowed_audio_copy &= HB_ACODEC_PASS_MASK;
}
if( acodec_fallback == NULL )
{
acodec_fallback = "ffac3";
}
maxWidth = 720;
maxHeight = 576;
if (x264_preset == NULL)
{
x264_preset = strdup("fast");
}
if (h264_profile == NULL)
{
h264_profile = strdup("baseline");
}
if (h264_level == NULL)
{
h264_level = strdup("3.0");
}
if( !anamorphic_mode )
{
anamorphic_mode = 2;
}
modulus = 2;
job->chapter_markers = 1;
}
if (!strcasecmp(preset_name, "iPod"))
{
if( !mux )
{
mux = HB_MUX_MP4;
}
job->ipod_atom = 1;
vcodec = HB_VCODEC_X264;
job->vquality = 22.0;
filter_vrate_base = 900000;
filter_cfr = 2;
if( !atracks )
{
atracks = strdup("1");
}
if( !acodecs )
{
acodecs = strdup("ffaac");
}
if( !abitrates )
{
abitrates = str_split("160", ',');
}
if( !mixdowns )
{
mixdowns = strdup("dpl2");
}
if( !arates )
{
arates = strdup("Auto");
}
if( !dynamic_range_compression )
{
dynamic_range_compression = strdup("0.0");
}
if( allowed_audio_copy == -1 )
{
allowed_audio_copy = 0;
allowed_audio_copy |= HB_ACODEC_AAC_PASS;
allowed_audio_copy |= HB_ACODEC_AC3_PASS;
allowed_audio_copy |= HB_ACODEC_DCA_HD_PASS;
allowed_audio_copy |= HB_ACODEC_DCA_PASS;
allowed_audio_copy |= HB_ACODEC_MP3_PASS;
allowed_audio_copy &= HB_ACODEC_PASS_MASK;
}
if( acodec_fallback == NULL )
{
acodec_fallback = "ffac3";
}
maxWidth = 320;
maxHeight = 240;
if (x264_preset == NULL)
{
x264_preset = strdup("medium");
}
if (h264_profile == NULL)
{
h264_profile = strdup("baseline");
}
if (h264_level == NULL)
{
h264_level = strdup("1.3");
}
modulus = 2;
job->chapter_markers = 1;
}
if (!strcasecmp(preset_name, "iPhone & iPod touch"))
{
if( !mux )
{
mux = HB_MUX_MP4;
}
job->largeFileSize = 1;
vcodec = HB_VCODEC_X264;
job->vquality = 22.0;
filter_vrate_base = 900000;
filter_cfr = 2;
if( !atracks )
{
atracks = strdup("1");
}
if( !acodecs )
{
acodecs = strdup("ffaac");
}
if( !abitrates )
{
abitrates = str_split("160", ',');
}
if( !mixdowns )
{
mixdowns = strdup("dpl2");
}
if( !arates )
{
arates = strdup("Auto");
}
if( !dynamic_range_compression )
{
dynamic_range_compression = strdup("0.0");
}
if( allowed_audio_copy == -1 )
{
allowed_audio_copy = 0;
allowed_audio_copy |= HB_ACODEC_AAC_PASS;
allowed_audio_copy |= HB_ACODEC_AC3_PASS;
allowed_audio_copy |= HB_ACODEC_DCA_HD_PASS;
allowed_audio_copy |= HB_ACODEC_DCA_PASS;
allowed_audio_copy |= HB_ACODEC_MP3_PASS;
allowed_audio_copy &= HB_ACODEC_PASS_MASK;
}
if( acodec_fallback == NULL )
{
acodec_fallback = "ffac3";
}
maxWidth = 960;
maxHeight = 640;
if (x264_preset == NULL)
{
x264_preset = strdup("medium");
}
if (h264_profile == NULL)
{
h264_profile = strdup("high");
}
if (h264_level == NULL)
{
h264_level = strdup("3.1");
}
if( !anamorphic_mode )
{
anamorphic_mode = 2;
}
modulus = 2;
job->chapter_markers = 1;
}
if (!strcasecmp(preset_name, "iPad"))
{
if( !mux )
{
mux = HB_MUX_MP4;
}
job->largeFileSize = 1;
vcodec = HB_VCODEC_X264;
job->vquality = 20.0;
filter_vrate_base = 900000;
filter_cfr = 2;
if( !atracks )
{
atracks = strdup("1");
}
if( !acodecs )
{
acodecs = strdup("ffaac");
}
if( !abitrates )
{
abitrates = str_split("160", ',');
}
if( !mixdowns )
{
mixdowns = strdup("dpl2");
}
if( !arates )
{
arates = strdup("Auto");
}
if( !dynamic_range_compression )
{
dynamic_range_compression = strdup("0.0");
}
if( allowed_audio_copy == -1 )
{
allowed_audio_copy = 0;
allowed_audio_copy |= HB_ACODEC_AAC_PASS;
allowed_audio_copy |= HB_ACODEC_AC3_PASS;
allowed_audio_copy |= HB_ACODEC_DCA_HD_PASS;
allowed_audio_copy |= HB_ACODEC_DCA_PASS;
allowed_audio_copy |= HB_ACODEC_MP3_PASS;
allowed_audio_copy &= HB_ACODEC_PASS_MASK;
}
if( acodec_fallback == NULL )
{
acodec_fallback = "ffac3";
}
maxWidth = 1280;
maxHeight = 720;
if (x264_preset == NULL)
{
x264_preset = strdup("medium");
}
if (h264_profile == NULL)
{
h264_profile = strdup("high");
}
if (h264_level == NULL)
{
h264_level = strdup("3.1");
}
if( !anamorphic_mode )
{
anamorphic_mode = 2;
}
modulus = 2;
job->chapter_markers = 1;
}
if (!strcasecmp(preset_name, "AppleTV"))
{
if( !mux )
{
mux = HB_MUX_MP4;
}
job->largeFileSize = 1;
vcodec = HB_VCODEC_X264;
job->vquality = 20.0;
filter_vrate_base = 900000;
filter_cfr = 2;
if( !atracks )
{
atracks = strdup("1,1");
}
if( !acodecs )
{
acodecs = strdup("ffaac,copy:ac3");
}
if( !abitrates )
{
abitrates = str_split("160,160", ',');
}
if( !mixdowns )
{
mixdowns = strdup("dpl2,none");
}
if( !arates )
{
arates = strdup("Auto,Auto");
}
if( !dynamic_range_compression )
{
dynamic_range_compression = strdup("0.0,0.0");
}
if( allowed_audio_copy == -1 )
{
allowed_audio_copy = 0;
allowed_audio_copy |= HB_ACODEC_AAC_PASS;
allowed_audio_copy |= HB_ACODEC_AC3_PASS;
allowed_audio_copy |= HB_ACODEC_DCA_HD_PASS;
allowed_audio_copy |= HB_ACODEC_DCA_PASS;
allowed_audio_copy |= HB_ACODEC_MP3_PASS;
allowed_audio_copy &= HB_ACODEC_PASS_MASK;
}
if( acodec_fallback == NULL )
{
acodec_fallback = "ffac3";
}
maxWidth = 960;
maxHeight = 720;
if (x264_preset == NULL)
{
x264_preset = strdup("medium");
}
if (h264_profile == NULL)
{
h264_profile = strdup("high");
}
if (h264_level == NULL)
{
h264_level = strdup("3.1");
}
if (advanced_opts == NULL)
{
advanced_opts = strdup("qpmin=4:cabac=0:ref=2:b-pyramid=none:weightb=0:weightp=0:vbv-maxrate=9500:vbv-bufsize=9500");
}
if( !anamorphic_mode )
{
anamorphic_mode = 2;
}
modulus = 2;
job->chapter_markers = 1;
}
if (!strcasecmp(preset_name, "AppleTV 2"))
{
if( !mux )
{
mux = HB_MUX_MP4;
}
job->largeFileSize = 1;
vcodec = HB_VCODEC_X264;
job->vquality = 20.0;
filter_vrate_base = 900000;
filter_cfr = 2;
if( !atracks )
{
atracks = strdup("1,1");
}
if( !acodecs )
{
acodecs = strdup("ffaac,copy:ac3");
}
if( !abitrates )
{
abitrates = str_split("160,160", ',');
}
if( !mixdowns )
{
mixdowns = strdup("dpl2,none");
}
if( !arates )
{
arates = strdup("Auto,Auto");
}
if( !dynamic_range_compression )
{
dynamic_range_compression = strdup("0.0,0.0");
}
if( allowed_audio_copy == -1 )
{
allowed_audio_copy = 0;
allowed_audio_copy |= HB_ACODEC_AAC_PASS;
allowed_audio_copy |= HB_ACODEC_AC3_PASS;
allowed_audio_copy |= HB_ACODEC_DCA_HD_PASS;
allowed_audio_copy |= HB_ACODEC_DCA_PASS;
allowed_audio_copy |= HB_ACODEC_MP3_PASS;
allowed_audio_copy &= HB_ACODEC_PASS_MASK;
}
if( acodec_fallback == NULL )
{
acodec_fallback = "ffac3";
}
maxWidth = 1280;
maxHeight = 720;
if (x264_preset == NULL)
{
x264_preset = strdup("medium");
}
if (h264_profile == NULL)
{
h264_profile = strdup("high");
}
if (h264_level == NULL)
{
h264_level = strdup("3.1");
}
if( !anamorphic_mode )
{
anamorphic_mode = 2;
}
modulus = 2;
job->chapter_markers = 1;
}
if (!strcasecmp(preset_name, "AppleTV 3"))
{
if( !mux )
{
mux = HB_MUX_MP4;
}
job->largeFileSize = 1;
vcodec = HB_VCODEC_X264;
job->vquality = 20.0;
filter_vrate_base = 900000;
filter_cfr = 2;
if( !atracks )
{
atracks = strdup("1,1");
}
if( !acodecs )
{
acodecs = strdup("ffaac,copy:ac3");
}
if( !abitrates )
{
abitrates = str_split("160,160", ',');
}
if( !mixdowns )
{
mixdowns = strdup("dpl2,none");
}
if( !arates )
{
arates = strdup("Auto,Auto");
}
if( !dynamic_range_compression )
{
dynamic_range_compression = strdup("0.0,0.0");
}
if( allowed_audio_copy == -1 )
{
allowed_audio_copy = 0;
allowed_audio_copy |= HB_ACODEC_AAC_PASS;
allowed_audio_copy |= HB_ACODEC_AC3_PASS;
allowed_audio_copy |= HB_ACODEC_DCA_HD_PASS;
allowed_audio_copy |= HB_ACODEC_DCA_PASS;
allowed_audio_copy |= HB_ACODEC_MP3_PASS;
allowed_audio_copy &= HB_ACODEC_PASS_MASK;
}
if( acodec_fallback == NULL )
{
acodec_fallback = "ffac3";
}
maxWidth = 1920;
maxHeight = 1080;
if (x264_preset == NULL)
{
x264_preset = strdup("medium");
}
if (h264_profile == NULL)
{
h264_profile = strdup("high");
}
if (h264_level == NULL)
{
h264_level = strdup("4.0");
}
decomb = 1;
decomb_opt = "7:2:6:9:1:80";
if( !anamorphic_mode )
{
anamorphic_mode = 2;
}
modulus = 2;
job->chapter_markers = 1;
}
if (!strcasecmp(preset_name, "Android"))
{
if( !mux )
{
mux = HB_MUX_MP4;
}
vcodec = HB_VCODEC_X264;
job->vquality = 22.0;
filter_vrate_base = 900000;
filter_cfr = 2;
if( !atracks )
{
atracks = strdup("1");
}
if( !acodecs )
{
acodecs = strdup("ffaac");
}
if( !abitrates )
{
abitrates = str_split("128", ',');
}
if( !mixdowns )
{
mixdowns = strdup("dpl2");
}
if( !arates )
{
arates = strdup("Auto");
}
if( !dynamic_range_compression )
{
dynamic_range_compression = strdup("0.0");
}
if( allowed_audio_copy == -1 )
{
allowed_audio_copy = 0;
allowed_audio_copy |= HB_ACODEC_AAC_PASS;
allowed_audio_copy |= HB_ACODEC_AC3_PASS;
allowed_audio_copy |= HB_ACODEC_DCA_HD_PASS;
allowed_audio_copy |= HB_ACODEC_DCA_PASS;
allowed_audio_copy |= HB_ACODEC_MP3_PASS;
allowed_audio_copy &= HB_ACODEC_PASS_MASK;
}
if( acodec_fallback == NULL )
{
acodec_fallback = "ffac3";
}
maxWidth = 720;
maxHeight = 576;
if (x264_preset == NULL)
{
x264_preset = strdup("medium");
}
if (h264_profile == NULL)
{
h264_profile = strdup("main");
}
if (h264_level == NULL)
{
h264_level = strdup("3.0");
}
if( !anamorphic_mode )
{
anamorphic_mode = 2;
}
modulus = 2;
}
if (!strcasecmp(preset_name, "Android Tablet"))
{
if( !mux )
{
mux = HB_MUX_MP4;
}
vcodec = HB_VCODEC_X264;
job->vquality = 22.0;
filter_vrate_base = 900000;
filter_cfr = 2;
if( !atracks )
{
atracks = strdup("1");
}
if( !acodecs )
{
acodecs = strdup("ffaac");
}
if( !abitrates )
{
abitrates = str_split("128", ',');
}
if( !mixdowns )
{
mixdowns = strdup("dpl2");
}
if( !arates )
{
arates = strdup("Auto");
}
if( !dynamic_range_compression )
{
dynamic_range_compression = strdup("0.0");
}
if( allowed_audio_copy == -1 )
{
allowed_audio_copy = 0;
allowed_audio_copy |= HB_ACODEC_AAC_PASS;
allowed_audio_copy |= HB_ACODEC_AC3_PASS;
allowed_audio_copy |= HB_ACODEC_DCA_HD_PASS;
allowed_audio_copy |= HB_ACODEC_DCA_PASS;
allowed_audio_copy |= HB_ACODEC_MP3_PASS;
allowed_audio_copy &= HB_ACODEC_PASS_MASK;
}
if( acodec_fallback == NULL )
{
acodec_fallback = "ffac3";
}
maxWidth = 1280;
maxHeight = 720;
if (x264_preset == NULL)
{
x264_preset = strdup("medium");
}
if (h264_profile == NULL)
{
h264_profile = strdup("main");
}
if (h264_level == NULL)
{
h264_level = strdup("3.1");
}
if( !anamorphic_mode )
{
anamorphic_mode = 2;
}
modulus = 2;
}
if (!strcasecmp(preset_name, "Windows Phone 8"))
{
if( !mux )
{
mux = HB_MUX_MP4;
}
vcodec = HB_VCODEC_X264;
job->vquality = 22.0;
filter_vrate_base = 900000;
filter_cfr = 2;
if( !atracks )
{
atracks = strdup("1");
}
if( !acodecs )
{
acodecs = strdup("ffaac");
}
if( !abitrates )
{
abitrates = str_split("128", ',');
}
if( !mixdowns )
{
mixdowns = strdup("dpl2");
}
if( !arates )
{
arates = strdup("Auto");
}
if( !dynamic_range_compression )
{
dynamic_range_compression = strdup("0.0");
}
if( allowed_audio_copy == -1 )
{
allowed_audio_copy = 0;
allowed_audio_copy |= HB_ACODEC_AAC_PASS;
allowed_audio_copy |= HB_ACODEC_AC3_PASS;
allowed_audio_copy |= HB_ACODEC_DCA_HD_PASS;
allowed_audio_copy |= HB_ACODEC_DCA_PASS;
allowed_audio_copy |= HB_ACODEC_MP3_PASS;
allowed_audio_copy &= HB_ACODEC_PASS_MASK;
}
if( acodec_fallback == NULL )
{
acodec_fallback = "ffac3";
}
maxWidth = 1280;
maxHeight = 720;
if (x264_preset == NULL)
{
x264_preset = strdup("medium");
}
if (h264_profile == NULL)
{
h264_profile = strdup("main");
}
if (h264_level == NULL)
{
h264_level = strdup("3.1");
}
if( !anamorphic_mode )
{
anamorphic_mode = 0;
}
modulus = 2;
}
if (!strcasecmp(preset_name, "Normal"))
{
if( !mux )
{
mux = HB_MUX_MP4;
}
vcodec = HB_VCODEC_X264;
job->vquality = 20.0;
if( !atracks )
{
atracks = strdup("1");
}
if( !acodecs )
{
acodecs = strdup("ffaac");
}
if( !abitrates )
{
abitrates = str_split("160", ',');
}
if( !mixdowns )
{
mixdowns = strdup("dpl2");
}
if( !arates )
{
arates = strdup("Auto");
}
if( !dynamic_range_compression )
{
dynamic_range_compression = strdup("0.0");
}
if( allowed_audio_copy == -1 )
{
allowed_audio_copy = 0;
allowed_audio_copy |= HB_ACODEC_AAC_PASS;
allowed_audio_copy |= HB_ACODEC_AC3_PASS;
allowed_audio_copy |= HB_ACODEC_DCA_HD_PASS;
allowed_audio_copy |= HB_ACODEC_DCA_PASS;
allowed_audio_copy |= HB_ACODEC_MP3_PASS;
allowed_audio_copy &= HB_ACODEC_PASS_MASK;
}
if( acodec_fallback == NULL )
{
acodec_fallback = "ffac3";
}
if (x264_preset == NULL)
{
x264_preset = strdup("veryfast");
}
if (h264_profile == NULL)
{
h264_profile = strdup("main");
}
if (h264_level == NULL)
{
h264_level = strdup("4.0");
}
if( !anamorphic_mode )
{
anamorphic_mode = 2;
}
modulus = 2;
job->chapter_markers = 1;
}
if (!strcasecmp(preset_name, "High Profile"))
{
if( !mux )
{
mux = HB_MUX_MP4;
}
job->largeFileSize = 1;
vcodec = HB_VCODEC_X264;
job->vquality = 20.0;
if( !atracks )
{
atracks = strdup("1,1");
}
if( !acodecs )
{
acodecs = strdup("ffaac,copy:ac3");
}
if( !abitrates )
{
abitrates = str_split("160,160", ',');
}
if( !mixdowns )
{
mixdowns = strdup("dpl2,none");
}
if( !arates )
{
arates = strdup("Auto,Auto");
}
if( !dynamic_range_compression )
{
dynamic_range_compression = strdup("0.0,0.0");
}
if( allowed_audio_copy == -1 )
{
allowed_audio_copy = 0;
allowed_audio_copy |= HB_ACODEC_AAC_PASS;
allowed_audio_copy |= HB_ACODEC_AC3_PASS;
allowed_audio_copy |= HB_ACODEC_DCA_HD_PASS;
allowed_audio_copy |= HB_ACODEC_DCA_PASS;
allowed_audio_copy |= HB_ACODEC_MP3_PASS;
allowed_audio_copy &= HB_ACODEC_PASS_MASK;
}
if( acodec_fallback == NULL )
{
acodec_fallback = "ffac3";
}
if (x264_preset == NULL)
{
x264_preset = strdup("medium");
}
if (h264_profile == NULL)
{
h264_profile = strdup("high");
}
if (h264_level == NULL)
{
h264_level = strdup("4.1");
}
decomb = 1;
if( !anamorphic_mode )
{
anamorphic_mode = 2;
}
modulus = 2;
job->chapter_markers = 1;
}
}
if ( chapter_markers )
{
job->chapter_markers = chapter_markers;
if( marker_file != NULL )
{
hb_csv_file_t * file = hb_open_csv_file( marker_file );
hb_csv_cell_t * cell;
int row = 0;
int chapter = 0;
fprintf( stderr, "Reading chapter markers from file %s\n", marker_file );
if( file == NULL )
{
fprintf( stderr, "Cannot open chapter marker file, using defaults\n" );
}
else
{
/* Parse the cells */
while( NULL != ( cell = hb_read_next_cell( file ) ) )
{
/* We have a chapter number */
if( cell->cell_col == 0 )
{
row = cell->cell_row;
chapter = atoi( cell->cell_text );
}
/* We have a chapter name */
if( cell->cell_col == 1 && row == cell->cell_row )
{
/* If we have a valid chapter, copy the string an terminate it */
if( chapter >= job->chapter_start && chapter <= job->chapter_end )
{
hb_chapter_t * chapter_s;
chapter_s = hb_list_item( job->list_chapter, chapter - 1);
hb_chapter_set_title(chapter_s, cell->cell_text);
}
}
hb_dispose_cell( cell );
}
hb_close_csv_file( file );
}
}
}
if (crop[0] < 0 || crop[1] < 0 || crop[2] < 0 || crop[3] < 0)
{
memcpy(crop, title->crop, sizeof(int[4]));
}
if( loose_crop >= 0 )
{
int mod = modulus > 0 ? modulus : 2;
apply_loose_crop(title->height, &crop[0], &crop[1], mod, loose_crop);
apply_loose_crop(title->width, &crop[2], &crop[3], mod, loose_crop);
}
job->deinterlace = deinterlace;
job->grayscale = grayscale;
hb_filter_object_t * filter;
job->use_detelecine = detelecine;
job->use_decomb = decomb;
/* Add selected filters */
if( detelecine )
{
filter = hb_filter_init( HB_FILTER_DETELECINE );
hb_add_filter( job, filter, detelecine_opt );
}
if( decomb )
{
filter = hb_filter_init( HB_FILTER_DECOMB );
hb_add_filter( job, filter, decomb_opt );
}
if( deinterlace )
{
filter = hb_filter_init( HB_FILTER_DEINTERLACE );
hb_add_filter( job, filter, deinterlace_opt );
}
if( deblock )
{
filter = hb_filter_init( HB_FILTER_DEBLOCK );
hb_add_filter( job, filter, deblock_opt );
}
if( denoise )
{
filter = hb_filter_init( HB_FILTER_DENOISE );
hb_add_filter( job, filter, denoise_opt );
}
if( nlmeans )
{
filter = hb_filter_init( HB_FILTER_NLMEANS );
hb_add_filter( job, filter, nlmeans_opt );
}
if( rotate )
{
filter = hb_filter_init( HB_FILTER_ROTATE );
hb_add_filter( job, filter, rotate_opt);
}
if (use_hwd)
{
job->use_hwd = use_hwd;
}
hb_geometry_t srcGeo, resultGeo;
hb_ui_geometry_t uiGeo;
srcGeo.width = title->width;
srcGeo.height = title->height;
srcGeo.par.num = title->pixel_aspect_width;
srcGeo.par.den = title->pixel_aspect_height;
keep_display_aspect |= anamorphic_mode != HB_ANAMORPHIC_CUSTOM;
uiGeo.mode = job->anamorphic.mode = anamorphic_mode;
if (width != 0 && height != 0)
{
if (anamorphic_mode == HB_ANAMORPHIC_NONE)
{
keep_display_aspect = 0;
}
else
{
uiGeo.mode = HB_ANAMORPHIC_CUSTOM;
}
}
job->anamorphic.keep_display_aspect = keep_display_aspect;
uiGeo.keep = !!keep_display_aspect * HB_KEEP_DISPLAY_ASPECT;
uiGeo.itu_par = job->anamorphic.itu_par = itu_par;
uiGeo.modulus = job->modulus = modulus;
memcpy(uiGeo.crop, crop, sizeof(int[4]));
if (width == 0)
{
uiGeo.width = title->width - crop[2] - crop[3];
}
else
{
uiGeo.keep |= HB_KEEP_WIDTH;
uiGeo.width = width;
}
if (height == 0)
{
uiGeo.height = title->height - crop[0] - crop[1];
}
else
{
uiGeo.keep |= HB_KEEP_HEIGHT;
uiGeo.height = height;
}
uiGeo.maxWidth = maxWidth;
uiGeo.maxHeight = maxHeight;
uiGeo.dar.num = 0;
uiGeo.dar.den = 0;
if( par_width && par_height )
{
uiGeo.par.num = par_width;
uiGeo.par.den = par_height;
}
else if (display_width != 0 && width != 0)
{
if (height != 0)
{
fprintf(stderr, "display_width (%d), width (%d), and height (%d) can not all be specified, ignoring height", display_width, width, height);
}
uiGeo.par.num = display_width;
uiGeo.par.den = width;
}
else if (display_width != 0)
{
uiGeo.dar.num = display_width;
uiGeo.dar.den = uiGeo.height;
}
else
{
uiGeo.par = srcGeo.par;
}
hb_set_anamorphic_size2(&srcGeo, &uiGeo, &resultGeo);
job->width = resultGeo.width;
job->height = resultGeo.height;
job->anamorphic.par_width = resultGeo.par.num;
job->anamorphic.par_height = resultGeo.par.den;
memcpy(job->crop, crop, sizeof(int[4]));
// Add filter that does cropping and scaling
char * filter_str;
filter_str = hb_strdup_printf("%d:%d:%d:%d:%d:%d",
job->width, job->height, crop[0], crop[1], crop[2], crop[3] );
filter = hb_filter_init( HB_FILTER_CROP_SCALE );
hb_add_filter( job, filter, filter_str );
free( filter_str );
// Add framerate shaping filter
if (vrate)
{
filter_cfr = cfr;
filter_vrate = 27000000;
filter_vrate_base = vrate;
}
else if (cfr)
{
// cfr or pfr flag with no rate specified implies
// use the title rate.
filter_cfr = cfr;
filter_vrate = title->rate;
filter_vrate_base = title->rate_base;
}
filter = hb_filter_init(HB_FILTER_VFR);
filter_str = hb_strdup_printf("%d:%d:%d", filter_cfr, filter_vrate,
filter_vrate_base);
hb_add_filter(job, filter, filter_str);
free(filter_str);
// hb_job_init() will set a default muxer for us
// only override it if a specific muxer has been set
// note: the muxer must be set after presets, but before encoders
if (mux)
{
job->mux = mux;
}
// then, muxer options
if (largeFileSize)
{
job->largeFileSize = 1;
}
if (mp4_optimize)
{
job->mp4_optimize = 1;
}
if (ipod_atom)
{
job->ipod_atom = 1;
}
if( vquality >= 0.0 )
{
job->vquality = vquality;
job->vbitrate = 0;
}
else if( vbitrate )
{
job->vquality = -1.0;
job->vbitrate = vbitrate;
}
/* Set video encoder and check muxer compatibility */
if (vcodec)
{
job->vcodec = vcodec;
}
encoder = NULL;
while ((encoder = hb_video_encoder_get_next(encoder)) != NULL)
{
if ((encoder->codec == job->vcodec) &&
(encoder->muxers & job->mux) == 0)
{
hb_error("incompatible video encoder '%s' for muxer '%s'",
hb_video_encoder_get_short_name(job->vcodec),
hb_container_get_short_name (job->mux));
done_error = HB_ERROR_INIT;
die = 1;
return -1;
}
}
#ifdef USE_QSV
if (qsv_async_depth >= 0)
{
job->qsv.async_depth = qsv_async_depth;
}
job->qsv.decode = qsv_decode;
#endif
/* Grab audio tracks */
if( atracks )
{
char * token = strtok( atracks, "," );
if( token == NULL )
token = optarg;
int track_start, track_end;
for( ; token != NULL; token = strtok( NULL, "," ) )
{
if( strlen( token ) >= 3 )
{
if( sscanf( token, "%d-%d", &track_start, &track_end ) == 2 )
{
int i;
for( i = track_start - 1; i < track_end; i++ )
{
if( hb_list_item( title->list_audio, i ) == NULL )
{
fprintf( stderr, "Warning: Could not find audio track %d, skipped\n", i + 1 );
continue;
}
audio = calloc( 1, sizeof( *audio ) );
hb_audio_config_init( audio );
audio->in.track = i;
audio->out.track = num_audio_tracks++;
hb_list_add( audios, audio );
}
}
else if( !strcasecmp(token, "none" ) )
{
audio = calloc( 1, sizeof( *audio ) );
hb_audio_config_init( audio );
audio->in.track = audio->out.track = -1;
audio->out.codec = 0;
hb_list_add( audios, audio );
break;
}
else
{
fprintf( stderr, "ERROR: unable to parse audio input \"%s\", skipping\n",
token);
}
}
else
{
int i = atoi( token ) - 1;
if( hb_list_item( title->list_audio, i ) == NULL )
{
fprintf(stderr,
"Warning: Could not find audio track '%s', skipped\n",
token);
continue;
}
audio = calloc( 1, sizeof( *audio ) );
hb_audio_config_init( audio );
audio->in.track = i;
audio->out.track = num_audio_tracks++;
hb_list_add( audios, audio );
}
}
}
/* Parse audio tracks */
if( native_language && native_dub )
{
if( hb_list_count( audios ) == 0 || !audio_explicit )
{
for( i = 0; i < hb_list_count( title->list_audio ); i++ )
{
int track = i;
audio = hb_list_audio_config_item( title->list_audio, i );
if( cmp_lang( native_language, audio->lang.iso639_2 ) &&
audio->lang.type != 3 && // Directors 1
audio->lang.type != 4) // Directors 2
{
/*
* Matched an audio to our native language - use it.
* Replace any existing audio tracks that a preset may
* have put here.
*/
if( hb_list_count( audios ) == 0 )
{
audio = calloc( 1, sizeof( *audio ) );
hb_audio_config_init( audio );
audio->in.track = track;
audio->out.track = num_audio_tracks++;
/* Add it to our audios */
hb_list_add( audios, audio );
}
else
{
/*
* Update the track numbers on what is already in
* there.
*/
for( i = 0; i < hb_list_count( audios ); i++ )
{
audio = hb_list_item( audios, i );
audio->in.track = track;
}
}
break;
}
}
}
else
{
fprintf( stderr, "Warning: Native language (dubbing) selection ignored since an audio track has already been selected\n" );
}
}
if( hb_list_count(audios) == 0 &&
hb_list_count(title->list_audio) > 0 )
{
/* Create a new audio track with default settings */
audio = calloc( 1, sizeof( *audio ) );
hb_audio_config_init( audio );
/* Add it to our audios */
hb_list_add( audios, audio );
}
tmp_num_audio_tracks = num_audio_tracks = hb_list_count( audios );
for( i = 0; i < tmp_num_audio_tracks; i++ )
{
audio = hb_list_item( audios, 0 );
if( audio == NULL ||
audio->in.track == -1 ||
audio->out.track == -1 ||
audio->out.codec == 0 ||
hb_audio_add( job, audio ) == 0 )
{
num_audio_tracks--;
}
if( audio != NULL )
{
hb_list_rem( audios, audio );
if( audio->out.name )
{
free( audio->out.name );
}
free( audio );
}
}
/* Audio Codecs */
i = 0;
if( acodecs )
{
char * token = strtok(acodecs, ",");
if( token == NULL )
token = acodecs;
while ( token != NULL )
{
if ((acodec = hb_audio_encoder_get_from_name(token)) <= 0)
{
fprintf(stderr, "Invalid codec %s, using default for container.\n", token);
acodec = hb_audio_encoder_get_default(job->mux);
}
if( i < num_audio_tracks )
{
audio = hb_list_audio_config_item(job->list_audio, i);
audio->out.codec = acodec;
}
else
{
hb_audio_config_t * last_audio = hb_list_audio_config_item( job->list_audio, i - 1 );
hb_audio_config_t audio;
if( last_audio )
{
fprintf(stderr, "More audio codecs than audio tracks, copying track %i and using encoder %s\n",
i, token);
hb_audio_config_init(&audio);
audio.in.track = last_audio->in.track;
audio.out.track = num_audio_tracks++;
audio.out.codec = acodec;
hb_audio_add(job, &audio);
}
else
{
fprintf(stderr, "Audio codecs and no valid audio tracks, skipping codec %s\n", token);
}
}
token = strtok(NULL, ",");
i++;
}
}
if( i < num_audio_tracks )
{
/* We have fewer inputs than audio tracks, use the default codec for
* this container for the remaining tracks. Unless we only have one input
* then use that codec instead.
*/
if (i != 1)
acodec = hb_audio_encoder_get_default(job->mux);
for ( ; i < num_audio_tracks; i++)
{
audio = hb_list_audio_config_item(job->list_audio, i);
audio->out.codec = acodec;
}
}
// sanity check muxer compatibility
for (i = 0; i < num_audio_tracks; i++)
{
encoder = NULL;
audio = hb_list_audio_config_item(job->list_audio, i);
if (audio != NULL)
{
while ((encoder = hb_audio_encoder_get_next(encoder)) != NULL)
{
if ((encoder->codec == audio->out.codec) &&
(encoder->muxers & job->mux) == 0)
{
hb_error("audio track %d: incompatible encoder '%s' for muxer '%s'", i + 1,
hb_audio_encoder_get_short_name(audio->out.codec),
hb_container_get_short_name (job->mux));
done_error = HB_ERROR_INIT;
die = 1;
return -1;
}
}
}
}
/* Audio Codecs */
/* Sample Rate */
int auto_sample_rate = 0;
i = 0;
if( arates )
{
char * token = strtok(arates, ",");
if (token == NULL)
token = arates;
while ( token != NULL )
{
audio = hb_list_audio_config_item(job->list_audio, i);
if( audio != NULL )
{
if ( !strcasecmp( token, "auto" ) )
{
arate = audio->in.samplerate;
auto_sample_rate = 1;
}
else
{
arate = hb_audio_samplerate_get_from_name(token);
}
if (arate <= 0)
{
fprintf(stderr,
"Invalid sample rate %s, using input rate %d\n",
token, audio->in.samplerate);
arate = audio->in.samplerate;
}
audio->out.samplerate = arate;
if( (++i) >= num_audio_tracks )
break; /* We have more inputs than audio tracks, oops */
}
else
{
fprintf(stderr, "Ignoring sample rate %d, no audio tracks\n", arate);
}
token = strtok(NULL, ",");
}
}
if (i < num_audio_tracks)
{
/* We have fewer inputs than audio tracks, use default sample rate.
* Unless we only have one input, then use that for all tracks.
*/
int use_default = 0;
if( i != 1 || auto_sample_rate )
use_default = 1;
for ( ; i < num_audio_tracks; i++)
{
audio = hb_list_audio_config_item(job->list_audio, i);
if( use_default )
arate = audio->in.samplerate;
audio->out.samplerate = arate;
}
}
/* Sample Rate */
/* Audio Mixdown */
i = 0;
if ( mixdowns )
{
char * token = strtok(mixdowns, ",");
if (token == NULL)
token = mixdowns;
while ( token != NULL )
{
mixdown = hb_mixdown_get_from_name(token);
audio = hb_list_audio_config_item(job->list_audio, i);
if( audio != NULL )
{
audio->out.mixdown = mixdown;
if( (++i) >= num_audio_tracks )
break; /* We have more inputs than audio tracks, oops */
}
else
{
fprintf(stderr, "Ignoring mixdown, no audio tracks\n");
}
token = strtok(NULL, ",");
}
}
if (i < num_audio_tracks && i == 1)
{
/* We have fewer inputs than audio tracks
* and we only have one input, then use that.
*/
for (; i < num_audio_tracks; i++)
{
audio = hb_list_audio_config_item(job->list_audio, i);
audio->out.mixdown = mixdown;
}
}
/* Audio Mixdown */
/* Audio Bitrate */
i = 0;
if( abitrates )
{
for ( i = 0; abitrates[i] != NULL && i < num_audio_tracks; i++ )
{
char * token = abitrates[i];
abitrate = atoi(token);
audio = hb_list_audio_config_item(job->list_audio, i);
if( audio != NULL )
{
audio->out.bitrate = abitrate;
}
else
{
fprintf(stderr, "Ignoring bitrate %d, no audio tracks\n", abitrate);
}
}
}
if (i < num_audio_tracks && i == 1)
{
/* We have fewer inputs than audio tracks,
* and we only have one input, use
* that for all tracks.
*/
for (; i < num_audio_tracks; i++)
{
audio = hb_list_audio_config_item(job->list_audio, i);
audio->out.bitrate = abitrate;
}
}
/* Audio Bitrate */
/* Audio Quality */
i = 0;
if( aqualities )
{
for ( i = 0; aqualities[i] != NULL && i < num_audio_tracks; i++ )
{
char * token = aqualities[i];
audio = hb_list_audio_config_item(job->list_audio, i);
if( audio == NULL )
{
fprintf(stderr, "Ignoring quality %.3f, no audio tracks\n", aquality);
}
else if( *token != 0 )
{
aquality = atof(token);
audio->out.quality = aquality;
audio->out.bitrate = -1;
}
}
}
if (i < num_audio_tracks && i == 1)
{
/* We have fewer inputs than audio tracks,
* and we only have one input, use
* that for all tracks.
*/
for (; i < num_audio_tracks; i++)
{
audio = hb_list_audio_config_item(job->list_audio, i);
if( audio->out.bitrate <= 0 )
audio->out.quality = aquality;
}
}
/* Audio Quality */
/* Audio Compression Level */
i = 0;
if( acompressions )
{
for ( i = 0; acompressions[i] != NULL && i < num_audio_tracks; i++ )
{
char * token = acompressions[i];
audio = hb_list_audio_config_item(job->list_audio, i);
if( audio == NULL )
{
fprintf(stderr, "Ignoring compression level %.2f, no audio tracks\n", acompression);
}
else if( *token != 0 )
{
acompression = atof(token);
audio->out.compression_level = acompression;
}
}
}
// Compression levels are codec specific values. So don't
// try to apply to other tracks.
/* Audio Compression Level */
/* Audio DRC */
i = 0;
if ( dynamic_range_compression )
{
char * token = strtok(dynamic_range_compression, ",");
if (token == NULL)
token = dynamic_range_compression;
while ( token != NULL )
{
d_r_c = atof(token);
audio = hb_list_audio_config_item(job->list_audio, i);
if( audio != NULL )
{
audio->out.dynamic_range_compression = d_r_c;
if( (++i) >= num_audio_tracks )
break; /* We have more inputs than audio tracks, oops */
}
else
{
fprintf(stderr, "Ignoring drc, no audio tracks\n");
}
token = strtok(NULL, ",");
}
}
if (i < num_audio_tracks)
{
/* We have fewer inputs than audio tracks, use no DRC for the remaining
* tracks. Unless we only have one input, then use the same DRC for all
* tracks.
*/
if (i != 1)
d_r_c = 0;
for (; i < num_audio_tracks; i++)
{
audio = hb_list_audio_config_item(job->list_audio, i);
audio->out.dynamic_range_compression = d_r_c;
}
}
/* Audio DRC */
/* Audio Gain */
i = 0;
if ( audio_gain )
{
char * token = strtok(audio_gain, ",");
if (token == NULL)
token = audio_gain;
while ( token != NULL )
{
gain = atof(token);
audio = hb_list_audio_config_item(job->list_audio, i);
if( audio != NULL )
{
audio->out.gain = gain;
if( (++i) >= num_audio_tracks )
break; /* We have more inputs than audio tracks, oops */
}
else
{
fprintf(stderr, "Ignoring gain, no audio tracks\n");
}
token = strtok(NULL, ",");
}
}
if (i < num_audio_tracks)
{
/* We have fewer inputs than audio tracks, use no gain for the remaining
* tracks. Unless we only have one input, then use the same gain for all
* tracks.
*/
if (i != 1)
gain = 0;
for (; i < num_audio_tracks; i++)
{
audio = hb_list_audio_config_item(job->list_audio, i);
audio->out.gain = gain;
}
}
/* Audio Gain */
/* Audio Dither */
if (audio_dither != NULL)
{
int dither_method = hb_audio_dither_get_default();
for (i = 0; audio_dither[i] != NULL; i++)
{
dither_method = hb_audio_dither_get_from_name(audio_dither[i]);
audio = hb_list_audio_config_item(job->list_audio, i);
if (audio != NULL)
{
if (hb_audio_dither_is_supported(audio->out.codec))
{
audio->out.dither_method = dither_method;
}
else if (dither_method != hb_audio_dither_get_default())
{
fprintf(stderr,
"Ignoring dither %s, not supported by codec\n",
audio_dither[i]);
}
}
else
{
fprintf(stderr, "Ignoring dither %s, no audio tracks\n",
audio_dither[i]);
}
}
if (i < num_audio_tracks && i == 1)
{
/*
* We have fewer inputs than audio tracks, and we only have
* one input: use that for all tracks.
*/
while (i < num_audio_tracks)
{
audio = hb_list_audio_config_item(job->list_audio, i);
if (hb_audio_dither_is_supported(audio->out.codec))
{
audio->out.dither_method = dither_method;
}
else if (dither_method != hb_audio_dither_get_default())
{
fprintf(stderr,
"Ignoring dither %s, not supported by codec\n",
audio_dither[0]);
}
i++;
}
}
}
/* Audio Dither */
/* Audio Mix Normalization */
i = 0;
int norm = 0;
if( normalize_mix_level )
{
for ( i = 0; normalize_mix_level[i] != NULL && i < num_audio_tracks; i++ )
{
char * token = normalize_mix_level[i];
norm = atoi(token);
audio = hb_list_audio_config_item(job->list_audio, i);
if( audio != NULL )
{
audio->out.normalize_mix_level = norm;
}
else
{
fprintf(stderr, "Ignoring normalization %d, no audio tracks\n", norm);
}
}
}
if (i < num_audio_tracks && i == 1)
{
/* We have fewer inputs than audio tracks,
* and we only have one input, use
* that for all tracks.
*/
for (; i < num_audio_tracks; i++)
{
audio = hb_list_audio_config_item(job->list_audio, i);
audio->out.normalize_mix_level = norm;
}
}
/* Audio Mix Normalization */
/* Audio Track Names */
if ( anames )
{
char * token;
for ( i = 0; anames[i] != NULL && i < num_audio_tracks; i++ )
{
token = anames[i];
if ( *token )
{
audio = hb_list_audio_config_item(job->list_audio, i);
if( audio != NULL )
{
audio->out.name = strdup(token);
}
else
{
fprintf(stderr, "Ignoring aname '%s', no audio track\n",
token);
}
}
}
}
if( i < num_audio_tracks && i == 1 )
{
/* We have exactly one name and more than one audio track. Use the same
* name for all tracks. */
for ( ; i < num_audio_tracks; i++)
{
audio = hb_list_audio_config_item(job->list_audio, i);
audio->out.name = strdup(anames[0]);
}
}
/* Audio Track Names */
/* Sanitize passthru (drop/fallback if necessary) */
for( i = 0; i < hb_list_count( job->list_audio ); )
{
audio = hb_list_audio_config_item( job->list_audio, i );
if( audio->out.codec == HB_ACODEC_AUTO_PASS )
{
// Auto Passthru
job->acodec_copy_mask = allowed_audio_copy == -1 ? HB_ACODEC_PASS_MASK : allowed_audio_copy;
job->acodec_fallback = hb_audio_encoder_get_from_name(acodec_fallback);
}
else if( ( audio->out.codec & HB_ACODEC_PASS_FLAG ) &&
!( audio->out.codec & audio->in.codec & HB_ACODEC_PASS_MASK ) )
{
// passthru fallbacks
int requested_passthru = audio->out.codec;
audio->out.codec =
hb_audio_encoder_get_fallback_for_passthru(requested_passthru);
if (!(audio->out.codec & HB_ACODEC_MASK))
{
// Passthru not possible, drop audio.
fprintf(stderr,
"Passthru requested and input codec is not the same as output codec for track %d, dropping track\n",
audio->out.track);
hb_audio_t *item = hb_list_item(job->list_audio, i);
hb_list_rem(job->list_audio, item);
hb_audio_close(&item);
continue;
}
fprintf(stderr,
"%s requested and input codec is not compatible for track %d, using %s encoder\n",
hb_audio_encoder_get_name(requested_passthru), audio->out.track,
hb_audio_encoder_get_name(audio->out.codec));
}
// we didn't drop the track
i++;
}
if( subtracks )
{
char * token;
int i;
int burnpos = 0, defaultpos = 0;
if ( subburn )
burnpos = strtol( subburn, NULL, 0 );
if ( subdefault )
defaultpos = strtol( subdefault, NULL, 0 );
for ( i = 0; subtracks[i] != NULL; i++ )
{
token = subtracks[i];
if( strcasecmp(token, "scan" ) == 0 )
{
int burn = 0, force = 0, def = 0;
if ( subburn != NULL )
{
burn = ( i == 0 && subburn[0] == 0 ) ||
( burnpos == i+1 );
}
if ( subdefault != NULL )
{
def = ( i == 0 && subdefault[0] == 0 ) ||
( defaultpos == i+1 );
}
force = test_sub_list( subforce, i+1 );
if ( !burn )
{
job->select_subtitle_config.dest = PASSTHRUSUB;
}
else
{
if ( sub_burned )
{
continue;
}
sub_burned = 1;
}
job->select_subtitle_config.force = force;
job->select_subtitle_config.default_track = def;
subtitle_scan = 1;
}
else
{
hb_subtitle_t * subtitle;
hb_subtitle_config_t sub_config;
int track;
int burn = 0, force = 0, def = 0;
track = atoi(token) - 1;
subtitle = hb_list_item(title->list_subtitle, track);
if( subtitle == NULL )
{
fprintf(stderr,
"Warning: Could not find subtitle track '%s', skipped\n",
token);
continue;
}
sub_config = subtitle->config;
if ( subburn != NULL )
{
burn = ( i == 0 && subburn[0] == 0 ) ||
( burnpos == i+1 );
}
if ( subdefault != NULL )
{
def = ( i == 0 && subdefault[0] == 0 ) ||
( defaultpos == i+1 );
}
force = test_sub_list(subforce, i+1);
int supports_burn = hb_subtitle_can_burn( subtitle->source );
if ( ( burn && supports_burn ) ||
!hb_subtitle_can_pass( subtitle->source, mux ) )
{
// Only allow one subtitle to be burned into video
if ( sub_burned )
{
fprintf( stderr, "Warning: Skipping subtitle track %d, can't have more than one track burnt in\n", track+1 );
continue;
}
sub_burned = 1;
// Mark as burn-in
sub_config.dest = RENDERSUB;
}
else
{
sub_config.dest = PASSTHRUSUB;
}
sub_config.force = force;
sub_config.default_track = def;
hb_subtitle_add( job, &sub_config, track );
}
}
}
if( srtfile )
{
int i;
hb_subtitle_config_t sub_config;
for( i=0; srtfile[i] != NULL; i++ )
{
char *codeset = "L1";
int64_t offset = 0;
char *lang = "und";
if (srtburn == i + 1 && hb_subtitle_can_burn(SRTSUB))
{
// Only allow one subtitle to be burned into video
if ( sub_burned )
{
fprintf( stderr, "Warning: Skipping SRT track %d, can't have more than one track burnt in\n", i+1 );
continue;
}
sub_burned = 1;
// Mark as burn-in
sub_config.dest = RENDERSUB;
}
else
{
sub_config.dest = PASSTHRUSUB;
}
if( srtcodeset && srtcodeset[i] )
{
codeset = srtcodeset[i];
}
if( srtoffset && srtoffset[i] )
{
offset = strtoll( srtoffset[i], &srtoffset[i], 0 );
}
if ( srtlang && srtlang[i] )
{
lang = srtlang[i];
}
sub_config.default_track = srtdefault == i + 1;
sub_config.force = 0;
strncpy( sub_config.src_filename, srtfile[i], 255);
sub_config.src_filename[255] = 0;
strncpy( sub_config.src_codeset, codeset, 39);
sub_config.src_codeset[39] = 0;
sub_config.offset = offset;
hb_srt_add( job, &sub_config, lang);
}
}
if ( sub_burned )
{
char * filter_str;
filter_str = hb_strdup_printf("%d:%d:%d:%d",
crop[0], crop[1], crop[2], crop[3] );
filter = hb_filter_init( HB_FILTER_RENDER_SUB );
hb_add_filter( job, filter, filter_str);
free( filter_str );
}
if( native_language )
{
audio = hb_list_audio_config_item(job->list_audio, 0);
if( audio )
{
if( !cmp_lang( native_language, audio->lang.iso639_2 ) )
{
/*
* Audio language is not the same as our native language.
* If we have any subtitles in our native language they
* should be selected here if they haven't already been.
*/
hb_subtitle_t *subtitle, *subtitle2 = NULL;
int matched_track = 0;
for( i = 0; i < hb_list_count( title->list_subtitle ); i++ )
{
subtitle = hb_list_item( title->list_subtitle, i );
matched_track = i;
if (cmp_lang(native_language, subtitle->iso639_2))
{
/*
* Found the first matching subtitle in our
* native language. Is it already selected?
*/
for( i = 0; i < hb_list_count( job->list_subtitle ); i++ )
{
subtitle2 = hb_list_item( job->list_subtitle, i );
if( subtitle2->track == subtitle->track) {
/*
* Already selected
*/
break;
}
subtitle2 = NULL;
}
if( subtitle2 == NULL )
{
/*
* Not already selected, so select it.
*/
hb_subtitle_config_t sub_config;
if( native_dub )
{
fprintf( stderr, "Warning: no matching audio for native language - using subtitles instead.\n");
}
sub_config = subtitle->config;
if ((mux & HB_MUX_MASK_MKV) || subtitle->format == TEXTSUB)
{
sub_config.dest = PASSTHRUSUB;
}
sub_config.force = 0;
sub_config.default_track = 1;
hb_subtitle_add( job, &sub_config, matched_track);
}
/*
* Stop searching.
*/
break;
}
}
}
}
}
hb_job_set_file( job, output );
if( color_matrix_code )
{
job->color_matrix_code = color_matrix_code;
}
hb_job_set_encoder_preset (job, x264_preset);
hb_job_set_encoder_tune (job, x264_tune);
hb_job_set_encoder_profile(job, h264_profile);
hb_job_set_encoder_level (job, h264_level);
if (maxWidth)
job->maxWidth = maxWidth;
if (maxHeight)
job->maxHeight = maxHeight;
if( start_at_preview )
{
job->start_at_preview = start_at_preview - 1;
job->seek_points = preview_count;
}
if( stop_at_pts )
{
job->pts_to_stop = stop_at_pts;
subtitle_scan = 0;
}
if( stop_at_frame )
{
job->frame_to_stop = stop_at_frame;
subtitle_scan = 0;
}
if( start_at_pts )
{
job->pts_to_start = start_at_pts;
subtitle_scan = 0;
}
if( start_at_frame )
{
job->frame_to_start = start_at_frame;
subtitle_scan = 0;
}
/* OpenCL */
job->use_opencl = use_opencl;
if( subtitle_scan )
{
/*
* When subtitle scan is enabled do a fast pre-scan job
* which will determine which subtitles to enable, if any.
*/
job->pass = -1;
hb_job_set_encoder_options(job, NULL);
job->indepth_scan = subtitle_scan;
fprintf( stderr, "Subtitle Scan Enabled - enabling "
"subtitles if found for foreign language segments\n");
/*
* Add the pre-scan job
*/
hb_add( h, job );
}
hb_job_set_encoder_options(job, advanced_opts);
if( twoPass )
{
/*
* If subtitle_scan is enabled then only turn it on
* for the first pass and then off again for the
* second.
*/
job->pass = 1;
job->indepth_scan = 0;
/* Turbo first pass */
if( turbo_opts_enabled )
{
job->fastfirstpass = 1;
}
else
{
job->fastfirstpass = 0;
}
hb_add( h, job );
job->pass = 2;
/*
* On the second pass we turn off subtitle scan so that we
* can actually encode using any subtitles that were auto
* selected in the first pass (using the whacky select-subtitle
* attribute of the job).
*/
job->indepth_scan = 0;
hb_add( h, job );
}
else
{
/*
* Turn on subtitle scan if requested, note that this option
* precludes encoding of any actual subtitles.
*/
job->indepth_scan = 0;
job->pass = 0;
hb_add( h, job );
}
hb_job_close( &job );
hb_start( h );
break;
}
#define p s.param.working
case HB_STATE_SEARCHING:
fprintf( stdout, "\rEncoding: task %d of %d, Searching for start time, %.2f %%",
p.job_cur, p.job_count, 100.0 * p.progress );
if( p.seconds > -1 )
{
fprintf( stdout, " (ETA %02dh%02dm%02ds)",
p.hours, p.minutes, p.seconds );
}
fflush(stdout);
break;
case HB_STATE_WORKING:
fprintf( stdout, "\rEncoding: task %d of %d, %.2f %%",
p.job_cur, p.job_count, 100.0 * p.progress );
if( p.seconds > -1 )
{
fprintf( stdout, " (%.2f fps, avg %.2f fps, ETA "
"%02dh%02dm%02ds)", p.rate_cur, p.rate_avg,
p.hours, p.minutes, p.seconds );
}
fflush(stdout);
break;
#undef p
#define p s.param.muxing
case HB_STATE_MUXING:
{
if (show_mux_warning)
{
fprintf( stdout, "\rMuxing: this may take awhile..." );
fflush(stdout);
show_mux_warning = 0;
}
break;
}
#undef p
#define p s.param.workdone
case HB_STATE_WORKDONE:
/* Print error if any, then exit */
switch( p.error )
{
case HB_ERROR_NONE:
fprintf( stderr, "\nEncode done!\n" );
break;
case HB_ERROR_CANCELED:
fprintf( stderr, "\nEncode canceled.\n" );
break;
default:
fprintf( stderr, "\nEncode failed (error %x).\n",
p.error );
}
done_error = p.error;
die = 1;
break;
#undef p
}
return 0;
}
/****************************************************************************
* SigHandler:
****************************************************************************/
/* Timestamp (ms, from hb_get_date()) of the first termination signal;
 * lets a later signal detect that the 500 ms grace period has passed. */
static volatile int64_t i_die_date = 0;

/**
 * SigHandler: cooperative shutdown handler.
 *
 * First signal: set the global 'die' flag so the main loop exits cleanly,
 * record when the signal arrived, and tell the user a second signal will
 * force termination. A subsequent signal arriving more than 500 ms after
 * the first aborts the process immediately with the recorded error code.
 *
 * NOTE(review): fprintf() and exit() are not async-signal-safe per POSIX;
 * presumably tolerated here because the process is terminating anyway —
 * confirm against the project's signal-handling policy.
 */
void SigHandler( int i_signal )
{
    done_error = HB_ERROR_CANCELED;

    if( die != 0 )
    {
        /* Repeat signal: force an exit once the grace period after the
         * first signal has elapsed; otherwise keep waiting for the clean
         * shutdown already in progress. */
        if( hb_get_date() > i_die_date + 500 )
        {
            fprintf( stderr, "Dying badly, files might remain in your /tmp\n" );
            exit( done_error );
        }
        return;
    }

    /* First signal: flag the main loop to stop and remember when. */
    die        = 1;
    i_die_date = hb_get_date();
    fprintf( stderr, "Signal %d received, terminating - do it "
             "again in case it gets stuck\n", i_signal );
}
/****************************************************************************
* ShowHelp:
****************************************************************************/
static void ShowHelp()
{
int i;
const char *name;
const hb_rate_t *rate;
const hb_dither_t *dither;
const hb_mixdown_t *mixdown;
const hb_encoder_t *encoder;
const hb_container_t *container;
FILE* const out = stdout;
fprintf( out,
"Syntax: HandBrakeCLI [options] -i -o \n"
"\n"
"### General Handbrake Options------------------------------------------------\n\n"
" -h, --help Print help\n"
" -u, --update Check for updates and exit\n"
" -v, --verbose <#> Be verbose (optional argument: logging level)\n"
" -Z. --preset Use a built-in preset. Capitalization matters, and\n"
" if the preset name has spaces, surround it with\n"
" double quotation marks\n"
" -z, --preset-list See a list of available built-in presets\n"
" --no-dvdnav Do not use dvdnav for reading DVDs\n"
" --no-opencl Disable use of OpenCL\n"
"\n"
"### Source Options-----------------------------------------------------------\n\n"
" -i, --input Set input device\n"
" -t, --title Select a title to encode (0 to scan all titles only,\n"
" default: 1)\n"
" --min-duration Set the minimum title duration (in seconds). Shorter\n"
" titles will not be scanned (default: 10).\n"
" --scan Scan selected title only.\n"
" --main-feature Detect and select the main feature title.\n"
" -c, --chapters Select chapters (e.g. \"1-3\" for chapters\n"
" 1 to 3, or \"3\" for chapter 3 only,\n"
" default: all chapters)\n"
" --angle Select the video angle (DVD or Blu-ray only)\n"
" --previews <#:B> Select how many preview images are generated,\n"
" and whether or not they're stored to disk (0 or 1).\n"
" (default: 10:0)\n"
" --start-at-preview <#> Start encoding at a given preview.\n"
" --start-at Start encoding at a given frame, duration (in seconds),\n"
" or pts (on a 90kHz clock)\n"
" --stop-at Stop encoding at a given frame, duration (in seconds),\n"
" or pts (on a 90kHz clock)"
"\n"
"### Destination Options------------------------------------------------------\n\n"
" -o, --output Set output file name\n"
" -f, --format Set output container format (");
container = NULL;
while ((container = hb_container_get_next(container)) != NULL)
{
fprintf(out, "%s", container->short_name);
if (hb_container_get_next(container) != NULL)
{
fprintf(out, "/");
}
else
{
fprintf(out, ")\n");
}
}
fprintf(out,
" (default: autodetected from file name)\n"
" -m, --markers Add chapter markers\n"
" -O, --optimize Optimize mp4 files for HTTP streaming (\"fast start\")\n"
" -I, --ipod-atom Mark mp4 files so 5.5G iPods will accept them\n"
" -P, --use-opencl Use OpenCL where applicable\n"
" -U, --use-hwd Use DXVA2 hardware decoding\n"
"\n"
"### Video Options------------------------------------------------------------\n\n"
" -e, --encoder Set video library encoder\n"
" Options: " );
name = NULL;
encoder = NULL;
while ((encoder = hb_video_encoder_get_next(encoder)) != NULL)
{
fprintf(out, "%s", encoder->short_name);
if (hb_video_encoder_get_next(encoder) != NULL)
{
fprintf(out, "/");
}
else
{
fprintf(out, "\n");
}
if (encoder->codec == vcodec)
{
name = encoder->short_name;
}
}
fprintf(out, " (default: %s)\n", name);
fprintf(out,
" --encoder-preset Adjust video encoding settings for a particular\n"
" speed/efficiency tradeoff (encoder-specific)\n"
" --encoder-preset-list List supported --encoder-preset values for the\n"
" specified video encoder\n"
" --encoder-tune Adjust video encoding settings for a particular\n"
" type of souce or situation (encoder-specific)\n"
" --encoder-tune-list List supported --encoder-tune values for the\n"
" specified video encoder\n"
" -x, --encopts Specify advanced encoding options in the same\n"
" style as mencoder (all encoders except theora):\n"
" option1=value1:option2=value2\n"
" --encoder-profile Ensures compliance with the requested codec\n"
" profile (encoder-specific)\n"
" --encoder-profile-list List supported --encoder-profile values for the\n"
" specified video encoder\n"
" --encoder-level Ensures compliance with the requested codec\n"
" level (encoder-specific)\n"
" --encoder-level-list List supported --encoder-level values for the\n"
" specified video encoder\n"
" -q, --quality Set video quality\n"
" -b, --vb Set video bitrate (default: 1000)\n"
" -2, --two-pass Use two-pass mode\n"
" -T, --turbo When using 2-pass use \"turbo\" options on the\n"
" 1st pass to improve speed (only works with x264)\n"
" -r, --rate Set video framerate (" );
rate = NULL;
while ((rate = hb_video_framerate_get_next(rate)) != NULL)
{
fprintf(out, "%s", rate->name);
if (hb_video_framerate_get_next(rate) != NULL)
{
fprintf(out, "/");
}
}
fprintf( out, ")\n"
" Be aware that not specifying a framerate lets\n"
" HandBrake preserve a source's time stamps,\n"
" potentially creating variable framerate video\n"
" --vfr, --cfr, --pfr Select variable, constant or peak-limited\n"
" frame rate control. VFR preserves the source\n"
" timing. CFR makes the output constant rate at\n"
" the rate given by the -r flag (or the source's\n"
" average rate if no -r is given). PFR doesn't\n"
" allow the rate to go over the rate specified\n"
" with the -r flag but won't change the source\n"
" timing if it's below that rate.\n"
" If none of these flags are given, the default\n"
" is --cfr when -r is given and --vfr otherwise\n"
"\n"
"### Audio Options-----------------------------------------------------------\n\n"
" -a, --audio Select audio track(s), separated by commas\n"
" (\"none\" for no audio, \"1,2,3\" for multiple\n"
" tracks, default: first one).\n"
" Multiple output tracks can be used for one input.\n"
" -E, --aencoder Audio encoder(s):\n" );
    // Print one supported audio encoder short name per line under the
    // "--aencoder" heading.
    encoder = NULL;
    while ((encoder = hb_audio_encoder_get_next(encoder)) != NULL)
    {
        fprintf(out, " %s\n",
            encoder->short_name);
    }
    // Passthrough ("copy:*") usage notes, then the "Defaults:" heading
    // for the per-container table printed below.
    fprintf(out,
    " copy:* will passthrough the corresponding\n"
    " audio unmodified to the muxer if it is a\n"
    " supported passthrough audio type.\n"
    " Separated by commas for more than one audio track.\n"
    " Defaults:\n");
    // For each output container, show its default audio encoder.
    container = NULL;
    while ((container = hb_container_get_next(container)) != NULL)
    {
        int audio_encoder = hb_audio_encoder_get_default(container->format);
        fprintf(out, " %-8s %s\n",
                container->short_name,
                hb_audio_encoder_get_short_name(audio_encoder));
    }
    // Open the "--audio-copy-mask" help line; the '/'-separated codec
    // list is appended by the loop below.
    fprintf(out,
    " --audio-copy-mask Set audio codecs that are permitted when the\n"
    " \"copy\" audio encoder option is specified\n"
    " (" );
    // List the codec-specific passthrough encoders ("copy:<codec>"),
    // excluding the generic auto-passthrough entry; 'i' tracks whether a
    // '/' separator is needed before the next name.
    i = 0;
    encoder = NULL;
    while ((encoder = hb_audio_encoder_get_next(encoder)) != NULL)
    {
        if ((encoder->codec & HB_ACODEC_PASS_FLAG) &&
            (encoder->codec != HB_ACODEC_AUTO_PASS))
        {
            if (i)
            {
                fprintf(out, "/");
            }
            i = 1;
            // skip "copy:"
            fprintf(out, "%s", encoder->short_name + 5);
        }
    }
    // Static help text: close the copy-mask list, then bitrate/quality/
    // compression options, ending at the "--mixdown" heading (the mixdown
    // list follows).
    fprintf(out, ", default: all).\n"
    " Separated by commas for multiple allowed options.\n"
    " --audio-fallback Set audio codec to use when it is not possible\n"
    " to copy an audio track without re-encoding.\n"
    " -B, --ab Set audio bitrate(s) (default: depends on the\n"
    " selected codec, mixdown and samplerate)\n"
    " Separated by commas for more than one audio track.\n"
    " -Q, --aq Set audio quality metric (default: depends on the\n"
    " selected codec)\n"
    " Separated by commas for more than one audio track.\n"
    " -C, --ac Set audio compression metric (default: depends on the\n"
    " selected codec)\n"
    " Separated by commas for more than one audio track.\n"
    " -6, --mixdown Format(s) for audio downmixing/upmixing:\n");
    // skip HB_AMIXDOWN_NONE
    // (hb_mixdown_get_next(NULL) yields the first entry; the loop
    // condition advances past it before the first print.)
    mixdown = hb_mixdown_get_next(NULL);
    while((mixdown = hb_mixdown_get_next(mixdown)) != NULL)
    {
        fprintf(out, " %s\n",
            mixdown->short_name);
    }
    fprintf(out,
    " Separated by commas for more than one audio track.\n"
    " Defaults:\n");
    // For each non-passthrough audio encoder, show the best (default)
    // mixdown it supports.
    encoder = NULL;
    while((encoder = hb_audio_encoder_get_next(encoder)) != NULL)
    {
        if (!(encoder->codec & HB_ACODEC_PASS_FLAG))
        {
            // layout: UINT64_MAX (all channels) should work with any mixdown
            int mixdown = hb_mixdown_get_default(encoder->codec, UINT64_MAX);
            // assumes that the encoder short name is <= 16 characters long
            fprintf(out, " %-16s up to %s\n",
                    encoder->short_name, hb_mixdown_get_short_name(mixdown));
        }
    }
    // Static help text for --normalize-mix, then open the "--arate" line;
    // the '/'-separated samplerate list is appended by the loop below.
    fprintf(out,
    " --normalize-mix Normalize audio mix levels to prevent clipping.\n"
    " Separated by commas for more than one audio track.\n"
    " 0 = Disable Normalization (default)\n"
    " 1 = Enable Normalization\n"
    " -R, --arate Set audio samplerate(s) (" );
    // List every supported audio samplerate, '/'-separated.
    rate = NULL;
    while ((rate = hb_audio_samplerate_get_next(rate)) != NULL)
    {
        fprintf(out, "%s", rate->name);
        // Separator only between entries, not after the last one.
        if (hb_audio_samplerate_get_next(rate) != NULL)
        {
            fprintf(out, "/");
        }
    }
    // Close the samplerate list, then --drc/--gain/--adither help text,
    // ending mid-line so the dither-capable encoder list can be appended.
    fprintf( out, " kHz)\n"
    " Separated by commas for more than one audio track.\n"
    " -D, --drc Apply extra dynamic range compression to the audio,\n"
    " making soft sounds louder. Range is 1.0 to 4.0\n"
    " (too loud), with 1.5 - 2.5 being a useful range.\n"
    " Separated by commas for more than one audio track.\n"
    " --gain Amplify or attenuate audio before encoding. Does\n"
    " NOT work with audio passthru (copy). Values are in\n"
    " dB. Negative values attenuate, positive values\n"
    " amplify. A 1 dB difference is barely audible.\n"
    " --adither Apply dithering to the audio before encoding.\n"
    " Separated by commas for more than one audio track.\n"
    " Only supported by some encoders (");
    // '/'-separated list of encoders that support dithering; 'i' tracks
    // whether a separator is needed before the next name.
    i = 0;
    encoder = NULL;
    while ((encoder = hb_audio_encoder_get_next(encoder)) != NULL)
    {
        if (hb_audio_dither_is_supported(encoder->codec))
        {
            if (i)
            {
                fprintf(out, "/");
            }
            i = 1;
            fprintf(out, "%s", encoder->short_name);
        }
    }
    fprintf(out, ").\n");
    fprintf(out,
    " Options:\n");
    // One dither method per line, tagging the library default.
    dither = NULL;
    while ((dither = hb_audio_dither_get_next(dither)) != NULL)
    {
        if (dither->method == hb_audio_dither_get_default())
        {
            fprintf(out, " %s (default)\n",
                    dither->short_name);
        }
        else
        {
            fprintf(out, " %s\n",
                    dither->short_name);
        }
    }
fprintf(out,
" -A, --aname Audio track name(s),\n"
" Separated by commas for more than one audio track.\n"
"\n"
"### Picture Settings---------------------------------------------------------\n\n"
" -w, --width Set picture width\n"
" -l, --height Set picture height\n"
" --crop Set cropping values (default: autocrop)\n"
" --loose-crop <#> Always crop to a multiple of the modulus\n"
" Specifies the maximum number of extra pixels\n"
" which may be cropped (default: 15)\n"
" -Y, --maxHeight <#> Set maximum height\n"
" -X, --maxWidth <#> Set maximum width\n"
" --strict-anamorphic Store pixel aspect ratio in video stream\n"
" --loose-anamorphic Store pixel aspect ratio with specified width\n"
" --custom-anamorphic Store pixel aspect ratio in video stream and\n"
" directly control all parameters.\n"
" --display-width Set the width to scale the actual pixels to\n"
" at playback, for custom anamorphic.\n"
" --keep-display-aspect Preserve the source's display aspect ratio\n"
" when using custom anamorphic\n"
" --pixel-aspect Set a custom pixel aspect for custom anamorphic\n"
" \n"
" (--display-width and --pixel-aspect are mutually\n"
" exclusive and the former will override the latter)\n"
" --itu-par Use wider, ITU pixel aspect values for loose and\n"
" custom anamorphic, useful with underscanned sources\n"
" --modulus Set the number you want the scaled pixel dimensions\n"
" to divide cleanly by. Does not affect strict\n"
" anamorphic mode, which is always mod 2 (default: 16)\n"
" -M, --color-matrix Set the color space signaled by the output\n"
" Values: 709, pal, ntsc, 601 (same as ntsc)\n"
" (default: detected from source)\n"
"\n"
"### Filters---------------------------------------------------------\n\n"
" -d, --deinterlace Unconditionally deinterlaces all frames\n"
" or omitted (default settings)\n"
" or\n"
" (default 0:-1)\n"
" -5, --decomb Selectively deinterlaces when it detects combing\n"
" or omitted (default settings)\n"
" or\n"
" \n"
" (default: 7:2:6:9:80:16:16:10:20:20:4:2:50:24:1:-1)\n"
" -9, --detelecine Detelecine (ivtc) video with pullup filter\n"
" Note: this filter drops duplicate frames to\n"
" restore the pre-telecine framerate, unless you\n"
" specify a constant framerate (--rate 29.97)\n"
" (default 1:1:4:4:0:0:-1)\n"
" -8, --denoise Denoise video with hqdn3d filter\n"
" or omitted (default settings)\n"
" or\n"
" \n"
" (default: 4:3:3:6:4.5:4.5)\n"
" --nlmeans Denoise video with nlmeans filter\n"
" or omitted\n"
" or\n"
" \n"
" (default 8:1:7:3:2:0)\n"
" --nlmeans-tune Tune nlmeans filter to content type\n"
" Note: only works in conjunction with presets\n"
" ultralight/light/medium/strong.\n"
" or omitted (default none)\n"
" -7, --deblock Deblock video with pp7 filter\n"
" (default 5:2)\n"
" --rotate Rotate image or flip its axes.\n"
" Modes: (can be combined)\n"
" 1 vertical flip\n"
" 2 horizontal flip\n"
" 4 rotate clockwise 90 degrees\n"
" Default: 3 (vertical and horizontal flip)\n"
" -g, --grayscale Grayscale encoding\n"
"\n"
"### Subtitle Options------------------------------------------------------------\n\n"
" -s, --subtitle Select subtitle track(s), separated by commas\n"
" More than one output track can be used for one\n"
" input.\n"
" Example: \"1,2,3\" for multiple tracks.\n"
" A special track name \"scan\" adds an extra 1st pass.\n"
" This extra pass scans subtitles matching the\n"
" language of the first audio or the language \n"
" selected by --native-language.\n"
" The one that's only used 10 percent of the time\n"
" or less is selected. This should locate subtitles\n"
" for short foreign language segments. Best used in\n"
" conjunction with --subtitle-forced.\n"
" -F, --subtitle-forced Only display subtitles from the selected stream if\n"
" the subtitle has the forced flag set. The values in\n"
" \"string\" are indexes into the subtitle list\n"
" specified with '--subtitle'.\n"
" Separated by commas for more than one subtitle track.\n"
" Example: \"1,2,3\" for multiple tracks.\n"
" If \"string\" is omitted, the first track is forced.\n"
" --subtitle-burned \"Burn\" the selected subtitle into the video track\n"
" If \"number\" is omitted, the first track is burned.\n"
" \"number\" is an index into the subtitle list\n"
" specified with '--subtitle'.\n"
" --subtitle-default Flag the selected subtitle as the default subtitle\n"
" to be displayed upon playback. Setting no default\n"
" means no subtitle will be automatically displayed\n"
" If \"number\" is omitted, the first track is default.\n"
" \"number\" is an index into the subtitle list\n"
" specified with '--subtitle'.\n"
" -N, --native-language Specifiy your language preference. When the first\n"
" audio track does not match your native language then\n"
" select the first subtitle that does. When used in\n"
" conjunction with --native-dub the audio track is\n"
" changed in preference to subtitles. Provide the\n"
" language's iso639-2 code (fre, eng, spa, dut, et cetera)\n"
" --native-dub Used in conjunction with --native-language\n"
" requests that if no audio tracks are selected the\n"
" default selected audio track will be the first one\n"
" that matches the --native-language. If there are no\n"
" matching audio tracks then the first matching\n"
" subtitle track is used instead.\n"
" --srt-file SubRip SRT filename(s), separated by commas.\n"
" --srt-codeset Character codeset(s) that the SRT file(s) are\n"
" encoded in, separated by commas.\n"
" Use 'iconv -l' for a list of valid\n"
" codesets. If not specified, 'latin1' is assumed\n"
" --srt-offset Offset (in milliseconds) to apply to the SRT file(s),\n"
" separated by commas. If not specified, zero is assumed.\n"
" Offsets may be negative.\n"
" --srt-lang Language as an iso639-2 code fra, eng, spa et cetera)\n"
" for the SRT file(s), separated by commas. If not specified,\n"
" then 'und' is used.\n"
" --srt-default Flag the selected srt as the default subtitle\n"
" to be displayed upon playback. Setting no default\n"
" means no subtitle will be automatically displayed\n"
" If \"number\" is omitted, the first srt is default.\n"
" \"number\" is an 1 based index into the srt-file list\n"
" --srt-burn \"Burn\" the selected srt subtitle into the video track\n"
"