instruction (stringclasses, 1 value) | input (stringlengths, 90 to 114k) | output (stringlengths, 16 to 114k) | __index_level_0__ (int64, 165k to 175k) |
---|---|---|---|
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: ext4_xattr_put_super(struct super_block *sb)
{
mb_cache_shrink(sb->s_bdev);
}
Commit Message: ext4: convert to mbcache2
The conversion is generally straightforward. The only tricky part is
that xattr block corresponding to found mbcache entry can get freed
before we get buffer lock for that block. So we have to check whether
the entry is still valid after getting buffer lock.
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
CWE ID: CWE-19 | ext4_xattr_put_super(struct super_block *sb)
| 169,995 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void fe_netjoin_deinit(void)
{
while (joinservers != NULL)
netjoin_server_remove(joinservers->data);
if (join_tag != -1) {
g_source_remove(join_tag);
signal_remove("print starting", (SIGNAL_FUNC) sig_print_starting);
}
signal_remove("setup changed", (SIGNAL_FUNC) read_settings);
signal_remove("message quit", (SIGNAL_FUNC) msg_quit);
signal_remove("message join", (SIGNAL_FUNC) msg_join);
signal_remove("message irc mode", (SIGNAL_FUNC) msg_mode);
}
Commit Message: Merge branch 'netjoin-timeout' into 'master'
fe-netjoin: remove irc servers on "server disconnected" signal
Closes #7
See merge request !10
CWE ID: CWE-416 | void fe_netjoin_deinit(void)
{
while (joinservers != NULL)
netjoin_server_remove(joinservers->data);
if (join_tag != -1) {
g_source_remove(join_tag);
signal_remove("print starting", (SIGNAL_FUNC) sig_print_starting);
}
signal_remove("setup changed", (SIGNAL_FUNC) read_settings);
signal_remove("server disconnected", (SIGNAL_FUNC) sig_server_disconnected);
signal_remove("message quit", (SIGNAL_FUNC) msg_quit);
signal_remove("message join", (SIGNAL_FUNC) msg_join);
signal_remove("message irc mode", (SIGNAL_FUNC) msg_mode);
}
| 168,290 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: WORD32 ih264d_create(iv_obj_t *dec_hdl, void *pv_api_ip, void *pv_api_op)
{
ih264d_create_op_t *ps_create_op;
WORD32 ret;
ps_create_op = (ih264d_create_op_t *)pv_api_op;
ps_create_op->s_ivd_create_op_t.u4_error_code = 0;
ret = ih264d_allocate_static_bufs(&dec_hdl, pv_api_ip, pv_api_op);
/* If allocation of some buffer fails, then free buffers allocated till then */
if((IV_FAIL == ret) && (NULL != dec_hdl))
{
ih264d_free_static_bufs(dec_hdl);
ps_create_op->s_ivd_create_op_t.u4_error_code = IVD_MEM_ALLOC_FAILED;
ps_create_op->s_ivd_create_op_t.u4_error_code = 1 << IVD_FATALERROR;
return IV_FAIL;
}
return IV_SUCCESS;
}
Commit Message: Decoder: Handle dec_hdl memory allocation failure gracefully
If memory allocation for dec_hdl fails, return gracefully
with an error code. All other allocation failures are
handled correctly.
Bug: 68300072
Test: ran poc before/after
Change-Id: I118ae71f4aded658441f1932bd4ede3536f5028b
(cherry picked from commit 7720b3fe3de04523da3a9ecec2b42a3748529bbd)
CWE ID: CWE-770 | WORD32 ih264d_create(iv_obj_t *dec_hdl, void *pv_api_ip, void *pv_api_op)
{
ih264d_create_ip_t *ps_create_ip;
ih264d_create_op_t *ps_create_op;
WORD32 ret;
ps_create_ip = (ih264d_create_ip_t *)pv_api_ip;
ps_create_op = (ih264d_create_op_t *)pv_api_op;
ps_create_op->s_ivd_create_op_t.u4_error_code = 0;
dec_hdl = NULL;
ret = ih264d_allocate_static_bufs(&dec_hdl, pv_api_ip, pv_api_op);
/* If allocation of some buffer fails, then free buffers allocated till then */
if(IV_FAIL == ret)
{
if(dec_hdl)
{
if(dec_hdl->pv_codec_handle)
{
ih264d_free_static_bufs(dec_hdl);
}
else
{
void (*pf_aligned_free)(void *pv_mem_ctxt, void *pv_buf);
void *pv_mem_ctxt;
pf_aligned_free = ps_create_ip->s_ivd_create_ip_t.pf_aligned_free;
pv_mem_ctxt = ps_create_ip->s_ivd_create_ip_t.pv_mem_ctxt;
pf_aligned_free(pv_mem_ctxt, dec_hdl);
}
}
ps_create_op->s_ivd_create_op_t.u4_error_code = IVD_MEM_ALLOC_FAILED;
ps_create_op->s_ivd_create_op_t.u4_error_code = 1 << IVD_FATALERROR;
return IV_FAIL;
}
return IV_SUCCESS;
}
| 174,112 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: main( int argc,
char* argv[] )
{
int old_ptsize, orig_ptsize, file;
int first_glyph = 0;
int XisSetup = 0;
char* execname;
int option;
int file_loaded;
grEvent event;
execname = ft_basename( argv[0] );
while ( 1 )
{
option = getopt( argc, argv, "d:e:f:r:" );
if ( option == -1 )
break;
switch ( option )
{
case 'd':
parse_design_coords( optarg );
break;
case 'e':
encoding = (FT_Encoding)make_tag( optarg );
break;
case 'f':
first_glyph = atoi( optarg );
break;
case 'r':
res = atoi( optarg );
if ( res < 1 )
usage( execname );
break;
default:
usage( execname );
break;
}
}
argc -= optind;
argv += optind;
if ( argc <= 1 )
usage( execname );
if ( sscanf( argv[0], "%d", &orig_ptsize ) != 1 )
orig_ptsize = 64;
file = 1;
/* Initialize engine */
error = FT_Init_FreeType( &library );
if ( error )
PanicZ( "Could not initialize FreeType library" );
NewFile:
ptsize = orig_ptsize;
hinted = 1;
file_loaded = 0;
/* Load face */
error = FT_New_Face( library, argv[file], 0, &face );
if ( error )
goto Display_Font;
if ( encoding != FT_ENCODING_NONE )
{
error = FT_Select_Charmap( face, encoding );
if ( error )
goto Display_Font;
}
/* retrieve multiple master information */
error = FT_Get_MM_Var( face, &multimaster );
if ( error )
goto Display_Font;
/* if the user specified a position, use it, otherwise */
/* set the current position to the median of each axis */
{
int n;
for ( n = 0; n < (int)multimaster->num_axis; n++ )
{
design_pos[n] = n < requested_cnt ? requested_pos[n]
: multimaster->axis[n].def;
if ( design_pos[n] < multimaster->axis[n].minimum )
design_pos[n] = multimaster->axis[n].minimum;
else if ( design_pos[n] > multimaster->axis[n].maximum )
design_pos[n] = multimaster->axis[n].maximum;
}
}
error = FT_Set_Var_Design_Coordinates( face,
multimaster->num_axis,
design_pos );
if ( error )
goto Display_Font;
file_loaded++;
Reset_Scale( ptsize );
num_glyphs = face->num_glyphs;
glyph = face->glyph;
size = face->size;
Display_Font:
/* initialize graphics if needed */
if ( !XisSetup )
{
XisSetup = 1;
Init_Display();
}
grSetTitle( surface, "FreeType Glyph Viewer - press F1 for help" );
old_ptsize = ptsize;
if ( file_loaded >= 1 )
{
Fail = 0;
Num = first_glyph;
if ( Num >= num_glyphs )
Num = num_glyphs - 1;
if ( Num < 0 )
Num = 0;
}
for ( ;; )
{
int key;
Clear_Display();
if ( file_loaded >= 1 )
{
switch ( render_mode )
{
case 0:
Render_Text( Num );
break;
default:
Render_All( Num, ptsize );
}
sprintf( Header, "%s %s (file %s)",
face->family_name,
face->style_name,
ft_basename( argv[file] ) );
if ( !new_header )
new_header = Header;
grWriteCellString( &bit, 0, 0, new_header, fore_color );
new_header = 0;
sprintf( Header, "axis: " );
{
int n;
for ( n = 0; n < (int)multimaster->num_axis; n++ )
{
char temp[32];
sprintf( temp, " %s:%g",
multimaster->axis[n].name,
design_pos[n]/65536. );
strcat( Header, temp );
}
}
grWriteCellString( &bit, 0, 16, Header, fore_color );
sprintf( Header, "at %d points, first glyph = %d",
ptsize,
Num );
}
else
{
sprintf( Header, "%s: not an MM font file, or could not be opened",
ft_basename( argv[file] ) );
}
grWriteCellString( &bit, 0, 8, Header, fore_color );
grRefreshSurface( surface );
grListenSurface( surface, 0, &event );
if ( !( key = Process_Event( &event ) ) )
goto End;
if ( key == 'n' )
{
if ( file_loaded >= 1 )
FT_Done_Face( face );
if ( file < argc - 1 )
file++;
goto NewFile;
}
if ( key == 'p' )
{
if ( file_loaded >= 1 )
FT_Done_Face( face );
if ( file > 1 )
file--;
goto NewFile;
}
if ( ptsize != old_ptsize )
{
Reset_Scale( ptsize );
old_ptsize = ptsize;
}
}
End:
grDoneSurface( surface );
grDoneDevices();
free ( multimaster );
FT_Done_Face ( face );
FT_Done_FreeType( library );
printf( "Execution completed successfully.\n" );
printf( "Fails = %d\n", Fail );
exit( 0 ); /* for safety reasons */
return 0; /* never reached */
}
Commit Message:
CWE ID: CWE-119 | main( int argc,
char* argv[] )
{
int old_ptsize, orig_ptsize, file;
int first_glyph = 0;
int XisSetup = 0;
char* execname;
int option;
int file_loaded;
grEvent event;
execname = ft_basename( argv[0] );
while ( 1 )
{
option = getopt( argc, argv, "d:e:f:r:" );
if ( option == -1 )
break;
switch ( option )
{
case 'd':
parse_design_coords( optarg );
break;
case 'e':
encoding = (FT_Encoding)make_tag( optarg );
break;
case 'f':
first_glyph = atoi( optarg );
break;
case 'r':
res = atoi( optarg );
if ( res < 1 )
usage( execname );
break;
default:
usage( execname );
break;
}
}
argc -= optind;
argv += optind;
if ( argc <= 1 )
usage( execname );
if ( sscanf( argv[0], "%d", &orig_ptsize ) != 1 )
orig_ptsize = 64;
file = 1;
/* Initialize engine */
error = FT_Init_FreeType( &library );
if ( error )
PanicZ( "Could not initialize FreeType library" );
NewFile:
ptsize = orig_ptsize;
hinted = 1;
file_loaded = 0;
/* Load face */
error = FT_New_Face( library, argv[file], 0, &face );
if ( error )
goto Display_Font;
if ( encoding != FT_ENCODING_NONE )
{
error = FT_Select_Charmap( face, encoding );
if ( error )
goto Display_Font;
}
/* retrieve multiple master information */
error = FT_Get_MM_Var( face, &multimaster );
if ( error )
goto Display_Font;
/* if the user specified a position, use it, otherwise */
/* set the current position to the median of each axis */
{
int n;
for ( n = 0; n < (int)multimaster->num_axis; n++ )
{
design_pos[n] = n < requested_cnt ? requested_pos[n]
: multimaster->axis[n].def;
if ( design_pos[n] < multimaster->axis[n].minimum )
design_pos[n] = multimaster->axis[n].minimum;
else if ( design_pos[n] > multimaster->axis[n].maximum )
design_pos[n] = multimaster->axis[n].maximum;
}
}
error = FT_Set_Var_Design_Coordinates( face,
multimaster->num_axis,
design_pos );
if ( error )
goto Display_Font;
file_loaded++;
Reset_Scale( ptsize );
num_glyphs = face->num_glyphs;
glyph = face->glyph;
size = face->size;
Display_Font:
/* initialize graphics if needed */
if ( !XisSetup )
{
XisSetup = 1;
Init_Display();
}
grSetTitle( surface, "FreeType Glyph Viewer - press F1 for help" );
old_ptsize = ptsize;
if ( file_loaded >= 1 )
{
Fail = 0;
Num = first_glyph;
if ( Num >= num_glyphs )
Num = num_glyphs - 1;
if ( Num < 0 )
Num = 0;
}
for ( ;; )
{
int key;
Clear_Display();
if ( file_loaded >= 1 )
{
switch ( render_mode )
{
case 0:
Render_Text( Num );
break;
default:
Render_All( Num, ptsize );
}
sprintf( Header, "%.50s %.50s (file %.100s)",
face->family_name,
face->style_name,
ft_basename( argv[file] ) );
if ( !new_header )
new_header = Header;
grWriteCellString( &bit, 0, 0, new_header, fore_color );
new_header = 0;
sprintf( Header, "axis: " );
{
int n;
for ( n = 0; n < (int)multimaster->num_axis; n++ )
{
char temp[32];
sprintf( temp, " %s:%g",
multimaster->axis[n].name,
design_pos[n]/65536. );
strcat( Header, temp );
}
}
grWriteCellString( &bit, 0, 16, Header, fore_color );
sprintf( Header, "at %d points, first glyph = %d",
ptsize,
Num );
}
else
{
sprintf( Header, "%.100s: not an MM font file, or could not be opened",
ft_basename( argv[file] ) );
}
grWriteCellString( &bit, 0, 8, Header, fore_color );
grRefreshSurface( surface );
grListenSurface( surface, 0, &event );
if ( !( key = Process_Event( &event ) ) )
goto End;
if ( key == 'n' )
{
if ( file_loaded >= 1 )
FT_Done_Face( face );
if ( file < argc - 1 )
file++;
goto NewFile;
}
if ( key == 'p' )
{
if ( file_loaded >= 1 )
FT_Done_Face( face );
if ( file > 1 )
file--;
goto NewFile;
}
if ( ptsize != old_ptsize )
{
Reset_Scale( ptsize );
old_ptsize = ptsize;
}
}
End:
grDoneSurface( surface );
grDoneDevices();
free ( multimaster );
FT_Done_Face ( face );
FT_Done_FreeType( library );
printf( "Execution completed successfully.\n" );
printf( "Fails = %d\n", Fail );
exit( 0 ); /* for safety reasons */
return 0; /* never reached */
}
| 164,999 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int WriteRiffHeader (FILE *outfile, WavpackContext *wpc, int64_t total_samples, int qmode)
{
int do_rf64 = 0, write_junk = 1;
ChunkHeader ds64hdr, datahdr, fmthdr;
RiffChunkHeader riffhdr;
DS64Chunk ds64_chunk;
JunkChunk junkchunk;
WaveHeader wavhdr;
uint32_t bcount;
int64_t total_data_bytes, total_riff_bytes;
int num_channels = WavpackGetNumChannels (wpc);
int32_t channel_mask = WavpackGetChannelMask (wpc);
int32_t sample_rate = WavpackGetSampleRate (wpc);
int bytes_per_sample = WavpackGetBytesPerSample (wpc);
int bits_per_sample = WavpackGetBitsPerSample (wpc);
int format = WavpackGetFloatNormExp (wpc) ? 3 : 1;
int wavhdrsize = 16;
if (format == 3 && WavpackGetFloatNormExp (wpc) != 127) {
error_line ("can't create valid RIFF wav header for non-normalized floating data!");
return FALSE;
}
if (total_samples == -1)
total_samples = 0x7ffff000 / (bytes_per_sample * num_channels);
total_data_bytes = total_samples * bytes_per_sample * num_channels;
if (total_data_bytes > 0xff000000) {
if (debug_logging_mode)
error_line ("total_data_bytes = %lld, so rf64", total_data_bytes);
write_junk = 0;
do_rf64 = 1;
}
else if (debug_logging_mode)
error_line ("total_data_bytes = %lld, so riff", total_data_bytes);
CLEAR (wavhdr);
wavhdr.FormatTag = format;
wavhdr.NumChannels = num_channels;
wavhdr.SampleRate = sample_rate;
wavhdr.BytesPerSecond = sample_rate * num_channels * bytes_per_sample;
wavhdr.BlockAlign = bytes_per_sample * num_channels;
wavhdr.BitsPerSample = bits_per_sample;
if (num_channels > 2 || channel_mask != 0x5 - num_channels) {
wavhdrsize = sizeof (wavhdr);
wavhdr.cbSize = 22;
wavhdr.ValidBitsPerSample = bits_per_sample;
wavhdr.SubFormat = format;
wavhdr.ChannelMask = channel_mask;
wavhdr.FormatTag = 0xfffe;
wavhdr.BitsPerSample = bytes_per_sample * 8;
wavhdr.GUID [4] = 0x10;
wavhdr.GUID [6] = 0x80;
wavhdr.GUID [9] = 0xaa;
wavhdr.GUID [11] = 0x38;
wavhdr.GUID [12] = 0x9b;
wavhdr.GUID [13] = 0x71;
}
strncpy (riffhdr.ckID, do_rf64 ? "RF64" : "RIFF", sizeof (riffhdr.ckID));
strncpy (riffhdr.formType, "WAVE", sizeof (riffhdr.formType));
total_riff_bytes = sizeof (riffhdr) + wavhdrsize + sizeof (datahdr) + ((total_data_bytes + 1) & ~(int64_t)1);
if (do_rf64) total_riff_bytes += sizeof (ds64hdr) + sizeof (ds64_chunk);
if (write_junk) total_riff_bytes += sizeof (junkchunk);
strncpy (fmthdr.ckID, "fmt ", sizeof (fmthdr.ckID));
strncpy (datahdr.ckID, "data", sizeof (datahdr.ckID));
fmthdr.ckSize = wavhdrsize;
if (write_junk) {
CLEAR (junkchunk);
strncpy (junkchunk.ckID, "junk", sizeof (junkchunk.ckID));
junkchunk.ckSize = sizeof (junkchunk) - 8;
WavpackNativeToLittleEndian (&junkchunk, ChunkHeaderFormat);
}
if (do_rf64) {
strncpy (ds64hdr.ckID, "ds64", sizeof (ds64hdr.ckID));
ds64hdr.ckSize = sizeof (ds64_chunk);
CLEAR (ds64_chunk);
ds64_chunk.riffSize64 = total_riff_bytes;
ds64_chunk.dataSize64 = total_data_bytes;
ds64_chunk.sampleCount64 = total_samples;
riffhdr.ckSize = (uint32_t) -1;
datahdr.ckSize = (uint32_t) -1;
WavpackNativeToLittleEndian (&ds64hdr, ChunkHeaderFormat);
WavpackNativeToLittleEndian (&ds64_chunk, DS64ChunkFormat);
}
else {
riffhdr.ckSize = (uint32_t) total_riff_bytes;
datahdr.ckSize = (uint32_t) total_data_bytes;
}
WavpackNativeToLittleEndian (&riffhdr, ChunkHeaderFormat);
WavpackNativeToLittleEndian (&fmthdr, ChunkHeaderFormat);
WavpackNativeToLittleEndian (&wavhdr, WaveHeaderFormat);
WavpackNativeToLittleEndian (&datahdr, ChunkHeaderFormat);
if (!DoWriteFile (outfile, &riffhdr, sizeof (riffhdr), &bcount) || bcount != sizeof (riffhdr) ||
(do_rf64 && (!DoWriteFile (outfile, &ds64hdr, sizeof (ds64hdr), &bcount) || bcount != sizeof (ds64hdr))) ||
(do_rf64 && (!DoWriteFile (outfile, &ds64_chunk, sizeof (ds64_chunk), &bcount) || bcount != sizeof (ds64_chunk))) ||
(write_junk && (!DoWriteFile (outfile, &junkchunk, sizeof (junkchunk), &bcount) || bcount != sizeof (junkchunk))) ||
!DoWriteFile (outfile, &fmthdr, sizeof (fmthdr), &bcount) || bcount != sizeof (fmthdr) ||
!DoWriteFile (outfile, &wavhdr, wavhdrsize, &bcount) || bcount != wavhdrsize ||
!DoWriteFile (outfile, &datahdr, sizeof (datahdr), &bcount) || bcount != sizeof (datahdr)) {
error_line ("can't write .WAV data, disk probably full!");
return FALSE;
}
return TRUE;
}
Commit Message: issue #27, do not overwrite stack on corrupt RF64 file
CWE ID: CWE-119 | int WriteRiffHeader (FILE *outfile, WavpackContext *wpc, int64_t total_samples, int qmode)
{
int do_rf64 = 0, write_junk = 1, table_length = 0;
ChunkHeader ds64hdr, datahdr, fmthdr;
RiffChunkHeader riffhdr;
DS64Chunk ds64_chunk;
CS64Chunk cs64_chunk;
JunkChunk junkchunk;
WaveHeader wavhdr;
uint32_t bcount;
int64_t total_data_bytes, total_riff_bytes;
int num_channels = WavpackGetNumChannels (wpc);
int32_t channel_mask = WavpackGetChannelMask (wpc);
int32_t sample_rate = WavpackGetSampleRate (wpc);
int bytes_per_sample = WavpackGetBytesPerSample (wpc);
int bits_per_sample = WavpackGetBitsPerSample (wpc);
int format = WavpackGetFloatNormExp (wpc) ? 3 : 1;
int wavhdrsize = 16;
if (format == 3 && WavpackGetFloatNormExp (wpc) != 127) {
error_line ("can't create valid RIFF wav header for non-normalized floating data!");
return FALSE;
}
if (total_samples == -1)
total_samples = 0x7ffff000 / (bytes_per_sample * num_channels);
total_data_bytes = total_samples * bytes_per_sample * num_channels;
if (total_data_bytes > 0xff000000) {
if (debug_logging_mode)
error_line ("total_data_bytes = %lld, so rf64", total_data_bytes);
write_junk = 0;
do_rf64 = 1;
}
else if (debug_logging_mode)
error_line ("total_data_bytes = %lld, so riff", total_data_bytes);
CLEAR (wavhdr);
wavhdr.FormatTag = format;
wavhdr.NumChannels = num_channels;
wavhdr.SampleRate = sample_rate;
wavhdr.BytesPerSecond = sample_rate * num_channels * bytes_per_sample;
wavhdr.BlockAlign = bytes_per_sample * num_channels;
wavhdr.BitsPerSample = bits_per_sample;
if (num_channels > 2 || channel_mask != 0x5 - num_channels) {
wavhdrsize = sizeof (wavhdr);
wavhdr.cbSize = 22;
wavhdr.ValidBitsPerSample = bits_per_sample;
wavhdr.SubFormat = format;
wavhdr.ChannelMask = channel_mask;
wavhdr.FormatTag = 0xfffe;
wavhdr.BitsPerSample = bytes_per_sample * 8;
wavhdr.GUID [4] = 0x10;
wavhdr.GUID [6] = 0x80;
wavhdr.GUID [9] = 0xaa;
wavhdr.GUID [11] = 0x38;
wavhdr.GUID [12] = 0x9b;
wavhdr.GUID [13] = 0x71;
}
strncpy (riffhdr.ckID, do_rf64 ? "RF64" : "RIFF", sizeof (riffhdr.ckID));
strncpy (riffhdr.formType, "WAVE", sizeof (riffhdr.formType));
total_riff_bytes = sizeof (riffhdr) + wavhdrsize + sizeof (datahdr) + ((total_data_bytes + 1) & ~(int64_t)1);
if (do_rf64) total_riff_bytes += sizeof (ds64hdr) + sizeof (ds64_chunk);
total_riff_bytes += table_length * sizeof (CS64Chunk);
if (write_junk) total_riff_bytes += sizeof (junkchunk);
strncpy (fmthdr.ckID, "fmt ", sizeof (fmthdr.ckID));
strncpy (datahdr.ckID, "data", sizeof (datahdr.ckID));
fmthdr.ckSize = wavhdrsize;
if (write_junk) {
CLEAR (junkchunk);
strncpy (junkchunk.ckID, "junk", sizeof (junkchunk.ckID));
junkchunk.ckSize = sizeof (junkchunk) - 8;
WavpackNativeToLittleEndian (&junkchunk, ChunkHeaderFormat);
}
if (do_rf64) {
strncpy (ds64hdr.ckID, "ds64", sizeof (ds64hdr.ckID));
ds64hdr.ckSize = sizeof (ds64_chunk) + (table_length * sizeof (CS64Chunk));
CLEAR (ds64_chunk);
ds64_chunk.riffSize64 = total_riff_bytes;
ds64_chunk.dataSize64 = total_data_bytes;
ds64_chunk.sampleCount64 = total_samples;
ds64_chunk.tableLength = table_length;
riffhdr.ckSize = (uint32_t) -1;
datahdr.ckSize = (uint32_t) -1;
WavpackNativeToLittleEndian (&ds64hdr, ChunkHeaderFormat);
WavpackNativeToLittleEndian (&ds64_chunk, DS64ChunkFormat);
}
else {
riffhdr.ckSize = (uint32_t) total_riff_bytes;
datahdr.ckSize = (uint32_t) total_data_bytes;
}
// this "table" is just a dummy placeholder for testing (normally not written)
if (table_length) {
strncpy (cs64_chunk.ckID, "dmmy", sizeof (cs64_chunk.ckID));
cs64_chunk.chunkSize64 = 12345678;
WavpackNativeToLittleEndian (&cs64_chunk, CS64ChunkFormat);
}
WavpackNativeToLittleEndian (&riffhdr, ChunkHeaderFormat);
WavpackNativeToLittleEndian (&fmthdr, ChunkHeaderFormat);
WavpackNativeToLittleEndian (&wavhdr, WaveHeaderFormat);
WavpackNativeToLittleEndian (&datahdr, ChunkHeaderFormat);
if (!DoWriteFile (outfile, &riffhdr, sizeof (riffhdr), &bcount) || bcount != sizeof (riffhdr) ||
(do_rf64 && (!DoWriteFile (outfile, &ds64hdr, sizeof (ds64hdr), &bcount) || bcount != sizeof (ds64hdr))) ||
(do_rf64 && (!DoWriteFile (outfile, &ds64_chunk, sizeof (ds64_chunk), &bcount) || bcount != sizeof (ds64_chunk)))) {
error_line ("can't write .WAV data, disk probably full!");
return FALSE;
}
// again, this is normally not written except for testing
while (table_length--)
if (!DoWriteFile (outfile, &cs64_chunk, sizeof (cs64_chunk), &bcount) || bcount != sizeof (cs64_chunk)) {
error_line ("can't write .WAV data, disk probably full!");
return FALSE;
}
if ((write_junk && (!DoWriteFile (outfile, &junkchunk, sizeof (junkchunk), &bcount) || bcount != sizeof (junkchunk))) ||
!DoWriteFile (outfile, &fmthdr, sizeof (fmthdr), &bcount) || bcount != sizeof (fmthdr) ||
!DoWriteFile (outfile, &wavhdr, wavhdrsize, &bcount) || bcount != wavhdrsize ||
!DoWriteFile (outfile, &datahdr, sizeof (datahdr), &bcount) || bcount != sizeof (datahdr)) {
error_line ("can't write .WAV data, disk probably full!");
return FALSE;
}
return TRUE;
}
| 169,335 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: INLINE void impeg2d_bit_stream_flush(void* pv_ctxt, UWORD32 u4_no_of_bits)
{
stream_t *ps_stream = (stream_t *)pv_ctxt;
if (ps_stream->u4_offset < ps_stream->u4_max_offset)
{
FLUSH_BITS(ps_stream->u4_offset,ps_stream->u4_buf,ps_stream->u4_buf_nxt,u4_no_of_bits,ps_stream->pu4_buf_aligned)
}
return;
}
Commit Message: Fixed out of bound read in flush_bits
Bug: 28168413
Change-Id: I3db5432a08daf665e160c0dab2d1928a576418b4
CWE ID: CWE-200 | INLINE void impeg2d_bit_stream_flush(void* pv_ctxt, UWORD32 u4_no_of_bits)
{
stream_t *ps_stream = (stream_t *)pv_ctxt;
if ((ps_stream->u4_offset + 64) < ps_stream->u4_max_offset)
{
FLUSH_BITS(ps_stream->u4_offset,ps_stream->u4_buf,ps_stream->u4_buf_nxt,u4_no_of_bits,ps_stream->pu4_buf_aligned)
}
else
{
UWORD32 u4_temp;
if (((ps_stream->u4_offset & 0x1f) + u4_no_of_bits) >= 32)
{
ps_stream->u4_buf = ps_stream->u4_buf_nxt;
ps_stream->u4_buf_nxt = 0;
}
ps_stream->u4_offset += u4_no_of_bits;
}
return;
}
| 173,549 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: log2vis_encoded_string (PyObject * string, const char *encoding,
FriBidiParType base_direction, int clean, int reordernsm)
{
PyObject *logical = NULL; /* logical unicode object */
PyObject *result = NULL; /* output string object */
/* Always needed for the string length */
logical = PyUnicode_Decode (PyString_AS_STRING (string),
PyString_GET_SIZE (string),
encoding, "strict");
if (logical == NULL)
return NULL;
if (strcmp (encoding, "utf-8") == 0)
/* Shortcut for utf8 strings (little faster) */
result = log2vis_utf8 (string,
PyUnicode_GET_SIZE (logical),
base_direction, clean, reordernsm);
else
{
/* Invoke log2vis_unicode and encode back to encoding */
PyObject *visual = log2vis_unicode (logical, base_direction, clean, reordernsm);
if (visual)
{
result = PyUnicode_Encode (PyUnicode_AS_UNICODE
(visual),
PyUnicode_GET_SIZE (visual),
encoding, "strict");
Py_DECREF (visual);
}
}
Py_DECREF (logical);
return result;
}
Commit Message: refactor pyfribidi.c module
pyfribidi.c is now compiled as _pyfribidi. This module only handles
unicode internally and doesn't use the fribidi_utf8_to_unicode
function (which can't handle 4 byte utf-8 sequences). This fixes the
buffer overflow in issue #2.
The code is now also much simpler: pyfribidi.c is down from 280 to 130
lines of code.
We now ship a pure python pyfribidi that handles the case when
non-unicode strings are passed in.
We now also adapt the size of the output string if clean=True is
passed.
CWE ID: CWE-119 | log2vis_encoded_string (PyObject * string, const char *encoding,
| 165,640 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: ossl_cipher_set_key(VALUE self, VALUE key)
{
EVP_CIPHER_CTX *ctx;
int key_len;
StringValue(key);
GetCipher(self, ctx);
key_len = EVP_CIPHER_CTX_key_length(ctx);
if (RSTRING_LEN(key) != key_len)
ossl_raise(rb_eArgError, "key must be %d bytes", key_len);
if (EVP_CipherInit_ex(ctx, NULL, NULL, (unsigned char *)RSTRING_PTR(key), NULL, -1) != 1)
ossl_raise(eCipherError, NULL);
return key;
}
Commit Message: cipher: don't set dummy encryption key in Cipher#initialize
Remove the encryption key initialization from Cipher#initialize. This
is effectively a revert of r32723 ("Avoid possible SEGV from AES
encryption/decryption", 2011-07-28).
r32723, which added the key initialization, was a workaround for
Ruby Bug #2768. For some certain ciphers, calling EVP_CipherUpdate()
before setting an encryption key caused segfault. It was not a problem
until OpenSSL implemented GCM mode - the encryption key could be
overridden by repeated calls of EVP_CipherInit_ex(). But, it is not the
case for AES-GCM ciphers. Setting a key, an IV, a key, in this order
causes the IV to be reset to an all-zero IV.
The problem of Bug #2768 persists on the current versions of OpenSSL.
So, make Cipher#update raise an exception if a key is not yet set by the
user. Since encrypting or decrypting without key does not make any
sense, this should not break existing applications.
Users can still call Cipher#key= and Cipher#iv= multiple times with
their own responsibility.
Reference: https://bugs.ruby-lang.org/issues/2768
Reference: https://bugs.ruby-lang.org/issues/8221
Reference: https://github.com/ruby/openssl/issues/49
CWE ID: CWE-310 | ossl_cipher_set_key(VALUE self, VALUE key)
{
EVP_CIPHER_CTX *ctx;
int key_len;
StringValue(key);
GetCipher(self, ctx);
key_len = EVP_CIPHER_CTX_key_length(ctx);
if (RSTRING_LEN(key) != key_len)
ossl_raise(rb_eArgError, "key must be %d bytes", key_len);
if (EVP_CipherInit_ex(ctx, NULL, NULL, (unsigned char *)RSTRING_PTR(key), NULL, -1) != 1)
ossl_raise(eCipherError, NULL);
rb_ivar_set(self, id_key_set, Qtrue);
return key;
}
| 168,782 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void traverse_commit_list(struct rev_info *revs,
show_commit_fn show_commit,
show_object_fn show_object,
void *data)
{
int i;
struct commit *commit;
struct strbuf base;
strbuf_init(&base, PATH_MAX);
while ((commit = get_revision(revs)) != NULL) {
/*
* an uninteresting boundary commit may not have its tree
* parsed yet, but we are not going to show them anyway
*/
if (commit->tree)
add_pending_tree(revs, commit->tree);
show_commit(commit, data);
}
for (i = 0; i < revs->pending.nr; i++) {
struct object_array_entry *pending = revs->pending.objects + i;
struct object *obj = pending->item;
const char *name = pending->name;
const char *path = pending->path;
if (obj->flags & (UNINTERESTING | SEEN))
continue;
if (obj->type == OBJ_TAG) {
obj->flags |= SEEN;
show_object(obj, NULL, name, data);
continue;
}
if (!path)
path = "";
if (obj->type == OBJ_TREE) {
process_tree(revs, (struct tree *)obj, show_object,
&base, path, data);
continue;
}
if (obj->type == OBJ_BLOB) {
process_blob(revs, (struct blob *)obj, show_object,
NULL, path, data);
continue;
}
die("unknown pending object %s (%s)",
oid_to_hex(&obj->oid), name);
}
object_array_clear(&revs->pending);
strbuf_release(&base);
}
Commit Message: list-objects: pass full pathname to callbacks
When we find a blob at "a/b/c", we currently pass this to
our show_object_fn callbacks as two components: "a/b/" and
"c". Callbacks which want the full value then call
path_name(), which concatenates the two. But this is an
inefficient interface; the path is a strbuf, and we could
simply append "c" to it temporarily, then roll back the
length, without creating a new copy.
So we could improve this by teaching the callsites of
path_name() this trick (and there are only 3). But we can
also notice that no callback actually cares about the
broken-down representation, and simply pass each callback
the full path "a/b/c" as a string. The callback code becomes
even simpler, then, as we do not have to worry about freeing
an allocated buffer, nor rolling back our modification to
the strbuf.
This is theoretically less efficient, as some callbacks
would not bother to format the final path component. But in
practice this is not measurable. Since we use the same
strbuf over and over, our work to grow it is amortized, and
we really only pay to memcpy a few bytes.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
CWE ID: CWE-119 | void traverse_commit_list(struct rev_info *revs,
show_commit_fn show_commit,
show_object_fn show_object,
void *data)
{
int i;
struct commit *commit;
struct strbuf base;
strbuf_init(&base, PATH_MAX);
while ((commit = get_revision(revs)) != NULL) {
/*
* an uninteresting boundary commit may not have its tree
* parsed yet, but we are not going to show them anyway
*/
if (commit->tree)
add_pending_tree(revs, commit->tree);
show_commit(commit, data);
}
for (i = 0; i < revs->pending.nr; i++) {
struct object_array_entry *pending = revs->pending.objects + i;
struct object *obj = pending->item;
const char *name = pending->name;
const char *path = pending->path;
if (obj->flags & (UNINTERESTING | SEEN))
continue;
if (obj->type == OBJ_TAG) {
obj->flags |= SEEN;
show_object(obj, name, data);
continue;
}
if (!path)
path = "";
if (obj->type == OBJ_TREE) {
process_tree(revs, (struct tree *)obj, show_object,
&base, path, data);
continue;
}
if (obj->type == OBJ_BLOB) {
process_blob(revs, (struct blob *)obj, show_object,
&base, path, data);
continue;
}
die("unknown pending object %s (%s)",
oid_to_hex(&obj->oid), name);
}
object_array_clear(&revs->pending);
strbuf_release(&base);
}
| 167,420 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: StyleResolver::StyleResolver(Document& document)
: m_document(document)
, m_fontSelector(CSSFontSelector::create(&document))
, m_viewportStyleResolver(ViewportStyleResolver::create(&document))
, m_styleResourceLoader(document.fetcher())
, m_styleResolverStatsSequence(0)
, m_accessCount(0)
{
Element* root = document.documentElement();
m_fontSelector->registerForInvalidationCallbacks(this);
CSSDefaultStyleSheets::initDefaultStyle(root);
FrameView* view = document.view();
if (view)
m_medium = adoptPtr(new MediaQueryEvaluator(view->mediaType()));
else
m_medium = adoptPtr(new MediaQueryEvaluator("all"));
if (root)
m_rootDefaultStyle = styleForElement(root, 0, DisallowStyleSharing, MatchOnlyUserAgentRules);
if (m_rootDefaultStyle && view)
m_medium = adoptPtr(new MediaQueryEvaluator(view->mediaType(), &view->frame(), m_rootDefaultStyle.get()));
m_styleTree.clear();
initWatchedSelectorRules(CSSSelectorWatch::from(document).watchedCallbackSelectors());
#if ENABLE(SVG_FONTS)
if (document.svgExtensions()) {
const HashSet<SVGFontFaceElement*>& svgFontFaceElements = document.svgExtensions()->svgFontFaceElements();
HashSet<SVGFontFaceElement*>::const_iterator end = svgFontFaceElements.end();
for (HashSet<SVGFontFaceElement*>::const_iterator it = svgFontFaceElements.begin(); it != end; ++it)
fontSelector()->addFontFaceRule((*it)->fontFaceRule());
}
#endif
document.styleEngine()->appendActiveAuthorStyleSheets(this);
}
Commit Message: Remove the Simple Default Stylesheet, it's just a foot-gun.
We've been bitten by the Simple Default Stylesheet being out
of sync with the real html.css twice this week.
The Simple Default Stylesheet was invented years ago for Mac:
http://trac.webkit.org/changeset/36135
It nicely handles the case where you just want to create
a single WebView and parse some simple HTML either without
styling said HTML, or only to display a small string, etc.
Note that this optimization/complexity *only* helps for the
very first document, since the default stylesheets are
all static (process-global) variables. Since any real page
on the internet uses a tag not covered by the simple default
stylesheet, not real load benefits from this optimization.
Only uses of WebView which were just rendering small bits
of text might have benefited from this. about:blank would
also have used this sheet.
This was a common application for some uses of WebView back
in those days. These days, even with WebView on Android,
there are likely much larger overheads than parsing the
html.css stylesheet, so making it required seems like the
right tradeoff of code-simplicity for this case.
BUG=319556
Review URL: https://codereview.chromium.org/73723005
git-svn-id: svn://svn.chromium.org/blink/trunk@162153 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-399 | StyleResolver::StyleResolver(Document& document)
: m_document(document)
, m_fontSelector(CSSFontSelector::create(&document))
, m_viewportStyleResolver(ViewportStyleResolver::create(&document))
, m_styleResourceLoader(document.fetcher())
, m_styleResolverStatsSequence(0)
, m_accessCount(0)
{
m_fontSelector->registerForInvalidationCallbacks(this);
// FIXME: Why do this here instead of as part of resolving style on the root?
CSSDefaultStyleSheets::loadDefaultStylesheetIfNecessary();
// Construct document root element default style. This is needed
// This is here instead of constructor because when constructor is run,
// Document doesn't have documentElement.
// NOTE: This assumes that element that gets passed to the styleForElement call
// is always from the document that owns the StyleResolver.
FrameView* view = document.view();
if (view)
m_medium = adoptPtr(new MediaQueryEvaluator(view->mediaType()));
else
m_medium = adoptPtr(new MediaQueryEvaluator("all"));
Element* root = document.documentElement();
if (root)
m_rootDefaultStyle = styleForElement(root, 0, DisallowStyleSharing, MatchOnlyUserAgentRules);
if (m_rootDefaultStyle && view)
m_medium = adoptPtr(new MediaQueryEvaluator(view->mediaType(), &view->frame(), m_rootDefaultStyle.get()));
m_styleTree.clear();
initWatchedSelectorRules(CSSSelectorWatch::from(document).watchedCallbackSelectors());
#if ENABLE(SVG_FONTS)
if (document.svgExtensions()) {
const HashSet<SVGFontFaceElement*>& svgFontFaceElements = document.svgExtensions()->svgFontFaceElements();
HashSet<SVGFontFaceElement*>::const_iterator end = svgFontFaceElements.end();
for (HashSet<SVGFontFaceElement*>::const_iterator it = svgFontFaceElements.begin(); it != end; ++it)
fontSelector()->addFontFaceRule((*it)->fontFaceRule());
}
#endif
document.styleEngine()->appendActiveAuthorStyleSheets(this);
}
| 171,584 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: xmlParseCharData(xmlParserCtxtPtr ctxt, int cdata) {
const xmlChar *in;
int nbchar = 0;
int line = ctxt->input->line;
int col = ctxt->input->col;
int ccol;
SHRINK;
GROW;
/*
* Accelerated common case where input don't need to be
* modified before passing it to the handler.
*/
if (!cdata) {
in = ctxt->input->cur;
do {
get_more_space:
while (*in == 0x20) { in++; ctxt->input->col++; }
if (*in == 0xA) {
do {
ctxt->input->line++; ctxt->input->col = 1;
in++;
} while (*in == 0xA);
goto get_more_space;
}
if (*in == '<') {
nbchar = in - ctxt->input->cur;
if (nbchar > 0) {
const xmlChar *tmp = ctxt->input->cur;
ctxt->input->cur = in;
if ((ctxt->sax != NULL) &&
(ctxt->sax->ignorableWhitespace !=
ctxt->sax->characters)) {
if (areBlanks(ctxt, tmp, nbchar, 1)) {
if (ctxt->sax->ignorableWhitespace != NULL)
ctxt->sax->ignorableWhitespace(ctxt->userData,
tmp, nbchar);
} else {
if (ctxt->sax->characters != NULL)
ctxt->sax->characters(ctxt->userData,
tmp, nbchar);
if (*ctxt->space == -1)
*ctxt->space = -2;
}
} else if ((ctxt->sax != NULL) &&
(ctxt->sax->characters != NULL)) {
ctxt->sax->characters(ctxt->userData,
tmp, nbchar);
}
}
return;
}
get_more:
ccol = ctxt->input->col;
while (test_char_data[*in]) {
in++;
ccol++;
}
ctxt->input->col = ccol;
if (*in == 0xA) {
do {
ctxt->input->line++; ctxt->input->col = 1;
in++;
} while (*in == 0xA);
goto get_more;
}
if (*in == ']') {
if ((in[1] == ']') && (in[2] == '>')) {
xmlFatalErr(ctxt, XML_ERR_MISPLACED_CDATA_END, NULL);
ctxt->input->cur = in;
return;
}
in++;
ctxt->input->col++;
goto get_more;
}
nbchar = in - ctxt->input->cur;
if (nbchar > 0) {
if ((ctxt->sax != NULL) &&
(ctxt->sax->ignorableWhitespace !=
ctxt->sax->characters) &&
(IS_BLANK_CH(*ctxt->input->cur))) {
const xmlChar *tmp = ctxt->input->cur;
ctxt->input->cur = in;
if (areBlanks(ctxt, tmp, nbchar, 0)) {
if (ctxt->sax->ignorableWhitespace != NULL)
ctxt->sax->ignorableWhitespace(ctxt->userData,
tmp, nbchar);
} else {
if (ctxt->sax->characters != NULL)
ctxt->sax->characters(ctxt->userData,
tmp, nbchar);
if (*ctxt->space == -1)
*ctxt->space = -2;
}
line = ctxt->input->line;
col = ctxt->input->col;
} else if (ctxt->sax != NULL) {
if (ctxt->sax->characters != NULL)
ctxt->sax->characters(ctxt->userData,
ctxt->input->cur, nbchar);
line = ctxt->input->line;
col = ctxt->input->col;
}
/* something really bad happened in the SAX callback */
if (ctxt->instate != XML_PARSER_CONTENT)
return;
}
ctxt->input->cur = in;
if (*in == 0xD) {
in++;
if (*in == 0xA) {
ctxt->input->cur = in;
in++;
ctxt->input->line++; ctxt->input->col = 1;
continue; /* while */
}
in--;
}
if (*in == '<') {
return;
}
if (*in == '&') {
return;
}
SHRINK;
GROW;
in = ctxt->input->cur;
} while (((*in >= 0x20) && (*in <= 0x7F)) || (*in == 0x09));
nbchar = 0;
}
ctxt->input->line = line;
ctxt->input->col = col;
xmlParseCharDataComplex(ctxt, cdata);
}
Commit Message: libxml: XML_PARSER_EOF checks from upstream
BUG=229019
TBR=cpu
Review URL: https://chromiumcodereview.appspot.com/14053009
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@196804 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-119 | xmlParseCharData(xmlParserCtxtPtr ctxt, int cdata) {
const xmlChar *in;
int nbchar = 0;
int line = ctxt->input->line;
int col = ctxt->input->col;
int ccol;
SHRINK;
GROW;
/*
* Accelerated common case where input don't need to be
* modified before passing it to the handler.
*/
if (!cdata) {
in = ctxt->input->cur;
do {
get_more_space:
while (*in == 0x20) { in++; ctxt->input->col++; }
if (*in == 0xA) {
do {
ctxt->input->line++; ctxt->input->col = 1;
in++;
} while (*in == 0xA);
goto get_more_space;
}
if (*in == '<') {
nbchar = in - ctxt->input->cur;
if (nbchar > 0) {
const xmlChar *tmp = ctxt->input->cur;
ctxt->input->cur = in;
if ((ctxt->sax != NULL) &&
(ctxt->sax->ignorableWhitespace !=
ctxt->sax->characters)) {
if (areBlanks(ctxt, tmp, nbchar, 1)) {
if (ctxt->sax->ignorableWhitespace != NULL)
ctxt->sax->ignorableWhitespace(ctxt->userData,
tmp, nbchar);
} else {
if (ctxt->sax->characters != NULL)
ctxt->sax->characters(ctxt->userData,
tmp, nbchar);
if (*ctxt->space == -1)
*ctxt->space = -2;
}
} else if ((ctxt->sax != NULL) &&
(ctxt->sax->characters != NULL)) {
ctxt->sax->characters(ctxt->userData,
tmp, nbchar);
}
}
return;
}
get_more:
ccol = ctxt->input->col;
while (test_char_data[*in]) {
in++;
ccol++;
}
ctxt->input->col = ccol;
if (*in == 0xA) {
do {
ctxt->input->line++; ctxt->input->col = 1;
in++;
} while (*in == 0xA);
goto get_more;
}
if (*in == ']') {
if ((in[1] == ']') && (in[2] == '>')) {
xmlFatalErr(ctxt, XML_ERR_MISPLACED_CDATA_END, NULL);
ctxt->input->cur = in;
return;
}
in++;
ctxt->input->col++;
goto get_more;
}
nbchar = in - ctxt->input->cur;
if (nbchar > 0) {
if ((ctxt->sax != NULL) &&
(ctxt->sax->ignorableWhitespace !=
ctxt->sax->characters) &&
(IS_BLANK_CH(*ctxt->input->cur))) {
const xmlChar *tmp = ctxt->input->cur;
ctxt->input->cur = in;
if (areBlanks(ctxt, tmp, nbchar, 0)) {
if (ctxt->sax->ignorableWhitespace != NULL)
ctxt->sax->ignorableWhitespace(ctxt->userData,
tmp, nbchar);
} else {
if (ctxt->sax->characters != NULL)
ctxt->sax->characters(ctxt->userData,
tmp, nbchar);
if (*ctxt->space == -1)
*ctxt->space = -2;
}
line = ctxt->input->line;
col = ctxt->input->col;
} else if (ctxt->sax != NULL) {
if (ctxt->sax->characters != NULL)
ctxt->sax->characters(ctxt->userData,
ctxt->input->cur, nbchar);
line = ctxt->input->line;
col = ctxt->input->col;
}
/* something really bad happened in the SAX callback */
if (ctxt->instate != XML_PARSER_CONTENT)
return;
}
ctxt->input->cur = in;
if (*in == 0xD) {
in++;
if (*in == 0xA) {
ctxt->input->cur = in;
in++;
ctxt->input->line++; ctxt->input->col = 1;
continue; /* while */
}
in--;
}
if (*in == '<') {
return;
}
if (*in == '&') {
return;
}
SHRINK;
GROW;
if (ctxt->instate == XML_PARSER_EOF)
return;
in = ctxt->input->cur;
} while (((*in >= 0x20) && (*in <= 0x7F)) || (*in == 0x09));
nbchar = 0;
}
ctxt->input->line = line;
ctxt->input->col = col;
xmlParseCharDataComplex(ctxt, cdata);
}
| 171,274 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: long ParseElementHeader(IMkvReader* pReader, long long& pos,
long long stop, long long& id,
long long& size) {
if (stop >= 0 && pos >= stop)
return E_FILE_FORMAT_INVALID;
long len;
id = ReadID(pReader, pos, len);
if (id < 0)
return E_FILE_FORMAT_INVALID;
pos += len; // consume id
if (stop >= 0 && pos >= stop)
return E_FILE_FORMAT_INVALID;
size = ReadUInt(pReader, pos, len);
if (size < 0 || len < 1 || len > 8) {
return E_FILE_FORMAT_INVALID;
}
const unsigned long long rollover_check =
static_cast<unsigned long long>(pos) + len;
if (rollover_check > LONG_LONG_MAX)
return E_FILE_FORMAT_INVALID;
pos += len; // consume length of size
if (stop >= 0 && pos >= stop)
return E_FILE_FORMAT_INVALID;
return 0; // success
}
Commit Message: Fix ParseElementHeader to support 0 payload elements
Cherry-pick'ing Change 5c83bbec9a5f6f00a349674ddad85b753d2ea219
from upstream. This fixes regression in some edge cases for mkv
playback.
BUG=26499283
Change-Id: I88de03219a3d941b6b2f251d384e29c36bdd4d9b
CWE ID: CWE-20 | long ParseElementHeader(IMkvReader* pReader, long long& pos,
long long stop, long long& id,
long long& size) {
if (stop >= 0 && pos >= stop)
return E_FILE_FORMAT_INVALID;
long len;
id = ReadID(pReader, pos, len);
if (id < 0)
return E_FILE_FORMAT_INVALID;
pos += len; // consume id
if (stop >= 0 && pos >= stop)
return E_FILE_FORMAT_INVALID;
size = ReadUInt(pReader, pos, len);
if (size < 0 || len < 1 || len > 8) {
return E_FILE_FORMAT_INVALID;
}
const unsigned long long rollover_check =
static_cast<unsigned long long>(pos) + len;
if (rollover_check > LONG_LONG_MAX)
return E_FILE_FORMAT_INVALID;
pos += len; // consume length of size
if (stop >= 0 && pos > stop)
return E_FILE_FORMAT_INVALID;
return 0; // success
}
| 174,229 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void AppControllerImpl::GetArcAndroidId(
mojom::AppController::GetArcAndroidIdCallback callback) {
arc::GetAndroidId(base::BindOnce(
[](mojom::AppController::GetArcAndroidIdCallback callback, bool success,
int64_t android_id) {
std::move(callback).Run(success, base::NumberToString(android_id));
},
std::move(callback)));
}
Commit Message: Refactor the AppController implementation into a KeyedService.
This is necessary to guarantee that the AppController will not outlive
the AppServiceProxy, which could happen before during Profile destruction.
Bug: 945427
Change-Id: I9e2089799e38d5a70a4a9aa66df5319113e7809e
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1542336
Reviewed-by: Michael Giuffrida <michaelpg@chromium.org>
Commit-Queue: Lucas Tenório <ltenorio@chromium.org>
Cr-Commit-Position: refs/heads/master@{#645122}
CWE ID: CWE-416 | void AppControllerImpl::GetArcAndroidId(
void AppControllerService::GetArcAndroidId(
mojom::AppController::GetArcAndroidIdCallback callback) {
arc::GetAndroidId(base::BindOnce(
[](mojom::AppController::GetArcAndroidIdCallback callback, bool success,
int64_t android_id) {
std::move(callback).Run(success, base::NumberToString(android_id));
},
std::move(callback)));
}
| 172,083 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int key_reject_and_link(struct key *key,
unsigned timeout,
unsigned error,
struct key *keyring,
struct key *authkey)
{
struct assoc_array_edit *edit;
struct timespec now;
int ret, awaken, link_ret = 0;
key_check(key);
key_check(keyring);
awaken = 0;
ret = -EBUSY;
if (keyring) {
if (keyring->restrict_link)
return -EPERM;
link_ret = __key_link_begin(keyring, &key->index_key, &edit);
}
mutex_lock(&key_construction_mutex);
/* can't instantiate twice */
if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
/* mark the key as being negatively instantiated */
atomic_inc(&key->user->nikeys);
key->reject_error = -error;
smp_wmb();
set_bit(KEY_FLAG_NEGATIVE, &key->flags);
set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
now = current_kernel_time();
key->expiry = now.tv_sec + timeout;
key_schedule_gc(key->expiry + key_gc_delay);
if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
awaken = 1;
ret = 0;
/* and link it into the destination keyring */
if (keyring && link_ret == 0)
__key_link(key, &edit);
/* disable the authorisation key */
if (authkey)
key_revoke(authkey);
}
mutex_unlock(&key_construction_mutex);
if (keyring)
__key_link_end(keyring, &key->index_key, edit);
/* wake up anyone waiting for a key to be constructed */
if (awaken)
wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
return ret == 0 ? link_ret : ret;
}
Commit Message: KEYS: potential uninitialized variable
If __key_link_begin() failed then "edit" would be uninitialized. I've
added a check to fix that.
This allows a random user to crash the kernel, though it's quite
difficult to achieve. There are three ways it can be done as the user
would have to cause an error to occur in __key_link():
(1) Cause the kernel to run out of memory. In practice, this is difficult
to achieve without ENOMEM cropping up elsewhere and aborting the
attempt.
(2) Revoke the destination keyring between the keyring ID being looked up
and it being tested for revocation. In practice, this is difficult to
time correctly because the KEYCTL_REJECT function can only be used
from the request-key upcall process. Further, users can only make use
of what's in /sbin/request-key.conf, though this does including a
rejection debugging test - which means that the destination keyring
has to be the caller's session keyring in practice.
(3) Have just enough key quota available to create a key, a new session
keyring for the upcall and a link in the session keyring, but not then
sufficient quota to create a link in the nominated destination keyring
so that it fails with EDQUOT.
The bug can be triggered using option (3) above using something like the
following:
echo 80 >/proc/sys/kernel/keys/root_maxbytes
keyctl request2 user debug:fred negate @t
The above sets the quota to something much lower (80) to make the bug
easier to trigger, but this is dependent on the system. Note also that
the name of the keyring created contains a random number that may be
between 1 and 10 characters in size, so may throw the test off by
changing the amount of quota used.
Assuming the failure occurs, something like the following will be seen:
kfree_debugcheck: out of range ptr 6b6b6b6b6b6b6b68h
------------[ cut here ]------------
kernel BUG at ../mm/slab.c:2821!
...
RIP: 0010:[<ffffffff811600f9>] kfree_debugcheck+0x20/0x25
RSP: 0018:ffff8804014a7de8 EFLAGS: 00010092
RAX: 0000000000000034 RBX: 6b6b6b6b6b6b6b68 RCX: 0000000000000000
RDX: 0000000000040001 RSI: 00000000000000f6 RDI: 0000000000000300
RBP: ffff8804014a7df0 R08: 0000000000000001 R09: 0000000000000000
R10: ffff8804014a7e68 R11: 0000000000000054 R12: 0000000000000202
R13: ffffffff81318a66 R14: 0000000000000000 R15: 0000000000000001
...
Call Trace:
kfree+0xde/0x1bc
assoc_array_cancel_edit+0x1f/0x36
__key_link_end+0x55/0x63
key_reject_and_link+0x124/0x155
keyctl_reject_key+0xb6/0xe0
keyctl_negate_key+0x10/0x12
SyS_keyctl+0x9f/0xe7
do_syscall_64+0x63/0x13a
entry_SYSCALL64_slow_path+0x25/0x25
Fixes: f70e2e06196a ('KEYS: Do preallocation for __key_link()')
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: David Howells <dhowells@redhat.com>
cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
CWE ID: | int key_reject_and_link(struct key *key,
unsigned timeout,
unsigned error,
struct key *keyring,
struct key *authkey)
{
struct assoc_array_edit *edit;
struct timespec now;
int ret, awaken, link_ret = 0;
key_check(key);
key_check(keyring);
awaken = 0;
ret = -EBUSY;
if (keyring) {
if (keyring->restrict_link)
return -EPERM;
link_ret = __key_link_begin(keyring, &key->index_key, &edit);
}
mutex_lock(&key_construction_mutex);
/* can't instantiate twice */
if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
/* mark the key as being negatively instantiated */
atomic_inc(&key->user->nikeys);
key->reject_error = -error;
smp_wmb();
set_bit(KEY_FLAG_NEGATIVE, &key->flags);
set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
now = current_kernel_time();
key->expiry = now.tv_sec + timeout;
key_schedule_gc(key->expiry + key_gc_delay);
if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
awaken = 1;
ret = 0;
/* and link it into the destination keyring */
if (keyring && link_ret == 0)
__key_link(key, &edit);
/* disable the authorisation key */
if (authkey)
key_revoke(authkey);
}
mutex_unlock(&key_construction_mutex);
if (keyring && link_ret == 0)
__key_link_end(keyring, &key->index_key, edit);
/* wake up anyone waiting for a key to be constructed */
if (awaken)
wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
return ret == 0 ? link_ret : ret;
}
| 167,261 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: hstore_slice_to_hstore(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
HEntry *entries = ARRPTR(hs);
char *ptr = STRPTR(hs);
ArrayType *key_array = PG_GETARG_ARRAYTYPE_P(1);
HStore *out;
int nkeys;
Pairs *key_pairs = hstoreArrayToPairs(key_array, &nkeys);
Pairs *out_pairs;
int bufsiz;
int lastidx = 0;
int i;
int out_count = 0;
if (nkeys == 0)
{
out = hstorePairs(NULL, 0, 0);
PG_RETURN_POINTER(out);
}
out_pairs = palloc(sizeof(Pairs) * nkeys);
bufsiz = 0;
/*
* we exploit the fact that the pairs list is already sorted into strictly
* increasing order to narrow the hstoreFindKey search; each search can
* start one entry past the previous "found" entry, or at the lower bound
* of the last search.
*/
for (i = 0; i < nkeys; ++i)
{
int idx = hstoreFindKey(hs, &lastidx,
key_pairs[i].key, key_pairs[i].keylen);
if (idx >= 0)
{
out_pairs[out_count].key = key_pairs[i].key;
bufsiz += (out_pairs[out_count].keylen = key_pairs[i].keylen);
out_pairs[out_count].val = HS_VAL(entries, ptr, idx);
bufsiz += (out_pairs[out_count].vallen = HS_VALLEN(entries, idx));
out_pairs[out_count].isnull = HS_VALISNULL(entries, idx);
out_pairs[out_count].needfree = false;
++out_count;
}
}
/*
* we don't use uniquePairs here because we know that the pairs list is
* already sorted and uniq'ed.
*/
out = hstorePairs(out_pairs, out_count, bufsiz);
PG_RETURN_POINTER(out);
}
Commit Message: Predict integer overflow to avoid buffer overruns.
Several functions, mostly type input functions, calculated an allocation
size such that the calculation wrapped to a small positive value when
arguments implied a sufficiently-large requirement. Writes past the end
of the inadvertent small allocation followed shortly thereafter.
Coverity identified the path_in() vulnerability; code inspection led to
the rest. In passing, add check_stack_depth() to prevent stack overflow
in related functions.
Back-patch to 8.4 (all supported versions). The non-comment hstore
changes touch code that did not exist in 8.4, so that part stops at 9.0.
Noah Misch and Heikki Linnakangas, reviewed by Tom Lane.
Security: CVE-2014-0064
CWE ID: CWE-189 | hstore_slice_to_hstore(PG_FUNCTION_ARGS)
{
HStore *hs = PG_GETARG_HS(0);
HEntry *entries = ARRPTR(hs);
char *ptr = STRPTR(hs);
ArrayType *key_array = PG_GETARG_ARRAYTYPE_P(1);
HStore *out;
int nkeys;
Pairs *key_pairs = hstoreArrayToPairs(key_array, &nkeys);
Pairs *out_pairs;
int bufsiz;
int lastidx = 0;
int i;
int out_count = 0;
if (nkeys == 0)
{
out = hstorePairs(NULL, 0, 0);
PG_RETURN_POINTER(out);
}
/* hstoreArrayToPairs() checked overflow */
out_pairs = palloc(sizeof(Pairs) * nkeys);
bufsiz = 0;
/*
* we exploit the fact that the pairs list is already sorted into strictly
* increasing order to narrow the hstoreFindKey search; each search can
* start one entry past the previous "found" entry, or at the lower bound
* of the last search.
*/
for (i = 0; i < nkeys; ++i)
{
int idx = hstoreFindKey(hs, &lastidx,
key_pairs[i].key, key_pairs[i].keylen);
if (idx >= 0)
{
out_pairs[out_count].key = key_pairs[i].key;
bufsiz += (out_pairs[out_count].keylen = key_pairs[i].keylen);
out_pairs[out_count].val = HS_VAL(entries, ptr, idx);
bufsiz += (out_pairs[out_count].vallen = HS_VALLEN(entries, idx));
out_pairs[out_count].isnull = HS_VALISNULL(entries, idx);
out_pairs[out_count].needfree = false;
++out_count;
}
}
/*
* we don't use uniquePairs here because we know that the pairs list is
* already sorted and uniq'ed.
*/
out = hstorePairs(out_pairs, out_count, bufsiz);
PG_RETURN_POINTER(out);
}
| 166,401 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: get_pols_2_svc(gpols_arg *arg, struct svc_req *rqstp)
{
static gpols_ret ret;
char *prime_arg;
gss_buffer_desc client_name,
service_name;
OM_uint32 minor_stat;
kadm5_server_handle_t handle;
const char *errmsg = NULL;
xdr_free(xdr_gpols_ret, &ret);
if ((ret.code = new_server_handle(arg->api_version, rqstp, &handle)))
goto exit_func;
if ((ret.code = check_handle((void *)handle)))
goto exit_func;
ret.api_version = handle->api_version;
if (setup_gss_names(rqstp, &client_name, &service_name) < 0) {
ret.code = KADM5_FAILURE;
goto exit_func;
}
prime_arg = arg->exp;
if (prime_arg == NULL)
prime_arg = "*";
if (CHANGEPW_SERVICE(rqstp) || !kadm5int_acl_check(handle->context,
rqst2name(rqstp),
ACL_LIST, NULL, NULL)) {
ret.code = KADM5_AUTH_LIST;
log_unauth("kadm5_get_policies", prime_arg,
&client_name, &service_name, rqstp);
} else {
ret.code = kadm5_get_policies((void *)handle,
arg->exp, &ret.pols,
&ret.count);
if( ret.code != 0 )
errmsg = krb5_get_error_message(handle->context, ret.code);
log_done("kadm5_get_policies", prime_arg, errmsg,
&client_name, &service_name, rqstp);
if (errmsg != NULL)
krb5_free_error_message(handle->context, errmsg);
}
gss_release_buffer(&minor_stat, &client_name);
gss_release_buffer(&minor_stat, &service_name);
exit_func:
free_server_handle(handle);
return &ret;
}
Commit Message: Fix leaks in kadmin server stubs [CVE-2015-8631]
In each kadmind server stub, initialize the client_name and
server_name variables, and release them in the cleanup handler. Many
of the stubs will otherwise leak the client and server name if
krb5_unparse_name() fails. Also make sure to free the prime_arg
variables in rename_principal_2_svc(), or we can leak the first one if
unparsing the second one fails. Discovered by Simo Sorce.
CVE-2015-8631:
In all versions of MIT krb5, an authenticated attacker can cause
kadmind to leak memory by supplying a null principal name in a request
which uses one. Repeating these requests will eventually cause
kadmind to exhaust all available memory.
CVSSv2 Vector: AV:N/AC:L/Au:S/C:N/I:N/A:C/E:POC/RL:OF/RC:C
ticket: 8343 (new)
target_version: 1.14-next
target_version: 1.13-next
tags: pullup
CWE ID: CWE-119 | get_pols_2_svc(gpols_arg *arg, struct svc_req *rqstp)
{
static gpols_ret ret;
char *prime_arg;
gss_buffer_desc client_name = GSS_C_EMPTY_BUFFER;
gss_buffer_desc service_name = GSS_C_EMPTY_BUFFER;
OM_uint32 minor_stat;
kadm5_server_handle_t handle;
const char *errmsg = NULL;
xdr_free(xdr_gpols_ret, &ret);
if ((ret.code = new_server_handle(arg->api_version, rqstp, &handle)))
goto exit_func;
if ((ret.code = check_handle((void *)handle)))
goto exit_func;
ret.api_version = handle->api_version;
if (setup_gss_names(rqstp, &client_name, &service_name) < 0) {
ret.code = KADM5_FAILURE;
goto exit_func;
}
prime_arg = arg->exp;
if (prime_arg == NULL)
prime_arg = "*";
if (CHANGEPW_SERVICE(rqstp) || !kadm5int_acl_check(handle->context,
rqst2name(rqstp),
ACL_LIST, NULL, NULL)) {
ret.code = KADM5_AUTH_LIST;
log_unauth("kadm5_get_policies", prime_arg,
&client_name, &service_name, rqstp);
} else {
ret.code = kadm5_get_policies((void *)handle,
arg->exp, &ret.pols,
&ret.count);
if( ret.code != 0 )
errmsg = krb5_get_error_message(handle->context, ret.code);
log_done("kadm5_get_policies", prime_arg, errmsg,
&client_name, &service_name, rqstp);
if (errmsg != NULL)
krb5_free_error_message(handle->context, errmsg);
}
exit_func:
gss_release_buffer(&minor_stat, &client_name);
gss_release_buffer(&minor_stat, &service_name);
free_server_handle(handle);
return &ret;
}
| 167,514 |
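The leak described in the commit message above comes from allocating the client and service names and then taking an early goto to the exit label before anything releases them. The fix is the usual C idiom: start every resource in a known-empty state and release it unconditionally at a single cleanup point, so every early exit path is covered. A minimal standalone sketch of that idiom, using made-up buffer types rather than the real GSSAPI ones:

#include <stdlib.h>
#include <string.h>

struct buf { char *data; size_t len; };
#define BUF_EMPTY { NULL, 0 }           /* analogous to GSS_C_EMPTY_BUFFER */

static void buf_release(struct buf *b)  /* stand-in for gss_release_buffer() */
{
    free(b->data);
    b->data = NULL;
    b->len = 0;
}

int handle_request(int fail_early)
{
    int ret = -1;
    struct buf client = BUF_EMPTY;      /* safe to release even if never filled */
    struct buf service = BUF_EMPTY;

    if (fail_early)
        goto exit_func;                 /* early exit cannot leak: both are still empty */

    client.data = strdup("client@EXAMPLE.COM");
    service.data = strdup("service@EXAMPLE.COM");
    ret = 0;

exit_func:
    buf_release(&client);               /* one cleanup point covers every path */
    buf_release(&service);
    return ret;
}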
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void HTMLDocument::addItemToMap(HashCountedSet<StringImpl*>& map, const AtomicString& name)
{
if (name.isEmpty())
return;
map.add(name.impl());
if (Frame* f = frame())
f->script()->namedItemAdded(this, name);
}
Commit Message: Fix tracking of the id attribute string if it is shared across elements.
The patch to remove AtomicStringImpl:
http://src.chromium.org/viewvc/blink?view=rev&rev=154790
Exposed a lifetime issue with strings for id attributes. We simply need to use
AtomicString.
BUG=290566
Review URL: https://codereview.chromium.org/33793004
git-svn-id: svn://svn.chromium.org/blink/trunk@160250 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-399 | void HTMLDocument::addItemToMap(HashCountedSet<AtomicString>& map, const AtomicString& name)
{
if (name.isEmpty())
return;
map.add(name);
if (Frame* f = frame())
f->script()->namedItemAdded(this, name);
}
| 171,156 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int csum_len_for_type(int cst)
{
switch (cst) {
case CSUM_NONE:
return 1;
case CSUM_ARCHAIC:
return 2;
case CSUM_MD4:
case CSUM_MD4_OLD:
case CSUM_MD4_BUSTED:
return MD4_DIGEST_LEN;
case CSUM_MD5:
return MD5_DIGEST_LEN;
}
return 0;
}
Commit Message:
CWE ID: CWE-354 | int csum_len_for_type(int cst)
{
switch (cst) {
case CSUM_NONE:
return 1;
case CSUM_MD4_ARCHAIC:
return 2;
case CSUM_MD4:
case CSUM_MD4_OLD:
case CSUM_MD4_BUSTED:
return MD4_DIGEST_LEN;
case CSUM_MD5:
return MD5_DIGEST_LEN;
}
return 0;
}
| 164,642 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: cmsSEQ* CMSEXPORT cmsAllocProfileSequenceDescription(cmsContext ContextID, cmsUInt32Number n)
{
cmsSEQ* Seq;
cmsUInt32Number i;
if (n == 0) return NULL;
if (n > 255) return NULL;
Seq = (cmsSEQ*) _cmsMallocZero(ContextID, sizeof(cmsSEQ));
if (Seq == NULL) return NULL;
Seq -> ContextID = ContextID;
Seq -> seq = (cmsPSEQDESC*) _cmsCalloc(ContextID, n, sizeof(cmsPSEQDESC));
Seq -> n = n;
for (i=0; i < n; i++) {
Seq -> seq[i].Manufacturer = NULL;
Seq -> seq[i].Model = NULL;
Seq -> seq[i].Description = NULL;
}
return Seq;
}
Commit Message: Non happy-path fixes
CWE ID: | cmsSEQ* CMSEXPORT cmsAllocProfileSequenceDescription(cmsContext ContextID, cmsUInt32Number n)
{
cmsSEQ* Seq;
cmsUInt32Number i;
if (n == 0) return NULL;
if (n > 255) return NULL;
Seq = (cmsSEQ*) _cmsMallocZero(ContextID, sizeof(cmsSEQ));
if (Seq == NULL) return NULL;
Seq -> ContextID = ContextID;
Seq -> seq = (cmsPSEQDESC*) _cmsCalloc(ContextID, n, sizeof(cmsPSEQDESC));
Seq -> n = n;
if (Seq -> seq == NULL) {
_cmsFree(ContextID, Seq);
return NULL;
}
for (i=0; i < n; i++) {
Seq -> seq[i].Manufacturer = NULL;
Seq -> seq[i].Model = NULL;
Seq -> seq[i].Description = NULL;
}
return Seq;
}
| 166,542 |
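The added NULL check above is the standard two-level allocation pattern: when the inner _cmsCalloc fails, the outer cmsSEQ that was already allocated must be freed before returning NULL, otherwise every failed call leaks one object. The same pattern reduced to plain C (illustrative allocators, not the lcms context-aware ones):

#include <stdlib.h>

struct seq { size_t n; int *items; };

struct seq *seq_alloc(size_t n)
{
    struct seq *s = calloc(1, sizeof(*s));
    if (s == NULL)
        return NULL;

    s->items = calloc(n, sizeof(*s->items));
    if (s->items == NULL) {   /* inner allocation failed: undo the outer one */
        free(s);
        return NULL;
    }
    s->n = n;
    return s;
}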
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: WebTransformationMatrix WebTransformOperations::blend(const WebTransformOperations& from, double progress) const
{
WebTransformationMatrix toReturn;
bool fromIdentity = from.isIdentity();
bool toIdentity = isIdentity();
if (fromIdentity && toIdentity)
return toReturn;
if (matchesTypes(from)) {
size_t numOperations = max(fromIdentity ? 0 : from.m_private->operations.size(),
toIdentity ? 0 : m_private->operations.size());
for (size_t i = 0; i < numOperations; ++i) {
WebTransformationMatrix blended = blendTransformOperations(
fromIdentity ? 0 : &from.m_private->operations[i],
toIdentity ? 0 : &m_private->operations[i],
progress);
toReturn.multiply(blended);
}
} else {
toReturn = apply();
WebTransformationMatrix fromTransform = from.apply();
toReturn.blend(fromTransform, progress);
}
return toReturn;
}
Commit Message: [chromium] We should accelerate all transformations, except when we must blend matrices that cannot be decomposed.
https://bugs.webkit.org/show_bug.cgi?id=95855
Reviewed by James Robinson.
Source/Platform:
WebTransformOperations are now able to report if they can successfully blend.
WebTransformationMatrix::blend now returns a bool if blending would fail.
* chromium/public/WebTransformOperations.h:
(WebTransformOperations):
* chromium/public/WebTransformationMatrix.h:
(WebTransformationMatrix):
Source/WebCore:
WebTransformOperations are now able to report if they can successfully blend.
WebTransformationMatrix::blend now returns a bool if blending would fail.
Unit tests:
AnimationTranslationUtilTest.createTransformAnimationWithNonDecomposableMatrix
AnimationTranslationUtilTest.createTransformAnimationWithNonInvertibleTransform
* platform/chromium/support/WebTransformOperations.cpp:
(WebKit::blendTransformOperations):
(WebKit::WebTransformOperations::blend):
(WebKit::WebTransformOperations::canBlendWith):
(WebKit):
(WebKit::WebTransformOperations::blendInternal):
* platform/chromium/support/WebTransformationMatrix.cpp:
(WebKit::WebTransformationMatrix::blend):
* platform/graphics/chromium/AnimationTranslationUtil.cpp:
(WebCore::WebTransformAnimationCurve):
Source/WebKit/chromium:
Added the following unit tests:
AnimationTranslationUtilTest.createTransformAnimationWithNonDecomposableMatrix
AnimationTranslationUtilTest.createTransformAnimationWithNonInvertibleTransform
* tests/AnimationTranslationUtilTest.cpp:
(WebKit::TEST):
(WebKit):
git-svn-id: svn://svn.chromium.org/blink/trunk@127868 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-119 | WebTransformationMatrix WebTransformOperations::blend(const WebTransformOperations& from, double progress) const
{
WebTransformationMatrix toReturn;
blendInternal(from, progress, toReturn);
return toReturn;
}
| 171,002 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: dtls1_get_message_fragment(SSL *s, int st1, int stn, long max, int *ok)
{
unsigned char wire[DTLS1_HM_HEADER_LENGTH];
unsigned long len, frag_off, frag_len;
int i,al;
struct hm_header_st msg_hdr;
/* see if we have the required fragment already */
if ((frag_len = dtls1_retrieve_buffered_fragment(s,max,ok)) || *ok)
{
return frag_len;
}
/* read handshake message header */
i=s->method->ssl_read_bytes(s,SSL3_RT_HANDSHAKE,wire,
DTLS1_HM_HEADER_LENGTH, 0);
if (i <= 0) /* nbio, or an error */
{
s->rwstate=SSL_READING;
*ok = 0;
return i;
}
/* Handshake fails if message header is incomplete */
if (i != DTLS1_HM_HEADER_LENGTH)
{
al=SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_DTLS1_GET_MESSAGE_FRAGMENT,SSL_R_UNEXPECTED_MESSAGE);
goto f_err;
}
/* parse the message fragment header */
dtls1_get_message_header(wire, &msg_hdr);
/*
* if this is a future (or stale) message it gets buffered
* (or dropped)--no further processing at this time
* While listening, we accept seq 1 (ClientHello with cookie)
* although we're still expecting seq 0 (ClientHello)
*/
if (msg_hdr.seq != s->d1->handshake_read_seq && !(s->d1->listen && msg_hdr.seq == 1))
return dtls1_process_out_of_seq_message(s, &msg_hdr, ok);
len = msg_hdr.msg_len;
frag_off = msg_hdr.frag_off;
frag_len = msg_hdr.frag_len;
if (frag_len && frag_len < len)
return dtls1_reassemble_fragment(s, &msg_hdr, ok);
if (!s->server && s->d1->r_msg_hdr.frag_off == 0 &&
wire[0] == SSL3_MT_HELLO_REQUEST)
{
/* The server may always send 'Hello Request' messages --
* we are doing a handshake anyway now, so ignore them
* if their format is correct. Does not count for
* 'Finished' MAC. */
if (wire[1] == 0 && wire[2] == 0 && wire[3] == 0)
{
if (s->msg_callback)
s->msg_callback(0, s->version, SSL3_RT_HANDSHAKE,
wire, DTLS1_HM_HEADER_LENGTH, s,
s->msg_callback_arg);
s->init_num = 0;
return dtls1_get_message_fragment(s, st1, stn,
max, ok);
}
else /* Incorrectly formated Hello request */
{
al=SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_DTLS1_GET_MESSAGE_FRAGMENT,SSL_R_UNEXPECTED_MESSAGE);
goto f_err;
}
}
if ((al=dtls1_preprocess_fragment(s,&msg_hdr,max)))
goto f_err;
/* XDTLS: ressurect this when restart is in place */
s->state=stn;
if ( frag_len > 0)
{
unsigned char *p=(unsigned char *)s->init_buf->data+DTLS1_HM_HEADER_LENGTH;
i=s->method->ssl_read_bytes(s,SSL3_RT_HANDSHAKE,
&p[frag_off],frag_len,0);
/* XDTLS: fix this--message fragments cannot span multiple packets */
if (i <= 0)
{
s->rwstate=SSL_READING;
*ok = 0;
return i;
}
}
else
i = 0;
/* XDTLS: an incorrectly formatted fragment should cause the
* handshake to fail */
if (i != (int)frag_len)
{
al=SSL3_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_DTLS1_GET_MESSAGE_FRAGMENT,SSL3_AD_ILLEGAL_PARAMETER);
goto f_err;
}
*ok = 1;
/* Note that s->init_num is *not* used as current offset in
* s->init_buf->data, but as a counter summing up fragments'
* lengths: as soon as they sum up to handshake packet
* length, we assume we have got all the fragments. */
s->init_num = frag_len;
return frag_len;
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
s->init_num = 0;
*ok=0;
return(-1);
}
Commit Message:
CWE ID: CWE-399 | dtls1_get_message_fragment(SSL *s, int st1, int stn, long max, int *ok)
{
unsigned char wire[DTLS1_HM_HEADER_LENGTH];
unsigned long len, frag_off, frag_len;
int i,al;
struct hm_header_st msg_hdr;
redo:
/* see if we have the required fragment already */
if ((frag_len = dtls1_retrieve_buffered_fragment(s,max,ok)) || *ok)
{
return frag_len;
}
/* read handshake message header */
i=s->method->ssl_read_bytes(s,SSL3_RT_HANDSHAKE,wire,
DTLS1_HM_HEADER_LENGTH, 0);
if (i <= 0) /* nbio, or an error */
{
s->rwstate=SSL_READING;
*ok = 0;
return i;
}
/* Handshake fails if message header is incomplete */
if (i != DTLS1_HM_HEADER_LENGTH)
{
al=SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_DTLS1_GET_MESSAGE_FRAGMENT,SSL_R_UNEXPECTED_MESSAGE);
goto f_err;
}
/* parse the message fragment header */
dtls1_get_message_header(wire, &msg_hdr);
/*
* if this is a future (or stale) message it gets buffered
* (or dropped)--no further processing at this time
* While listening, we accept seq 1 (ClientHello with cookie)
* although we're still expecting seq 0 (ClientHello)
*/
if (msg_hdr.seq != s->d1->handshake_read_seq && !(s->d1->listen && msg_hdr.seq == 1))
return dtls1_process_out_of_seq_message(s, &msg_hdr, ok);
len = msg_hdr.msg_len;
frag_off = msg_hdr.frag_off;
frag_len = msg_hdr.frag_len;
if (frag_len && frag_len < len)
return dtls1_reassemble_fragment(s, &msg_hdr, ok);
if (!s->server && s->d1->r_msg_hdr.frag_off == 0 &&
wire[0] == SSL3_MT_HELLO_REQUEST)
{
/* The server may always send 'Hello Request' messages --
* we are doing a handshake anyway now, so ignore them
* if their format is correct. Does not count for
* 'Finished' MAC. */
if (wire[1] == 0 && wire[2] == 0 && wire[3] == 0)
{
if (s->msg_callback)
s->msg_callback(0, s->version, SSL3_RT_HANDSHAKE,
wire, DTLS1_HM_HEADER_LENGTH, s,
s->msg_callback_arg);
s->init_num = 0;
goto redo;
}
else /* Incorrectly formated Hello request */
{
al=SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_DTLS1_GET_MESSAGE_FRAGMENT,SSL_R_UNEXPECTED_MESSAGE);
goto f_err;
}
}
if ((al=dtls1_preprocess_fragment(s,&msg_hdr,max)))
goto f_err;
/* XDTLS: ressurect this when restart is in place */
s->state=stn;
if ( frag_len > 0)
{
unsigned char *p=(unsigned char *)s->init_buf->data+DTLS1_HM_HEADER_LENGTH;
i=s->method->ssl_read_bytes(s,SSL3_RT_HANDSHAKE,
&p[frag_off],frag_len,0);
/* XDTLS: fix this--message fragments cannot span multiple packets */
if (i <= 0)
{
s->rwstate=SSL_READING;
*ok = 0;
return i;
}
}
else
i = 0;
/* XDTLS: an incorrectly formatted fragment should cause the
* handshake to fail */
if (i != (int)frag_len)
{
al=SSL3_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_DTLS1_GET_MESSAGE_FRAGMENT,SSL3_AD_ILLEGAL_PARAMETER);
goto f_err;
}
*ok = 1;
/* Note that s->init_num is *not* used as current offset in
* s->init_buf->data, but as a counter summing up fragments'
* lengths: as soon as they sum up to handshake packet
* length, we assume we have got all the fragments. */
s->init_num = frag_len;
return frag_len;
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
s->init_num = 0;
*ok=0;
return(-1);
}
| 165,282 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void SerializerMarkupAccumulator::appendCustomAttributes(StringBuilder& result, Element* element, Namespaces* namespaces)
{
if (!element->isFrameOwnerElement())
return;
HTMLFrameOwnerElement* frameOwner = toHTMLFrameOwnerElement(element);
Frame* frame = frameOwner->contentFrame();
if (!frame)
return;
KURL url = frame->document()->url();
if (url.isValid() && !url.isBlankURL())
return;
url = m_serializer->urlForBlankFrame(frame);
appendAttribute(result, element, Attribute(frameOwnerURLAttributeName(*frameOwner), url.string()), namespaces);
}
Commit Message: Revert 162155 "This review merges the two existing page serializ..."
Change r162155 broke the world even though it was landed using the CQ.
> This review merges the two existing page serializers, WebPageSerializerImpl and
> PageSerializer, into one, PageSerializer. In addition to this it moves all
> the old tests from WebPageNewSerializerTest and WebPageSerializerTest to the
> PageSerializerTest structure and splits out one test for MHTML into a new
> MHTMLTest file.
>
> Saving as 'Webpage, Complete', 'Webpage, HTML Only' and as MHTML when the
> 'Save Page as MHTML' flag is enabled now uses the same code, and should thus
> have the same feature set. Meaning that both modes now should be a bit better.
>
> Detailed list of changes:
>
> - PageSerializerTest: Prepare for more DTD test
> - PageSerializerTest: Remove now unneccesary input image test
> - PageSerializerTest: Remove unused WebPageSerializer/Impl code
> - PageSerializerTest: Move data URI morph test
> - PageSerializerTest: Move data URI test
> - PageSerializerTest: Move namespace test
> - PageSerializerTest: Move SVG Image test
> - MHTMLTest: Move MHTML specific test to own test file
> - PageSerializerTest: Delete duplicate XML header test
> - PageSerializerTest: Move blank frame test
> - PageSerializerTest: Move CSS test
> - PageSerializerTest: Add frameset/frame test
> - PageSerializerTest: Move old iframe test
> - PageSerializerTest: Move old elements test
> - Use PageSerizer for saving web pages
> - PageSerializerTest: Test for rewriting links
> - PageSerializer: Add rewrite link accumulator
> - PageSerializer: Serialize images in iframes/frames src
> - PageSerializer: XHTML fix for meta tags
> - PageSerializer: Add presentation CSS
> - PageSerializer: Rename out parameter
>
> BUG=
> R=abarth@chromium.org
>
> Review URL: https://codereview.chromium.org/68613003
TBR=tiger@opera.com
Review URL: https://codereview.chromium.org/73673003
git-svn-id: svn://svn.chromium.org/blink/trunk@162156 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-119 | void SerializerMarkupAccumulator::appendCustomAttributes(StringBuilder& out, Element* element, Namespaces* namespaces)
{
if (!element->isFrameOwnerElement())
return;
HTMLFrameOwnerElement* frameOwner = toHTMLFrameOwnerElement(element);
Frame* frame = frameOwner->contentFrame();
if (!frame)
return;
KURL url = frame->document()->url();
if (url.isValid() && !url.isBlankURL())
return;
url = m_serializer->urlForBlankFrame(frame);
appendAttribute(out, element, Attribute(frameOwnerURLAttributeName(*frameOwner), url.string()), namespaces);
}
| 171,566 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int ssl3_client_hello(SSL *s)
{
unsigned char *buf;
unsigned char *p, *d;
int i;
unsigned long l;
int al = 0;
#ifndef OPENSSL_NO_COMP
int j;
SSL_COMP *comp;
#endif
buf = (unsigned char *)s->init_buf->data;
if (s->state == SSL3_ST_CW_CLNT_HELLO_A) {
SSL_SESSION *sess = s->session;
if ((sess == NULL) ||
(sess->ssl_version != s->version) ||
!sess->session_id_length || (sess->not_resumable)) {
if (!ssl_get_new_session(s, 0))
goto err;
}
if (s->method->version == DTLS_ANY_VERSION) {
/* Determine which DTLS version to use */
int options = s->options;
/* If DTLS 1.2 disabled correct the version number */
if (options & SSL_OP_NO_DTLSv1_2) {
if (tls1_suiteb(s)) {
SSLerr(SSL_F_SSL3_CLIENT_HELLO,
SSL_R_ONLY_DTLS_1_2_ALLOWED_IN_SUITEB_MODE);
goto err;
}
/*
* Disabling all versions is silly: return an error.
*/
if (options & SSL_OP_NO_DTLSv1) {
SSLerr(SSL_F_SSL3_CLIENT_HELLO, SSL_R_WRONG_SSL_VERSION);
goto err;
}
/*
* Update method so we don't use any DTLS 1.2 features.
*/
s->method = DTLSv1_client_method();
s->version = DTLS1_VERSION;
} else {
/*
* We only support one version: update method
*/
if (options & SSL_OP_NO_DTLSv1)
s->method = DTLSv1_2_client_method();
s->version = DTLS1_2_VERSION;
}
s->client_version = s->version;
}
/* else use the pre-loaded session */
p = s->s3->client_random;
/*
* for DTLS if client_random is initialized, reuse it, we are
* required to use same upon reply to HelloVerify
*/
if (SSL_IS_DTLS(s)) {
size_t idx;
i = 1;
for (idx = 0; idx < sizeof(s->s3->client_random); idx++) {
if (p[idx]) {
i = 0;
break;
}
}
} else
i = 1;
if (i)
ssl_fill_hello_random(s, 0, p, sizeof(s->s3->client_random));
/* Do the message type and length last */
d = p = ssl_handshake_start(s);
/*-
* version indicates the negotiated version: for example from
* an SSLv2/v3 compatible client hello). The client_version
* field is the maximum version we permit and it is also
* used in RSA encrypted premaster secrets. Some servers can
* choke if we initially report a higher version then
* renegotiate to a lower one in the premaster secret. This
* didn't happen with TLS 1.0 as most servers supported it
* but it can with TLS 1.1 or later if the server only supports
* 1.0.
*
* Possible scenario with previous logic:
* 1. Client hello indicates TLS 1.2
* 2. Server hello says TLS 1.0
* 3. RSA encrypted premaster secret uses 1.2.
* 4. Handhaked proceeds using TLS 1.0.
* 5. Server sends hello request to renegotiate.
* 6. Client hello indicates TLS v1.0 as we now
* know that is maximum server supports.
* 7. Server chokes on RSA encrypted premaster secret
* containing version 1.0.
*
* For interoperability it should be OK to always use the
* maximum version we support in client hello and then rely
* on the checking of version to ensure the servers isn't
* being inconsistent: for example initially negotiating with
* TLS 1.0 and renegotiating with TLS 1.2. We do this by using
* client_version in client hello and not resetting it to
* the negotiated version.
*/
*(p++) = s->client_version >> 8;
*(p++) = s->client_version & 0xff;
/* Random stuff */
memcpy(p, s->s3->client_random, SSL3_RANDOM_SIZE);
p += SSL3_RANDOM_SIZE;
/* Session ID */
if (s->new_session)
i = 0;
else
i = s->session->session_id_length;
*(p++) = i;
if (i != 0) {
if (i > (int)sizeof(s->session->session_id)) {
SSLerr(SSL_F_SSL3_CLIENT_HELLO, ERR_R_INTERNAL_ERROR);
goto err;
}
memcpy(p, s->session->session_id, i);
p += i;
}
/* cookie stuff for DTLS */
if (SSL_IS_DTLS(s)) {
if (s->d1->cookie_len > sizeof(s->d1->cookie)) {
SSLerr(SSL_F_SSL3_CLIENT_HELLO, ERR_R_INTERNAL_ERROR);
goto err;
}
*(p++) = s->d1->cookie_len;
memcpy(p, s->d1->cookie, s->d1->cookie_len);
p += s->d1->cookie_len;
}
/* Ciphers supported */
i = ssl_cipher_list_to_bytes(s, SSL_get_ciphers(s), &(p[2]), 0);
if (i == 0) {
SSLerr(SSL_F_SSL3_CLIENT_HELLO, SSL_R_NO_CIPHERS_AVAILABLE);
goto err;
}
#ifdef OPENSSL_MAX_TLS1_2_CIPHER_LENGTH
/*
* Some servers hang if client hello > 256 bytes as hack workaround
* chop number of supported ciphers to keep it well below this if we
* use TLS v1.2
*/
if (TLS1_get_version(s) >= TLS1_2_VERSION
&& i > OPENSSL_MAX_TLS1_2_CIPHER_LENGTH)
i = OPENSSL_MAX_TLS1_2_CIPHER_LENGTH & ~1;
#endif
s2n(i, p);
p += i;
/* COMPRESSION */
#ifdef OPENSSL_NO_COMP
*(p++) = 1;
#else
if (!ssl_allow_compression(s) || !s->ctx->comp_methods)
j = 0;
else
j = sk_SSL_COMP_num(s->ctx->comp_methods);
*(p++) = 1 + j;
for (i = 0; i < j; i++) {
comp = sk_SSL_COMP_value(s->ctx->comp_methods, i);
*(p++) = comp->id;
}
#endif
*(p++) = 0; /* Add the NULL method */
#ifndef OPENSSL_NO_TLSEXT
/* TLS extensions */
if (ssl_prepare_clienthello_tlsext(s) <= 0) {
SSLerr(SSL_F_SSL3_CLIENT_HELLO, SSL_R_CLIENTHELLO_TLSEXT);
goto err;
}
if ((p =
ssl_add_clienthello_tlsext(s, p, buf + SSL3_RT_MAX_PLAIN_LENGTH,
&al)) == NULL) {
ssl3_send_alert(s, SSL3_AL_FATAL, al);
SSLerr(SSL_F_SSL3_CLIENT_HELLO, ERR_R_INTERNAL_ERROR);
goto err;
}
#endif
l = p - d;
ssl_set_handshake_header(s, SSL3_MT_CLIENT_HELLO, l);
s->state = SSL3_ST_CW_CLNT_HELLO_B;
}
/* SSL3_ST_CW_CLNT_HELLO_B */
return ssl_do_write(s);
err:
return (-1);
}
Commit Message:
CWE ID: CWE-310 | int ssl3_client_hello(SSL *s)
{
unsigned char *buf;
unsigned char *p, *d;
int i;
unsigned long l;
int al = 0;
#ifndef OPENSSL_NO_COMP
int j;
SSL_COMP *comp;
#endif
buf = (unsigned char *)s->init_buf->data;
if (s->state == SSL3_ST_CW_CLNT_HELLO_A) {
SSL_SESSION *sess = s->session;
if ((sess == NULL) ||
(sess->ssl_version != s->version) ||
!sess->session_id_length || (sess->not_resumable)) {
if (!ssl_get_new_session(s, 0))
goto err;
}
if (s->method->version == DTLS_ANY_VERSION) {
/* Determine which DTLS version to use */
int options = s->options;
/* If DTLS 1.2 disabled correct the version number */
if (options & SSL_OP_NO_DTLSv1_2) {
if (tls1_suiteb(s)) {
SSLerr(SSL_F_SSL3_CLIENT_HELLO,
SSL_R_ONLY_DTLS_1_2_ALLOWED_IN_SUITEB_MODE);
goto err;
}
/*
* Disabling all versions is silly: return an error.
*/
if (options & SSL_OP_NO_DTLSv1) {
SSLerr(SSL_F_SSL3_CLIENT_HELLO, SSL_R_WRONG_SSL_VERSION);
goto err;
}
/*
* Update method so we don't use any DTLS 1.2 features.
*/
s->method = DTLSv1_client_method();
s->version = DTLS1_VERSION;
} else {
/*
* We only support one version: update method
*/
if (options & SSL_OP_NO_DTLSv1)
s->method = DTLSv1_2_client_method();
s->version = DTLS1_2_VERSION;
}
s->client_version = s->version;
}
/* else use the pre-loaded session */
p = s->s3->client_random;
/*
* for DTLS if client_random is initialized, reuse it, we are
* required to use same upon reply to HelloVerify
*/
if (SSL_IS_DTLS(s)) {
size_t idx;
i = 1;
for (idx = 0; idx < sizeof(s->s3->client_random); idx++) {
if (p[idx]) {
i = 0;
break;
}
}
} else
i = 1;
if (i && ssl_fill_hello_random(s, 0, p,
sizeof(s->s3->client_random)) <= 0)
goto err;
/* Do the message type and length last */
d = p = ssl_handshake_start(s);
/*-
* version indicates the negotiated version: for example from
* an SSLv2/v3 compatible client hello). The client_version
* field is the maximum version we permit and it is also
* used in RSA encrypted premaster secrets. Some servers can
* choke if we initially report a higher version then
* renegotiate to a lower one in the premaster secret. This
* didn't happen with TLS 1.0 as most servers supported it
* but it can with TLS 1.1 or later if the server only supports
* 1.0.
*
* Possible scenario with previous logic:
* 1. Client hello indicates TLS 1.2
* 2. Server hello says TLS 1.0
* 3. RSA encrypted premaster secret uses 1.2.
* 4. Handhaked proceeds using TLS 1.0.
* 5. Server sends hello request to renegotiate.
* 6. Client hello indicates TLS v1.0 as we now
* know that is maximum server supports.
* 7. Server chokes on RSA encrypted premaster secret
* containing version 1.0.
*
* For interoperability it should be OK to always use the
* maximum version we support in client hello and then rely
* on the checking of version to ensure the servers isn't
* being inconsistent: for example initially negotiating with
* TLS 1.0 and renegotiating with TLS 1.2. We do this by using
* client_version in client hello and not resetting it to
* the negotiated version.
*/
*(p++) = s->client_version >> 8;
*(p++) = s->client_version & 0xff;
/* Random stuff */
memcpy(p, s->s3->client_random, SSL3_RANDOM_SIZE);
p += SSL3_RANDOM_SIZE;
/* Session ID */
if (s->new_session)
i = 0;
else
i = s->session->session_id_length;
*(p++) = i;
if (i != 0) {
if (i > (int)sizeof(s->session->session_id)) {
SSLerr(SSL_F_SSL3_CLIENT_HELLO, ERR_R_INTERNAL_ERROR);
goto err;
}
memcpy(p, s->session->session_id, i);
p += i;
}
/* cookie stuff for DTLS */
if (SSL_IS_DTLS(s)) {
if (s->d1->cookie_len > sizeof(s->d1->cookie)) {
SSLerr(SSL_F_SSL3_CLIENT_HELLO, ERR_R_INTERNAL_ERROR);
goto err;
}
*(p++) = s->d1->cookie_len;
memcpy(p, s->d1->cookie, s->d1->cookie_len);
p += s->d1->cookie_len;
}
/* Ciphers supported */
i = ssl_cipher_list_to_bytes(s, SSL_get_ciphers(s), &(p[2]), 0);
if (i == 0) {
SSLerr(SSL_F_SSL3_CLIENT_HELLO, SSL_R_NO_CIPHERS_AVAILABLE);
goto err;
}
#ifdef OPENSSL_MAX_TLS1_2_CIPHER_LENGTH
/*
* Some servers hang if client hello > 256 bytes as hack workaround
* chop number of supported ciphers to keep it well below this if we
* use TLS v1.2
*/
if (TLS1_get_version(s) >= TLS1_2_VERSION
&& i > OPENSSL_MAX_TLS1_2_CIPHER_LENGTH)
i = OPENSSL_MAX_TLS1_2_CIPHER_LENGTH & ~1;
#endif
s2n(i, p);
p += i;
/* COMPRESSION */
#ifdef OPENSSL_NO_COMP
*(p++) = 1;
#else
if (!ssl_allow_compression(s) || !s->ctx->comp_methods)
j = 0;
else
j = sk_SSL_COMP_num(s->ctx->comp_methods);
*(p++) = 1 + j;
for (i = 0; i < j; i++) {
comp = sk_SSL_COMP_value(s->ctx->comp_methods, i);
*(p++) = comp->id;
}
#endif
*(p++) = 0; /* Add the NULL method */
#ifndef OPENSSL_NO_TLSEXT
/* TLS extensions */
if (ssl_prepare_clienthello_tlsext(s) <= 0) {
SSLerr(SSL_F_SSL3_CLIENT_HELLO, SSL_R_CLIENTHELLO_TLSEXT);
goto err;
}
if ((p =
ssl_add_clienthello_tlsext(s, p, buf + SSL3_RT_MAX_PLAIN_LENGTH,
&al)) == NULL) {
ssl3_send_alert(s, SSL3_AL_FATAL, al);
SSLerr(SSL_F_SSL3_CLIENT_HELLO, ERR_R_INTERNAL_ERROR);
goto err;
}
#endif
l = p - d;
ssl_set_handshake_header(s, SSL3_MT_CLIENT_HELLO, l);
s->state = SSL3_ST_CW_CLNT_HELLO_B;
}
/* SSL3_ST_CW_CLNT_HELLO_B */
return ssl_do_write(s);
err:
return (-1);
}
| 164,812 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: const CuePoint::TrackPosition* CuePoint::Find(const Track* pTrack) const
{
assert(pTrack);
const long long n = pTrack->GetNumber();
const TrackPosition* i = m_track_positions;
const TrackPosition* const j = i + m_track_positions_count;
while (i != j)
{
const TrackPosition& p = *i++;
if (p.m_track == n)
return &p;
}
return NULL; //no matching track number found
}
Commit Message: libwebm: Pull from upstream
Rolling mkvparser from upstream. Primarily for fixing a bug on parsing
failures with certain Opus WebM files.
Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae
The diff is so huge because there were some style clean ups upstream.
But it was ensured that there were no breaking changes when the style
clean ups was done upstream.
Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039
CWE ID: CWE-119 | const CuePoint::TrackPosition* CuePoint::Find(const Track* pTrack) const {
const long long n = pTrack->GetNumber();
const TrackPosition* i = m_track_positions;
const TrackPosition* const j = i + m_track_positions_count;
while (i != j) {
const TrackPosition& p = *i++;
if (p.m_track == n)
return &p;
}
return NULL; // no matching track number found
}
| 174,278 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void Document::open(Document* entered_document,
ExceptionState& exception_state) {
if (ImportLoader()) {
exception_state.ThrowDOMException(
kInvalidStateError, "Imported document doesn't support open().");
return;
}
if (!IsHTMLDocument()) {
exception_state.ThrowDOMException(kInvalidStateError,
"Only HTML documents support open().");
return;
}
if (throw_on_dynamic_markup_insertion_count_) {
exception_state.ThrowDOMException(
kInvalidStateError,
"Custom Element constructor should not use open().");
return;
}
if (entered_document) {
if (!GetSecurityOrigin()->IsSameSchemeHostPortAndSuborigin(
entered_document->GetSecurityOrigin())) {
exception_state.ThrowSecurityError(
"Can only call open() on same-origin documents.");
return;
}
SetSecurityOrigin(entered_document->GetSecurityOrigin());
if (this != entered_document) {
KURL new_url = entered_document->Url();
new_url.SetFragmentIdentifier(String());
SetURL(new_url);
}
cookie_url_ = entered_document->CookieURL();
}
open();
}
Commit Message: Inherit referrer and policy when creating a nested browsing context
BUG=763194
R=estark@chromium.org
Change-Id: Ide3950269adf26ba221f573dfa088e95291ab676
Reviewed-on: https://chromium-review.googlesource.com/732652
Reviewed-by: Emily Stark <estark@chromium.org>
Commit-Queue: Jochen Eisinger <jochen@chromium.org>
Cr-Commit-Position: refs/heads/master@{#511211}
CWE ID: CWE-20 | void Document::open(Document* entered_document,
ExceptionState& exception_state) {
if (ImportLoader()) {
exception_state.ThrowDOMException(
kInvalidStateError, "Imported document doesn't support open().");
return;
}
if (!IsHTMLDocument()) {
exception_state.ThrowDOMException(kInvalidStateError,
"Only HTML documents support open().");
return;
}
if (throw_on_dynamic_markup_insertion_count_) {
exception_state.ThrowDOMException(
kInvalidStateError,
"Custom Element constructor should not use open().");
return;
}
if (entered_document) {
if (!GetSecurityOrigin()->IsSameSchemeHostPortAndSuborigin(
entered_document->GetSecurityOrigin())) {
exception_state.ThrowSecurityError(
"Can only call open() on same-origin documents.");
return;
}
SetSecurityOrigin(entered_document->GetSecurityOrigin());
if (this != entered_document) {
KURL new_url = entered_document->Url();
new_url.SetFragmentIdentifier(String());
SetURL(new_url);
SetReferrerPolicy(entered_document->GetReferrerPolicy());
}
cookie_url_ = entered_document->CookieURL();
}
open();
}
| 172,691 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: png_convert_to_rfc1123(png_structp png_ptr, png_timep ptime)
{
static PNG_CONST char short_months[12][4] =
{"Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};
if (png_ptr == NULL)
return (NULL);
if (png_ptr->time_buffer == NULL)
{
png_ptr->time_buffer = (png_charp)png_malloc(png_ptr, (png_uint_32)(29*
png_sizeof(char)));
}
#ifdef _WIN32_WCE
{
wchar_t time_buf[29];
wsprintf(time_buf, TEXT("%d %S %d %02d:%02d:%02d +0000"),
ptime->day % 32, short_months[(ptime->month - 1) % 12],
ptime->year, ptime->hour % 24, ptime->minute % 60,
ptime->second % 61);
WideCharToMultiByte(CP_ACP, 0, time_buf, -1, png_ptr->time_buffer,
29, NULL, NULL);
}
#else
#ifdef USE_FAR_KEYWORD
{
char near_time_buf[29];
png_snprintf6(near_time_buf, 29, "%d %s %d %02d:%02d:%02d +0000",
ptime->day % 32, short_months[(ptime->month - 1) % 12],
ptime->year, ptime->hour % 24, ptime->minute % 60,
ptime->second % 61);
png_memcpy(png_ptr->time_buffer, near_time_buf,
29*png_sizeof(char));
}
#else
png_snprintf6(png_ptr->time_buffer, 29, "%d %s %d %02d:%02d:%02d +0000",
ptime->day % 32, short_months[(ptime->month - 1) % 12],
ptime->year, ptime->hour % 24, ptime->minute % 60,
ptime->second % 61);
#endif
#endif /* _WIN32_WCE */
return ((png_charp)png_ptr->time_buffer);
}
Commit Message: third_party/libpng: update to 1.2.54
TBR=darin@chromium.org
BUG=560291
Review URL: https://codereview.chromium.org/1467263003
Cr-Commit-Position: refs/heads/master@{#362298}
CWE ID: CWE-119 | png_convert_to_rfc1123(png_structp png_ptr, png_timep ptime)
{
static PNG_CONST char short_months[12][4] =
{"Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"};
if (png_ptr == NULL)
return (NULL);
if (png_ptr->time_buffer == NULL)
{
png_ptr->time_buffer = (png_charp)png_malloc(png_ptr, (png_uint_32)(29*
png_sizeof(char)));
}
#ifdef _WIN32_WCE
{
wchar_t time_buf[29];
wsprintf(time_buf, TEXT("%d %S %d %02d:%02d:%02d +0000"),
ptime->day % 32, short_months[(ptime->month - 1U) % 12],
ptime->year, ptime->hour % 24, ptime->minute % 60,
ptime->second % 61);
WideCharToMultiByte(CP_ACP, 0, time_buf, -1, png_ptr->time_buffer,
29, NULL, NULL);
}
#else
#ifdef USE_FAR_KEYWORD
{
char near_time_buf[29];
png_snprintf6(near_time_buf, 29, "%d %s %d %02d:%02d:%02d +0000",
ptime->day % 32, short_months[(ptime->month - 1U) % 12],
ptime->year, ptime->hour % 24, ptime->minute % 60,
ptime->second % 61);
png_memcpy(png_ptr->time_buffer, near_time_buf,
29*png_sizeof(char));
}
#else
png_snprintf6(png_ptr->time_buffer, 29, "%d %s %d %02d:%02d:%02d +0000",
ptime->day % 32, short_months[(ptime->month - 1U) % 12],
ptime->year, ptime->hour % 24, ptime->minute % 60,
ptime->second % 61);
#endif
#endif /* _WIN32_WCE */
return ((png_charp)png_ptr->time_buffer);
}
| 172,161 |
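The only functional change in the fixed function above is that the month index is computed as (ptime->month - 1U) % 12 instead of (ptime->month - 1) % 12. When a malformed tIME chunk carries month == 0, the signed form evaluates to -1 (the C remainder keeps the sign of the dividend), which reads one element before short_months; the unsigned form wraps to a large value whose remainder always lands in 0..11. A small self-contained demonstration, assuming the month field promotes to int in the usual way:

#include <stdio.h>

int main(void)
{
    unsigned char month = 0;                     /* malformed value, valid months are 1..12 */

    int signed_idx = (month - 1) % 12;           /* (0 - 1) % 12 == -1: out-of-bounds index */
    unsigned unsigned_idx = (month - 1U) % 12;   /* UINT_MAX % 12 == 3: stays in 0..11 */

    printf("%d %u\n", signed_idx, unsigned_idx); /* prints -1 3 */
    return 0;
}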
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static plist_t parse_bin_node(struct bplist_data *bplist, const char** object)
{
uint16_t type = 0;
uint64_t size = 0;
if (!object)
return NULL;
type = (**object) & BPLIST_MASK;
size = (**object) & BPLIST_FILL;
(*object)++;
if (size == BPLIST_FILL) {
switch (type) {
case BPLIST_DATA:
case BPLIST_STRING:
case BPLIST_UNICODE:
case BPLIST_ARRAY:
case BPLIST_SET:
case BPLIST_DICT:
{
uint16_t next_size = **object & BPLIST_FILL;
if ((**object & BPLIST_MASK) != BPLIST_UINT) {
PLIST_BIN_ERR("%s: invalid size node type for node type 0x%02x: found 0x%02x, expected 0x%02x\n", __func__, type, **object & BPLIST_MASK, BPLIST_UINT);
return NULL;
}
(*object)++;
next_size = 1 << next_size;
if (*object + next_size > bplist->offset_table) {
PLIST_BIN_ERR("%s: size node data bytes for node type 0x%02x point outside of valid range\n", __func__, type);
return NULL;
}
size = UINT_TO_HOST(*object, next_size);
(*object) += next_size;
break;
}
default:
break;
}
}
switch (type)
{
case BPLIST_NULL:
switch (size)
{
case BPLIST_TRUE:
{
plist_data_t data = plist_new_plist_data();
data->type = PLIST_BOOLEAN;
data->boolval = TRUE;
data->length = 1;
return node_create(NULL, data);
}
case BPLIST_FALSE:
{
plist_data_t data = plist_new_plist_data();
data->type = PLIST_BOOLEAN;
data->boolval = FALSE;
data->length = 1;
return node_create(NULL, data);
}
case BPLIST_NULL:
default:
return NULL;
}
case BPLIST_UINT:
if (*object + (uint64_t)(1 << size) > bplist->offset_table) {
PLIST_BIN_ERR("%s: BPLIST_UINT data bytes point outside of valid range\n", __func__);
return NULL;
}
return parse_uint_node(object, size);
case BPLIST_REAL:
if (*object + (uint64_t)(1 << size) > bplist->offset_table) {
PLIST_BIN_ERR("%s: BPLIST_REAL data bytes point outside of valid range\n", __func__);
return NULL;
}
return parse_real_node(object, size);
case BPLIST_DATE:
if (3 != size) {
PLIST_BIN_ERR("%s: invalid data size for BPLIST_DATE node\n", __func__);
return NULL;
}
if (*object + (uint64_t)(1 << size) > bplist->offset_table) {
PLIST_BIN_ERR("%s: BPLIST_DATE data bytes point outside of valid range\n", __func__);
return NULL;
}
return parse_date_node(object, size);
case BPLIST_DATA:
if (*object + size > bplist->offset_table) {
PLIST_BIN_ERR("%s: BPLIST_DATA data bytes point outside of valid range\n", __func__);
return NULL;
}
return parse_data_node(object, size);
case BPLIST_STRING:
if (*object + size > bplist->offset_table) {
PLIST_BIN_ERR("%s: BPLIST_STRING data bytes point outside of valid range\n", __func__);
return NULL;
}
return parse_string_node(object, size);
case BPLIST_UNICODE:
if (size*2 < size) {
PLIST_BIN_ERR("%s: Integer overflow when calculating BPLIST_UNICODE data size.\n", __func__);
return NULL;
}
if (*object + size*2 > bplist->offset_table) {
PLIST_BIN_ERR("%s: BPLIST_UNICODE data bytes point outside of valid range\n", __func__);
return NULL;
}
return parse_unicode_node(object, size);
case BPLIST_SET:
case BPLIST_ARRAY:
if (*object + size > bplist->offset_table) {
PLIST_BIN_ERR("%s: BPLIST_ARRAY data bytes point outside of valid range\n", __func__);
return NULL;
}
return parse_array_node(bplist, object, size);
case BPLIST_UID:
if (*object + size+1 > bplist->offset_table) {
PLIST_BIN_ERR("%s: BPLIST_UID data bytes point outside of valid range\n", __func__);
return NULL;
}
return parse_uid_node(object, size);
case BPLIST_DICT:
if (*object + size > bplist->offset_table) {
PLIST_BIN_ERR("%s: BPLIST_REAL data bytes point outside of valid range\n", __func__);
return NULL;
}
return parse_dict_node(bplist, object, size);
default:
PLIST_BIN_ERR("%s: unexpected node type 0x%02x\n", __func__, type);
return NULL;
}
return NULL;
}
Commit Message: bplist: Fix data range check for string/data/dict/array nodes
Passing a size of 0xFFFFFFFFFFFFFFFF to parse_string_node() might result
in a memcpy with a size of -1, leading to undefined behavior.
This commit makes sure that the actual node data (which depends on the size)
is in the range start_of_object..start_of_object+size.
Credit to OSS-Fuzz
CWE ID: CWE-787 | static plist_t parse_bin_node(struct bplist_data *bplist, const char** object)
{
uint16_t type = 0;
uint64_t size = 0;
if (!object)
return NULL;
type = (**object) & BPLIST_MASK;
size = (**object) & BPLIST_FILL;
(*object)++;
if (size == BPLIST_FILL) {
switch (type) {
case BPLIST_DATA:
case BPLIST_STRING:
case BPLIST_UNICODE:
case BPLIST_ARRAY:
case BPLIST_SET:
case BPLIST_DICT:
{
uint16_t next_size = **object & BPLIST_FILL;
if ((**object & BPLIST_MASK) != BPLIST_UINT) {
PLIST_BIN_ERR("%s: invalid size node type for node type 0x%02x: found 0x%02x, expected 0x%02x\n", __func__, type, **object & BPLIST_MASK, BPLIST_UINT);
return NULL;
}
(*object)++;
next_size = 1 << next_size;
if (*object + next_size > bplist->offset_table) {
PLIST_BIN_ERR("%s: size node data bytes for node type 0x%02x point outside of valid range\n", __func__, type);
return NULL;
}
size = UINT_TO_HOST(*object, next_size);
(*object) += next_size;
break;
}
default:
break;
}
}
switch (type)
{
case BPLIST_NULL:
switch (size)
{
case BPLIST_TRUE:
{
plist_data_t data = plist_new_plist_data();
data->type = PLIST_BOOLEAN;
data->boolval = TRUE;
data->length = 1;
return node_create(NULL, data);
}
case BPLIST_FALSE:
{
plist_data_t data = plist_new_plist_data();
data->type = PLIST_BOOLEAN;
data->boolval = FALSE;
data->length = 1;
return node_create(NULL, data);
}
case BPLIST_NULL:
default:
return NULL;
}
case BPLIST_UINT:
if (*object + (uint64_t)(1 << size) > bplist->offset_table) {
PLIST_BIN_ERR("%s: BPLIST_UINT data bytes point outside of valid range\n", __func__);
return NULL;
}
return parse_uint_node(object, size);
case BPLIST_REAL:
if (*object + (uint64_t)(1 << size) > bplist->offset_table) {
PLIST_BIN_ERR("%s: BPLIST_REAL data bytes point outside of valid range\n", __func__);
return NULL;
}
return parse_real_node(object, size);
case BPLIST_DATE:
if (3 != size) {
PLIST_BIN_ERR("%s: invalid data size for BPLIST_DATE node\n", __func__);
return NULL;
}
if (*object + (uint64_t)(1 << size) > bplist->offset_table) {
PLIST_BIN_ERR("%s: BPLIST_DATE data bytes point outside of valid range\n", __func__);
return NULL;
}
return parse_date_node(object, size);
case BPLIST_DATA:
if (*object + size < *object || *object + size > bplist->offset_table) {
PLIST_BIN_ERR("%s: BPLIST_DATA data bytes point outside of valid range\n", __func__);
return NULL;
}
return parse_data_node(object, size);
case BPLIST_STRING:
if (*object + size < *object || *object + size > bplist->offset_table) {
PLIST_BIN_ERR("%s: BPLIST_STRING data bytes point outside of valid range\n", __func__);
return NULL;
}
return parse_string_node(object, size);
case BPLIST_UNICODE:
if (size*2 < size) {
PLIST_BIN_ERR("%s: Integer overflow when calculating BPLIST_UNICODE data size.\n", __func__);
return NULL;
}
if (*object + size*2 < *object || *object + size*2 > bplist->offset_table) {
PLIST_BIN_ERR("%s: BPLIST_UNICODE data bytes point outside of valid range\n", __func__);
return NULL;
}
return parse_unicode_node(object, size);
case BPLIST_SET:
case BPLIST_ARRAY:
if (*object + size < *object || *object + size > bplist->offset_table) {
PLIST_BIN_ERR("%s: BPLIST_ARRAY data bytes point outside of valid range\n", __func__);
return NULL;
}
return parse_array_node(bplist, object, size);
case BPLIST_UID:
if (*object + size+1 > bplist->offset_table) {
PLIST_BIN_ERR("%s: BPLIST_UID data bytes point outside of valid range\n", __func__);
return NULL;
}
return parse_uid_node(object, size);
case BPLIST_DICT:
if (*object + size < *object || *object + size > bplist->offset_table) {
PLIST_BIN_ERR("%s: BPLIST_DICT data bytes point outside of valid range\n", __func__);
return NULL;
}
return parse_dict_node(bplist, object, size);
default:
PLIST_BIN_ERR("%s: unexpected node type 0x%02x\n", __func__, type);
return NULL;
}
return NULL;
}
| 168,334 |
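The clause *object + size < *object that the fix adds in front of several range checks above guards against pointer wraparound: with an attacker-controlled 64-bit size, ptr + size can wrap to an address below ptr, so a plain ptr + size > end comparison passes and the later copy runs with an enormous length (the commit message's memcpy with a size of -1). The combined check, reduced to a standalone sketch rather than the libplist internals:

#include <stdint.h>

/* Return 1 if [ptr, ptr + size) lies inside [ptr, end), 0 otherwise. */
static int range_ok(const char *ptr, uint64_t size, const char *end)
{
    if (ptr + size < ptr)   /* size so large that the address wraps around */
        return 0;
    if (ptr + size > end)   /* ordinary out-of-range case */
        return 0;
    return 1;
}

Strictly speaking the wrapped addition is itself undefined behaviour in ISO C, so an even stricter variant compares lengths rather than pointers, for example size <= (uint64_t)(end - ptr) once ptr <= end is established.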
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void jas_matrix_asr(jas_matrix_t *matrix, int n)
{
int i;
int j;
jas_seqent_t *rowstart;
int rowstep;
jas_seqent_t *data;
assert(n >= 0);
if (jas_matrix_numrows(matrix) > 0 && jas_matrix_numcols(matrix) > 0) {
assert(matrix->rows_);
rowstep = jas_matrix_rowstep(matrix);
for (i = matrix->numrows_, rowstart = matrix->rows_[0]; i > 0; --i,
rowstart += rowstep) {
for (j = matrix->numcols_, data = rowstart; j > 0; --j,
++data) {
*data = jas_seqent_asr(*data, n);
}
}
}
}
Commit Message: The generation of the configuration file jas_config.h has been completely
reworked in order to avoid pollution of the global namespace.
Some problematic types like uchar, ulong, and friends have been replaced
with names with a jas_ prefix.
An option max_samples has been added to the BMP and JPEG decoders to
restrict the maximum size of image that they can decode. This change
was made as a (possibly temporary) fix to address security concerns.
A max_samples command-line option has also been added to imginfo.
Whether an image component (for jas_image_t) is stored in memory or on
disk is now based on the component size (rather than the image size).
Some debug log message were added.
Some new integer overflow checks were added.
Some new safe integer add/multiply functions were added.
More pre-C99 cruft was removed. JasPer has numerous "hacks" to
handle pre-C99 compilers. JasPer now assumes C99 support. So, this
pre-C99 cruft is unnecessary and can be removed.
The regression jasper-doublefree-mem_close.jpg has been re-enabled.
Theoretically, it should work more predictably now.
CWE ID: CWE-190 | void jas_matrix_asr(jas_matrix_t *matrix, int n)
{
jas_matind_t i;
jas_matind_t j;
jas_seqent_t *rowstart;
jas_matind_t rowstep;
jas_seqent_t *data;
assert(n >= 0);
if (jas_matrix_numrows(matrix) > 0 && jas_matrix_numcols(matrix) > 0) {
assert(matrix->rows_);
rowstep = jas_matrix_rowstep(matrix);
for (i = matrix->numrows_, rowstart = matrix->rows_[0]; i > 0; --i,
rowstart += rowstep) {
for (j = matrix->numcols_, data = rowstart; j > 0; --j,
++data) {
*data = jas_seqent_asr(*data, n);
}
}
}
}
| 168,698 |
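Among the items listed in the commit message above, the new safe integer add/multiply functions are the piece most worth spelling out: they report overflow instead of silently wrapping, so a size computation can be rejected before it reaches an allocator or a loop bound. Typical helpers of this kind look roughly as follows (illustrative shapes only; JasPer's own versions have their own names and signatures):

#include <stdbool.h>
#include <stddef.h>

/* Multiply two size_t values, failing instead of wrapping on overflow. */
static bool safe_size_mul(size_t a, size_t b, size_t *result)
{
    if (b != 0 && a > (size_t)-1 / b)
        return false;         /* a * b would exceed SIZE_MAX */
    *result = a * b;
    return true;
}

/* Add two size_t values with the same convention. */
static bool safe_size_add(size_t a, size_t b, size_t *result)
{
    if (a > (size_t)-1 - b)
        return false;
    *result = a + b;
    return true;
}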
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: Status XvMCGetDRInfo(Display *dpy, XvPortID port,
char **name, char **busID,
int *major, int *minor,
int *patchLevel,
int *isLocal)
{
XExtDisplayInfo *info = xvmc_find_display(dpy);
xvmcGetDRInfoReply rep;
xvmcGetDRInfoReq *req;
CARD32 magic;
#ifdef HAVE_SHMAT
volatile CARD32 *shMem;
struct timezone here;
struct timeval now;
here.tz_minuteswest = 0;
here.tz_dsttime = 0;
#endif
*name = NULL;
*busID = NULL;
XvMCCheckExtension (dpy, info, BadImplementation);
LockDisplay (dpy);
XvMCGetReq (GetDRInfo, req);
req->port = port;
magic = 0;
req->magic = 0;
#ifdef HAVE_SHMAT
req->shmKey = shmget(IPC_PRIVATE, 1024, IPC_CREAT | 0600);
/*
* We fill a shared memory page with a repetitive pattern. If the
* X server can read this pattern, we probably have a local connection.
* Note that we can trigger the remote X server to read any shared
* page on the remote machine, so we shouldn't be able to guess and verify
* any complicated data on those pages. Thats the explanation of this
* otherwise stupid-looking pattern algorithm.
*/
if (req->shmKey >= 0) {
shMem = (CARD32 *) shmat(req->shmKey, NULL, 0);
shmctl( req->shmKey, IPC_RMID, NULL);
if ( shMem ) {
register volatile CARD32 *shMemC = shMem;
register int i;
gettimeofday( &now, &here);
magic = now.tv_usec & 0x000FFFFF;
req->magic = magic;
i = 1024 / sizeof(CARD32);
while(i--) {
*shMemC++ = magic;
magic = ~magic;
}
} else {
req->shmKey = -1;
}
}
#else
req->shmKey = 0;
#endif
if (!_XReply (dpy, (xReply *) &rep, 0, xFalse)) {
UnlockDisplay (dpy);
SyncHandle ();
#ifdef HAVE_SHMAT
if ( req->shmKey >= 0) {
shmdt( (const void *) shMem );
}
#endif
return -1;
}
#ifdef HAVE_SHMAT
shmdt( (const void *) shMem );
#endif
if (rep.length > 0) {
unsigned long realSize = 0;
char *tmpBuf = NULL;
if ((rep.length < (INT_MAX >> 2)) &&
/* protect against overflow in strncpy below */
(rep.nameLen + rep.busIDLen > rep.nameLen)) {
realSize = rep.length << 2;
if (realSize >= (rep.nameLen + rep.busIDLen)) {
tmpBuf = Xmalloc(realSize);
*name = Xmalloc(rep.nameLen);
*busID = Xmalloc(rep.busIDLen);
}
}
if (*name && *busID && tmpBuf) {
_XRead(dpy, tmpBuf, realSize);
strncpy(*name,tmpBuf,rep.nameLen);
(*name)[rep.nameLen - 1] = '\0';
strncpy(*busID,tmpBuf+rep.nameLen,rep.busIDLen);
(*busID)[rep.busIDLen - 1] = '\0';
XFree(tmpBuf);
} else {
XFree(*name);
*name = NULL;
XFree(*busID);
*busID = NULL;
XFree(tmpBuf);
_XEatDataWords(dpy, rep.length);
UnlockDisplay (dpy);
SyncHandle ();
return -1;
}
}
UnlockDisplay (dpy);
SyncHandle ();
*major = rep.major;
*minor = rep.minor;
*patchLevel = rep.patchLevel;
*isLocal = (req->shmKey > 0) ? rep.isLocal : 1;
return (rep.length > 0) ? Success : BadImplementation;
}
Commit Message:
CWE ID: CWE-119 | Status XvMCGetDRInfo(Display *dpy, XvPortID port,
char **name, char **busID,
int *major, int *minor,
int *patchLevel,
int *isLocal)
{
XExtDisplayInfo *info = xvmc_find_display(dpy);
xvmcGetDRInfoReply rep;
xvmcGetDRInfoReq *req;
CARD32 magic;
#ifdef HAVE_SHMAT
volatile CARD32 *shMem;
struct timezone here;
struct timeval now;
here.tz_minuteswest = 0;
here.tz_dsttime = 0;
#endif
*name = NULL;
*busID = NULL;
XvMCCheckExtension (dpy, info, BadImplementation);
LockDisplay (dpy);
XvMCGetReq (GetDRInfo, req);
req->port = port;
magic = 0;
req->magic = 0;
#ifdef HAVE_SHMAT
req->shmKey = shmget(IPC_PRIVATE, 1024, IPC_CREAT | 0600);
/*
* We fill a shared memory page with a repetitive pattern. If the
* X server can read this pattern, we probably have a local connection.
* Note that we can trigger the remote X server to read any shared
* page on the remote machine, so we shouldn't be able to guess and verify
* any complicated data on those pages. Thats the explanation of this
* otherwise stupid-looking pattern algorithm.
*/
if (req->shmKey >= 0) {
shMem = (CARD32 *) shmat(req->shmKey, NULL, 0);
shmctl( req->shmKey, IPC_RMID, NULL);
if ( shMem ) {
register volatile CARD32 *shMemC = shMem;
register int i;
gettimeofday( &now, &here);
magic = now.tv_usec & 0x000FFFFF;
req->magic = magic;
i = 1024 / sizeof(CARD32);
while(i--) {
*shMemC++ = magic;
magic = ~magic;
}
} else {
req->shmKey = -1;
}
}
#else
req->shmKey = 0;
#endif
if (!_XReply (dpy, (xReply *) &rep, 0, xFalse)) {
UnlockDisplay (dpy);
SyncHandle ();
#ifdef HAVE_SHMAT
if ( req->shmKey >= 0) {
shmdt( (const void *) shMem );
}
#endif
return -1;
}
#ifdef HAVE_SHMAT
shmdt( (const void *) shMem );
#endif
if (rep.length > 0) {
unsigned long realSize = 0;
char *tmpBuf = NULL;
if ((rep.length < (INT_MAX >> 2)) &&
/* protect against overflow in strncpy below */
(rep.nameLen + rep.busIDLen > rep.nameLen)) {
realSize = rep.length << 2;
if (realSize >= (rep.nameLen + rep.busIDLen)) {
tmpBuf = Xmalloc(realSize);
*name = Xmalloc(rep.nameLen);
*busID = Xmalloc(rep.busIDLen);
}
}
if (*name && *busID && tmpBuf) {
_XRead(dpy, tmpBuf, realSize);
strncpy(*name,tmpBuf,rep.nameLen);
(*name)[rep.nameLen == 0 ? 0 : rep.nameLen - 1] = '\0';
strncpy(*busID,tmpBuf+rep.nameLen,rep.busIDLen);
(*busID)[rep.busIDLen == 0 ? 0 : rep.busIDLen - 1] = '\0';
XFree(tmpBuf);
} else {
XFree(*name);
*name = NULL;
XFree(*busID);
*busID = NULL;
XFree(tmpBuf);
_XEatDataWords(dpy, rep.length);
UnlockDisplay (dpy);
SyncHandle ();
return -1;
}
}
UnlockDisplay (dpy);
SyncHandle ();
*major = rep.major;
*minor = rep.minor;
*patchLevel = rep.patchLevel;
*isLocal = (req->shmKey > 0) ? rep.isLocal : 1;
return (rep.length > 0) ? Success : BadImplementation;
}
| 164,913 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void BrowserTabStripController::TabDetachedAt(TabContents* contents,
int model_index) {
hover_tab_selector_.CancelTabTransition();
tabstrip_->RemoveTabAt(model_index);
}
Commit Message: Remove TabContents from TabStripModelObserver::TabDetachedAt.
BUG=107201
TEST=no visible change
Review URL: https://chromiumcodereview.appspot.com/11293205
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@167122 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-20 | void BrowserTabStripController::TabDetachedAt(WebContents* contents,
int model_index) {
hover_tab_selector_.CancelTabTransition();
tabstrip_->RemoveTabAt(model_index);
}
| 171,523 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static inline int handle_dots(struct nameidata *nd, int type)
{
if (type == LAST_DOTDOT) {
if (nd->flags & LOOKUP_RCU) {
return follow_dotdot_rcu(nd);
} else
follow_dotdot(nd);
}
return 0;
}
Commit Message: vfs: Test for and handle paths that are unreachable from their mnt_root
In rare cases a directory can be renamed out from under a bind mount.
In those cases without special handling it becomes possible to walk up
the directory tree to the root dentry of the filesystem and down
from the root dentry to every other file or directory on the filesystem.
Like division by zero .. from an unconnected path can not be given
a useful semantic as there is no predicting at which path component
the code will realize it is unconnected. We certainly can not match
the current behavior as the current behavior is a security hole.
Therefore when encounting .. when following an unconnected path
return -ENOENT.
- Add a function path_connected to verify path->dentry is reachable
from path->mnt.mnt_root. AKA to validate that rename did not do
something nasty to the bind mount.
To avoid races path_connected must be called after following a path
component to it's next path component.
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
CWE ID: CWE-254 | static inline int handle_dots(struct nameidata *nd, int type)
{
if (type == LAST_DOTDOT) {
if (nd->flags & LOOKUP_RCU) {
return follow_dotdot_rcu(nd);
} else
return follow_dotdot(nd);
}
return 0;
}
| 166,637 |
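The commit message above introduces path_connected, a check that the dentry just reached is still reachable from the mount's root before .. is followed any further. The essence of such a reachability test is a parent walk; the sketch below is a simplified single-filesystem illustration with made-up structures, not the actual kernel helper, which also has to cope with RCU walk modes and mount internals:

#include <stdbool.h>

struct dentry {
    struct dentry *d_parent;    /* a filesystem root is its own parent */
};

/* Walk up the parent chain from 'dentry'.  Reaching 'mnt_root' means the path
 * is still inside the bind mount; reaching the filesystem root first means the
 * directory was renamed out from under the mount and the lookup should fail
 * with -ENOENT, as the commit message describes. */
static bool still_connected(const struct dentry *mnt_root,
                            const struct dentry *dentry)
{
    while (dentry != mnt_root) {
        const struct dentry *parent = dentry->d_parent;
        if (parent == dentry)   /* hit the filesystem root without seeing mnt_root */
            return false;
        dentry = parent;
    }
    return true;
}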
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void Browser::TabReplacedAt(TabStripModel* tab_strip_model,
TabContents* old_contents,
TabContents* new_contents,
int index) {
TabDetachedAtImpl(old_contents, index, DETACH_TYPE_REPLACE);
SessionService* session_service =
SessionServiceFactory::GetForProfile(profile_);
if (session_service)
session_service->TabClosing(old_contents->web_contents());
TabInsertedAt(new_contents->web_contents(), index, (index == active_index()));
int entry_count =
new_contents->web_contents()->GetController().GetEntryCount();
if (entry_count > 0) {
new_contents->web_contents()->GetController().NotifyEntryChanged(
new_contents->web_contents()->GetController().GetEntryAtIndex(
entry_count - 1),
entry_count - 1);
}
if (session_service) {
session_service->TabRestored(new_contents,
tab_strip_model_->IsTabPinned(index));
}
content::DevToolsManager::GetInstance()->ContentsReplaced(
old_contents->web_contents(), new_contents->web_contents());
}
Commit Message: Remove TabContents from TabStripModelObserver::TabDetachedAt.
BUG=107201
TEST=no visible change
Review URL: https://chromiumcodereview.appspot.com/11293205
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@167122 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-20 | void Browser::TabReplacedAt(TabStripModel* tab_strip_model,
TabContents* old_contents,
TabContents* new_contents,
int index) {
TabDetachedAtImpl(old_contents->web_contents(), index, DETACH_TYPE_REPLACE);
SessionService* session_service =
SessionServiceFactory::GetForProfile(profile_);
if (session_service)
session_service->TabClosing(old_contents->web_contents());
TabInsertedAt(new_contents->web_contents(), index, (index == active_index()));
int entry_count =
new_contents->web_contents()->GetController().GetEntryCount();
if (entry_count > 0) {
new_contents->web_contents()->GetController().NotifyEntryChanged(
new_contents->web_contents()->GetController().GetEntryAtIndex(
entry_count - 1),
entry_count - 1);
}
if (session_service) {
session_service->TabRestored(new_contents,
tab_strip_model_->IsTabPinned(index));
}
content::DevToolsManager::GetInstance()->ContentsReplaced(
old_contents->web_contents(), new_contents->web_contents());
}
| 171,509 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: image_transform_png_set_scale_16_add(image_transform *this,
PNG_CONST image_transform **that, png_byte colour_type, png_byte bit_depth)
{
UNUSED(colour_type)
this->next = *that;
*that = this;
return bit_depth > 8;
}
Commit Message: DO NOT MERGE Update libpng to 1.6.20
BUG:23265085
Change-Id: I85199805636d771f3597b691b63bc0bf46084833
(cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82)
CWE ID: | image_transform_png_set_scale_16_add(image_transform *this,
const image_transform **that, png_byte colour_type, png_byte bit_depth)
{
UNUSED(colour_type)
this->next = *that;
*that = this;
return bit_depth > 8;
}
| 173,645 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void TouchEventHandler::handleTouchPoint(Platform::TouchPoint& point, unsigned modifiers)
{
m_webPage->m_inputHandler->setInputModeEnabled();
bool shiftActive = modifiers & KEYMOD_SHIFT;
bool altActive = modifiers & KEYMOD_ALT;
bool ctrlActive = modifiers & KEYMOD_CTRL;
switch (point.m_state) {
case Platform::TouchPoint::TouchPressed:
{
m_webPage->m_inputHandler->clearDidSpellCheckState();
if (!m_lastFatFingersResult.isValid())
doFatFingers(point);
Element* elementUnderFatFinger = m_lastFatFingersResult.nodeAsElementIfApplicable();
if (m_lastFatFingersResult.isTextInput()) {
elementUnderFatFinger = m_lastFatFingersResult.nodeAsElementIfApplicable(FatFingersResult::ShadowContentNotAllowed, true /* shouldUseRootEditableElement */);
m_shouldRequestSpellCheckOptions = m_webPage->m_inputHandler->shouldRequestSpellCheckingOptionsForPoint(point.m_pos, elementUnderFatFinger, m_spellCheckOptionRequest);
}
handleFatFingerPressed(shiftActive, altActive, ctrlActive);
break;
}
case Platform::TouchPoint::TouchReleased:
{
if (!m_shouldRequestSpellCheckOptions)
m_webPage->m_inputHandler->processPendingKeyboardVisibilityChange();
if (m_webPage->m_inputHandler->isInputMode())
m_webPage->m_inputHandler->notifyClientOfKeyboardVisibilityChange(true);
m_webPage->m_tapHighlight->hide();
IntPoint adjustedPoint = m_webPage->mapFromContentsToViewport(m_lastFatFingersResult.adjustedPosition());
PlatformMouseEvent mouseEvent(adjustedPoint, m_lastScreenPoint, PlatformEvent::MouseReleased, 1, LeftButton, shiftActive, ctrlActive, altActive, TouchScreen);
m_webPage->handleMouseEvent(mouseEvent);
if (m_shouldRequestSpellCheckOptions) {
IntPoint pixelPositionRelativeToViewport = m_webPage->mapToTransformed(adjustedPoint);
IntSize screenOffset(m_lastScreenPoint - pixelPositionRelativeToViewport);
m_webPage->m_inputHandler->requestSpellingCheckingOptions(m_spellCheckOptionRequest, screenOffset);
m_shouldRequestSpellCheckOptions = false;
}
m_lastFatFingersResult.reset(); // Reset the fat finger result as its no longer valid when a user's finger is not on the screen.
break;
}
case Platform::TouchPoint::TouchMoved:
{
m_webPage->m_inputHandler->clearDidSpellCheckState();
PlatformMouseEvent mouseEvent(point.m_pos, m_lastScreenPoint, PlatformEvent::MouseMoved, 1, LeftButton, shiftActive, ctrlActive, altActive, TouchScreen);
m_lastScreenPoint = point.m_screenPos;
m_webPage->handleMouseEvent(mouseEvent);
break;
}
default:
break;
}
}
Commit Message: [BlackBerry] Adapt to new BlackBerry::Platform::TouchPoint API
https://bugs.webkit.org/show_bug.cgi?id=105143
RIM PR 171941
Reviewed by Rob Buis.
Internally reviewed by George Staikos.
Source/WebCore:
TouchPoint instances now provide document coordinates for the viewport
and content position of the touch event. The pixel coordinates stored
in the TouchPoint should no longer be needed in WebKit.
Also adapt to new method names and encapsulation of TouchPoint data
members.
No change in behavior, no new tests.
* platform/blackberry/PlatformTouchPointBlackBerry.cpp:
(WebCore::PlatformTouchPoint::PlatformTouchPoint):
Source/WebKit/blackberry:
TouchPoint instances now provide document coordinates for the viewport
and content position of the touch event. The pixel coordinates stored
in the TouchPoint should no longer be needed in WebKit. One exception
is when passing events to a full screen plugin.
Also adapt to new method names and encapsulation of TouchPoint data
members.
* Api/WebPage.cpp:
(BlackBerry::WebKit::WebPage::touchEvent):
(BlackBerry::WebKit::WebPage::touchPointAsMouseEvent):
(BlackBerry::WebKit::WebPagePrivate::dispatchTouchEventToFullScreenPlugin):
(BlackBerry::WebKit::WebPagePrivate::dispatchTouchPointAsMouseEventToFullScreenPlugin):
* WebKitSupport/InputHandler.cpp:
(BlackBerry::WebKit::InputHandler::shouldRequestSpellCheckingOptionsForPoint):
* WebKitSupport/InputHandler.h:
(InputHandler):
* WebKitSupport/TouchEventHandler.cpp:
(BlackBerry::WebKit::TouchEventHandler::doFatFingers):
(BlackBerry::WebKit::TouchEventHandler::handleTouchPoint):
* WebKitSupport/TouchEventHandler.h:
(TouchEventHandler):
Tools:
Adapt to new method names and encapsulation of TouchPoint data members.
* DumpRenderTree/blackberry/EventSender.cpp:
(addTouchPointCallback):
(updateTouchPointCallback):
(touchEndCallback):
(releaseTouchPointCallback):
(sendTouchEvent):
git-svn-id: svn://svn.chromium.org/blink/trunk@137880 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: | void TouchEventHandler::handleTouchPoint(Platform::TouchPoint& point, unsigned modifiers)
void TouchEventHandler::handleTouchPoint(const Platform::TouchPoint& point, unsigned modifiers)
{
m_webPage->m_inputHandler->setInputModeEnabled();
bool shiftActive = modifiers & KEYMOD_SHIFT;
bool altActive = modifiers & KEYMOD_ALT;
bool ctrlActive = modifiers & KEYMOD_CTRL;
switch (point.state()) {
case Platform::TouchPoint::TouchPressed:
{
m_webPage->m_inputHandler->clearDidSpellCheckState();
if (!m_lastFatFingersResult.isValid())
doFatFingers(point);
Element* elementUnderFatFinger = m_lastFatFingersResult.nodeAsElementIfApplicable();
if (m_lastFatFingersResult.isTextInput()) {
elementUnderFatFinger = m_lastFatFingersResult.nodeAsElementIfApplicable(FatFingersResult::ShadowContentNotAllowed, true /* shouldUseRootEditableElement */);
m_shouldRequestSpellCheckOptions = m_webPage->m_inputHandler->shouldRequestSpellCheckingOptionsForPoint(point.documentContentPosition(), elementUnderFatFinger, m_spellCheckOptionRequest);
}
handleFatFingerPressed(shiftActive, altActive, ctrlActive);
break;
}
case Platform::TouchPoint::TouchReleased:
{
if (!m_shouldRequestSpellCheckOptions)
m_webPage->m_inputHandler->processPendingKeyboardVisibilityChange();
if (m_webPage->m_inputHandler->isInputMode())
m_webPage->m_inputHandler->notifyClientOfKeyboardVisibilityChange(true);
m_webPage->m_tapHighlight->hide();
IntPoint adjustedPoint = m_webPage->mapFromContentsToViewport(m_lastFatFingersResult.adjustedPosition());
PlatformMouseEvent mouseEvent(adjustedPoint, m_lastScreenPoint, PlatformEvent::MouseReleased, 1, LeftButton, shiftActive, ctrlActive, altActive, TouchScreen);
m_webPage->handleMouseEvent(mouseEvent);
if (m_shouldRequestSpellCheckOptions) {
IntPoint pixelPositionRelativeToViewport = m_webPage->mapToTransformed(adjustedPoint);
IntSize screenOffset(m_lastScreenPoint - pixelPositionRelativeToViewport);
m_webPage->m_inputHandler->requestSpellingCheckingOptions(m_spellCheckOptionRequest, screenOffset);
m_shouldRequestSpellCheckOptions = false;
}
m_lastFatFingersResult.reset(); // Reset the fat finger result as its no longer valid when a user's finger is not on the screen.
break;
}
case Platform::TouchPoint::TouchMoved:
{
m_webPage->m_inputHandler->clearDidSpellCheckState();
PlatformMouseEvent mouseEvent(point.documentViewportPosition(), m_lastScreenPoint, PlatformEvent::MouseMoved, 1, LeftButton, shiftActive, ctrlActive, altActive, TouchScreen);
m_lastScreenPoint = point.screenPosition();
m_webPage->handleMouseEvent(mouseEvent);
break;
}
default:
break;
}
}
| 170,770 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: WebRTCSessionDescriptionDescriptor MockWebRTCPeerConnectionHandler::localDescription()
{
return m_localDescription;
}
Commit Message: Unreviewed, rolling out r127612, r127660, and r127664.
http://trac.webkit.org/changeset/127612
http://trac.webkit.org/changeset/127660
http://trac.webkit.org/changeset/127664
https://bugs.webkit.org/show_bug.cgi?id=95920
Source/Platform:
* Platform.gypi:
* chromium/public/WebRTCPeerConnectionHandler.h:
(WebKit):
(WebRTCPeerConnectionHandler):
* chromium/public/WebRTCVoidRequest.h: Removed.
Source/WebCore:
* CMakeLists.txt:
* GNUmakefile.list.am:
* Modules/mediastream/RTCErrorCallback.h:
(WebCore):
(RTCErrorCallback):
* Modules/mediastream/RTCErrorCallback.idl:
* Modules/mediastream/RTCPeerConnection.cpp:
(WebCore::RTCPeerConnection::createOffer):
* Modules/mediastream/RTCPeerConnection.h:
(WebCore):
(RTCPeerConnection):
* Modules/mediastream/RTCPeerConnection.idl:
* Modules/mediastream/RTCSessionDescriptionCallback.h:
(WebCore):
(RTCSessionDescriptionCallback):
* Modules/mediastream/RTCSessionDescriptionCallback.idl:
* Modules/mediastream/RTCSessionDescriptionRequestImpl.cpp:
(WebCore::RTCSessionDescriptionRequestImpl::create):
(WebCore::RTCSessionDescriptionRequestImpl::RTCSessionDescriptionRequestImpl):
(WebCore::RTCSessionDescriptionRequestImpl::requestSucceeded):
(WebCore::RTCSessionDescriptionRequestImpl::requestFailed):
(WebCore::RTCSessionDescriptionRequestImpl::clear):
* Modules/mediastream/RTCSessionDescriptionRequestImpl.h:
(RTCSessionDescriptionRequestImpl):
* Modules/mediastream/RTCVoidRequestImpl.cpp: Removed.
* Modules/mediastream/RTCVoidRequestImpl.h: Removed.
* WebCore.gypi:
* platform/chromium/support/WebRTCVoidRequest.cpp: Removed.
* platform/mediastream/RTCPeerConnectionHandler.cpp:
(RTCPeerConnectionHandlerDummy):
(WebCore::RTCPeerConnectionHandlerDummy::RTCPeerConnectionHandlerDummy):
* platform/mediastream/RTCPeerConnectionHandler.h:
(WebCore):
(WebCore::RTCPeerConnectionHandler::~RTCPeerConnectionHandler):
(RTCPeerConnectionHandler):
(WebCore::RTCPeerConnectionHandler::RTCPeerConnectionHandler):
* platform/mediastream/RTCVoidRequest.h: Removed.
* platform/mediastream/chromium/RTCPeerConnectionHandlerChromium.cpp:
* platform/mediastream/chromium/RTCPeerConnectionHandlerChromium.h:
(RTCPeerConnectionHandlerChromium):
Tools:
* DumpRenderTree/chromium/MockWebRTCPeerConnectionHandler.cpp:
(MockWebRTCPeerConnectionHandler::SuccessCallbackTask::SuccessCallbackTask):
(MockWebRTCPeerConnectionHandler::SuccessCallbackTask::runIfValid):
(MockWebRTCPeerConnectionHandler::FailureCallbackTask::FailureCallbackTask):
(MockWebRTCPeerConnectionHandler::FailureCallbackTask::runIfValid):
(MockWebRTCPeerConnectionHandler::createOffer):
* DumpRenderTree/chromium/MockWebRTCPeerConnectionHandler.h:
(MockWebRTCPeerConnectionHandler):
(SuccessCallbackTask):
(FailureCallbackTask):
LayoutTests:
* fast/mediastream/RTCPeerConnection-createOffer.html:
* fast/mediastream/RTCPeerConnection-localDescription-expected.txt: Removed.
* fast/mediastream/RTCPeerConnection-localDescription.html: Removed.
* fast/mediastream/RTCPeerConnection-remoteDescription-expected.txt: Removed.
* fast/mediastream/RTCPeerConnection-remoteDescription.html: Removed.
git-svn-id: svn://svn.chromium.org/blink/trunk@127679 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-20 | WebRTCSessionDescriptionDescriptor MockWebRTCPeerConnectionHandler::localDescription()
| 170,360 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: bool Block::IsInvisible() const
{
return bool(int(m_flags & 0x08) != 0);
}
Commit Message: libwebm: Pull from upstream
Rolling mkvparser from upstream. Primarily for fixing a bug on parsing
failures with certain Opus WebM files.
Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae
The diff is so huge because there were some style clean ups upstream.
But it was ensured that there were no breaking changes when the style
clean ups was done upstream.
Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039
CWE ID: CWE-119 | bool Block::IsInvisible() const
const Block::Frame& Block::GetFrame(int idx) const {
assert(idx >= 0);
assert(idx < m_frame_count);
const Frame& f = m_frames[idx];
assert(f.pos > 0);
assert(f.len > 0);
return f;
}
| 174,391 |
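The replacement Block::GetFrame() in the record above guards the frame index with assertions, which are compiled out in NDEBUG builds. A caller-facing variant of the same CWE-119 mitigation would reject out-of-range indices explicitly. A small C sketch of such a bounds-checked lookup (the struct layout and names are illustrative, not libwebm's):

#include <stddef.h>

struct frame { long long pos; long len; };
struct block { const struct frame *frames; int frame_count; };

/* Return NULL instead of reading outside frames[0 .. frame_count-1]. */
static const struct frame *block_get_frame(const struct block *b, int idx)
{
    if (b == NULL || b->frames == NULL)
        return NULL;
    if (idx < 0 || idx >= b->frame_count)
        return NULL;
    return &b->frames[idx];
}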
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port)
{
char *url_begin, *url_end, *ipv6_begin, *ipv6_end, *port_start = NULL;
char url_address[256], port[6];
int url_len, port_len = 0;
*sockaddr_url = url;
url_begin = strstr(url, "//");
if (!url_begin)
url_begin = url;
else
url_begin += 2;
/* Look for numeric ipv6 entries */
ipv6_begin = strstr(url_begin, "[");
ipv6_end = strstr(url_begin, "]");
if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin)
url_end = strstr(ipv6_end, ":");
else
url_end = strstr(url_begin, ":");
if (url_end) {
url_len = url_end - url_begin;
port_len = strlen(url_begin) - url_len - 1;
if (port_len < 1)
return false;
port_start = url_end + 1;
} else
url_len = strlen(url_begin);
if (url_len < 1)
return false;
sprintf(url_address, "%.*s", url_len, url_begin);
if (port_len) {
char *slash;
snprintf(port, 6, "%.*s", port_len, port_start);
slash = strchr(port, '/');
if (slash)
*slash = '\0';
} else
strcpy(port, "80");
*sockaddr_port = strdup(port);
*sockaddr_url = strdup(url_address);
return true;
}
Commit Message: Stratum: extract_sockaddr: Truncate overlong addresses rather than stack overflow
Thanks to Mick Ayzenberg <mick@dejavusecurity.com> for finding this!
CWE ID: CWE-119 | bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port)
{
char *url_begin, *url_end, *ipv6_begin, *ipv6_end, *port_start = NULL;
char url_address[256], port[6];
int url_len, port_len = 0;
*sockaddr_url = url;
url_begin = strstr(url, "//");
if (!url_begin)
url_begin = url;
else
url_begin += 2;
/* Look for numeric ipv6 entries */
ipv6_begin = strstr(url_begin, "[");
ipv6_end = strstr(url_begin, "]");
if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin)
url_end = strstr(ipv6_end, ":");
else
url_end = strstr(url_begin, ":");
if (url_end) {
url_len = url_end - url_begin;
port_len = strlen(url_begin) - url_len - 1;
if (port_len < 1)
return false;
port_start = url_end + 1;
} else
url_len = strlen(url_begin);
if (url_len < 1)
return false;
if (url_len >= sizeof(url_address))
{
applog(LOG_WARNING, "%s: Truncating overflowed address '%.*s'",
__func__, url_len, url_begin);
url_len = sizeof(url_address) - 1;
}
sprintf(url_address, "%.*s", url_len, url_begin);
if (port_len) {
char *slash;
snprintf(port, 6, "%.*s", port_len, port_start);
slash = strchr(port, '/');
if (slash)
*slash = '\0';
} else
strcpy(port, "80");
*sockaddr_port = strdup(port);
*sockaddr_url = strdup(url_address);
return true;
}
| 166,308 |
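The essential change in the extract_sockaddr() fix above is the guard placed before sprintf() writes the parsed host into the fixed 256-byte url_address stack buffer: an over-long host component is now clamped, with a warning, instead of overflowing the stack. A minimal sketch of the same clamp-before-copy pattern in C (function and variable names are illustrative, with no cgminer dependencies):

#include <stdio.h>
#include <string.h>

/* Copy an untrusted, possibly over-long component into a fixed-size
 * destination, truncating rather than overflowing. */
static void copy_bounded(char *dst, size_t dst_size, const char *src, size_t src_len)
{
    if (dst_size == 0)
        return;
    if (src_len >= dst_size) {
        fprintf(stderr, "truncating overlong address '%.*s'\n",
                (int)src_len, src);
        src_len = dst_size - 1;
    }
    memcpy(dst, src, src_len);
    dst[src_len] = '\0';
}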
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
{
struct ucma_multicast *mc;
mc = kzalloc(sizeof(*mc), GFP_KERNEL);
if (!mc)
return NULL;
mutex_lock(&mut);
mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
mutex_unlock(&mut);
if (mc->id < 0)
goto error;
mc->ctx = ctx;
list_add_tail(&mc->list, &ctx->mc_list);
return mc;
error:
kfree(mc);
return NULL;
}
Commit Message: infiniband: fix a possible use-after-free bug
ucma_process_join() will free the new allocated "mc" struct,
if there is any error after that, especially the copy_to_user().
But in parallel, ucma_leave_multicast() could find this "mc"
through idr_find() before ucma_process_join() frees it, since it
is already published.
So "mc" could be used in ucma_leave_multicast() after it is been
allocated and freed in ucma_process_join(), since we don't refcnt
it.
Fix this by separating "publish" from ID allocation, so that we
can get an ID first and publish it later after copy_to_user().
Fixes: c8f6a362bf3e ("RDMA/cma: Add multicast communication support")
Reported-by: Noam Rathaus <noamr@beyondsecurity.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
CWE ID: CWE-416 | static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
{
struct ucma_multicast *mc;
mc = kzalloc(sizeof(*mc), GFP_KERNEL);
if (!mc)
return NULL;
mutex_lock(&mut);
mc->id = idr_alloc(&multicast_idr, NULL, 0, 0, GFP_KERNEL);
mutex_unlock(&mut);
if (mc->id < 0)
goto error;
mc->ctx = ctx;
list_add_tail(&mc->list, &ctx->mc_list);
return mc;
error:
kfree(mc);
return NULL;
}
| 169,109 |
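The key line in the fixed ucma_alloc_multicast() above is idr_alloc(&multicast_idr, NULL, 0, 0, GFP_KERNEL): the id is reserved, but idr_find() keeps returning NULL, so a racing ucma_leave_multicast() cannot reach the half-initialised object. As the commit message explains, the pointer is only published after the join has fully succeeded, including the copy_to_user(). A simplified kernel-context sketch of that second step (the helper name ucma_publish_mc is hypothetical, not the upstream function name):

/* Publish the multicast object only after initialisation and
 * copy_to_user() have succeeded; until then lookups see NULL. */
static void ucma_publish_mc(struct ucma_multicast *mc)
{
    mutex_lock(&mut);
    idr_replace(&multicast_idr, mc, mc->id);
    mutex_unlock(&mut);
}

On any failure path the reserved slot has to be released with idr_remove() before mc is freed, otherwise the id would leak.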
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: long long CuePoint::GetTime(const Segment* pSegment) const
{
assert(pSegment);
assert(m_timecode >= 0);
const SegmentInfo* const pInfo = pSegment->GetInfo();
assert(pInfo);
const long long scale = pInfo->GetTimeCodeScale();
assert(scale >= 1);
const long long time = scale * m_timecode;
return time;
}
Commit Message: libwebm: Pull from upstream
Rolling mkvparser from upstream. Primarily for fixing a bug on parsing
failures with certain Opus WebM files.
Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae
The diff is so huge because there were some style clean ups upstream.
But it was ensured that there were no breaking changes when the style
clean ups was done upstream.
Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039
CWE ID: CWE-119 | long long CuePoint::GetTime(const Segment* pSegment) const
| 174,364 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int ssl3_get_client_key_exchange(SSL *s)
{
int i, al, ok;
long n;
unsigned long alg_k;
unsigned char *p;
#ifndef OPENSSL_NO_RSA
RSA *rsa = NULL;
EVP_PKEY *pkey = NULL;
#endif
#ifndef OPENSSL_NO_DH
BIGNUM *pub = NULL;
DH *dh_srvr, *dh_clnt = NULL;
#endif
#ifndef OPENSSL_NO_KRB5
KSSL_ERR kssl_err;
#endif /* OPENSSL_NO_KRB5 */
#ifndef OPENSSL_NO_ECDH
EC_KEY *srvr_ecdh = NULL;
EVP_PKEY *clnt_pub_pkey = NULL;
EC_POINT *clnt_ecpoint = NULL;
BN_CTX *bn_ctx = NULL;
#endif
n = s->method->ssl_get_message(s,
SSL3_ST_SR_KEY_EXCH_A,
SSL3_ST_SR_KEY_EXCH_B,
SSL3_MT_CLIENT_KEY_EXCHANGE, 2048, &ok);
if (!ok)
return ((int)n);
p = (unsigned char *)s->init_msg;
alg_k = s->s3->tmp.new_cipher->algorithm_mkey;
#ifndef OPENSSL_NO_RSA
if (alg_k & SSL_kRSA) {
unsigned char rand_premaster_secret[SSL_MAX_MASTER_KEY_LENGTH];
int decrypt_len;
unsigned char decrypt_good, version_good;
size_t j;
/* FIX THIS UP EAY EAY EAY EAY */
if (s->s3->tmp.use_rsa_tmp) {
if ((s->cert != NULL) && (s->cert->rsa_tmp != NULL))
rsa = s->cert->rsa_tmp;
/*
* Don't do a callback because rsa_tmp should be sent already
*/
if (rsa == NULL) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_MISSING_TMP_RSA_PKEY);
goto f_err;
}
} else {
pkey = s->cert->pkeys[SSL_PKEY_RSA_ENC].privatekey;
if ((pkey == NULL) ||
(pkey->type != EVP_PKEY_RSA) || (pkey->pkey.rsa == NULL)) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_MISSING_RSA_CERTIFICATE);
goto f_err;
}
rsa = pkey->pkey.rsa;
}
/* TLS and [incidentally] DTLS{0xFEFF} */
if (s->version > SSL3_VERSION && s->version != DTLS1_BAD_VER) {
n2s(p, i);
if (n != i + 2) {
if (!(s->options & SSL_OP_TLS_D5_BUG)) {
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_TLS_RSA_ENCRYPTED_VALUE_LENGTH_IS_WRONG);
goto f_err;
} else
p -= 2;
} else
n = i;
}
/*
* Reject overly short RSA ciphertext because we want to be sure
* that the buffer size makes it safe to iterate over the entire
* size of a premaster secret (SSL_MAX_MASTER_KEY_LENGTH). The
* actual expected size is larger due to RSA padding, but the
* bound is sufficient to be safe.
*/
if (n < SSL_MAX_MASTER_KEY_LENGTH) {
al = SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_TLS_RSA_ENCRYPTED_VALUE_LENGTH_IS_WRONG);
goto f_err;
}
/*
* We must not leak whether a decryption failure occurs because of
* Bleichenbacher's attack on PKCS #1 v1.5 RSA padding (see RFC 2246,
* section 7.4.7.1). The code follows that advice of the TLS RFC and
* generates a random premaster secret for the case that the decrypt
* fails. See https://tools.ietf.org/html/rfc5246#section-7.4.7.1
*/
/*
* should be RAND_bytes, but we cannot work around a failure.
*/
if (RAND_pseudo_bytes(rand_premaster_secret,
sizeof(rand_premaster_secret)) <= 0)
goto err;
decrypt_len =
RSA_private_decrypt((int)n, p, p, rsa, RSA_PKCS1_PADDING);
ERR_clear_error();
/*
* decrypt_len should be SSL_MAX_MASTER_KEY_LENGTH. decrypt_good will
* be 0xff if so and zero otherwise.
*/
decrypt_good =
constant_time_eq_int_8(decrypt_len, SSL_MAX_MASTER_KEY_LENGTH);
/*
* If the version in the decrypted pre-master secret is correct then
* version_good will be 0xff, otherwise it'll be zero. The
* Klima-Pokorny-Rosa extension of Bleichenbacher's attack
* (http://eprint.iacr.org/2003/052/) exploits the version number
* check as a "bad version oracle". Thus version checks are done in
* constant time and are treated like any other decryption error.
*/
version_good =
constant_time_eq_8(p[0], (unsigned)(s->client_version >> 8));
version_good &=
constant_time_eq_8(p[1], (unsigned)(s->client_version & 0xff));
/*
* The premaster secret must contain the same version number as the
* ClientHello to detect version rollback attacks (strangely, the
* protocol does not offer such protection for DH ciphersuites).
* However, buggy clients exist that send the negotiated protocol
* version instead if the server does not support the requested
* protocol version. If SSL_OP_TLS_ROLLBACK_BUG is set, tolerate such
* clients.
*/
if (s->options & SSL_OP_TLS_ROLLBACK_BUG) {
unsigned char workaround_good;
workaround_good =
constant_time_eq_8(p[0], (unsigned)(s->version >> 8));
workaround_good &=
constant_time_eq_8(p[1], (unsigned)(s->version & 0xff));
version_good |= workaround_good;
}
/*
* Both decryption and version must be good for decrypt_good to
* remain non-zero (0xff).
*/
decrypt_good &= version_good;
/*
* Now copy rand_premaster_secret over from p using
* decrypt_good_mask. If decryption failed, then p does not
* contain valid plaintext, however, a check above guarantees
* it is still sufficiently large to read from.
*/
for (j = 0; j < sizeof(rand_premaster_secret); j++) {
p[j] = constant_time_select_8(decrypt_good, p[j],
rand_premaster_secret[j]);
}
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->
session->master_key,
p,
sizeof
(rand_premaster_secret));
OPENSSL_cleanse(p, sizeof(rand_premaster_secret));
} else
#endif
#ifndef OPENSSL_NO_DH
if (alg_k & (SSL_kEDH | SSL_kDHr | SSL_kDHd)) {
int idx = -1;
EVP_PKEY *skey = NULL;
if (n > 1) {
n2s(p, i);
} else {
if (alg_k & SSL_kDHE) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DH_PUBLIC_VALUE_LENGTH_IS_WRONG);
goto f_err;
}
i = 0;
}
if (n && n != i + 2) {
if (!(s->options & SSL_OP_SSLEAY_080_CLIENT_DH_BUG)) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DH_PUBLIC_VALUE_LENGTH_IS_WRONG);
goto err;
} else {
p -= 2;
i = (int)n;
}
}
if (alg_k & SSL_kDHr)
idx = SSL_PKEY_DH_RSA;
else if (alg_k & SSL_kDHd)
idx = SSL_PKEY_DH_DSA;
if (idx >= 0) {
skey = s->cert->pkeys[idx].privatekey;
if ((skey == NULL) ||
(skey->type != EVP_PKEY_DH) || (skey->pkey.dh == NULL)) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_MISSING_RSA_CERTIFICATE);
goto f_err;
}
dh_srvr = skey->pkey.dh;
} else if (s->s3->tmp.dh == NULL) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_MISSING_TMP_DH_KEY);
goto f_err;
} else
dh_srvr = s->s3->tmp.dh;
if (n == 0L) {
/* Get pubkey from cert */
EVP_PKEY *clkey = X509_get_pubkey(s->session->peer);
if (clkey) {
if (EVP_PKEY_cmp_parameters(clkey, skey) == 1)
dh_clnt = EVP_PKEY_get1_DH(clkey);
}
if (dh_clnt == NULL) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_MISSING_TMP_DH_KEY);
goto f_err;
}
EVP_PKEY_free(clkey);
pub = dh_clnt->pub_key;
} else
pub = BN_bin2bn(p, i, NULL);
if (pub == NULL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, SSL_R_BN_LIB);
goto err;
}
i = DH_compute_key(p, pub, dh_srvr);
if (i <= 0) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_DH_LIB);
BN_clear_free(pub);
goto err;
}
DH_free(s->s3->tmp.dh);
s->s3->tmp.dh = NULL;
if (dh_clnt)
DH_free(dh_clnt);
else
BN_clear_free(pub);
pub = NULL;
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->
session->master_key,
p, i);
OPENSSL_cleanse(p, i);
if (dh_clnt)
return 2;
} else
#endif
#ifndef OPENSSL_NO_KRB5
if (alg_k & SSL_kKRB5) {
krb5_error_code krb5rc;
krb5_data enc_ticket;
krb5_data authenticator;
krb5_data enc_pms;
KSSL_CTX *kssl_ctx = s->kssl_ctx;
EVP_CIPHER_CTX ciph_ctx;
const EVP_CIPHER *enc = NULL;
unsigned char iv[EVP_MAX_IV_LENGTH];
unsigned char pms[SSL_MAX_MASTER_KEY_LENGTH + EVP_MAX_BLOCK_LENGTH];
int padl, outl;
krb5_timestamp authtime = 0;
krb5_ticket_times ttimes;
int kerr = 0;
EVP_CIPHER_CTX_init(&ciph_ctx);
if (!kssl_ctx)
kssl_ctx = kssl_ctx_new();
n2s(p, i);
enc_ticket.length = i;
if (n < (long)(enc_ticket.length + 6)) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto err;
}
enc_ticket.data = (char *)p;
p += enc_ticket.length;
n2s(p, i);
authenticator.length = i;
if (n < (long)(enc_ticket.length + authenticator.length + 6)) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto err;
}
authenticator.data = (char *)p;
p += authenticator.length;
n2s(p, i);
enc_pms.length = i;
enc_pms.data = (char *)p;
p += enc_pms.length;
/*
* Note that the length is checked again below, ** after decryption
*/
if (enc_pms.length > sizeof pms) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto err;
}
if (n != (long)(enc_ticket.length + authenticator.length +
enc_pms.length + 6)) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto err;
}
if ((krb5rc = kssl_sget_tkt(kssl_ctx, &enc_ticket, &ttimes,
&kssl_err)) != 0) {
# ifdef KSSL_DEBUG
fprintf(stderr, "kssl_sget_tkt rtn %d [%d]\n",
krb5rc, kssl_err.reason);
if (kssl_err.text)
fprintf(stderr, "kssl_err text= %s\n", kssl_err.text);
# endif /* KSSL_DEBUG */
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, kssl_err.reason);
goto err;
}
/*
* Note: no authenticator is not considered an error, ** but will
* return authtime == 0.
*/
if ((krb5rc = kssl_check_authent(kssl_ctx, &authenticator,
&authtime, &kssl_err)) != 0) {
# ifdef KSSL_DEBUG
fprintf(stderr, "kssl_check_authent rtn %d [%d]\n",
krb5rc, kssl_err.reason);
if (kssl_err.text)
fprintf(stderr, "kssl_err text= %s\n", kssl_err.text);
# endif /* KSSL_DEBUG */
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, kssl_err.reason);
goto err;
}
if ((krb5rc = kssl_validate_times(authtime, &ttimes)) != 0) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, krb5rc);
goto err;
}
# ifdef KSSL_DEBUG
kssl_ctx_show(kssl_ctx);
# endif /* KSSL_DEBUG */
enc = kssl_map_enc(kssl_ctx->enctype);
if (enc == NULL)
goto err;
memset(iv, 0, sizeof iv); /* per RFC 1510 */
if (!EVP_DecryptInit_ex(&ciph_ctx, enc, NULL, kssl_ctx->key, iv)) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DECRYPTION_FAILED);
goto err;
}
if (!EVP_DecryptUpdate(&ciph_ctx, pms, &outl,
(unsigned char *)enc_pms.data, enc_pms.length))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DECRYPTION_FAILED);
kerr = 1;
goto kclean;
}
if (outl > SSL_MAX_MASTER_KEY_LENGTH) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
kerr = 1;
goto kclean;
}
if (!EVP_DecryptFinal_ex(&ciph_ctx, &(pms[outl]), &padl)) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DECRYPTION_FAILED);
kerr = 1;
goto kclean;
}
outl += padl;
if (outl > SSL_MAX_MASTER_KEY_LENGTH) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
kerr = 1;
goto kclean;
}
if (!((pms[0] == (s->client_version >> 8))
&& (pms[1] == (s->client_version & 0xff)))) {
/*
* The premaster secret must contain the same version number as
* the ClientHello to detect version rollback attacks (strangely,
* the protocol does not offer such protection for DH
* ciphersuites). However, buggy clients exist that send random
* bytes instead of the protocol version. If
* SSL_OP_TLS_ROLLBACK_BUG is set, tolerate such clients.
* (Perhaps we should have a separate BUG value for the Kerberos
* cipher)
*/
if (!(s->options & SSL_OP_TLS_ROLLBACK_BUG)) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_AD_DECODE_ERROR);
kerr = 1;
goto kclean;
}
}
EVP_CIPHER_CTX_cleanup(&ciph_ctx);
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->
session->master_key,
pms, outl);
if (kssl_ctx->client_princ) {
size_t len = strlen(kssl_ctx->client_princ);
if (len < SSL_MAX_KRB5_PRINCIPAL_LENGTH) {
s->session->krb5_client_princ_len = len;
memcpy(s->session->krb5_client_princ, kssl_ctx->client_princ,
len);
}
}
/*- Was doing kssl_ctx_free() here,
* but it caused problems for apache.
* kssl_ctx = kssl_ctx_free(kssl_ctx);
* if (s->kssl_ctx) s->kssl_ctx = NULL;
*/
kclean:
OPENSSL_cleanse(pms, sizeof(pms));
if (kerr)
goto err;
} else
#endif /* OPENSSL_NO_KRB5 */
#ifndef OPENSSL_NO_ECDH
if (alg_k & (SSL_kEECDH | SSL_kECDHr | SSL_kECDHe)) {
int ret = 1;
int field_size = 0;
const EC_KEY *tkey;
const EC_GROUP *group;
const BIGNUM *priv_key;
/* initialize structures for server's ECDH key pair */
if ((srvr_ecdh = EC_KEY_new()) == NULL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE);
goto err;
}
/* Let's get server private key and group information */
if (alg_k & (SSL_kECDHr | SSL_kECDHe)) {
/* use the certificate */
tkey = s->cert->pkeys[SSL_PKEY_ECC].privatekey->pkey.ec;
} else {
/*
* use the ephermeral values we saved when generating the
* ServerKeyExchange msg.
*/
tkey = s->s3->tmp.ecdh;
}
group = EC_KEY_get0_group(tkey);
priv_key = EC_KEY_get0_private_key(tkey);
if (!EC_KEY_set_group(srvr_ecdh, group) ||
!EC_KEY_set_private_key(srvr_ecdh, priv_key)) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_EC_LIB);
goto err;
}
/* Let's get client's public key */
if ((clnt_ecpoint = EC_POINT_new(group)) == NULL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE);
goto err;
}
if (n == 0L) {
/* Client Publickey was in Client Certificate */
if (alg_k & SSL_kEECDH) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_MISSING_TMP_ECDH_KEY);
goto f_err;
}
if (((clnt_pub_pkey = X509_get_pubkey(s->session->peer))
== NULL) || (clnt_pub_pkey->type != EVP_PKEY_EC)) {
/*
* XXX: For now, we do not support client authentication
* using ECDH certificates so this branch (n == 0L) of the
* code is never executed. When that support is added, we
* ought to ensure the key received in the certificate is
* authorized for key agreement. ECDH_compute_key implicitly
* checks that the two ECDH shares are for the same group.
*/
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_UNABLE_TO_DECODE_ECDH_CERTS);
goto f_err;
}
if (EC_POINT_copy(clnt_ecpoint,
EC_KEY_get0_public_key(clnt_pub_pkey->
pkey.ec)) == 0) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_EC_LIB);
goto err;
}
ret = 2; /* Skip certificate verify processing */
} else {
/*
* Get client's public key from encoded point in the
* ClientKeyExchange message.
*/
if ((bn_ctx = BN_CTX_new()) == NULL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto err;
}
/* Get encoded point length */
i = *p;
p += 1;
if (n != 1 + i) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_EC_LIB);
goto err;
}
if (EC_POINT_oct2point(group, clnt_ecpoint, p, i, bn_ctx) == 0) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_EC_LIB);
goto err;
}
/*
* p is pointing to somewhere in the buffer currently, so set it
* to the start
*/
p = (unsigned char *)s->init_buf->data;
}
/* Compute the shared pre-master secret */
field_size = EC_GROUP_get_degree(group);
if (field_size <= 0) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_ECDH_LIB);
goto err;
}
i = ECDH_compute_key(p, (field_size + 7) / 8, clnt_ecpoint, srvr_ecdh,
NULL);
if (i <= 0) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_ECDH_LIB);
goto err;
}
EVP_PKEY_free(clnt_pub_pkey);
EC_POINT_free(clnt_ecpoint);
EC_KEY_free(srvr_ecdh);
BN_CTX_free(bn_ctx);
EC_KEY_free(s->s3->tmp.ecdh);
s->s3->tmp.ecdh = NULL;
/* Compute the master secret */
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->
session->master_key,
p, i);
OPENSSL_cleanse(p, i);
return (ret);
} else
#endif
#ifndef OPENSSL_NO_PSK
if (alg_k & SSL_kPSK) {
unsigned char *t = NULL;
unsigned char psk_or_pre_ms[PSK_MAX_PSK_LEN * 2 + 4];
unsigned int pre_ms_len = 0, psk_len = 0;
int psk_err = 1;
char tmp_id[PSK_MAX_IDENTITY_LEN + 1];
al = SSL_AD_HANDSHAKE_FAILURE;
n2s(p, i);
if (n != i + 2) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, SSL_R_LENGTH_MISMATCH);
goto psk_err;
}
if (i > PSK_MAX_IDENTITY_LEN) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto psk_err;
}
if (s->psk_server_callback == NULL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_PSK_NO_SERVER_CB);
goto psk_err;
}
/*
* Create guaranteed NULL-terminated identity string for the callback
*/
memcpy(tmp_id, p, i);
memset(tmp_id + i, 0, PSK_MAX_IDENTITY_LEN + 1 - i);
psk_len = s->psk_server_callback(s, tmp_id,
psk_or_pre_ms,
sizeof(psk_or_pre_ms));
OPENSSL_cleanse(tmp_id, PSK_MAX_IDENTITY_LEN + 1);
if (psk_len > PSK_MAX_PSK_LEN) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR);
goto psk_err;
} else if (psk_len == 0) {
/*
* PSK related to the given identity not found
*/
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_PSK_IDENTITY_NOT_FOUND);
al = SSL_AD_UNKNOWN_PSK_IDENTITY;
goto psk_err;
}
/* create PSK pre_master_secret */
pre_ms_len = 2 + psk_len + 2 + psk_len;
t = psk_or_pre_ms;
memmove(psk_or_pre_ms + psk_len + 4, psk_or_pre_ms, psk_len);
s2n(psk_len, t);
memset(t, 0, psk_len);
t += psk_len;
s2n(psk_len, t);
if (s->session->psk_identity != NULL)
OPENSSL_free(s->session->psk_identity);
s->session->psk_identity = BUF_strdup((char *)p);
if (s->session->psk_identity == NULL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE);
goto psk_err;
}
if (s->session->psk_identity_hint != NULL)
OPENSSL_free(s->session->psk_identity_hint);
s->session->psk_identity_hint = BUF_strdup(s->ctx->psk_identity_hint);
if (s->ctx->psk_identity_hint != NULL &&
s->session->psk_identity_hint == NULL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE);
goto psk_err;
}
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->
session->master_key,
psk_or_pre_ms,
pre_ms_len);
psk_err = 0;
psk_err:
OPENSSL_cleanse(psk_or_pre_ms, sizeof(psk_or_pre_ms));
if (psk_err != 0)
goto f_err;
} else
#endif
#ifndef OPENSSL_NO_SRP
if (alg_k & SSL_kSRP) {
int param_len;
n2s(p, i);
param_len = i + 2;
if (param_len > n) {
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_BAD_SRP_A_LENGTH);
goto f_err;
}
if (!(s->srp_ctx.A = BN_bin2bn(p, i, NULL))) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_BN_LIB);
goto err;
}
if (BN_ucmp(s->srp_ctx.A, s->srp_ctx.N) >= 0
|| BN_is_zero(s->srp_ctx.A)) {
al = SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_BAD_SRP_PARAMETERS);
goto f_err;
}
if (s->session->srp_username != NULL)
OPENSSL_free(s->session->srp_username);
s->session->srp_username = BUF_strdup(s->srp_ctx.login);
if (s->session->srp_username == NULL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE);
goto err;
}
if ((s->session->master_key_length =
SRP_generate_server_master_secret(s,
s->session->master_key)) < 0) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR);
goto err;
}
p += i;
} else
#endif /* OPENSSL_NO_SRP */
if (alg_k & SSL_kGOST) {
int ret = 0;
EVP_PKEY_CTX *pkey_ctx;
EVP_PKEY *client_pub_pkey = NULL, *pk = NULL;
unsigned char premaster_secret[32], *start;
size_t outlen = 32, inlen;
unsigned long alg_a;
int Ttag, Tclass;
long Tlen;
/* Get our certificate private key */
alg_a = s->s3->tmp.new_cipher->algorithm_auth;
if (alg_a & SSL_aGOST94)
pk = s->cert->pkeys[SSL_PKEY_GOST94].privatekey;
else if (alg_a & SSL_aGOST01)
pk = s->cert->pkeys[SSL_PKEY_GOST01].privatekey;
pkey_ctx = EVP_PKEY_CTX_new(pk, NULL);
EVP_PKEY_decrypt_init(pkey_ctx);
/*
* If client certificate is present and is of the same type, maybe
* use it for key exchange. Don't mind errors from
* EVP_PKEY_derive_set_peer, because it is completely valid to use a
* client certificate for authorization only.
*/
client_pub_pkey = X509_get_pubkey(s->session->peer);
if (client_pub_pkey) {
if (EVP_PKEY_derive_set_peer(pkey_ctx, client_pub_pkey) <= 0)
ERR_clear_error();
}
/* Decrypt session key */
if (ASN1_get_object
((const unsigned char **)&p, &Tlen, &Ttag, &Tclass,
n) != V_ASN1_CONSTRUCTED || Ttag != V_ASN1_SEQUENCE
|| Tclass != V_ASN1_UNIVERSAL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DECRYPTION_FAILED);
goto gerr;
}
start = p;
inlen = Tlen;
if (EVP_PKEY_decrypt
(pkey_ctx, premaster_secret, &outlen, start, inlen) <= 0) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DECRYPTION_FAILED);
goto gerr;
}
/* Generate master secret */
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->
session->master_key,
premaster_secret, 32);
OPENSSL_cleanse(premaster_secret, sizeof(premaster_secret));
/* Check if pubkey from client certificate was used */
if (EVP_PKEY_CTX_ctrl
(pkey_ctx, -1, -1, EVP_PKEY_CTRL_PEER_KEY, 2, NULL) > 0)
ret = 2;
else
ret = 1;
gerr:
EVP_PKEY_free(client_pub_pkey);
EVP_PKEY_CTX_free(pkey_ctx);
if (ret)
return ret;
else
goto err;
} else {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, SSL_R_UNKNOWN_CIPHER_TYPE);
goto f_err;
}
return (1);
f_err:
ssl3_send_alert(s, SSL3_AL_FATAL, al);
#if !defined(OPENSSL_NO_DH) || !defined(OPENSSL_NO_RSA) || !defined(OPENSSL_NO_ECDH) || defined(OPENSSL_NO_SRP)
err:
#endif
#ifndef OPENSSL_NO_ECDH
EVP_PKEY_free(clnt_pub_pkey);
EC_POINT_free(clnt_ecpoint);
if (srvr_ecdh != NULL)
EC_KEY_free(srvr_ecdh);
BN_CTX_free(bn_ctx);
#endif
s->state = SSL_ST_ERR;
return (-1);
}
Commit Message:
CWE ID: CWE-362 | int ssl3_get_client_key_exchange(SSL *s)
{
int i, al, ok;
long n;
unsigned long alg_k;
unsigned char *p;
#ifndef OPENSSL_NO_RSA
RSA *rsa = NULL;
EVP_PKEY *pkey = NULL;
#endif
#ifndef OPENSSL_NO_DH
BIGNUM *pub = NULL;
DH *dh_srvr, *dh_clnt = NULL;
#endif
#ifndef OPENSSL_NO_KRB5
KSSL_ERR kssl_err;
#endif /* OPENSSL_NO_KRB5 */
#ifndef OPENSSL_NO_ECDH
EC_KEY *srvr_ecdh = NULL;
EVP_PKEY *clnt_pub_pkey = NULL;
EC_POINT *clnt_ecpoint = NULL;
BN_CTX *bn_ctx = NULL;
#endif
n = s->method->ssl_get_message(s,
SSL3_ST_SR_KEY_EXCH_A,
SSL3_ST_SR_KEY_EXCH_B,
SSL3_MT_CLIENT_KEY_EXCHANGE, 2048, &ok);
if (!ok)
return ((int)n);
p = (unsigned char *)s->init_msg;
alg_k = s->s3->tmp.new_cipher->algorithm_mkey;
#ifndef OPENSSL_NO_RSA
if (alg_k & SSL_kRSA) {
unsigned char rand_premaster_secret[SSL_MAX_MASTER_KEY_LENGTH];
int decrypt_len;
unsigned char decrypt_good, version_good;
size_t j;
/* FIX THIS UP EAY EAY EAY EAY */
if (s->s3->tmp.use_rsa_tmp) {
if ((s->cert != NULL) && (s->cert->rsa_tmp != NULL))
rsa = s->cert->rsa_tmp;
/*
* Don't do a callback because rsa_tmp should be sent already
*/
if (rsa == NULL) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_MISSING_TMP_RSA_PKEY);
goto f_err;
}
} else {
pkey = s->cert->pkeys[SSL_PKEY_RSA_ENC].privatekey;
if ((pkey == NULL) ||
(pkey->type != EVP_PKEY_RSA) || (pkey->pkey.rsa == NULL)) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_MISSING_RSA_CERTIFICATE);
goto f_err;
}
rsa = pkey->pkey.rsa;
}
/* TLS and [incidentally] DTLS{0xFEFF} */
if (s->version > SSL3_VERSION && s->version != DTLS1_BAD_VER) {
n2s(p, i);
if (n != i + 2) {
if (!(s->options & SSL_OP_TLS_D5_BUG)) {
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_TLS_RSA_ENCRYPTED_VALUE_LENGTH_IS_WRONG);
goto f_err;
} else
p -= 2;
} else
n = i;
}
/*
* Reject overly short RSA ciphertext because we want to be sure
* that the buffer size makes it safe to iterate over the entire
* size of a premaster secret (SSL_MAX_MASTER_KEY_LENGTH). The
* actual expected size is larger due to RSA padding, but the
* bound is sufficient to be safe.
*/
if (n < SSL_MAX_MASTER_KEY_LENGTH) {
al = SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_TLS_RSA_ENCRYPTED_VALUE_LENGTH_IS_WRONG);
goto f_err;
}
/*
* We must not leak whether a decryption failure occurs because of
* Bleichenbacher's attack on PKCS #1 v1.5 RSA padding (see RFC 2246,
* section 7.4.7.1). The code follows that advice of the TLS RFC and
* generates a random premaster secret for the case that the decrypt
* fails. See https://tools.ietf.org/html/rfc5246#section-7.4.7.1
*/
/*
* should be RAND_bytes, but we cannot work around a failure.
*/
if (RAND_pseudo_bytes(rand_premaster_secret,
sizeof(rand_premaster_secret)) <= 0)
goto err;
decrypt_len =
RSA_private_decrypt((int)n, p, p, rsa, RSA_PKCS1_PADDING);
ERR_clear_error();
/*
* decrypt_len should be SSL_MAX_MASTER_KEY_LENGTH. decrypt_good will
* be 0xff if so and zero otherwise.
*/
decrypt_good =
constant_time_eq_int_8(decrypt_len, SSL_MAX_MASTER_KEY_LENGTH);
/*
* If the version in the decrypted pre-master secret is correct then
* version_good will be 0xff, otherwise it'll be zero. The
* Klima-Pokorny-Rosa extension of Bleichenbacher's attack
* (http://eprint.iacr.org/2003/052/) exploits the version number
* check as a "bad version oracle". Thus version checks are done in
* constant time and are treated like any other decryption error.
*/
version_good =
constant_time_eq_8(p[0], (unsigned)(s->client_version >> 8));
version_good &=
constant_time_eq_8(p[1], (unsigned)(s->client_version & 0xff));
/*
* The premaster secret must contain the same version number as the
* ClientHello to detect version rollback attacks (strangely, the
* protocol does not offer such protection for DH ciphersuites).
* However, buggy clients exist that send the negotiated protocol
* version instead if the server does not support the requested
* protocol version. If SSL_OP_TLS_ROLLBACK_BUG is set, tolerate such
* clients.
*/
if (s->options & SSL_OP_TLS_ROLLBACK_BUG) {
unsigned char workaround_good;
workaround_good =
constant_time_eq_8(p[0], (unsigned)(s->version >> 8));
workaround_good &=
constant_time_eq_8(p[1], (unsigned)(s->version & 0xff));
version_good |= workaround_good;
}
/*
* Both decryption and version must be good for decrypt_good to
* remain non-zero (0xff).
*/
decrypt_good &= version_good;
/*
* Now copy rand_premaster_secret over from p using
* decrypt_good_mask. If decryption failed, then p does not
* contain valid plaintext, however, a check above guarantees
* it is still sufficiently large to read from.
*/
for (j = 0; j < sizeof(rand_premaster_secret); j++) {
p[j] = constant_time_select_8(decrypt_good, p[j],
rand_premaster_secret[j]);
}
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->
session->master_key,
p,
sizeof
(rand_premaster_secret));
OPENSSL_cleanse(p, sizeof(rand_premaster_secret));
} else
#endif
#ifndef OPENSSL_NO_DH
if (alg_k & (SSL_kEDH | SSL_kDHr | SSL_kDHd)) {
int idx = -1;
EVP_PKEY *skey = NULL;
if (n > 1) {
n2s(p, i);
} else {
if (alg_k & SSL_kDHE) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DH_PUBLIC_VALUE_LENGTH_IS_WRONG);
goto f_err;
}
i = 0;
}
if (n && n != i + 2) {
if (!(s->options & SSL_OP_SSLEAY_080_CLIENT_DH_BUG)) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DH_PUBLIC_VALUE_LENGTH_IS_WRONG);
goto err;
} else {
p -= 2;
i = (int)n;
}
}
if (alg_k & SSL_kDHr)
idx = SSL_PKEY_DH_RSA;
else if (alg_k & SSL_kDHd)
idx = SSL_PKEY_DH_DSA;
if (idx >= 0) {
skey = s->cert->pkeys[idx].privatekey;
if ((skey == NULL) ||
(skey->type != EVP_PKEY_DH) || (skey->pkey.dh == NULL)) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_MISSING_RSA_CERTIFICATE);
goto f_err;
}
dh_srvr = skey->pkey.dh;
} else if (s->s3->tmp.dh == NULL) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_MISSING_TMP_DH_KEY);
goto f_err;
} else
dh_srvr = s->s3->tmp.dh;
if (n == 0L) {
/* Get pubkey from cert */
EVP_PKEY *clkey = X509_get_pubkey(s->session->peer);
if (clkey) {
if (EVP_PKEY_cmp_parameters(clkey, skey) == 1)
dh_clnt = EVP_PKEY_get1_DH(clkey);
}
if (dh_clnt == NULL) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_MISSING_TMP_DH_KEY);
goto f_err;
}
EVP_PKEY_free(clkey);
pub = dh_clnt->pub_key;
} else
pub = BN_bin2bn(p, i, NULL);
if (pub == NULL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, SSL_R_BN_LIB);
goto err;
}
i = DH_compute_key(p, pub, dh_srvr);
if (i <= 0) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_DH_LIB);
BN_clear_free(pub);
goto err;
}
DH_free(s->s3->tmp.dh);
s->s3->tmp.dh = NULL;
if (dh_clnt)
DH_free(dh_clnt);
else
BN_clear_free(pub);
pub = NULL;
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->
session->master_key,
p, i);
OPENSSL_cleanse(p, i);
if (dh_clnt)
return 2;
} else
#endif
#ifndef OPENSSL_NO_KRB5
if (alg_k & SSL_kKRB5) {
krb5_error_code krb5rc;
krb5_data enc_ticket;
krb5_data authenticator;
krb5_data enc_pms;
KSSL_CTX *kssl_ctx = s->kssl_ctx;
EVP_CIPHER_CTX ciph_ctx;
const EVP_CIPHER *enc = NULL;
unsigned char iv[EVP_MAX_IV_LENGTH];
unsigned char pms[SSL_MAX_MASTER_KEY_LENGTH + EVP_MAX_BLOCK_LENGTH];
int padl, outl;
krb5_timestamp authtime = 0;
krb5_ticket_times ttimes;
int kerr = 0;
EVP_CIPHER_CTX_init(&ciph_ctx);
if (!kssl_ctx)
kssl_ctx = kssl_ctx_new();
n2s(p, i);
enc_ticket.length = i;
if (n < (long)(enc_ticket.length + 6)) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto err;
}
enc_ticket.data = (char *)p;
p += enc_ticket.length;
n2s(p, i);
authenticator.length = i;
if (n < (long)(enc_ticket.length + authenticator.length + 6)) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto err;
}
authenticator.data = (char *)p;
p += authenticator.length;
n2s(p, i);
enc_pms.length = i;
enc_pms.data = (char *)p;
p += enc_pms.length;
/*
* Note that the length is checked again below, ** after decryption
*/
if (enc_pms.length > sizeof pms) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto err;
}
if (n != (long)(enc_ticket.length + authenticator.length +
enc_pms.length + 6)) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto err;
}
if ((krb5rc = kssl_sget_tkt(kssl_ctx, &enc_ticket, &ttimes,
&kssl_err)) != 0) {
# ifdef KSSL_DEBUG
fprintf(stderr, "kssl_sget_tkt rtn %d [%d]\n",
krb5rc, kssl_err.reason);
if (kssl_err.text)
fprintf(stderr, "kssl_err text= %s\n", kssl_err.text);
# endif /* KSSL_DEBUG */
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, kssl_err.reason);
goto err;
}
/*
* Note: no authenticator is not considered an error, ** but will
* return authtime == 0.
*/
if ((krb5rc = kssl_check_authent(kssl_ctx, &authenticator,
&authtime, &kssl_err)) != 0) {
# ifdef KSSL_DEBUG
fprintf(stderr, "kssl_check_authent rtn %d [%d]\n",
krb5rc, kssl_err.reason);
if (kssl_err.text)
fprintf(stderr, "kssl_err text= %s\n", kssl_err.text);
# endif /* KSSL_DEBUG */
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, kssl_err.reason);
goto err;
}
if ((krb5rc = kssl_validate_times(authtime, &ttimes)) != 0) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, krb5rc);
goto err;
}
# ifdef KSSL_DEBUG
kssl_ctx_show(kssl_ctx);
# endif /* KSSL_DEBUG */
enc = kssl_map_enc(kssl_ctx->enctype);
if (enc == NULL)
goto err;
memset(iv, 0, sizeof iv); /* per RFC 1510 */
if (!EVP_DecryptInit_ex(&ciph_ctx, enc, NULL, kssl_ctx->key, iv)) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DECRYPTION_FAILED);
goto err;
}
if (!EVP_DecryptUpdate(&ciph_ctx, pms, &outl,
(unsigned char *)enc_pms.data, enc_pms.length))
{
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DECRYPTION_FAILED);
kerr = 1;
goto kclean;
}
if (outl > SSL_MAX_MASTER_KEY_LENGTH) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
kerr = 1;
goto kclean;
}
if (!EVP_DecryptFinal_ex(&ciph_ctx, &(pms[outl]), &padl)) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DECRYPTION_FAILED);
kerr = 1;
goto kclean;
}
outl += padl;
if (outl > SSL_MAX_MASTER_KEY_LENGTH) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
kerr = 1;
goto kclean;
}
if (!((pms[0] == (s->client_version >> 8))
&& (pms[1] == (s->client_version & 0xff)))) {
/*
* The premaster secret must contain the same version number as
* the ClientHello to detect version rollback attacks (strangely,
* the protocol does not offer such protection for DH
* ciphersuites). However, buggy clients exist that send random
* bytes instead of the protocol version. If
* SSL_OP_TLS_ROLLBACK_BUG is set, tolerate such clients.
* (Perhaps we should have a separate BUG value for the Kerberos
* cipher)
*/
if (!(s->options & SSL_OP_TLS_ROLLBACK_BUG)) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_AD_DECODE_ERROR);
kerr = 1;
goto kclean;
}
}
EVP_CIPHER_CTX_cleanup(&ciph_ctx);
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->
session->master_key,
pms, outl);
if (kssl_ctx->client_princ) {
size_t len = strlen(kssl_ctx->client_princ);
if (len < SSL_MAX_KRB5_PRINCIPAL_LENGTH) {
s->session->krb5_client_princ_len = len;
memcpy(s->session->krb5_client_princ, kssl_ctx->client_princ,
len);
}
}
/*- Was doing kssl_ctx_free() here,
* but it caused problems for apache.
* kssl_ctx = kssl_ctx_free(kssl_ctx);
* if (s->kssl_ctx) s->kssl_ctx = NULL;
*/
kclean:
OPENSSL_cleanse(pms, sizeof(pms));
if (kerr)
goto err;
} else
#endif /* OPENSSL_NO_KRB5 */
#ifndef OPENSSL_NO_ECDH
if (alg_k & (SSL_kEECDH | SSL_kECDHr | SSL_kECDHe)) {
int ret = 1;
int field_size = 0;
const EC_KEY *tkey;
const EC_GROUP *group;
const BIGNUM *priv_key;
/* initialize structures for server's ECDH key pair */
if ((srvr_ecdh = EC_KEY_new()) == NULL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE);
goto err;
}
/* Let's get server private key and group information */
if (alg_k & (SSL_kECDHr | SSL_kECDHe)) {
/* use the certificate */
tkey = s->cert->pkeys[SSL_PKEY_ECC].privatekey->pkey.ec;
} else {
/*
* use the ephermeral values we saved when generating the
* ServerKeyExchange msg.
*/
tkey = s->s3->tmp.ecdh;
}
group = EC_KEY_get0_group(tkey);
priv_key = EC_KEY_get0_private_key(tkey);
if (!EC_KEY_set_group(srvr_ecdh, group) ||
!EC_KEY_set_private_key(srvr_ecdh, priv_key)) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_EC_LIB);
goto err;
}
/* Let's get client's public key */
if ((clnt_ecpoint = EC_POINT_new(group)) == NULL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE);
goto err;
}
if (n == 0L) {
/* Client Publickey was in Client Certificate */
if (alg_k & SSL_kEECDH) {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_MISSING_TMP_ECDH_KEY);
goto f_err;
}
if (((clnt_pub_pkey = X509_get_pubkey(s->session->peer))
== NULL) || (clnt_pub_pkey->type != EVP_PKEY_EC)) {
/*
* XXX: For now, we do not support client authentication
* using ECDH certificates so this branch (n == 0L) of the
* code is never executed. When that support is added, we
* ought to ensure the key received in the certificate is
* authorized for key agreement. ECDH_compute_key implicitly
* checks that the two ECDH shares are for the same group.
*/
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_UNABLE_TO_DECODE_ECDH_CERTS);
goto f_err;
}
if (EC_POINT_copy(clnt_ecpoint,
EC_KEY_get0_public_key(clnt_pub_pkey->
pkey.ec)) == 0) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_EC_LIB);
goto err;
}
ret = 2; /* Skip certificate verify processing */
} else {
/*
* Get client's public key from encoded point in the
* ClientKeyExchange message.
*/
if ((bn_ctx = BN_CTX_new()) == NULL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
ERR_R_MALLOC_FAILURE);
goto err;
}
/* Get encoded point length */
i = *p;
p += 1;
if (n != 1 + i) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_EC_LIB);
goto err;
}
if (EC_POINT_oct2point(group, clnt_ecpoint, p, i, bn_ctx) == 0) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_EC_LIB);
goto err;
}
/*
* p is pointing to somewhere in the buffer currently, so set it
* to the start
*/
p = (unsigned char *)s->init_buf->data;
}
/* Compute the shared pre-master secret */
field_size = EC_GROUP_get_degree(group);
if (field_size <= 0) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_ECDH_LIB);
goto err;
}
i = ECDH_compute_key(p, (field_size + 7) / 8, clnt_ecpoint, srvr_ecdh,
NULL);
if (i <= 0) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_ECDH_LIB);
goto err;
}
EVP_PKEY_free(clnt_pub_pkey);
EC_POINT_free(clnt_ecpoint);
EC_KEY_free(srvr_ecdh);
BN_CTX_free(bn_ctx);
EC_KEY_free(s->s3->tmp.ecdh);
s->s3->tmp.ecdh = NULL;
/* Compute the master secret */
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->
session->master_key,
p, i);
OPENSSL_cleanse(p, i);
return (ret);
} else
#endif
#ifndef OPENSSL_NO_PSK
if (alg_k & SSL_kPSK) {
unsigned char *t = NULL;
unsigned char psk_or_pre_ms[PSK_MAX_PSK_LEN * 2 + 4];
unsigned int pre_ms_len = 0, psk_len = 0;
int psk_err = 1;
char tmp_id[PSK_MAX_IDENTITY_LEN + 1];
al = SSL_AD_HANDSHAKE_FAILURE;
n2s(p, i);
if (n != i + 2) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, SSL_R_LENGTH_MISMATCH);
goto psk_err;
}
if (i > PSK_MAX_IDENTITY_LEN) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DATA_LENGTH_TOO_LONG);
goto psk_err;
}
if (s->psk_server_callback == NULL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_PSK_NO_SERVER_CB);
goto psk_err;
}
/*
* Create guaranteed NULL-terminated identity string for the callback
*/
memcpy(tmp_id, p, i);
memset(tmp_id + i, 0, PSK_MAX_IDENTITY_LEN + 1 - i);
psk_len = s->psk_server_callback(s, tmp_id,
psk_or_pre_ms,
sizeof(psk_or_pre_ms));
OPENSSL_cleanse(tmp_id, PSK_MAX_IDENTITY_LEN + 1);
if (psk_len > PSK_MAX_PSK_LEN) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR);
goto psk_err;
} else if (psk_len == 0) {
/*
* PSK related to the given identity not found
*/
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_PSK_IDENTITY_NOT_FOUND);
al = SSL_AD_UNKNOWN_PSK_IDENTITY;
goto psk_err;
}
/* create PSK pre_master_secret */
pre_ms_len = 2 + psk_len + 2 + psk_len;
t = psk_or_pre_ms;
memmove(psk_or_pre_ms + psk_len + 4, psk_or_pre_ms, psk_len);
s2n(psk_len, t);
memset(t, 0, psk_len);
t += psk_len;
s2n(psk_len, t);
if (s->session->psk_identity != NULL)
OPENSSL_free(s->session->psk_identity);
s->session->psk_identity = BUF_strndup((char *)p, i);
if (s->session->psk_identity == NULL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE);
goto psk_err;
}
if (s->session->psk_identity_hint != NULL)
OPENSSL_free(s->session->psk_identity_hint);
s->session->psk_identity_hint = BUF_strdup(s->ctx->psk_identity_hint);
if (s->ctx->psk_identity_hint != NULL &&
s->session->psk_identity_hint == NULL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE);
goto psk_err;
}
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->
session->master_key,
psk_or_pre_ms,
pre_ms_len);
psk_err = 0;
psk_err:
OPENSSL_cleanse(psk_or_pre_ms, sizeof(psk_or_pre_ms));
if (psk_err != 0)
goto f_err;
} else
#endif
#ifndef OPENSSL_NO_SRP
if (alg_k & SSL_kSRP) {
int param_len;
n2s(p, i);
param_len = i + 2;
if (param_len > n) {
al = SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_BAD_SRP_A_LENGTH);
goto f_err;
}
if (!(s->srp_ctx.A = BN_bin2bn(p, i, NULL))) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_BN_LIB);
goto err;
}
if (BN_ucmp(s->srp_ctx.A, s->srp_ctx.N) >= 0
|| BN_is_zero(s->srp_ctx.A)) {
al = SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_BAD_SRP_PARAMETERS);
goto f_err;
}
if (s->session->srp_username != NULL)
OPENSSL_free(s->session->srp_username);
s->session->srp_username = BUF_strdup(s->srp_ctx.login);
if (s->session->srp_username == NULL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE);
goto err;
}
if ((s->session->master_key_length =
SRP_generate_server_master_secret(s,
s->session->master_key)) < 0) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR);
goto err;
}
p += i;
} else
#endif /* OPENSSL_NO_SRP */
if (alg_k & SSL_kGOST) {
int ret = 0;
EVP_PKEY_CTX *pkey_ctx;
EVP_PKEY *client_pub_pkey = NULL, *pk = NULL;
unsigned char premaster_secret[32], *start;
size_t outlen = 32, inlen;
unsigned long alg_a;
int Ttag, Tclass;
long Tlen;
/* Get our certificate private key */
alg_a = s->s3->tmp.new_cipher->algorithm_auth;
if (alg_a & SSL_aGOST94)
pk = s->cert->pkeys[SSL_PKEY_GOST94].privatekey;
else if (alg_a & SSL_aGOST01)
pk = s->cert->pkeys[SSL_PKEY_GOST01].privatekey;
pkey_ctx = EVP_PKEY_CTX_new(pk, NULL);
EVP_PKEY_decrypt_init(pkey_ctx);
/*
* If client certificate is present and is of the same type, maybe
* use it for key exchange. Don't mind errors from
* EVP_PKEY_derive_set_peer, because it is completely valid to use a
* client certificate for authorization only.
*/
client_pub_pkey = X509_get_pubkey(s->session->peer);
if (client_pub_pkey) {
if (EVP_PKEY_derive_set_peer(pkey_ctx, client_pub_pkey) <= 0)
ERR_clear_error();
}
/* Decrypt session key */
if (ASN1_get_object
((const unsigned char **)&p, &Tlen, &Ttag, &Tclass,
n) != V_ASN1_CONSTRUCTED || Ttag != V_ASN1_SEQUENCE
|| Tclass != V_ASN1_UNIVERSAL) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DECRYPTION_FAILED);
goto gerr;
}
start = p;
inlen = Tlen;
if (EVP_PKEY_decrypt
(pkey_ctx, premaster_secret, &outlen, start, inlen) <= 0) {
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE,
SSL_R_DECRYPTION_FAILED);
goto gerr;
}
/* Generate master secret */
s->session->master_key_length =
s->method->ssl3_enc->generate_master_secret(s,
s->
session->master_key,
premaster_secret, 32);
OPENSSL_cleanse(premaster_secret, sizeof(premaster_secret));
/* Check if pubkey from client certificate was used */
if (EVP_PKEY_CTX_ctrl
(pkey_ctx, -1, -1, EVP_PKEY_CTRL_PEER_KEY, 2, NULL) > 0)
ret = 2;
else
ret = 1;
gerr:
EVP_PKEY_free(client_pub_pkey);
EVP_PKEY_CTX_free(pkey_ctx);
if (ret)
return ret;
else
goto err;
} else {
al = SSL_AD_HANDSHAKE_FAILURE;
SSLerr(SSL_F_SSL3_GET_CLIENT_KEY_EXCHANGE, SSL_R_UNKNOWN_CIPHER_TYPE);
goto f_err;
}
return (1);
f_err:
ssl3_send_alert(s, SSL3_AL_FATAL, al);
#if !defined(OPENSSL_NO_DH) || !defined(OPENSSL_NO_RSA) || !defined(OPENSSL_NO_ECDH) || defined(OPENSSL_NO_SRP)
err:
#endif
#ifndef OPENSSL_NO_ECDH
EVP_PKEY_free(clnt_pub_pkey);
EC_POINT_free(clnt_ecpoint);
if (srvr_ecdh != NULL)
EC_KEY_free(srvr_ecdh);
BN_CTX_free(bn_ctx);
#endif
s->state = SSL_ST_ERR;
return (-1);
}
| 164,717 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void CameraSource::dataCallbackTimestamp(int64_t timestampUs,
int32_t msgType __unused, const sp<IMemory> &data) {
ALOGV("dataCallbackTimestamp: timestamp %lld us", (long long)timestampUs);
Mutex::Autolock autoLock(mLock);
if (!mStarted || (mNumFramesReceived == 0 && timestampUs < mStartTimeUs)) {
ALOGV("Drop frame at %lld/%lld us", (long long)timestampUs, (long long)mStartTimeUs);
releaseOneRecordingFrame(data);
return;
}
if (skipCurrentFrame(timestampUs)) {
releaseOneRecordingFrame(data);
return;
}
if (mNumFramesReceived > 0) {
if (timestampUs <= mLastFrameTimestampUs) {
ALOGW("Dropping frame with backward timestamp %lld (last %lld)",
(long long)timestampUs, (long long)mLastFrameTimestampUs);
releaseOneRecordingFrame(data);
return;
}
if (timestampUs - mLastFrameTimestampUs > mGlitchDurationThresholdUs) {
++mNumGlitches;
}
}
mLastFrameTimestampUs = timestampUs;
if (mNumFramesReceived == 0) {
mFirstFrameTimeUs = timestampUs;
if (mStartTimeUs > 0) {
if (timestampUs < mStartTimeUs) {
releaseOneRecordingFrame(data);
return;
}
mStartTimeUs = timestampUs - mStartTimeUs;
}
}
++mNumFramesReceived;
CHECK(data != NULL && data->size() > 0);
mFramesReceived.push_back(data);
int64_t timeUs = mStartTimeUs + (timestampUs - mFirstFrameTimeUs);
mFrameTimes.push_back(timeUs);
ALOGV("initial delay: %" PRId64 ", current time stamp: %" PRId64,
mStartTimeUs, timeUs);
mFrameAvailableCondition.signal();
}
Commit Message: DO NOT MERGE: Camera: Adjust pointers to ANW buffers to avoid infoleak
Subtract address of a random static object from pointers being routed
through app process.
Bug: 28466701
Change-Id: Idcbfe81e9507433769672f3dc6d67db5eeed4e04
CWE ID: CWE-200 | void CameraSource::dataCallbackTimestamp(int64_t timestampUs,
int32_t msgType __unused, const sp<IMemory> &data) {
ALOGV("dataCallbackTimestamp: timestamp %lld us", (long long)timestampUs);
Mutex::Autolock autoLock(mLock);
if (!mStarted || (mNumFramesReceived == 0 && timestampUs < mStartTimeUs)) {
ALOGV("Drop frame at %lld/%lld us", (long long)timestampUs, (long long)mStartTimeUs);
releaseOneRecordingFrame(data);
return;
}
if (skipCurrentFrame(timestampUs)) {
releaseOneRecordingFrame(data);
return;
}
if (mNumFramesReceived > 0) {
if (timestampUs <= mLastFrameTimestampUs) {
ALOGW("Dropping frame with backward timestamp %lld (last %lld)",
(long long)timestampUs, (long long)mLastFrameTimestampUs);
releaseOneRecordingFrame(data);
return;
}
if (timestampUs - mLastFrameTimestampUs > mGlitchDurationThresholdUs) {
++mNumGlitches;
}
}
mLastFrameTimestampUs = timestampUs;
if (mNumFramesReceived == 0) {
mFirstFrameTimeUs = timestampUs;
if (mStartTimeUs > 0) {
if (timestampUs < mStartTimeUs) {
releaseOneRecordingFrame(data);
return;
}
mStartTimeUs = timestampUs - mStartTimeUs;
}
}
++mNumFramesReceived;
CHECK(data != NULL && data->size() > 0);
// b/28466701
adjustIncomingANWBuffer(data.get());
mFramesReceived.push_back(data);
int64_t timeUs = mStartTimeUs + (timestampUs - mFirstFrameTimeUs);
mFrameTimes.push_back(timeUs);
ALOGV("initial delay: %" PRId64 ", current time stamp: %" PRId64,
mStartTimeUs, timeUs);
mFrameAvailableCondition.signal();
}
| 173,508 |
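The fix in the row above follows the mitigation named in its commit message: native buffer pointers are rebased against the address of a private static object before being routed through the app process, so raw heap addresses are never exposed, and the offset is undone on the way back in (adjustIncomingANWBuffer). A minimal C sketch of that rebasing pattern, with invented identifiers and no claim to match the AOSP code:

#include <stdint.h>

/* Address of a private static object serves as the secret base. */
static const char kObfuscationAnchor = 0;

/* Outgoing: replace the raw pointer with an anchor-relative value. */
static uintptr_t obfuscate_ptr(const void *raw)
{
    return (uintptr_t)raw - (uintptr_t)&kObfuscationAnchor;
}

/* Incoming: recover the raw pointer from the anchor-relative value. */
static void *deobfuscate_ptr(uintptr_t wire_value)
{
    return (void *)(wire_value + (uintptr_t)&kObfuscationAnchor);
}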
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static cJSON *cJSON_New_Item( void )
{
cJSON* node = (cJSON*) cJSON_malloc( sizeof(cJSON) );
if ( node )
memset( node, 0, sizeof(cJSON) );
return node;
}
Commit Message: Fix a buffer overflow / heap corruption issue that could occur if a
malformed JSON string was passed on the control channel. This issue,
present in the cJSON library, was already fixed upstream, so was
addressed here in iperf3 by importing a newer version of cJSON (plus
local ESnet modifications).
Discovered and reported by Dave McDaniel, Cisco Talos.
Based on a patch by @dopheide-esnet, with input from @DaveGamble.
Cross-references: TALOS-CAN-0164, ESNET-SECADV-2016-0001,
CVE-2016-4303
(cherry picked from commit ed94082be27d971a5e1b08b666e2c217cf470a40)
Signed-off-by: Bruce A. Mah <bmah@es.net>
CWE ID: CWE-119 | static cJSON *cJSON_New_Item( void )
static cJSON *cJSON_New_Item(void)
{
cJSON* node = (cJSON*)cJSON_malloc(sizeof(cJSON));
if (node) memset(node,0,sizeof(cJSON));
return node;
}
| 167,291 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static av_cold int vqa_decode_init(AVCodecContext *avctx)
{
VqaContext *s = avctx->priv_data;
int i, j, codebook_index;
s->avctx = avctx;
avctx->pix_fmt = PIX_FMT_PAL8;
/* make sure the extradata made it */
if (s->avctx->extradata_size != VQA_HEADER_SIZE) {
av_log(s->avctx, AV_LOG_ERROR, " VQA video: expected extradata size of %d\n", VQA_HEADER_SIZE);
return -1;
}
/* load up the VQA parameters from the header */
s->vqa_version = s->avctx->extradata[0];
s->width = AV_RL16(&s->avctx->extradata[6]);
s->height = AV_RL16(&s->avctx->extradata[8]);
if(av_image_check_size(s->width, s->height, 0, avctx)){
s->width= s->height= 0;
return -1;
}
s->vector_width = s->avctx->extradata[10];
s->vector_height = s->avctx->extradata[11];
s->partial_count = s->partial_countdown = s->avctx->extradata[13];
/* the vector dimensions have to meet very stringent requirements */
if ((s->vector_width != 4) ||
((s->vector_height != 2) && (s->vector_height != 4))) {
/* return without further initialization */
return -1;
}
/* allocate codebooks */
s->codebook_size = MAX_CODEBOOK_SIZE;
s->codebook = av_malloc(s->codebook_size);
/* allocate decode buffer */
s->decode_buffer_size = (s->width / s->vector_width) *
(s->height / s->vector_height) * 2;
s->decode_buffer = av_malloc(s->decode_buffer_size);
if (!s->decode_buffer)
goto fail;
/* initialize the solid-color vectors */
if (s->vector_height == 4) {
codebook_index = 0xFF00 * 16;
for (i = 0; i < 256; i++)
for (j = 0; j < 16; j++)
s->codebook[codebook_index++] = i;
} else {
codebook_index = 0xF00 * 8;
for (i = 0; i < 256; i++)
for (j = 0; j < 8; j++)
s->codebook[codebook_index++] = i;
}
s->next_codebook_buffer_index = 0;
s->frame.data[0] = NULL;
return 0;
fail:
av_freep(&s->codebook);
av_freep(&s->next_codebook_buffer);
av_freep(&s->decode_buffer);
return AVERROR(ENOMEM);
}
Commit Message:
CWE ID: CWE-119 | static av_cold int vqa_decode_init(AVCodecContext *avctx)
{
VqaContext *s = avctx->priv_data;
int i, j, codebook_index;
s->avctx = avctx;
avctx->pix_fmt = PIX_FMT_PAL8;
/* make sure the extradata made it */
if (s->avctx->extradata_size != VQA_HEADER_SIZE) {
av_log(s->avctx, AV_LOG_ERROR, " VQA video: expected extradata size of %d\n", VQA_HEADER_SIZE);
return -1;
}
/* load up the VQA parameters from the header */
s->vqa_version = s->avctx->extradata[0];
s->width = AV_RL16(&s->avctx->extradata[6]);
s->height = AV_RL16(&s->avctx->extradata[8]);
if(av_image_check_size(s->width, s->height, 0, avctx)){
s->width= s->height= 0;
return -1;
}
s->vector_width = s->avctx->extradata[10];
s->vector_height = s->avctx->extradata[11];
s->partial_count = s->partial_countdown = s->avctx->extradata[13];
/* the vector dimensions have to meet very stringent requirements */
if ((s->vector_width != 4) ||
((s->vector_height != 2) && (s->vector_height != 4))) {
/* return without further initialization */
return -1;
}
if (s->width & (s->vector_width - 1) ||
s->height & (s->vector_height - 1)) {
av_log(avctx, AV_LOG_ERROR, "Image size not multiple of block size\n");
return AVERROR_INVALIDDATA;
}
/* allocate codebooks */
s->codebook_size = MAX_CODEBOOK_SIZE;
s->codebook = av_malloc(s->codebook_size);
/* allocate decode buffer */
s->decode_buffer_size = (s->width / s->vector_width) *
(s->height / s->vector_height) * 2;
s->decode_buffer = av_malloc(s->decode_buffer_size);
if (!s->decode_buffer)
goto fail;
/* initialize the solid-color vectors */
if (s->vector_height == 4) {
codebook_index = 0xFF00 * 16;
for (i = 0; i < 256; i++)
for (j = 0; j < 16; j++)
s->codebook[codebook_index++] = i;
} else {
codebook_index = 0xF00 * 8;
for (i = 0; i < 256; i++)
for (j = 0; j < 8; j++)
s->codebook[codebook_index++] = i;
}
s->next_codebook_buffer_index = 0;
s->frame.data[0] = NULL;
return 0;
fail:
av_freep(&s->codebook);
av_freep(&s->next_codebook_buffer);
av_freep(&s->decode_buffer);
return AVERROR(ENOMEM);
}
| 165,148 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static void dex_parse_debug_item(RBinFile *binfile, RBinDexObj *bin,
RBinDexClass *c, int MI, int MA, int paddr, int ins_size,
int insns_size, char *class_name, int regsz,
int debug_info_off) {
struct r_bin_t *rbin = binfile->rbin;
const ut8 *p4 = r_buf_get_at (binfile->buf, debug_info_off, NULL);
const ut8 *p4_end = p4 + binfile->buf->length - debug_info_off;
ut64 line_start;
ut64 parameters_size;
ut64 param_type_idx;
ut16 argReg = regsz - ins_size;
ut64 source_file_idx = c->source_file;
RList *params, *debug_positions, *emitted_debug_locals = NULL;
bool keep = true;
if (argReg > regsz) {
return; // this return breaks tests
}
p4 = r_uleb128 (p4, p4_end - p4, &line_start);
p4 = r_uleb128 (p4, p4_end - p4, ¶meters_size);
ut32 address = 0;
ut32 line = line_start;
if (!(debug_positions = r_list_newf ((RListFree)free))) {
return;
}
if (!(emitted_debug_locals = r_list_newf ((RListFree)free))) {
r_list_free (debug_positions);
return;
}
struct dex_debug_local_t debug_locals[regsz];
memset (debug_locals, 0, sizeof (struct dex_debug_local_t) * regsz);
if (!(MA & 0x0008)) {
debug_locals[argReg].name = "this";
debug_locals[argReg].descriptor = r_str_newf("%s;", class_name);
debug_locals[argReg].startAddress = 0;
debug_locals[argReg].signature = NULL;
debug_locals[argReg].live = true;
argReg++;
}
if (!(params = dex_method_signature2 (bin, MI))) {
r_list_free (debug_positions);
r_list_free (emitted_debug_locals);
return;
}
RListIter *iter = r_list_iterator (params);
char *name;
char *type;
int reg;
r_list_foreach (params, iter, type) {
if ((argReg >= regsz) || !type || parameters_size <= 0) {
r_list_free (debug_positions);
r_list_free (params);
r_list_free (emitted_debug_locals);
return;
}
p4 = r_uleb128 (p4, p4_end - p4, ¶m_type_idx); // read uleb128p1
param_type_idx -= 1;
name = getstr (bin, param_type_idx);
reg = argReg;
switch (type[0]) {
case 'D':
case 'J':
argReg += 2;
break;
default:
argReg += 1;
break;
}
if (name) {
debug_locals[reg].name = name;
debug_locals[reg].descriptor = type;
debug_locals[reg].signature = NULL;
debug_locals[reg].startAddress = address;
debug_locals[reg].live = true;
}
--parameters_size;
}
ut8 opcode = *(p4++) & 0xff;
while (keep) {
switch (opcode) {
case 0x0: // DBG_END_SEQUENCE
keep = false;
break;
case 0x1: // DBG_ADVANCE_PC
{
ut64 addr_diff;
p4 = r_uleb128 (p4, p4_end - p4, &addr_diff);
address += addr_diff;
}
break;
case 0x2: // DBG_ADVANCE_LINE
{
st64 line_diff = r_sleb128 (&p4, p4_end);
line += line_diff;
}
break;
case 0x3: // DBG_START_LOCAL
{
ut64 register_num;
ut64 name_idx;
ut64 type_idx;
p4 = r_uleb128 (p4, p4_end - p4, ®ister_num);
p4 = r_uleb128 (p4, p4_end - p4, &name_idx);
name_idx -= 1;
p4 = r_uleb128 (p4, p4_end - p4, &type_idx);
type_idx -= 1;
if (register_num >= regsz) {
r_list_free (debug_positions);
r_list_free (params);
return;
}
if (debug_locals[register_num].live) {
struct dex_debug_local_t *local = malloc (
sizeof (struct dex_debug_local_t));
if (!local) {
keep = false;
break;
}
local->name = debug_locals[register_num].name;
local->descriptor = debug_locals[register_num].descriptor;
local->startAddress = debug_locals[register_num].startAddress;
local->signature = debug_locals[register_num].signature;
local->live = true;
local->reg = register_num;
local->endAddress = address;
r_list_append (emitted_debug_locals, local);
}
debug_locals[register_num].name = getstr (bin, name_idx);
debug_locals[register_num].descriptor = dex_type_descriptor (bin, type_idx);
debug_locals[register_num].startAddress = address;
debug_locals[register_num].signature = NULL;
debug_locals[register_num].live = true;
}
break;
case 0x4: //DBG_START_LOCAL_EXTENDED
{
ut64 register_num;
ut64 name_idx;
ut64 type_idx;
ut64 sig_idx;
p4 = r_uleb128 (p4, p4_end - p4, ®ister_num);
p4 = r_uleb128 (p4, p4_end - p4, &name_idx);
name_idx -= 1;
p4 = r_uleb128 (p4, p4_end - p4, &type_idx);
type_idx -= 1;
p4 = r_uleb128 (p4, p4_end - p4, &sig_idx);
sig_idx -= 1;
if (register_num >= regsz) {
r_list_free (debug_positions);
r_list_free (params);
return;
}
if (debug_locals[register_num].live) {
struct dex_debug_local_t *local = malloc (
sizeof (struct dex_debug_local_t));
if (!local) {
keep = false;
break;
}
local->name = debug_locals[register_num].name;
local->descriptor = debug_locals[register_num].descriptor;
local->startAddress = debug_locals[register_num].startAddress;
local->signature = debug_locals[register_num].signature;
local->live = true;
local->reg = register_num;
local->endAddress = address;
r_list_append (emitted_debug_locals, local);
}
debug_locals[register_num].name = getstr (bin, name_idx);
debug_locals[register_num].descriptor = dex_type_descriptor (bin, type_idx);
debug_locals[register_num].startAddress = address;
debug_locals[register_num].signature = getstr (bin, sig_idx);
debug_locals[register_num].live = true;
}
break;
case 0x5: // DBG_END_LOCAL
{
ut64 register_num;
p4 = r_uleb128 (p4, p4_end - p4, ®ister_num);
if (debug_locals[register_num].live) {
struct dex_debug_local_t *local = malloc (
sizeof (struct dex_debug_local_t));
if (!local) {
keep = false;
break;
}
local->name = debug_locals[register_num].name;
local->descriptor = debug_locals[register_num].descriptor;
local->startAddress = debug_locals[register_num].startAddress;
local->signature = debug_locals[register_num].signature;
local->live = true;
local->reg = register_num;
local->endAddress = address;
r_list_append (emitted_debug_locals, local);
}
debug_locals[register_num].live = false;
}
break;
case 0x6: // DBG_RESTART_LOCAL
{
ut64 register_num;
p4 = r_uleb128 (p4, p4_end - p4, ®ister_num);
if (!debug_locals[register_num].live) {
debug_locals[register_num].startAddress = address;
debug_locals[register_num].live = true;
}
}
break;
case 0x7: //DBG_SET_PROLOGUE_END
break;
case 0x8: //DBG_SET_PROLOGUE_BEGIN
break;
case 0x9:
{
p4 = r_uleb128 (p4, p4_end - p4, &source_file_idx);
source_file_idx--;
}
break;
default:
{
int adjusted_opcode = opcode - 0x0a;
address += (adjusted_opcode / 15);
line += -4 + (adjusted_opcode % 15);
struct dex_debug_position_t *position =
malloc (sizeof (struct dex_debug_position_t));
if (!position) {
keep = false;
break;
}
position->source_file_idx = source_file_idx;
position->address = address;
position->line = line;
r_list_append (debug_positions, position);
}
break;
}
opcode = *(p4++) & 0xff;
}
if (!binfile->sdb_addrinfo) {
binfile->sdb_addrinfo = sdb_new0 ();
}
char *fileline;
char offset[64];
char *offset_ptr;
RListIter *iter1;
struct dex_debug_position_t *pos;
r_list_foreach (debug_positions, iter1, pos) {
fileline = r_str_newf ("%s|%"PFMT64d, getstr (bin, pos->source_file_idx), pos->line);
offset_ptr = sdb_itoa (pos->address + paddr, offset, 16);
sdb_set (binfile->sdb_addrinfo, offset_ptr, fileline, 0);
sdb_set (binfile->sdb_addrinfo, fileline, offset_ptr, 0);
}
if (!dexdump) {
r_list_free (debug_positions);
r_list_free (emitted_debug_locals);
r_list_free (params);
return;
}
RListIter *iter2;
struct dex_debug_position_t *position;
rbin->cb_printf (" positions :\n");
r_list_foreach (debug_positions, iter2, position) {
rbin->cb_printf (" 0x%04llx line=%llu\n",
position->address, position->line);
}
rbin->cb_printf (" locals :\n");
RListIter *iter3;
struct dex_debug_local_t *local;
r_list_foreach (emitted_debug_locals, iter3, local) {
if (local->signature) {
rbin->cb_printf (
" 0x%04x - 0x%04x reg=%d %s %s %s\n",
local->startAddress, local->endAddress,
local->reg, local->name, local->descriptor,
local->signature);
} else {
rbin->cb_printf (
" 0x%04x - 0x%04x reg=%d %s %s\n",
local->startAddress, local->endAddress,
local->reg, local->name, local->descriptor);
}
}
for (reg = 0; reg < regsz; reg++) {
if (debug_locals[reg].live) {
if (debug_locals[reg].signature) {
rbin->cb_printf (
" 0x%04x - 0x%04x reg=%d %s %s "
"%s\n",
debug_locals[reg].startAddress,
insns_size, reg, debug_locals[reg].name,
debug_locals[reg].descriptor,
debug_locals[reg].signature);
} else {
rbin->cb_printf (
" 0x%04x - 0x%04x reg=%d %s %s"
"\n",
debug_locals[reg].startAddress,
insns_size, reg, debug_locals[reg].name,
debug_locals[reg].descriptor);
}
}
}
r_list_free (debug_positions);
r_list_free (emitted_debug_locals);
r_list_free (params);
}
Commit Message: fix #6872
CWE ID: CWE-476 | static void dex_parse_debug_item(RBinFile *binfile, RBinDexObj *bin,
RBinDexClass *c, int MI, int MA, int paddr, int ins_size,
int insns_size, char *class_name, int regsz,
int debug_info_off) {
struct r_bin_t *rbin = binfile->rbin;
const ut8 *p4 = r_buf_get_at (binfile->buf, debug_info_off, NULL);
const ut8 *p4_end = p4 + binfile->buf->length - debug_info_off;
ut64 line_start;
ut64 parameters_size;
ut64 param_type_idx;
ut16 argReg = regsz - ins_size;
ut64 source_file_idx = c->source_file;
RList *params, *debug_positions, *emitted_debug_locals = NULL;
bool keep = true;
if (argReg > regsz) {
return; // this return breaks tests
}
p4 = r_uleb128 (p4, p4_end - p4, &line_start);
p4 = r_uleb128 (p4, p4_end - p4, ¶meters_size);
ut32 address = 0;
ut32 line = line_start;
if (!(debug_positions = r_list_newf ((RListFree)free))) {
return;
}
if (!(emitted_debug_locals = r_list_newf ((RListFree)free))) {
r_list_free (debug_positions);
return;
}
struct dex_debug_local_t debug_locals[regsz];
memset (debug_locals, 0, sizeof (struct dex_debug_local_t) * regsz);
if (!(MA & 0x0008)) {
debug_locals[argReg].name = "this";
debug_locals[argReg].descriptor = r_str_newf("%s;", class_name);
debug_locals[argReg].startAddress = 0;
debug_locals[argReg].signature = NULL;
debug_locals[argReg].live = true;
argReg++;
}
if (!(params = dex_method_signature2 (bin, MI))) {
r_list_free (debug_positions);
r_list_free (emitted_debug_locals);
return;
}
RListIter *iter = r_list_iterator (params);
char *name;
char *type;
int reg;
r_list_foreach (params, iter, type) {
if ((argReg >= regsz) || !type || parameters_size <= 0) {
r_list_free (debug_positions);
r_list_free (params);
r_list_free (emitted_debug_locals);
return;
}
p4 = r_uleb128 (p4, p4_end - p4, ¶m_type_idx); // read uleb128p1
param_type_idx -= 1;
name = getstr (bin, param_type_idx);
reg = argReg;
switch (type[0]) {
case 'D':
case 'J':
argReg += 2;
break;
default:
argReg += 1;
break;
}
if (name) {
debug_locals[reg].name = name;
debug_locals[reg].descriptor = type;
debug_locals[reg].signature = NULL;
debug_locals[reg].startAddress = address;
debug_locals[reg].live = true;
}
--parameters_size;
}
if (p4 <= 0) {
return;
}
ut8 opcode = *(p4++) & 0xff;
while (keep) {
switch (opcode) {
case 0x0: // DBG_END_SEQUENCE
keep = false;
break;
case 0x1: // DBG_ADVANCE_PC
{
ut64 addr_diff;
p4 = r_uleb128 (p4, p4_end - p4, &addr_diff);
address += addr_diff;
}
break;
case 0x2: // DBG_ADVANCE_LINE
{
st64 line_diff = r_sleb128 (&p4, p4_end);
line += line_diff;
}
break;
case 0x3: // DBG_START_LOCAL
{
ut64 register_num;
ut64 name_idx;
ut64 type_idx;
p4 = r_uleb128 (p4, p4_end - p4, ®ister_num);
p4 = r_uleb128 (p4, p4_end - p4, &name_idx);
name_idx -= 1;
p4 = r_uleb128 (p4, p4_end - p4, &type_idx);
type_idx -= 1;
if (register_num >= regsz) {
r_list_free (debug_positions);
r_list_free (params);
return;
}
if (debug_locals[register_num].live) {
struct dex_debug_local_t *local = malloc (
sizeof (struct dex_debug_local_t));
if (!local) {
keep = false;
break;
}
local->name = debug_locals[register_num].name;
local->descriptor = debug_locals[register_num].descriptor;
local->startAddress = debug_locals[register_num].startAddress;
local->signature = debug_locals[register_num].signature;
local->live = true;
local->reg = register_num;
local->endAddress = address;
r_list_append (emitted_debug_locals, local);
}
debug_locals[register_num].name = getstr (bin, name_idx);
debug_locals[register_num].descriptor = dex_type_descriptor (bin, type_idx);
debug_locals[register_num].startAddress = address;
debug_locals[register_num].signature = NULL;
debug_locals[register_num].live = true;
}
break;
case 0x4: //DBG_START_LOCAL_EXTENDED
{
ut64 register_num;
ut64 name_idx;
ut64 type_idx;
ut64 sig_idx;
p4 = r_uleb128 (p4, p4_end - p4, ®ister_num);
p4 = r_uleb128 (p4, p4_end - p4, &name_idx);
name_idx -= 1;
p4 = r_uleb128 (p4, p4_end - p4, &type_idx);
type_idx -= 1;
p4 = r_uleb128 (p4, p4_end - p4, &sig_idx);
sig_idx -= 1;
if (register_num >= regsz) {
r_list_free (debug_positions);
r_list_free (params);
return;
}
if (debug_locals[register_num].live) {
struct dex_debug_local_t *local = malloc (
sizeof (struct dex_debug_local_t));
if (!local) {
keep = false;
break;
}
local->name = debug_locals[register_num].name;
local->descriptor = debug_locals[register_num].descriptor;
local->startAddress = debug_locals[register_num].startAddress;
local->signature = debug_locals[register_num].signature;
local->live = true;
local->reg = register_num;
local->endAddress = address;
r_list_append (emitted_debug_locals, local);
}
debug_locals[register_num].name = getstr (bin, name_idx);
debug_locals[register_num].descriptor = dex_type_descriptor (bin, type_idx);
debug_locals[register_num].startAddress = address;
debug_locals[register_num].signature = getstr (bin, sig_idx);
debug_locals[register_num].live = true;
}
break;
case 0x5: // DBG_END_LOCAL
{
ut64 register_num;
p4 = r_uleb128 (p4, p4_end - p4, ®ister_num);
if (debug_locals[register_num].live) {
struct dex_debug_local_t *local = malloc (
sizeof (struct dex_debug_local_t));
if (!local) {
keep = false;
break;
}
local->name = debug_locals[register_num].name;
local->descriptor = debug_locals[register_num].descriptor;
local->startAddress = debug_locals[register_num].startAddress;
local->signature = debug_locals[register_num].signature;
local->live = true;
local->reg = register_num;
local->endAddress = address;
r_list_append (emitted_debug_locals, local);
}
debug_locals[register_num].live = false;
}
break;
case 0x6: // DBG_RESTART_LOCAL
{
ut64 register_num;
p4 = r_uleb128 (p4, p4_end - p4, ®ister_num);
if (!debug_locals[register_num].live) {
debug_locals[register_num].startAddress = address;
debug_locals[register_num].live = true;
}
}
break;
case 0x7: //DBG_SET_PROLOGUE_END
break;
case 0x8: //DBG_SET_PROLOGUE_BEGIN
break;
case 0x9:
{
p4 = r_uleb128 (p4, p4_end - p4, &source_file_idx);
source_file_idx--;
}
break;
default:
{
int adjusted_opcode = opcode - 0x0a;
address += (adjusted_opcode / 15);
line += -4 + (adjusted_opcode % 15);
struct dex_debug_position_t *position =
malloc (sizeof (struct dex_debug_position_t));
if (!position) {
keep = false;
break;
}
position->source_file_idx = source_file_idx;
position->address = address;
position->line = line;
r_list_append (debug_positions, position);
}
break;
}
opcode = *(p4++) & 0xff;
}
if (!binfile->sdb_addrinfo) {
binfile->sdb_addrinfo = sdb_new0 ();
}
char *fileline;
char offset[64];
char *offset_ptr;
RListIter *iter1;
struct dex_debug_position_t *pos;
r_list_foreach (debug_positions, iter1, pos) {
fileline = r_str_newf ("%s|%"PFMT64d, getstr (bin, pos->source_file_idx), pos->line);
offset_ptr = sdb_itoa (pos->address + paddr, offset, 16);
sdb_set (binfile->sdb_addrinfo, offset_ptr, fileline, 0);
sdb_set (binfile->sdb_addrinfo, fileline, offset_ptr, 0);
}
if (!dexdump) {
r_list_free (debug_positions);
r_list_free (emitted_debug_locals);
r_list_free (params);
return;
}
RListIter *iter2;
struct dex_debug_position_t *position;
rbin->cb_printf (" positions :\n");
r_list_foreach (debug_positions, iter2, position) {
rbin->cb_printf (" 0x%04llx line=%llu\n",
position->address, position->line);
}
rbin->cb_printf (" locals :\n");
RListIter *iter3;
struct dex_debug_local_t *local;
r_list_foreach (emitted_debug_locals, iter3, local) {
if (local->signature) {
rbin->cb_printf (
" 0x%04x - 0x%04x reg=%d %s %s %s\n",
local->startAddress, local->endAddress,
local->reg, local->name, local->descriptor,
local->signature);
} else {
rbin->cb_printf (
" 0x%04x - 0x%04x reg=%d %s %s\n",
local->startAddress, local->endAddress,
local->reg, local->name, local->descriptor);
}
}
for (reg = 0; reg < regsz; reg++) {
if (debug_locals[reg].live) {
if (debug_locals[reg].signature) {
rbin->cb_printf (
" 0x%04x - 0x%04x reg=%d %s %s "
"%s\n",
debug_locals[reg].startAddress,
insns_size, reg, debug_locals[reg].name,
debug_locals[reg].descriptor,
debug_locals[reg].signature);
} else {
rbin->cb_printf (
" 0x%04x - 0x%04x reg=%d %s %s"
"\n",
debug_locals[reg].startAddress,
insns_size, reg, debug_locals[reg].name,
debug_locals[reg].descriptor);
}
}
}
r_list_free (debug_positions);
r_list_free (emitted_debug_locals);
r_list_free (params);
}
| 168,340 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: spnego_gss_import_sec_context(
OM_uint32 *minor_status,
const gss_buffer_t interprocess_token,
gss_ctx_id_t *context_handle)
{
OM_uint32 ret;
ret = gss_import_sec_context(minor_status,
interprocess_token,
context_handle);
return (ret);
}
Commit Message: Fix SPNEGO context aliasing bugs [CVE-2015-2695]
The SPNEGO mechanism currently replaces its context handle with the
mechanism context handle upon establishment, under the assumption that
most GSS functions are only called after context establishment. This
assumption is incorrect, and can lead to aliasing violations for some
programs. Maintain the SPNEGO context structure after context
establishment and refer to it in all GSS methods. Add initiate and
opened flags to the SPNEGO context structure for use in
gss_inquire_context() prior to context establishment.
CVE-2015-2695:
In MIT krb5 1.5 and later, applications which call
gss_inquire_context() on a partially-established SPNEGO context can
cause the GSS-API library to read from a pointer using the wrong type,
generally causing a process crash. This bug may go unnoticed, because
the most common SPNEGO authentication scenario establishes the context
after just one call to gss_accept_sec_context(). Java server
applications using the native JGSS provider are vulnerable to this
bug. A carefully crafted SPNEGO packet might allow the
gss_inquire_context() call to succeed with attacker-determined
results, but applications should not make access control decisions
based on gss_inquire_context() results prior to context establishment.
CVSSv2 Vector: AV:N/AC:M/Au:N/C:N/I:N/A:C/E:POC/RL:OF/RC:C
[ghudson@mit.edu: several bugfixes, style changes, and edge-case
behavior changes; commit message and CVE description]
ticket: 8244
target_version: 1.14
tags: pullup
CWE ID: CWE-18 | spnego_gss_import_sec_context(
OM_uint32 *minor_status,
const gss_buffer_t interprocess_token,
gss_ctx_id_t *context_handle)
{
/*
* Until we implement partial context exports, there are no SPNEGO
* exported context tokens, only tokens for underlying mechs. So just
* return an error for now.
*/
return GSS_S_UNAVAILABLE;
}
| 166,659 |
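The commit message in the row above explains the aliasing bug: SPNEGO used to overwrite its own context handle with the mechanism's handle once establishment finished, so later calls could read the wrong structure type. The fix keeps the wrapper context alive and delegates through it. A simplified C sketch of that shape, using placeholder types that are not the krb5 sources:

#include <stddef.h>

struct spnego_ctx {
    int opened;        /* context fully established? */
    int initiate;      /* initiator or acceptor side */
    void *mech_ctx;    /* underlying mechanism's context */
};

/* Stand-in for the real mechanism-level inquire call. */
static int mech_inquire(void *mech_ctx)
{
    (void)mech_ctx;
    return 0;
}

static int wrapper_inquire(struct spnego_ctx *sc)
{
    if (sc == NULL)
        return -1;
    if (!sc->opened)
        return sc->initiate ? 1 : 2;   /* partial context answered from the wrapper */
    return mech_inquire(sc->mech_ctx); /* established: delegate to the mechanism */
}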
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: GfxImageColorMap::GfxImageColorMap(int bitsA, Object *decode,
GfxColorSpace *colorSpaceA) {
GfxIndexedColorSpace *indexedCS;
GfxSeparationColorSpace *sepCS;
int maxPixel, indexHigh;
Guchar *lookup2;
Function *sepFunc;
Object obj;
double x[gfxColorMaxComps];
double y[gfxColorMaxComps];
int i, j, k, byte;
double mapped;
ok = gTrue;
bits = bitsA;
maxPixel = (1 << bits) - 1;
colorSpace = colorSpaceA;
if (maxPixel > 255) maxPixel = 255;
for (k = 0; k < gfxColorMaxComps; ++k) {
lookup[k] = NULL;
}
if (decode->isNull()) {
nComps = colorSpace->getNComps();
colorSpace->getDefaultRanges(decodeLow, decodeRange, maxPixel);
} else if (decode->isArray()) {
nComps = decode->arrayGetLength() / 2;
if (nComps != colorSpace->getNComps()) {
goto err1;
}
for (i = 0; i < nComps; ++i) {
decode->arrayGet(2*i, &obj);
if (!obj.isNum()) {
goto err2;
}
decodeLow[i] = obj.getNum();
obj.free();
decode->arrayGet(2*i+1, &obj);
if (!obj.isNum()) {
goto err2;
}
decodeRange[i] = obj.getNum() - decodeLow[i];
obj.free();
}
} else {
goto err1;
}
colorSpace2 = NULL;
nComps2 = 0;
if (colorSpace->getMode() == csIndexed) {
indexedCS = (GfxIndexedColorSpace *)colorSpace;
colorSpace2 = indexedCS->getBase();
indexHigh = indexedCS->getIndexHigh();
nComps2 = colorSpace2->getNComps();
lookup2 = indexedCS->getLookup();
colorSpace2->getDefaultRanges(x, y, indexHigh);
byte_lookup = (Guchar *)gmalloc ((maxPixel + 1) * nComps2);
for (k = 0; k < nComps2; ++k) {
lookup[k] = (GfxColorComp *)gmallocn(maxPixel + 1,
sizeof(GfxColorComp));
for (i = 0; i <= maxPixel; ++i) {
j = (int)(decodeLow[0] + (i * decodeRange[0]) / maxPixel + 0.5);
if (j < 0) {
j = 0;
} else if (j > indexHigh) {
j = indexHigh;
}
mapped = x[k] + (lookup2[j*nComps2 + k] / 255.0) * y[k];
lookup[k][i] = dblToCol(mapped);
byte_lookup[i * nComps2 + k] = (Guchar) (mapped * 255);
}
}
} else if (colorSpace->getMode() == csSeparation) {
sepCS = (GfxSeparationColorSpace *)colorSpace;
colorSpace2 = sepCS->getAlt();
nComps2 = colorSpace2->getNComps();
sepFunc = sepCS->getFunc();
byte_lookup = (Guchar *)gmallocn ((maxPixel + 1), nComps2);
for (k = 0; k < nComps2; ++k) {
lookup[k] = (GfxColorComp *)gmallocn(maxPixel + 1,
sizeof(GfxColorComp));
for (i = 0; i <= maxPixel; ++i) {
x[0] = decodeLow[0] + (i * decodeRange[0]) / maxPixel;
sepFunc->transform(x, y);
lookup[k][i] = dblToCol(y[k]);
byte_lookup[i*nComps2 + k] = (Guchar) (y[k] * 255);
}
}
} else {
byte_lookup = (Guchar *)gmallocn ((maxPixel + 1), nComps);
for (k = 0; k < nComps; ++k) {
lookup[k] = (GfxColorComp *)gmallocn(maxPixel + 1,
sizeof(GfxColorComp));
for (i = 0; i <= maxPixel; ++i) {
mapped = decodeLow[k] + (i * decodeRange[k]) / maxPixel;
lookup[k][i] = dblToCol(mapped);
byte = (int) (mapped * 255.0 + 0.5);
if (byte < 0)
byte = 0;
else if (byte > 255)
byte = 255;
byte_lookup[i * nComps + k] = byte;
}
}
}
return;
err2:
obj.free();
err1:
ok = gFalse;
byte_lookup = NULL;
}
Commit Message:
CWE ID: CWE-189 | GfxImageColorMap::GfxImageColorMap(int bitsA, Object *decode,
GfxColorSpace *colorSpaceA) {
GfxIndexedColorSpace *indexedCS;
GfxSeparationColorSpace *sepCS;
int maxPixel, indexHigh;
Guchar *lookup2;
Function *sepFunc;
Object obj;
double x[gfxColorMaxComps];
double y[gfxColorMaxComps];
int i, j, k, byte;
double mapped;
ok = gTrue;
bits = bitsA;
maxPixel = (1 << bits) - 1;
colorSpace = colorSpaceA;
if (maxPixel > 255) maxPixel = 255;
for (k = 0; k < gfxColorMaxComps; ++k) {
lookup[k] = NULL;
}
if (decode->isNull()) {
nComps = colorSpace->getNComps();
colorSpace->getDefaultRanges(decodeLow, decodeRange, maxPixel);
} else if (decode->isArray()) {
nComps = decode->arrayGetLength() / 2;
if (nComps != colorSpace->getNComps()) {
goto err1;
}
for (i = 0; i < nComps; ++i) {
decode->arrayGet(2*i, &obj);
if (!obj.isNum()) {
goto err2;
}
decodeLow[i] = obj.getNum();
obj.free();
decode->arrayGet(2*i+1, &obj);
if (!obj.isNum()) {
goto err2;
}
decodeRange[i] = obj.getNum() - decodeLow[i];
obj.free();
}
} else {
goto err1;
}
colorSpace2 = NULL;
nComps2 = 0;
if (colorSpace->getMode() == csIndexed) {
indexedCS = (GfxIndexedColorSpace *)colorSpace;
colorSpace2 = indexedCS->getBase();
indexHigh = indexedCS->getIndexHigh();
nComps2 = colorSpace2->getNComps();
lookup2 = indexedCS->getLookup();
colorSpace2->getDefaultRanges(x, y, indexHigh);
byte_lookup = (Guchar *)gmallocn ((maxPixel + 1), nComps2);
for (k = 0; k < nComps2; ++k) {
lookup[k] = (GfxColorComp *)gmallocn(maxPixel + 1,
sizeof(GfxColorComp));
for (i = 0; i <= maxPixel; ++i) {
j = (int)(decodeLow[0] + (i * decodeRange[0]) / maxPixel + 0.5);
if (j < 0) {
j = 0;
} else if (j > indexHigh) {
j = indexHigh;
}
mapped = x[k] + (lookup2[j*nComps2 + k] / 255.0) * y[k];
lookup[k][i] = dblToCol(mapped);
byte_lookup[i * nComps2 + k] = (Guchar) (mapped * 255);
}
}
} else if (colorSpace->getMode() == csSeparation) {
sepCS = (GfxSeparationColorSpace *)colorSpace;
colorSpace2 = sepCS->getAlt();
nComps2 = colorSpace2->getNComps();
sepFunc = sepCS->getFunc();
byte_lookup = (Guchar *)gmallocn ((maxPixel + 1), nComps2);
for (k = 0; k < nComps2; ++k) {
lookup[k] = (GfxColorComp *)gmallocn(maxPixel + 1,
sizeof(GfxColorComp));
for (i = 0; i <= maxPixel; ++i) {
x[0] = decodeLow[0] + (i * decodeRange[0]) / maxPixel;
sepFunc->transform(x, y);
lookup[k][i] = dblToCol(y[k]);
byte_lookup[i*nComps2 + k] = (Guchar) (y[k] * 255);
}
}
} else {
byte_lookup = (Guchar *)gmallocn ((maxPixel + 1), nComps);
for (k = 0; k < nComps; ++k) {
lookup[k] = (GfxColorComp *)gmallocn(maxPixel + 1,
sizeof(GfxColorComp));
for (i = 0; i <= maxPixel; ++i) {
mapped = decodeLow[k] + (i * decodeRange[k]) / maxPixel;
lookup[k][i] = dblToCol(mapped);
byte = (int) (mapped * 255.0 + 0.5);
if (byte < 0)
byte = 0;
else if (byte > 255)
byte = 255;
byte_lookup[i * nComps + k] = byte;
}
}
}
return;
err2:
obj.free();
err1:
ok = gFalse;
byte_lookup = NULL;
}
| 164,608 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void InspectorClientImpl::clearBrowserCache()
{
if (WebDevToolsAgentImpl* agent = devToolsAgent())
agent->clearBrowserCache();
}
Commit Message: [4/4] Process clearBrowserCahce/cookies commands in browser.
BUG=366585
Review URL: https://codereview.chromium.org/251183005
git-svn-id: svn://svn.chromium.org/blink/trunk@172984 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: | void InspectorClientImpl::clearBrowserCache()
| 171,346 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static void coroutine_fn v9fs_link(void *opaque)
{
V9fsPDU *pdu = opaque;
int32_t dfid, oldfid;
V9fsFidState *dfidp, *oldfidp;
V9fsString name;
size_t offset = 7;
int err = 0;
v9fs_string_init(&name);
err = pdu_unmarshal(pdu, offset, "dds", &dfid, &oldfid, &name);
if (err < 0) {
goto out_nofid;
}
trace_v9fs_link(pdu->tag, pdu->id, dfid, oldfid, name.data);
if (name_is_illegal(name.data)) {
err = -ENOENT;
goto out_nofid;
}
if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
err = -EEXIST;
goto out_nofid;
}
dfidp = get_fid(pdu, dfid);
if (dfidp == NULL) {
err = -ENOENT;
goto out_nofid;
}
oldfidp = get_fid(pdu, oldfid);
if (oldfidp == NULL) {
err = -ENOENT;
goto out;
}
err = v9fs_co_link(pdu, oldfidp, dfidp, &name);
if (!err) {
err = offset;
}
out:
put_fid(pdu, dfidp);
out_nofid:
pdu_complete(pdu, err);
}
Commit Message:
CWE ID: CWE-399 | static void coroutine_fn v9fs_link(void *opaque)
{
V9fsPDU *pdu = opaque;
int32_t dfid, oldfid;
V9fsFidState *dfidp, *oldfidp;
V9fsString name;
size_t offset = 7;
int err = 0;
v9fs_string_init(&name);
err = pdu_unmarshal(pdu, offset, "dds", &dfid, &oldfid, &name);
if (err < 0) {
goto out_nofid;
}
trace_v9fs_link(pdu->tag, pdu->id, dfid, oldfid, name.data);
if (name_is_illegal(name.data)) {
err = -ENOENT;
goto out_nofid;
}
if (!strcmp(".", name.data) || !strcmp("..", name.data)) {
err = -EEXIST;
goto out_nofid;
}
dfidp = get_fid(pdu, dfid);
if (dfidp == NULL) {
err = -ENOENT;
goto out_nofid;
}
oldfidp = get_fid(pdu, oldfid);
if (oldfidp == NULL) {
err = -ENOENT;
goto out;
}
err = v9fs_co_link(pdu, oldfidp, dfidp, &name);
if (!err) {
err = offset;
}
put_fid(pdu, oldfidp);
out:
put_fid(pdu, dfidp);
out_nofid:
pdu_complete(pdu, err);
}
| 164,907 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
struct btrfs_path *path,
const char *name, int name_len)
{
struct btrfs_dir_item *dir_item;
unsigned long name_ptr;
u32 total_len;
u32 cur = 0;
u32 this_len;
struct extent_buffer *leaf;
leaf = path->nodes[0];
dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
if (verify_dir_item(root, leaf, dir_item))
return NULL;
total_len = btrfs_item_size_nr(leaf, path->slots[0]);
while (cur < total_len) {
this_len = sizeof(*dir_item) +
btrfs_dir_name_len(leaf, dir_item) +
btrfs_dir_data_len(leaf, dir_item);
name_ptr = (unsigned long)(dir_item + 1);
if (btrfs_dir_name_len(leaf, dir_item) == name_len &&
memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0)
return dir_item;
cur += this_len;
dir_item = (struct btrfs_dir_item *)((char *)dir_item +
this_len);
}
return NULL;
}
Commit Message: Btrfs: make xattr replace operations atomic
Replacing a xattr consists of doing a lookup for its existing value, delete
the current value from the respective leaf, release the search path and then
finally insert the new value. This leaves a time window where readers (getxattr,
listxattrs) won't see any value for the xattr. Xattrs are used to store ACLs,
so this has security implications.
This change also fixes 2 other existing issues which were:
*) Deleting the old xattr value without verifying first if the new xattr will
fit in the existing leaf item (in case multiple xattrs are packed in the
same item due to name hash collision);
*) Returning -EEXIST when the flag XATTR_CREATE is given and the xattr doesn't
exist but we have have an existing item that packs muliple xattrs with
the same name hash as the input xattr. In this case we should return ENOSPC.
A test case for xfstests follows soon.
Thanks to Alexandre Oliva for reporting the non-atomicity of the xattr replace
implementation.
Reported-by: Alexandre Oliva <oliva@gnu.org>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Chris Mason <clm@fb.com>
CWE ID: CWE-362 | static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
struct btrfs_path *path,
const char *name, int name_len)
{
struct btrfs_dir_item *dir_item;
unsigned long name_ptr;
u32 total_len;
u32 cur = 0;
u32 this_len;
struct extent_buffer *leaf;
leaf = path->nodes[0];
dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
if (verify_dir_item(root, leaf, dir_item))
return NULL;
total_len = btrfs_item_size_nr(leaf, path->slots[0]);
while (cur < total_len) {
this_len = sizeof(*dir_item) +
btrfs_dir_name_len(leaf, dir_item) +
btrfs_dir_data_len(leaf, dir_item);
name_ptr = (unsigned long)(dir_item + 1);
if (btrfs_dir_name_len(leaf, dir_item) == name_len &&
memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0)
return dir_item;
cur += this_len;
dir_item = (struct btrfs_dir_item *)((char *)dir_item +
this_len);
}
return NULL;
}
| 166,764 |
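The commit message in the row above describes the race: replacing an xattr as delete-release-insert leaves a window in which readers see no value at all, and the old value was deleted before checking that the new one fits. The generic shape of the fix is to validate first and swap in place while the structure stays locked. A small, self-contained C sketch of that pattern (not the btrfs implementation):

#include <errno.h>
#include <pthread.h>
#include <string.h>

struct xattr_slot {
    pthread_mutex_t lock;
    char value[64];
    size_t len;
};

static int xattr_replace(struct xattr_slot *s, const char *val, size_t len)
{
    int ret = 0;

    pthread_mutex_lock(&s->lock);
    if (len > sizeof(s->value)) {
        ret = -ENOSPC;               /* reject before touching the old value */
    } else {
        memcpy(s->value, val, len);  /* swap in place: readers never see a gap */
        s->len = len;
    }
    pthread_mutex_unlock(&s->lock);
    return ret;
}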
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: key_ref_t keyring_search(key_ref_t keyring,
struct key_type *type,
const char *description)
{
struct keyring_search_context ctx = {
.index_key.type = type,
.index_key.description = description,
.cred = current_cred(),
.match_data.cmp = type->match,
.match_data.raw_data = description,
.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
.flags = KEYRING_SEARCH_DO_STATE_CHECK,
};
key_ref_t key;
int ret;
if (!ctx.match_data.cmp)
return ERR_PTR(-ENOKEY);
if (type->match_preparse) {
ret = type->match_preparse(&ctx.match_data);
if (ret < 0)
return ERR_PTR(ret);
}
key = keyring_search_aux(keyring, &ctx);
if (type->match_free)
type->match_free(&ctx.match_data);
return key;
}
Commit Message: KEYS: Remove key_type::match in favour of overriding default by match_preparse
A previous patch added a ->match_preparse() method to the key type. This is
allowed to override the function called by the iteration algorithm.
Therefore, we can just set a default that simply checks for an exact match of
the key description with the original criterion data and allow match_preparse
to override it as needed.
The key_type::match op is then redundant and can be removed, as can the
user_match() function.
Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
CWE ID: CWE-476 | key_ref_t keyring_search(key_ref_t keyring,
struct key_type *type,
const char *description)
{
struct keyring_search_context ctx = {
.index_key.type = type,
.index_key.description = description,
.cred = current_cred(),
.match_data.cmp = key_default_cmp,
.match_data.raw_data = description,
.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
.flags = KEYRING_SEARCH_DO_STATE_CHECK,
};
key_ref_t key;
int ret;
if (type->match_preparse) {
ret = type->match_preparse(&ctx.match_data);
if (ret < 0)
return ERR_PTR(ret);
}
key = keyring_search_aux(keyring, &ctx);
if (type->match_free)
type->match_free(&ctx.match_data);
return key;
}
| 168,440 |
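The commit message in the row above removes key_type::match in favour of a default comparator that match_preparse() may override. The sketch below shows that dispatch shape with simplified, invented types; it is not the kernel code:

#include <string.h>

struct match_data {
    int (*cmp)(const char *desc, const void *raw);
    const void *raw;
};

/* Default: exact match of the key description against the search string. */
static int default_cmp(const char *desc, const void *raw)
{
    return strcmp(desc, (const char *)raw) == 0;
}

struct key_type_sketch {
    /* Optional hook that may replace match_data.cmp with a type-specific test. */
    int (*match_preparse)(struct match_data *md);
};

static int search_sketch(const struct key_type_sketch *type, const char *description)
{
    struct match_data md = { .cmp = default_cmp, .raw = description };

    if (type->match_preparse && type->match_preparse(&md) < 0)
        return -1;                        /* preparse failed */
    /* A real search would now walk the keyring calling md.cmp() per key. */
    return md.cmp(description, md.raw) ? 0 : -1;
}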
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: cib_remote_signon(cib_t * cib, const char *name, enum cib_conn_type type)
{
int rc = pcmk_ok;
cib_remote_opaque_t *private = cib->variant_opaque;
if (private->passwd == NULL) {
struct termios settings;
int rc;
rc = tcgetattr(0, &settings);
settings.c_lflag &= ~ECHO;
rc = tcsetattr(0, TCSANOW, &settings);
fprintf(stderr, "Password: ");
private->passwd = calloc(1, 1024);
rc = scanf("%s", private->passwd);
fprintf(stdout, "\n");
/* fprintf(stderr, "entered: '%s'\n", buffer); */
if (rc < 1) {
private->passwd = NULL;
}
settings.c_lflag |= ECHO;
rc = tcsetattr(0, TCSANOW, &settings);
}
if (private->server == NULL || private->user == NULL) {
rc = -EINVAL;
}
if (rc == pcmk_ok) {
rc = cib_tls_signon(cib, &(private->command));
}
if (rc == pcmk_ok) {
rc = cib_tls_signon(cib, &(private->callback));
}
if (rc == pcmk_ok) {
xmlNode *hello =
cib_create_op(0, private->callback.token, CRM_OP_REGISTER, NULL, NULL, NULL, 0, NULL);
crm_xml_add(hello, F_CIB_CLIENTNAME, name);
crm_send_remote_msg(private->command.session, hello, private->command.encrypted);
free_xml(hello);
}
if (rc == pcmk_ok) {
fprintf(stderr, "%s: Opened connection to %s:%d\n", name, private->server, private->port);
cib->state = cib_connected_command;
cib->type = cib_command;
} else {
fprintf(stderr, "%s: Connection to %s:%d failed: %s\n",
name, private->server, private->port, pcmk_strerror(rc));
}
return rc;
}
Commit Message: High: core: Internal tls api improvements for reuse with future LRMD tls backend.
CWE ID: CWE-399 | cib_remote_signon(cib_t * cib, const char *name, enum cib_conn_type type)
{
int rc = pcmk_ok;
cib_remote_opaque_t *private = cib->variant_opaque;
if (private->passwd == NULL) {
struct termios settings;
int rc;
rc = tcgetattr(0, &settings);
settings.c_lflag &= ~ECHO;
rc = tcsetattr(0, TCSANOW, &settings);
fprintf(stderr, "Password: ");
private->passwd = calloc(1, 1024);
rc = scanf("%s", private->passwd);
fprintf(stdout, "\n");
/* fprintf(stderr, "entered: '%s'\n", buffer); */
if (rc < 1) {
private->passwd = NULL;
}
settings.c_lflag |= ECHO;
rc = tcsetattr(0, TCSANOW, &settings);
}
if (private->server == NULL || private->user == NULL) {
rc = -EINVAL;
}
if (rc == pcmk_ok) {
rc = cib_tls_signon(cib, &(private->command), FALSE);
}
if (rc == pcmk_ok) {
rc = cib_tls_signon(cib, &(private->callback), TRUE);
}
if (rc == pcmk_ok) {
xmlNode *hello =
cib_create_op(0, private->callback.token, CRM_OP_REGISTER, NULL, NULL, NULL, 0, NULL);
crm_xml_add(hello, F_CIB_CLIENTNAME, name);
crm_send_remote_msg(private->command.session, hello, private->command.encrypted);
free_xml(hello);
}
if (rc == pcmk_ok) {
fprintf(stderr, "%s: Opened connection to %s:%d\n", name, private->server, private->port);
cib->state = cib_connected_command;
cib->type = cib_command;
} else {
fprintf(stderr, "%s: Connection to %s:%d failed: %s\n",
name, private->server, private->port, pcmk_strerror(rc));
}
return rc;
}
| 166,153 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int jpc_dec_decodepkts(jpc_dec_t *dec, jas_stream_t *pkthdrstream, jas_stream_t *in)
{
jpc_dec_tile_t *tile;
jpc_pi_t *pi;
int ret;
tile = dec->curtile;
pi = tile->pi;
for (;;) {
if (!tile->pkthdrstream || jas_stream_peekc(tile->pkthdrstream) == EOF) {
switch (jpc_dec_lookahead(in)) {
case JPC_MS_EOC:
case JPC_MS_SOT:
return 0;
break;
case JPC_MS_SOP:
case JPC_MS_EPH:
case 0:
break;
default:
return -1;
break;
}
}
if ((ret = jpc_pi_next(pi))) {
return ret;
}
if (dec->maxpkts >= 0 && dec->numpkts >= dec->maxpkts) {
jas_eprintf("warning: stopping decode prematurely as requested\n");
return 0;
}
if (jas_getdbglevel() >= 1) {
jas_eprintf("packet offset=%08ld prg=%d cmptno=%02d "
"rlvlno=%02d prcno=%03d lyrno=%02d\n", (long)
jas_stream_getrwcount(in), jpc_pi_prg(pi), jpc_pi_cmptno(pi),
jpc_pi_rlvlno(pi), jpc_pi_prcno(pi), jpc_pi_lyrno(pi));
}
if (jpc_dec_decodepkt(dec, pkthdrstream, in, jpc_pi_cmptno(pi), jpc_pi_rlvlno(pi),
jpc_pi_prcno(pi), jpc_pi_lyrno(pi))) {
return -1;
}
++dec->numpkts;
}
return 0;
}
Commit Message: Fixed numerous integer overflow problems in the code for packet iterators
in the JPC decoder.
CWE ID: CWE-125 | int jpc_dec_decodepkts(jpc_dec_t *dec, jas_stream_t *pkthdrstream, jas_stream_t *in)
{
jpc_dec_tile_t *tile;
jpc_pi_t *pi;
int ret;
tile = dec->curtile;
pi = tile->pi;
for (;;) {
if (!tile->pkthdrstream || jas_stream_peekc(tile->pkthdrstream) == EOF) {
switch (jpc_dec_lookahead(in)) {
case JPC_MS_EOC:
case JPC_MS_SOT:
return 0;
break;
case JPC_MS_SOP:
case JPC_MS_EPH:
case 0:
break;
default:
return -1;
break;
}
}
if ((ret = jpc_pi_next(pi))) {
return ret;
}
if (dec->maxpkts >= 0 && dec->numpkts >= dec->maxpkts) {
jas_eprintf("warning: stopping decode prematurely as requested\n");
return 0;
}
if (jas_getdbglevel() >= 1) {
jas_eprintf("packet offset=%08ld prg=%d cmptno=%02d "
"rlvlno=%02d prcno=%03d lyrno=%02d\n", (long)
jas_stream_getrwcount(in), jpc_pi_prg(pi), jpc_pi_cmptno(pi),
jpc_pi_rlvlno(pi), jpc_pi_prcno(pi), jpc_pi_lyrno(pi));
}
if (jpc_dec_decodepkt(dec, pkthdrstream, in, jpc_pi_cmptno(pi),
jpc_pi_rlvlno(pi), jpc_pi_prcno(pi), jpc_pi_lyrno(pi))) {
return -1;
}
++dec->numpkts;
}
return 0;
}
| 169,443 |
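The commit message in the row above refers to integer-overflow hardening in the packet-iterator arithmetic (the visible function is unchanged apart from call formatting; the checks land in the iterator itself). A generic overflow-checked multiply of the kind such fixes typically add, shown only as an illustration:

#include <limits.h>

/* Overflow-checked multiply for non-negative ints; returns -1 if a*b would overflow. */
static int safe_mul(int a, int b, int *out)
{
    if (a != 0 && b > INT_MAX / a)
        return -1;
    *out = a * b;
    return 0;
}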
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static void encode_frame(vpx_codec_ctx_t *codec,
vpx_image_t *img,
int frame_index,
int flags,
VpxVideoWriter *writer) {
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *pkt = NULL;
const vpx_codec_err_t res = vpx_codec_encode(codec, img, frame_index, 1,
flags, VPX_DL_GOOD_QUALITY);
if (res != VPX_CODEC_OK)
die_codec(codec, "Failed to encode frame");
while ((pkt = vpx_codec_get_cx_data(codec, &iter)) != NULL) {
if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
const int keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0;
if (!vpx_video_writer_write_frame(writer,
pkt->data.frame.buf,
pkt->data.frame.sz,
pkt->data.frame.pts)) {
die_codec(codec, "Failed to write compressed frame");
}
printf(keyframe ? "K" : ".");
fflush(stdout);
}
}
}
Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478
DO NOT MERGE - libvpx: Pull from upstream
Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06
BUG=23452792
Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec
CWE ID: CWE-119 | static void encode_frame(vpx_codec_ctx_t *codec,
static int encode_frame(vpx_codec_ctx_t *codec,
vpx_image_t *img,
int frame_index,
int flags,
VpxVideoWriter *writer) {
int got_pkts = 0;
vpx_codec_iter_t iter = NULL;
const vpx_codec_cx_pkt_t *pkt = NULL;
const vpx_codec_err_t res = vpx_codec_encode(codec, img, frame_index, 1,
flags, VPX_DL_GOOD_QUALITY);
if (res != VPX_CODEC_OK)
die_codec(codec, "Failed to encode frame");
while ((pkt = vpx_codec_get_cx_data(codec, &iter)) != NULL) {
got_pkts = 1;
if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
const int keyframe = (pkt->data.frame.flags & VPX_FRAME_IS_KEY) != 0;
if (!vpx_video_writer_write_frame(writer,
pkt->data.frame.buf,
pkt->data.frame.sz,
pkt->data.frame.pts)) {
die_codec(codec, "Failed to write compressed frame");
}
printf(keyframe ? "K" : ".");
fflush(stdout);
}
}
return got_pkts;
}
| 174,488 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
struct unix_sock *u = unix_sk(sk);
struct sockaddr_un *sunaddr = msg->msg_name;
struct sock *other = NULL;
int namelen = 0; /* fake GCC */
int err;
unsigned int hash;
struct sk_buff *skb;
long timeo;
struct scm_cookie tmp_scm;
int max_level;
int data_len = 0;
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
wait_for_unix_gc();
err = scm_send(sock, msg, siocb->scm);
if (err < 0)
return err;
err = -EOPNOTSUPP;
if (msg->msg_flags&MSG_OOB)
goto out;
if (msg->msg_namelen) {
err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
if (err < 0)
goto out;
namelen = err;
} else {
sunaddr = NULL;
err = -ENOTCONN;
other = unix_peer_get(sk);
if (!other)
goto out;
}
if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
&& (err = unix_autobind(sock)) != 0)
goto out;
err = -EMSGSIZE;
if (len > sk->sk_sndbuf - 32)
goto out;
if (len > SKB_MAX_ALLOC)
data_len = min_t(size_t,
len - SKB_MAX_ALLOC,
MAX_SKB_FRAGS * PAGE_SIZE);
skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
msg->msg_flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto out;
err = unix_scm_to_skb(siocb->scm, skb, true);
if (err < 0)
goto out_free;
max_level = err + 1;
unix_get_secdata(siocb->scm, skb);
skb_put(skb, len - data_len);
skb->data_len = data_len;
skb->len = len;
err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len);
if (err)
goto out_free;
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
restart:
if (!other) {
err = -ECONNRESET;
if (sunaddr == NULL)
goto out_free;
other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
hash, &err);
if (other == NULL)
goto out_free;
}
if (sk_filter(other, skb) < 0) {
/* Toss the packet but do not return any error to the sender */
err = len;
goto out_free;
}
unix_state_lock(other);
err = -EPERM;
if (!unix_may_send(sk, other))
goto out_unlock;
if (sock_flag(other, SOCK_DEAD)) {
/*
* Check with 1003.1g - what should
* datagram error
*/
unix_state_unlock(other);
sock_put(other);
err = 0;
unix_state_lock(sk);
if (unix_peer(sk) == other) {
unix_peer(sk) = NULL;
unix_state_unlock(sk);
unix_dgram_disconnected(sk, other);
sock_put(other);
err = -ECONNREFUSED;
} else {
unix_state_unlock(sk);
}
other = NULL;
if (err)
goto out_free;
goto restart;
}
err = -EPIPE;
if (other->sk_shutdown & RCV_SHUTDOWN)
goto out_unlock;
if (sk->sk_type != SOCK_SEQPACKET) {
err = security_unix_may_send(sk->sk_socket, other->sk_socket);
if (err)
goto out_unlock;
}
if (unix_peer(other) != sk && unix_recvq_full(other)) {
if (!timeo) {
err = -EAGAIN;
goto out_unlock;
}
timeo = unix_wait_for_peer(other, timeo);
err = sock_intr_errno(timeo);
if (signal_pending(current))
goto out_free;
goto restart;
}
if (sock_flag(other, SOCK_RCVTSTAMP))
__net_timestamp(skb);
maybe_add_creds(skb, sock, other);
skb_queue_tail(&other->sk_receive_queue, skb);
if (max_level > unix_sk(other)->recursion_level)
unix_sk(other)->recursion_level = max_level;
unix_state_unlock(other);
other->sk_data_ready(other, len);
sock_put(other);
scm_destroy(siocb->scm);
return len;
out_unlock:
unix_state_unlock(other);
out_free:
kfree_skb(skb);
out:
if (other)
sock_put(other);
scm_destroy(siocb->scm);
return err;
}
Commit Message: af_netlink: force credentials passing [CVE-2012-3520]
Pablo Neira Ayuso discovered that avahi and
potentially NetworkManager accept spoofed Netlink messages because of a
kernel bug. The kernel passes all-zero SCM_CREDENTIALS ancillary data
to the receiver if the sender did not provide such data, instead of not
including any such data at all or including the correct data from the
peer (as it is the case with AF_UNIX).
This bug was introduced in commit 16e572626961
(af_unix: dont send SCM_CREDENTIALS by default)
This patch forces passing credentials for netlink, as
before the regression.
Another fix would be to not add SCM_CREDENTIALS in
netlink messages if not provided by the sender, but it
might break some programs.
With help from Florian Weimer & Petr Matousek
This issue is designated as CVE-2012-3520
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Petr Matousek <pmatouse@redhat.com>
Cc: Florian Weimer <fweimer@redhat.com>
Cc: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
CWE ID: CWE-287 | static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
struct unix_sock *u = unix_sk(sk);
struct sockaddr_un *sunaddr = msg->msg_name;
struct sock *other = NULL;
int namelen = 0; /* fake GCC */
int err;
unsigned int hash;
struct sk_buff *skb;
long timeo;
struct scm_cookie tmp_scm;
int max_level;
int data_len = 0;
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
wait_for_unix_gc();
err = scm_send(sock, msg, siocb->scm, false);
if (err < 0)
return err;
err = -EOPNOTSUPP;
if (msg->msg_flags&MSG_OOB)
goto out;
if (msg->msg_namelen) {
err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
if (err < 0)
goto out;
namelen = err;
} else {
sunaddr = NULL;
err = -ENOTCONN;
other = unix_peer_get(sk);
if (!other)
goto out;
}
if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
&& (err = unix_autobind(sock)) != 0)
goto out;
err = -EMSGSIZE;
if (len > sk->sk_sndbuf - 32)
goto out;
if (len > SKB_MAX_ALLOC)
data_len = min_t(size_t,
len - SKB_MAX_ALLOC,
MAX_SKB_FRAGS * PAGE_SIZE);
skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
msg->msg_flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto out;
err = unix_scm_to_skb(siocb->scm, skb, true);
if (err < 0)
goto out_free;
max_level = err + 1;
unix_get_secdata(siocb->scm, skb);
skb_put(skb, len - data_len);
skb->data_len = data_len;
skb->len = len;
err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len);
if (err)
goto out_free;
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
restart:
if (!other) {
err = -ECONNRESET;
if (sunaddr == NULL)
goto out_free;
other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
hash, &err);
if (other == NULL)
goto out_free;
}
if (sk_filter(other, skb) < 0) {
/* Toss the packet but do not return any error to the sender */
err = len;
goto out_free;
}
unix_state_lock(other);
err = -EPERM;
if (!unix_may_send(sk, other))
goto out_unlock;
if (sock_flag(other, SOCK_DEAD)) {
/*
* Check with 1003.1g - what should
* datagram error
*/
unix_state_unlock(other);
sock_put(other);
err = 0;
unix_state_lock(sk);
if (unix_peer(sk) == other) {
unix_peer(sk) = NULL;
unix_state_unlock(sk);
unix_dgram_disconnected(sk, other);
sock_put(other);
err = -ECONNREFUSED;
} else {
unix_state_unlock(sk);
}
other = NULL;
if (err)
goto out_free;
goto restart;
}
err = -EPIPE;
if (other->sk_shutdown & RCV_SHUTDOWN)
goto out_unlock;
if (sk->sk_type != SOCK_SEQPACKET) {
err = security_unix_may_send(sk->sk_socket, other->sk_socket);
if (err)
goto out_unlock;
}
if (unix_peer(other) != sk && unix_recvq_full(other)) {
if (!timeo) {
err = -EAGAIN;
goto out_unlock;
}
timeo = unix_wait_for_peer(other, timeo);
err = sock_intr_errno(timeo);
if (signal_pending(current))
goto out_free;
goto restart;
}
if (sock_flag(other, SOCK_RCVTSTAMP))
__net_timestamp(skb);
maybe_add_creds(skb, sock, other);
skb_queue_tail(&other->sk_receive_queue, skb);
if (max_level > unix_sk(other)->recursion_level)
unix_sk(other)->recursion_level = max_level;
unix_state_unlock(other);
other->sk_data_ready(other, len);
sock_put(other);
scm_destroy(siocb->scm);
return len;
out_unlock:
unix_state_unlock(other);
out_free:
kfree_skb(skb);
out:
if (other)
sock_put(other);
scm_destroy(siocb->scm);
return err;
}
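The fourth argument now passed to scm_send() is the heart of this fix: AF_UNIX passes false, so credentials are no longer fabricated for senders that never supplied SCM_CREDENTIALS, while a caller that does want credentials synthesized (e.g. netlink) passes true. A simplified sketch of what the reworked helper looks like (approximate, for illustration only):

/* Sketch of scm_send() with the added forcecreds flag (simplified). */
static __inline__ int scm_send(struct socket *sock, struct msghdr *msg,
			       struct scm_cookie *scm, bool forcecreds)
{
	memset(scm, 0, sizeof(*scm));
	if (forcecreds)
		scm_set_cred(scm, task_tgid(current), current_cred());
	unix_get_peersec_dgram(sock, scm);
	if (msg->msg_controllen <= 0)
		return 0;
	return __scm_send(sock, msg, scm);
}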
| 165,579 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: FileTransfer::DoUpload(filesize_t *total_bytes, ReliSock *s)
{
int rc;
MyString fullname;
filesize_t bytes;
bool is_the_executable;
bool upload_success = false;
bool do_download_ack = false;
bool do_upload_ack = false;
bool try_again = false;
int hold_code = 0;
int hold_subcode = 0;
MyString error_desc;
bool I_go_ahead_always = false;
bool peer_goes_ahead_always = false;
DCTransferQueue xfer_queue(m_xfer_queue_contact_info);
CondorError errstack;
bool first_failed_file_transfer_happened = false;
bool first_failed_upload_success = false;
bool first_failed_try_again = false;
int first_failed_hold_code = 0;
int first_failed_hold_subcode = 0;
MyString first_failed_error_desc;
int first_failed_line_number;
*total_bytes = 0;
dprintf(D_FULLDEBUG,"entering FileTransfer::DoUpload\n");
priv_state saved_priv = PRIV_UNKNOWN;
if( want_priv_change ) {
saved_priv = set_priv( desired_priv_state );
}
s->encode();
if( !s->code(m_final_transfer_flag) ) {
dprintf(D_FULLDEBUG,"DoUpload: exiting at %d\n",__LINE__);
return_and_resetpriv( -1 );
}
if( !s->end_of_message() ) {
dprintf(D_FULLDEBUG,"DoUpload: exiting at %d\n",__LINE__);
return_and_resetpriv( -1 );
}
bool socket_default_crypto = s->get_encryption();
if( want_priv_change && saved_priv == PRIV_UNKNOWN ) {
saved_priv = set_priv( desired_priv_state );
}
FileTransferList filelist;
ExpandFileTransferList( FilesToSend, filelist );
FileTransferList::iterator filelist_it;
for( filelist_it = filelist.begin();
filelist_it != filelist.end();
filelist_it++ )
{
char const *filename = filelist_it->srcName();
char const *dest_dir = filelist_it->destDir();
if( dest_dir && *dest_dir ) {
dprintf(D_FULLDEBUG,"DoUpload: sending file %s to %s%c\n",filename,dest_dir,DIR_DELIM_CHAR);
}
else {
dprintf(D_FULLDEBUG,"DoUpload: sending file %s\n",filename);
}
bool is_url;
is_url = false;
if( param_boolean("ENABLE_URL_TRANSFERS", true) && IsUrl(filename) ) {
is_url = true;
fullname = filename;
dprintf(D_FULLDEBUG, "DoUpload: sending %s as URL.\n", filename);
} else if( filename[0] != '/' && filename[0] != '\\' && filename[1] != ':' ){
fullname.sprintf("%s%c%s",Iwd,DIR_DELIM_CHAR,filename);
} else {
fullname = filename;
}
#ifdef WIN32
if( !is_url && perm_obj && !is_the_executable &&
(perm_obj->read_access(fullname.Value()) != 1) ) {
upload_success = false;
error_desc.sprintf("error reading from %s: permission denied",fullname.Value());
do_upload_ack = true; // tell receiver that we failed
do_download_ack = true;
try_again = false; // put job on hold
hold_code = CONDOR_HOLD_CODE_UploadFileError;
hold_subcode = EPERM;
return ExitDoUpload(total_bytes,s,saved_priv,socket_default_crypto,
upload_success,do_upload_ack,do_download_ack,
try_again,hold_code,hold_subcode,
error_desc.Value(),__LINE__);
}
#endif
if (is_the_executable) {} // Done to get rid of the compiler set-but-not-used warnings.
int file_command = 1;
int file_subcommand = 0;
if ( DontEncryptFiles->file_contains_withwildcard(filename) ) {
file_command = 3;
}
if ( EncryptFiles->file_contains_withwildcard(filename) ) {
file_command = 2;
}
if ( X509UserProxy && file_strcmp( filename, X509UserProxy ) == 0 &&
DelegateX509Credentials ) {
file_command = 4;
}
if ( is_url ) {
file_command = 5;
}
if ( m_final_transfer_flag && OutputDestination ) {
dprintf(D_FULLDEBUG, "FILETRANSFER: Using command 999:7 for OutputDestionation: %s\n",
OutputDestination);
file_command = 999;
file_subcommand = 7;
}
bool fail_because_mkdir_not_supported = false;
bool fail_because_symlink_not_supported = false;
if( filelist_it->is_directory ) {
if( filelist_it->is_symlink ) {
fail_because_symlink_not_supported = true;
dprintf(D_ALWAYS,"DoUpload: attempting to transfer symlink %s which points to a directory. This is not supported.\n",filename);
}
else if( PeerUnderstandsMkdir ) {
file_command = 6;
}
else {
fail_because_mkdir_not_supported = true;
dprintf(D_ALWAYS,"DoUpload: attempting to transfer directory %s, but the version of Condor we are talking to is too old to support that!\n",
filename);
}
}
dprintf ( D_FULLDEBUG, "FILETRANSFER: outgoing file_command is %i for %s\n",
file_command, filename );
if( !s->snd_int(file_command,FALSE) ) {
dprintf(D_FULLDEBUG,"DoUpload: exiting at %d\n",__LINE__);
return_and_resetpriv( -1 );
}
if( !s->end_of_message() ) {
dprintf(D_FULLDEBUG,"DoUpload: exiting at %d\n",__LINE__);
return_and_resetpriv( -1 );
}
if (file_command == 2) {
s->set_crypto_mode(true);
} else if (file_command == 3) {
s->set_crypto_mode(false);
}
else {
s->set_crypto_mode(socket_default_crypto);
}
MyString dest_filename;
if ( ExecFile && !simple_init && (file_strcmp(ExecFile,filename)==0 )) {
is_the_executable = true;
dest_filename = CONDOR_EXEC;
} else {
is_the_executable = false;
if( dest_dir && *dest_dir ) {
dest_filename.sprintf("%s%c",dest_dir,DIR_DELIM_CHAR);
}
dest_filename.sprintf_cat( condor_basename(filename) );
}
if( !s->put(dest_filename.Value()) ) {
dprintf(D_FULLDEBUG,"DoUpload: exiting at %d\n",__LINE__);
return_and_resetpriv( -1 );
}
if( PeerDoesGoAhead ) {
if( !s->end_of_message() ) {
dprintf(D_FULLDEBUG, "DoUpload: failed on eom before GoAhead; exiting at %d\n",__LINE__);
return_and_resetpriv( -1 );
}
if( !peer_goes_ahead_always ) {
if( !ReceiveTransferGoAhead(s,fullname.Value(),false,peer_goes_ahead_always) ) {
dprintf(D_FULLDEBUG, "DoUpload: exiting at %d\n",__LINE__);
return_and_resetpriv( -1 );
}
}
if( !I_go_ahead_always ) {
if( !ObtainAndSendTransferGoAhead(xfer_queue,false,s,fullname.Value(),I_go_ahead_always) ) {
dprintf(D_FULLDEBUG, "DoUpload: exiting at %d\n",__LINE__);
return_and_resetpriv( -1 );
}
}
s->encode();
}
if ( file_command == 999) {
ClassAd file_info;
file_info.Assign("ProtocolVersion", 1);
file_info.Assign("Command", file_command);
file_info.Assign("SubCommand", file_subcommand);
if(file_subcommand == 7) {
MyString source_filename;
source_filename = Iwd;
source_filename += DIR_DELIM_CHAR;
source_filename += filename;
MyString URL;
URL = OutputDestination;
URL += DIR_DELIM_CHAR;
URL += filename;
dprintf (D_FULLDEBUG, "DoUpload: calling IFTP(fn,U): fn\"%s\", U\"%s\"\n", source_filename.Value(), URL.Value());
dprintf (D_FULLDEBUG, "LocalProxyName: %s\n", LocalProxyName.Value());
rc = InvokeFileTransferPlugin(errstack, source_filename.Value(), URL.Value(), LocalProxyName.Value());
dprintf (D_FULLDEBUG, "DoUpload: IFTP(fn,U): fn\"%s\", U\"%s\" returns %i\n", source_filename.Value(), URL.Value(), rc);
file_info.Assign("Filename", source_filename);
file_info.Assign("OutputDestination", URL);
file_info.Assign("Result", rc);
if (rc) {
file_info.Assign("ErrorString", errstack.getFullText());
}
if(!file_info.put(*s)) {
dprintf(D_FULLDEBUG,"DoDownload: exiting at %d\n",__LINE__);
return_and_resetpriv( -1 );
}
MyString junkbuf;
file_info.sPrint(junkbuf);
bytes = junkbuf.Length();
} else {
dprintf( D_ALWAYS, "DoUpload: invalid subcommand %i, skipping %s.",
file_subcommand, filename);
bytes = 0;
rc = 0;
}
} else if ( file_command == 4 ) {
if ( (PeerDoesGoAhead || s->end_of_message()) ) {
time_t expiration_time = GetDesiredDelegatedJobCredentialExpiration(&jobAd);
rc = s->put_x509_delegation( &bytes, fullname.Value(), expiration_time, NULL );
dprintf( D_FULLDEBUG,
"DoUpload: put_x509_delegation() returned %d\n",
rc );
} else {
rc = -1;
}
} else if (file_command == 5) {
if(!s->code(fullname)) {
dprintf( D_FULLDEBUG, "DoUpload: failed to send fullname: %s\n", fullname.Value());
rc = -1;
} else {
dprintf( D_FULLDEBUG, "DoUpload: sent fullname and NO eom: %s\n", fullname.Value());
rc = 0;
}
bytes = fullname.Length();
} else if( file_command == 6 ) { // mkdir
bytes = sizeof( filelist_it->file_mode );
if( !s->put( filelist_it->file_mode ) ) {
rc = -1;
dprintf(D_ALWAYS,"DoUpload: failed to send mkdir mode\n");
}
else {
rc = 0;
}
} else if( fail_because_mkdir_not_supported || fail_because_symlink_not_supported ) {
if( TransferFilePermissions ) {
rc = s->put_file_with_permissions( &bytes, NULL_FILE );
}
else {
rc = s->put_file( &bytes, NULL_FILE );
}
if( rc == 0 ) {
rc = PUT_FILE_OPEN_FAILED;
errno = EISDIR;
}
} else if ( TransferFilePermissions ) {
rc = s->put_file_with_permissions( &bytes, fullname.Value() );
} else {
rc = s->put_file( &bytes, fullname.Value() );
}
if( rc < 0 ) {
int the_error = errno;
upload_success = false;
error_desc.sprintf("error sending %s",fullname.Value());
if((rc == PUT_FILE_OPEN_FAILED) || (rc == PUT_FILE_PLUGIN_FAILED)) {
if (rc == PUT_FILE_OPEN_FAILED) {
error_desc.replaceString("sending","reading from");
error_desc.sprintf_cat(": (errno %d) %s",the_error,strerror(the_error));
if( fail_because_mkdir_not_supported ) {
error_desc.sprintf_cat("; Remote condor version is too old to transfer directories.");
}
if( fail_because_symlink_not_supported ) {
error_desc.sprintf_cat("; Transfer of symlinks to directories is not supported.");
}
} else {
error_desc.sprintf_cat(": %s", errstack.getFullText());
}
try_again = false; // put job on hold
hold_code = CONDOR_HOLD_CODE_UploadFileError;
hold_subcode = the_error;
if (first_failed_file_transfer_happened == false) {
first_failed_file_transfer_happened = true;
first_failed_upload_success = false;
first_failed_try_again = false;
first_failed_hold_code = hold_code;
first_failed_hold_subcode = the_error;
first_failed_error_desc = error_desc;
first_failed_line_number = __LINE__;
}
}
else {
do_download_ack = true;
do_upload_ack = false;
try_again = true;
return ExitDoUpload(total_bytes,s,saved_priv,
socket_default_crypto,upload_success,
do_upload_ack,do_download_ack,
try_again,hold_code,hold_subcode,
error_desc.Value(),__LINE__);
}
}
if( !s->end_of_message() ) {
dprintf(D_FULLDEBUG,"DoUpload: exiting at %d\n",__LINE__);
return_and_resetpriv( -1 );
}
*total_bytes += bytes;
if( dest_filename.FindChar(DIR_DELIM_CHAR) < 0 &&
dest_filename != condor_basename(JobStdoutFile.Value()) &&
dest_filename != condor_basename(JobStderrFile.Value()) )
{
Info.addSpooledFile( dest_filename.Value() );
}
}
do_download_ack = true;
do_upload_ack = true;
if (first_failed_file_transfer_happened == true) {
return ExitDoUpload(total_bytes,s,saved_priv,socket_default_crypto,
first_failed_upload_success,do_upload_ack,do_download_ack,
first_failed_try_again,first_failed_hold_code,
first_failed_hold_subcode,first_failed_error_desc.Value(),
first_failed_line_number);
}
upload_success = true;
return ExitDoUpload(total_bytes,s,saved_priv,socket_default_crypto,
upload_success,do_upload_ack,do_download_ack,
try_again,hold_code,hold_subcode,NULL,__LINE__);
}
Commit Message:
CWE ID: CWE-134 | FileTransfer::DoUpload(filesize_t *total_bytes, ReliSock *s)
{
int rc;
MyString fullname;
filesize_t bytes;
bool is_the_executable;
bool upload_success = false;
bool do_download_ack = false;
bool do_upload_ack = false;
bool try_again = false;
int hold_code = 0;
int hold_subcode = 0;
MyString error_desc;
bool I_go_ahead_always = false;
bool peer_goes_ahead_always = false;
DCTransferQueue xfer_queue(m_xfer_queue_contact_info);
CondorError errstack;
bool first_failed_file_transfer_happened = false;
bool first_failed_upload_success = false;
bool first_failed_try_again = false;
int first_failed_hold_code = 0;
int first_failed_hold_subcode = 0;
MyString first_failed_error_desc;
int first_failed_line_number;
*total_bytes = 0;
dprintf(D_FULLDEBUG,"entering FileTransfer::DoUpload\n");
priv_state saved_priv = PRIV_UNKNOWN;
if( want_priv_change ) {
saved_priv = set_priv( desired_priv_state );
}
s->encode();
if( !s->code(m_final_transfer_flag) ) {
dprintf(D_FULLDEBUG,"DoUpload: exiting at %d\n",__LINE__);
return_and_resetpriv( -1 );
}
if( !s->end_of_message() ) {
dprintf(D_FULLDEBUG,"DoUpload: exiting at %d\n",__LINE__);
return_and_resetpriv( -1 );
}
bool socket_default_crypto = s->get_encryption();
if( want_priv_change && saved_priv == PRIV_UNKNOWN ) {
saved_priv = set_priv( desired_priv_state );
}
FileTransferList filelist;
ExpandFileTransferList( FilesToSend, filelist );
FileTransferList::iterator filelist_it;
for( filelist_it = filelist.begin();
filelist_it != filelist.end();
filelist_it++ )
{
char const *filename = filelist_it->srcName();
char const *dest_dir = filelist_it->destDir();
if( dest_dir && *dest_dir ) {
dprintf(D_FULLDEBUG,"DoUpload: sending file %s to %s%c\n",filename,dest_dir,DIR_DELIM_CHAR);
}
else {
dprintf(D_FULLDEBUG,"DoUpload: sending file %s\n",filename);
}
bool is_url;
is_url = false;
if( param_boolean("ENABLE_URL_TRANSFERS", true) && IsUrl(filename) ) {
is_url = true;
fullname = filename;
dprintf(D_FULLDEBUG, "DoUpload: sending %s as URL.\n", filename);
} else if( filename[0] != '/' && filename[0] != '\\' && filename[1] != ':' ){
fullname.sprintf("%s%c%s",Iwd,DIR_DELIM_CHAR,filename);
} else {
fullname = filename;
}
#ifdef WIN32
if( !is_url && perm_obj && !is_the_executable &&
(perm_obj->read_access(fullname.Value()) != 1) ) {
upload_success = false;
error_desc.sprintf("error reading from %s: permission denied",fullname.Value());
do_upload_ack = true; // tell receiver that we failed
do_download_ack = true;
try_again = false; // put job on hold
hold_code = CONDOR_HOLD_CODE_UploadFileError;
hold_subcode = EPERM;
return ExitDoUpload(total_bytes,s,saved_priv,socket_default_crypto,
upload_success,do_upload_ack,do_download_ack,
try_again,hold_code,hold_subcode,
error_desc.Value(),__LINE__);
}
#endif
if (is_the_executable) {} // Done to get rid of the compiler set-but-not-used warnings.
int file_command = 1;
int file_subcommand = 0;
if ( DontEncryptFiles->file_contains_withwildcard(filename) ) {
file_command = 3;
}
if ( EncryptFiles->file_contains_withwildcard(filename) ) {
file_command = 2;
}
if ( X509UserProxy && file_strcmp( filename, X509UserProxy ) == 0 &&
DelegateX509Credentials ) {
file_command = 4;
}
if ( is_url ) {
file_command = 5;
}
if ( m_final_transfer_flag && OutputDestination ) {
dprintf(D_FULLDEBUG, "FILETRANSFER: Using command 999:7 for OutputDestionation: %s\n",
OutputDestination);
file_command = 999;
file_subcommand = 7;
}
bool fail_because_mkdir_not_supported = false;
bool fail_because_symlink_not_supported = false;
if( filelist_it->is_directory ) {
if( filelist_it->is_symlink ) {
fail_because_symlink_not_supported = true;
dprintf(D_ALWAYS,"DoUpload: attempting to transfer symlink %s which points to a directory. This is not supported.\n",filename);
}
else if( PeerUnderstandsMkdir ) {
file_command = 6;
}
else {
fail_because_mkdir_not_supported = true;
dprintf(D_ALWAYS,"DoUpload: attempting to transfer directory %s, but the version of Condor we are talking to is too old to support that!\n",
filename);
}
}
dprintf ( D_FULLDEBUG, "FILETRANSFER: outgoing file_command is %i for %s\n",
file_command, filename );
if( !s->snd_int(file_command,FALSE) ) {
dprintf(D_FULLDEBUG,"DoUpload: exiting at %d\n",__LINE__);
return_and_resetpriv( -1 );
}
if( !s->end_of_message() ) {
dprintf(D_FULLDEBUG,"DoUpload: exiting at %d\n",__LINE__);
return_and_resetpriv( -1 );
}
if (file_command == 2) {
s->set_crypto_mode(true);
} else if (file_command == 3) {
s->set_crypto_mode(false);
}
else {
s->set_crypto_mode(socket_default_crypto);
}
MyString dest_filename;
if ( ExecFile && !simple_init && (file_strcmp(ExecFile,filename)==0 )) {
is_the_executable = true;
dest_filename = CONDOR_EXEC;
} else {
is_the_executable = false;
if( dest_dir && *dest_dir ) {
dest_filename.sprintf("%s%c",dest_dir,DIR_DELIM_CHAR);
}
dest_filename.sprintf_cat( "%s", condor_basename(filename) );
}
if( !s->put(dest_filename.Value()) ) {
dprintf(D_FULLDEBUG,"DoUpload: exiting at %d\n",__LINE__);
return_and_resetpriv( -1 );
}
if( PeerDoesGoAhead ) {
if( !s->end_of_message() ) {
dprintf(D_FULLDEBUG, "DoUpload: failed on eom before GoAhead; exiting at %d\n",__LINE__);
return_and_resetpriv( -1 );
}
if( !peer_goes_ahead_always ) {
if( !ReceiveTransferGoAhead(s,fullname.Value(),false,peer_goes_ahead_always) ) {
dprintf(D_FULLDEBUG, "DoUpload: exiting at %d\n",__LINE__);
return_and_resetpriv( -1 );
}
}
if( !I_go_ahead_always ) {
if( !ObtainAndSendTransferGoAhead(xfer_queue,false,s,fullname.Value(),I_go_ahead_always) ) {
dprintf(D_FULLDEBUG, "DoUpload: exiting at %d\n",__LINE__);
return_and_resetpriv( -1 );
}
}
s->encode();
}
if ( file_command == 999) {
ClassAd file_info;
file_info.Assign("ProtocolVersion", 1);
file_info.Assign("Command", file_command);
file_info.Assign("SubCommand", file_subcommand);
if(file_subcommand == 7) {
MyString source_filename;
source_filename = Iwd;
source_filename += DIR_DELIM_CHAR;
source_filename += filename;
MyString URL;
URL = OutputDestination;
URL += DIR_DELIM_CHAR;
URL += filename;
dprintf (D_FULLDEBUG, "DoUpload: calling IFTP(fn,U): fn\"%s\", U\"%s\"\n", source_filename.Value(), URL.Value());
dprintf (D_FULLDEBUG, "LocalProxyName: %s\n", LocalProxyName.Value());
rc = InvokeFileTransferPlugin(errstack, source_filename.Value(), URL.Value(), LocalProxyName.Value());
dprintf (D_FULLDEBUG, "DoUpload: IFTP(fn,U): fn\"%s\", U\"%s\" returns %i\n", source_filename.Value(), URL.Value(), rc);
file_info.Assign("Filename", source_filename);
file_info.Assign("OutputDestination", URL);
file_info.Assign("Result", rc);
if (rc) {
file_info.Assign("ErrorString", errstack.getFullText());
}
if(!file_info.put(*s)) {
dprintf(D_FULLDEBUG,"DoDownload: exiting at %d\n",__LINE__);
return_and_resetpriv( -1 );
}
MyString junkbuf;
file_info.sPrint(junkbuf);
bytes = junkbuf.Length();
} else {
dprintf( D_ALWAYS, "DoUpload: invalid subcommand %i, skipping %s.",
file_subcommand, filename);
bytes = 0;
rc = 0;
}
} else if ( file_command == 4 ) {
if ( (PeerDoesGoAhead || s->end_of_message()) ) {
time_t expiration_time = GetDesiredDelegatedJobCredentialExpiration(&jobAd);
rc = s->put_x509_delegation( &bytes, fullname.Value(), expiration_time, NULL );
dprintf( D_FULLDEBUG,
"DoUpload: put_x509_delegation() returned %d\n",
rc );
} else {
rc = -1;
}
} else if (file_command == 5) {
if(!s->code(fullname)) {
dprintf( D_FULLDEBUG, "DoUpload: failed to send fullname: %s\n", fullname.Value());
rc = -1;
} else {
dprintf( D_FULLDEBUG, "DoUpload: sent fullname and NO eom: %s\n", fullname.Value());
rc = 0;
}
bytes = fullname.Length();
} else if( file_command == 6 ) { // mkdir
bytes = sizeof( filelist_it->file_mode );
if( !s->put( filelist_it->file_mode ) ) {
rc = -1;
dprintf(D_ALWAYS,"DoUpload: failed to send mkdir mode\n");
}
else {
rc = 0;
}
} else if( fail_because_mkdir_not_supported || fail_because_symlink_not_supported ) {
if( TransferFilePermissions ) {
rc = s->put_file_with_permissions( &bytes, NULL_FILE );
}
else {
rc = s->put_file( &bytes, NULL_FILE );
}
if( rc == 0 ) {
rc = PUT_FILE_OPEN_FAILED;
errno = EISDIR;
}
} else if ( TransferFilePermissions ) {
rc = s->put_file_with_permissions( &bytes, fullname.Value() );
} else {
rc = s->put_file( &bytes, fullname.Value() );
}
if( rc < 0 ) {
int the_error = errno;
upload_success = false;
error_desc.sprintf("error sending %s",fullname.Value());
if((rc == PUT_FILE_OPEN_FAILED) || (rc == PUT_FILE_PLUGIN_FAILED)) {
if (rc == PUT_FILE_OPEN_FAILED) {
error_desc.replaceString("sending","reading from");
error_desc.sprintf_cat(": (errno %d) %s",the_error,strerror(the_error));
if( fail_because_mkdir_not_supported ) {
error_desc.sprintf_cat("; Remote condor version is too old to transfer directories.");
}
if( fail_because_symlink_not_supported ) {
error_desc.sprintf_cat("; Transfer of symlinks to directories is not supported.");
}
} else {
error_desc.sprintf_cat(": %s", errstack.getFullText());
}
try_again = false; // put job on hold
hold_code = CONDOR_HOLD_CODE_UploadFileError;
hold_subcode = the_error;
if (first_failed_file_transfer_happened == false) {
first_failed_file_transfer_happened = true;
first_failed_upload_success = false;
first_failed_try_again = false;
first_failed_hold_code = hold_code;
first_failed_hold_subcode = the_error;
first_failed_error_desc = error_desc;
first_failed_line_number = __LINE__;
}
}
else {
do_download_ack = true;
do_upload_ack = false;
try_again = true;
return ExitDoUpload(total_bytes,s,saved_priv,
socket_default_crypto,upload_success,
do_upload_ack,do_download_ack,
try_again,hold_code,hold_subcode,
error_desc.Value(),__LINE__);
}
}
if( !s->end_of_message() ) {
dprintf(D_FULLDEBUG,"DoUpload: exiting at %d\n",__LINE__);
return_and_resetpriv( -1 );
}
*total_bytes += bytes;
if( dest_filename.FindChar(DIR_DELIM_CHAR) < 0 &&
dest_filename != condor_basename(JobStdoutFile.Value()) &&
dest_filename != condor_basename(JobStderrFile.Value()) )
{
Info.addSpooledFile( dest_filename.Value() );
}
}
do_download_ack = true;
do_upload_ack = true;
if (first_failed_file_transfer_happened == true) {
return ExitDoUpload(total_bytes,s,saved_priv,socket_default_crypto,
first_failed_upload_success,do_upload_ack,do_download_ack,
first_failed_try_again,first_failed_hold_code,
first_failed_hold_subcode,first_failed_error_desc.Value(),
first_failed_line_number);
}
upload_success = true;
return ExitDoUpload(total_bytes,s,saved_priv,socket_default_crypto,
upload_success,do_upload_ack,do_download_ack,
try_again,hold_code,hold_subcode,NULL,__LINE__);
}
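The functional change buried in this large output is the added "%s" when appending the basename: the file name is no longer used as a format string. The CWE-134 pattern in miniature (hypothetical buffer and variable names, not Condor source):

// Illustrative sketch only.
buf.sprintf_cat(untrusted_name);        // vulnerable: '%' sequences in the name are interpreted
buf.sprintf_cat("%s", untrusted_name);  // fixed: the name is copied as plain data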
| 165,386 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: ActionReply Smb4KMountHelper::mount(const QVariantMap &args)
{
ActionReply reply;
reply.addData("mh_mountpoint", args["mh_mountpoint"]);
command << args["mh_unc"].toString();
command << args["mh_mountpoint"].toString();
command << args["mh_options"].toStringList();
#elif defined(Q_OS_FREEBSD) || defined(Q_OS_NETBSD)
command << args["mh_command"].toString();
command << args["mh_options"].toStringList();
command << args["mh_unc"].toString();
command << args["mh_mountpoint"].toString();
#else
#endif
proc.setProgram(command);
proc.start();
if (proc.waitForStarted(-1))
{
bool user_kill = false;
QStringList command;
#if defined(Q_OS_LINUX)
command << args["mh_command"].toString();
command << args["mh_unc"].toString();
command << args["mh_mountpoint"].toString();
command << args["mh_options"].toStringList();
#elif defined(Q_OS_FREEBSD) || defined(Q_OS_NETBSD)
command << args["mh_command"].toString();
command << args["mh_options"].toStringList();
command << args["mh_unc"].toString();
command << args["mh_mountpoint"].toString();
{
}
if (HelperSupport::isStopped())
{
proc.kill();
user_kill = true;
break;
}
else
{
}
}
if (proc.exitStatus() == KProcess::CrashExit)
{
if (!user_kill)
{
reply.setErrorCode(ActionReply::HelperError);
reply.setErrorDescription(i18n("The mount process crashed."));
return reply;
}
else
{
}
}
else
{
QString stdErr = QString::fromUtf8(proc.readAllStandardError());
reply.addData("mh_error_message", stdErr.trimmed());
}
}
Commit Message:
CWE ID: CWE-20 | ActionReply Smb4KMountHelper::mount(const QVariantMap &args)
{
ActionReply reply;
//
// Get the mount executable
//
const QString mount = findMountExecutable();
//
// Check the executable
//
if (mount != args["mh_command"].toString())
{
// Something weird is going on, bail out.
reply.setErrorCode(ActionReply::HelperError);
reply.setErrorDescription(i18n("Wrong executable passed. Bailing out."));
return reply;
}
else
{
// Do nothing
}
reply.addData("mh_mountpoint", args["mh_mountpoint"]);
command << args["mh_unc"].toString();
command << args["mh_mountpoint"].toString();
command << args["mh_options"].toStringList();
#elif defined(Q_OS_FREEBSD) || defined(Q_OS_NETBSD)
command << args["mh_command"].toString();
command << args["mh_options"].toStringList();
command << args["mh_unc"].toString();
command << args["mh_mountpoint"].toString();
#else
#endif
proc.setProgram(command);
proc.start();
if (proc.waitForStarted(-1))
{
bool user_kill = false;
QStringList command;
#if defined(Q_OS_LINUX)
command << mount;
command << args["mh_unc"].toString();
command << args["mh_mountpoint"].toString();
command << args["mh_options"].toStringList();
#elif defined(Q_OS_FREEBSD) || defined(Q_OS_NETBSD)
command << mount;
command << args["mh_options"].toStringList();
command << args["mh_unc"].toString();
command << args["mh_mountpoint"].toString();
{
}
if (HelperSupport::isStopped())
{
proc.kill();
user_kill = true;
break;
}
else
{
}
}
if (proc.exitStatus() == KProcess::CrashExit)
{
if (!user_kill)
{
reply.setErrorCode(ActionReply::HelperError);
reply.setErrorDescription(i18n("The mount process crashed."));
return reply;
}
else
{
}
}
else
{
QString stdErr = QString::fromUtf8(proc.readAllStandardError());
reply.addData("mh_error_message", stdErr.trimmed());
}
}
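The CWE-20 repair is the guard added at the top of the fixed function: the privileged helper resolves the mount binary itself via findMountExecutable() and refuses to run whatever path the unprivileged caller supplied. The principle in miniature (hypothetical helper shape, not Smb4K source):

// A privileged helper must not execute a caller-supplied program path.
const QString trusted = findMountExecutable();             // resolved by the helper itself
if (trusted != args["mh_command"].toString())
    return errorReply("Wrong executable passed. Bailing out.");
command << trusted;                                        // never the caller's string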
| 164,826 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
assert((size_t)CDF_SHORT_SEC_SIZE(h) == len);
(void)memcpy(((char *)buf) + offs,
((const char *)sst->sst_tab) + CDF_SHORT_SEC_POS(h, id), len);
return len;
}
Commit Message: add more checks found by CERT's fuzzer.
CWE ID: CWE-119 | cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
if (sst->sst_len < (size_t)id) {
DPRINTF(("bad sector id %d > %d\n", id, sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
((const char *)sst->sst_tab) + pos, len);
return len;
}
| 169,884 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
struct page *pages[NFS4ACL_MAXPAGES];
struct nfs_getaclargs args = {
.fh = NFS_FH(inode),
.acl_pages = pages,
.acl_len = buflen,
};
struct nfs_getaclres res = {
.acl_len = buflen,
};
void *resp_buf;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
.rpc_argp = &args,
.rpc_resp = &res,
};
struct page *localpage = NULL;
int ret;
if (buflen < PAGE_SIZE) {
/* As long as we're doing a round trip to the server anyway,
* let's be prepared for a page of acl data. */
localpage = alloc_page(GFP_KERNEL);
resp_buf = page_address(localpage);
if (localpage == NULL)
return -ENOMEM;
args.acl_pages[0] = localpage;
args.acl_pgbase = 0;
args.acl_len = PAGE_SIZE;
} else {
resp_buf = buf;
buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
}
ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
if (ret)
goto out_free;
if (res.acl_len > args.acl_len)
nfs4_write_cached_acl(inode, NULL, res.acl_len);
else
nfs4_write_cached_acl(inode, resp_buf, res.acl_len);
if (buf) {
ret = -ERANGE;
if (res.acl_len > buflen)
goto out_free;
if (localpage)
memcpy(buf, resp_buf, res.acl_len);
}
ret = res.acl_len;
out_free:
if (localpage)
__free_page(localpage);
return ret;
}
Commit Message: NFSv4: include bitmap in nfsv4 get acl data
The NFSv4 bitmap size is unbounded: a server can return an arbitrary
sized bitmap in an FATTR4_WORD0_ACL request. Replace using the
nfs4_fattr_bitmap_maxsz as a guess to the maximum bitmask returned by a server
with the inclusion of the bitmap (xdr length plus bitmasks) and the acl data
xdr length to the (cached) acl page data.
This is a general solution to commit e5012d1f "NFSv4.1: update
nfs4_fattr_bitmap_maxsz" and fixes hitting a BUG_ON in xdr_shrink_bufhead
when getting ACLs.
Fix a bug in decode_getacl that returned -EINVAL on ACLs > page when getxattr
was called with a NULL buffer, preventing ACL > PAGE_SIZE from being retrieved.
Cc: stable@kernel.org
Signed-off-by: Andy Adamson <andros@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
CWE ID: CWE-189 | static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
{
struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
struct nfs_getaclargs args = {
.fh = NFS_FH(inode),
.acl_pages = pages,
.acl_len = buflen,
};
struct nfs_getaclres res = {
.acl_len = buflen,
};
void *resp_buf;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
.rpc_argp = &args,
.rpc_resp = &res,
};
int ret = -ENOMEM, npages, i, acl_len = 0;
npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* As long as we're doing a round trip to the server anyway,
* let's be prepared for a page of acl data. */
if (npages == 0)
npages = 1;
for (i = 0; i < npages; i++) {
pages[i] = alloc_page(GFP_KERNEL);
if (!pages[i])
goto out_free;
}
if (npages > 1) {
/* for decoding across pages */
args.acl_scratch = alloc_page(GFP_KERNEL);
if (!args.acl_scratch)
goto out_free;
}
args.acl_len = npages * PAGE_SIZE;
args.acl_pgbase = 0;
/* Let decode_getfacl know not to fail if the ACL data is larger than
* the page we send as a guess */
if (buf == NULL)
res.acl_flags |= NFS4_ACL_LEN_REQUEST;
resp_buf = page_address(pages[0]);
dprintk("%s buf %p buflen %ld npages %d args.acl_len %ld\n",
__func__, buf, buflen, npages, args.acl_len);
ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
&msg, &args.seq_args, &res.seq_res, 0);
if (ret)
goto out_free;
acl_len = res.acl_len - res.acl_data_offset;
if (acl_len > args.acl_len)
nfs4_write_cached_acl(inode, NULL, acl_len);
else
nfs4_write_cached_acl(inode, resp_buf + res.acl_data_offset,
acl_len);
if (buf) {
ret = -ERANGE;
if (acl_len > buflen)
goto out_free;
_copy_from_pages(buf, pages, res.acl_data_offset,
res.acl_len);
}
ret = acl_len;
out_free:
for (i = 0; i < npages; i++)
if (pages[i])
__free_page(pages[i]);
if (args.acl_scratch)
__free_page(args.acl_scratch);
return ret;
}
| 165,716 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: Ins_IUP( INS_ARG )
{
IUP_WorkerRec V;
FT_Byte mask;
FT_UInt first_point; /* first point of contour */
FT_UInt end_point; /* end point (last+1) of contour */
FT_UInt first_touched; /* first touched point in contour */
FT_UInt cur_touched; /* current touched point in contour */
FT_UInt point; /* current point */
FT_Short contour; /* current contour */
FT_UNUSED_ARG;
/* ignore empty outlines */
if ( CUR.pts.n_contours == 0 )
return;
if ( CUR.opcode & 1 )
{
mask = FT_CURVE_TAG_TOUCH_X;
V.orgs = CUR.pts.org;
V.curs = CUR.pts.cur;
V.orus = CUR.pts.orus;
}
else
{
mask = FT_CURVE_TAG_TOUCH_Y;
V.orgs = (FT_Vector*)( (FT_Pos*)CUR.pts.org + 1 );
V.curs = (FT_Vector*)( (FT_Pos*)CUR.pts.cur + 1 );
V.orus = (FT_Vector*)( (FT_Pos*)CUR.pts.orus + 1 );
}
V.max_points = CUR.pts.n_points;
contour = 0;
point = 0;
do
{
end_point = CUR.pts.contours[contour] - CUR.pts.first_point;
first_point = point;
if ( CUR.pts.n_points <= end_point )
end_point = CUR.pts.n_points;
while ( point <= end_point && ( CUR.pts.tags[point] & mask ) == 0 )
point++;
if ( point <= end_point )
{
first_touched = point;
cur_touched = point;
point++;
while ( point <= end_point )
{
if ( ( CUR.pts.tags[point] & mask ) != 0 )
{
if ( point > 0 )
_iup_worker_interpolate( &V,
cur_touched + 1,
point - 1,
cur_touched,
point );
cur_touched = point;
}
point++;
}
if ( cur_touched == first_touched )
_iup_worker_shift( &V, first_point, end_point, cur_touched );
else
{
_iup_worker_interpolate( &V,
(FT_UShort)( cur_touched + 1 ),
end_point,
cur_touched,
first_touched );
if ( first_touched > 0 )
_iup_worker_interpolate( &V,
first_point,
first_touched - 1,
cur_touched,
first_touched );
}
}
contour++;
} while ( contour < CUR.pts.n_contours );
}
Commit Message:
CWE ID: CWE-119 | Ins_IUP( INS_ARG )
{
IUP_WorkerRec V;
FT_Byte mask;
FT_UInt first_point; /* first point of contour */
FT_UInt end_point; /* end point (last+1) of contour */
FT_UInt first_touched; /* first touched point in contour */
FT_UInt cur_touched; /* current touched point in contour */
FT_UInt point; /* current point */
FT_Short contour; /* current contour */
FT_UNUSED_ARG;
/* ignore empty outlines */
if ( CUR.pts.n_contours == 0 )
return;
if ( CUR.opcode & 1 )
{
mask = FT_CURVE_TAG_TOUCH_X;
V.orgs = CUR.pts.org;
V.curs = CUR.pts.cur;
V.orus = CUR.pts.orus;
}
else
{
mask = FT_CURVE_TAG_TOUCH_Y;
V.orgs = (FT_Vector*)( (FT_Pos*)CUR.pts.org + 1 );
V.curs = (FT_Vector*)( (FT_Pos*)CUR.pts.cur + 1 );
V.orus = (FT_Vector*)( (FT_Pos*)CUR.pts.orus + 1 );
}
V.max_points = CUR.pts.n_points;
contour = 0;
point = 0;
do
{
end_point = CUR.pts.contours[contour] - CUR.pts.first_point;
first_point = point;
if ( BOUNDS ( end_point, CUR.pts.n_points ) )
end_point = CUR.pts.n_points - 1;
while ( point <= end_point && ( CUR.pts.tags[point] & mask ) == 0 )
point++;
if ( point <= end_point )
{
first_touched = point;
cur_touched = point;
point++;
while ( point <= end_point )
{
if ( ( CUR.pts.tags[point] & mask ) != 0 )
{
if ( point > 0 )
_iup_worker_interpolate( &V,
cur_touched + 1,
point - 1,
cur_touched,
point );
cur_touched = point;
}
point++;
}
if ( cur_touched == first_touched )
_iup_worker_shift( &V, first_point, end_point, cur_touched );
else
{
_iup_worker_interpolate( &V,
(FT_UShort)( cur_touched + 1 ),
end_point,
cur_touched,
first_touched );
if ( first_touched > 0 )
_iup_worker_interpolate( &V,
first_point,
first_touched - 1,
cur_touched,
first_touched );
}
}
contour++;
} while ( contour < CUR.pts.n_contours );
}
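The fix replaces the off-by-one clamp (end_point = CUR.pts.n_points) with FreeType's BOUNDS test and clamps to the last valid index instead. BOUNDS in the TrueType interpreter is essentially an unsigned greater-or-equal comparison; a simplified sketch (not the verbatim FreeType definition):

/* Simplified sketch of the bounds test used above. */
#define BOUNDS( x, n )  ( (FT_UInt)(x) >= (FT_UInt)(n) )

if ( BOUNDS( end_point, CUR.pts.n_points ) )     /* index out of range      */
  end_point = CUR.pts.n_points - 1;              /* clamp to the last point */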
| 165,002 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int ttusbdecfe_dvbs_diseqc_send_master_cmd(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd *cmd)
{
struct ttusbdecfe_state* state = (struct ttusbdecfe_state*) fe->demodulator_priv;
u8 b[] = { 0x00, 0xff, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00 };
memcpy(&b[4], cmd->msg, cmd->msg_len);
state->config->send_command(fe, 0x72,
sizeof(b) - (6 - cmd->msg_len), b,
NULL, NULL);
return 0;
}
Commit Message: [media] ttusb-dec: buffer overflow in ioctl
We need to add a limit check here so we don't overflow the buffer.
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
CWE ID: CWE-119 | static int ttusbdecfe_dvbs_diseqc_send_master_cmd(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd *cmd)
{
struct ttusbdecfe_state* state = (struct ttusbdecfe_state*) fe->demodulator_priv;
u8 b[] = { 0x00, 0xff, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00 };
if (cmd->msg_len > sizeof(b) - 4)
return -EINVAL;
memcpy(&b[4], cmd->msg, cmd->msg_len);
state->config->send_command(fe, 0x72,
sizeof(b) - (6 - cmd->msg_len), b,
NULL, NULL);
return 0;
}
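The commit message reduces to one rule: a caller-controlled length must be validated against the destination buffer before memcpy(). The generic guard, with hypothetical names:

/* Generic CWE-119 guard (sketch): reject lengths the fixed-size buffer cannot hold. */
if (msg_len > sizeof(buf) - header_len)
	return -EINVAL;
memcpy(buf + header_len, msg, msg_len);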
| 166,241 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int jp2_box_put(jp2_box_t *box, jas_stream_t *out)
{
jas_stream_t *tmpstream;
bool extlen;
bool dataflag;
tmpstream = 0;
dataflag = !(box->info->flags & (JP2_BOX_SUPER | JP2_BOX_NODATA));
if (dataflag) {
if (!(tmpstream = jas_stream_memopen(0, 0))) {
goto error;
}
if (box->ops->putdata) {
if ((*box->ops->putdata)(box, tmpstream)) {
goto error;
}
}
box->len = jas_stream_tell(tmpstream) + JP2_BOX_HDRLEN(false);
jas_stream_rewind(tmpstream);
}
extlen = (box->len >= (((uint_fast64_t)1) << 32)) != 0;
if (jp2_putuint32(out, extlen ? 1 : box->len)) {
goto error;
}
if (jp2_putuint32(out, box->type)) {
goto error;
}
if (extlen) {
if (jp2_putuint64(out, box->len)) {
goto error;
}
}
if (dataflag) {
if (jas_stream_copy(out, tmpstream, box->len - JP2_BOX_HDRLEN(false))) {
goto error;
}
jas_stream_close(tmpstream);
}
return 0;
error:
if (tmpstream) {
jas_stream_close(tmpstream);
}
return -1;
}
Commit Message: Fixed bugs due to uninitialized data in the JP2 decoder.
Also, added some comments marking I/O stream interfaces that probably
need to be changed (in the long term) to fix integer overflow problems.
CWE ID: CWE-476 | int jp2_box_put(jp2_box_t *box, jas_stream_t *out)
{
jas_stream_t *tmpstream;
bool extlen;
bool dataflag;
tmpstream = 0;
dataflag = !(box->info->flags & (JP2_BOX_SUPER | JP2_BOX_NODATA));
if (dataflag) {
if (!(tmpstream = jas_stream_memopen(0, 0))) {
goto error;
}
if (box->ops->putdata) {
if ((*box->ops->putdata)(box, tmpstream)) {
goto error;
}
}
box->len = jas_stream_tell(tmpstream) + JP2_BOX_HDRLEN(false);
jas_stream_rewind(tmpstream);
}
extlen = (box->len >= (((uint_fast64_t)1) << 32)) != 0;
if (jp2_putuint32(out, extlen ? 1 : box->len)) {
goto error;
}
if (jp2_putuint32(out, box->type)) {
goto error;
}
if (extlen) {
if (jp2_putuint64(out, box->len)) {
goto error;
}
}
if (dataflag) {
if (jas_stream_copy(out, tmpstream, box->len -
JP2_BOX_HDRLEN(false))) {
jas_eprintf("cannot copy box data\n");
goto error;
}
jas_stream_close(tmpstream);
}
return 0;
error:
if (tmpstream) {
jas_stream_close(tmpstream);
}
return -1;
}
| 168,319 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int handle_exception(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct kvm_run *kvm_run = vcpu->run;
u32 intr_info, ex_no, error_code;
unsigned long cr2, rip, dr6;
u32 vect_info;
enum emulation_result er;
vect_info = vmx->idt_vectoring_info;
intr_info = vmx->exit_intr_info;
if (is_machine_check(intr_info))
return handle_machine_check(vcpu);
if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
return 1; /* already handled by vmx_vcpu_run() */
if (is_no_device(intr_info)) {
vmx_fpu_activate(vcpu);
return 1;
}
if (is_invalid_opcode(intr_info)) {
if (is_guest_mode(vcpu)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
if (er != EMULATE_DONE)
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
error_code = 0;
if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
/*
* The #PF with PFEC.RSVD = 1 indicates the guest is accessing
* MMIO, it is better to report an internal error.
* See the comments in vmx_handle_exit.
*/
if ((vect_info & VECTORING_INFO_VALID_MASK) &&
!(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
vcpu->run->internal.ndata = 3;
vcpu->run->internal.data[0] = vect_info;
vcpu->run->internal.data[1] = intr_info;
vcpu->run->internal.data[2] = error_code;
return 0;
}
if (is_page_fault(intr_info)) {
/* EPT won't cause page fault directly */
BUG_ON(enable_ept);
cr2 = vmcs_readl(EXIT_QUALIFICATION);
trace_kvm_page_fault(cr2, error_code);
if (kvm_event_needs_reinjection(vcpu))
kvm_mmu_unprotect_page_virt(vcpu, cr2);
return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0);
}
ex_no = intr_info & INTR_INFO_VECTOR_MASK;
if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
return handle_rmode_exception(vcpu, ex_no, error_code);
switch (ex_no) {
case DB_VECTOR:
dr6 = vmcs_readl(EXIT_QUALIFICATION);
if (!(vcpu->guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
vcpu->arch.dr6 &= ~15;
vcpu->arch.dr6 |= dr6 | DR6_RTM;
if (!(dr6 & ~DR6_RESERVED)) /* icebp */
skip_emulated_instruction(vcpu);
kvm_queue_exception(vcpu, DB_VECTOR);
return 1;
}
kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
/* fall through */
case BP_VECTOR:
/*
* Update instruction length as we may reinject #BP from
* user space while in guest debugging mode. Reading it for
* #DB as well causes no harm, it is not used in that case.
*/
vmx->vcpu.arch.event_exit_inst_len =
vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
kvm_run->exit_reason = KVM_EXIT_DEBUG;
rip = kvm_rip_read(vcpu);
kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
kvm_run->debug.arch.exception = ex_no;
break;
default:
kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
kvm_run->ex.exception = ex_no;
kvm_run->ex.error_code = error_code;
break;
}
return 0;
}
Commit Message: KVM: x86: work around infinite loop in microcode when #AC is delivered
It was found that a guest can DoS a host by triggering an infinite
stream of "alignment check" (#AC) exceptions. This causes the
microcode to enter an infinite loop where the core never receives
another interrupt. The host kernel panics pretty quickly due to the
effects (CVE-2015-5307).
Signed-off-by: Eric Northup <digitaleric@google.com>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
CWE ID: CWE-399 | static int handle_exception(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct kvm_run *kvm_run = vcpu->run;
u32 intr_info, ex_no, error_code;
unsigned long cr2, rip, dr6;
u32 vect_info;
enum emulation_result er;
vect_info = vmx->idt_vectoring_info;
intr_info = vmx->exit_intr_info;
if (is_machine_check(intr_info))
return handle_machine_check(vcpu);
if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
return 1; /* already handled by vmx_vcpu_run() */
if (is_no_device(intr_info)) {
vmx_fpu_activate(vcpu);
return 1;
}
if (is_invalid_opcode(intr_info)) {
if (is_guest_mode(vcpu)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
if (er != EMULATE_DONE)
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
error_code = 0;
if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
/*
* The #PF with PFEC.RSVD = 1 indicates the guest is accessing
* MMIO, it is better to report an internal error.
* See the comments in vmx_handle_exit.
*/
if ((vect_info & VECTORING_INFO_VALID_MASK) &&
!(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
vcpu->run->internal.ndata = 3;
vcpu->run->internal.data[0] = vect_info;
vcpu->run->internal.data[1] = intr_info;
vcpu->run->internal.data[2] = error_code;
return 0;
}
if (is_page_fault(intr_info)) {
/* EPT won't cause page fault directly */
BUG_ON(enable_ept);
cr2 = vmcs_readl(EXIT_QUALIFICATION);
trace_kvm_page_fault(cr2, error_code);
if (kvm_event_needs_reinjection(vcpu))
kvm_mmu_unprotect_page_virt(vcpu, cr2);
return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0);
}
ex_no = intr_info & INTR_INFO_VECTOR_MASK;
if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
return handle_rmode_exception(vcpu, ex_no, error_code);
switch (ex_no) {
case AC_VECTOR:
kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
return 1;
case DB_VECTOR:
dr6 = vmcs_readl(EXIT_QUALIFICATION);
if (!(vcpu->guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
vcpu->arch.dr6 &= ~15;
vcpu->arch.dr6 |= dr6 | DR6_RTM;
if (!(dr6 & ~DR6_RESERVED)) /* icebp */
skip_emulated_instruction(vcpu);
kvm_queue_exception(vcpu, DB_VECTOR);
return 1;
}
kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
/* fall through */
case BP_VECTOR:
/*
* Update instruction length as we may reinject #BP from
* user space while in guest debugging mode. Reading it for
* #DB as well causes no harm, it is not used in that case.
*/
vmx->vcpu.arch.event_exit_inst_len =
vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
kvm_run->exit_reason = KVM_EXIT_DEBUG;
rip = kvm_rip_read(vcpu);
kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
kvm_run->debug.arch.exception = ex_no;
break;
default:
kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
kvm_run->ex.exception = ex_no;
kvm_run->ex.error_code = error_code;
break;
}
return 0;
}
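Intercepting #AC in handle_exception() only helps if the VM exit is taken at all; the same upstream fix also adds AC_VECTOR to the VMX exception bitmap (that hunk is not part of this row). Roughly, as a sketch:

/* Sketch of the companion change in update_exception_bitmap(): always trap #AC. */
eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
     (1u << NM_VECTOR) | (1u << DB_VECTOR) | (1u << AC_VECTOR);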
| 166,599 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void ImageLoader::DoUpdateFromElement(BypassMainWorldBehavior bypass_behavior,
UpdateFromElementBehavior update_behavior,
const KURL& url,
ReferrerPolicy referrer_policy,
UpdateType update_type) {
pending_task_.reset();
std::unique_ptr<IncrementLoadEventDelayCount> load_delay_counter;
load_delay_counter.swap(delay_until_do_update_from_element_);
Document& document = element_->GetDocument();
if (!document.IsActive())
return;
AtomicString image_source_url = element_->ImageSourceURL();
ImageResourceContent* new_image_content = nullptr;
if (!url.IsNull() && !url.IsEmpty()) {
ResourceLoaderOptions resource_loader_options;
resource_loader_options.initiator_info.name = GetElement()->localName();
ResourceRequest resource_request(url);
if (update_behavior == kUpdateForcedReload) {
resource_request.SetCacheMode(mojom::FetchCacheMode::kBypassCache);
resource_request.SetPreviewsState(WebURLRequest::kPreviewsNoTransform);
}
if (referrer_policy != kReferrerPolicyDefault) {
resource_request.SetHTTPReferrer(SecurityPolicy::GenerateReferrer(
referrer_policy, url, document.OutgoingReferrer()));
}
if (IsHTMLPictureElement(GetElement()->parentNode()) ||
!GetElement()->FastGetAttribute(HTMLNames::srcsetAttr).IsNull())
resource_request.SetRequestContext(
WebURLRequest::kRequestContextImageSet);
bool page_is_being_dismissed =
document.PageDismissalEventBeingDispatched() != Document::kNoDismissal;
if (page_is_being_dismissed) {
resource_request.SetHTTPHeaderField(HTTPNames::Cache_Control,
"max-age=0");
resource_request.SetKeepalive(true);
resource_request.SetRequestContext(WebURLRequest::kRequestContextPing);
}
FetchParameters params(resource_request, resource_loader_options);
ConfigureRequest(params, bypass_behavior, *element_,
document.GetClientHintsPreferences());
if (update_behavior != kUpdateForcedReload && document.GetFrame())
document.GetFrame()->MaybeAllowImagePlaceholder(params);
new_image_content = ImageResourceContent::Fetch(params, document.Fetcher());
if (page_is_being_dismissed)
new_image_content = nullptr;
ClearFailedLoadURL();
} else {
if (!image_source_url.IsNull()) {
DispatchErrorEvent();
}
NoImageResourceToLoad();
}
ImageResourceContent* old_image_content = image_content_.Get();
if (old_image_content != new_image_content)
RejectPendingDecodes(update_type);
if (update_behavior == kUpdateSizeChanged && element_->GetLayoutObject() &&
element_->GetLayoutObject()->IsImage() &&
new_image_content == old_image_content) {
ToLayoutImage(element_->GetLayoutObject())->IntrinsicSizeChanged();
} else {
if (pending_load_event_.IsActive())
pending_load_event_.Cancel();
if (pending_error_event_.IsActive() && new_image_content)
pending_error_event_.Cancel();
UpdateImageState(new_image_content);
UpdateLayoutObject();
if (new_image_content) {
new_image_content->AddObserver(this);
}
if (old_image_content) {
old_image_content->RemoveObserver(this);
}
}
if (LayoutImageResource* image_resource = GetLayoutImageResource())
image_resource->ResetAnimation();
}
Commit Message: Use correct Request Context when EMBED or OBJECT requests an image
When an OBJECT or EMBED element requests an image, it does so using
an ImageLoader. To ensure that Content-Security-Policy restrictions
are applied correctly in this scenario, we must adjust the request's
context to indicate the originating element.
Bug: 811691
Change-Id: I0fd8010970a12e68e845a54310695acc0b3f7625
Reviewed-on: https://chromium-review.googlesource.com/924589
Commit-Queue: Eric Lawrence <elawrence@chromium.org>
Reviewed-by: Mike West <mkwst@chromium.org>
Cr-Commit-Position: refs/heads/master@{#537846}
CWE ID: CWE-20 | void ImageLoader::DoUpdateFromElement(BypassMainWorldBehavior bypass_behavior,
UpdateFromElementBehavior update_behavior,
const KURL& url,
ReferrerPolicy referrer_policy,
UpdateType update_type) {
pending_task_.reset();
std::unique_ptr<IncrementLoadEventDelayCount> load_delay_counter;
load_delay_counter.swap(delay_until_do_update_from_element_);
Document& document = element_->GetDocument();
if (!document.IsActive())
return;
AtomicString image_source_url = element_->ImageSourceURL();
ImageResourceContent* new_image_content = nullptr;
if (!url.IsNull() && !url.IsEmpty()) {
ResourceLoaderOptions resource_loader_options;
resource_loader_options.initiator_info.name = GetElement()->localName();
ResourceRequest resource_request(url);
if (update_behavior == kUpdateForcedReload) {
resource_request.SetCacheMode(mojom::FetchCacheMode::kBypassCache);
resource_request.SetPreviewsState(WebURLRequest::kPreviewsNoTransform);
}
if (referrer_policy != kReferrerPolicyDefault) {
resource_request.SetHTTPReferrer(SecurityPolicy::GenerateReferrer(
referrer_policy, url, document.OutgoingReferrer()));
}
// Correct the RequestContext if necessary.
if (IsHTMLPictureElement(GetElement()->parentNode()) ||
!GetElement()->FastGetAttribute(HTMLNames::srcsetAttr).IsNull()) {
resource_request.SetRequestContext(
WebURLRequest::kRequestContextImageSet);
} else if (IsHTMLObjectElement(GetElement())) {
resource_request.SetRequestContext(WebURLRequest::kRequestContextObject);
} else if (IsHTMLEmbedElement(GetElement())) {
resource_request.SetRequestContext(WebURLRequest::kRequestContextEmbed);
}
bool page_is_being_dismissed =
document.PageDismissalEventBeingDispatched() != Document::kNoDismissal;
if (page_is_being_dismissed) {
resource_request.SetHTTPHeaderField(HTTPNames::Cache_Control,
"max-age=0");
resource_request.SetKeepalive(true);
resource_request.SetRequestContext(WebURLRequest::kRequestContextPing);
}
FetchParameters params(resource_request, resource_loader_options);
ConfigureRequest(params, bypass_behavior, *element_,
document.GetClientHintsPreferences());
if (update_behavior != kUpdateForcedReload && document.GetFrame())
document.GetFrame()->MaybeAllowImagePlaceholder(params);
new_image_content = ImageResourceContent::Fetch(params, document.Fetcher());
if (page_is_being_dismissed)
new_image_content = nullptr;
ClearFailedLoadURL();
} else {
if (!image_source_url.IsNull()) {
DispatchErrorEvent();
}
NoImageResourceToLoad();
}
ImageResourceContent* old_image_content = image_content_.Get();
if (old_image_content != new_image_content)
RejectPendingDecodes(update_type);
if (update_behavior == kUpdateSizeChanged && element_->GetLayoutObject() &&
element_->GetLayoutObject()->IsImage() &&
new_image_content == old_image_content) {
ToLayoutImage(element_->GetLayoutObject())->IntrinsicSizeChanged();
} else {
if (pending_load_event_.IsActive())
pending_load_event_.Cancel();
if (pending_error_event_.IsActive() && new_image_content)
pending_error_event_.Cancel();
UpdateImageState(new_image_content);
UpdateLayoutObject();
if (new_image_content) {
new_image_content->AddObserver(this);
}
if (old_image_content) {
old_image_content->RemoveObserver(this);
}
}
if (LayoutImageResource* image_resource = GetLayoutImageResource())
image_resource->ResetAnimation();
}
| 172,792 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
int id;
int retval;
struct sem_array *sma;
int size;
key_t key = params->key;
int nsems = params->u.nsems;
int semflg = params->flg;
int i;
if (!nsems)
return -EINVAL;
if (ns->used_sems + nsems > ns->sc_semmns)
return -ENOSPC;
size = sizeof (*sma) + nsems * sizeof (struct sem);
sma = ipc_rcu_alloc(size);
if (!sma) {
return -ENOMEM;
}
memset (sma, 0, size);
sma->sem_perm.mode = (semflg & S_IRWXUGO);
sma->sem_perm.key = key;
sma->sem_perm.security = NULL;
retval = security_sem_alloc(sma);
if (retval) {
ipc_rcu_putref(sma);
return retval;
}
id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
if (id < 0) {
security_sem_free(sma);
ipc_rcu_putref(sma);
return id;
}
ns->used_sems += nsems;
sma->sem_base = (struct sem *) &sma[1];
for (i = 0; i < nsems; i++)
INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
sma->complex_count = 0;
INIT_LIST_HEAD(&sma->sem_pending);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
sem_unlock(sma);
return sma->sem_perm.id;
}
Commit Message: ipc,sem: fine grained locking for semtimedop
Introduce finer grained locking for semtimedop, to handle the common case
of a program wanting to manipulate one semaphore from an array with
multiple semaphores.
If the call is a semop manipulating just one semaphore in an array with
multiple semaphores, only take the lock for that semaphore itself.
If the call needs to manipulate multiple semaphores, or another caller is
in a transaction that manipulates multiple semaphores, the sem_array lock
is taken, as well as all the locks for the individual semaphores.
On a 24 CPU system, performance numbers with the semop-multi
test with N threads and N semaphores, look like this:
	threads    vanilla    Davidlohr's    Davidlohr's +     Davidlohr's +
	                      patches        rwlock patches    v3 patches
	 10        610652     726325         1783589           2142206
	 20        341570     365699         1520453           1977878
	 30        288102     307037         1498167           2037995
	 40        290714     305955         1612665           2256484
	 50        288620     312890         1733453           2650292
	 60        289987     306043         1649360           2388008
	 70        291298     306347         1723167           2717486
	 80        290948     305662         1729545           2763582
	 90        290996     306680         1736021           2757524
	100        292243     306700         1773700           3059159
[davidlohr.bueso@hp.com: do not call sem_lock when bogus sma]
[davidlohr.bueso@hp.com: make refcounter atomic]
Signed-off-by: Rik van Riel <riel@redhat.com>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Davidlohr Bueso <davidlohr.bueso@hp.com>
Cc: Chegu Vinod <chegu_vinod@hp.com>
Cc: Jason Low <jason.low2@hp.com>
Reviewed-by: Michel Lespinasse <walken@google.com>
Cc: Peter Hurley <peter@hurleysoftware.com>
Cc: Stanislav Kinsbursky <skinsbursky@parallels.com>
Tested-by: Emmanuel Benisty <benisty.e@gmail.com>
Tested-by: Sedat Dilek <sedat.dilek@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
CWE ID: CWE-189 | static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
int id;
int retval;
struct sem_array *sma;
int size;
key_t key = params->key;
int nsems = params->u.nsems;
int semflg = params->flg;
int i;
if (!nsems)
return -EINVAL;
if (ns->used_sems + nsems > ns->sc_semmns)
return -ENOSPC;
size = sizeof (*sma) + nsems * sizeof (struct sem);
sma = ipc_rcu_alloc(size);
if (!sma) {
return -ENOMEM;
}
memset (sma, 0, size);
sma->sem_perm.mode = (semflg & S_IRWXUGO);
sma->sem_perm.key = key;
sma->sem_perm.security = NULL;
retval = security_sem_alloc(sma);
if (retval) {
ipc_rcu_putref(sma);
return retval;
}
id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
if (id < 0) {
security_sem_free(sma);
ipc_rcu_putref(sma);
return id;
}
ns->used_sems += nsems;
sma->sem_base = (struct sem *) &sma[1];
for (i = 0; i < nsems; i++) {
INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
spin_lock_init(&sma->sem_base[i].lock);
}
sma->complex_count = 0;
INIT_LIST_HEAD(&sma->sem_pending);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
sem_unlock(sma, -1);
return sma->sem_perm.id;
}
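newary() only lays the groundwork for the scheme described in the commit message by giving each semaphore its own spinlock; the actual locking decision lives in the semtimedop path. A much-simplified illustration of that decision (not the kernel's sem_lock()):

/* Illustration only: single-sop callers lock one semaphore,
 * complex callers take the array lock plus every per-semaphore lock. */
static void sem_lock_sketch(struct sem_array *sma, struct sembuf *sops, int nsops)
{
	int i;

	if (nsops == 1 && !sma->complex_count) {
		spin_lock(&sma->sem_base[sops->sem_num].lock);	/* fine grained */
		return;
	}
	spin_lock(&sma->sem_perm.lock);				/* whole array */
	for (i = 0; i < sma->sem_nsems; i++)
		spin_lock(&sma->sem_base[i].lock);
}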
| 165,972 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: pcap_ng_check_header(const uint8_t *magic, FILE *fp, u_int precision,
char *errbuf, int *err)
{
bpf_u_int32 magic_int;
size_t amt_read;
bpf_u_int32 total_length;
bpf_u_int32 byte_order_magic;
struct block_header *bhdrp;
struct section_header_block *shbp;
pcap_t *p;
int swapped = 0;
struct pcap_ng_sf *ps;
int status;
struct block_cursor cursor;
struct interface_description_block *idbp;
/*
* Assume no read errors.
*/
*err = 0;
/*
* Check whether the first 4 bytes of the file are the block
* type for a pcapng savefile.
*/
memcpy(&magic_int, magic, sizeof(magic_int));
if (magic_int != BT_SHB) {
/*
* XXX - check whether this looks like what the block
* type would be after being munged by mapping between
* UN*X and DOS/Windows text file format and, if it
* does, look for the byte-order magic number in
* the appropriate place and, if we find it, report
* this as possibly being a pcapng file transferred
* between UN*X and Windows in text file format?
*/
return (NULL); /* nope */
}
/*
* OK, they are. However, that's just \n\r\r\n, so it could,
* conceivably, be an ordinary text file.
*
* It could not, however, conceivably be any other type of
* capture file, so we can read the rest of the putative
* Section Header Block; put the block type in the common
* header, read the rest of the common header and the
* fixed-length portion of the SHB, and look for the byte-order
* magic value.
*/
amt_read = fread(&total_length, 1, sizeof(total_length), fp);
if (amt_read < sizeof(total_length)) {
if (ferror(fp)) {
pcap_fmt_errmsg_for_errno(errbuf, PCAP_ERRBUF_SIZE,
errno, "error reading dump file");
*err = 1;
return (NULL); /* fail */
}
/*
* Possibly a weird short text file, so just say
* "not pcapng".
*/
return (NULL);
}
amt_read = fread(&byte_order_magic, 1, sizeof(byte_order_magic), fp);
if (amt_read < sizeof(byte_order_magic)) {
if (ferror(fp)) {
pcap_fmt_errmsg_for_errno(errbuf, PCAP_ERRBUF_SIZE,
errno, "error reading dump file");
*err = 1;
return (NULL); /* fail */
}
/*
* Possibly a weird short text file, so just say
* "not pcapng".
*/
return (NULL);
}
if (byte_order_magic != BYTE_ORDER_MAGIC) {
byte_order_magic = SWAPLONG(byte_order_magic);
if (byte_order_magic != BYTE_ORDER_MAGIC) {
/*
* Not a pcapng file.
*/
return (NULL);
}
swapped = 1;
total_length = SWAPLONG(total_length);
}
/*
* Check the sanity of the total length.
*/
if (total_length < sizeof(*bhdrp) + sizeof(*shbp) + sizeof(struct block_trailer) ||
(total_length > BT_SHB_INSANE_MAX)) {
pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE,
"Section Header Block in pcapng dump file has invalid length %" PRIsize " < _%lu_ < %lu (BT_SHB_INSANE_MAX)",
sizeof(*bhdrp) + sizeof(*shbp) + sizeof(struct block_trailer),
total_length,
BT_SHB_INSANE_MAX);
*err = 1;
return (NULL);
}
/*
* OK, this is a good pcapng file.
* Allocate a pcap_t for it.
*/
p = pcap_open_offline_common(errbuf, sizeof (struct pcap_ng_sf));
if (p == NULL) {
/* Allocation failed. */
*err = 1;
return (NULL);
}
p->swapped = swapped;
ps = p->priv;
/*
* What precision does the user want?
*/
switch (precision) {
case PCAP_TSTAMP_PRECISION_MICRO:
ps->user_tsresol = 1000000;
break;
case PCAP_TSTAMP_PRECISION_NANO:
ps->user_tsresol = 1000000000;
break;
default:
pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE,
"unknown time stamp resolution %u", precision);
free(p);
*err = 1;
return (NULL);
}
p->opt.tstamp_precision = precision;
/*
* Allocate a buffer into which to read blocks. We default to
* the maximum of:
*
* the total length of the SHB for which we read the header;
*
* 2K, which should be more than large enough for an Enhanced
* Packet Block containing a full-size Ethernet frame, and
* leaving room for some options.
*
* If we find a bigger block, we reallocate the buffer, up to
* the maximum size. We start out with a maximum size of
* INITIAL_MAX_BLOCKSIZE; if we see any link-layer header types
* with a maximum snapshot that results in a larger maximum
* block length, we boost the maximum.
*/
p->bufsize = 2048;
if (p->bufsize < total_length)
p->bufsize = total_length;
p->buffer = malloc(p->bufsize);
if (p->buffer == NULL) {
pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE, "out of memory");
free(p);
*err = 1;
return (NULL);
}
ps->max_blocksize = INITIAL_MAX_BLOCKSIZE;
/*
* Copy the stuff we've read to the buffer, and read the rest
* of the SHB.
*/
bhdrp = (struct block_header *)p->buffer;
shbp = (struct section_header_block *)((u_char *)p->buffer + sizeof(struct block_header));
bhdrp->block_type = magic_int;
bhdrp->total_length = total_length;
shbp->byte_order_magic = byte_order_magic;
if (read_bytes(fp,
(u_char *)p->buffer + (sizeof(magic_int) + sizeof(total_length) + sizeof(byte_order_magic)),
total_length - (sizeof(magic_int) + sizeof(total_length) + sizeof(byte_order_magic)),
1, errbuf) == -1)
goto fail;
if (p->swapped) {
/*
* Byte-swap the fields we've read.
*/
shbp->major_version = SWAPSHORT(shbp->major_version);
shbp->minor_version = SWAPSHORT(shbp->minor_version);
/*
* XXX - we don't care about the section length.
*/
}
/* currently only SHB version 1.0 is supported */
if (! (shbp->major_version == PCAP_NG_VERSION_MAJOR &&
shbp->minor_version == PCAP_NG_VERSION_MINOR)) {
pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE,
"unsupported pcapng savefile version %u.%u",
shbp->major_version, shbp->minor_version);
goto fail;
}
p->version_major = shbp->major_version;
p->version_minor = shbp->minor_version;
/*
* Save the time stamp resolution the user requested.
*/
p->opt.tstamp_precision = precision;
/*
* Now start looking for an Interface Description Block.
*/
for (;;) {
/*
* Read the next block.
*/
status = read_block(fp, p, &cursor, errbuf);
if (status == 0) {
/* EOF - no IDB in this file */
pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE,
"the capture file has no Interface Description Blocks");
goto fail;
}
if (status == -1)
goto fail; /* error */
switch (cursor.block_type) {
case BT_IDB:
/*
* Get a pointer to the fixed-length portion of the
* IDB.
*/
idbp = get_from_block_data(&cursor, sizeof(*idbp),
errbuf);
if (idbp == NULL)
goto fail; /* error */
/*
* Byte-swap it if necessary.
*/
if (p->swapped) {
idbp->linktype = SWAPSHORT(idbp->linktype);
idbp->snaplen = SWAPLONG(idbp->snaplen);
}
/*
* Try to add this interface.
*/
if (!add_interface(p, &cursor, errbuf))
goto fail;
goto done;
case BT_EPB:
case BT_SPB:
case BT_PB:
/*
* Saw a packet before we saw any IDBs. That's
* not valid, as we don't know what link-layer
* encapsulation the packet has.
*/
pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE,
"the capture file has a packet block before any Interface Description Blocks");
goto fail;
default:
/*
* Just ignore it.
*/
break;
}
}
done:
p->tzoff = 0; /* XXX - not used in pcap */
p->linktype = linktype_to_dlt(idbp->linktype);
p->snapshot = pcap_adjust_snapshot(p->linktype, idbp->snaplen);
p->linktype_ext = 0;
/*
* If the maximum block size for a packet with the maximum
* snapshot length for this DLT_ is bigger than the current
* maximum block size, increase the maximum.
*/
if (MAX_BLOCKSIZE_FOR_SNAPLEN(max_snaplen_for_dlt(p->linktype)) > ps->max_blocksize)
ps->max_blocksize = MAX_BLOCKSIZE_FOR_SNAPLEN(max_snaplen_for_dlt(p->linktype));
p->next_packet_op = pcap_ng_next_packet;
p->cleanup_op = pcap_ng_cleanup;
return (p);
fail:
free(ps->ifaces);
free(p->buffer);
free(p);
*err = 1;
return (NULL);
}
Commit Message: Fix some format warnings.
CWE ID: CWE-20 | pcap_ng_check_header(const uint8_t *magic, FILE *fp, u_int precision,
char *errbuf, int *err)
{
bpf_u_int32 magic_int;
size_t amt_read;
bpf_u_int32 total_length;
bpf_u_int32 byte_order_magic;
struct block_header *bhdrp;
struct section_header_block *shbp;
pcap_t *p;
int swapped = 0;
struct pcap_ng_sf *ps;
int status;
struct block_cursor cursor;
struct interface_description_block *idbp;
/*
* Assume no read errors.
*/
*err = 0;
/*
* Check whether the first 4 bytes of the file are the block
* type for a pcapng savefile.
*/
memcpy(&magic_int, magic, sizeof(magic_int));
if (magic_int != BT_SHB) {
/*
* XXX - check whether this looks like what the block
* type would be after being munged by mapping between
* UN*X and DOS/Windows text file format and, if it
* does, look for the byte-order magic number in
* the appropriate place and, if we find it, report
* this as possibly being a pcapng file transferred
* between UN*X and Windows in text file format?
*/
return (NULL); /* nope */
}
/*
* OK, they are. However, that's just \n\r\r\n, so it could,
* conceivably, be an ordinary text file.
*
* It could not, however, conceivably be any other type of
* capture file, so we can read the rest of the putative
* Section Header Block; put the block type in the common
* header, read the rest of the common header and the
* fixed-length portion of the SHB, and look for the byte-order
* magic value.
*/
amt_read = fread(&total_length, 1, sizeof(total_length), fp);
if (amt_read < sizeof(total_length)) {
if (ferror(fp)) {
pcap_fmt_errmsg_for_errno(errbuf, PCAP_ERRBUF_SIZE,
errno, "error reading dump file");
*err = 1;
return (NULL); /* fail */
}
/*
* Possibly a weird short text file, so just say
* "not pcapng".
*/
return (NULL);
}
amt_read = fread(&byte_order_magic, 1, sizeof(byte_order_magic), fp);
if (amt_read < sizeof(byte_order_magic)) {
if (ferror(fp)) {
pcap_fmt_errmsg_for_errno(errbuf, PCAP_ERRBUF_SIZE,
errno, "error reading dump file");
*err = 1;
return (NULL); /* fail */
}
/*
* Possibly a weird short text file, so just say
* "not pcapng".
*/
return (NULL);
}
if (byte_order_magic != BYTE_ORDER_MAGIC) {
byte_order_magic = SWAPLONG(byte_order_magic);
if (byte_order_magic != BYTE_ORDER_MAGIC) {
/*
* Not a pcapng file.
*/
return (NULL);
}
swapped = 1;
total_length = SWAPLONG(total_length);
}
/*
* Check the sanity of the total length.
*/
if (total_length < sizeof(*bhdrp) + sizeof(*shbp) + sizeof(struct block_trailer) ||
(total_length > BT_SHB_INSANE_MAX)) {
pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE,
"Section Header Block in pcapng dump file has invalid length %" PRIsize " < _%u_ < %u (BT_SHB_INSANE_MAX)",
sizeof(*bhdrp) + sizeof(*shbp) + sizeof(struct block_trailer),
total_length,
BT_SHB_INSANE_MAX);
*err = 1;
return (NULL);
}
/*
* OK, this is a good pcapng file.
* Allocate a pcap_t for it.
*/
p = pcap_open_offline_common(errbuf, sizeof (struct pcap_ng_sf));
if (p == NULL) {
/* Allocation failed. */
*err = 1;
return (NULL);
}
p->swapped = swapped;
ps = p->priv;
/*
* What precision does the user want?
*/
switch (precision) {
case PCAP_TSTAMP_PRECISION_MICRO:
ps->user_tsresol = 1000000;
break;
case PCAP_TSTAMP_PRECISION_NANO:
ps->user_tsresol = 1000000000;
break;
default:
pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE,
"unknown time stamp resolution %u", precision);
free(p);
*err = 1;
return (NULL);
}
p->opt.tstamp_precision = precision;
/*
* Allocate a buffer into which to read blocks. We default to
* the maximum of:
*
* the total length of the SHB for which we read the header;
*
* 2K, which should be more than large enough for an Enhanced
* Packet Block containing a full-size Ethernet frame, and
* leaving room for some options.
*
* If we find a bigger block, we reallocate the buffer, up to
* the maximum size. We start out with a maximum size of
* INITIAL_MAX_BLOCKSIZE; if we see any link-layer header types
* with a maximum snapshot that results in a larger maximum
* block length, we boost the maximum.
*/
p->bufsize = 2048;
if (p->bufsize < total_length)
p->bufsize = total_length;
p->buffer = malloc(p->bufsize);
if (p->buffer == NULL) {
pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE, "out of memory");
free(p);
*err = 1;
return (NULL);
}
ps->max_blocksize = INITIAL_MAX_BLOCKSIZE;
/*
* Copy the stuff we've read to the buffer, and read the rest
* of the SHB.
*/
bhdrp = (struct block_header *)p->buffer;
shbp = (struct section_header_block *)((u_char *)p->buffer + sizeof(struct block_header));
bhdrp->block_type = magic_int;
bhdrp->total_length = total_length;
shbp->byte_order_magic = byte_order_magic;
if (read_bytes(fp,
(u_char *)p->buffer + (sizeof(magic_int) + sizeof(total_length) + sizeof(byte_order_magic)),
total_length - (sizeof(magic_int) + sizeof(total_length) + sizeof(byte_order_magic)),
1, errbuf) == -1)
goto fail;
if (p->swapped) {
/*
* Byte-swap the fields we've read.
*/
shbp->major_version = SWAPSHORT(shbp->major_version);
shbp->minor_version = SWAPSHORT(shbp->minor_version);
/*
* XXX - we don't care about the section length.
*/
}
/* currently only SHB version 1.0 is supported */
if (! (shbp->major_version == PCAP_NG_VERSION_MAJOR &&
shbp->minor_version == PCAP_NG_VERSION_MINOR)) {
pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE,
"unsupported pcapng savefile version %u.%u",
shbp->major_version, shbp->minor_version);
goto fail;
}
p->version_major = shbp->major_version;
p->version_minor = shbp->minor_version;
/*
* Save the time stamp resolution the user requested.
*/
p->opt.tstamp_precision = precision;
/*
* Now start looking for an Interface Description Block.
*/
for (;;) {
/*
* Read the next block.
*/
status = read_block(fp, p, &cursor, errbuf);
if (status == 0) {
/* EOF - no IDB in this file */
pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE,
"the capture file has no Interface Description Blocks");
goto fail;
}
if (status == -1)
goto fail; /* error */
switch (cursor.block_type) {
case BT_IDB:
/*
* Get a pointer to the fixed-length portion of the
* IDB.
*/
idbp = get_from_block_data(&cursor, sizeof(*idbp),
errbuf);
if (idbp == NULL)
goto fail; /* error */
/*
* Byte-swap it if necessary.
*/
if (p->swapped) {
idbp->linktype = SWAPSHORT(idbp->linktype);
idbp->snaplen = SWAPLONG(idbp->snaplen);
}
/*
* Try to add this interface.
*/
if (!add_interface(p, &cursor, errbuf))
goto fail;
goto done;
case BT_EPB:
case BT_SPB:
case BT_PB:
/*
* Saw a packet before we saw any IDBs. That's
* not valid, as we don't know what link-layer
* encapsulation the packet has.
*/
pcap_snprintf(errbuf, PCAP_ERRBUF_SIZE,
"the capture file has a packet block before any Interface Description Blocks");
goto fail;
default:
/*
* Just ignore it.
*/
break;
}
}
done:
p->tzoff = 0; /* XXX - not used in pcap */
p->linktype = linktype_to_dlt(idbp->linktype);
p->snapshot = pcap_adjust_snapshot(p->linktype, idbp->snaplen);
p->linktype_ext = 0;
/*
* If the maximum block size for a packet with the maximum
* snapshot length for this DLT_ is bigger than the current
* maximum block size, increase the maximum.
*/
if (MAX_BLOCKSIZE_FOR_SNAPLEN(max_snaplen_for_dlt(p->linktype)) > ps->max_blocksize)
ps->max_blocksize = MAX_BLOCKSIZE_FOR_SNAPLEN(max_snaplen_for_dlt(p->linktype));
p->next_packet_op = pcap_ng_next_packet;
p->cleanup_op = pcap_ng_cleanup;
return (p);
fail:
free(ps->ifaces);
free(p->buffer);
free(p);
*err = 1;
return (NULL);
}
| 169,539 |
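A note on the "Fix some format warnings" row above: the visible change between the vulnerable and fixed pcap_ng_check_header() is that two %lu conversions become %u, so each specifier matches the width of its 32-bit argument. Below is a minimal standalone sketch of that rule using plain <inttypes.h> types; it makes no assumptions about libpcap's own PRIsize macro.

#include <inttypes.h>
#include <stdio.h>

int main(void) {
    uint32_t total_length = 412;   /* stands in for a bpf_u_int32 */
    size_t   min_length   = 28;    /* stands in for the sizeof() sum */
    int64_t  extra_bytes  = 3;     /* stands in for a 64-bit offset */

    /* %zu pairs with size_t, PRIu32 with uint32_t, PRId64 with int64_t;
     * using %lu for a 32-bit value is exactly the kind of mismatch the
     * compiler's format warnings point out. */
    printf("invalid length %zu < %" PRIu32 "\n", min_length, total_length);
    printf("%" PRId64 " extraneous bytes\n", extra_bytes);
    return 0;
}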
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: virtual void SetUp() {
UUT_ = GET_PARAM(2);
/* Set up guard blocks for an inner block centered in the outer block */
for (int i = 0; i < kOutputBufferSize; ++i) {
if (IsIndexInBorder(i))
output_[i] = 255;
else
output_[i] = 0;
}
::libvpx_test::ACMRandom prng;
for (int i = 0; i < kInputBufferSize; ++i)
input_[i] = prng.Rand8Extremes();
}
Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478
DO NOT MERGE - libvpx: Pull from upstream
Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06
BUG=23452792
Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec
CWE ID: CWE-119 | virtual void SetUp() {
UUT_ = GET_PARAM(2);
#if CONFIG_VP9_HIGHBITDEPTH
if (UUT_->use_highbd_ != 0)
mask_ = (1 << UUT_->use_highbd_) - 1;
else
mask_ = 255;
#endif
/* Set up guard blocks for an inner block centered in the outer block */
for (int i = 0; i < kOutputBufferSize; ++i) {
if (IsIndexInBorder(i))
output_[i] = 255;
else
output_[i] = 0;
}
::libvpx_test::ACMRandom prng;
for (int i = 0; i < kInputBufferSize; ++i) {
if (i & 1) {
input_[i] = 255;
#if CONFIG_VP9_HIGHBITDEPTH
input16_[i] = mask_;
#endif
} else {
input_[i] = prng.Rand8Extremes();
#if CONFIG_VP9_HIGHBITDEPTH
input16_[i] = prng.Rand16() & mask_;
#endif
}
}
}
| 174,505 |
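The fixed SetUp() above adds a high-bit-depth path that masks the random 16-bit test samples to the configured bit depth, using (1 << use_highbd_) - 1 for high-bit-depth builds and 255 otherwise. Here is a tiny sketch of that masking arithmetic in isolation, with illustrative names rather than the libvpx test harness:

#include <stdint.h>
#include <stdio.h>

/* All-ones mask for a sample bit depth: 8 -> 0x0FF, 10 -> 0x3FF, 12 -> 0xFFF. */
static uint16_t sample_mask(int bit_depth) {
    return (uint16_t)((1u << bit_depth) - 1u);
}

int main(void) {
    const uint16_t raw = 0xBEEF;          /* pretend PRNG output */
    for (int depth = 8; depth <= 12; depth += 2) {
        printf("depth %2d: mask 0x%03X sample 0x%03X\n",
               depth, sample_mask(depth), raw & sample_mask(depth));
    }
    return 0;
}

Clamping the generated samples this way keeps every test input within the legal range for the bit depth under test.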
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int lxc_attach(const char* name, const char* lxcpath, lxc_attach_exec_t exec_function, void* exec_payload, lxc_attach_options_t* options, pid_t* attached_process)
{
int ret, status;
pid_t init_pid, pid, attached_pid, expected;
struct lxc_proc_context_info *init_ctx;
char* cwd;
char* new_cwd;
int ipc_sockets[2];
signed long personality;
if (!options)
options = &attach_static_default_options;
init_pid = lxc_cmd_get_init_pid(name, lxcpath);
if (init_pid < 0) {
ERROR("failed to get the init pid");
return -1;
}
init_ctx = lxc_proc_get_context_info(init_pid);
if (!init_ctx) {
ERROR("failed to get context of the init process, pid = %ld", (long)init_pid);
return -1;
}
personality = get_personality(name, lxcpath);
if (init_ctx->personality < 0) {
ERROR("Failed to get personality of the container");
lxc_proc_put_context_info(init_ctx);
return -1;
}
init_ctx->personality = personality;
if (!fetch_seccomp(name, lxcpath, init_ctx, options))
WARN("Failed to get seccomp policy");
cwd = getcwd(NULL, 0);
/* determine which namespaces the container was created with
* by asking lxc-start, if necessary
*/
if (options->namespaces == -1) {
options->namespaces = lxc_cmd_get_clone_flags(name, lxcpath);
/* call failed */
if (options->namespaces == -1) {
ERROR("failed to automatically determine the "
"namespaces which the container unshared");
free(cwd);
lxc_proc_put_context_info(init_ctx);
return -1;
}
}
/* create a socket pair for IPC communication; set SOCK_CLOEXEC in order
* to make sure we don't irritate other threads that want to fork+exec away
*
* IMPORTANT: if the initial process is multithreaded and another call
* just fork()s away without exec'ing directly after, the socket fd will
* exist in the forked process from the other thread and any close() in
* our own child process will not really cause the socket to close properly,
* potentially causing the parent to hang.
*
* For this reason, while IPC is still active, we have to use shutdown()
* if the child exits prematurely in order to signal that the socket
* is closed and cannot assume that the child exiting will automatically
* do that.
*
* IPC mechanism: (X is receiver)
* initial process intermediate attached
* X <--- send pid of
* attached proc,
* then exit
* send 0 ------------------------------------> X
* [do initialization]
* X <------------------------------------ send 1
* [add to cgroup, ...]
* send 2 ------------------------------------> X
* close socket close socket
* run program
*/
ret = socketpair(PF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
if (ret < 0) {
SYSERROR("could not set up required IPC mechanism for attaching");
free(cwd);
lxc_proc_put_context_info(init_ctx);
return -1;
}
/* create intermediate subprocess, three reasons:
* 1. runs all pthread_atfork handlers and the
* child will no longer be threaded
* (we can't properly setns() in a threaded process)
* 2. we can't setns() in the child itself, since
* we want to make sure we are properly attached to
* the pidns
* 3. also, the initial thread has to put the attached
* process into the cgroup, which we can only do if
* we didn't already setns() (otherwise, user
* namespaces will hate us)
*/
pid = fork();
if (pid < 0) {
SYSERROR("failed to create first subprocess");
free(cwd);
lxc_proc_put_context_info(init_ctx);
return -1;
}
if (pid) {
pid_t to_cleanup_pid = pid;
/* initial thread, we close the socket that is for the
* subprocesses
*/
close(ipc_sockets[1]);
free(cwd);
/* attach to cgroup, if requested */
if (options->attach_flags & LXC_ATTACH_MOVE_TO_CGROUP) {
if (!cgroup_attach(name, lxcpath, pid))
goto cleanup_error;
}
/* Let the child process know to go ahead */
status = 0;
ret = lxc_write_nointr(ipc_sockets[0], &status, sizeof(status));
if (ret <= 0) {
ERROR("error using IPC to notify attached process for initialization (0)");
goto cleanup_error;
}
/* get pid from intermediate process */
ret = lxc_read_nointr_expect(ipc_sockets[0], &attached_pid, sizeof(attached_pid), NULL);
if (ret <= 0) {
if (ret != 0)
ERROR("error using IPC to receive pid of attached process");
goto cleanup_error;
}
/* ignore SIGKILL (CTRL-C) and SIGQUIT (CTRL-\) - issue #313 */
if (options->stdin_fd == 0) {
signal(SIGINT, SIG_IGN);
signal(SIGQUIT, SIG_IGN);
}
/* reap intermediate process */
ret = wait_for_pid(pid);
if (ret < 0)
goto cleanup_error;
/* we will always have to reap the grandchild now */
to_cleanup_pid = attached_pid;
/* tell attached process it may start initializing */
status = 0;
ret = lxc_write_nointr(ipc_sockets[0], &status, sizeof(status));
if (ret <= 0) {
ERROR("error using IPC to notify attached process for initialization (0)");
goto cleanup_error;
}
/* wait for the attached process to finish initializing */
expected = 1;
ret = lxc_read_nointr_expect(ipc_sockets[0], &status, sizeof(status), &expected);
if (ret <= 0) {
if (ret != 0)
ERROR("error using IPC to receive notification from attached process (1)");
goto cleanup_error;
}
/* tell attached process we're done */
status = 2;
ret = lxc_write_nointr(ipc_sockets[0], &status, sizeof(status));
if (ret <= 0) {
ERROR("error using IPC to notify attached process for initialization (2)");
goto cleanup_error;
}
/* now shut down communication with child, we're done */
shutdown(ipc_sockets[0], SHUT_RDWR);
close(ipc_sockets[0]);
lxc_proc_put_context_info(init_ctx);
/* we're done, the child process should now execute whatever
* it is that the user requested. The parent can now track it
* with waitpid() or similar.
*/
*attached_process = attached_pid;
return 0;
cleanup_error:
/* first shut down the socket, then wait for the pid,
* otherwise the pid we're waiting for may never exit
*/
shutdown(ipc_sockets[0], SHUT_RDWR);
close(ipc_sockets[0]);
if (to_cleanup_pid)
(void) wait_for_pid(to_cleanup_pid);
lxc_proc_put_context_info(init_ctx);
return -1;
}
/* first subprocess begins here, we close the socket that is for the
* initial thread
*/
close(ipc_sockets[0]);
/* Wait for the parent to have setup cgroups */
expected = 0;
status = -1;
ret = lxc_read_nointr_expect(ipc_sockets[1], &status, sizeof(status), &expected);
if (ret <= 0) {
ERROR("error communicating with child process");
shutdown(ipc_sockets[1], SHUT_RDWR);
rexit(-1);
}
/* attach now, create another subprocess later, since pid namespaces
* only really affect the children of the current process
*/
ret = lxc_attach_to_ns(init_pid, options->namespaces);
if (ret < 0) {
ERROR("failed to enter the namespace");
shutdown(ipc_sockets[1], SHUT_RDWR);
rexit(-1);
}
/* attach succeeded, try to cwd */
if (options->initial_cwd)
new_cwd = options->initial_cwd;
else
new_cwd = cwd;
ret = chdir(new_cwd);
if (ret < 0)
WARN("could not change directory to '%s'", new_cwd);
free(cwd);
/* now create the real child process */
{
struct attach_clone_payload payload = {
.ipc_socket = ipc_sockets[1],
.options = options,
.init_ctx = init_ctx,
.exec_function = exec_function,
.exec_payload = exec_payload
};
/* We use clone_parent here to make this subprocess a direct child of
* the initial process. Then this intermediate process can exit and
* the parent can directly track the attached process.
*/
pid = lxc_clone(attach_child_main, &payload, CLONE_PARENT);
}
/* shouldn't happen, clone() should always return positive pid */
if (pid <= 0) {
SYSERROR("failed to create subprocess");
shutdown(ipc_sockets[1], SHUT_RDWR);
rexit(-1);
}
/* tell grandparent the pid of the newly created child */
ret = lxc_write_nointr(ipc_sockets[1], &pid, sizeof(pid));
if (ret != sizeof(pid)) {
/* if this really happens here, this is very unfortunate, since the
* parent will not know the pid of the attached process and will
* not be able to wait for it (and we won't either due to CLONE_PARENT)
* so the parent won't be able to reap it and the attached process
* will remain a zombie
*/
ERROR("error using IPC to notify main process of pid of the attached process");
shutdown(ipc_sockets[1], SHUT_RDWR);
rexit(-1);
}
/* the rest is in the hands of the initial and the attached process */
rexit(0);
}
Commit Message: CVE-2015-1334: Don't use the container's /proc during attach
A user could otherwise over-mount /proc and prevent the apparmor profile
or selinux label from being written which combined with a modified
/bin/sh or other commonly used binary would lead to unconfined code
execution.
Reported-by: Roman Fiedler
Signed-off-by: Stéphane Graber <stgraber@ubuntu.com>
CWE ID: CWE-17 | int lxc_attach(const char* name, const char* lxcpath, lxc_attach_exec_t exec_function, void* exec_payload, lxc_attach_options_t* options, pid_t* attached_process)
{
int ret, status;
pid_t init_pid, pid, attached_pid, expected;
struct lxc_proc_context_info *init_ctx;
char* cwd;
char* new_cwd;
int ipc_sockets[2];
int procfd;
signed long personality;
if (!options)
options = &attach_static_default_options;
init_pid = lxc_cmd_get_init_pid(name, lxcpath);
if (init_pid < 0) {
ERROR("failed to get the init pid");
return -1;
}
init_ctx = lxc_proc_get_context_info(init_pid);
if (!init_ctx) {
ERROR("failed to get context of the init process, pid = %ld", (long)init_pid);
return -1;
}
personality = get_personality(name, lxcpath);
if (init_ctx->personality < 0) {
ERROR("Failed to get personality of the container");
lxc_proc_put_context_info(init_ctx);
return -1;
}
init_ctx->personality = personality;
if (!fetch_seccomp(name, lxcpath, init_ctx, options))
WARN("Failed to get seccomp policy");
cwd = getcwd(NULL, 0);
/* determine which namespaces the container was created with
* by asking lxc-start, if necessary
*/
if (options->namespaces == -1) {
options->namespaces = lxc_cmd_get_clone_flags(name, lxcpath);
/* call failed */
if (options->namespaces == -1) {
ERROR("failed to automatically determine the "
"namespaces which the container unshared");
free(cwd);
lxc_proc_put_context_info(init_ctx);
return -1;
}
}
/* create a socket pair for IPC communication; set SOCK_CLOEXEC in order
* to make sure we don't irritate other threads that want to fork+exec away
*
* IMPORTANT: if the initial process is multithreaded and another call
* just fork()s away without exec'ing directly after, the socket fd will
* exist in the forked process from the other thread and any close() in
* our own child process will not really cause the socket to close properly,
* potentially causing the parent to hang.
*
* For this reason, while IPC is still active, we have to use shutdown()
* if the child exits prematurely in order to signal that the socket
* is closed and cannot assume that the child exiting will automatically
* do that.
*
* IPC mechanism: (X is receiver)
* initial process intermediate attached
* X <--- send pid of
* attached proc,
* then exit
* send 0 ------------------------------------> X
* [do initialization]
* X <------------------------------------ send 1
* [add to cgroup, ...]
* send 2 ------------------------------------> X
* close socket close socket
* run program
*/
ret = socketpair(PF_LOCAL, SOCK_STREAM | SOCK_CLOEXEC, 0, ipc_sockets);
if (ret < 0) {
SYSERROR("could not set up required IPC mechanism for attaching");
free(cwd);
lxc_proc_put_context_info(init_ctx);
return -1;
}
/* create intermediate subprocess, three reasons:
* 1. runs all pthread_atfork handlers and the
* child will no longer be threaded
* (we can't properly setns() in a threaded process)
* 2. we can't setns() in the child itself, since
* we want to make sure we are properly attached to
* the pidns
* 3. also, the initial thread has to put the attached
* process into the cgroup, which we can only do if
* we didn't already setns() (otherwise, user
* namespaces will hate us)
*/
pid = fork();
if (pid < 0) {
SYSERROR("failed to create first subprocess");
free(cwd);
lxc_proc_put_context_info(init_ctx);
return -1;
}
if (pid) {
pid_t to_cleanup_pid = pid;
/* initial thread, we close the socket that is for the
* subprocesses
*/
close(ipc_sockets[1]);
free(cwd);
/* attach to cgroup, if requested */
if (options->attach_flags & LXC_ATTACH_MOVE_TO_CGROUP) {
if (!cgroup_attach(name, lxcpath, pid))
goto cleanup_error;
}
/* Let the child process know to go ahead */
status = 0;
ret = lxc_write_nointr(ipc_sockets[0], &status, sizeof(status));
if (ret <= 0) {
ERROR("error using IPC to notify attached process for initialization (0)");
goto cleanup_error;
}
/* get pid from intermediate process */
ret = lxc_read_nointr_expect(ipc_sockets[0], &attached_pid, sizeof(attached_pid), NULL);
if (ret <= 0) {
if (ret != 0)
ERROR("error using IPC to receive pid of attached process");
goto cleanup_error;
}
/* ignore SIGKILL (CTRL-C) and SIGQUIT (CTRL-\) - issue #313 */
if (options->stdin_fd == 0) {
signal(SIGINT, SIG_IGN);
signal(SIGQUIT, SIG_IGN);
}
/* reap intermediate process */
ret = wait_for_pid(pid);
if (ret < 0)
goto cleanup_error;
/* we will always have to reap the grandchild now */
to_cleanup_pid = attached_pid;
/* tell attached process it may start initializing */
status = 0;
ret = lxc_write_nointr(ipc_sockets[0], &status, sizeof(status));
if (ret <= 0) {
ERROR("error using IPC to notify attached process for initialization (0)");
goto cleanup_error;
}
/* wait for the attached process to finish initializing */
expected = 1;
ret = lxc_read_nointr_expect(ipc_sockets[0], &status, sizeof(status), &expected);
if (ret <= 0) {
if (ret != 0)
ERROR("error using IPC to receive notification from attached process (1)");
goto cleanup_error;
}
/* tell attached process we're done */
status = 2;
ret = lxc_write_nointr(ipc_sockets[0], &status, sizeof(status));
if (ret <= 0) {
ERROR("error using IPC to notify attached process for initialization (2)");
goto cleanup_error;
}
/* now shut down communication with child, we're done */
shutdown(ipc_sockets[0], SHUT_RDWR);
close(ipc_sockets[0]);
lxc_proc_put_context_info(init_ctx);
/* we're done, the child process should now execute whatever
* it is that the user requested. The parent can now track it
* with waitpid() or similar.
*/
*attached_process = attached_pid;
return 0;
cleanup_error:
/* first shut down the socket, then wait for the pid,
* otherwise the pid we're waiting for may never exit
*/
shutdown(ipc_sockets[0], SHUT_RDWR);
close(ipc_sockets[0]);
if (to_cleanup_pid)
(void) wait_for_pid(to_cleanup_pid);
lxc_proc_put_context_info(init_ctx);
return -1;
}
/* first subprocess begins here, we close the socket that is for the
* initial thread
*/
close(ipc_sockets[0]);
/* Wait for the parent to have setup cgroups */
expected = 0;
status = -1;
ret = lxc_read_nointr_expect(ipc_sockets[1], &status, sizeof(status), &expected);
if (ret <= 0) {
ERROR("error communicating with child process");
shutdown(ipc_sockets[1], SHUT_RDWR);
rexit(-1);
}
procfd = open("/proc", O_DIRECTORY | O_RDONLY);
if (procfd < 0) {
SYSERROR("Unable to open /proc");
shutdown(ipc_sockets[1], SHUT_RDWR);
rexit(-1);
}
/* attach now, create another subprocess later, since pid namespaces
* only really affect the children of the current process
*/
ret = lxc_attach_to_ns(init_pid, options->namespaces);
if (ret < 0) {
ERROR("failed to enter the namespace");
shutdown(ipc_sockets[1], SHUT_RDWR);
rexit(-1);
}
/* attach succeeded, try to cwd */
if (options->initial_cwd)
new_cwd = options->initial_cwd;
else
new_cwd = cwd;
ret = chdir(new_cwd);
if (ret < 0)
WARN("could not change directory to '%s'", new_cwd);
free(cwd);
/* now create the real child process */
{
struct attach_clone_payload payload = {
.ipc_socket = ipc_sockets[1],
.options = options,
.init_ctx = init_ctx,
.exec_function = exec_function,
.exec_payload = exec_payload,
.procfd = procfd
};
/* We use clone_parent here to make this subprocess a direct child of
* the initial process. Then this intermediate process can exit and
* the parent can directly track the attached process.
*/
pid = lxc_clone(attach_child_main, &payload, CLONE_PARENT);
}
/* shouldn't happen, clone() should always return positive pid */
if (pid <= 0) {
SYSERROR("failed to create subprocess");
shutdown(ipc_sockets[1], SHUT_RDWR);
rexit(-1);
}
/* tell grandparent the pid of the newly created child */
ret = lxc_write_nointr(ipc_sockets[1], &pid, sizeof(pid));
if (ret != sizeof(pid)) {
/* if this really happens here, this is very unfortunate, since the
* parent will not know the pid of the attached process and will
* not be able to wait for it (and we won't either due to CLONE_PARENT)
* so the parent won't be able to reap it and the attached process
* will remain a zombie
*/
ERROR("error using IPC to notify main process of pid of the attached process");
shutdown(ipc_sockets[1], SHUT_RDWR);
rexit(-1);
}
/* the rest is in the hands of the initial and the attached process */
rexit(0);
}
| 166,724 |
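To make the CVE-2015-1334 row above concrete: the fix captures a file descriptor for the host's /proc in the intermediate process before any setns() call, so the later write of the AppArmor/SELinux attribute cannot be redirected by a /proc over-mounted inside the container. The sketch below shows only that ordering idea; the helper name, label string, and control flow are invented and make no claim to match lxc's real attach_child_main().

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical helper: write an LSM label through a dirfd that was opened
 * on the host's /proc, so a container-mounted /proc is never consulted.
 * The exact payload format depends on the LSM in use. */
static int write_attr(int procfd, const char *label) {
    int fd = openat(procfd, "self/attr/current", O_WRONLY);
    if (fd < 0)
        return -1;
    ssize_t n = write(fd, label, strlen(label));
    close(fd);
    return n < 0 ? -1 : 0;
}

int main(void) {
    /* 1. grab the trusted /proc before entering the container */
    int procfd = open("/proc", O_DIRECTORY | O_RDONLY);
    if (procfd < 0)
        return 1;

    /* 2. setns() into the container's namespaces would happen here */

    /* 3. only afterwards apply the label, via the saved dirfd */
    if (write_attr(procfd, "illustrative-profile") < 0)
        perror("write_attr");

    close(procfd);
    return 0;
}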
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void JBIG2Stream::readSegments() {
Guint segNum, segFlags, segType, page, segLength;
Guint refFlags, nRefSegs;
Guint *refSegs;
Goffset segDataPos;
int c1, c2, c3;
Guint i;
while (readULong(&segNum)) {
if (!readUByte(&segFlags)) {
goto eofError1;
}
segType = segFlags & 0x3f;
if (!readUByte(&refFlags)) {
goto eofError1;
}
nRefSegs = refFlags >> 5;
if (nRefSegs == 7) {
if ((c1 = curStr->getChar()) == EOF ||
(c2 = curStr->getChar()) == EOF ||
(c3 = curStr->getChar()) == EOF) {
goto eofError1;
}
refFlags = (refFlags << 24) | (c1 << 16) | (c2 << 8) | c3;
nRefSegs = refFlags & 0x1fffffff;
for (i = 0; i < (nRefSegs + 9) >> 3; ++i) {
if ((c1 = curStr->getChar()) == EOF) {
goto eofError1;
}
}
}
refSegs = (Guint *)gmallocn(nRefSegs, sizeof(Guint));
if (segNum <= 256) {
for (i = 0; i < nRefSegs; ++i) {
if (!readUByte(&refSegs[i])) {
goto eofError2;
}
}
} else if (segNum <= 65536) {
for (i = 0; i < nRefSegs; ++i) {
if (!readUWord(&refSegs[i])) {
goto eofError2;
}
}
} else {
for (i = 0; i < nRefSegs; ++i) {
if (!readULong(&refSegs[i])) {
goto eofError2;
}
}
}
if (segFlags & 0x40) {
if (!readULong(&page)) {
goto eofError2;
}
} else {
if (!readUByte(&page)) {
goto eofError2;
}
}
if (!readULong(&segLength)) {
goto eofError2;
}
segDataPos = curStr->getPos();
if (!pageBitmap && ((segType >= 4 && segType <= 7) ||
(segType >= 20 && segType <= 43))) {
error(errSyntaxError, curStr->getPos(), "First JBIG2 segment associated with a page must be a page information segment");
goto syntaxError;
}
switch (segType) {
case 0:
if (!readSymbolDictSeg(segNum, segLength, refSegs, nRefSegs)) {
goto syntaxError;
}
break;
case 4:
readTextRegionSeg(segNum, gFalse, gFalse, segLength, refSegs, nRefSegs);
break;
case 6:
readTextRegionSeg(segNum, gTrue, gFalse, segLength, refSegs, nRefSegs);
break;
case 7:
readTextRegionSeg(segNum, gTrue, gTrue, segLength, refSegs, nRefSegs);
break;
case 16:
readPatternDictSeg(segNum, segLength);
break;
case 20:
readHalftoneRegionSeg(segNum, gFalse, gFalse, segLength,
refSegs, nRefSegs);
break;
case 22:
readHalftoneRegionSeg(segNum, gTrue, gFalse, segLength,
refSegs, nRefSegs);
break;
case 23:
readHalftoneRegionSeg(segNum, gTrue, gTrue, segLength,
refSegs, nRefSegs);
break;
case 36:
readGenericRegionSeg(segNum, gFalse, gFalse, segLength);
break;
case 38:
readGenericRegionSeg(segNum, gTrue, gFalse, segLength);
break;
case 39:
readGenericRegionSeg(segNum, gTrue, gTrue, segLength);
break;
case 40:
readGenericRefinementRegionSeg(segNum, gFalse, gFalse, segLength,
refSegs, nRefSegs);
break;
case 42:
readGenericRefinementRegionSeg(segNum, gTrue, gFalse, segLength,
refSegs, nRefSegs);
break;
case 43:
readGenericRefinementRegionSeg(segNum, gTrue, gTrue, segLength,
refSegs, nRefSegs);
break;
case 48:
readPageInfoSeg(segLength);
break;
case 50:
readEndOfStripeSeg(segLength);
break;
case 52:
readProfilesSeg(segLength);
break;
case 53:
readCodeTableSeg(segNum, segLength);
break;
case 62:
readExtensionSeg(segLength);
break;
default:
error(errSyntaxError, curStr->getPos(), "Unknown segment type in JBIG2 stream");
for (i = 0; i < segLength; ++i) {
if ((c1 = curStr->getChar()) == EOF) {
goto eofError2;
}
}
break;
}
if (segLength != 0xffffffff) {
Goffset segExtraBytes = segDataPos + segLength - curStr->getPos();
if (segExtraBytes > 0) {
error(errSyntaxError, curStr->getPos(), "{0:d} extraneous byte{1:s} after segment",
segExtraBytes, (segExtraBytes > 1) ? "s" : "");
int trash;
for (Goffset i = segExtraBytes; i > 0; i--) {
readByte(&trash);
}
} else if (segExtraBytes < 0) {
error(errSyntaxError, curStr->getPos(), "Previous segment handler read too many bytes");
}
}
gfree(refSegs);
}
return;
syntaxError:
gfree(refSegs);
return;
eofError2:
gfree(refSegs);
eofError1:
error(errSyntaxError, curStr->getPos(), "Unexpected EOF in JBIG2 stream");
}
Commit Message:
CWE ID: CWE-119 | void JBIG2Stream::readSegments() {
Guint segNum, segFlags, segType, page, segLength;
Guint refFlags, nRefSegs;
Guint *refSegs;
Goffset segDataPos;
int c1, c2, c3;
Guint i;
while (readULong(&segNum)) {
if (!readUByte(&segFlags)) {
goto eofError1;
}
segType = segFlags & 0x3f;
if (!readUByte(&refFlags)) {
goto eofError1;
}
nRefSegs = refFlags >> 5;
if (nRefSegs == 7) {
if ((c1 = curStr->getChar()) == EOF ||
(c2 = curStr->getChar()) == EOF ||
(c3 = curStr->getChar()) == EOF) {
goto eofError1;
}
refFlags = (refFlags << 24) | (c1 << 16) | (c2 << 8) | c3;
nRefSegs = refFlags & 0x1fffffff;
for (i = 0; i < (nRefSegs + 9) >> 3; ++i) {
if ((c1 = curStr->getChar()) == EOF) {
goto eofError1;
}
}
}
refSegs = (Guint *)gmallocn(nRefSegs, sizeof(Guint));
if (segNum <= 256) {
for (i = 0; i < nRefSegs; ++i) {
if (!readUByte(&refSegs[i])) {
goto eofError2;
}
}
} else if (segNum <= 65536) {
for (i = 0; i < nRefSegs; ++i) {
if (!readUWord(&refSegs[i])) {
goto eofError2;
}
}
} else {
for (i = 0; i < nRefSegs; ++i) {
if (!readULong(&refSegs[i])) {
goto eofError2;
}
}
}
if (segFlags & 0x40) {
if (!readULong(&page)) {
goto eofError2;
}
} else {
if (!readUByte(&page)) {
goto eofError2;
}
}
if (!readULong(&segLength)) {
goto eofError2;
}
segDataPos = curStr->getPos();
if (!pageBitmap && ((segType >= 4 && segType <= 7) ||
(segType >= 20 && segType <= 43))) {
error(errSyntaxError, curStr->getPos(), "First JBIG2 segment associated with a page must be a page information segment");
goto syntaxError;
}
switch (segType) {
case 0:
if (!readSymbolDictSeg(segNum, segLength, refSegs, nRefSegs)) {
goto syntaxError;
}
break;
case 4:
readTextRegionSeg(segNum, gFalse, gFalse, segLength, refSegs, nRefSegs);
break;
case 6:
readTextRegionSeg(segNum, gTrue, gFalse, segLength, refSegs, nRefSegs);
break;
case 7:
readTextRegionSeg(segNum, gTrue, gTrue, segLength, refSegs, nRefSegs);
break;
case 16:
readPatternDictSeg(segNum, segLength);
break;
case 20:
readHalftoneRegionSeg(segNum, gFalse, gFalse, segLength,
refSegs, nRefSegs);
break;
case 22:
readHalftoneRegionSeg(segNum, gTrue, gFalse, segLength,
refSegs, nRefSegs);
break;
case 23:
readHalftoneRegionSeg(segNum, gTrue, gTrue, segLength,
refSegs, nRefSegs);
break;
case 36:
readGenericRegionSeg(segNum, gFalse, gFalse, segLength);
break;
case 38:
readGenericRegionSeg(segNum, gTrue, gFalse, segLength);
break;
case 39:
readGenericRegionSeg(segNum, gTrue, gTrue, segLength);
break;
case 40:
readGenericRefinementRegionSeg(segNum, gFalse, gFalse, segLength,
refSegs, nRefSegs);
break;
case 42:
readGenericRefinementRegionSeg(segNum, gTrue, gFalse, segLength,
refSegs, nRefSegs);
break;
case 43:
readGenericRefinementRegionSeg(segNum, gTrue, gTrue, segLength,
refSegs, nRefSegs);
break;
case 48:
readPageInfoSeg(segLength);
break;
case 50:
readEndOfStripeSeg(segLength);
break;
case 52:
readProfilesSeg(segLength);
break;
case 53:
readCodeTableSeg(segNum, segLength);
break;
case 62:
readExtensionSeg(segLength);
break;
default:
error(errSyntaxError, curStr->getPos(), "Unknown segment type in JBIG2 stream");
for (i = 0; i < segLength; ++i) {
if ((c1 = curStr->getChar()) == EOF) {
goto eofError2;
}
}
break;
}
if (segLength != 0xffffffff) {
Goffset segExtraBytes = segDataPos + segLength - curStr->getPos();
if (segExtraBytes > 0) {
error(errSyntaxError, curStr->getPos(), "{0:lld} extraneous byte{1:s} after segment",
segExtraBytes, (segExtraBytes > 1) ? "s" : "");
int trash;
for (Goffset i = segExtraBytes; i > 0; i--) {
readByte(&trash);
}
} else if (segExtraBytes < 0) {
error(errSyntaxError, curStr->getPos(), "Previous segment handler read too many bytes");
}
}
gfree(refSegs);
}
return;
syntaxError:
gfree(refSegs);
return;
eofError2:
gfree(refSegs);
eofError1:
error(errSyntaxError, curStr->getPos(), "Unexpected EOF in JBIG2 stream");
}
| 165,299 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static struct rds_connection *__rds_conn_create(struct net *net,
__be32 laddr, __be32 faddr,
struct rds_transport *trans, gfp_t gfp,
int is_outgoing)
{
struct rds_connection *conn, *parent = NULL;
struct hlist_head *head = rds_conn_bucket(laddr, faddr);
struct rds_transport *loop_trans;
unsigned long flags;
int ret;
struct rds_transport *otrans = trans;
if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
goto new_conn;
rcu_read_lock();
conn = rds_conn_lookup(net, head, laddr, faddr, trans);
if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
laddr == faddr && !is_outgoing) {
/* This is a looped back IB connection, and we're
* called by the code handling the incoming connect.
* We need a second connection object into which we
* can stick the other QP. */
parent = conn;
conn = parent->c_passive;
}
rcu_read_unlock();
if (conn)
goto out;
new_conn:
conn = kmem_cache_zalloc(rds_conn_slab, gfp);
if (!conn) {
conn = ERR_PTR(-ENOMEM);
goto out;
}
INIT_HLIST_NODE(&conn->c_hash_node);
conn->c_laddr = laddr;
conn->c_faddr = faddr;
spin_lock_init(&conn->c_lock);
conn->c_next_tx_seq = 1;
rds_conn_net_set(conn, net);
init_waitqueue_head(&conn->c_waitq);
INIT_LIST_HEAD(&conn->c_send_queue);
INIT_LIST_HEAD(&conn->c_retrans);
ret = rds_cong_get_maps(conn);
if (ret) {
kmem_cache_free(rds_conn_slab, conn);
conn = ERR_PTR(ret);
goto out;
}
/*
* This is where a connection becomes loopback. If *any* RDS sockets
* can bind to the destination address then we'd rather the messages
* flow through loopback rather than either transport.
*/
loop_trans = rds_trans_get_preferred(net, faddr);
if (loop_trans) {
rds_trans_put(loop_trans);
conn->c_loopback = 1;
if (is_outgoing && trans->t_prefer_loopback) {
/* "outgoing" connection - and the transport
* says it wants the connection handled by the
* loopback transport. This is what TCP does.
*/
trans = &rds_loop_transport;
}
}
conn->c_trans = trans;
ret = trans->conn_alloc(conn, gfp);
if (ret) {
kmem_cache_free(rds_conn_slab, conn);
conn = ERR_PTR(ret);
goto out;
}
atomic_set(&conn->c_state, RDS_CONN_DOWN);
conn->c_send_gen = 0;
conn->c_reconnect_jiffies = 0;
INIT_DELAYED_WORK(&conn->c_send_w, rds_send_worker);
INIT_DELAYED_WORK(&conn->c_recv_w, rds_recv_worker);
INIT_DELAYED_WORK(&conn->c_conn_w, rds_connect_worker);
INIT_WORK(&conn->c_down_w, rds_shutdown_worker);
mutex_init(&conn->c_cm_lock);
conn->c_flags = 0;
rdsdebug("allocated conn %p for %pI4 -> %pI4 over %s %s\n",
conn, &laddr, &faddr,
trans->t_name ? trans->t_name : "[unknown]",
is_outgoing ? "(outgoing)" : "");
/*
* Since we ran without holding the conn lock, someone could
* have created the same conn (either normal or passive) in the
* interim. We check while holding the lock. If we won, we complete
* init and return our conn. If we lost, we rollback and return the
* other one.
*/
spin_lock_irqsave(&rds_conn_lock, flags);
if (parent) {
/* Creating passive conn */
if (parent->c_passive) {
trans->conn_free(conn->c_transport_data);
kmem_cache_free(rds_conn_slab, conn);
conn = parent->c_passive;
} else {
parent->c_passive = conn;
rds_cong_add_conn(conn);
rds_conn_count++;
}
} else {
/* Creating normal conn */
struct rds_connection *found;
if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
found = NULL;
else
found = rds_conn_lookup(net, head, laddr, faddr, trans);
if (found) {
trans->conn_free(conn->c_transport_data);
kmem_cache_free(rds_conn_slab, conn);
conn = found;
} else {
if ((is_outgoing && otrans->t_type == RDS_TRANS_TCP) ||
(otrans->t_type != RDS_TRANS_TCP)) {
/* Only the active side should be added to
* reconnect list for TCP.
*/
hlist_add_head_rcu(&conn->c_hash_node, head);
}
rds_cong_add_conn(conn);
rds_conn_count++;
}
}
spin_unlock_irqrestore(&rds_conn_lock, flags);
out:
return conn;
}
Commit Message: RDS: verify the underlying transport exists before creating a connection
There was no verification that an underlying transport exists when creating
a connection, this would cause dereferencing a NULL ptr.
It might happen on sockets that weren't properly bound before attempting to
send a message, which will cause a NULL ptr deref:
[135546.047719] kasan: GPF could be caused by NULL-ptr deref or user memory accessgeneral protection fault: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC KASAN
[135546.051270] Modules linked in:
[135546.051781] CPU: 4 PID: 15650 Comm: trinity-c4 Not tainted 4.2.0-next-20150902-sasha-00041-gbaa1222-dirty #2527
[135546.053217] task: ffff8800835bc000 ti: ffff8800bc708000 task.ti: ffff8800bc708000
[135546.054291] RIP: __rds_conn_create (net/rds/connection.c:194)
[135546.055666] RSP: 0018:ffff8800bc70fab0 EFLAGS: 00010202
[135546.056457] RAX: dffffc0000000000 RBX: 0000000000000f2c RCX: ffff8800835bc000
[135546.057494] RDX: 0000000000000007 RSI: ffff8800835bccd8 RDI: 0000000000000038
[135546.058530] RBP: ffff8800bc70fb18 R08: 0000000000000001 R09: 0000000000000000
[135546.059556] R10: ffffed014d7a3a23 R11: ffffed014d7a3a21 R12: 0000000000000000
[135546.060614] R13: 0000000000000001 R14: ffff8801ec3d0000 R15: 0000000000000000
[135546.061668] FS: 00007faad4ffb700(0000) GS:ffff880252000000(0000) knlGS:0000000000000000
[135546.062836] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
[135546.063682] CR2: 000000000000846a CR3: 000000009d137000 CR4: 00000000000006a0
[135546.064723] Stack:
[135546.065048] ffffffffafe2055c ffffffffafe23fc1 ffffed00493097bf ffff8801ec3d0008
[135546.066247] 0000000000000000 00000000000000d0 0000000000000000 ac194a24c0586342
[135546.067438] 1ffff100178e1f78 ffff880320581b00 ffff8800bc70fdd0 ffff880320581b00
[135546.068629] Call Trace:
[135546.069028] ? __rds_conn_create (include/linux/rcupdate.h:856 net/rds/connection.c:134)
[135546.069989] ? rds_message_copy_from_user (net/rds/message.c:298)
[135546.071021] rds_conn_create_outgoing (net/rds/connection.c:278)
[135546.071981] rds_sendmsg (net/rds/send.c:1058)
[135546.072858] ? perf_trace_lock (include/trace/events/lock.h:38)
[135546.073744] ? lockdep_init (kernel/locking/lockdep.c:3298)
[135546.074577] ? rds_send_drop_to (net/rds/send.c:976)
[135546.075508] ? __might_fault (./arch/x86/include/asm/current.h:14 mm/memory.c:3795)
[135546.076349] ? __might_fault (mm/memory.c:3795)
[135546.077179] ? rds_send_drop_to (net/rds/send.c:976)
[135546.078114] sock_sendmsg (net/socket.c:611 net/socket.c:620)
[135546.078856] SYSC_sendto (net/socket.c:1657)
[135546.079596] ? SYSC_connect (net/socket.c:1628)
[135546.080510] ? trace_dump_stack (kernel/trace/trace.c:1926)
[135546.081397] ? ring_buffer_unlock_commit (kernel/trace/ring_buffer.c:2479 kernel/trace/ring_buffer.c:2558 kernel/trace/ring_buffer.c:2674)
[135546.082390] ? trace_buffer_unlock_commit (kernel/trace/trace.c:1749)
[135546.083410] ? trace_event_raw_event_sys_enter (include/trace/events/syscalls.h:16)
[135546.084481] ? do_audit_syscall_entry (include/trace/events/syscalls.h:16)
[135546.085438] ? trace_buffer_unlock_commit (kernel/trace/trace.c:1749)
[135546.085515] rds_ib_laddr_check(): addr 36.74.25.172 ret -99 node type -1
Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
CWE ID: | static struct rds_connection *__rds_conn_create(struct net *net,
__be32 laddr, __be32 faddr,
struct rds_transport *trans, gfp_t gfp,
int is_outgoing)
{
struct rds_connection *conn, *parent = NULL;
struct hlist_head *head = rds_conn_bucket(laddr, faddr);
struct rds_transport *loop_trans;
unsigned long flags;
int ret;
struct rds_transport *otrans = trans;
if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
goto new_conn;
rcu_read_lock();
conn = rds_conn_lookup(net, head, laddr, faddr, trans);
if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
laddr == faddr && !is_outgoing) {
/* This is a looped back IB connection, and we're
* called by the code handling the incoming connect.
* We need a second connection object into which we
* can stick the other QP. */
parent = conn;
conn = parent->c_passive;
}
rcu_read_unlock();
if (conn)
goto out;
new_conn:
conn = kmem_cache_zalloc(rds_conn_slab, gfp);
if (!conn) {
conn = ERR_PTR(-ENOMEM);
goto out;
}
INIT_HLIST_NODE(&conn->c_hash_node);
conn->c_laddr = laddr;
conn->c_faddr = faddr;
spin_lock_init(&conn->c_lock);
conn->c_next_tx_seq = 1;
rds_conn_net_set(conn, net);
init_waitqueue_head(&conn->c_waitq);
INIT_LIST_HEAD(&conn->c_send_queue);
INIT_LIST_HEAD(&conn->c_retrans);
ret = rds_cong_get_maps(conn);
if (ret) {
kmem_cache_free(rds_conn_slab, conn);
conn = ERR_PTR(ret);
goto out;
}
/*
* This is where a connection becomes loopback. If *any* RDS sockets
* can bind to the destination address then we'd rather the messages
* flow through loopback rather than either transport.
*/
loop_trans = rds_trans_get_preferred(net, faddr);
if (loop_trans) {
rds_trans_put(loop_trans);
conn->c_loopback = 1;
if (is_outgoing && trans->t_prefer_loopback) {
/* "outgoing" connection - and the transport
* says it wants the connection handled by the
* loopback transport. This is what TCP does.
*/
trans = &rds_loop_transport;
}
}
if (trans == NULL) {
kmem_cache_free(rds_conn_slab, conn);
conn = ERR_PTR(-ENODEV);
goto out;
}
conn->c_trans = trans;
ret = trans->conn_alloc(conn, gfp);
if (ret) {
kmem_cache_free(rds_conn_slab, conn);
conn = ERR_PTR(ret);
goto out;
}
atomic_set(&conn->c_state, RDS_CONN_DOWN);
conn->c_send_gen = 0;
conn->c_reconnect_jiffies = 0;
INIT_DELAYED_WORK(&conn->c_send_w, rds_send_worker);
INIT_DELAYED_WORK(&conn->c_recv_w, rds_recv_worker);
INIT_DELAYED_WORK(&conn->c_conn_w, rds_connect_worker);
INIT_WORK(&conn->c_down_w, rds_shutdown_worker);
mutex_init(&conn->c_cm_lock);
conn->c_flags = 0;
rdsdebug("allocated conn %p for %pI4 -> %pI4 over %s %s\n",
conn, &laddr, &faddr,
trans->t_name ? trans->t_name : "[unknown]",
is_outgoing ? "(outgoing)" : "");
/*
* Since we ran without holding the conn lock, someone could
* have created the same conn (either normal or passive) in the
* interim. We check while holding the lock. If we won, we complete
* init and return our conn. If we lost, we rollback and return the
* other one.
*/
spin_lock_irqsave(&rds_conn_lock, flags);
if (parent) {
/* Creating passive conn */
if (parent->c_passive) {
trans->conn_free(conn->c_transport_data);
kmem_cache_free(rds_conn_slab, conn);
conn = parent->c_passive;
} else {
parent->c_passive = conn;
rds_cong_add_conn(conn);
rds_conn_count++;
}
} else {
/* Creating normal conn */
struct rds_connection *found;
if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
found = NULL;
else
found = rds_conn_lookup(net, head, laddr, faddr, trans);
if (found) {
trans->conn_free(conn->c_transport_data);
kmem_cache_free(rds_conn_slab, conn);
conn = found;
} else {
if ((is_outgoing && otrans->t_type == RDS_TRANS_TCP) ||
(otrans->t_type != RDS_TRANS_TCP)) {
/* Only the active side should be added to
* reconnect list for TCP.
*/
hlist_add_head_rcu(&conn->c_hash_node, head);
}
rds_cong_add_conn(conn);
rds_conn_count++;
}
}
spin_unlock_irqrestore(&rds_conn_lock, flags);
out:
return conn;
}
| 166,583 |
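The RDS row above reduces to one guard: if no underlying transport was resolved (for example, the socket was never properly bound), connection creation fails with -ENODEV instead of dereferencing a NULL trans pointer later on. A stripped-down user-space sketch of that pattern follows, in plain C with invented names, not kernel code.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct transport { const char *name; };

/* Hypothetical lookup that legitimately finds nothing for an
 * unbound or unsupported address. */
static struct transport *lookup_transport(int bound) {
    static struct transport tcp_like = { "tcp-like" };
    return bound ? &tcp_like : NULL;
}

static void *conn_create(int bound, int *err) {
    struct transport *trans = lookup_transport(bound);

    /* The essence of the fix: bail out before any trans-> access. */
    if (!trans) {
        *err = -ENODEV;
        return NULL;
    }

    void *conn = calloc(1, 64);
    if (!conn) {
        *err = -ENOMEM;
        return NULL;
    }
    /* ... per-transport setup using trans would follow here ... */
    *err = 0;
    return conn;
}

int main(void) {
    int err = 0;
    if (!conn_create(/* bound = */ 0, &err))
        printf("conn_create failed: %d (expect %d)\n", err, -ENODEV);
    return 0;
}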
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: long Cluster::CreateBlock(long long id,
long long pos, // absolute pos of payload
long long size, long long discard_padding) {
assert((id == 0x20) || (id == 0x23)); // BlockGroup or SimpleBlock
if (m_entries_count < 0) { // haven't parsed anything yet
assert(m_entries == NULL);
assert(m_entries_size == 0);
m_entries_size = 1024;
m_entries = new BlockEntry* [m_entries_size];
m_entries_count = 0;
} else {
assert(m_entries);
assert(m_entries_size > 0);
assert(m_entries_count <= m_entries_size);
if (m_entries_count >= m_entries_size) {
const long entries_size = 2 * m_entries_size;
BlockEntry** const entries = new BlockEntry* [entries_size];
assert(entries);
BlockEntry** src = m_entries;
BlockEntry** const src_end = src + m_entries_count;
BlockEntry** dst = entries;
while (src != src_end)
*dst++ = *src++;
delete[] m_entries;
m_entries = entries;
m_entries_size = entries_size;
}
}
if (id == 0x20) // BlockGroup ID
return CreateBlockGroup(pos, size, discard_padding);
else // SimpleBlock ID
return CreateSimpleBlock(pos, size);
}
Commit Message: external/libvpx/libwebm: Update snapshot
Update libwebm snapshot. This update contains security fixes from upstream.
Upstream git hash: 229f49347d19b0ca0941e072b199a242ef6c5f2b
BUG=23167726
Change-Id: Id3e140e7b31ae11294724b1ecfe2e9c83b4d4207
(cherry picked from commit d0281a15b3c6bd91756e453cc9398c5ef412d99a)
CWE ID: CWE-20 | long Cluster::CreateBlock(long long id,
long long pos, // absolute pos of payload
long long size, long long discard_padding) {
assert((id == 0x20) || (id == 0x23)); // BlockGroup or SimpleBlock
if (m_entries_count < 0) { // haven't parsed anything yet
assert(m_entries == NULL);
assert(m_entries_size == 0);
m_entries_size = 1024;
m_entries = new (std::nothrow) BlockEntry*[m_entries_size];
if (m_entries == NULL)
return -1;
m_entries_count = 0;
} else {
assert(m_entries);
assert(m_entries_size > 0);
assert(m_entries_count <= m_entries_size);
if (m_entries_count >= m_entries_size) {
const long entries_size = 2 * m_entries_size;
BlockEntry** const entries = new (std::nothrow) BlockEntry*[entries_size];
if (entries == NULL)
return -1;
BlockEntry** src = m_entries;
BlockEntry** const src_end = src + m_entries_count;
BlockEntry** dst = entries;
while (src != src_end)
*dst++ = *src++;
delete[] m_entries;
m_entries = entries;
m_entries_size = entries_size;
}
}
if (id == 0x20) // BlockGroup ID
return CreateBlockGroup(pos, size, discard_padding);
else // SimpleBlock ID
return CreateSimpleBlock(pos, size);
}
| 173,805 |
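In the libwebm row above, the fixed CreateBlock() swaps plain new plus assert() for new (std::nothrow) plus explicit NULL checks, so a failed allocation of the entries array is reported as -1 rather than relying on an assert that disappears in release builds. The same grow-and-check pattern is sketched below in C, with made-up names that are not part of the libwebm API.

#include <stdlib.h>
#include <string.h>

struct entry;                      /* opaque payload type */

struct entry_list {
    struct entry **items;
    long count;
    long capacity;
};

/* Ensure room for one more item; return -1 on allocation failure so the
 * caller can propagate the error instead of aborting. */
static int list_reserve(struct entry_list *l) {
    if (l->count < l->capacity)
        return 0;
    long new_cap = l->capacity ? 2 * l->capacity : 1024;
    struct entry **grown = malloc((size_t)new_cap * sizeof(*grown));
    if (grown == NULL)
        return -1;                 /* the nothrow-style check */
    if (l->items) {
        memcpy(grown, l->items, (size_t)l->count * sizeof(*grown));
        free(l->items);
    }
    l->items = grown;
    l->capacity = new_cap;
    return 0;
}

int main(void) {
    struct entry_list l = { NULL, 0, 0 };
    int rc = list_reserve(&l);
    free(l.items);
    return rc == 0 ? 0 : 1;
}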
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int ParseDsdiffHeaderConfig (FILE *infile, char *infilename, char *fourcc, WavpackContext *wpc, WavpackConfig *config)
{
int64_t infilesize, total_samples;
DFFFileHeader dff_file_header;
DFFChunkHeader dff_chunk_header;
uint32_t bcount;
infilesize = DoGetFileSize (infile);
memcpy (&dff_file_header, fourcc, 4);
if ((!DoReadFile (infile, ((char *) &dff_file_header) + 4, sizeof (DFFFileHeader) - 4, &bcount) ||
bcount != sizeof (DFFFileHeader) - 4) || strncmp (dff_file_header.formType, "DSD ", 4)) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &dff_file_header, sizeof (DFFFileHeader))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
#if 1 // this might be a little too picky...
WavpackBigEndianToNative (&dff_file_header, DFFFileHeaderFormat);
if (infilesize && !(config->qmode & QMODE_IGNORE_LENGTH) &&
dff_file_header.ckDataSize && dff_file_header.ckDataSize + 1 && dff_file_header.ckDataSize + 12 != infilesize) {
error_line ("%s is not a valid .DFF file (by total size)!", infilename);
return WAVPACK_SOFT_ERROR;
}
if (debug_logging_mode)
error_line ("file header indicated length = %lld", dff_file_header.ckDataSize);
#endif
while (1) {
if (!DoReadFile (infile, &dff_chunk_header, sizeof (DFFChunkHeader), &bcount) ||
bcount != sizeof (DFFChunkHeader)) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &dff_chunk_header, sizeof (DFFChunkHeader))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
WavpackBigEndianToNative (&dff_chunk_header, DFFChunkHeaderFormat);
if (debug_logging_mode)
error_line ("chunk header indicated length = %lld", dff_chunk_header.ckDataSize);
if (!strncmp (dff_chunk_header.ckID, "FVER", 4)) {
uint32_t version;
if (dff_chunk_header.ckDataSize != sizeof (version) ||
!DoReadFile (infile, &version, sizeof (version), &bcount) ||
bcount != sizeof (version)) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &version, sizeof (version))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
WavpackBigEndianToNative (&version, "L");
if (debug_logging_mode)
error_line ("dsdiff file version = 0x%08x", version);
}
else if (!strncmp (dff_chunk_header.ckID, "PROP", 4)) {
char *prop_chunk;
if (dff_chunk_header.ckDataSize < 4 || dff_chunk_header.ckDataSize > 1024) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
if (debug_logging_mode)
error_line ("got PROP chunk of %d bytes total", (int) dff_chunk_header.ckDataSize);
prop_chunk = malloc ((size_t) dff_chunk_header.ckDataSize);
if (!DoReadFile (infile, prop_chunk, (uint32_t) dff_chunk_header.ckDataSize, &bcount) ||
bcount != dff_chunk_header.ckDataSize) {
error_line ("%s is not a valid .DFF file!", infilename);
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, prop_chunk, (uint32_t) dff_chunk_header.ckDataSize)) {
error_line ("%s", WavpackGetErrorMessage (wpc));
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
if (!strncmp (prop_chunk, "SND ", 4)) {
char *cptr = prop_chunk + 4, *eptr = prop_chunk + dff_chunk_header.ckDataSize;
uint16_t numChannels, chansSpecified, chanMask = 0;
uint32_t sampleRate;
while (eptr - cptr >= sizeof (dff_chunk_header)) {
memcpy (&dff_chunk_header, cptr, sizeof (dff_chunk_header));
cptr += sizeof (dff_chunk_header);
WavpackBigEndianToNative (&dff_chunk_header, DFFChunkHeaderFormat);
if (dff_chunk_header.ckDataSize > 0 && dff_chunk_header.ckDataSize <= eptr - cptr) {
if (!strncmp (dff_chunk_header.ckID, "FS ", 4) && dff_chunk_header.ckDataSize == 4) {
memcpy (&sampleRate, cptr, sizeof (sampleRate));
WavpackBigEndianToNative (&sampleRate, "L");
cptr += dff_chunk_header.ckDataSize;
if (debug_logging_mode)
error_line ("got sample rate of %u Hz", sampleRate);
}
else if (!strncmp (dff_chunk_header.ckID, "CHNL", 4) && dff_chunk_header.ckDataSize >= 2) {
memcpy (&numChannels, cptr, sizeof (numChannels));
WavpackBigEndianToNative (&numChannels, "S");
cptr += sizeof (numChannels);
chansSpecified = (int)(dff_chunk_header.ckDataSize - sizeof (numChannels)) / 4;
if (numChannels < chansSpecified || numChannels < 1) {
error_line ("%s is not a valid .DFF file!", infilename);
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
while (chansSpecified--) {
if (!strncmp (cptr, "SLFT", 4) || !strncmp (cptr, "MLFT", 4))
chanMask |= 0x1;
else if (!strncmp (cptr, "SRGT", 4) || !strncmp (cptr, "MRGT", 4))
chanMask |= 0x2;
else if (!strncmp (cptr, "LS ", 4))
chanMask |= 0x10;
else if (!strncmp (cptr, "RS ", 4))
chanMask |= 0x20;
else if (!strncmp (cptr, "C ", 4))
chanMask |= 0x4;
else if (!strncmp (cptr, "LFE ", 4))
chanMask |= 0x8;
else
if (debug_logging_mode)
error_line ("undefined channel ID %c%c%c%c", cptr [0], cptr [1], cptr [2], cptr [3]);
cptr += 4;
}
if (debug_logging_mode)
error_line ("%d channels, mask = 0x%08x", numChannels, chanMask);
}
else if (!strncmp (dff_chunk_header.ckID, "CMPR", 4) && dff_chunk_header.ckDataSize >= 4) {
if (strncmp (cptr, "DSD ", 4)) {
error_line ("DSDIFF files must be uncompressed, not \"%c%c%c%c\"!",
cptr [0], cptr [1], cptr [2], cptr [3]);
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
cptr += dff_chunk_header.ckDataSize;
}
else {
if (debug_logging_mode)
error_line ("got PROP/SND chunk type \"%c%c%c%c\" of %d bytes", dff_chunk_header.ckID [0],
dff_chunk_header.ckID [1], dff_chunk_header.ckID [2], dff_chunk_header.ckID [3], dff_chunk_header.ckDataSize);
cptr += dff_chunk_header.ckDataSize;
}
}
else {
error_line ("%s is not a valid .DFF file!", infilename);
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
}
if (chanMask && (config->channel_mask || (config->qmode & QMODE_CHANS_UNASSIGNED))) {
error_line ("this DSDIFF file already has channel order information!");
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
else if (chanMask)
config->channel_mask = chanMask;
config->bits_per_sample = 8;
config->bytes_per_sample = 1;
config->num_channels = numChannels;
config->sample_rate = sampleRate / 8;
config->qmode |= QMODE_DSD_MSB_FIRST;
}
else if (debug_logging_mode)
error_line ("got unknown PROP chunk type \"%c%c%c%c\" of %d bytes",
prop_chunk [0], prop_chunk [1], prop_chunk [2], prop_chunk [3], dff_chunk_header.ckDataSize);
free (prop_chunk);
}
else if (!strncmp (dff_chunk_header.ckID, "DSD ", 4)) {
total_samples = dff_chunk_header.ckDataSize / config->num_channels;
break;
}
else { // just copy unknown chunks to output file
int bytes_to_copy = (int)(((dff_chunk_header.ckDataSize) + 1) & ~(int64_t)1);
char *buff;
if (bytes_to_copy < 0 || bytes_to_copy > 4194304) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
buff = malloc (bytes_to_copy);
if (debug_logging_mode)
error_line ("extra unknown chunk \"%c%c%c%c\" of %d bytes",
dff_chunk_header.ckID [0], dff_chunk_header.ckID [1], dff_chunk_header.ckID [2],
dff_chunk_header.ckID [3], dff_chunk_header.ckDataSize);
if (!DoReadFile (infile, buff, bytes_to_copy, &bcount) ||
bcount != bytes_to_copy ||
(!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, buff, bytes_to_copy))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
free (buff);
return WAVPACK_SOFT_ERROR;
}
free (buff);
}
}
if (debug_logging_mode)
error_line ("setting configuration with %lld samples", total_samples);
if (!WavpackSetConfiguration64 (wpc, config, total_samples, NULL)) {
error_line ("%s: %s", infilename, WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
return WAVPACK_NO_ERROR;
}
Commit Message: issue #65: make sure DSDIFF files have a valid channel count
CWE ID: CWE-369 | int ParseDsdiffHeaderConfig (FILE *infile, char *infilename, char *fourcc, WavpackContext *wpc, WavpackConfig *config)
{
int64_t infilesize, total_samples;
DFFFileHeader dff_file_header;
DFFChunkHeader dff_chunk_header;
uint32_t bcount;
infilesize = DoGetFileSize (infile);
memcpy (&dff_file_header, fourcc, 4);
if ((!DoReadFile (infile, ((char *) &dff_file_header) + 4, sizeof (DFFFileHeader) - 4, &bcount) ||
bcount != sizeof (DFFFileHeader) - 4) || strncmp (dff_file_header.formType, "DSD ", 4)) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &dff_file_header, sizeof (DFFFileHeader))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
#if 1 // this might be a little too picky...
WavpackBigEndianToNative (&dff_file_header, DFFFileHeaderFormat);
if (infilesize && !(config->qmode & QMODE_IGNORE_LENGTH) &&
dff_file_header.ckDataSize && dff_file_header.ckDataSize + 1 && dff_file_header.ckDataSize + 12 != infilesize) {
error_line ("%s is not a valid .DFF file (by total size)!", infilename);
return WAVPACK_SOFT_ERROR;
}
if (debug_logging_mode)
error_line ("file header indicated length = %lld", dff_file_header.ckDataSize);
#endif
while (1) {
if (!DoReadFile (infile, &dff_chunk_header, sizeof (DFFChunkHeader), &bcount) ||
bcount != sizeof (DFFChunkHeader)) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &dff_chunk_header, sizeof (DFFChunkHeader))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
WavpackBigEndianToNative (&dff_chunk_header, DFFChunkHeaderFormat);
if (debug_logging_mode)
error_line ("chunk header indicated length = %lld", dff_chunk_header.ckDataSize);
if (!strncmp (dff_chunk_header.ckID, "FVER", 4)) {
uint32_t version;
if (dff_chunk_header.ckDataSize != sizeof (version) ||
!DoReadFile (infile, &version, sizeof (version), &bcount) ||
bcount != sizeof (version)) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &version, sizeof (version))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
WavpackBigEndianToNative (&version, "L");
if (debug_logging_mode)
error_line ("dsdiff file version = 0x%08x", version);
}
else if (!strncmp (dff_chunk_header.ckID, "PROP", 4)) {
char *prop_chunk;
if (dff_chunk_header.ckDataSize < 4 || dff_chunk_header.ckDataSize > 1024) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
if (debug_logging_mode)
error_line ("got PROP chunk of %d bytes total", (int) dff_chunk_header.ckDataSize);
prop_chunk = malloc ((size_t) dff_chunk_header.ckDataSize);
if (!DoReadFile (infile, prop_chunk, (uint32_t) dff_chunk_header.ckDataSize, &bcount) ||
bcount != dff_chunk_header.ckDataSize) {
error_line ("%s is not a valid .DFF file!", infilename);
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, prop_chunk, (uint32_t) dff_chunk_header.ckDataSize)) {
error_line ("%s", WavpackGetErrorMessage (wpc));
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
if (!strncmp (prop_chunk, "SND ", 4)) {
char *cptr = prop_chunk + 4, *eptr = prop_chunk + dff_chunk_header.ckDataSize;
uint16_t numChannels = 0, chansSpecified, chanMask = 0;
uint32_t sampleRate;
while (eptr - cptr >= sizeof (dff_chunk_header)) {
memcpy (&dff_chunk_header, cptr, sizeof (dff_chunk_header));
cptr += sizeof (dff_chunk_header);
WavpackBigEndianToNative (&dff_chunk_header, DFFChunkHeaderFormat);
if (dff_chunk_header.ckDataSize > 0 && dff_chunk_header.ckDataSize <= eptr - cptr) {
if (!strncmp (dff_chunk_header.ckID, "FS ", 4) && dff_chunk_header.ckDataSize == 4) {
memcpy (&sampleRate, cptr, sizeof (sampleRate));
WavpackBigEndianToNative (&sampleRate, "L");
cptr += dff_chunk_header.ckDataSize;
if (debug_logging_mode)
error_line ("got sample rate of %u Hz", sampleRate);
}
else if (!strncmp (dff_chunk_header.ckID, "CHNL", 4) && dff_chunk_header.ckDataSize >= 2) {
memcpy (&numChannels, cptr, sizeof (numChannels));
WavpackBigEndianToNative (&numChannels, "S");
cptr += sizeof (numChannels);
chansSpecified = (int)(dff_chunk_header.ckDataSize - sizeof (numChannels)) / 4;
if (numChannels < chansSpecified || numChannels < 1 || numChannels > 256) {
error_line ("%s is not a valid .DFF file!", infilename);
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
while (chansSpecified--) {
if (!strncmp (cptr, "SLFT", 4) || !strncmp (cptr, "MLFT", 4))
chanMask |= 0x1;
else if (!strncmp (cptr, "SRGT", 4) || !strncmp (cptr, "MRGT", 4))
chanMask |= 0x2;
else if (!strncmp (cptr, "LS ", 4))
chanMask |= 0x10;
else if (!strncmp (cptr, "RS ", 4))
chanMask |= 0x20;
else if (!strncmp (cptr, "C ", 4))
chanMask |= 0x4;
else if (!strncmp (cptr, "LFE ", 4))
chanMask |= 0x8;
else
if (debug_logging_mode)
error_line ("undefined channel ID %c%c%c%c", cptr [0], cptr [1], cptr [2], cptr [3]);
cptr += 4;
}
if (debug_logging_mode)
error_line ("%d channels, mask = 0x%08x", numChannels, chanMask);
}
else if (!strncmp (dff_chunk_header.ckID, "CMPR", 4) && dff_chunk_header.ckDataSize >= 4) {
if (strncmp (cptr, "DSD ", 4)) {
error_line ("DSDIFF files must be uncompressed, not \"%c%c%c%c\"!",
cptr [0], cptr [1], cptr [2], cptr [3]);
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
cptr += dff_chunk_header.ckDataSize;
}
else {
if (debug_logging_mode)
error_line ("got PROP/SND chunk type \"%c%c%c%c\" of %d bytes", dff_chunk_header.ckID [0],
dff_chunk_header.ckID [1], dff_chunk_header.ckID [2], dff_chunk_header.ckID [3], dff_chunk_header.ckDataSize);
cptr += dff_chunk_header.ckDataSize;
}
}
else {
error_line ("%s is not a valid .DFF file!", infilename);
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
}
if (chanMask && (config->channel_mask || (config->qmode & QMODE_CHANS_UNASSIGNED))) {
error_line ("this DSDIFF file already has channel order information!");
free (prop_chunk);
return WAVPACK_SOFT_ERROR;
}
else if (chanMask)
config->channel_mask = chanMask;
config->bits_per_sample = 8;
config->bytes_per_sample = 1;
config->num_channels = numChannels;
config->sample_rate = sampleRate / 8;
config->qmode |= QMODE_DSD_MSB_FIRST;
}
else if (debug_logging_mode)
error_line ("got unknown PROP chunk type \"%c%c%c%c\" of %d bytes",
prop_chunk [0], prop_chunk [1], prop_chunk [2], prop_chunk [3], dff_chunk_header.ckDataSize);
free (prop_chunk);
}
else if (!strncmp (dff_chunk_header.ckID, "DSD ", 4)) {
if (!config->num_channels) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
total_samples = dff_chunk_header.ckDataSize / config->num_channels;
break;
}
else { // just copy unknown chunks to output file
int bytes_to_copy = (int)(((dff_chunk_header.ckDataSize) + 1) & ~(int64_t)1);
char *buff;
if (bytes_to_copy < 0 || bytes_to_copy > 4194304) {
error_line ("%s is not a valid .DFF file!", infilename);
return WAVPACK_SOFT_ERROR;
}
buff = malloc (bytes_to_copy);
if (debug_logging_mode)
error_line ("extra unknown chunk \"%c%c%c%c\" of %d bytes",
dff_chunk_header.ckID [0], dff_chunk_header.ckID [1], dff_chunk_header.ckID [2],
dff_chunk_header.ckID [3], dff_chunk_header.ckDataSize);
if (!DoReadFile (infile, buff, bytes_to_copy, &bcount) ||
bcount != bytes_to_copy ||
(!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, buff, bytes_to_copy))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
free (buff);
return WAVPACK_SOFT_ERROR;
}
free (buff);
}
}
if (debug_logging_mode)
error_line ("setting configuration with %lld samples", total_samples);
if (!WavpackSetConfiguration64 (wpc, config, total_samples, NULL)) {
error_line ("%s: %s", infilename, WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
return WAVPACK_NO_ERROR;
}
| 169,463 |
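The CWE-369 issue in the row above is a division by an unvalidated channel count; the patch rejects CHNL values outside 1..256 and refuses to reach the DSD division when no channel count was ever seen. A minimal standalone sketch of that guard follows; the function name and limits are illustrative assumptions, not WavPack's API.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical guard: compute samples-per-channel only after the channel
 * count is known to be non-zero and plausible, so the division can never
 * trap (CWE-369). Returns 0 on success, -1 on rejection. */
static int samples_per_channel(uint64_t data_size, unsigned num_channels,
                               uint64_t *out)
{
    if (num_channels < 1 || num_channels > 256)
        return -1;                      /* unset, zero or absurd count */
    *out = data_size / num_channels;    /* divisor is now provably >= 1 */
    return 0;
}

int main(void)
{
    uint64_t n;
    if (samples_per_channel(4096, 0, &n) != 0)
        puts("rejected: invalid channel count");
    return 0;
}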
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: long ZEXPORT inflateMark(strm)
z_streamp strm;
{
struct inflate_state FAR *state;
if (strm == Z_NULL || strm->state == Z_NULL) return -1L << 16;
state = (struct inflate_state FAR *)strm->state;
return ((long)(state->back) << 16) +
(state->mode == COPY ? state->length :
(state->mode == MATCH ? state->was - state->length : 0));
}
Commit Message: Avoid shifts of negative values inflateMark().
The C standard says that bit shifts of negative integers is
undefined. This casts to unsigned values to assure a known
result.
CWE ID: CWE-189 | long ZEXPORT inflateMark(strm)
z_streamp strm;
{
struct inflate_state FAR *state;
if (strm == Z_NULL || strm->state == Z_NULL)
return (long)(((unsigned long)0 - 1) << 16);
state = (struct inflate_state FAR *)strm->state;
return (long)(((unsigned long)((long)state->back)) << 16) +
(state->mode == COPY ? state->length :
(state->mode == MATCH ? state->was - state->length : 0));
}
| 168,673 |
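The inflateMark() change above avoids undefined behaviour from left-shifting a negative signed value by shifting its unsigned representation instead. The same idiom in isolation, under hypothetical names rather than zlib's:

#include <stdio.h>

/* Pack a possibly-negative 'back' count into the high 16 bits of a long.
 * Shifting the negative long directly is undefined; shifting its unsigned
 * representation is well defined (the conversion back to long is merely
 * implementation-defined, which is what the fix above relies on). */
static long pack_mark(long back, unsigned long low_bits)
{
    return (long)(((unsigned long)back) << 16) + (long)(low_bits & 0xFFFF);
}

int main(void)
{
    printf("0x%lx\n", (unsigned long)pack_mark(-1L, 5));
    return 0;
}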
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static inline bool elementCanUseSimpleDefaultStyle(Element* e)
{
return isHTMLHtmlElement(e) || e->hasTagName(headTag) || e->hasTagName(bodyTag) || e->hasTagName(divTag) || e->hasTagName(spanTag) || e->hasTagName(brTag) || isHTMLAnchorElement(e);
}
Commit Message: Remove the Simple Default Stylesheet, it's just a foot-gun.
We've been bitten by the Simple Default Stylesheet being out
of sync with the real html.css twice this week.
The Simple Default Stylesheet was invented years ago for Mac:
http://trac.webkit.org/changeset/36135
It nicely handles the case where you just want to create
a single WebView and parse some simple HTML either without
styling said HTML, or only to display a small string, etc.
Note that this optimization/complexity *only* helps for the
very first document, since the default stylesheets are
all static (process-global) variables. Since any real page
on the internet uses a tag not covered by the simple default
stylesheet, not real load benefits from this optimization.
Only uses of WebView which were just rendering small bits
of text might have benefited from this. about:blank would
also have used this sheet.
This was a common application for some uses of WebView back
in those days. These days, even with WebView on Android,
there are likely much larger overheads than parsing the
html.css stylesheet, so making it required seems like the
right tradeoff of code-simplicity for this case.
BUG=319556
Review URL: https://codereview.chromium.org/73723005
git-svn-id: svn://svn.chromium.org/blink/trunk@162153 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-399 | static inline bool elementCanUseSimpleDefaultStyle(Element* e)
| 171,578 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: ModuleExport size_t RegisterMPCImage(void)
{
MagickInfo
*entry;
entry=SetMagickInfo("CACHE");
entry->description=ConstantString("Magick Persistent Cache image format");
entry->module=ConstantString("MPC");
entry->stealth=MagickTrue;
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("MPC");
entry->decoder=(DecodeImageHandler *) ReadMPCImage;
entry->encoder=(EncodeImageHandler *) WriteMPCImage;
entry->magick=(IsImageFormatHandler *) IsMPC;
entry->description=ConstantString("Magick Persistent Cache image format");
entry->module=ConstantString("MPC");
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
Commit Message: ...
CWE ID: CWE-20 | ModuleExport size_t RegisterMPCImage(void)
{
MagickInfo
*entry;
entry=SetMagickInfo("CACHE");
entry->description=ConstantString("Magick Persistent Cache image format");
entry->module=ConstantString("MPC");
entry->seekable_stream=MagickTrue;
entry->stealth=MagickTrue;
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("MPC");
entry->decoder=(DecodeImageHandler *) ReadMPCImage;
entry->encoder=(EncodeImageHandler *) WriteMPCImage;
entry->magick=(IsImageFormatHandler *) IsMPC;
entry->description=ConstantString("Magick Persistent Cache image format");
entry->seekable_stream=MagickTrue;
entry->module=ConstantString("MPC");
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
| 168,035 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static void rng_egd_request_entropy(RngBackend *b, size_t size,
EntropyReceiveFunc *receive_entropy,
void *opaque)
{
RngEgd *s = RNG_EGD(b);
RngRequest *req;
req = g_malloc(sizeof(*req));
req->offset = 0;
req->size = size;
req->receive_entropy = receive_entropy;
req->opaque = opaque;
req->data = g_malloc(req->size);
while (size > 0) {
uint8_t header[2];
req = g_malloc(sizeof(*req));
req->offset = 0;
req->size = size;
req->receive_entropy = receive_entropy;
req->opaque = opaque;
req->data = g_malloc(req->size);
size -= len;
}
s->parent.requests = g_slist_append(s->parent.requests, req);
}
Commit Message:
CWE ID: CWE-119 | static void rng_egd_request_entropy(RngBackend *b, size_t size,
static void rng_egd_request_entropy(RngBackend *b, RngRequest *req)
{
RngEgd *s = RNG_EGD(b);
size_t size = req->size;
while (size > 0) {
uint8_t header[2];
req = g_malloc(sizeof(*req));
req->offset = 0;
req->size = size;
req->receive_entropy = receive_entropy;
req->opaque = opaque;
req->data = g_malloc(req->size);
size -= len;
}
}
| 165,180 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
struct vm_area_struct *vma;
struct task_struct *tsk;
unsigned long address;
struct mm_struct *mm;
int fault;
int write = error_code & PF_WRITE;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
(write ? FAULT_FLAG_WRITE : 0);
tsk = current;
mm = tsk->mm;
/* Get the faulting address: */
address = read_cr2();
/*
* Detect and handle instructions that would cause a page fault for
* both a tracked kernel page and a userspace page.
*/
if (kmemcheck_active(regs))
kmemcheck_hide(regs);
prefetchw(&mm->mmap_sem);
if (unlikely(kmmio_fault(regs, address)))
return;
/*
* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
*
* NOTE! We MUST NOT take any locks for this case. We may
* be in an interrupt or a critical region, and should
* only copy the information from the master page table,
* nothing more.
*
* This verifies that the fault happens in kernel space
* (error_code & 4) == 0, and that the fault was not a
* protection error (error_code & 9) == 0.
*/
if (unlikely(fault_in_kernel_space(address))) {
if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
if (vmalloc_fault(address) >= 0)
return;
if (kmemcheck_fault(regs, address, error_code))
return;
}
/* Can handle a stale RO->RW TLB: */
if (spurious_fault(error_code, address))
return;
/* kprobes don't want to hook the spurious faults: */
if (notify_page_fault(regs))
return;
/*
* Don't take the mm semaphore here. If we fixup a prefetch
* fault we could otherwise deadlock:
*/
bad_area_nosemaphore(regs, error_code, address);
return;
}
/* kprobes don't want to hook the spurious faults: */
if (unlikely(notify_page_fault(regs)))
return;
/*
* It's safe to allow irq's after cr2 has been saved and the
* vmalloc fault has been handled.
*
* User-mode registers count as a user access even for any
* potential system fault or CPU buglet:
*/
if (user_mode_vm(regs)) {
local_irq_enable();
error_code |= PF_USER;
} else {
if (regs->flags & X86_EFLAGS_IF)
local_irq_enable();
}
if (unlikely(error_code & PF_RSVD))
pgtable_bad(regs, error_code, address);
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
/*
* If we're in an interrupt, have no user context or are running
* in an atomic region then we must not take the fault:
*/
if (unlikely(in_atomic() || !mm)) {
bad_area_nosemaphore(regs, error_code, address);
return;
}
/*
* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in
* the kernel and should generate an OOPS. Unfortunately, in the
* case of an erroneous fault occurring in a code path which already
* holds mmap_sem we will deadlock attempting to validate the fault
* against the address space. Luckily the kernel only validly
* references user space from well defined areas of code, which are
* listed in the exceptions table.
*
* As the vast majority of faults will be valid we will only perform
* the source reference check when there is a possibility of a
* deadlock. Attempt to lock the address space, if we cannot we then
* validate the source. If this is invalid we can skip the address
* space check, thus avoiding the deadlock:
*/
if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
if ((error_code & PF_USER) == 0 &&
!search_exception_tables(regs->ip)) {
bad_area_nosemaphore(regs, error_code, address);
return;
}
retry:
down_read(&mm->mmap_sem);
} else {
/*
* The above down_read_trylock() might have succeeded in
* which case we'll have missed the might_sleep() from
* down_read():
*/
might_sleep();
}
vma = find_vma(mm, address);
if (unlikely(!vma)) {
bad_area(regs, error_code, address);
return;
}
if (likely(vma->vm_start <= address))
goto good_area;
if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
bad_area(regs, error_code, address);
return;
}
if (error_code & PF_USER) {
/*
* Accessing the stack below %sp is always a bug.
* The large cushion allows instructions like enter
* and pusha to work. ("enter $65535, $31" pushes
* 32 pointers and then decrements %sp by 65535.)
*/
if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
bad_area(regs, error_code, address);
return;
}
}
if (unlikely(expand_stack(vma, address))) {
bad_area(regs, error_code, address);
return;
}
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
if (unlikely(access_error(error_code, vma))) {
bad_area_access_error(regs, error_code, address);
return;
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault:
*/
fault = handle_mm_fault(mm, vma, address, flags);
if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
if (mm_fault_error(regs, error_code, address, fault))
return;
}
/*
* Major/minor page fault accounting is only done on the
* initial attempt. If we go through a retry, it is extremely
* likely that the page will be found in page cache at that point.
*/
if (flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
} else {
tsk->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
if (fault & VM_FAULT_RETRY) {
/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
* of starvation. */
flags &= ~FAULT_FLAG_ALLOW_RETRY;
goto retry;
}
}
check_v8086_mode(regs, address, tsk);
up_read(&mm->mmap_sem);
}
Commit Message: perf: Remove the nmi parameter from the swevent and overflow interface
The nmi parameter indicated if we could do wakeups from the current
context, if not, we would set some state and self-IPI and let the
resulting interrupt do the wakeup.
For the various event classes:
- hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from
the PMI-tail (ARM etc.)
- tracepoint: nmi=0; since tracepoint could be from NMI context.
- software: nmi=[0,1]; some, like the schedule thing cannot
perform wakeups, and hence need 0.
As one can see, there is very little nmi=1 usage, and the down-side of
not using it is that on some platforms some software events can have a
jiffy delay in wakeup (when arch_irq_work_raise isn't implemented).
The up-side however is that we can remove the nmi parameter and save a
bunch of conditionals in fast paths.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Michael Cree <mcree@orcon.net.nz>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jason Wessel <jason.wessel@windriver.com>
Cc: Don Zickus <dzickus@redhat.com>
Link: http://lkml.kernel.org/n/tip-agjev8eu666tvknpb3iaj0fg@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
CWE ID: CWE-399 | do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
struct vm_area_struct *vma;
struct task_struct *tsk;
unsigned long address;
struct mm_struct *mm;
int fault;
int write = error_code & PF_WRITE;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
(write ? FAULT_FLAG_WRITE : 0);
tsk = current;
mm = tsk->mm;
/* Get the faulting address: */
address = read_cr2();
/*
* Detect and handle instructions that would cause a page fault for
* both a tracked kernel page and a userspace page.
*/
if (kmemcheck_active(regs))
kmemcheck_hide(regs);
prefetchw(&mm->mmap_sem);
if (unlikely(kmmio_fault(regs, address)))
return;
/*
* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
*
* NOTE! We MUST NOT take any locks for this case. We may
* be in an interrupt or a critical region, and should
* only copy the information from the master page table,
* nothing more.
*
* This verifies that the fault happens in kernel space
* (error_code & 4) == 0, and that the fault was not a
* protection error (error_code & 9) == 0.
*/
if (unlikely(fault_in_kernel_space(address))) {
if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
if (vmalloc_fault(address) >= 0)
return;
if (kmemcheck_fault(regs, address, error_code))
return;
}
/* Can handle a stale RO->RW TLB: */
if (spurious_fault(error_code, address))
return;
/* kprobes don't want to hook the spurious faults: */
if (notify_page_fault(regs))
return;
/*
* Don't take the mm semaphore here. If we fixup a prefetch
* fault we could otherwise deadlock:
*/
bad_area_nosemaphore(regs, error_code, address);
return;
}
/* kprobes don't want to hook the spurious faults: */
if (unlikely(notify_page_fault(regs)))
return;
/*
* It's safe to allow irq's after cr2 has been saved and the
* vmalloc fault has been handled.
*
* User-mode registers count as a user access even for any
* potential system fault or CPU buglet:
*/
if (user_mode_vm(regs)) {
local_irq_enable();
error_code |= PF_USER;
} else {
if (regs->flags & X86_EFLAGS_IF)
local_irq_enable();
}
if (unlikely(error_code & PF_RSVD))
pgtable_bad(regs, error_code, address);
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
/*
* If we're in an interrupt, have no user context or are running
* in an atomic region then we must not take the fault:
*/
if (unlikely(in_atomic() || !mm)) {
bad_area_nosemaphore(regs, error_code, address);
return;
}
/*
* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in
* the kernel and should generate an OOPS. Unfortunately, in the
* case of an erroneous fault occurring in a code path which already
* holds mmap_sem we will deadlock attempting to validate the fault
* against the address space. Luckily the kernel only validly
* references user space from well defined areas of code, which are
* listed in the exceptions table.
*
* As the vast majority of faults will be valid we will only perform
* the source reference check when there is a possibility of a
* deadlock. Attempt to lock the address space, if we cannot we then
* validate the source. If this is invalid we can skip the address
* space check, thus avoiding the deadlock:
*/
if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
if ((error_code & PF_USER) == 0 &&
!search_exception_tables(regs->ip)) {
bad_area_nosemaphore(regs, error_code, address);
return;
}
retry:
down_read(&mm->mmap_sem);
} else {
/*
* The above down_read_trylock() might have succeeded in
* which case we'll have missed the might_sleep() from
* down_read():
*/
might_sleep();
}
vma = find_vma(mm, address);
if (unlikely(!vma)) {
bad_area(regs, error_code, address);
return;
}
if (likely(vma->vm_start <= address))
goto good_area;
if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
bad_area(regs, error_code, address);
return;
}
if (error_code & PF_USER) {
/*
* Accessing the stack below %sp is always a bug.
* The large cushion allows instructions like enter
* and pusha to work. ("enter $65535, $31" pushes
* 32 pointers and then decrements %sp by 65535.)
*/
if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
bad_area(regs, error_code, address);
return;
}
}
if (unlikely(expand_stack(vma, address))) {
bad_area(regs, error_code, address);
return;
}
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
if (unlikely(access_error(error_code, vma))) {
bad_area_access_error(regs, error_code, address);
return;
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault:
*/
fault = handle_mm_fault(mm, vma, address, flags);
if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
if (mm_fault_error(regs, error_code, address, fault))
return;
}
/*
* Major/minor page fault accounting is only done on the
* initial attempt. If we go through a retry, it is extremely
* likely that the page will be found in page cache at that point.
*/
if (flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
regs, address);
} else {
tsk->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
regs, address);
}
if (fault & VM_FAULT_RETRY) {
/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
* of starvation. */
flags &= ~FAULT_FLAG_ALLOW_RETRY;
goto retry;
}
}
check_v8086_mode(regs, address, tsk);
up_read(&mm->mmap_sem);
}
| 165,825 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: MagickExport MemoryInfo *RelinquishVirtualMemory(MemoryInfo *memory_info)
{
assert(memory_info != (MemoryInfo *) NULL);
assert(memory_info->signature == MagickSignature);
if (memory_info->blob != (void *) NULL)
switch (memory_info->type)
{
case AlignedVirtualMemory:
{
memory_info->blob=RelinquishAlignedMemory(memory_info->blob);
RelinquishMagickResource(MemoryResource,memory_info->length);
break;
}
case MapVirtualMemory:
{
(void) UnmapBlob(memory_info->blob,memory_info->length);
memory_info->blob=NULL;
RelinquishMagickResource(MapResource,memory_info->length);
if (*memory_info->filename != '\0')
(void) RelinquishUniqueFileResource(memory_info->filename);
break;
}
case UnalignedVirtualMemory:
default:
{
memory_info->blob=RelinquishMagickMemory(memory_info->blob);
break;
}
}
memory_info->signature=(~MagickSignature);
memory_info=(MemoryInfo *) RelinquishAlignedMemory(memory_info);
return(memory_info);
}
Commit Message:
CWE ID: CWE-189 | MagickExport MemoryInfo *RelinquishVirtualMemory(MemoryInfo *memory_info)
{
assert(memory_info != (MemoryInfo *) NULL);
assert(memory_info->signature == MagickSignature);
if (memory_info->blob != (void *) NULL)
switch (memory_info->type)
{
case AlignedVirtualMemory:
{
memory_info->blob=RelinquishAlignedMemory(memory_info->blob);
RelinquishMagickResource(MemoryResource,memory_info->length);
break;
}
case MapVirtualMemory:
{
(void) UnmapBlob(memory_info->blob,memory_info->length);
memory_info->blob=NULL;
RelinquishMagickResource(MapResource,memory_info->length);
if (*memory_info->filename != '\0')
{
(void) RelinquishUniqueFileResource(memory_info->filename);
RelinquishMagickResource(DiskResource,memory_info->length);
}
break;
}
case UnalignedVirtualMemory:
default:
{
memory_info->blob=RelinquishMagickMemory(memory_info->blob);
break;
}
}
memory_info->signature=(~MagickSignature);
memory_info=(MemoryInfo *) RelinquishAlignedMemory(memory_info);
return(memory_info);
}
| 168,860 |
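The DiskResource release added above pairs the cleanup path with the acquisition made when the disk-backed mapping was created; without it the accounted disk usage can only grow. A toy sketch of that pairing with invented names, not ImageMagick's resource API:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Invented accounting counter standing in for a resource limiter. */
static uint64_t disk_in_use;

static bool acquire_disk(uint64_t n) { disk_in_use += n; return true; }
static void  release_disk(uint64_t n) { disk_in_use -= n; }

/* Cleanup must release exactly what the creation path acquired,
 * including on the branch that removes the backing file. */
static void destroy_mapping(uint64_t length, bool had_backing_file)
{
    /* ... unmap and unlink would happen here ... */
    if (had_backing_file)
        release_disk(length);
}

int main(void)
{
    acquire_disk(1024);
    destroy_mapping(1024, true);
    printf("disk_in_use = %llu\n", (unsigned long long)disk_in_use);
    return 0;
}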
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static uint32_t select_lease_time(struct dhcp_packet *packet)
{
uint32_t lease_time_sec = server_config.max_lease_sec;
uint8_t *lease_time_opt = udhcp_get_option(packet, DHCP_LEASE_TIME);
if (lease_time_opt) {
move_from_unaligned32(lease_time_sec, lease_time_opt);
lease_time_sec = ntohl(lease_time_sec);
if (lease_time_sec > server_config.max_lease_sec)
lease_time_sec = server_config.max_lease_sec;
if (lease_time_sec < server_config.min_lease_sec)
lease_time_sec = server_config.min_lease_sec;
}
return lease_time_sec;
}
Commit Message:
CWE ID: CWE-125 | static uint32_t select_lease_time(struct dhcp_packet *packet)
{
uint32_t lease_time_sec = server_config.max_lease_sec;
uint8_t *lease_time_opt = udhcp_get_option32(packet, DHCP_LEASE_TIME);
if (lease_time_opt) {
move_from_unaligned32(lease_time_sec, lease_time_opt);
lease_time_sec = ntohl(lease_time_sec);
if (lease_time_sec > server_config.max_lease_sec)
lease_time_sec = server_config.max_lease_sec;
if (lease_time_sec < server_config.min_lease_sec)
lease_time_sec = server_config.min_lease_sec;
}
return lease_time_sec;
}
| 165,225 |
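The one-line change above swaps the raw option lookup for a length-checking variant before the 4-byte read, which is the whole CWE-125 story: do not trust the advertised option length. A generic sketch with made-up types, not busybox's API:

#include <stdint.h>
#include <string.h>

/* Made-up TLV option record: 'len' bytes of payload at 'data'. */
struct tlv_opt { uint8_t len; const uint8_t *data; };

/* Fetch a 32-bit value only when the peer really supplied 4 bytes; a
 * shorter option would otherwise be read past its end (CWE-125). */
static int tlv_get_u32(const struct tlv_opt *o, uint32_t *out)
{
    if (o == NULL || o->data == NULL || o->len != 4)
        return 0;
    memcpy(out, o->data, sizeof *out);   /* alignment-safe 4-byte copy */
    return 1;
}

int main(void)
{
    const uint8_t raw[4] = { 0x00, 0x00, 0x0e, 0x10 };  /* 3600, big endian */
    struct tlv_opt o = { 4, raw };
    uint32_t v;
    return tlv_get_u32(&o, &v) ? 0 : 1;
}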
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg, u8 cpl, bool in_task_switch)
{
struct desc_struct seg_desc, old_desc;
u8 dpl, rpl;
unsigned err_vec = GP_VECTOR;
u32 err_code = 0;
bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
ulong desc_addr;
int ret;
u16 dummy;
u32 base3 = 0;
memset(&seg_desc, 0, sizeof seg_desc);
if (ctxt->mode == X86EMUL_MODE_REAL) {
/* set real mode segment descriptor (keep limit etc. for
* unreal mode) */
ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
set_desc_base(&seg_desc, selector << 4);
goto load;
} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
/* VM86 needs a clean new segment descriptor */
set_desc_base(&seg_desc, selector << 4);
set_desc_limit(&seg_desc, 0xffff);
seg_desc.type = 3;
seg_desc.p = 1;
seg_desc.s = 1;
seg_desc.dpl = 3;
goto load;
}
rpl = selector & 3;
/* NULL selector is not valid for TR, CS and SS (except for long mode) */
if ((seg == VCPU_SREG_CS
|| (seg == VCPU_SREG_SS
&& (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
|| seg == VCPU_SREG_TR)
&& null_selector)
goto exception;
/* TR should be in GDT only */
if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
goto exception;
if (null_selector) /* for NULL selector skip all following checks */
goto load;
ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
return ret;
err_code = selector & 0xfffc;
err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;
/* can't load system descriptor into segment selector */
if (seg <= VCPU_SREG_GS && !seg_desc.s)
goto exception;
if (!seg_desc.p) {
err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
goto exception;
}
dpl = seg_desc.dpl;
switch (seg) {
case VCPU_SREG_SS:
/*
* segment is not a writable data segment or segment
* selector's RPL != CPL or segment selector's RPL != CPL
*/
if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
goto exception;
break;
case VCPU_SREG_CS:
if (!(seg_desc.type & 8))
goto exception;
if (seg_desc.type & 4) {
/* conforming */
if (dpl > cpl)
goto exception;
} else {
/* nonconforming */
if (rpl > cpl || dpl != cpl)
goto exception;
}
/* in long-mode d/b must be clear if l is set */
if (seg_desc.d && seg_desc.l) {
u64 efer = 0;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (efer & EFER_LMA)
goto exception;
}
/* CS(RPL) <- CPL */
selector = (selector & 0xfffc) | cpl;
break;
case VCPU_SREG_TR:
if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
goto exception;
old_desc = seg_desc;
seg_desc.type |= 2; /* busy */
ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
sizeof(seg_desc), &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
break;
case VCPU_SREG_LDTR:
if (seg_desc.s || seg_desc.type != 2)
goto exception;
break;
default: /* DS, ES, FS, or GS */
/*
* segment is not a data or readable code segment or
* ((segment is a data or nonconforming code segment)
* and (both RPL and CPL > DPL))
*/
if ((seg_desc.type & 0xa) == 0x8 ||
(((seg_desc.type & 0xc) != 0xc) &&
(rpl > dpl && cpl > dpl)))
goto exception;
break;
}
if (seg_desc.s) {
/* mark segment as accessed */
seg_desc.type |= 1;
ret = write_segment_descriptor(ctxt, selector, &seg_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
sizeof(base3), &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
}
load:
ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
return X86EMUL_CONTINUE;
exception:
return emulate_exception(ctxt, err_vec, err_code, true);
}
Commit Message: KVM: x86: Handle errors when RIP is set during far jumps
Far jmp/call/ret may fault while loading a new RIP. Currently KVM does not
handle this case, and may result in failed vm-entry once the assignment is
done. The tricky part of doing so is that loading the new CS affects the
VMCS/VMCB state, so if we fail during loading the new RIP, we are left in
unconsistent state. Therefore, this patch saves on 64-bit the old CS
descriptor and restores it if loading RIP failed.
This fixes CVE-2014-3647.
Cc: stable@vger.kernel.org
Signed-off-by: Nadav Amit <namit@cs.technion.ac.il>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
CWE ID: CWE-264 | static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg, u8 cpl,
bool in_task_switch,
struct desc_struct *desc)
{
struct desc_struct seg_desc, old_desc;
u8 dpl, rpl;
unsigned err_vec = GP_VECTOR;
u32 err_code = 0;
bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
ulong desc_addr;
int ret;
u16 dummy;
u32 base3 = 0;
memset(&seg_desc, 0, sizeof seg_desc);
if (ctxt->mode == X86EMUL_MODE_REAL) {
/* set real mode segment descriptor (keep limit etc. for
* unreal mode) */
ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
set_desc_base(&seg_desc, selector << 4);
goto load;
} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
/* VM86 needs a clean new segment descriptor */
set_desc_base(&seg_desc, selector << 4);
set_desc_limit(&seg_desc, 0xffff);
seg_desc.type = 3;
seg_desc.p = 1;
seg_desc.s = 1;
seg_desc.dpl = 3;
goto load;
}
rpl = selector & 3;
/* NULL selector is not valid for TR, CS and SS (except for long mode) */
if ((seg == VCPU_SREG_CS
|| (seg == VCPU_SREG_SS
&& (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
|| seg == VCPU_SREG_TR)
&& null_selector)
goto exception;
/* TR should be in GDT only */
if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
goto exception;
if (null_selector) /* for NULL selector skip all following checks */
goto load;
ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
return ret;
err_code = selector & 0xfffc;
err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;
/* can't load system descriptor into segment selector */
if (seg <= VCPU_SREG_GS && !seg_desc.s)
goto exception;
if (!seg_desc.p) {
err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
goto exception;
}
dpl = seg_desc.dpl;
switch (seg) {
case VCPU_SREG_SS:
/*
* segment is not a writable data segment or segment
* selector's RPL != CPL or segment selector's RPL != CPL
*/
if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
goto exception;
break;
case VCPU_SREG_CS:
if (!(seg_desc.type & 8))
goto exception;
if (seg_desc.type & 4) {
/* conforming */
if (dpl > cpl)
goto exception;
} else {
/* nonconforming */
if (rpl > cpl || dpl != cpl)
goto exception;
}
/* in long-mode d/b must be clear if l is set */
if (seg_desc.d && seg_desc.l) {
u64 efer = 0;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (efer & EFER_LMA)
goto exception;
}
/* CS(RPL) <- CPL */
selector = (selector & 0xfffc) | cpl;
break;
case VCPU_SREG_TR:
if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
goto exception;
old_desc = seg_desc;
seg_desc.type |= 2; /* busy */
ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
sizeof(seg_desc), &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
break;
case VCPU_SREG_LDTR:
if (seg_desc.s || seg_desc.type != 2)
goto exception;
break;
default: /* DS, ES, FS, or GS */
/*
* segment is not a data or readable code segment or
* ((segment is a data or nonconforming code segment)
* and (both RPL and CPL > DPL))
*/
if ((seg_desc.type & 0xa) == 0x8 ||
(((seg_desc.type & 0xc) != 0xc) &&
(rpl > dpl && cpl > dpl)))
goto exception;
break;
}
if (seg_desc.s) {
/* mark segment as accessed */
seg_desc.type |= 1;
ret = write_segment_descriptor(ctxt, selector, &seg_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
sizeof(base3), &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
}
load:
ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
if (desc)
*desc = seg_desc;
return X86EMUL_CONTINUE;
exception:
return emulate_exception(ctxt, err_vec, err_code, true);
}
| 166,337 |
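Per the commit message, the complete fix saves the old CS descriptor before a far transfer and restores it if setting the new RIP faults, so the guest never observes a half-completed jump; the new desc out-parameter above is the hook for that. A generic save-then-restore-on-failure sketch, deliberately using invented types rather than KVM's:

/* Generic pattern behind the fix: tentatively apply new state, run a step
 * that may fail, and roll back to the saved copy on failure so callers
 * never observe a partially updated segment. Types are illustrative. */
struct seg_state { unsigned short sel; unsigned long base, limit; };

int load_then_commit(struct seg_state *cur, const struct seg_state *next,
                     int (*commit)(void *ctx), void *ctx)
{
    struct seg_state saved = *cur;   /* save the old descriptor */
    *cur = *next;                    /* tentative load */
    if (commit(ctx) != 0) {          /* e.g. assigning the new RIP */
        *cur = saved;                /* restore on fault */
        return -1;
    }
    return 0;
}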
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: status_t OMXNodeInstance::setParameter(
OMX_INDEXTYPE index, const void *params, size_t size) {
Mutex::Autolock autoLock(mLock);
OMX_INDEXEXTTYPE extIndex = (OMX_INDEXEXTTYPE)index;
CLOG_CONFIG(setParameter, "%s(%#x), %zu@%p)", asString(extIndex), index, size, params);
OMX_ERRORTYPE err = OMX_SetParameter(
mHandle, index, const_cast<void *>(params));
CLOG_IF_ERROR(setParameter, err, "%s(%#x)", asString(extIndex), index);
return StatusFromOMXError(err);
}
Commit Message: DO NOT MERGE: IOMX: work against metadata buffer spoofing
- Prohibit direct set/getParam/Settings for extensions meant for
OMXNodeInstance alone. This disallows enabling metadata mode
without the knowledge of OMXNodeInstance.
- Use a backup buffer for metadata mode buffers and do not directly
share with clients.
- Disallow setting up metadata mode/tunneling/input surface
after first sendCommand.
- Disallow store-meta for input cross process.
- Disallow emptyBuffer for surface input (via IOMX).
- Fix checking for input surface.
Bug: 29422020
Change-Id: I801c77b80e703903f62e42d76fd2e76a34e4bc8e
(cherry picked from commit 7c3c2fa3e233c656fc8c2fc2a6634b3ecf8a23e8)
CWE ID: CWE-200 | status_t OMXNodeInstance::setParameter(
OMX_INDEXTYPE index, const void *params, size_t size) {
Mutex::Autolock autoLock(mLock);
OMX_INDEXEXTTYPE extIndex = (OMX_INDEXEXTTYPE)index;
CLOG_CONFIG(setParameter, "%s(%#x), %zu@%p)", asString(extIndex), index, size, params);
if (isProhibitedIndex_l(index)) {
android_errorWriteLog(0x534e4554, "29422020");
return BAD_INDEX;
}
OMX_ERRORTYPE err = OMX_SetParameter(
mHandle, index, const_cast<void *>(params));
CLOG_IF_ERROR(setParameter, err, "%s(%#x)", asString(extIndex), index);
return StatusFromOMXError(err);
}
| 174,139 |
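The isProhibitedIndex_l() call added above is a deny-list consulted before the parameter reaches the component, so extension indexes reserved for the node itself (metadata mode, tunneling and similar) can no longer be flipped across the process boundary. A small stand-alone version of the idea, with hypothetical index values:

#include <stdbool.h>

/* Hypothetical extension indexes that only the owning object may set. */
enum { IDX_STORE_METADATA = 0x7f000001, IDX_ENABLE_TUNNELING = 0x7f000002 };

static bool is_prohibited_index(unsigned int index)
{
    switch (index) {
    case IDX_STORE_METADATA:
    case IDX_ENABLE_TUNNELING:
        return true;
    default:
        return false;
    }
}

/* Gate every externally reachable setter on the deny-list first. */
int set_parameter_checked(unsigned int index, const void *params)
{
    if (is_prohibited_index(index))
        return -1;           /* reject before the component ever sees it */
    (void)params;            /* ... forward to the real setter here ... */
    return 0;
}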
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: test_function (char * (*my_asnprintf) (char *, size_t *, const char *, ...))
{
char buf[8];
int size;
for (size = 0; size <= 8; size++)
{
size_t length = size;
char *result = my_asnprintf (NULL, &length, "%d", 12345);
ASSERT (result != NULL);
ASSERT (strcmp (result, "12345") == 0);
ASSERT (length == 5);
free (result);
}
for (size = 0; size <= 8; size++)
{
size_t length;
char *result;
memcpy (buf, "DEADBEEF", 8);
length = size;
result = my_asnprintf (buf, &length, "%d", 12345);
ASSERT (result != NULL);
ASSERT (strcmp (result, "12345") == 0);
ASSERT (length == 5);
if (size < 6)
ASSERT (result != buf);
ASSERT (memcmp (buf + size, &"DEADBEEF"[size], 8 - size) == 0);
if (result != buf)
free (result);
}
}
Commit Message: vasnprintf: Fix heap memory overrun bug.
Reported by Ben Pfaff <blp@cs.stanford.edu> in
<https://lists.gnu.org/archive/html/bug-gnulib/2018-09/msg00107.html>.
* lib/vasnprintf.c (convert_to_decimal): Allocate one more byte of
memory.
* tests/test-vasnprintf.c (test_function): Add another test.
CWE ID: CWE-119 | test_function (char * (*my_asnprintf) (char *, size_t *, const char *, ...))
{
char buf[8];
int size;
for (size = 0; size <= 8; size++)
{
size_t length = size;
char *result = my_asnprintf (NULL, &length, "%d", 12345);
ASSERT (result != NULL);
ASSERT (strcmp (result, "12345") == 0);
ASSERT (length == 5);
free (result);
}
for (size = 0; size <= 8; size++)
{
size_t length;
char *result;
memcpy (buf, "DEADBEEF", 8);
length = size;
result = my_asnprintf (buf, &length, "%d", 12345);
ASSERT (result != NULL);
ASSERT (strcmp (result, "12345") == 0);
ASSERT (length == 5);
if (size < 5 + 1)
ASSERT (result != buf);
ASSERT (memcmp (buf + size, &"DEADBEEF"[size], 8 - size) == 0);
if (result != buf)
free (result);
}
/* Note: This test assumes IEEE 754 representation of 'double' floats. */
for (size = 0; size <= 8; size++)
{
size_t length;
char *result;
memcpy (buf, "DEADBEEF", 8);
length = size;
result = my_asnprintf (buf, &length, "%2.0f", 1.6314159265358979e+125);
ASSERT (result != NULL);
ASSERT (strcmp (result, "163141592653589790215729350939528493057529598899734151772468186268423257777068536614838678161083520756952076273094236944990208") == 0);
ASSERT (length == 126);
if (size < 126 + 1)
ASSERT (result != buf);
ASSERT (memcmp (buf + size, &"DEADBEEF"[size], 8 - size) == 0);
if (result != buf)
free (result);
}
}
| 169,014 |
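The new test above exercises the underlying convert_to_decimal fix, which allocates one byte more than the estimated digit count. A common instance of the same off-by-one, forgetting room for the terminating NUL, sketched with a hypothetical converter:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical decimal renderer: the allocation is digits + 1; counting
 * only the digits leaves no room for the trailing NUL and the next write
 * lands one byte past the heap block. */
static char *to_decimal(unsigned long v)
{
    char tmp[32];
    int digits = snprintf(tmp, sizeof tmp, "%lu", v);
    char *out = malloc((size_t)digits + 1);      /* +1 for '\0' */
    if (out != NULL)
        memcpy(out, tmp, (size_t)digits + 1);
    return out;
}

int main(void)
{
    char *s = to_decimal(1234567890UL);
    if (s) { puts(s); free(s); }
    return 0;
}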
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: init_rc(void)
{
int i;
struct stat st;
FILE *f;
if (rc_dir != NULL)
goto open_rc;
rc_dir = expandPath(RC_DIR);
i = strlen(rc_dir);
if (i > 1 && rc_dir[i - 1] == '/')
rc_dir[i - 1] = '\0';
#ifdef USE_M17N
display_charset_str = wc_get_ces_list();
document_charset_str = display_charset_str;
system_charset_str = display_charset_str;
#endif
if (stat(rc_dir, &st) < 0) {
if (errno == ENOENT) { /* no directory */
if (do_mkdir(rc_dir, 0700) < 0) {
/* fprintf(stderr, "Can't create config directory (%s)!\n", rc_dir); */
goto rc_dir_err;
}
else {
stat(rc_dir, &st);
}
}
else {
/* fprintf(stderr, "Can't open config directory (%s)!\n", rc_dir); */
goto rc_dir_err;
}
}
if (!S_ISDIR(st.st_mode)) {
/* not a directory */
/* fprintf(stderr, "%s is not a directory!\n", rc_dir); */
goto rc_dir_err;
}
if (!(st.st_mode & S_IWUSR)) {
/* fprintf(stderr, "%s is not writable!\n", rc_dir); */
goto rc_dir_err;
}
no_rc_dir = FALSE;
tmp_dir = rc_dir;
if (config_file == NULL)
config_file = rcFile(CONFIG_FILE);
create_option_search_table();
open_rc:
/* open config file */
if ((f = fopen(etcFile(W3MCONFIG), "rt")) != NULL) {
interpret_rc(f);
fclose(f);
}
if ((f = fopen(confFile(CONFIG_FILE), "rt")) != NULL) {
interpret_rc(f);
fclose(f);
}
if (config_file && (f = fopen(config_file, "rt")) != NULL) {
interpret_rc(f);
fclose(f);
}
return;
rc_dir_err:
no_rc_dir = TRUE;
if (((tmp_dir = getenv("TMPDIR")) == NULL || *tmp_dir == '\0') &&
((tmp_dir = getenv("TMP")) == NULL || *tmp_dir == '\0') &&
((tmp_dir = getenv("TEMP")) == NULL || *tmp_dir == '\0'))
tmp_dir = "/tmp";
create_option_search_table();
goto open_rc;
}
Commit Message: Make temporary directory safely when ~/.w3m is unwritable
CWE ID: CWE-59 | init_rc(void)
{
int i;
struct stat st;
FILE *f;
if (rc_dir != NULL)
goto open_rc;
rc_dir = expandPath(RC_DIR);
i = strlen(rc_dir);
if (i > 1 && rc_dir[i - 1] == '/')
rc_dir[i - 1] = '\0';
#ifdef USE_M17N
display_charset_str = wc_get_ces_list();
document_charset_str = display_charset_str;
system_charset_str = display_charset_str;
#endif
if (stat(rc_dir, &st) < 0) {
if (errno == ENOENT) { /* no directory */
if (do_mkdir(rc_dir, 0700) < 0) {
/* fprintf(stderr, "Can't create config directory (%s)!\n", rc_dir); */
goto rc_dir_err;
}
else {
stat(rc_dir, &st);
}
}
else {
/* fprintf(stderr, "Can't open config directory (%s)!\n", rc_dir); */
goto rc_dir_err;
}
}
if (!S_ISDIR(st.st_mode)) {
/* not a directory */
/* fprintf(stderr, "%s is not a directory!\n", rc_dir); */
goto rc_dir_err;
}
if (!(st.st_mode & S_IWUSR)) {
/* fprintf(stderr, "%s is not writable!\n", rc_dir); */
goto rc_dir_err;
}
no_rc_dir = FALSE;
tmp_dir = rc_dir;
if (config_file == NULL)
config_file = rcFile(CONFIG_FILE);
create_option_search_table();
open_rc:
/* open config file */
if ((f = fopen(etcFile(W3MCONFIG), "rt")) != NULL) {
interpret_rc(f);
fclose(f);
}
if ((f = fopen(confFile(CONFIG_FILE), "rt")) != NULL) {
interpret_rc(f);
fclose(f);
}
if (config_file && (f = fopen(config_file, "rt")) != NULL) {
interpret_rc(f);
fclose(f);
}
return;
rc_dir_err:
no_rc_dir = TRUE;
if (((tmp_dir = getenv("TMPDIR")) == NULL || *tmp_dir == '\0') &&
((tmp_dir = getenv("TMP")) == NULL || *tmp_dir == '\0') &&
((tmp_dir = getenv("TEMP")) == NULL || *tmp_dir == '\0'))
tmp_dir = "/tmp";
#ifdef HAVE_MKDTEMP
tmp_dir = mkdtemp(Strnew_m_charp(tmp_dir, "/w3m-XXXXXX", NULL)->ptr);
if (tmp_dir == NULL)
tmp_dir = rc_dir;
#endif
create_option_search_table();
goto open_rc;
}
| 169,346 |
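When ~/.w3m is unusable the old code above fell back to a shared, predictable directory such as /tmp, which is what enables CWE-59 link following; the fix creates a private subdirectory with mkdtemp() instead. A stand-alone sketch of that fallback (POSIX mkdtemp, invented names):

#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>

/* mkdtemp() replaces the trailing XXXXXX atomically and creates the
 * directory with mode 0700, so another user cannot pre-create the path
 * or plant a symlink there. Returns NULL on failure. */
static char *make_private_tmpdir(char *buf, size_t bufsize)
{
    const char *base = getenv("TMPDIR");
    if (base == NULL || *base == '\0')
        base = "/tmp";
    snprintf(buf, bufsize, "%s/scratch-XXXXXX", base);  /* "scratch-" is a placeholder prefix */
    return mkdtemp(buf);
}

int main(void)
{
    char path[4096];
    char *dir = make_private_tmpdir(path, sizeof path);
    printf("%s\n", dir != NULL ? dir : "mkdtemp failed");
    return 0;
}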
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void faad_resetbits(bitfile *ld, int bits)
{
uint32_t tmp;
int words = bits >> 5;
int remainder = bits & 0x1F;
ld->bytes_left = ld->buffer_size - words*4;
if (ld->bytes_left >= 4)
{
tmp = getdword(&ld->start[words]);
ld->bytes_left -= 4;
} else {
tmp = getdword_n(&ld->start[words], ld->bytes_left);
ld->bytes_left = 0;
}
ld->bufa = tmp;
if (ld->bytes_left >= 4)
{
tmp = getdword(&ld->start[words+1]);
ld->bytes_left -= 4;
} else {
tmp = getdword_n(&ld->start[words+1], ld->bytes_left);
ld->bytes_left = 0;
}
ld->bufb = tmp;
ld->bits_left = 32 - remainder;
ld->tail = &ld->start[words+2];
/* recheck for reading too many bytes */
ld->error = 0;
}
Commit Message: Fix a couple buffer overflows
https://hackerone.com/reports/502816
https://hackerone.com/reports/507858
https://github.com/videolan/vlc/blob/master/contrib/src/faad2/faad2-fix-overflows.patch
CWE ID: CWE-119 | void faad_resetbits(bitfile *ld, int bits)
{
uint32_t tmp;
int words = bits >> 5;
int remainder = bits & 0x1F;
if (ld->buffer_size < words * 4)
ld->bytes_left = 0;
else
ld->bytes_left = ld->buffer_size - words*4;
if (ld->bytes_left >= 4)
{
tmp = getdword(&ld->start[words]);
ld->bytes_left -= 4;
} else {
tmp = getdword_n(&ld->start[words], ld->bytes_left);
ld->bytes_left = 0;
}
ld->bufa = tmp;
if (ld->bytes_left >= 4)
{
tmp = getdword(&ld->start[words+1]);
ld->bytes_left -= 4;
} else {
tmp = getdword_n(&ld->start[words+1], ld->bytes_left);
ld->bytes_left = 0;
}
ld->bufb = tmp;
ld->bits_left = 32 - remainder;
ld->tail = &ld->start[words+2];
/* recheck for reading too many bytes */
ld->error = 0;
}
| 169,535 |
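The guard added at the top of faad_resetbits() keeps buffer_size - words*4 from wrapping when the reset target lies past the end of the data, so bytes_left can never become a huge unsigned value. The same clamp in isolation:

#include <stdint.h>

/* Remaining bytes after consuming 'consumed' of a 'total'-byte buffer.
 * A bare subtraction wraps to a huge value when consumed > total, which
 * is exactly the over-read the fix prevents; clamp to zero instead. */
uint32_t bytes_left(uint32_t total, uint32_t consumed)
{
    return (consumed > total) ? 0 : total - consumed;
}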
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static void save_text_if_changed(const char *name, const char *new_value)
{
/* a text value can't be change if the file is not loaded */
/* returns NULL if the name is not found; otherwise nonzero */
if (!g_hash_table_lookup(g_loaded_texts, name))
return;
const char *old_value = g_cd ? problem_data_get_content_or_NULL(g_cd, name) : "";
if (!old_value)
old_value = "";
if (strcmp(new_value, old_value) != 0)
{
struct dump_dir *dd = wizard_open_directory_for_writing(g_dump_dir_name);
if (dd)
dd_save_text(dd, name, new_value);
dd_close(dd);
problem_data_reload_from_dump_dir();
update_gui_state_from_problem_data(/* don't update selected event */ 0);
}
}
Commit Message: wizard: fix save users changes after reviewing dump dir files
If the user reviewed the dump dir's files during reporting the crash, the
changes was thrown away and original data was passed to the bugzilla bug
report.
report-gtk saves the first text view buffer and then reloads data from the
reported problem directory, which causes that the changes made to those text
views are thrown away.
Function save_text_if_changed(), except of saving text, also reload the files
from dump dir and update gui state from the dump dir. The commit moves the
reloading and updating gui functions away from this function.
Related to rhbz#1270235
Signed-off-by: Matej Habrnal <mhabrnal@redhat.com>
CWE ID: CWE-200 | static void save_text_if_changed(const char *name, const char *new_value)
{
/* a text value can't be change if the file is not loaded */
/* returns NULL if the name is not found; otherwise nonzero */
if (!g_hash_table_lookup(g_loaded_texts, name))
return;
const char *old_value = g_cd ? problem_data_get_content_or_NULL(g_cd, name) : "";
if (!old_value)
old_value = "";
if (strcmp(new_value, old_value) != 0)
{
struct dump_dir *dd = wizard_open_directory_for_writing(g_dump_dir_name);
if (dd)
dd_save_text(dd, name, new_value);
dd_close(dd);
}
}
| 166,602 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int rose_parse_facilities(unsigned char *p,
struct rose_facilities_struct *facilities)
{
int facilities_len, len;
facilities_len = *p++;
if (facilities_len == 0)
return 0;
while (facilities_len > 0) {
if (*p == 0x00) {
facilities_len--;
p++;
switch (*p) {
case FAC_NATIONAL: /* National */
len = rose_parse_national(p + 1, facilities, facilities_len - 1);
facilities_len -= len + 1;
p += len + 1;
break;
case FAC_CCITT: /* CCITT */
len = rose_parse_ccitt(p + 1, facilities, facilities_len - 1);
facilities_len -= len + 1;
p += len + 1;
break;
default:
printk(KERN_DEBUG "ROSE: rose_parse_facilities - unknown facilities family %02X\n", *p);
facilities_len--;
p++;
break;
}
} else
break; /* Error in facilities format */
}
return 1;
}
Commit Message: ROSE: prevent heap corruption with bad facilities
When parsing the FAC_NATIONAL_DIGIS facilities field, it's possible for
a remote host to provide more digipeaters than expected, resulting in
heap corruption. Check against ROSE_MAX_DIGIS to prevent overflows, and
abort facilities parsing on failure.
Additionally, when parsing the FAC_CCITT_DEST_NSAP and
FAC_CCITT_SRC_NSAP facilities fields, a remote host can provide a length
of less than 10, resulting in an underflow in a memcpy size, causing a
kernel panic due to massive heap corruption. A length of greater than
20 results in a stack overflow of the callsign array. Abort facilities
parsing on these invalid length values.
Signed-off-by: Dan Rosenberg <drosenberg@vsecurity.com>
Cc: stable@kernel.org
Signed-off-by: David S. Miller <davem@davemloft.net>
CWE ID: CWE-20 | int rose_parse_facilities(unsigned char *p,
struct rose_facilities_struct *facilities)
{
int facilities_len, len;
facilities_len = *p++;
if (facilities_len == 0)
return 0;
while (facilities_len > 0) {
if (*p == 0x00) {
facilities_len--;
p++;
switch (*p) {
case FAC_NATIONAL: /* National */
len = rose_parse_national(p + 1, facilities, facilities_len - 1);
if (len < 0)
return 0;
facilities_len -= len + 1;
p += len + 1;
break;
case FAC_CCITT: /* CCITT */
len = rose_parse_ccitt(p + 1, facilities, facilities_len - 1);
if (len < 0)
return 0;
facilities_len -= len + 1;
p += len + 1;
break;
default:
printk(KERN_DEBUG "ROSE: rose_parse_facilities - unknown facilities family %02X\n", *p);
facilities_len--;
p++;
break;
}
} else
break; /* Error in facilities format */
}
return 1;
}
| 165,672 |
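The fix above has rose_parse_national()/rose_parse_ccitt() report malformed sub-blocks with a negative length, and this caller now aborts instead of advancing by a bogus amount. A generic length-checked walk showing the same propagation, with invented framing (one length byte followed by that many payload bytes):

/* Invented framing: each block is one length byte followed by 'len'
 * payload bytes. Returns bytes consumed, or -1 if the block claims more
 * data than remains. */
static int parse_block(const unsigned char *p, int remaining)
{
    if (remaining < 1)
        return -1;
    int len = p[0];
    if (1 + len > remaining)
        return -1;
    return 1 + len;
}

/* Abort the whole walk on the first bad block so a negative or oversized
 * length can never be added to the cursor (the heap-corruption vector). */
int parse_all(const unsigned char *p, int remaining)
{
    while (remaining > 0) {
        int consumed = parse_block(p, remaining);
        if (consumed < 0)
            return 0;        /* error in facilities format */
        p += consumed;
        remaining -= consumed;
    }
    return 1;
}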
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static void __svc_rdma_free(struct work_struct *work)
{
struct svcxprt_rdma *rdma =
container_of(work, struct svcxprt_rdma, sc_work);
struct svc_xprt *xprt = &rdma->sc_xprt;
dprintk("svcrdma: %s(%p)\n", __func__, rdma);
if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
ib_drain_qp(rdma->sc_qp);
/* We should only be called from kref_put */
if (kref_read(&xprt->xpt_ref) != 0)
pr_err("svcrdma: sc_xprt still in use? (%d)\n",
kref_read(&xprt->xpt_ref));
/*
* Destroy queued, but not processed read completions. Note
* that this cleanup has to be done before destroying the
* cm_id because the device ptr is needed to unmap the dma in
* svc_rdma_put_context.
*/
while (!list_empty(&rdma->sc_read_complete_q)) {
struct svc_rdma_op_ctxt *ctxt;
ctxt = list_first_entry(&rdma->sc_read_complete_q,
struct svc_rdma_op_ctxt, list);
list_del(&ctxt->list);
svc_rdma_put_context(ctxt, 1);
}
/* Destroy queued, but not processed recv completions */
while (!list_empty(&rdma->sc_rq_dto_q)) {
struct svc_rdma_op_ctxt *ctxt;
ctxt = list_first_entry(&rdma->sc_rq_dto_q,
struct svc_rdma_op_ctxt, list);
list_del(&ctxt->list);
svc_rdma_put_context(ctxt, 1);
}
/* Warn if we leaked a resource or under-referenced */
if (rdma->sc_ctxt_used != 0)
pr_err("svcrdma: ctxt still in use? (%d)\n",
rdma->sc_ctxt_used);
/* Final put of backchannel client transport */
if (xprt->xpt_bc_xprt) {
xprt_put(xprt->xpt_bc_xprt);
xprt->xpt_bc_xprt = NULL;
}
rdma_dealloc_frmr_q(rdma);
svc_rdma_destroy_ctxts(rdma);
svc_rdma_destroy_maps(rdma);
/* Destroy the QP if present (not a listener) */
if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
ib_destroy_qp(rdma->sc_qp);
if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
ib_free_cq(rdma->sc_sq_cq);
if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
ib_free_cq(rdma->sc_rq_cq);
if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
ib_dealloc_pd(rdma->sc_pd);
/* Destroy the CM ID */
rdma_destroy_id(rdma->sc_cm_id);
kfree(rdma);
}
Commit Message: Merge tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux
Pull nfsd updates from Bruce Fields:
"Another RDMA update from Chuck Lever, and a bunch of miscellaneous
bugfixes"
* tag 'nfsd-4.12' of git://linux-nfs.org/~bfields/linux: (26 commits)
nfsd: Fix up the "supattr_exclcreat" attributes
nfsd: encoders mustn't use unitialized values in error cases
nfsd: fix undefined behavior in nfsd4_layout_verify
lockd: fix lockd shutdown race
NFSv4: Fix callback server shutdown
SUNRPC: Refactor svc_set_num_threads()
NFSv4.x/callback: Create the callback service through svc_create_pooled
lockd: remove redundant check on block
svcrdma: Clean out old XDR encoders
svcrdma: Remove the req_map cache
svcrdma: Remove unused RDMA Write completion handler
svcrdma: Reduce size of sge array in struct svc_rdma_op_ctxt
svcrdma: Clean up RPC-over-RDMA backchannel reply processing
svcrdma: Report Write/Reply chunk overruns
svcrdma: Clean up RDMA_ERROR path
svcrdma: Use rdma_rw API in RPC reply path
svcrdma: Introduce local rdma_rw API helpers
svcrdma: Clean up svc_rdma_get_inv_rkey()
svcrdma: Add helper to save pages under I/O
svcrdma: Eliminate RPCRDMA_SQ_DEPTH_MULT
...
CWE ID: CWE-404 | static void __svc_rdma_free(struct work_struct *work)
{
struct svcxprt_rdma *rdma =
container_of(work, struct svcxprt_rdma, sc_work);
struct svc_xprt *xprt = &rdma->sc_xprt;
dprintk("svcrdma: %s(%p)\n", __func__, rdma);
if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
ib_drain_qp(rdma->sc_qp);
/* We should only be called from kref_put */
if (kref_read(&xprt->xpt_ref) != 0)
pr_err("svcrdma: sc_xprt still in use? (%d)\n",
kref_read(&xprt->xpt_ref));
/*
* Destroy queued, but not processed read completions. Note
* that this cleanup has to be done before destroying the
* cm_id because the device ptr is needed to unmap the dma in
* svc_rdma_put_context.
*/
while (!list_empty(&rdma->sc_read_complete_q)) {
struct svc_rdma_op_ctxt *ctxt;
ctxt = list_first_entry(&rdma->sc_read_complete_q,
struct svc_rdma_op_ctxt, list);
list_del(&ctxt->list);
svc_rdma_put_context(ctxt, 1);
}
/* Destroy queued, but not processed recv completions */
while (!list_empty(&rdma->sc_rq_dto_q)) {
struct svc_rdma_op_ctxt *ctxt;
ctxt = list_first_entry(&rdma->sc_rq_dto_q,
struct svc_rdma_op_ctxt, list);
list_del(&ctxt->list);
svc_rdma_put_context(ctxt, 1);
}
/* Warn if we leaked a resource or under-referenced */
if (rdma->sc_ctxt_used != 0)
pr_err("svcrdma: ctxt still in use? (%d)\n",
rdma->sc_ctxt_used);
/* Final put of backchannel client transport */
if (xprt->xpt_bc_xprt) {
xprt_put(xprt->xpt_bc_xprt);
xprt->xpt_bc_xprt = NULL;
}
rdma_dealloc_frmr_q(rdma);
svc_rdma_destroy_rw_ctxts(rdma);
svc_rdma_destroy_ctxts(rdma);
/* Destroy the QP if present (not a listener) */
if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
ib_destroy_qp(rdma->sc_qp);
if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
ib_free_cq(rdma->sc_sq_cq);
if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
ib_free_cq(rdma->sc_rq_cq);
if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
ib_dealloc_pd(rdma->sc_pd);
/* Destroy the CM ID */
rdma_destroy_id(rdma->sc_cm_id);
kfree(rdma);
}
| 168,176 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
unsigned long arg)
{
struct sem_undo *un;
struct sem_array *sma;
struct sem* curr;
int err;
int nsems;
struct list_head tasks;
int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
/* big-endian 64bit */
val = arg >> 32;
#else
/* 32bit or little-endian 64bit */
val = arg;
#endif
sma = sem_lock_check(ns, semid);
if (IS_ERR(sma))
return PTR_ERR(sma);
INIT_LIST_HEAD(&tasks);
nsems = sma->sem_nsems;
err = -EACCES;
if (ipcperms(ns, &sma->sem_perm, S_IWUGO))
goto out_unlock;
err = security_sem_semctl(sma, SETVAL);
if (err)
goto out_unlock;
err = -EINVAL;
if(semnum < 0 || semnum >= nsems)
goto out_unlock;
curr = &sma->sem_base[semnum];
err = -ERANGE;
if (val > SEMVMX || val < 0)
goto out_unlock;
assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id)
un->semadj[semnum] = 0;
curr->semval = val;
curr->sempid = task_tgid_vnr(current);
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &tasks);
err = 0;
out_unlock:
sem_unlock(sma);
wake_up_sem_queue_do(&tasks);
return err;
}
Commit Message: ipc,sem: fine grained locking for semtimedop
Introduce finer grained locking for semtimedop, to handle the common case
of a program wanting to manipulate one semaphore from an array with
multiple semaphores.
If the call is a semop manipulating just one semaphore in an array with
multiple semaphores, only take the lock for that semaphore itself.
If the call needs to manipulate multiple semaphores, or another caller is
in a transaction that manipulates multiple semaphores, the sem_array lock
is taken, as well as all the locks for the individual semaphores.
On a 24 CPU system, performance numbers with the semop-multi
test with N threads and N semaphores, look like this:
          vanilla    Davidlohr's    Davidlohr's +    Davidlohr's +
threads              patches        rwlock patches   v3 patches

 10        610652         726325          1783589          2142206
 20        341570         365699          1520453          1977878
 30        288102         307037          1498167          2037995
 40        290714         305955          1612665          2256484
 50        288620         312890          1733453          2650292
 60        289987         306043          1649360          2388008
 70        291298         306347          1723167          2717486
 80        290948         305662          1729545          2763582
 90        290996         306680          1736021          2757524
100        292243         306700          1773700          3059159
[davidlohr.bueso@hp.com: do not call sem_lock when bogus sma]
[davidlohr.bueso@hp.com: make refcounter atomic]
Signed-off-by: Rik van Riel <riel@redhat.com>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Davidlohr Bueso <davidlohr.bueso@hp.com>
Cc: Chegu Vinod <chegu_vinod@hp.com>
Cc: Jason Low <jason.low2@hp.com>
Reviewed-by: Michel Lespinasse <walken@google.com>
Cc: Peter Hurley <peter@hurleysoftware.com>
Cc: Stanislav Kinsbursky <skinsbursky@parallels.com>
Tested-by: Emmanuel Benisty <benisty.e@gmail.com>
Tested-by: Sedat Dilek <sedat.dilek@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
CWE ID: CWE-189 | static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
unsigned long arg)
{
struct sem_undo *un;
struct sem_array *sma;
struct sem* curr;
int err;
struct list_head tasks;
int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
/* big-endian 64bit */
val = arg >> 32;
#else
/* 32bit or little-endian 64bit */
val = arg;
#endif
if (val > SEMVMX || val < 0)
return -ERANGE;
INIT_LIST_HEAD(&tasks);
rcu_read_lock();
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
rcu_read_unlock();
return PTR_ERR(sma);
}
if (semnum < 0 || semnum >= sma->sem_nsems) {
rcu_read_unlock();
return -EINVAL;
}
if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
rcu_read_unlock();
return -EACCES;
}
err = security_sem_semctl(sma, SETVAL);
if (err) {
rcu_read_unlock();
return -EACCES;
}
sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id)
un->semadj[semnum] = 0;
curr->semval = val;
curr->sempid = task_tgid_vnr(current);
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &tasks);
sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
return 0;
}
| 165,981 |
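For context on the sem_lock(sma, NULL, -1) call in the fixed semctl_setval() above: the commit's scheme takes only a per-semaphore lock for simple single-semaphore operations and an array-wide lock for everything else. Below is a user-space toy model of that policy, with pthread mutexes standing in for kernel spinlocks; the real ipc/sem.c logic additionally deals with RCU and lock/unlock races.

#include <pthread.h>

struct toy_sem       { pthread_mutex_t lock; int val; };
struct toy_sem_array { pthread_mutex_t array_lock; struct toy_sem sem[64]; };

/* Returns the lock that was taken so the caller can release the right one. */
static pthread_mutex_t *toy_sem_lock(struct toy_sem_array *sma,
                                     int semnum, int nsops)
{
    if (nsops == 1 && semnum >= 0) {
        /* common case: a single semaphore is touched, take only its lock */
        pthread_mutex_lock(&sma->sem[semnum].lock);
        return &sma->sem[semnum].lock;
    }
    /* complex ops, or semctl-style callers passing nsops == -1:
     * serialise against everything with the array-wide lock */
    pthread_mutex_lock(&sma->array_lock);
    return &sma->array_lock;
}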
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: PepperDeviceEnumerationHostHelper::PepperDeviceEnumerationHostHelper(
ppapi::host::ResourceHost* resource_host,
Delegate* delegate,
PP_DeviceType_Dev device_type,
const GURL& document_url)
: resource_host_(resource_host),
delegate_(delegate),
device_type_(device_type),
document_url_(document_url) {}
Commit Message: Pepper: Access PepperMediaDeviceManager through a WeakPtr
Its lifetime is scoped to the RenderFrame, and it might go away before the
hosts that refer to it.
BUG=423030
Review URL: https://codereview.chromium.org/653243003
Cr-Commit-Position: refs/heads/master@{#299897}
CWE ID: CWE-399 | PepperDeviceEnumerationHostHelper::PepperDeviceEnumerationHostHelper(
ppapi::host::ResourceHost* resource_host,
base::WeakPtr<Delegate> delegate,
PP_DeviceType_Dev device_type,
const GURL& document_url)
: resource_host_(resource_host),
delegate_(delegate),
device_type_(device_type),
document_url_(document_url) {}
| 171,604 |
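A condensed sketch of the ownership pattern the fix adopts, assuming Chromium's base::WeakPtr / base::WeakPtrFactory API; the class names below are illustrative rather than the real PepperMediaDeviceManager interface.

#include "base/memory/weak_ptr.h"

class Delegate {
 public:
  base::WeakPtr<Delegate> AsWeakPtr() { return weak_factory_.GetWeakPtr(); }
  void EnumerateDevices() {}

 private:
  // Declared last so it is destroyed first, invalidating outstanding WeakPtrs.
  base::WeakPtrFactory<Delegate> weak_factory_{this};
};

class EnumerationHelper {
 public:
  explicit EnumerationHelper(base::WeakPtr<Delegate> delegate)
      : delegate_(delegate) {}

  void OnEnumerateRequested() {
    if (delegate_)            // null once the RenderFrame-scoped owner is gone
      delegate_->EnumerateDevices();
  }

 private:
  base::WeakPtr<Delegate> delegate_;
};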
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: BGD_DECLARE(gdImagePtr) gdImageCreate (int sx, int sy)
{
int i;
gdImagePtr im;
im = (gdImage *) gdCalloc(1, sizeof(gdImage));
if (!im) {
return NULL;
}
/* Row-major ever since gd 1.3 */
im->pixels = (unsigned char **) gdMalloc (sizeof (unsigned char *) * sy);
if (!im->pixels) {
gdFree(im);
return NULL;
}
im->polyInts = 0;
im->polyAllocated = 0;
im->brush = 0;
im->tile = 0;
im->style = 0;
for (i = 0; (i < sy); i++) {
/* Row-major ever since gd 1.3 */
im->pixels[i] = (unsigned char *) gdCalloc (sx, sizeof (unsigned char));
if (!im->pixels[i]) {
for (--i ; i >= 0; i--) {
gdFree(im->pixels[i]);
}
gdFree(im->pixels);
gdFree(im);
return NULL;
}
}
im->sx = sx;
im->sy = sy;
im->colorsTotal = 0;
im->transparent = (-1);
im->interlace = 0;
im->thick = 1;
im->AA = 0;
for (i = 0; (i < gdMaxColors); i++) {
im->open[i] = 1;
};
im->trueColor = 0;
im->tpixels = 0;
im->cx1 = 0;
im->cy1 = 0;
im->cx2 = im->sx - 1;
im->cy2 = im->sy - 1;
im->res_x = GD_RESOLUTION;
im->res_y = GD_RESOLUTION;
im->interpolation = NULL;
im->interpolation_id = GD_BILINEAR_FIXED;
return im;
}
Commit Message: Fix #340: System frozen
gdImageCreate() doesn't check for oversized images and as such is prone
to DoS vulnerabilities. We fix that by applying the same overflow check
that is already in place for gdImageCreateTrueColor().
CVE-2016-9317
CWE ID: CWE-20 | BGD_DECLARE(gdImagePtr) gdImageCreate (int sx, int sy)
{
int i;
gdImagePtr im;
if (overflow2(sx, sy)) {
return NULL;
}
if (overflow2(sizeof (unsigned char *), sy)) {
return NULL;
}
if (overflow2(sizeof (unsigned char), sx)) {
return NULL;
}
im = (gdImage *) gdCalloc(1, sizeof(gdImage));
if (!im) {
return NULL;
}
/* Row-major ever since gd 1.3 */
im->pixels = (unsigned char **) gdMalloc (sizeof (unsigned char *) * sy);
if (!im->pixels) {
gdFree(im);
return NULL;
}
im->polyInts = 0;
im->polyAllocated = 0;
im->brush = 0;
im->tile = 0;
im->style = 0;
for (i = 0; (i < sy); i++) {
/* Row-major ever since gd 1.3 */
im->pixels[i] = (unsigned char *) gdCalloc (sx, sizeof (unsigned char));
if (!im->pixels[i]) {
for (--i ; i >= 0; i--) {
gdFree(im->pixels[i]);
}
gdFree(im->pixels);
gdFree(im);
return NULL;
}
}
im->sx = sx;
im->sy = sy;
im->colorsTotal = 0;
im->transparent = (-1);
im->interlace = 0;
im->thick = 1;
im->AA = 0;
for (i = 0; (i < gdMaxColors); i++) {
im->open[i] = 1;
};
im->trueColor = 0;
im->tpixels = 0;
im->cx1 = 0;
im->cy1 = 0;
im->cx2 = im->sx - 1;
im->cy2 = im->sy - 1;
im->res_x = GD_RESOLUTION;
im->res_y = GD_RESOLUTION;
im->interpolation = NULL;
im->interpolation_id = GD_BILINEAR_FIXED;
return im;
}
| 168,743 |
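overflow2(), used in the fixed gdImageCreate() above, is libgd's multiplication-overflow guard: it reports whether a*b would exceed INT_MAX so callers can refuse the allocation. A simplified stand-in is shown below; the real helper also emits a gd warning.

#include <limits.h>

/* Returns nonzero when a*b would not fit in an int, so the caller can
 * refuse the allocation instead of computing a wrapped size. */
static int overflow2_sketch(int a, int b)
{
    if (a <= 0 || b <= 0)
        return 1;                /* non-positive dimensions are invalid */
    if (a > INT_MAX / b)
        return 1;                /* the product would overflow int      */
    return 0;
}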
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: WebsiteSettings::WebsiteSettings(
WebsiteSettingsUI* ui,
Profile* profile,
TabSpecificContentSettings* tab_specific_content_settings,
InfoBarService* infobar_service,
const GURL& url,
const content::SSLStatus& ssl,
content::CertStore* cert_store)
: TabSpecificContentSettings::SiteDataObserver(
tab_specific_content_settings),
ui_(ui),
infobar_service_(infobar_service),
show_info_bar_(false),
site_url_(url),
site_identity_status_(SITE_IDENTITY_STATUS_UNKNOWN),
cert_id_(0),
site_connection_status_(SITE_CONNECTION_STATUS_UNKNOWN),
cert_store_(cert_store),
content_settings_(profile->GetHostContentSettingsMap()),
chrome_ssl_host_state_delegate_(
ChromeSSLHostStateDelegateFactory::GetForProfile(profile)),
did_revoke_user_ssl_decisions_(false) {
Init(profile, url, ssl);
PresentSitePermissions();
PresentSiteData();
PresentSiteIdentity();
RecordWebsiteSettingsAction(WEBSITE_SETTINGS_OPENED);
}
Commit Message: Fix UAF in Origin Info Bubble and permission settings UI.
In addition to fixing the UAF, this will also fix the problem of the bubble
showing over the previous tab (if the bubble is open when the tab it was opened
for closes).
BUG=490492
TBR=tedchoc
Review URL: https://codereview.chromium.org/1317443002
Cr-Commit-Position: refs/heads/master@{#346023}
CWE ID: | WebsiteSettings::WebsiteSettings(
WebsiteSettingsUI* ui,
Profile* profile,
TabSpecificContentSettings* tab_specific_content_settings,
content::WebContents* web_contents,
const GURL& url,
const content::SSLStatus& ssl,
content::CertStore* cert_store)
: TabSpecificContentSettings::SiteDataObserver(
tab_specific_content_settings),
ui_(ui),
web_contents_(web_contents),
show_info_bar_(false),
site_url_(url),
site_identity_status_(SITE_IDENTITY_STATUS_UNKNOWN),
cert_id_(0),
site_connection_status_(SITE_CONNECTION_STATUS_UNKNOWN),
cert_store_(cert_store),
content_settings_(profile->GetHostContentSettingsMap()),
chrome_ssl_host_state_delegate_(
ChromeSSLHostStateDelegateFactory::GetForProfile(profile)),
did_revoke_user_ssl_decisions_(false) {
Init(profile, url, ssl);
PresentSitePermissions();
PresentSiteData();
PresentSiteIdentity();
RecordWebsiteSettingsAction(WEBSITE_SETTINGS_OPENED);
}
| 171,781 |
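The essence of this fix is that the bubble now stores the WebContents and resolves tab-scoped services at the moment they are needed, instead of caching an InfoBarService* that can be destroyed with its tab. A rough sketch of that lookup-at-use pattern follows; the method body is illustrative, not the exact Chromium code.

void WebsiteSettings::OnUIClosing() {
  if (show_info_bar_ && web_contents_) {
    // Look the service up now; it is simply absent if the tab already closed.
    InfoBarService* infobar_service =
        InfoBarService::FromWebContents(web_contents_);
    if (infobar_service)
      WebsiteSettingsInfoBarDelegate::Create(infobar_service);
  }
}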
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: native_handle* Parcel::readNativeHandle() const
{
int numFds, numInts;
status_t err;
err = readInt32(&numFds);
if (err != NO_ERROR) return 0;
err = readInt32(&numInts);
if (err != NO_ERROR) return 0;
native_handle* h = native_handle_create(numFds, numInts);
for (int i=0 ; err==NO_ERROR && i<numFds ; i++) {
h->data[i] = dup(readFileDescriptor());
if (h->data[i] < 0) err = BAD_VALUE;
}
err = read(h->data + numFds, sizeof(int)*numInts);
if (err != NO_ERROR) {
native_handle_close(h);
native_handle_delete(h);
h = 0;
}
return h;
}
Commit Message: Verify that the native handle was created
The inputs to native_handle_create can cause an overflowed allocation,
so check the return value of native_handle_create before accessing
the memory it returns.
Bug:19334482
Change-Id: I1f489382776c2a1390793a79dc27ea17baa9b2a2
(cherry picked from commit eaac99a7172da52a76ba48c26413778a74951b1a)
CWE ID: CWE-189 | native_handle* Parcel::readNativeHandle() const
{
int numFds, numInts;
status_t err;
err = readInt32(&numFds);
if (err != NO_ERROR) return 0;
err = readInt32(&numInts);
if (err != NO_ERROR) return 0;
native_handle* h = native_handle_create(numFds, numInts);
if (!h) {
return 0;
}
for (int i=0 ; err==NO_ERROR && i<numFds ; i++) {
h->data[i] = dup(readFileDescriptor());
if (h->data[i] < 0) err = BAD_VALUE;
}
err = read(h->data + numFds, sizeof(int)*numInts);
if (err != NO_ERROR) {
native_handle_close(h);
native_handle_delete(h);
h = 0;
}
return h;
}
| 173,373 |
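The overflow mentioned in the commit message comes from native_handle_create() sizing its allocation from the two counts read out of the Parcel. Below is a toy illustration of how 32-bit size arithmetic wraps when those counts are attacker-controlled; the constant 12 is only a stand-in for the header size, not the real sizeof(native_handle_t).

#include <cstdint>
#include <cstdio>

int main() {
    int numFds = 0x40000000, numInts = 0x40000000;      // attacker-controlled
    uint32_t count = static_cast<uint32_t>(numFds) +
                     static_cast<uint32_t>(numInts);    // 0x80000000
    uint32_t bytes = 12u + static_cast<uint32_t>(sizeof(int)) * count;
    std::printf("requested allocation: %u bytes\n", bytes);  // wraps to 12
    return 0;
}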
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int __perf_event_overflow(struct perf_event *event, int nmi,
int throttle, struct perf_sample_data *data,
struct pt_regs *regs)
{
int events = atomic_read(&event->event_limit);
struct hw_perf_event *hwc = &event->hw;
int ret = 0;
/*
* Non-sampling counters might still use the PMI to fold short
* hardware counters, ignore those.
*/
if (unlikely(!is_sampling_event(event)))
return 0;
if (unlikely(hwc->interrupts >= max_samples_per_tick)) {
if (throttle) {
hwc->interrupts = MAX_INTERRUPTS;
perf_log_throttle(event, 0);
ret = 1;
}
} else
hwc->interrupts++;
if (event->attr.freq) {
u64 now = perf_clock();
s64 delta = now - hwc->freq_time_stamp;
hwc->freq_time_stamp = now;
if (delta > 0 && delta < 2*TICK_NSEC)
perf_adjust_period(event, delta, hwc->last_period);
}
/*
* XXX event_limit might not quite work as expected on inherited
* events
*/
event->pending_kill = POLL_IN;
if (events && atomic_dec_and_test(&event->event_limit)) {
ret = 1;
event->pending_kill = POLL_HUP;
if (nmi) {
event->pending_disable = 1;
irq_work_queue(&event->pending);
} else
perf_event_disable(event);
}
if (event->overflow_handler)
event->overflow_handler(event, nmi, data, regs);
else
perf_event_output(event, nmi, data, regs);
if (event->fasync && event->pending_kill) {
if (nmi) {
event->pending_wakeup = 1;
irq_work_queue(&event->pending);
} else
perf_event_wakeup(event);
}
return ret;
}
Commit Message: perf: Remove the nmi parameter from the swevent and overflow interface
The nmi parameter indicated if we could do wakeups from the current
context, if not, we would set some state and self-IPI and let the
resulting interrupt do the wakeup.
For the various event classes:
- hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from
the PMI-tail (ARM etc.)
- tracepoint: nmi=0; since tracepoint could be from NMI context.
- software: nmi=[0,1]; some, like the schedule thing cannot
perform wakeups, and hence need 0.
As one can see, there is very little nmi=1 usage, and the down-side of
not using it is that on some platforms some software events can have a
jiffy delay in wakeup (when arch_irq_work_raise isn't implemented).
The up-side however is that we can remove the nmi parameter and save a
bunch of conditionals in fast paths.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Michael Cree <mcree@orcon.net.nz>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jason Wessel <jason.wessel@windriver.com>
Cc: Don Zickus <dzickus@redhat.com>
Link: http://lkml.kernel.org/n/tip-agjev8eu666tvknpb3iaj0fg@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
CWE ID: CWE-399 | static int __perf_event_overflow(struct perf_event *event,
int throttle, struct perf_sample_data *data,
struct pt_regs *regs)
{
int events = atomic_read(&event->event_limit);
struct hw_perf_event *hwc = &event->hw;
int ret = 0;
/*
* Non-sampling counters might still use the PMI to fold short
* hardware counters, ignore those.
*/
if (unlikely(!is_sampling_event(event)))
return 0;
if (unlikely(hwc->interrupts >= max_samples_per_tick)) {
if (throttle) {
hwc->interrupts = MAX_INTERRUPTS;
perf_log_throttle(event, 0);
ret = 1;
}
} else
hwc->interrupts++;
if (event->attr.freq) {
u64 now = perf_clock();
s64 delta = now - hwc->freq_time_stamp;
hwc->freq_time_stamp = now;
if (delta > 0 && delta < 2*TICK_NSEC)
perf_adjust_period(event, delta, hwc->last_period);
}
/*
* XXX event_limit might not quite work as expected on inherited
* events
*/
event->pending_kill = POLL_IN;
if (events && atomic_dec_and_test(&event->event_limit)) {
ret = 1;
event->pending_kill = POLL_HUP;
event->pending_disable = 1;
irq_work_queue(&event->pending);
}
if (event->overflow_handler)
event->overflow_handler(event, data, regs);
else
perf_event_output(event, data, regs);
if (event->fasync && event->pending_kill) {
event->pending_wakeup = 1;
irq_work_queue(&event->pending);
}
return ret;
}
| 165,826 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: WORD32 ih264d_mark_err_slice_skip(dec_struct_t * ps_dec,
WORD32 num_mb_skip,
UWORD8 u1_is_idr_slice,
UWORD16 u2_frame_num,
pocstruct_t *ps_cur_poc,
WORD32 prev_slice_err)
{
WORD32 i2_cur_mb_addr;
UWORD32 u1_num_mbs, u1_num_mbsNby2;
UWORD32 u1_mb_idx = ps_dec->u1_mb_idx;
UWORD32 i2_mb_skip_run;
UWORD32 u1_num_mbs_next, u1_end_of_row;
const UWORD32 i2_pic_wdin_mbs = ps_dec->u2_frm_wd_in_mbs;
UWORD32 u1_slice_end;
UWORD32 u1_tfr_n_mb;
UWORD32 u1_decode_nmb;
dec_bit_stream_t * const ps_bitstrm = ps_dec->ps_bitstrm;
dec_slice_params_t * ps_slice = ps_dec->ps_cur_slice;
UWORD32 *pu4_bitstrm_buf = ps_bitstrm->pu4_buffer;
UWORD32 *pu4_bitstrm_ofst = &ps_bitstrm->u4_ofst;
deblk_mb_t *ps_cur_deblk_mb;
dec_mb_info_t *ps_cur_mb_info;
parse_pmbarams_t *ps_parse_mb_data;
UWORD32 u1_inter_mb_type;
UWORD32 u1_deblk_mb_type;
UWORD16 u2_total_mbs_coded;
UWORD32 u1_mbaff = ps_slice->u1_mbaff_frame_flag;
parse_part_params_t *ps_part_info;
WORD32 ret;
if(ps_dec->ps_dec_err_status->u1_err_flag & REJECT_CUR_PIC)
{
ih264d_err_pic_dispbuf_mgr(ps_dec);
return 0;
}
if(prev_slice_err == 1)
{
/* first slice - missing/header corruption */
ps_dec->ps_cur_slice->u2_frame_num = u2_frame_num;
if(!ps_dec->u1_first_slice_in_stream)
{
ih264d_end_of_pic(ps_dec, u1_is_idr_slice,
ps_dec->ps_cur_slice->u2_frame_num);
ps_dec->s_cur_pic_poc.u2_frame_num =
ps_dec->ps_cur_slice->u2_frame_num;
}
{
WORD32 i, j, poc = 0;
ps_dec->ps_cur_slice->u2_first_mb_in_slice = 0;
ps_dec->pf_mvpred = ih264d_mvpred_nonmbaff;
ps_dec->p_form_mb_part_info = ih264d_form_mb_part_info_bp;
ps_dec->p_motion_compensate = ih264d_motion_compensate_bp;
if(ps_dec->ps_cur_pic != NULL)
poc = ps_dec->ps_cur_pic->i4_poc + 2;
j = 0;
for(i = 0; i < MAX_NUM_PIC_PARAMS; i++)
if(ps_dec->ps_pps[i].u1_is_valid == TRUE)
j = i;
{
ret = ih264d_start_of_pic(ps_dec, poc, ps_cur_poc,
ps_dec->ps_cur_slice->u2_frame_num,
&ps_dec->ps_pps[j]);
if(ret != OK)
{
return ret;
}
}
ps_dec->ps_ref_pic_buf_lx[0][0]->u1_pic_buf_id = 0;
ps_dec->u4_output_present = 0;
{
ih264d_get_next_display_field(ps_dec,
ps_dec->ps_out_buffer,
&(ps_dec->s_disp_op));
/* If error code is non-zero then there is no buffer available for display,
hence avoid format conversion */
if(0 != ps_dec->s_disp_op.u4_error_code)
{
ps_dec->u4_fmt_conv_cur_row = ps_dec->s_disp_frame_info.u4_y_ht;
}
else
ps_dec->u4_output_present = 1;
}
if(ps_dec->u1_separate_parse == 1)
{
if(ps_dec->u4_dec_thread_created == 0)
{
ithread_create(ps_dec->pv_dec_thread_handle, NULL,
(void *)ih264d_decode_picture_thread,
(void *)ps_dec);
ps_dec->u4_dec_thread_created = 1;
}
if((ps_dec->u4_num_cores == 3) &&
((ps_dec->u4_app_disable_deblk_frm == 0) || ps_dec->i1_recon_in_thread3_flag)
&& (ps_dec->u4_bs_deblk_thread_created == 0))
{
ps_dec->u4_start_recon_deblk = 0;
ithread_create(ps_dec->pv_bs_deblk_thread_handle, NULL,
(void *)ih264d_recon_deblk_thread,
(void *)ps_dec);
ps_dec->u4_bs_deblk_thread_created = 1;
}
}
}
}
else
{
dec_slice_struct_t *ps_parse_cur_slice;
ps_parse_cur_slice = ps_dec->ps_dec_slice_buf + ps_dec->u2_cur_slice_num;
if(ps_dec->u1_slice_header_done
&& ps_parse_cur_slice == ps_dec->ps_parse_cur_slice)
{
u1_num_mbs = ps_dec->u4_num_mbs_cur_nmb;
if(u1_num_mbs)
{
ps_cur_mb_info = ps_dec->ps_nmb_info + u1_num_mbs - 1;
}
else
{
if(ps_dec->u1_separate_parse)
{
ps_cur_mb_info = ps_dec->ps_nmb_info - 1;
}
else
{
ps_cur_mb_info = ps_dec->ps_nmb_info
+ ps_dec->u4_num_mbs_prev_nmb - 1;
}
}
ps_dec->u2_mby = ps_cur_mb_info->u2_mby;
ps_dec->u2_mbx = ps_cur_mb_info->u2_mbx;
ps_dec->u1_mb_ngbr_availablity =
ps_cur_mb_info->u1_mb_ngbr_availablity;
ps_dec->pv_parse_tu_coeff_data = ps_dec->pv_prev_mb_parse_tu_coeff_data;
ps_dec->u2_cur_mb_addr--;
ps_dec->i4_submb_ofst -= SUB_BLK_SIZE;
if(u1_num_mbs)
{
if (ps_dec->u1_pr_sl_type == P_SLICE
|| ps_dec->u1_pr_sl_type == B_SLICE)
{
ps_dec->pf_mvpred_ref_tfr_nby2mb(ps_dec, u1_mb_idx, u1_num_mbs);
ps_dec->ps_part = ps_dec->ps_parse_part_params;
}
u1_num_mbs_next = i2_pic_wdin_mbs - ps_dec->u2_mbx - 1;
u1_end_of_row = (!u1_num_mbs_next)
&& (!(u1_mbaff && (u1_num_mbs & 0x01)));
u1_slice_end = 1;
u1_tfr_n_mb = 1;
ps_cur_mb_info->u1_end_of_slice = u1_slice_end;
if(ps_dec->u1_separate_parse)
{
ih264d_parse_tfr_nmb(ps_dec, u1_mb_idx, u1_num_mbs,
u1_num_mbs_next, u1_tfr_n_mb, u1_end_of_row);
ps_dec->ps_nmb_info += u1_num_mbs;
}
else
{
ih264d_decode_recon_tfr_nmb(ps_dec, u1_mb_idx, u1_num_mbs,
u1_num_mbs_next, u1_tfr_n_mb, u1_end_of_row);
}
ps_dec->u1_mb_idx = 0;
ps_dec->u4_num_mbs_cur_nmb = 0;
}
if(ps_dec->u2_total_mbs_coded
>= ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs)
{
ps_dec->u1_pic_decode_done = 1;
return 0;
}
ps_dec->u2_cur_slice_num++;
ps_dec->i2_prev_slice_mbx = ps_dec->u2_mbx;
ps_dec->i2_prev_slice_mby = ps_dec->u2_mby;
ps_dec->ps_parse_cur_slice++;
}
else
{
ps_dec->ps_parse_cur_slice = ps_dec->ps_dec_slice_buf
+ ps_dec->u2_cur_slice_num;
}
}
/******************************************************/
/* Initializations to new slice */
/******************************************************/
{
WORD32 num_entries;
WORD32 size;
UWORD8 *pu1_buf;
num_entries = MIN(MAX_FRAMES, ps_dec->u4_num_ref_frames_at_init);
num_entries = 2 * ((2 * num_entries) + 1);
size = num_entries * sizeof(void *);
size += PAD_MAP_IDX_POC * sizeof(void *);
pu1_buf = (UWORD8 *)ps_dec->pv_map_ref_idx_to_poc_buf;
pu1_buf += size * ps_dec->u2_cur_slice_num;
ps_dec->ps_parse_cur_slice->ppv_map_ref_idx_to_poc = (volatile void **)pu1_buf;
}
ps_dec->ps_cur_slice->u2_first_mb_in_slice = ps_dec->u2_total_mbs_coded << u1_mbaff;
if(ps_dec->ps_cur_slice->u1_field_pic_flag)
ps_dec->u2_prv_frame_num = ps_dec->ps_cur_slice->u2_frame_num;
ps_dec->ps_parse_cur_slice->u4_first_mb_in_slice = ps_dec->u2_total_mbs_coded << u1_mbaff;
ps_dec->ps_parse_cur_slice->u2_log2Y_crwd = ps_dec->ps_cur_slice->u2_log2Y_crwd;
if(ps_dec->u1_separate_parse)
{
ps_dec->ps_parse_cur_slice->pv_tu_coeff_data_start = ps_dec->pv_parse_tu_coeff_data;
}
else
{
ps_dec->pv_proc_tu_coeff_data = ps_dec->pv_parse_tu_coeff_data;
}
/******************************************************/
/* Initializations specific to P slice */
/******************************************************/
u1_inter_mb_type = P_MB;
u1_deblk_mb_type = D_INTER_MB;
ps_dec->ps_cur_slice->u1_slice_type = P_SLICE;
ps_dec->ps_parse_cur_slice->slice_type = P_SLICE;
ps_dec->pf_mvpred_ref_tfr_nby2mb = ih264d_mv_pred_ref_tfr_nby2_pmb;
ps_dec->ps_part = ps_dec->ps_parse_part_params;
/******************************************************/
/* Parsing / decoding the slice */
/******************************************************/
ps_dec->u1_slice_header_done = 2;
ps_dec->u1_qp = ps_slice->u1_slice_qp;
ih264d_update_qp(ps_dec, 0);
u1_mb_idx = ps_dec->u1_mb_idx;
ps_parse_mb_data = ps_dec->ps_parse_mb_data;
u1_num_mbs = u1_mb_idx;
u1_slice_end = 0;
u1_tfr_n_mb = 0;
u1_decode_nmb = 0;
u1_num_mbsNby2 = 0;
i2_cur_mb_addr = ps_dec->u2_total_mbs_coded;
i2_mb_skip_run = num_mb_skip;
while(!u1_slice_end)
{
UWORD8 u1_mb_type;
if(i2_cur_mb_addr > ps_dec->ps_cur_sps->u2_max_mb_addr)
break;
ps_cur_mb_info = ps_dec->ps_nmb_info + u1_num_mbs;
ps_dec->u4_num_mbs_cur_nmb = u1_num_mbs;
ps_cur_mb_info->u1_Mux = 0;
ps_dec->u4_num_pmbair = (u1_num_mbs >> u1_mbaff);
ps_cur_deblk_mb = ps_dec->ps_deblk_mbn + u1_num_mbs;
ps_cur_mb_info->u1_end_of_slice = 0;
/* Storing Default partition info */
ps_parse_mb_data->u1_num_part = 1;
ps_parse_mb_data->u1_isI_mb = 0;
/**************************************************************/
/* Get the required information for decoding of MB */
/**************************************************************/
/* mb_x, mb_y, neighbor availablity, */
if (u1_mbaff)
ih264d_get_mb_info_cavlc_mbaff(ps_dec, i2_cur_mb_addr, ps_cur_mb_info, i2_mb_skip_run);
else
ih264d_get_mb_info_cavlc_nonmbaff(ps_dec, i2_cur_mb_addr, ps_cur_mb_info, i2_mb_skip_run);
/* Set the deblocking parameters for this MB */
if(ps_dec->u4_app_disable_deblk_frm == 0)
{
ih264d_set_deblocking_parameters(ps_cur_deblk_mb, ps_slice,
ps_dec->u1_mb_ngbr_availablity,
ps_dec->u1_cur_mb_fld_dec_flag);
}
/* Set appropriate flags in ps_cur_mb_info and ps_dec */
ps_dec->i1_prev_mb_qp_delta = 0;
ps_dec->u1_sub_mb_num = 0;
ps_cur_mb_info->u1_mb_type = MB_SKIP;
ps_cur_mb_info->u1_mb_mc_mode = PRED_16x16;
ps_cur_mb_info->u1_cbp = 0;
/* Storing Skip partition info */
ps_part_info = ps_dec->ps_part;
ps_part_info->u1_is_direct = PART_DIRECT_16x16;
ps_part_info->u1_sub_mb_num = 0;
ps_dec->ps_part++;
/* Update Nnzs */
ih264d_update_nnz_for_skipmb(ps_dec, ps_cur_mb_info, CAVLC);
ps_cur_mb_info->ps_curmb->u1_mb_type = u1_inter_mb_type;
ps_cur_deblk_mb->u1_mb_type |= u1_deblk_mb_type;
i2_mb_skip_run--;
ps_cur_deblk_mb->u1_mb_qp = ps_dec->u1_qp;
if (u1_mbaff)
{
ih264d_update_mbaff_left_nnz(ps_dec, ps_cur_mb_info);
}
/**************************************************************/
/* Get next Macroblock address */
/**************************************************************/
i2_cur_mb_addr++;
u1_num_mbs++;
ps_dec->u2_total_mbs_coded++;
u1_num_mbsNby2++;
ps_parse_mb_data++;
/****************************************************************/
/* Check for End Of Row and other flags that determine when to */
/* do DMA setup for N/2-Mb, Decode for N-Mb, and Transfer for */
/* N-Mb */
/****************************************************************/
u1_num_mbs_next = i2_pic_wdin_mbs - ps_dec->u2_mbx - 1;
u1_end_of_row = (!u1_num_mbs_next) && (!(u1_mbaff && (u1_num_mbs & 0x01)));
u1_slice_end = !i2_mb_skip_run;
u1_tfr_n_mb = (u1_num_mbs == ps_dec->u1_recon_mb_grp) || u1_end_of_row
|| u1_slice_end;
u1_decode_nmb = u1_tfr_n_mb || u1_slice_end;
ps_cur_mb_info->u1_end_of_slice = u1_slice_end;
if(u1_decode_nmb)
{
ps_dec->pf_mvpred_ref_tfr_nby2mb(ps_dec, u1_mb_idx, u1_num_mbs);
u1_num_mbsNby2 = 0;
ps_parse_mb_data = ps_dec->ps_parse_mb_data;
ps_dec->ps_part = ps_dec->ps_parse_part_params;
if(ps_dec->u1_separate_parse)
{
ih264d_parse_tfr_nmb(ps_dec, u1_mb_idx, u1_num_mbs,
u1_num_mbs_next, u1_tfr_n_mb, u1_end_of_row);
ps_dec->ps_nmb_info += u1_num_mbs;
}
else
{
ih264d_decode_recon_tfr_nmb(ps_dec, u1_mb_idx, u1_num_mbs, u1_num_mbs_next,
u1_tfr_n_mb, u1_end_of_row);
}
if(u1_tfr_n_mb)
u1_num_mbs = 0;
u1_mb_idx = u1_num_mbs;
ps_dec->u1_mb_idx = u1_num_mbs;
}
}
ps_dec->u4_num_mbs_cur_nmb = 0;
ps_dec->ps_cur_slice->u4_mbs_in_slice = i2_cur_mb_addr
- ps_dec->ps_parse_cur_slice->u4_first_mb_in_slice;
H264_DEC_DEBUG_PRINT("Mbs in slice: %d\n", ps_dec->ps_cur_slice->u4_mbs_in_slice);
ps_dec->u2_cur_slice_num++;
/* incremented here only if first slice is inserted */
if(ps_dec->u4_first_slice_in_pic != 0)
ps_dec->ps_parse_cur_slice++;
ps_dec->i2_prev_slice_mbx = ps_dec->u2_mbx;
ps_dec->i2_prev_slice_mby = ps_dec->u2_mby;
if(ps_dec->u2_total_mbs_coded
>= ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs)
{
ps_dec->u1_pic_decode_done = 1;
}
return 0;
}
Commit Message: Decoder: Update mb count after mb map is set.
Bug: 25928803
Change-Id: Iccc58a7dd1c5c6ea656dfca332cfb8dddba4de37
CWE ID: CWE-119 | WORD32 ih264d_mark_err_slice_skip(dec_struct_t * ps_dec,
WORD32 num_mb_skip,
UWORD8 u1_is_idr_slice,
UWORD16 u2_frame_num,
pocstruct_t *ps_cur_poc,
WORD32 prev_slice_err)
{
WORD32 i2_cur_mb_addr;
UWORD32 u1_num_mbs, u1_num_mbsNby2;
UWORD32 u1_mb_idx = ps_dec->u1_mb_idx;
UWORD32 i2_mb_skip_run;
UWORD32 u1_num_mbs_next, u1_end_of_row;
const UWORD32 i2_pic_wdin_mbs = ps_dec->u2_frm_wd_in_mbs;
UWORD32 u1_slice_end;
UWORD32 u1_tfr_n_mb;
UWORD32 u1_decode_nmb;
dec_bit_stream_t * const ps_bitstrm = ps_dec->ps_bitstrm;
dec_slice_params_t * ps_slice = ps_dec->ps_cur_slice;
UWORD32 *pu4_bitstrm_buf = ps_bitstrm->pu4_buffer;
UWORD32 *pu4_bitstrm_ofst = &ps_bitstrm->u4_ofst;
deblk_mb_t *ps_cur_deblk_mb;
dec_mb_info_t *ps_cur_mb_info;
parse_pmbarams_t *ps_parse_mb_data;
UWORD32 u1_inter_mb_type;
UWORD32 u1_deblk_mb_type;
UWORD16 u2_total_mbs_coded;
UWORD32 u1_mbaff = ps_slice->u1_mbaff_frame_flag;
parse_part_params_t *ps_part_info;
WORD32 ret;
if(ps_dec->ps_dec_err_status->u1_err_flag & REJECT_CUR_PIC)
{
ih264d_err_pic_dispbuf_mgr(ps_dec);
return 0;
}
if(prev_slice_err == 1)
{
/* first slice - missing/header corruption */
ps_dec->ps_cur_slice->u2_frame_num = u2_frame_num;
if(!ps_dec->u1_first_slice_in_stream)
{
ih264d_end_of_pic(ps_dec, u1_is_idr_slice,
ps_dec->ps_cur_slice->u2_frame_num);
ps_dec->s_cur_pic_poc.u2_frame_num =
ps_dec->ps_cur_slice->u2_frame_num;
}
{
WORD32 i, j, poc = 0;
ps_dec->ps_cur_slice->u2_first_mb_in_slice = 0;
ps_dec->pf_mvpred = ih264d_mvpred_nonmbaff;
ps_dec->p_form_mb_part_info = ih264d_form_mb_part_info_bp;
ps_dec->p_motion_compensate = ih264d_motion_compensate_bp;
if(ps_dec->ps_cur_pic != NULL)
poc = ps_dec->ps_cur_pic->i4_poc + 2;
j = 0;
for(i = 0; i < MAX_NUM_PIC_PARAMS; i++)
if(ps_dec->ps_pps[i].u1_is_valid == TRUE)
j = i;
{
ret = ih264d_start_of_pic(ps_dec, poc, ps_cur_poc,
ps_dec->ps_cur_slice->u2_frame_num,
&ps_dec->ps_pps[j]);
if(ret != OK)
{
return ret;
}
}
ps_dec->ps_ref_pic_buf_lx[0][0]->u1_pic_buf_id = 0;
ps_dec->u4_output_present = 0;
{
ih264d_get_next_display_field(ps_dec,
ps_dec->ps_out_buffer,
&(ps_dec->s_disp_op));
/* If error code is non-zero then there is no buffer available for display,
hence avoid format conversion */
if(0 != ps_dec->s_disp_op.u4_error_code)
{
ps_dec->u4_fmt_conv_cur_row = ps_dec->s_disp_frame_info.u4_y_ht;
}
else
ps_dec->u4_output_present = 1;
}
if(ps_dec->u1_separate_parse == 1)
{
if(ps_dec->u4_dec_thread_created == 0)
{
ithread_create(ps_dec->pv_dec_thread_handle, NULL,
(void *)ih264d_decode_picture_thread,
(void *)ps_dec);
ps_dec->u4_dec_thread_created = 1;
}
if((ps_dec->u4_num_cores == 3) &&
((ps_dec->u4_app_disable_deblk_frm == 0) || ps_dec->i1_recon_in_thread3_flag)
&& (ps_dec->u4_bs_deblk_thread_created == 0))
{
ps_dec->u4_start_recon_deblk = 0;
ithread_create(ps_dec->pv_bs_deblk_thread_handle, NULL,
(void *)ih264d_recon_deblk_thread,
(void *)ps_dec);
ps_dec->u4_bs_deblk_thread_created = 1;
}
}
}
}
else
{
dec_slice_struct_t *ps_parse_cur_slice;
ps_parse_cur_slice = ps_dec->ps_dec_slice_buf + ps_dec->u2_cur_slice_num;
if(ps_dec->u1_slice_header_done
&& ps_parse_cur_slice == ps_dec->ps_parse_cur_slice)
{
u1_num_mbs = ps_dec->u4_num_mbs_cur_nmb;
if(u1_num_mbs)
{
ps_cur_mb_info = ps_dec->ps_nmb_info + u1_num_mbs - 1;
}
else
{
if(ps_dec->u1_separate_parse)
{
ps_cur_mb_info = ps_dec->ps_nmb_info - 1;
}
else
{
ps_cur_mb_info = ps_dec->ps_nmb_info
+ ps_dec->u4_num_mbs_prev_nmb - 1;
}
}
ps_dec->u2_mby = ps_cur_mb_info->u2_mby;
ps_dec->u2_mbx = ps_cur_mb_info->u2_mbx;
ps_dec->u1_mb_ngbr_availablity =
ps_cur_mb_info->u1_mb_ngbr_availablity;
ps_dec->pv_parse_tu_coeff_data = ps_dec->pv_prev_mb_parse_tu_coeff_data;
ps_dec->u2_cur_mb_addr--;
ps_dec->i4_submb_ofst -= SUB_BLK_SIZE;
if(u1_num_mbs)
{
if (ps_dec->u1_pr_sl_type == P_SLICE
|| ps_dec->u1_pr_sl_type == B_SLICE)
{
ps_dec->pf_mvpred_ref_tfr_nby2mb(ps_dec, u1_mb_idx, u1_num_mbs);
ps_dec->ps_part = ps_dec->ps_parse_part_params;
}
u1_num_mbs_next = i2_pic_wdin_mbs - ps_dec->u2_mbx - 1;
u1_end_of_row = (!u1_num_mbs_next)
&& (!(u1_mbaff && (u1_num_mbs & 0x01)));
u1_slice_end = 1;
u1_tfr_n_mb = 1;
ps_cur_mb_info->u1_end_of_slice = u1_slice_end;
if(ps_dec->u1_separate_parse)
{
ih264d_parse_tfr_nmb(ps_dec, u1_mb_idx, u1_num_mbs,
u1_num_mbs_next, u1_tfr_n_mb, u1_end_of_row);
ps_dec->ps_nmb_info += u1_num_mbs;
}
else
{
ih264d_decode_recon_tfr_nmb(ps_dec, u1_mb_idx, u1_num_mbs,
u1_num_mbs_next, u1_tfr_n_mb, u1_end_of_row);
}
ps_dec->u2_total_mbs_coded += u1_num_mbs;
ps_dec->u1_mb_idx = 0;
ps_dec->u4_num_mbs_cur_nmb = 0;
}
if(ps_dec->u2_total_mbs_coded
>= ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs)
{
ps_dec->u1_pic_decode_done = 1;
return 0;
}
ps_dec->u2_cur_slice_num++;
ps_dec->i2_prev_slice_mbx = ps_dec->u2_mbx;
ps_dec->i2_prev_slice_mby = ps_dec->u2_mby;
ps_dec->ps_parse_cur_slice++;
}
else
{
ps_dec->ps_parse_cur_slice = ps_dec->ps_dec_slice_buf
+ ps_dec->u2_cur_slice_num;
}
}
/******************************************************/
/* Initializations to new slice */
/******************************************************/
{
WORD32 num_entries;
WORD32 size;
UWORD8 *pu1_buf;
num_entries = MIN(MAX_FRAMES, ps_dec->u4_num_ref_frames_at_init);
num_entries = 2 * ((2 * num_entries) + 1);
size = num_entries * sizeof(void *);
size += PAD_MAP_IDX_POC * sizeof(void *);
pu1_buf = (UWORD8 *)ps_dec->pv_map_ref_idx_to_poc_buf;
pu1_buf += size * ps_dec->u2_cur_slice_num;
ps_dec->ps_parse_cur_slice->ppv_map_ref_idx_to_poc = (volatile void **)pu1_buf;
}
ps_dec->ps_cur_slice->u2_first_mb_in_slice = ps_dec->u2_total_mbs_coded << u1_mbaff;
if(ps_dec->ps_cur_slice->u1_field_pic_flag)
ps_dec->u2_prv_frame_num = ps_dec->ps_cur_slice->u2_frame_num;
ps_dec->ps_parse_cur_slice->u4_first_mb_in_slice = ps_dec->u2_total_mbs_coded << u1_mbaff;
ps_dec->ps_parse_cur_slice->u2_log2Y_crwd = ps_dec->ps_cur_slice->u2_log2Y_crwd;
if(ps_dec->u1_separate_parse)
{
ps_dec->ps_parse_cur_slice->pv_tu_coeff_data_start = ps_dec->pv_parse_tu_coeff_data;
}
else
{
ps_dec->pv_proc_tu_coeff_data = ps_dec->pv_parse_tu_coeff_data;
}
/******************************************************/
/* Initializations specific to P slice */
/******************************************************/
u1_inter_mb_type = P_MB;
u1_deblk_mb_type = D_INTER_MB;
ps_dec->ps_cur_slice->u1_slice_type = P_SLICE;
ps_dec->ps_parse_cur_slice->slice_type = P_SLICE;
ps_dec->pf_mvpred_ref_tfr_nby2mb = ih264d_mv_pred_ref_tfr_nby2_pmb;
ps_dec->ps_part = ps_dec->ps_parse_part_params;
/******************************************************/
/* Parsing / decoding the slice */
/******************************************************/
ps_dec->u1_slice_header_done = 2;
ps_dec->u1_qp = ps_slice->u1_slice_qp;
ih264d_update_qp(ps_dec, 0);
u1_mb_idx = ps_dec->u1_mb_idx;
ps_parse_mb_data = ps_dec->ps_parse_mb_data;
u1_num_mbs = u1_mb_idx;
u1_slice_end = 0;
u1_tfr_n_mb = 0;
u1_decode_nmb = 0;
u1_num_mbsNby2 = 0;
i2_cur_mb_addr = ps_dec->u2_total_mbs_coded;
i2_mb_skip_run = num_mb_skip;
while(!u1_slice_end)
{
UWORD8 u1_mb_type;
if(i2_cur_mb_addr > ps_dec->ps_cur_sps->u2_max_mb_addr)
break;
ps_cur_mb_info = ps_dec->ps_nmb_info + u1_num_mbs;
ps_dec->u4_num_mbs_cur_nmb = u1_num_mbs;
ps_cur_mb_info->u1_Mux = 0;
ps_dec->u4_num_pmbair = (u1_num_mbs >> u1_mbaff);
ps_cur_deblk_mb = ps_dec->ps_deblk_mbn + u1_num_mbs;
ps_cur_mb_info->u1_end_of_slice = 0;
/* Storing Default partition info */
ps_parse_mb_data->u1_num_part = 1;
ps_parse_mb_data->u1_isI_mb = 0;
/**************************************************************/
/* Get the required information for decoding of MB */
/**************************************************************/
/* mb_x, mb_y, neighbor availablity, */
if (u1_mbaff)
ih264d_get_mb_info_cavlc_mbaff(ps_dec, i2_cur_mb_addr, ps_cur_mb_info, i2_mb_skip_run);
else
ih264d_get_mb_info_cavlc_nonmbaff(ps_dec, i2_cur_mb_addr, ps_cur_mb_info, i2_mb_skip_run);
/* Set the deblocking parameters for this MB */
if(ps_dec->u4_app_disable_deblk_frm == 0)
{
ih264d_set_deblocking_parameters(ps_cur_deblk_mb, ps_slice,
ps_dec->u1_mb_ngbr_availablity,
ps_dec->u1_cur_mb_fld_dec_flag);
}
/* Set appropriate flags in ps_cur_mb_info and ps_dec */
ps_dec->i1_prev_mb_qp_delta = 0;
ps_dec->u1_sub_mb_num = 0;
ps_cur_mb_info->u1_mb_type = MB_SKIP;
ps_cur_mb_info->u1_mb_mc_mode = PRED_16x16;
ps_cur_mb_info->u1_cbp = 0;
/* Storing Skip partition info */
ps_part_info = ps_dec->ps_part;
ps_part_info->u1_is_direct = PART_DIRECT_16x16;
ps_part_info->u1_sub_mb_num = 0;
ps_dec->ps_part++;
/* Update Nnzs */
ih264d_update_nnz_for_skipmb(ps_dec, ps_cur_mb_info, CAVLC);
ps_cur_mb_info->ps_curmb->u1_mb_type = u1_inter_mb_type;
ps_cur_deblk_mb->u1_mb_type |= u1_deblk_mb_type;
i2_mb_skip_run--;
ps_cur_deblk_mb->u1_mb_qp = ps_dec->u1_qp;
if (u1_mbaff)
{
ih264d_update_mbaff_left_nnz(ps_dec, ps_cur_mb_info);
}
/**************************************************************/
/* Get next Macroblock address */
/**************************************************************/
i2_cur_mb_addr++;
u1_num_mbs++;
u1_num_mbsNby2++;
ps_parse_mb_data++;
/****************************************************************/
/* Check for End Of Row and other flags that determine when to */
/* do DMA setup for N/2-Mb, Decode for N-Mb, and Transfer for */
/* N-Mb */
/****************************************************************/
u1_num_mbs_next = i2_pic_wdin_mbs - ps_dec->u2_mbx - 1;
u1_end_of_row = (!u1_num_mbs_next) && (!(u1_mbaff && (u1_num_mbs & 0x01)));
u1_slice_end = !i2_mb_skip_run;
u1_tfr_n_mb = (u1_num_mbs == ps_dec->u1_recon_mb_grp) || u1_end_of_row
|| u1_slice_end;
u1_decode_nmb = u1_tfr_n_mb || u1_slice_end;
ps_cur_mb_info->u1_end_of_slice = u1_slice_end;
if(u1_decode_nmb)
{
ps_dec->pf_mvpred_ref_tfr_nby2mb(ps_dec, u1_mb_idx, u1_num_mbs);
u1_num_mbsNby2 = 0;
ps_parse_mb_data = ps_dec->ps_parse_mb_data;
ps_dec->ps_part = ps_dec->ps_parse_part_params;
if(ps_dec->u1_separate_parse)
{
ih264d_parse_tfr_nmb(ps_dec, u1_mb_idx, u1_num_mbs,
u1_num_mbs_next, u1_tfr_n_mb, u1_end_of_row);
ps_dec->ps_nmb_info += u1_num_mbs;
}
else
{
ih264d_decode_recon_tfr_nmb(ps_dec, u1_mb_idx, u1_num_mbs, u1_num_mbs_next,
u1_tfr_n_mb, u1_end_of_row);
}
ps_dec->u2_total_mbs_coded += u1_num_mbs;
if(u1_tfr_n_mb)
u1_num_mbs = 0;
u1_mb_idx = u1_num_mbs;
ps_dec->u1_mb_idx = u1_num_mbs;
}
}
ps_dec->u4_num_mbs_cur_nmb = 0;
ps_dec->ps_cur_slice->u4_mbs_in_slice = i2_cur_mb_addr
- ps_dec->ps_parse_cur_slice->u4_first_mb_in_slice;
H264_DEC_DEBUG_PRINT("Mbs in slice: %d\n", ps_dec->ps_cur_slice->u4_mbs_in_slice);
ps_dec->u2_cur_slice_num++;
/* incremented here only if first slice is inserted */
if(ps_dec->u4_first_slice_in_pic != 0)
ps_dec->ps_parse_cur_slice++;
ps_dec->i2_prev_slice_mbx = ps_dec->u2_mbx;
ps_dec->i2_prev_slice_mby = ps_dec->u2_mby;
if(ps_dec->u2_total_mbs_coded
>= ps_dec->u2_frm_ht_in_mbs * ps_dec->u2_frm_wd_in_mbs)
{
ps_dec->u1_pic_decode_done = 1;
}
return 0;
}
| 173,955 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static void rpng2_x_redisplay_image(ulg startcol, ulg startrow,
ulg width, ulg height)
{
uch bg_red = rpng2_info.bg_red;
uch bg_green = rpng2_info.bg_green;
uch bg_blue = rpng2_info.bg_blue;
uch *src, *src2=NULL;
char *dest;
uch r, g, b, a;
ulg i, row, lastrow = 0;
ulg pixel;
int ximage_rowbytes = ximage->bytes_per_line;
Trace((stderr, "beginning display loop (image_channels == %d)\n",
rpng2_info.channels))
Trace((stderr, " (width = %ld, rowbytes = %d, ximage_rowbytes = %d)\n",
rpng2_info.width, rpng2_info.rowbytes, ximage_rowbytes))
Trace((stderr, " (bpp = %d)\n", ximage->bits_per_pixel))
Trace((stderr, " (byte_order = %s)\n", ximage->byte_order == MSBFirst?
"MSBFirst" : (ximage->byte_order == LSBFirst? "LSBFirst" : "unknown")))
/*---------------------------------------------------------------------------
Aside from the use of the rpng2_info struct and of src2 (for background
image), this routine is identical to rpng_x_display_image() in the non-
progressive version of the program--for the simple reason that redisplay
of the image against a new background happens after the image is fully
decoded and therefore is, by definition, non-progressive.
---------------------------------------------------------------------------*/
if (depth == 24 || depth == 32) {
ulg red, green, blue;
int bpp = ximage->bits_per_pixel;
for (lastrow = row = startrow; row < startrow+height; ++row) {
src = rpng2_info.image_data + row*rpng2_info.rowbytes;
if (bg_image)
src2 = bg_data + row*bg_rowbytes;
dest = ximage->data + row*ximage_rowbytes;
if (rpng2_info.channels == 3) {
for (i = rpng2_info.width; i > 0; --i) {
red = *src++;
green = *src++;
blue = *src++;
#ifdef NO_24BIT_MASKS
pixel = (red << RShift) |
(green << GShift) |
(blue << BShift);
/* recall that we set ximage->byte_order = MSBFirst above */
if (bpp == 32) {
*dest++ = (char)((pixel >> 24) & 0xff);
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
} else {
/* this assumes bpp == 24 & bits are packed low */
/* (probably need to use RShift, RMask, etc.) */
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
}
#else
red = (RShift < 0)? red << (-RShift) : red >> RShift;
green = (GShift < 0)? green << (-GShift) : green >> GShift;
blue = (BShift < 0)? blue << (-BShift) : blue >> BShift;
pixel = (red & RMask) | (green & GMask) | (blue & BMask);
/* recall that we set ximage->byte_order = MSBFirst above */
if (bpp == 32) {
*dest++ = (char)((pixel >> 24) & 0xff);
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
} else {
/* GRR BUG */
/* this assumes bpp == 24 & bits are packed low */
/* (probably need to use RShift/RMask/etc. here, too) */
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
}
#endif
}
} else /* if (rpng2_info.channels == 4) */ {
for (i = rpng2_info.width; i > 0; --i) {
r = *src++;
g = *src++;
b = *src++;
a = *src++;
if (bg_image) {
bg_red = *src2++;
bg_green = *src2++;
bg_blue = *src2++;
}
if (a == 255) {
red = r;
green = g;
blue = b;
} else if (a == 0) {
red = bg_red;
green = bg_green;
blue = bg_blue;
} else {
/* this macro (from png.h) composites the foreground
* and background values and puts the result into the
* first argument */
alpha_composite(red, r, a, bg_red);
alpha_composite(green, g, a, bg_green);
alpha_composite(blue, b, a, bg_blue);
}
#ifdef NO_24BIT_MASKS
pixel = (red << RShift) |
(green << GShift) |
(blue << BShift);
/* recall that we set ximage->byte_order = MSBFirst above */
if (bpp == 32) {
*dest++ = (char)((pixel >> 24) & 0xff);
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
} else {
/* this assumes bpp == 24 & bits are packed low */
/* (probably need to use RShift, RMask, etc.) */
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
}
#else
red = (RShift < 0)? red << (-RShift) : red >> RShift;
green = (GShift < 0)? green << (-GShift) : green >> GShift;
blue = (BShift < 0)? blue << (-BShift) : blue >> BShift;
pixel = (red & RMask) | (green & GMask) | (blue & BMask);
/* recall that we set ximage->byte_order = MSBFirst above */
if (bpp == 32) {
*dest++ = (char)((pixel >> 24) & 0xff);
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
} else {
/* GRR BUG */
/* this assumes bpp == 24 & bits are packed low */
/* (probably need to use RShift/RMask/etc. here, too) */
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
}
#endif
}
}
/* display after every 16 lines */
if (((row+1) & 0xf) == 0) {
XPutImage(display, window, gc, ximage, 0, (int)lastrow, 0,
(int)lastrow, rpng2_info.width, 16);
XFlush(display);
lastrow = row + 1;
}
}
} else if (depth == 16) {
ush red, green, blue;
for (lastrow = row = startrow; row < startrow+height; ++row) {
src = rpng2_info.row_pointers[row];
if (bg_image)
src2 = bg_data + row*bg_rowbytes;
dest = ximage->data + row*ximage_rowbytes;
if (rpng2_info.channels == 3) {
for (i = rpng2_info.width; i > 0; --i) {
red = ((ush)(*src) << 8);
++src;
green = ((ush)(*src) << 8);
++src;
blue = ((ush)(*src) << 8);
++src;
pixel = ((red >> RShift) & RMask) |
((green >> GShift) & GMask) |
((blue >> BShift) & BMask);
/* recall that we set ximage->byte_order = MSBFirst above */
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
}
} else /* if (rpng2_info.channels == 4) */ {
for (i = rpng2_info.width; i > 0; --i) {
r = *src++;
g = *src++;
b = *src++;
a = *src++;
if (bg_image) {
bg_red = *src2++;
bg_green = *src2++;
bg_blue = *src2++;
}
if (a == 255) {
red = ((ush)r << 8);
green = ((ush)g << 8);
blue = ((ush)b << 8);
} else if (a == 0) {
red = ((ush)bg_red << 8);
green = ((ush)bg_green << 8);
blue = ((ush)bg_blue << 8);
} else {
/* this macro (from png.h) composites the foreground
* and background values and puts the result back into
* the first argument (== fg byte here: safe) */
alpha_composite(r, r, a, bg_red);
alpha_composite(g, g, a, bg_green);
alpha_composite(b, b, a, bg_blue);
red = ((ush)r << 8);
green = ((ush)g << 8);
blue = ((ush)b << 8);
}
pixel = ((red >> RShift) & RMask) |
((green >> GShift) & GMask) |
((blue >> BShift) & BMask);
/* recall that we set ximage->byte_order = MSBFirst above */
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
}
}
/* display after every 16 lines */
if (((row+1) & 0xf) == 0) {
XPutImage(display, window, gc, ximage, 0, (int)lastrow, 0,
(int)lastrow, rpng2_info.width, 16);
XFlush(display);
lastrow = row + 1;
}
}
} else /* depth == 8 */ {
/* GRR: add 8-bit support */
}
Trace((stderr, "calling final XPutImage()\n"))
if (lastrow < startrow+height) {
XPutImage(display, window, gc, ximage, 0, (int)lastrow, 0,
(int)lastrow, rpng2_info.width, rpng2_info.height-lastrow);
XFlush(display);
}
} /* end function rpng2_x_redisplay_image() */
Commit Message: DO NOT MERGE Update libpng to 1.6.20
BUG:23265085
Change-Id: I85199805636d771f3597b691b63bc0bf46084833
(cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82)
CWE ID: | static void rpng2_x_redisplay_image(ulg startcol, ulg startrow,
ulg width, ulg height)
{
uch bg_red = rpng2_info.bg_red;
uch bg_green = rpng2_info.bg_green;
uch bg_blue = rpng2_info.bg_blue;
uch *src, *src2=NULL;
char *dest;
uch r, g, b, a;
ulg i, row, lastrow = 0;
ulg pixel;
int ximage_rowbytes = ximage->bytes_per_line;
Trace((stderr, "beginning display loop (image_channels == %d)\n",
rpng2_info.channels))
Trace((stderr, " (width = %ld, rowbytes = %d, ximage_rowbytes = %d)\n",
rpng2_info.width, rpng2_info.rowbytes, ximage_rowbytes))
Trace((stderr, " (bpp = %d)\n", ximage->bits_per_pixel))
Trace((stderr, " (byte_order = %s)\n", ximage->byte_order == MSBFirst?
"MSBFirst" : (ximage->byte_order == LSBFirst? "LSBFirst" : "unknown")))
/*---------------------------------------------------------------------------
Aside from the use of the rpng2_info struct and of src2 (for background
image), this routine is identical to rpng_x_display_image() in the non-
progressive version of the program--for the simple reason that redisplay
of the image against a new background happens after the image is fully
decoded and therefore is, by definition, non-progressive.
---------------------------------------------------------------------------*/
if (depth == 24 || depth == 32) {
ulg red, green, blue;
int bpp = ximage->bits_per_pixel;
for (lastrow = row = startrow; row < startrow+height; ++row) {
src = rpng2_info.image_data + row*rpng2_info.rowbytes;
if (bg_image)
src2 = bg_data + row*bg_rowbytes;
dest = ximage->data + row*ximage_rowbytes;
if (rpng2_info.channels == 3) {
for (i = rpng2_info.width; i > 0; --i) {
red = *src++;
green = *src++;
blue = *src++;
#ifdef NO_24BIT_MASKS
pixel = (red << RShift) |
(green << GShift) |
(blue << BShift);
/* recall that we set ximage->byte_order = MSBFirst above */
if (bpp == 32) {
*dest++ = (char)((pixel >> 24) & 0xff);
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
} else {
/* this assumes bpp == 24 & bits are packed low */
/* (probably need to use RShift, RMask, etc.) */
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
}
#else
red = (RShift < 0)? red << (-RShift) : red >> RShift;
green = (GShift < 0)? green << (-GShift) : green >> GShift;
blue = (BShift < 0)? blue << (-BShift) : blue >> BShift;
pixel = (red & RMask) | (green & GMask) | (blue & BMask);
/* recall that we set ximage->byte_order = MSBFirst above */
if (bpp == 32) {
*dest++ = (char)((pixel >> 24) & 0xff);
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
} else {
/* GRR BUG */
/* this assumes bpp == 24 & bits are packed low */
/* (probably need to use RShift/RMask/etc. here, too) */
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
}
#endif
}
} else /* if (rpng2_info.channels == 4) */ {
for (i = rpng2_info.width; i > 0; --i) {
r = *src++;
g = *src++;
b = *src++;
a = *src++;
if (bg_image) {
bg_red = *src2++;
bg_green = *src2++;
bg_blue = *src2++;
}
if (a == 255) {
red = r;
green = g;
blue = b;
} else if (a == 0) {
red = bg_red;
green = bg_green;
blue = bg_blue;
} else {
/* this macro (from png.h) composites the foreground
* and background values and puts the result into the
* first argument */
alpha_composite(red, r, a, bg_red);
alpha_composite(green, g, a, bg_green);
alpha_composite(blue, b, a, bg_blue);
}
#ifdef NO_24BIT_MASKS
pixel = (red << RShift) |
(green << GShift) |
(blue << BShift);
/* recall that we set ximage->byte_order = MSBFirst above */
if (bpp == 32) {
*dest++ = (char)((pixel >> 24) & 0xff);
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
} else {
/* this assumes bpp == 24 & bits are packed low */
/* (probably need to use RShift, RMask, etc.) */
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
}
#else
red = (RShift < 0)? red << (-RShift) : red >> RShift;
green = (GShift < 0)? green << (-GShift) : green >> GShift;
blue = (BShift < 0)? blue << (-BShift) : blue >> BShift;
pixel = (red & RMask) | (green & GMask) | (blue & BMask);
/* recall that we set ximage->byte_order = MSBFirst above */
if (bpp == 32) {
*dest++ = (char)((pixel >> 24) & 0xff);
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
} else {
/* GRR BUG */
/* this assumes bpp == 24 & bits are packed low */
/* (probably need to use RShift/RMask/etc. here, too) */
*dest++ = (char)((pixel >> 16) & 0xff);
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
}
#endif
}
}
/* display after every 16 lines */
if (((row+1) & 0xf) == 0) {
XPutImage(display, window, gc, ximage, 0, (int)lastrow, 0,
(int)lastrow, rpng2_info.width, 16);
XFlush(display);
lastrow = row + 1;
}
}
} else if (depth == 16) {
ush red, green, blue;
for (lastrow = row = startrow; row < startrow+height; ++row) {
src = rpng2_info.row_pointers[row];
if (bg_image)
src2 = bg_data + row*bg_rowbytes;
dest = ximage->data + row*ximage_rowbytes;
if (rpng2_info.channels == 3) {
for (i = rpng2_info.width; i > 0; --i) {
red = ((ush)(*src) << 8);
++src;
green = ((ush)(*src) << 8);
++src;
blue = ((ush)(*src) << 8);
++src;
pixel = ((red >> RShift) & RMask) |
((green >> GShift) & GMask) |
((blue >> BShift) & BMask);
/* recall that we set ximage->byte_order = MSBFirst above */
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
}
} else /* if (rpng2_info.channels == 4) */ {
for (i = rpng2_info.width; i > 0; --i) {
r = *src++;
g = *src++;
b = *src++;
a = *src++;
if (bg_image) {
bg_red = *src2++;
bg_green = *src2++;
bg_blue = *src2++;
}
if (a == 255) {
red = ((ush)r << 8);
green = ((ush)g << 8);
blue = ((ush)b << 8);
} else if (a == 0) {
red = ((ush)bg_red << 8);
green = ((ush)bg_green << 8);
blue = ((ush)bg_blue << 8);
} else {
/* this macro (from png.h) composites the foreground
* and background values and puts the result back into
* the first argument (== fg byte here: safe) */
alpha_composite(r, r, a, bg_red);
alpha_composite(g, g, a, bg_green);
alpha_composite(b, b, a, bg_blue);
red = ((ush)r << 8);
green = ((ush)g << 8);
blue = ((ush)b << 8);
}
pixel = ((red >> RShift) & RMask) |
((green >> GShift) & GMask) |
((blue >> BShift) & BMask);
/* recall that we set ximage->byte_order = MSBFirst above */
*dest++ = (char)((pixel >> 8) & 0xff);
*dest++ = (char)( pixel & 0xff);
}
}
/* display after every 16 lines */
if (((row+1) & 0xf) == 0) {
XPutImage(display, window, gc, ximage, 0, (int)lastrow, 0,
(int)lastrow, rpng2_info.width, 16);
XFlush(display);
lastrow = row + 1;
}
}
} else /* depth == 8 */ {
/* GRR: add 8-bit support */
}
Trace((stderr, "calling final XPutImage()\n"))
if (lastrow < startrow+height) {
XPutImage(display, window, gc, ximage, 0, (int)lastrow, 0,
(int)lastrow, rpng2_info.width, rpng2_info.height-lastrow);
XFlush(display);
}
(void)startcol;
(void)width;
} /* end function rpng2_x_redisplay_image() */
| 173,575 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: Cluster::~Cluster()
{
if (m_entries_count <= 0)
return;
BlockEntry** i = m_entries;
BlockEntry** const j = m_entries + m_entries_count;
while (i != j)
{
BlockEntry* p = *i++;
assert(p);
delete p;
}
delete[] m_entries;
}
Commit Message: libwebm: Pull from upstream
Rolling mkvparser from upstream. Primarily for fixing a bug on parsing
failures with certain Opus WebM files.
Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae
The diff is so huge because there were some style clean ups upstream.
But it was ensured that there were no breaking changes when the style
clean ups was done upstream.
Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039
CWE ID: CWE-119 | Cluster::~Cluster()
if (status < 0) { // error or underflow
len = 1;
return status;
}
++pos; // consume flags byte
assert(pos <= avail);
if (pos >= block_stop)
return E_FILE_FORMAT_INVALID;
const int lacing = int(flags & 0x06) >> 1;
if ((lacing != 0) && (block_stop > avail)) {
len = static_cast<long>(block_stop - pos);
return E_BUFFER_NOT_FULL;
}
pos = block_stop; // consume block-part of block group
assert(pos <= payload_stop);
}
assert(pos == payload_stop);
status = CreateBlock(0x20, // BlockGroup ID
payload_start, payload_size, discard_padding);
if (status != 0)
return status;
m_pos = payload_stop;
return 0; // success
}
| 174,458 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
void *memory;
if (dma_alloc_from_coherent(dev, size, handle, &memory))
return memory;
return __dma_alloc(dev, size, handle, gfp, prot, true,
__builtin_return_address(0));
}
Commit Message: ARM: dma-mapping: don't allow DMA mappings to be marked executable
DMA mapping permissions were being derived from pgprot_kernel directly
without using PAGE_KERNEL. This causes them to be marked with executable
permission, which is not what we want. Fix this.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
CWE ID: CWE-264 | static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
void *memory;
if (dma_alloc_from_coherent(dev, size, handle, &memory))
return memory;
return __dma_alloc(dev, size, handle, gfp, prot, true,
__builtin_return_address(0));
}
| 167,577 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static JSValueRef updateTouchPointCallback(JSContextRef context, JSObjectRef function, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
{
if (argumentCount < 3)
return JSValueMakeUndefined(context);
int index = static_cast<int>(JSValueToNumber(context, arguments[0], exception));
ASSERT(!exception || !*exception);
int x = static_cast<int>(JSValueToNumber(context, arguments[1], exception));
ASSERT(!exception || !*exception);
int y = static_cast<int>(JSValueToNumber(context, arguments[2], exception));
ASSERT(!exception || !*exception);
if (index < 0 || index >= (int)touches.size())
return JSValueMakeUndefined(context);
BlackBerry::Platform::TouchPoint& touch = touches[index];
IntPoint pos(x, y);
touch.m_pos = pos;
touch.m_screenPos = pos;
touch.m_state = BlackBerry::Platform::TouchPoint::TouchMoved;
return JSValueMakeUndefined(context);
}
Commit Message: [BlackBerry] Adapt to new BlackBerry::Platform::TouchPoint API
https://bugs.webkit.org/show_bug.cgi?id=105143
RIM PR 171941
Reviewed by Rob Buis.
Internally reviewed by George Staikos.
Source/WebCore:
TouchPoint instances now provide document coordinates for the viewport
and content position of the touch event. The pixel coordinates stored
in the TouchPoint should no longer be needed in WebKit.
Also adapt to new method names and encapsulation of TouchPoint data
members.
No change in behavior, no new tests.
* platform/blackberry/PlatformTouchPointBlackBerry.cpp:
(WebCore::PlatformTouchPoint::PlatformTouchPoint):
Source/WebKit/blackberry:
TouchPoint instances now provide document coordinates for the viewport
and content position of the touch event. The pixel coordinates stored
in the TouchPoint should no longer be needed in WebKit. One exception
is when passing events to a full screen plugin.
Also adapt to new method names and encapsulation of TouchPoint data
members.
* Api/WebPage.cpp:
(BlackBerry::WebKit::WebPage::touchEvent):
(BlackBerry::WebKit::WebPage::touchPointAsMouseEvent):
(BlackBerry::WebKit::WebPagePrivate::dispatchTouchEventToFullScreenPlugin):
(BlackBerry::WebKit::WebPagePrivate::dispatchTouchPointAsMouseEventToFullScreenPlugin):
* WebKitSupport/InputHandler.cpp:
(BlackBerry::WebKit::InputHandler::shouldRequestSpellCheckingOptionsForPoint):
* WebKitSupport/InputHandler.h:
(InputHandler):
* WebKitSupport/TouchEventHandler.cpp:
(BlackBerry::WebKit::TouchEventHandler::doFatFingers):
(BlackBerry::WebKit::TouchEventHandler::handleTouchPoint):
* WebKitSupport/TouchEventHandler.h:
(TouchEventHandler):
Tools:
Adapt to new method names and encapsulation of TouchPoint data members.
* DumpRenderTree/blackberry/EventSender.cpp:
(addTouchPointCallback):
(updateTouchPointCallback):
(touchEndCallback):
(releaseTouchPointCallback):
(sendTouchEvent):
git-svn-id: svn://svn.chromium.org/blink/trunk@137880 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: | static JSValueRef updateTouchPointCallback(JSContextRef context, JSObjectRef function, JSObjectRef thisObject, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception)
{
if (argumentCount < 3)
return JSValueMakeUndefined(context);
int index = static_cast<int>(JSValueToNumber(context, arguments[0], exception));
ASSERT(!exception || !*exception);
int x = static_cast<int>(JSValueToNumber(context, arguments[1], exception));
ASSERT(!exception || !*exception);
int y = static_cast<int>(JSValueToNumber(context, arguments[2], exception));
ASSERT(!exception || !*exception);
if (index < 0 || index >= (int)touches.size())
return JSValueMakeUndefined(context);
BlackBerry::Platform::TouchPoint& touch = touches[index];
// pixelViewportPosition is unused in the WebKit layer
IntPoint pos(x, y);
// Unfortunately we don't know the scroll position at this point, so use pos for the content position too.
// This assumes scroll position is 0,0
touch.populateDocumentPosition(pos, pos);
touch.setScreenPosition(pos);
touch.updateState(BlackBerry::Platform::TouchPoint::TouchMoved);
return JSValueMakeUndefined(context);
}
| 170,775 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void dvb_usbv2_disconnect(struct usb_interface *intf)
{
struct dvb_usb_device *d = usb_get_intfdata(intf);
const char *name = d->name;
struct device dev = d->udev->dev;
dev_dbg(&d->udev->dev, "%s: bInterfaceNumber=%d\n", __func__,
intf->cur_altsetting->desc.bInterfaceNumber);
if (d->props->exit)
d->props->exit(d);
dvb_usbv2_exit(d);
dev_info(&dev, "%s: '%s' successfully deinitialized and disconnected\n",
KBUILD_MODNAME, name);
}
Commit Message: [media] dvb-usb-v2: avoid use-after-free
I ran into a stack frame size warning because of the on-stack copy of
the USB device structure:
drivers/media/usb/dvb-usb-v2/dvb_usb_core.c: In function 'dvb_usbv2_disconnect':
drivers/media/usb/dvb-usb-v2/dvb_usb_core.c:1029:1: error: the frame size of 1104 bytes is larger than 1024 bytes [-Werror=frame-larger-than=]
Copying a device structure like this is wrong for a number of other reasons
too aside from the possible stack overflow. One of them is that the
dev_info() call will print the name of the device later, but AFAICT
we have only copied a pointer to the name earlier and the actual name
has been freed by the time it gets printed.
This removes the on-stack copy of the device and instead copies the
device name using kstrdup(). I'm ignoring the possible failure here
as both printk() and kfree() are able to deal with NULL pointers.
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
CWE ID: CWE-119 | void dvb_usbv2_disconnect(struct usb_interface *intf)
{
struct dvb_usb_device *d = usb_get_intfdata(intf);
const char *devname = kstrdup(dev_name(&d->udev->dev), GFP_KERNEL);
const char *drvname = d->name;
dev_dbg(&d->udev->dev, "%s: bInterfaceNumber=%d\n", __func__,
intf->cur_altsetting->desc.bInterfaceNumber);
if (d->props->exit)
d->props->exit(d);
dvb_usbv2_exit(d);
pr_info("%s: '%s:%s' successfully deinitialized and disconnected\n",
KBUILD_MODNAME, drvname, devname);
kfree(devname);
}
| 168,222 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void CNB::SetupLSO(virtio_net_hdr_basic *VirtioHeader, PVOID IpHeader, ULONG EthPayloadLength) const
{
PopulateIPLength(reinterpret_cast<IPv4Header*>(IpHeader), static_cast<USHORT>(EthPayloadLength));
tTcpIpPacketParsingResult packetReview;
packetReview = ParaNdis_CheckSumVerifyFlat(reinterpret_cast<IPv4Header*>(IpHeader), EthPayloadLength,
pcrIpChecksum | pcrFixIPChecksum | pcrTcpChecksum | pcrFixPHChecksum,
__FUNCTION__);
if (packetReview.xxpCheckSum == ppresPCSOK || packetReview.fixedXxpCS)
{
auto IpHeaderOffset = m_Context->Offload.ipHeaderOffset;
auto VHeader = static_cast<virtio_net_hdr_basic*>(VirtioHeader);
auto PriorityHdrLen = (m_ParentNBL->TCI() != 0) ? ETH_PRIORITY_HEADER_SIZE : 0;
VHeader->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
VHeader->gso_type = packetReview.ipStatus == ppresIPV4 ? VIRTIO_NET_HDR_GSO_TCPV4 : VIRTIO_NET_HDR_GSO_TCPV6;
VHeader->hdr_len = (USHORT)(packetReview.XxpIpHeaderSize + IpHeaderOffset + PriorityHdrLen);
VHeader->gso_size = (USHORT)m_ParentNBL->MSS();
VHeader->csum_start = (USHORT)(m_ParentNBL->TCPHeaderOffset() + PriorityHdrLen);
VHeader->csum_offset = TCP_CHECKSUM_OFFSET;
}
}
Commit Message: NetKVM: BZ#1169718: Checking the length only on read
Signed-off-by: Joseph Hindin <yhindin@rehat.com>
CWE ID: CWE-20 | void CNB::SetupLSO(virtio_net_hdr_basic *VirtioHeader, PVOID IpHeader, ULONG EthPayloadLength) const
{
PopulateIPLength(reinterpret_cast<IPv4Header*>(IpHeader), static_cast<USHORT>(EthPayloadLength));
tTcpIpPacketParsingResult packetReview;
packetReview = ParaNdis_CheckSumVerifyFlat(reinterpret_cast<IPv4Header*>(IpHeader), EthPayloadLength,
pcrIpChecksum | pcrFixIPChecksum | pcrTcpChecksum | pcrFixPHChecksum,
FALSE,
__FUNCTION__);
if (packetReview.xxpCheckSum == ppresPCSOK || packetReview.fixedXxpCS)
{
auto IpHeaderOffset = m_Context->Offload.ipHeaderOffset;
auto VHeader = static_cast<virtio_net_hdr_basic*>(VirtioHeader);
auto PriorityHdrLen = (m_ParentNBL->TCI() != 0) ? ETH_PRIORITY_HEADER_SIZE : 0;
VHeader->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
VHeader->gso_type = packetReview.ipStatus == ppresIPV4 ? VIRTIO_NET_HDR_GSO_TCPV4 : VIRTIO_NET_HDR_GSO_TCPV6;
VHeader->hdr_len = (USHORT)(packetReview.XxpIpHeaderSize + IpHeaderOffset + PriorityHdrLen);
VHeader->gso_size = (USHORT)m_ParentNBL->MSS();
VHeader->csum_start = (USHORT)(m_ParentNBL->TCPHeaderOffset() + PriorityHdrLen);
VHeader->csum_offset = TCP_CHECKSUM_OFFSET;
}
}
| 170,142 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void GpuMessageFilter::EstablishChannelCallback(
IPC::Message* reply,
const IPC::ChannelHandle& channel,
base::ProcessHandle gpu_process_for_browser,
const content::GPUInfo& gpu_info) {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
base::ProcessHandle renderer_process_for_gpu;
if (gpu_process_for_browser != 0) {
#if defined(OS_WIN)
DuplicateHandle(base::GetCurrentProcessHandle(),
peer_handle(),
gpu_process_for_browser,
&renderer_process_for_gpu,
PROCESS_DUP_HANDLE,
FALSE,
0);
#else
renderer_process_for_gpu = peer_handle();
#endif
} else {
renderer_process_for_gpu = 0;
}
GpuHostMsg_EstablishGpuChannel::WriteReplyParams(
reply, render_process_id_, channel, renderer_process_for_gpu, gpu_info);
Send(reply);
}
Commit Message: Convert plugin and GPU process to brokered handle duplication.
BUG=119250
Review URL: https://chromiumcodereview.appspot.com/9958034
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@132303 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: | void GpuMessageFilter::EstablishChannelCallback(
IPC::Message* reply,
const IPC::ChannelHandle& channel,
const content::GPUInfo& gpu_info) {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::IO));
GpuHostMsg_EstablishGpuChannel::WriteReplyParams(
reply, render_process_id_, channel, gpu_info);
Send(reply);
}
| 170,925 |