func
stringlengths 0
484k
| target
int64 0
1
| cwe
sequence | project
stringlengths 2
29
| commit_id
stringlengths 40
40
| hash
float64 1,215,700,430,453,689,100,000,000B
340,281,914,521,452,260,000,000,000,000B
| size
int64 1
24k
| message
stringlengths 0
13.3k
|
---|---|---|---|---|---|---|---|
main( int argc,
char* argv[] )
{
int old_ptsize, orig_ptsize, file;
int first_glyph = 0;
int XisSetup = 0;
char* execname;
int option;
int file_loaded;
grEvent event;
execname = ft_basename( argv[0] );
while ( 1 )
{
option = getopt( argc, argv, "d:e:f:r:" );
if ( option == -1 )
break;
switch ( option )
{
case 'd':
parse_design_coords( optarg );
break;
case 'e':
encoding = (FT_Encoding)make_tag( optarg );
break;
case 'f':
first_glyph = atoi( optarg );
break;
case 'r':
res = atoi( optarg );
if ( res < 1 )
usage( execname );
break;
default:
usage( execname );
break;
}
}
argc -= optind;
argv += optind;
if ( argc <= 1 )
usage( execname );
if ( sscanf( argv[0], "%d", &orig_ptsize ) != 1 )
orig_ptsize = 64;
file = 1;
/* Initialize engine */
error = FT_Init_FreeType( &library );
if ( error )
PanicZ( "Could not initialize FreeType library" );
NewFile:
ptsize = orig_ptsize;
hinted = 1;
file_loaded = 0;
/* Load face */
error = FT_New_Face( library, argv[file], 0, &face );
if ( error )
goto Display_Font;
if ( encoding != FT_ENCODING_NONE )
{
error = FT_Select_Charmap( face, encoding );
if ( error )
goto Display_Font;
}
/* retrieve multiple master information */
error = FT_Get_MM_Var( face, &multimaster );
if ( error )
goto Display_Font;
/* if the user specified a position, use it, otherwise */
/* set the current position to the median of each axis */
{
int n;
for ( n = 0; n < (int)multimaster->num_axis; n++ )
{
design_pos[n] = n < requested_cnt ? requested_pos[n]
: multimaster->axis[n].def;
if ( design_pos[n] < multimaster->axis[n].minimum )
design_pos[n] = multimaster->axis[n].minimum;
else if ( design_pos[n] > multimaster->axis[n].maximum )
design_pos[n] = multimaster->axis[n].maximum;
}
}
error = FT_Set_Var_Design_Coordinates( face,
multimaster->num_axis,
design_pos );
if ( error )
goto Display_Font;
file_loaded++;
Reset_Scale( ptsize );
num_glyphs = face->num_glyphs;
glyph = face->glyph;
size = face->size;
Display_Font:
/* initialize graphics if needed */
if ( !XisSetup )
{
XisSetup = 1;
Init_Display();
}
grSetTitle( surface, "FreeType Glyph Viewer - press F1 for help" );
old_ptsize = ptsize;
if ( file_loaded >= 1 )
{
Fail = 0;
Num = first_glyph;
if ( Num >= num_glyphs )
Num = num_glyphs - 1;
if ( Num < 0 )
Num = 0;
}
for ( ;; )
{
int key;
Clear_Display();
if ( file_loaded >= 1 )
{
switch ( render_mode )
{
case 0:
Render_Text( Num );
break;
default:
Render_All( Num, ptsize );
}
sprintf( Header, "%.50s %.50s (file %.100s)",
face->family_name,
face->style_name,
ft_basename( argv[file] ) );
if ( !new_header )
new_header = Header;
grWriteCellString( &bit, 0, 0, new_header, fore_color );
new_header = 0;
sprintf( Header, "axis: " );
{
int n;
for ( n = 0; n < (int)multimaster->num_axis; n++ )
{
char temp[32];
sprintf( temp, " %s:%g",
multimaster->axis[n].name,
design_pos[n]/65536. );
strcat( Header, temp );
}
}
grWriteCellString( &bit, 0, 16, Header, fore_color );
sprintf( Header, "at %d points, first glyph = %d",
ptsize,
Num );
}
else
{
sprintf( Header, "%.100s: not an MM font file, or could not be opened",
ft_basename( argv[file] ) );
}
grWriteCellString( &bit, 0, 8, Header, fore_color );
grRefreshSurface( surface );
grListenSurface( surface, 0, &event );
if ( !( key = Process_Event( &event ) ) )
goto End;
if ( key == 'n' )
{
if ( file_loaded >= 1 )
FT_Done_Face( face );
if ( file < argc - 1 )
file++;
goto NewFile;
}
if ( key == 'p' )
{
if ( file_loaded >= 1 )
FT_Done_Face( face );
if ( file > 1 )
file--;
goto NewFile;
}
if ( ptsize != old_ptsize )
{
Reset_Scale( ptsize );
old_ptsize = ptsize;
}
}
End:
grDoneSurface( surface );
grDoneDevices();
free ( multimaster );
FT_Done_Face ( face );
FT_Done_FreeType( library );
printf( "Execution completed successfully.\n" );
printf( "Fails = %d\n", Fail );
exit( 0 ); /* for safety reasons */
return 0; /* never reached */
} | 1 | [
"CWE-120"
] | freetype2-demos | 3636982a7666bcfa0e47fb31d565314d1b3e7d78 | 68,758,681,229,457,960,000,000,000,000,000,000,000 | 249 | ftmulti: Fix possible buffer overflow.
Based on a patch from Marek Kašík <mkasik@redhat.com>.
* src/ftmulti.c (main): Limit axis name length in `Header'. |
long keyctl_session_to_parent(void)
{
#ifdef TIF_NOTIFY_RESUME
struct task_struct *me, *parent;
const struct cred *mycred, *pcred;
struct cred *cred, *oldcred;
key_ref_t keyring_r;
int ret;
keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_LINK);
if (IS_ERR(keyring_r))
return PTR_ERR(keyring_r);
/* our parent is going to need a new cred struct, a new tgcred struct
* and new security data, so we allocate them here to prevent ENOMEM in
* our parent */
ret = -ENOMEM;
cred = cred_alloc_blank();
if (!cred)
goto error_keyring;
cred->tgcred->session_keyring = key_ref_to_ptr(keyring_r);
keyring_r = NULL;
me = current;
write_lock_irq(&tasklist_lock);
parent = me->real_parent;
ret = -EPERM;
/* the parent mustn't be init and mustn't be a kernel thread */
if (parent->pid <= 1 || !parent->mm)
goto not_permitted;
/* the parent must be single threaded */
if (!thread_group_empty(parent))
goto not_permitted;
/* the parent and the child must have different session keyrings or
* there's no point */
mycred = current_cred();
pcred = __task_cred(parent);
if (mycred == pcred ||
mycred->tgcred->session_keyring == pcred->tgcred->session_keyring)
goto already_same;
/* the parent must have the same effective ownership and mustn't be
* SUID/SGID */
if (pcred->uid != mycred->euid ||
pcred->euid != mycred->euid ||
pcred->suid != mycred->euid ||
pcred->gid != mycred->egid ||
pcred->egid != mycred->egid ||
pcred->sgid != mycred->egid)
goto not_permitted;
/* the keyrings must have the same UID */
if (pcred->tgcred->session_keyring->uid != mycred->euid ||
mycred->tgcred->session_keyring->uid != mycred->euid)
goto not_permitted;
/* if there's an already pending keyring replacement, then we replace
* that */
oldcred = parent->replacement_session_keyring;
/* the replacement session keyring is applied just prior to userspace
* restarting */
parent->replacement_session_keyring = cred;
cred = NULL;
set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME);
write_unlock_irq(&tasklist_lock);
if (oldcred)
put_cred(oldcred);
return 0;
already_same:
ret = 0;
not_permitted:
write_unlock_irq(&tasklist_lock);
put_cred(cred);
return ret;
error_keyring:
key_ref_put(keyring_r);
return ret;
#else /* !TIF_NOTIFY_RESUME */
/*
* To be removed when TIF_NOTIFY_RESUME has been implemented on
* m68k/xtensa
*/
#warning TIF_NOTIFY_RESUME not implemented
return -EOPNOTSUPP;
#endif /* !TIF_NOTIFY_RESUME */
} | 1 | [
"CWE-476"
] | linux-2.6 | 9d1ac65a9698513d00e5608d93fca0c53f536c14 | 212,709,001,616,641,300,000,000,000,000,000,000,000 | 96 | KEYS: Fix RCU no-lock warning in keyctl_session_to_parent()
There's an protected access to the parent process's credentials in the middle
of keyctl_session_to_parent(). This results in the following RCU warning:
===================================================
[ INFO: suspicious rcu_dereference_check() usage. ]
---------------------------------------------------
security/keys/keyctl.c:1291 invoked rcu_dereference_check() without protection!
other info that might help us debug this:
rcu_scheduler_active = 1, debug_locks = 0
1 lock held by keyctl-session-/2137:
#0: (tasklist_lock){.+.+..}, at: [<ffffffff811ae2ec>] keyctl_session_to_parent+0x60/0x236
stack backtrace:
Pid: 2137, comm: keyctl-session- Not tainted 2.6.36-rc2-cachefs+ #1
Call Trace:
[<ffffffff8105606a>] lockdep_rcu_dereference+0xaa/0xb3
[<ffffffff811ae379>] keyctl_session_to_parent+0xed/0x236
[<ffffffff811af77e>] sys_keyctl+0xb4/0xb6
[<ffffffff81001eab>] system_call_fastpath+0x16/0x1b
The code should take the RCU read lock to make sure the parents credentials
don't go away, even though it's holding a spinlock and has IRQ disabled.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> |
long keyctl_session_to_parent(void)
{
#ifdef TIF_NOTIFY_RESUME
struct task_struct *me, *parent;
const struct cred *mycred, *pcred;
struct cred *cred, *oldcred;
key_ref_t keyring_r;
int ret;
keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_LINK);
if (IS_ERR(keyring_r))
return PTR_ERR(keyring_r);
/* our parent is going to need a new cred struct, a new tgcred struct
* and new security data, so we allocate them here to prevent ENOMEM in
* our parent */
ret = -ENOMEM;
cred = cred_alloc_blank();
if (!cred)
goto error_keyring;
cred->tgcred->session_keyring = key_ref_to_ptr(keyring_r);
keyring_r = NULL;
me = current;
rcu_read_lock();
write_lock_irq(&tasklist_lock);
parent = me->real_parent;
ret = -EPERM;
/* the parent mustn't be init and mustn't be a kernel thread */
if (parent->pid <= 1 || !parent->mm)
goto not_permitted;
/* the parent must be single threaded */
if (!thread_group_empty(parent))
goto not_permitted;
/* the parent and the child must have different session keyrings or
* there's no point */
mycred = current_cred();
pcred = __task_cred(parent);
if (mycred == pcred ||
mycred->tgcred->session_keyring == pcred->tgcred->session_keyring)
goto already_same;
/* the parent must have the same effective ownership and mustn't be
* SUID/SGID */
if (pcred->uid != mycred->euid ||
pcred->euid != mycred->euid ||
pcred->suid != mycred->euid ||
pcred->gid != mycred->egid ||
pcred->egid != mycred->egid ||
pcred->sgid != mycred->egid)
goto not_permitted;
/* the keyrings must have the same UID */
if (pcred->tgcred->session_keyring->uid != mycred->euid ||
mycred->tgcred->session_keyring->uid != mycred->euid)
goto not_permitted;
/* if there's an already pending keyring replacement, then we replace
* that */
oldcred = parent->replacement_session_keyring;
/* the replacement session keyring is applied just prior to userspace
* restarting */
parent->replacement_session_keyring = cred;
cred = NULL;
set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME);
write_unlock_irq(&tasklist_lock);
rcu_read_unlock();
if (oldcred)
put_cred(oldcred);
return 0;
already_same:
ret = 0;
not_permitted:
write_unlock_irq(&tasklist_lock);
rcu_read_unlock();
put_cred(cred);
return ret;
error_keyring:
key_ref_put(keyring_r);
return ret;
#else /* !TIF_NOTIFY_RESUME */
/*
* To be removed when TIF_NOTIFY_RESUME has been implemented on
* m68k/xtensa
*/
#warning TIF_NOTIFY_RESUME not implemented
return -EOPNOTSUPP;
#endif /* !TIF_NOTIFY_RESUME */
} | 1 | [
"CWE-476"
] | linux-2.6 | 3d96406c7da1ed5811ea52a3b0905f4f0e295376 | 32,494,453,156,495,134,000,000,000,000,000,000,000 | 99 | KEYS: Fix bug in keyctl_session_to_parent() if parent has no session keyring
Fix a bug in keyctl_session_to_parent() whereby it tries to check the ownership
of the parent process's session keyring whether or not the parent has a session
keyring [CVE-2010-2960].
This results in the following oops:
BUG: unable to handle kernel NULL pointer dereference at 00000000000000a0
IP: [<ffffffff811ae4dd>] keyctl_session_to_parent+0x251/0x443
...
Call Trace:
[<ffffffff811ae2f3>] ? keyctl_session_to_parent+0x67/0x443
[<ffffffff8109d286>] ? __do_fault+0x24b/0x3d0
[<ffffffff811af98c>] sys_keyctl+0xb4/0xb8
[<ffffffff81001eab>] system_call_fastpath+0x16/0x1b
if the parent process has no session keyring.
If the system is using pam_keyinit then it mostly protected against this as all
processes derived from a login will have inherited the session keyring created
by pam_keyinit during the log in procedure.
To test this, pam_keyinit calls need to be commented out in /etc/pam.d/.
Reported-by: Tavis Ormandy <taviso@cmpxchg8b.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Tavis Ormandy <taviso@cmpxchg8b.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> |
long do_io_submit(aio_context_t ctx_id, long nr,
struct iocb __user *__user *iocbpp, bool compat)
{
struct kioctx *ctx;
long ret = 0;
int i;
struct hlist_head batch_hash[AIO_BATCH_HASH_SIZE] = { { 0, }, };
if (unlikely(nr < 0))
return -EINVAL;
if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
return -EFAULT;
ctx = lookup_ioctx(ctx_id);
if (unlikely(!ctx)) {
pr_debug("EINVAL: io_submit: invalid context id\n");
return -EINVAL;
}
/*
* AKPM: should this return a partial result if some of the IOs were
* successfully submitted?
*/
for (i=0; i<nr; i++) {
struct iocb __user *user_iocb;
struct iocb tmp;
if (unlikely(__get_user(user_iocb, iocbpp + i))) {
ret = -EFAULT;
break;
}
if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
ret = -EFAULT;
break;
}
ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash, compat);
if (ret)
break;
}
aio_batch_free(batch_hash);
put_ioctx(ctx);
return i ? i : ret;
} | 1 | [
"CWE-190"
] | linux-2.6 | 75e1c70fc31490ef8a373ea2a4bea2524099b478 | 116,941,780,762,916,300,000,000,000,000,000,000,000 | 47 | aio: check for multiplication overflow in do_io_submit
Tavis Ormandy pointed out that do_io_submit does not do proper bounds
checking on the passed-in iocb array:
if (unlikely(nr < 0))
return -EINVAL;
if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(iocbpp)))))
return -EFAULT; ^^^^^^^^^^^^^^^^^^
The attached patch checks for overflow, and if it is detected, the
number of iocbs submitted is scaled down to a number that will fit in
the long. This is an ok thing to do, as sys_io_submit is documented as
returning the number of iocbs submitted, so callers should handle a
return value of less than the 'nr' argument passed in.
Reported-by: Tavis Ormandy <taviso@cmpxchg8b.com>
Signed-off-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> |
snd_seq_oss_open(struct file *file, int level)
{
int i, rc;
struct seq_oss_devinfo *dp;
dp = kzalloc(sizeof(*dp), GFP_KERNEL);
if (!dp) {
snd_printk(KERN_ERR "can't malloc device info\n");
return -ENOMEM;
}
debug_printk(("oss_open: dp = %p\n", dp));
dp->cseq = system_client;
dp->port = -1;
dp->queue = -1;
for (i = 0; i < SNDRV_SEQ_OSS_MAX_CLIENTS; i++) {
if (client_table[i] == NULL)
break;
}
dp->index = i;
if (i >= SNDRV_SEQ_OSS_MAX_CLIENTS) {
snd_printk(KERN_ERR "too many applications\n");
rc = -ENOMEM;
goto _error;
}
/* look up synth and midi devices */
snd_seq_oss_synth_setup(dp);
snd_seq_oss_midi_setup(dp);
if (dp->synth_opened == 0 && dp->max_mididev == 0) {
/* snd_printk(KERN_ERR "no device found\n"); */
rc = -ENODEV;
goto _error;
}
/* create port */
debug_printk(("create new port\n"));
rc = create_port(dp);
if (rc < 0) {
snd_printk(KERN_ERR "can't create port\n");
goto _error;
}
/* allocate queue */
debug_printk(("allocate queue\n"));
rc = alloc_seq_queue(dp);
if (rc < 0)
goto _error;
/* set address */
dp->addr.client = dp->cseq;
dp->addr.port = dp->port;
/*dp->addr.queue = dp->queue;*/
/*dp->addr.channel = 0;*/
dp->seq_mode = level;
/* set up file mode */
dp->file_mode = translate_mode(file);
/* initialize read queue */
debug_printk(("initialize read queue\n"));
if (is_read_mode(dp->file_mode)) {
dp->readq = snd_seq_oss_readq_new(dp, maxqlen);
if (!dp->readq) {
rc = -ENOMEM;
goto _error;
}
}
/* initialize write queue */
debug_printk(("initialize write queue\n"));
if (is_write_mode(dp->file_mode)) {
dp->writeq = snd_seq_oss_writeq_new(dp, maxqlen);
if (!dp->writeq) {
rc = -ENOMEM;
goto _error;
}
}
/* initialize timer */
debug_printk(("initialize timer\n"));
dp->timer = snd_seq_oss_timer_new(dp);
if (!dp->timer) {
snd_printk(KERN_ERR "can't alloc timer\n");
rc = -ENOMEM;
goto _error;
}
debug_printk(("timer initialized\n"));
/* set private data pointer */
file->private_data = dp;
/* set up for mode2 */
if (level == SNDRV_SEQ_OSS_MODE_MUSIC)
snd_seq_oss_synth_setup_midi(dp);
else if (is_read_mode(dp->file_mode))
snd_seq_oss_midi_open_all(dp, SNDRV_SEQ_OSS_FILE_READ);
client_table[dp->index] = dp;
num_clients++;
debug_printk(("open done\n"));
return 0;
_error:
snd_seq_oss_writeq_delete(dp->writeq);
snd_seq_oss_readq_delete(dp->readq);
snd_seq_oss_synth_cleanup(dp);
snd_seq_oss_midi_cleanup(dp);
delete_port(dp);
delete_seq_queue(dp->queue);
kfree(dp);
return rc;
} | 1 | [
"CWE-415"
] | linux-2.6 | 27f7ad53829f79e799a253285318bff79ece15bd | 58,584,039,742,201,940,000,000,000,000,000,000,000 | 119 | ALSA: seq/oss - Fix double-free at error path of snd_seq_oss_open()
The error handling in snd_seq_oss_open() has several bad codes that
do dereferecing released pointers and double-free of kmalloc'ed data.
The object dp is release in free_devinfo() that is called via
private_free callback. The rest shouldn't touch this object any more.
The patch changes delete_port() to call kfree() in any case, and gets
rid of unnecessary calls of destructors in snd_seq_oss_open().
Fixes CVE-2010-3080.
Reported-and-tested-by: Tavis Ormandy <taviso@cmpxchg8b.com>
Cc: <stable@kernel.org>
Signed-off-by: Takashi Iwai <tiwai@suse.de> |
delete_port(struct seq_oss_devinfo *dp)
{
if (dp->port < 0)
return 0;
debug_printk(("delete_port %i\n", dp->port));
return snd_seq_event_port_detach(dp->cseq, dp->port);
} | 1 | [
"CWE-415"
] | linux-2.6 | 27f7ad53829f79e799a253285318bff79ece15bd | 197,983,924,405,975,260,000,000,000,000,000,000,000 | 8 | ALSA: seq/oss - Fix double-free at error path of snd_seq_oss_open()
The error handling in snd_seq_oss_open() has several bad codes that
do dereferecing released pointers and double-free of kmalloc'ed data.
The object dp is release in free_devinfo() that is called via
private_free callback. The rest shouldn't touch this object any more.
The patch changes delete_port() to call kfree() in any case, and gets
rid of unnecessary calls of destructors in snd_seq_oss_open().
Fixes CVE-2010-3080.
Reported-and-tested-by: Tavis Ormandy <taviso@cmpxchg8b.com>
Cc: <stable@kernel.org>
Signed-off-by: Takashi Iwai <tiwai@suse.de> |
load_status_changed_cb (WebKitWebView *view,
GParamSpec *spec,
EphyEmbed *embed)
{
EphyEmbedPrivate *priv = embed->priv;
WebKitLoadStatus status = webkit_web_view_get_load_status (view);
if (status == WEBKIT_LOAD_COMMITTED) {
const gchar* uri;
EphyWebViewSecurityLevel security_level;
uri = webkit_web_view_get_uri (view);
ephy_embed_destroy_top_widgets (embed);
if (g_strcmp0 (uri, priv->loading_uri) != 0) {
g_free (priv->loading_uri);
priv->loading_uri = g_strdup (uri);
}
ephy_web_view_location_changed (EPHY_WEB_VIEW (view),
uri);
restore_zoom_level (embed, uri);
ephy_history_add_page (embed->priv->history,
uri,
FALSE,
FALSE);
/*
* FIXME: as a temporary workaround while soup lacks the needed
* security API, determine security level based on the existence of
* a 'https' prefix for the URI
*/
if (uri && g_str_has_prefix (uri, "https"))
security_level = EPHY_WEB_VIEW_STATE_IS_SECURE_HIGH;
else
security_level = EPHY_WEB_VIEW_STATE_IS_UNKNOWN;
ephy_web_view_set_security_level (EPHY_WEB_VIEW (view), security_level);
} else if (status == WEBKIT_LOAD_PROVISIONAL || status == WEBKIT_LOAD_FINISHED) {
EphyWebViewNetState estate = EPHY_WEB_VIEW_STATE_UNKNOWN;
char *loading_uri = NULL;
if (status == WEBKIT_LOAD_PROVISIONAL) {
WebKitWebFrame *frame;
WebKitWebDataSource *source;
WebKitNetworkRequest *request;
frame = webkit_web_view_get_main_frame (view);
source = webkit_web_frame_get_provisional_data_source (frame);
request = webkit_web_data_source_get_initial_request (source);
loading_uri = g_strdup (webkit_network_request_get_uri (request));
/* We also store the URI we are currently loading here, because
* we will want to use it in WEBKIT_LOAD_FINISHED, because if a
* load fails we may never get to committed */
priv->loading_uri = g_strdup (loading_uri);
estate = (EphyWebViewNetState) (estate |
EPHY_WEB_VIEW_STATE_START |
EPHY_WEB_VIEW_STATE_NEGOTIATING |
EPHY_WEB_VIEW_STATE_IS_REQUEST |
EPHY_WEB_VIEW_STATE_IS_NETWORK);
g_signal_emit_by_name (EPHY_WEB_VIEW (view), "new-document-now", loading_uri);
} else if (status == WEBKIT_LOAD_FINISHED) {
loading_uri = priv->loading_uri;
/* Will be freed below */
priv->loading_uri = NULL;
estate = (EphyWebViewNetState) (estate |
EPHY_WEB_VIEW_STATE_STOP |
EPHY_WEB_VIEW_STATE_IS_DOCUMENT |
EPHY_WEB_VIEW_STATE_IS_NETWORK);
}
ephy_web_view_update_from_net_state (EPHY_WEB_VIEW (view),
loading_uri,
(EphyWebViewNetState)estate);
g_free (loading_uri);
}
} | 1 | [] | epiphany | 3e0f7dea754381c5ad11a06ccc62eb153382b498 | 71,823,681,041,783,270,000,000,000,000,000,000,000 | 85 | Report broken certs through the padlock icon
This uses a new feature in libsoup that reports through a
SoupMessageFlag whether the message is talking to a server that has a
trusted server.
Bug #600663 |
ephy_embed_single_initialize (EphyEmbedSingle *single)
{
SoupSession *session;
SoupCookieJar *jar;
char *filename;
char *cookie_policy;
/* Initialise nspluginwrapper's plugins if available */
if (g_file_test (NSPLUGINWRAPPER_SETUP, G_FILE_TEST_EXISTS) != FALSE)
g_spawn_command_line_sync (NSPLUGINWRAPPER_SETUP, NULL, NULL, NULL, NULL);
ephy_embed_prefs_init ();
session = webkit_get_default_session ();
/* Store cookies in moz-compatible SQLite format */
filename = g_build_filename (ephy_dot_dir (), "cookies.sqlite", NULL);
jar = soup_cookie_jar_sqlite_new (filename, FALSE);
g_free (filename);
cookie_policy = eel_gconf_get_string (CONF_SECURITY_COOKIES_ACCEPT);
ephy_embed_prefs_set_cookie_jar_policy (jar, cookie_policy);
g_free (cookie_policy);
soup_session_add_feature (session, SOUP_SESSION_FEATURE (jar));
g_object_unref (jar);
/* Use GNOME proxy settings through libproxy */
soup_session_add_feature_by_type (session, SOUP_TYPE_PROXY_RESOLVER_GNOME);
#ifdef SOUP_TYPE_PASSWORD_MANAGER
/* Use GNOME keyring to store passwords. Only add the manager if we
are not using a private session, otherwise we want any new
password to expire when we exit *and* we don't want to use any
existing password in the keyring */
if (ephy_has_private_profile () == FALSE)
soup_session_add_feature_by_type (session, SOUP_TYPE_PASSWORD_MANAGER_GNOME);
#endif
return TRUE;
} | 1 | [] | epiphany | 3e0f7dea754381c5ad11a06ccc62eb153382b498 | 181,282,374,735,269,800,000,000,000,000,000,000,000 | 40 | Report broken certs through the padlock icon
This uses a new feature in libsoup that reports through a
SoupMessageFlag whether the message is talking to a server that has a
trusted server.
Bug #600663 |
ephy_embed_single_initialize (EphyEmbedSingle *single)
{
SoupSession *session;
SoupCookieJar *jar;
char *filename;
char *cookie_policy;
/* Initialise nspluginwrapper's plugins if available */
if (g_file_test (NSPLUGINWRAPPER_SETUP, G_FILE_TEST_EXISTS) != FALSE)
g_spawn_command_line_sync (NSPLUGINWRAPPER_SETUP, NULL, NULL, NULL, NULL);
ephy_embed_prefs_init ();
session = webkit_get_default_session ();
#ifdef GTLS_SYSTEM_CA_FILE
/* Check SSL certificates */
if (g_file_test (GTLS_SYSTEM_CA_FILE, G_FILE_TEST_EXISTS)) {
g_object_set (session,
SOUP_SESSION_SSL_CA_FILE, GTLS_SYSTEM_CA_FILE,
"ignore-ssl-cert-errors", TRUE,
NULL);
} else {
g_warning (_("CA Certificates file we should use was not found, "\
"all SSL sites will be considered to have a broken certificate."));
}
#endif
/* Store cookies in moz-compatible SQLite format */
filename = g_build_filename (ephy_dot_dir (), "cookies.sqlite", NULL);
jar = soup_cookie_jar_sqlite_new (filename, FALSE);
g_free (filename);
cookie_policy = eel_gconf_get_string (CONF_SECURITY_COOKIES_ACCEPT);
ephy_embed_prefs_set_cookie_jar_policy (jar, cookie_policy);
g_free (cookie_policy);
soup_session_add_feature (session, SOUP_SESSION_FEATURE (jar));
g_object_unref (jar);
/* Use GNOME proxy settings through libproxy */
soup_session_add_feature_by_type (session, SOUP_TYPE_PROXY_RESOLVER_GNOME);
#ifdef SOUP_TYPE_PASSWORD_MANAGER
/* Use GNOME keyring to store passwords. Only add the manager if we
are not using a private session, otherwise we want any new
password to expire when we exit *and* we don't want to use any
existing password in the keyring */
if (ephy_has_private_profile () == FALSE)
soup_session_add_feature_by_type (session, SOUP_TYPE_PASSWORD_MANAGER_GNOME);
#endif
return TRUE;
} | 1 | [] | epiphany | f3ed2a94694b698bb3cb38bb08a741364fe2df9b | 281,964,126,822,656,600,000,000,000,000,000,000,000 | 54 | Fix last patch to account for changes in soup API, and bump required version |
static inline void kvm_load_fs(u16 sel)
{
asm("mov %0, %%fs" : : "rm"(sel));
} | 1 | [
"CWE-400"
] | linux-2.6 | 9581d442b9058d3699b4be568b6e5eae38a41493 | 55,344,757,113,565,030,000,000,000,000,000,000,000 | 4 | KVM: Fix fs/gs reload oops with invalid ldt
kvm reloads the host's fs and gs blindly, however the underlying segment
descriptors may be invalid due to the user modifying the ldt after loading
them.
Fix by using the safe accessors (loadsegment() and load_gs_index()) instead
of home grown unsafe versions.
This is CVE-2010-3698.
KVM-Stable-Tag.
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> |
static inline u16 kvm_read_gs(void)
{
u16 seg;
asm("mov %%gs, %0" : "=g"(seg));
return seg;
} | 1 | [
"CWE-400"
] | linux-2.6 | 9581d442b9058d3699b4be568b6e5eae38a41493 | 46,310,731,353,131,930,000,000,000,000,000,000,000 | 6 | KVM: Fix fs/gs reload oops with invalid ldt
kvm reloads the host's fs and gs blindly, however the underlying segment
descriptors may be invalid due to the user modifying the ldt after loading
them.
Fix by using the safe accessors (loadsegment() and load_gs_index()) instead
of home grown unsafe versions.
This is CVE-2010-3698.
KVM-Stable-Tag.
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> |
static void __vmx_load_host_state(struct vcpu_vmx *vmx)
{
unsigned long flags;
if (!vmx->host_state.loaded)
return;
++vmx->vcpu.stat.host_state_reload;
vmx->host_state.loaded = 0;
if (vmx->host_state.fs_reload_needed)
kvm_load_fs(vmx->host_state.fs_sel);
if (vmx->host_state.gs_ldt_reload_needed) {
kvm_load_ldt(vmx->host_state.ldt_sel);
/*
* If we have to reload gs, we must take care to
* preserve our gs base.
*/
local_irq_save(flags);
kvm_load_gs(vmx->host_state.gs_sel);
#ifdef CONFIG_X86_64
wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
#endif
local_irq_restore(flags);
}
reload_tss();
#ifdef CONFIG_X86_64
if (is_long_mode(&vmx->vcpu)) {
rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
}
#endif
if (current_thread_info()->status & TS_USEDFPU)
clts();
load_gdt(&__get_cpu_var(host_gdt));
} | 1 | [
"CWE-400"
] | linux-2.6 | 9581d442b9058d3699b4be568b6e5eae38a41493 | 145,884,912,506,886,420,000,000,000,000,000,000,000 | 35 | KVM: Fix fs/gs reload oops with invalid ldt
kvm reloads the host's fs and gs blindly, however the underlying segment
descriptors may be invalid due to the user modifying the ldt after loading
them.
Fix by using the safe accessors (loadsegment() and load_gs_index()) instead
of home grown unsafe versions.
This is CVE-2010-3698.
KVM-Stable-Tag.
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> |
static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
{
u32 host_sysenter_cs, msr_low, msr_high;
u32 junk;
u64 host_pat, tsc_this, tsc_base;
unsigned long a;
struct desc_ptr dt;
int i;
unsigned long kvm_vmx_return;
u32 exec_control;
/* I/O */
vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));
if (cpu_has_vmx_msr_bitmap())
vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));
vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
/* Control */
vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
vmcs_config.pin_based_exec_ctrl);
exec_control = vmcs_config.cpu_based_exec_ctrl;
if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
exec_control &= ~CPU_BASED_TPR_SHADOW;
#ifdef CONFIG_X86_64
exec_control |= CPU_BASED_CR8_STORE_EXITING |
CPU_BASED_CR8_LOAD_EXITING;
#endif
}
if (!enable_ept)
exec_control |= CPU_BASED_CR3_STORE_EXITING |
CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_INVLPG_EXITING;
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
if (cpu_has_secondary_exec_ctrls()) {
exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
exec_control &=
~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
if (vmx->vpid == 0)
exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
if (!enable_ept) {
exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
enable_unrestricted_guest = 0;
}
if (!enable_unrestricted_guest)
exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
if (!ple_gap)
exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
}
if (ple_gap) {
vmcs_write32(PLE_GAP, ple_gap);
vmcs_write32(PLE_WINDOW, ple_window);
}
vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf);
vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf);
vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS); /* 22.2.3 */
vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs()); /* 22.2.4 */
vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs()); /* 22.2.4 */
vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
#ifdef CONFIG_X86_64
rdmsrl(MSR_FS_BASE, a);
vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
rdmsrl(MSR_GS_BASE, a);
vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
#else
vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
#endif
vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
native_store_idt(&dt);
vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
rdmsrl(MSR_IA32_SYSENTER_ESP, a);
vmcs_writel(HOST_IA32_SYSENTER_ESP, a); /* 22.2.3 */
rdmsrl(MSR_IA32_SYSENTER_EIP, a);
vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */
if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
host_pat = msr_low | ((u64) msr_high << 32);
vmcs_write64(HOST_IA32_PAT, host_pat);
}
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
host_pat = msr_low | ((u64) msr_high << 32);
/* Write the default value follow host pat */
vmcs_write64(GUEST_IA32_PAT, host_pat);
/* Keep arch.pat sync with GUEST_IA32_PAT */
vmx->vcpu.arch.pat = host_pat;
}
for (i = 0; i < NR_VMX_MSR; ++i) {
u32 index = vmx_msr_index[i];
u32 data_low, data_high;
int j = vmx->nmsrs;
if (rdmsr_safe(index, &data_low, &data_high) < 0)
continue;
if (wrmsr_safe(index, data_low, data_high) < 0)
continue;
vmx->guest_msrs[j].index = i;
vmx->guest_msrs[j].data = 0;
vmx->guest_msrs[j].mask = -1ull;
++vmx->nmsrs;
}
vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
/* 22.2.1, 20.8.1 */
vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
if (enable_ept)
vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc;
rdtscll(tsc_this);
if (tsc_this < vmx->vcpu.kvm->arch.vm_init_tsc)
tsc_base = tsc_this;
guest_write_tsc(0, tsc_base);
return 0;
} | 1 | [
"CWE-400"
] | linux-2.6 | 9581d442b9058d3699b4be568b6e5eae38a41493 | 242,465,156,071,332,800,000,000,000,000,000,000,000 | 154 | KVM: Fix fs/gs reload oops with invalid ldt
kvm reloads the host's fs and gs blindly, however the underlying segment
descriptors may be invalid due to the user modifying the ldt after loading
them.
Fix by using the safe accessors (loadsegment() and load_gs_index()) instead
of home grown unsafe versions.
This is CVE-2010-3698.
KVM-Stable-Tag.
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> |
static void vmx_save_host_state(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int i;
if (vmx->host_state.loaded)
return;
vmx->host_state.loaded = 1;
/*
* Set host fs and gs selectors. Unfortunately, 22.2.3 does not
* allow segment selectors with cpl > 0 or ti == 1.
*/
vmx->host_state.ldt_sel = kvm_read_ldt();
vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
vmx->host_state.fs_sel = kvm_read_fs();
if (!(vmx->host_state.fs_sel & 7)) {
vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
vmx->host_state.fs_reload_needed = 0;
} else {
vmcs_write16(HOST_FS_SELECTOR, 0);
vmx->host_state.fs_reload_needed = 1;
}
vmx->host_state.gs_sel = kvm_read_gs();
if (!(vmx->host_state.gs_sel & 7))
vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
else {
vmcs_write16(HOST_GS_SELECTOR, 0);
vmx->host_state.gs_ldt_reload_needed = 1;
}
#ifdef CONFIG_X86_64
vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
#endif
#ifdef CONFIG_X86_64
if (is_long_mode(&vmx->vcpu)) {
rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
}
#endif
for (i = 0; i < vmx->save_nmsrs; ++i)
kvm_set_shared_msr(vmx->guest_msrs[i].index,
vmx->guest_msrs[i].data,
vmx->guest_msrs[i].mask);
} | 1 | [
"CWE-400"
] | linux-2.6 | 9581d442b9058d3699b4be568b6e5eae38a41493 | 220,875,907,387,633,250,000,000,000,000,000,000,000 | 50 | KVM: Fix fs/gs reload oops with invalid ldt
kvm reloads the host's fs and gs blindly, however the underlying segment
descriptors may be invalid due to the user modifying the ldt after loading
them.
Fix by using the safe accessors (loadsegment() and load_gs_index()) instead
of home grown unsafe versions.
This is CVE-2010-3698.
KVM-Stable-Tag.
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> |
static inline u16 kvm_read_fs(void)
{
u16 seg;
asm("mov %%fs, %0" : "=g"(seg));
return seg;
} | 1 | [
"CWE-400"
] | linux-2.6 | 9581d442b9058d3699b4be568b6e5eae38a41493 | 171,176,631,815,915,570,000,000,000,000,000,000,000 | 6 | KVM: Fix fs/gs reload oops with invalid ldt
kvm reloads the host's fs and gs blindly, however the underlying segment
descriptors may be invalid due to the user modifying the ldt after loading
them.
Fix by using the safe accessors (loadsegment() and load_gs_index()) instead
of home grown unsafe versions.
This is CVE-2010-3698.
KVM-Stable-Tag.
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> |
static void svm_vcpu_run(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
u16 fs_selector;
u16 gs_selector;
u16 ldt_selector;
svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
/*
* A vmexit emulation is required before the vcpu can be executed
* again.
*/
if (unlikely(svm->nested.exit_required))
return;
pre_svm_run(svm);
sync_lapic_to_cr8(vcpu);
save_host_msrs(vcpu);
fs_selector = kvm_read_fs();
gs_selector = kvm_read_gs();
ldt_selector = kvm_read_ldt();
svm->vmcb->save.cr2 = vcpu->arch.cr2;
/* required for live migration with NPT */
if (npt_enabled)
svm->vmcb->save.cr3 = vcpu->arch.cr3;
clgi();
local_irq_enable();
asm volatile (
"push %%"R"bp; \n\t"
"mov %c[rbx](%[svm]), %%"R"bx \n\t"
"mov %c[rcx](%[svm]), %%"R"cx \n\t"
"mov %c[rdx](%[svm]), %%"R"dx \n\t"
"mov %c[rsi](%[svm]), %%"R"si \n\t"
"mov %c[rdi](%[svm]), %%"R"di \n\t"
"mov %c[rbp](%[svm]), %%"R"bp \n\t"
#ifdef CONFIG_X86_64
"mov %c[r8](%[svm]), %%r8 \n\t"
"mov %c[r9](%[svm]), %%r9 \n\t"
"mov %c[r10](%[svm]), %%r10 \n\t"
"mov %c[r11](%[svm]), %%r11 \n\t"
"mov %c[r12](%[svm]), %%r12 \n\t"
"mov %c[r13](%[svm]), %%r13 \n\t"
"mov %c[r14](%[svm]), %%r14 \n\t"
"mov %c[r15](%[svm]), %%r15 \n\t"
#endif
/* Enter guest mode */
"push %%"R"ax \n\t"
"mov %c[vmcb](%[svm]), %%"R"ax \n\t"
__ex(SVM_VMLOAD) "\n\t"
__ex(SVM_VMRUN) "\n\t"
__ex(SVM_VMSAVE) "\n\t"
"pop %%"R"ax \n\t"
/* Save guest registers, load host registers */
"mov %%"R"bx, %c[rbx](%[svm]) \n\t"
"mov %%"R"cx, %c[rcx](%[svm]) \n\t"
"mov %%"R"dx, %c[rdx](%[svm]) \n\t"
"mov %%"R"si, %c[rsi](%[svm]) \n\t"
"mov %%"R"di, %c[rdi](%[svm]) \n\t"
"mov %%"R"bp, %c[rbp](%[svm]) \n\t"
#ifdef CONFIG_X86_64
"mov %%r8, %c[r8](%[svm]) \n\t"
"mov %%r9, %c[r9](%[svm]) \n\t"
"mov %%r10, %c[r10](%[svm]) \n\t"
"mov %%r11, %c[r11](%[svm]) \n\t"
"mov %%r12, %c[r12](%[svm]) \n\t"
"mov %%r13, %c[r13](%[svm]) \n\t"
"mov %%r14, %c[r14](%[svm]) \n\t"
"mov %%r15, %c[r15](%[svm]) \n\t"
#endif
"pop %%"R"bp"
:
: [svm]"a"(svm),
[vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
[rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
[rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
[rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
[rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
[rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
[rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
, [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
[r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
[r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
[r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
[r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
[r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
[r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
[r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
#endif
: "cc", "memory"
, R"bx", R"cx", R"dx", R"si", R"di"
#ifdef CONFIG_X86_64
, "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
#endif
);
vcpu->arch.cr2 = svm->vmcb->save.cr2;
vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
kvm_load_fs(fs_selector);
kvm_load_gs(gs_selector);
kvm_load_ldt(ldt_selector);
load_host_msrs(vcpu);
reload_tss(vcpu);
local_irq_disable();
stgi();
sync_cr8_to_lapic(vcpu);
svm->next_rip = 0;
if (npt_enabled) {
vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
}
/*
* We need to handle MC intercepts here before the vcpu has a chance to
* change the physical cpu
*/
if (unlikely(svm->vmcb->control.exit_code ==
SVM_EXIT_EXCP_BASE + MC_VECTOR))
svm_handle_mce(svm);
} | 1 | [
"CWE-400"
] | linux-2.6 | 9581d442b9058d3699b4be568b6e5eae38a41493 | 9,151,409,030,979,133,000,000,000,000,000,000,000 | 139 | KVM: Fix fs/gs reload oops with invalid ldt
kvm reloads the host's fs and gs blindly, however the underlying segment
descriptors may be invalid due to the user modifying the ldt after loading
them.
Fix by using the safe accessors (loadsegment() and load_gs_index()) instead
of home grown unsafe versions.
This is CVE-2010-3698.
KVM-Stable-Tag.
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> |
static inline void kvm_load_gs(u16 sel)
{
asm("mov %0, %%gs" : : "rm"(sel));
} | 1 | [
"CWE-400"
] | linux-2.6 | 9581d442b9058d3699b4be568b6e5eae38a41493 | 90,281,557,106,910,140,000,000,000,000,000,000,000 | 4 | KVM: Fix fs/gs reload oops with invalid ldt
kvm reloads the host's fs and gs blindly, however the underlying segment
descriptors may be invalid due to the user modifying the ldt after loading
them.
Fix by using the safe accessors (loadsegment() and load_gs_index()) instead
of home grown unsafe versions.
This is CVE-2010-3698.
KVM-Stable-Tag.
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> |
*/
xmlNodePtr
xmlXPathNextFollowing(xmlXPathParserContextPtr ctxt, xmlNodePtr cur) {
if ((ctxt == NULL) || (ctxt->context == NULL)) return(NULL);
if (cur != NULL && cur->children != NULL)
return cur->children ;
if (cur == NULL) cur = ctxt->context->node;
if (cur == NULL) return(NULL) ; /* ERROR */
if (cur->next != NULL) return(cur->next) ;
do {
cur = cur->parent;
if (cur == NULL) break;
if (cur == (xmlNodePtr) ctxt->context->doc) return(NULL);
if (cur->next != NULL) return(cur->next);
} while (cur != NULL); | 1 | [
"CWE-119"
] | libxml2 | 91d19754d46acd4a639a8b9e31f50f31c78f8c9c | 14,314,166,539,612,756,000,000,000,000,000,000,000 | 15 | Fix the semantic of XPath axis for namespace/attribute context nodes
The processing of namespace and attributes nodes was not compliant
to the XPath-1.0 specification |
*/
static xmlNodePtr
xmlXPathNextPrecedingInternal(xmlXPathParserContextPtr ctxt,
xmlNodePtr cur)
{
if ((ctxt == NULL) || (ctxt->context == NULL)) return(NULL);
if (cur == NULL) {
cur = ctxt->context->node;
if (cur == NULL)
return (NULL);
if (cur->type == XML_NAMESPACE_DECL)
cur = (xmlNodePtr)((xmlNsPtr)cur)->next;
ctxt->ancestor = cur->parent;
}
if ((cur->prev != NULL) && (cur->prev->type == XML_DTD_NODE))
cur = cur->prev;
while (cur->prev == NULL) {
cur = cur->parent;
if (cur == NULL)
return (NULL);
if (cur == ctxt->context->doc->children)
return (NULL);
if (cur != ctxt->ancestor)
return (cur);
ctxt->ancestor = cur->parent;
}
cur = cur->prev;
while (cur->last != NULL)
cur = cur->last; | 1 | [
"CWE-119"
] | libxml2 | 91d19754d46acd4a639a8b9e31f50f31c78f8c9c | 305,399,539,984,494,130,000,000,000,000,000,000,000 | 29 | Fix the semantic of XPath axis for namespace/attribute context nodes
The processing of namespace and attributes nodes was not compliant
to the XPath-1.0 specification |
*/
xmlNodePtr
xmlXPathNextPreceding(xmlXPathParserContextPtr ctxt, xmlNodePtr cur)
{
if ((ctxt == NULL) || (ctxt->context == NULL)) return(NULL);
if (cur == NULL)
cur = ctxt->context->node;
if (cur == NULL)
return (NULL);
if ((cur->prev != NULL) && (cur->prev->type == XML_DTD_NODE))
cur = cur->prev;
do {
if (cur->prev != NULL) {
for (cur = cur->prev; cur->last != NULL; cur = cur->last) ;
return (cur);
}
cur = cur->parent;
if (cur == NULL)
return (NULL);
if (cur == ctxt->context->doc->children)
return (NULL);
} while (xmlXPathIsAncestor(cur, ctxt->context->node)); | 1 | [
"CWE-119"
] | libxml2 | 91d19754d46acd4a639a8b9e31f50f31c78f8c9c | 217,427,973,257,270,060,000,000,000,000,000,000,000 | 23 | Fix the semantic of XPath axis for namespace/attribute context nodes
The processing of namespace and attributes nodes was not compliant
to the XPath-1.0 specification |
*/
xmlNodePtr
xmlXPathNextPreceding(xmlXPathParserContextPtr ctxt, xmlNodePtr cur)
{
if ((ctxt == NULL) || (ctxt->context == NULL)) return(NULL);
if ((ctxt->context->node->type == XML_ATTRIBUTE_NODE) ||
(ctxt->context->node->type == XML_NAMESPACE_DECL))
return(NULL);
if (cur == NULL)
cur = ctxt->context->node;
if (cur == NULL)
return (NULL);
if ((cur->prev != NULL) && (cur->prev->type == XML_DTD_NODE))
cur = cur->prev;
do {
if (cur->prev != NULL) {
for (cur = cur->prev; cur->last != NULL; cur = cur->last) ;
return (cur);
}
cur = cur->parent;
if (cur == NULL)
return (NULL);
if (cur == ctxt->context->doc->children)
return (NULL);
} while (xmlXPathIsAncestor(cur, ctxt->context->node)); | 1 | [
"CWE-119"
] | libxml2 | ea90b894146030c214a7df6d8375310174f134b9 | 22,647,589,190,040,830,000,000,000,000,000,000,000 | 26 | Fix a change of semantic on XPath preceding and following axis
This was introduced in the prevous fix, while preceding-sibling and
following sibling axis are empty for attributes and namespaces,
preceding and following axis should still work based on the parent
element. However the parent element is not available for a namespace
node, so we keep the axis empty in that case. |
*/
static xmlNodePtr
xmlXPathNextPrecedingInternal(xmlXPathParserContextPtr ctxt,
xmlNodePtr cur)
{
if ((ctxt == NULL) || (ctxt->context == NULL)) return(NULL);
if ((ctxt->context->node->type == XML_ATTRIBUTE_NODE) ||
(ctxt->context->node->type == XML_NAMESPACE_DECL))
return(NULL);
if (cur == NULL) {
cur = ctxt->context->node;
if (cur == NULL)
return (NULL);
ctxt->ancestor = cur->parent;
}
if ((cur->prev != NULL) && (cur->prev->type == XML_DTD_NODE))
cur = cur->prev;
while (cur->prev == NULL) {
cur = cur->parent;
if (cur == NULL)
return (NULL);
if (cur == ctxt->context->doc->children)
return (NULL);
if (cur != ctxt->ancestor)
return (cur);
ctxt->ancestor = cur->parent;
}
cur = cur->prev;
while (cur->last != NULL)
cur = cur->last; | 1 | [
"CWE-119"
] | libxml2 | ea90b894146030c214a7df6d8375310174f134b9 | 56,491,134,944,836,480,000,000,000,000,000,000,000 | 30 | Fix a change of semantic on XPath preceding and following axis
This was introduced in the prevous fix, while preceding-sibling and
following sibling axis are empty for attributes and namespaces,
preceding and following axis should still work based on the parent
element. However the parent element is not available for a namespace
node, so we keep the axis empty in that case. |
*/
xmlNodePtr
xmlXPathNextFollowing(xmlXPathParserContextPtr ctxt, xmlNodePtr cur) {
if ((ctxt == NULL) || (ctxt->context == NULL)) return(NULL);
if ((ctxt->context->node->type == XML_ATTRIBUTE_NODE) ||
(ctxt->context->node->type == XML_NAMESPACE_DECL))
return(NULL);
if (cur != NULL) {
if ((cur->type == XML_ATTRIBUTE_NODE) ||
(cur->type == XML_NAMESPACE_DECL))
return(NULL);
if (cur->children != NULL)
return cur->children ;
}
if (cur == NULL) cur = ctxt->context->node;
if (cur == NULL) return(NULL) ; /* ERROR */
if (cur->next != NULL) return(cur->next) ;
do {
cur = cur->parent;
if (cur == NULL) break;
if (cur == (xmlNodePtr) ctxt->context->doc) return(NULL);
if (cur->next != NULL) return(cur->next);
} while (cur != NULL); | 1 | [
"CWE-119"
] | libxml2 | ea90b894146030c214a7df6d8375310174f134b9 | 5,131,527,229,101,087,000,000,000,000,000,000,000 | 23 | Fix a change of semantic on XPath preceding and following axis
This was introduced in the prevous fix, while preceding-sibling and
following sibling axis are empty for attributes and namespaces,
preceding and following axis should still work based on the parent
element. However the parent element is not available for a namespace
node, so we keep the axis empty in that case. |
static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct net_device *dev;
struct sock *sk = sock->sk;
if (peer)
return -EOPNOTSUPP;
uaddr->sa_family = AF_PACKET;
rcu_read_lock();
dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
if (dev)
strlcpy(uaddr->sa_data, dev->name, 15);
else
memset(uaddr->sa_data, 0, 14);
rcu_read_unlock();
*uaddr_len = sizeof(*uaddr);
return 0;
} | 1 | [
"CWE-909"
] | linux-2.6 | 67286640f638f5ad41a946b9a3dc75327950248f | 328,198,438,257,263,270,000,000,000,000,000,000,000 | 21 | net: packet: fix information leak to userland
packet_getname_spkt() doesn't initialize all members of sa_data field of
sockaddr struct if strlen(dev->name) < 13. This structure is then copied
to userland. It leads to leaking of contents of kernel stack memory.
We have to fully fill sa_data with strncpy() instead of strlcpy().
The same with packet_getname(): it doesn't initialize sll_pkttype field of
sockaddr_ll. Set it to zero.
Signed-off-by: Vasiliy Kulikov <segooon@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net> |
static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct net_device *dev;
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
if (peer)
return -EOPNOTSUPP;
sll->sll_family = AF_PACKET;
sll->sll_ifindex = po->ifindex;
sll->sll_protocol = po->num;
rcu_read_lock();
dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
if (dev) {
sll->sll_hatype = dev->type;
sll->sll_halen = dev->addr_len;
memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
} else {
sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
sll->sll_halen = 0;
}
rcu_read_unlock();
*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
return 0;
} | 1 | [
"CWE-909"
] | linux-2.6 | 67286640f638f5ad41a946b9a3dc75327950248f | 153,573,650,754,849,100,000,000,000,000,000,000,000 | 29 | net: packet: fix information leak to userland
packet_getname_spkt() doesn't initialize all members of sa_data field of
sockaddr struct if strlen(dev->name) < 13. This structure is then copied
to userland. It leads to leaking of contents of kernel stack memory.
We have to fully fill sa_data with strncpy() instead of strlcpy().
The same with packet_getname(): it doesn't initialize sll_pkttype field of
sockaddr_ll. Set it to zero.
Signed-off-by: Vasiliy Kulikov <segooon@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net> |
static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
events->exception.injected =
vcpu->arch.exception.pending &&
!kvm_exception_is_soft(vcpu->arch.exception.nr);
events->exception.nr = vcpu->arch.exception.nr;
events->exception.has_error_code = vcpu->arch.exception.has_error_code;
events->exception.error_code = vcpu->arch.exception.error_code;
events->interrupt.injected =
vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
events->interrupt.nr = vcpu->arch.interrupt.nr;
events->interrupt.soft = 0;
events->interrupt.shadow =
kvm_x86_ops->get_interrupt_shadow(vcpu,
KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
events->nmi.injected = vcpu->arch.nmi_injected;
events->nmi.pending = vcpu->arch.nmi_pending;
events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
events->sipi_vector = vcpu->arch.sipi_vector;
events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
| KVM_VCPUEVENT_VALID_SIPI_VECTOR
| KVM_VCPUEVENT_VALID_SHADOW);
} | 1 | [
"CWE-200"
] | kvm | 831d9d02f9522e739825a51a11e3bc5aa531a905 | 171,594,829,116,740,970,000,000,000,000,000,000,000 | 28 | KVM: x86: fix information leak to userland
Structures kvm_vcpu_events, kvm_debugregs, kvm_pit_state2 and
kvm_clock_data are copied to userland with some padding and reserved
fields unitialized. It leads to leaking of contents of kernel stack
memory. We have to initialize them to zero.
In patch v1 Jan Kiszka suggested to fill reserved fields with zeros
instead of memset'ting the whole struct. It makes sense as these
fields are explicitly marked as padding. No more fields need zeroing.
KVM-Stable-Tag.
Signed-off-by: Vasiliy Kulikov <segooon@gmail.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> |
static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{
int r = 0;
mutex_lock(&kvm->arch.vpit->pit_state.lock);
memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
sizeof(ps->channels));
ps->flags = kvm->arch.vpit->pit_state.flags;
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
return r;
} | 1 | [
"CWE-200"
] | kvm | 831d9d02f9522e739825a51a11e3bc5aa531a905 | 235,888,838,580,346,270,000,000,000,000,000,000,000 | 11 | KVM: x86: fix information leak to userland
Structures kvm_vcpu_events, kvm_debugregs, kvm_pit_state2 and
kvm_clock_data are copied to userland with some padding and reserved
fields unitialized. It leads to leaking of contents of kernel stack
memory. We have to initialize them to zero.
In patch v1 Jan Kiszka suggested to fill reserved fields with zeros
instead of memset'ting the whole struct. It makes sense as these
fields are explicitly marked as padding. No more fields need zeroing.
KVM-Stable-Tag.
Signed-off-by: Vasiliy Kulikov <segooon@gmail.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> |
static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
struct kvm_debugregs *dbgregs)
{
memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
dbgregs->dr6 = vcpu->arch.dr6;
dbgregs->dr7 = vcpu->arch.dr7;
dbgregs->flags = 0;
} | 1 | [
"CWE-200"
] | kvm | 831d9d02f9522e739825a51a11e3bc5aa531a905 | 36,600,902,219,981,106,000,000,000,000,000,000,000 | 8 | KVM: x86: fix information leak to userland
Structures kvm_vcpu_events, kvm_debugregs, kvm_pit_state2 and
kvm_clock_data are copied to userland with some padding and reserved
fields unitialized. It leads to leaking of contents of kernel stack
memory. We have to initialize them to zero.
In patch v1 Jan Kiszka suggested to fill reserved fields with zeros
instead of memset'ting the whole struct. It makes sense as these
fields are explicitly marked as padding. No more fields need zeroing.
KVM-Stable-Tag.
Signed-off-by: Vasiliy Kulikov <segooon@gmail.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> |
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm *kvm = filp->private_data;
void __user *argp = (void __user *)arg;
int r = -ENOTTY;
/*
* This union makes it completely explicit to gcc-3.x
* that these two variables' stack usage should be
* combined, not added together.
*/
union {
struct kvm_pit_state ps;
struct kvm_pit_state2 ps2;
struct kvm_pit_config pit_config;
} u;
switch (ioctl) {
case KVM_SET_TSS_ADDR:
r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
if (r < 0)
goto out;
break;
case KVM_SET_IDENTITY_MAP_ADDR: {
u64 ident_addr;
r = -EFAULT;
if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
goto out;
r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
if (r < 0)
goto out;
break;
}
case KVM_SET_NR_MMU_PAGES:
r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
if (r)
goto out;
break;
case KVM_GET_NR_MMU_PAGES:
r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
break;
case KVM_CREATE_IRQCHIP: {
struct kvm_pic *vpic;
mutex_lock(&kvm->lock);
r = -EEXIST;
if (kvm->arch.vpic)
goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
if (vpic) {
r = kvm_ioapic_init(kvm);
if (r) {
kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
&vpic->dev);
kfree(vpic);
goto create_irqchip_unlock;
}
} else
goto create_irqchip_unlock;
smp_wmb();
kvm->arch.vpic = vpic;
smp_wmb();
r = kvm_setup_default_irq_routing(kvm);
if (r) {
mutex_lock(&kvm->irq_lock);
kvm_ioapic_destroy(kvm);
kvm_destroy_pic(kvm);
mutex_unlock(&kvm->irq_lock);
}
create_irqchip_unlock:
mutex_unlock(&kvm->lock);
break;
}
case KVM_CREATE_PIT:
u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
goto create_pit;
case KVM_CREATE_PIT2:
r = -EFAULT;
if (copy_from_user(&u.pit_config, argp,
sizeof(struct kvm_pit_config)))
goto out;
create_pit:
mutex_lock(&kvm->slots_lock);
r = -EEXIST;
if (kvm->arch.vpit)
goto create_pit_unlock;
r = -ENOMEM;
kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
if (kvm->arch.vpit)
r = 0;
create_pit_unlock:
mutex_unlock(&kvm->slots_lock);
break;
case KVM_IRQ_LINE_STATUS:
case KVM_IRQ_LINE: {
struct kvm_irq_level irq_event;
r = -EFAULT;
if (copy_from_user(&irq_event, argp, sizeof irq_event))
goto out;
r = -ENXIO;
if (irqchip_in_kernel(kvm)) {
__s32 status;
status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
irq_event.irq, irq_event.level);
if (ioctl == KVM_IRQ_LINE_STATUS) {
r = -EFAULT;
irq_event.status = status;
if (copy_to_user(argp, &irq_event,
sizeof irq_event))
goto out;
}
r = 0;
}
break;
}
case KVM_GET_IRQCHIP: {
/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
r = -ENOMEM;
if (!chip)
goto out;
r = -EFAULT;
if (copy_from_user(chip, argp, sizeof *chip))
goto get_irqchip_out;
r = -ENXIO;
if (!irqchip_in_kernel(kvm))
goto get_irqchip_out;
r = kvm_vm_ioctl_get_irqchip(kvm, chip);
if (r)
goto get_irqchip_out;
r = -EFAULT;
if (copy_to_user(argp, chip, sizeof *chip))
goto get_irqchip_out;
r = 0;
get_irqchip_out:
kfree(chip);
if (r)
goto out;
break;
}
case KVM_SET_IRQCHIP: {
/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
r = -ENOMEM;
if (!chip)
goto out;
r = -EFAULT;
if (copy_from_user(chip, argp, sizeof *chip))
goto set_irqchip_out;
r = -ENXIO;
if (!irqchip_in_kernel(kvm))
goto set_irqchip_out;
r = kvm_vm_ioctl_set_irqchip(kvm, chip);
if (r)
goto set_irqchip_out;
r = 0;
set_irqchip_out:
kfree(chip);
if (r)
goto out;
break;
}
case KVM_GET_PIT: {
r = -EFAULT;
if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
goto out;
r = -ENXIO;
if (!kvm->arch.vpit)
goto out;
r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
goto out;
r = 0;
break;
}
case KVM_SET_PIT: {
r = -EFAULT;
if (copy_from_user(&u.ps, argp, sizeof u.ps))
goto out;
r = -ENXIO;
if (!kvm->arch.vpit)
goto out;
r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
if (r)
goto out;
r = 0;
break;
}
case KVM_GET_PIT2: {
r = -ENXIO;
if (!kvm->arch.vpit)
goto out;
r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
goto out;
r = 0;
break;
}
case KVM_SET_PIT2: {
r = -EFAULT;
if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
goto out;
r = -ENXIO;
if (!kvm->arch.vpit)
goto out;
r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
if (r)
goto out;
r = 0;
break;
}
case KVM_REINJECT_CONTROL: {
struct kvm_reinject_control control;
r = -EFAULT;
if (copy_from_user(&control, argp, sizeof(control)))
goto out;
r = kvm_vm_ioctl_reinject(kvm, &control);
if (r)
goto out;
r = 0;
break;
}
case KVM_XEN_HVM_CONFIG: {
r = -EFAULT;
if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
sizeof(struct kvm_xen_hvm_config)))
goto out;
r = -EINVAL;
if (kvm->arch.xen_hvm_config.flags)
goto out;
r = 0;
break;
}
case KVM_SET_CLOCK: {
struct kvm_clock_data user_ns;
u64 now_ns;
s64 delta;
r = -EFAULT;
if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
goto out;
r = -EINVAL;
if (user_ns.flags)
goto out;
r = 0;
local_irq_disable();
now_ns = get_kernel_ns();
delta = user_ns.clock - now_ns;
local_irq_enable();
kvm->arch.kvmclock_offset = delta;
break;
}
case KVM_GET_CLOCK: {
struct kvm_clock_data user_ns;
u64 now_ns;
local_irq_disable();
now_ns = get_kernel_ns();
user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
local_irq_enable();
user_ns.flags = 0;
r = -EFAULT;
if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
goto out;
r = 0;
break;
}
default:
;
}
out:
return r;
} | 1 | [
"CWE-200"
] | kvm | 831d9d02f9522e739825a51a11e3bc5aa531a905 | 73,944,347,431,932,040,000,000,000,000,000,000,000 | 288 | KVM: x86: fix information leak to userland
Structures kvm_vcpu_events, kvm_debugregs, kvm_pit_state2 and
kvm_clock_data are copied to userland with some padding and reserved
fields unitialized. It leads to leaking of contents of kernel stack
memory. We have to initialize them to zero.
In patch v1 Jan Kiszka suggested to fill reserved fields with zeros
instead of memset'ting the whole struct. It makes sense as these
fields are explicitly marked as padding. No more fields need zeroing.
KVM-Stable-Tag.
Signed-off-by: Vasiliy Kulikov <segooon@gmail.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> |
mono_lookup_pinvoke_call (MonoMethod *method, const char **exc_class, const char **exc_arg)
{
MonoImage *image = method->klass->image;
MonoMethodPInvoke *piinfo = (MonoMethodPInvoke *)method;
MonoTableInfo *tables = image->tables;
MonoTableInfo *im = &tables [MONO_TABLE_IMPLMAP];
MonoTableInfo *mr = &tables [MONO_TABLE_MODULEREF];
guint32 im_cols [MONO_IMPLMAP_SIZE];
guint32 scope_token;
const char *import = NULL;
const char *orig_scope;
const char *new_scope;
char *error_msg;
char *full_name, *file_name;
int i;
MonoDl *module = NULL;
g_assert (method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL);
if (piinfo->addr)
return piinfo->addr;
if (method->klass->image->dynamic) {
MonoReflectionMethodAux *method_aux =
g_hash_table_lookup (
((MonoDynamicImage*)method->klass->image)->method_aux_hash, method);
if (!method_aux)
return NULL;
import = method_aux->dllentry;
orig_scope = method_aux->dll;
}
else {
if (!piinfo->implmap_idx)
return NULL;
mono_metadata_decode_row (im, piinfo->implmap_idx - 1, im_cols, MONO_IMPLMAP_SIZE);
piinfo->piflags = im_cols [MONO_IMPLMAP_FLAGS];
import = mono_metadata_string_heap (image, im_cols [MONO_IMPLMAP_NAME]);
scope_token = mono_metadata_decode_row_col (mr, im_cols [MONO_IMPLMAP_SCOPE] - 1, MONO_MODULEREF_NAME);
orig_scope = mono_metadata_string_heap (image, scope_token);
}
mono_dllmap_lookup (image, orig_scope, import, &new_scope, &import);
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_DLLIMPORT,
"DllImport attempting to load: '%s'.", new_scope);
if (exc_class) {
*exc_class = NULL;
*exc_arg = NULL;
}
/* we allow a special name to dlopen from the running process namespace */
if (strcmp (new_scope, "__Internal") == 0)
module = mono_dl_open (NULL, MONO_DL_LAZY, &error_msg);
/*
* Try loading the module using a variety of names
*/
for (i = 0; i < 4; ++i) {
switch (i) {
case 0:
/* Try the original name */
file_name = g_strdup (new_scope);
break;
case 1:
/* Try trimming the .dll extension */
if (strstr (new_scope, ".dll") == (new_scope + strlen (new_scope) - 4)) {
file_name = g_strdup (new_scope);
file_name [strlen (new_scope) - 4] = '\0';
}
else
continue;
break;
case 2:
if (strstr (new_scope, "lib") != new_scope) {
file_name = g_strdup_printf ("lib%s", new_scope);
}
else
continue;
break;
default:
#ifndef TARGET_WIN32
if (!g_ascii_strcasecmp ("user32.dll", new_scope) ||
!g_ascii_strcasecmp ("kernel32.dll", new_scope) ||
!g_ascii_strcasecmp ("user32", new_scope) ||
!g_ascii_strcasecmp ("kernel", new_scope)) {
file_name = g_strdup ("libMonoSupportW.so");
} else
#endif
continue;
#ifndef TARGET_WIN32
break;
#endif
}
if (!module) {
void *iter = NULL;
while ((full_name = mono_dl_build_path (NULL, file_name, &iter))) {
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_DLLIMPORT,
"DllImport loading location: '%s'.", full_name);
module = cached_module_load (full_name, MONO_DL_LAZY, &error_msg);
if (!module) {
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_DLLIMPORT,
"DllImport error loading library: '%s'.",
error_msg);
g_free (error_msg);
}
g_free (full_name);
if (module)
break;
}
}
if (!module) {
void *iter = NULL;
while ((full_name = mono_dl_build_path (".", file_name, &iter))) {
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_DLLIMPORT,
"DllImport loading library: '%s'.", full_name);
module = cached_module_load (full_name, MONO_DL_LAZY, &error_msg);
if (!module) {
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_DLLIMPORT,
"DllImport error loading library '%s'.",
error_msg);
g_free (error_msg);
}
g_free (full_name);
if (module)
break;
}
}
if (!module) {
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_DLLIMPORT,
"DllImport loading: '%s'.", file_name);
module = cached_module_load (file_name, MONO_DL_LAZY, &error_msg);
if (!module) {
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_DLLIMPORT,
"DllImport error loading library '%s'.",
error_msg);
}
}
g_free (file_name);
if (module)
break;
}
if (!module) {
mono_trace (G_LOG_LEVEL_WARNING, MONO_TRACE_DLLIMPORT,
"DllImport unable to load library '%s'.",
error_msg);
g_free (error_msg);
if (exc_class) {
*exc_class = "DllNotFoundException";
*exc_arg = new_scope;
}
return NULL;
}
#ifdef TARGET_WIN32
if (import && import [0] == '#' && isdigit (import [1])) {
char *end;
long id;
id = strtol (import + 1, &end, 10);
if (id > 0 && *end == '\0')
import++;
}
#endif
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_DLLIMPORT,
"Searching for '%s'.", import);
if (piinfo->piflags & PINVOKE_ATTRIBUTE_NO_MANGLE) {
error_msg = mono_dl_symbol (module, import, &piinfo->addr);
} else {
char *mangled_name = NULL, *mangled_name2 = NULL;
int mangle_charset;
int mangle_stdcall;
int mangle_param_count;
#ifdef TARGET_WIN32
int param_count;
#endif
/*
* Search using a variety of mangled names
*/
for (mangle_charset = 0; mangle_charset <= 1; mangle_charset ++) {
for (mangle_stdcall = 0; mangle_stdcall <= 1; mangle_stdcall ++) {
gboolean need_param_count = FALSE;
#ifdef TARGET_WIN32
if (mangle_stdcall > 0)
need_param_count = TRUE;
#endif
for (mangle_param_count = 0; mangle_param_count <= (need_param_count ? 256 : 0); mangle_param_count += 4) {
if (piinfo->addr)
continue;
mangled_name = (char*)import;
switch (piinfo->piflags & PINVOKE_ATTRIBUTE_CHAR_SET_MASK) {
case PINVOKE_ATTRIBUTE_CHAR_SET_UNICODE:
/* Try the mangled name first */
if (mangle_charset == 0)
mangled_name = g_strconcat (import, "W", NULL);
break;
case PINVOKE_ATTRIBUTE_CHAR_SET_AUTO:
#ifdef TARGET_WIN32
if (mangle_charset == 0)
mangled_name = g_strconcat (import, "W", NULL);
#else
/* Try the mangled name last */
if (mangle_charset == 1)
mangled_name = g_strconcat (import, "A", NULL);
#endif
break;
case PINVOKE_ATTRIBUTE_CHAR_SET_ANSI:
default:
/* Try the mangled name last */
if (mangle_charset == 1)
mangled_name = g_strconcat (import, "A", NULL);
break;
}
#ifdef TARGET_WIN32
if (mangle_param_count == 0)
param_count = mono_method_signature (method)->param_count * sizeof (gpointer);
else
/* Try brute force, since it would be very hard to compute the stack usage correctly */
param_count = mangle_param_count;
/* Try the stdcall mangled name */
/*
* gcc under windows creates mangled names without the underscore, but MS.NET
* doesn't support it, so we doesn't support it either.
*/
if (mangle_stdcall == 1)
mangled_name2 = g_strdup_printf ("_%s@%d", mangled_name, param_count);
else
mangled_name2 = mangled_name;
#else
mangled_name2 = mangled_name;
#endif
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_DLLIMPORT,
"Probing '%s'.", mangled_name2);
error_msg = mono_dl_symbol (module, mangled_name2, &piinfo->addr);
if (piinfo->addr)
mono_trace (G_LOG_LEVEL_INFO, MONO_TRACE_DLLIMPORT,
"Found as '%s'.", mangled_name2);
if (mangled_name != mangled_name2)
g_free (mangled_name2);
if (mangled_name != import)
g_free (mangled_name);
}
}
}
}
if (!piinfo->addr) {
g_free (error_msg);
if (exc_class) {
*exc_class = "EntryPointNotFoundException";
*exc_arg = import;
}
return NULL;
}
return piinfo->addr;
} | 1 | [] | mono | 8e890a3bf80a4620e417814dc14886b1bbd17625 | 320,715,786,743,187,800,000,000,000,000,000,000,000 | 276 | Search for dllimported shared libs in the base directory, not cwd.
* loader.c: we don't search the current directory anymore for shared
libraries referenced in DllImport attributes, as it has a slight
security risk. We search in the same directory where the referencing
image was loaded from, instead. Fixes bug# 641915. |
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int proto)
{
struct net *net = dev_net(skb->dev);
struct sock *sk;
struct udphdr *uh;
struct in6_addr *saddr, *daddr;
u32 ulen = 0;
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
goto short_packet;
saddr = &ipv6_hdr(skb)->saddr;
daddr = &ipv6_hdr(skb)->daddr;
uh = udp_hdr(skb);
ulen = ntohs(uh->len);
if (ulen > skb->len)
goto short_packet;
if (proto == IPPROTO_UDP) {
/* UDP validates ulen. */
/* Check for jumbo payload */
if (ulen == 0)
ulen = skb->len;
if (ulen < sizeof(*uh))
goto short_packet;
if (ulen < skb->len) {
if (pskb_trim_rcsum(skb, ulen))
goto short_packet;
saddr = &ipv6_hdr(skb)->saddr;
daddr = &ipv6_hdr(skb)->daddr;
uh = udp_hdr(skb);
}
}
if (udp6_csum_init(skb, uh, proto))
goto discard;
/*
* Multicast receive code
*/
if (ipv6_addr_is_multicast(daddr))
return __udp6_lib_mcast_deliver(net, skb,
saddr, daddr, udptable);
/* Unicast */
/*
* check socket cache ... must talk to Alan about his plans
* for sock caches... i'll skip this for now.
*/
sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
if (sk == NULL) {
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard;
if (udp_lib_checksum_complete(skb))
goto discard;
UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
proto == IPPROTO_UDPLITE);
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
kfree_skb(skb);
return 0;
}
/* deliver */
bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
udpv6_queue_rcv_skb(sk, skb);
else if (sk_add_backlog(sk, skb)) {
atomic_inc(&sk->sk_drops);
bh_unlock_sock(sk);
sock_put(sk);
goto discard;
}
bh_unlock_sock(sk);
sock_put(sk);
return 0;
short_packet:
LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: %d/%u\n",
proto == IPPROTO_UDPLITE ? "-Lite" : "",
ulen, skb->len);
discard:
UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
kfree_skb(skb);
return 0;
} | 1 | [
"CWE-400"
] | linux-2.6 | c377411f2494a931ff7facdbb3a6839b1266bcf6 | 131,981,117,658,961,660,000,000,000,000,000,000,000 | 97 | net: sk_add_backlog() take rmem_alloc into account
Current socket backlog limit is not enough to really stop DDOS attacks,
because user thread spend many time to process a full backlog each
round, and user might crazy spin on socket lock.
We should add backlog size and receive_queue size (aka rmem_alloc) to
pace writers, and let user run without being slow down too much.
Introduce a sk_rcvqueues_full() helper, to avoid taking socket lock in
stress situations.
Under huge stress from a multiqueue/RPS enabled NIC, a single flow udp
receiver can now process ~200.000 pps (instead of ~100 pps before the
patch) on a 8 core machine.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net> |
static void flush_stack(struct sock **stack, unsigned int count,
struct sk_buff *skb, unsigned int final)
{
unsigned int i;
struct sock *sk;
struct sk_buff *skb1;
for (i = 0; i < count; i++) {
skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
sk = stack[i];
if (skb1) {
bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
udpv6_queue_rcv_skb(sk, skb1);
else if (sk_add_backlog(sk, skb1)) {
kfree_skb(skb1);
bh_unlock_sock(sk);
goto drop;
}
bh_unlock_sock(sk);
continue;
}
drop:
atomic_inc(&sk->sk_drops);
UDP6_INC_STATS_BH(sock_net(sk),
UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
UDP6_INC_STATS_BH(sock_net(sk),
UDP_MIB_INERRORS, IS_UDPLITE(sk));
}
} | 1 | [
"CWE-400"
] | linux-2.6 | c377411f2494a931ff7facdbb3a6839b1266bcf6 | 74,403,901,724,201,770,000,000,000,000,000,000,000 | 31 | net: sk_add_backlog() take rmem_alloc into account
Current socket backlog limit is not enough to really stop DDOS attacks,
because user thread spend many time to process a full backlog each
round, and user might crazy spin on socket lock.
We should add backlog size and receive_queue size (aka rmem_alloc) to
pace writers, and let user run without being slow down too much.
Introduce a sk_rcvqueues_full() helper, to avoid taking socket lock in
stress situations.
Under huge stress from a multiqueue/RPS enabled NIC, a single flow udp
receiver can now process ~200.000 pps (instead of ~100 pps before the
patch) on a 8 core machine.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net> |
int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
int rc = NET_RX_SUCCESS;
if (sk_filter(sk, skb))
goto discard_and_relse;
skb->dev = NULL;
if (nested)
bh_lock_sock_nested(sk);
else
bh_lock_sock(sk);
if (!sock_owned_by_user(sk)) {
/*
* trylock + unlock semantics:
*/
mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
rc = sk_backlog_rcv(sk, skb);
mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
} else if (sk_add_backlog(sk, skb)) {
bh_unlock_sock(sk);
atomic_inc(&sk->sk_drops);
goto discard_and_relse;
}
bh_unlock_sock(sk);
out:
sock_put(sk);
return rc;
discard_and_relse:
kfree_skb(skb);
goto out;
} | 1 | [
"CWE-400"
] | linux-2.6 | c377411f2494a931ff7facdbb3a6839b1266bcf6 | 51,200,697,962,027,410,000,000,000,000,000,000,000 | 36 | net: sk_add_backlog() take rmem_alloc into account
Current socket backlog limit is not enough to really stop DDOS attacks,
because user thread spend many time to process a full backlog each
round, and user might crazy spin on socket lock.
We should add backlog size and receive_queue size (aka rmem_alloc) to
pace writers, and let user run without being slow down too much.
Introduce a sk_rcvqueues_full() helper, to avoid taking socket lock in
stress situations.
Under huge stress from a multiqueue/RPS enabled NIC, a single flow udp
receiver can now process ~200.000 pps (instead of ~100 pps before the
patch) on a 8 core machine.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net> |
int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
struct udp_sock *up = udp_sk(sk);
int rc;
int is_udplite = IS_UDPLITE(sk);
/*
* Charge it to the socket, dropping if the queue is full.
*/
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto drop;
nf_reset(skb);
if (up->encap_type) {
/*
* This is an encapsulation socket so pass the skb to
* the socket's udp_encap_rcv() hook. Otherwise, just
* fall through and pass this up the UDP socket.
* up->encap_rcv() returns the following value:
* =0 if skb was successfully passed to the encap
* handler or was discarded by it.
* >0 if skb should be passed on to UDP.
* <0 if skb should be resubmitted as proto -N
*/
/* if we're overly short, let UDP handle it */
if (skb->len > sizeof(struct udphdr) &&
up->encap_rcv != NULL) {
int ret;
ret = (*up->encap_rcv)(sk, skb);
if (ret <= 0) {
UDP_INC_STATS_BH(sock_net(sk),
UDP_MIB_INDATAGRAMS,
is_udplite);
return -ret;
}
}
/* FALLTHROUGH -- it's a UDP Packet */
}
/*
* UDP-Lite specific tests, ignored on UDP sockets
*/
if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
/*
* MIB statistics other than incrementing the error count are
* disabled for the following two types of errors: these depend
* on the application settings, not on the functioning of the
* protocol stack as such.
*
* RFC 3828 here recommends (sec 3.3): "There should also be a
* way ... to ... at least let the receiving application block
* delivery of packets with coverage values less than a value
* provided by the application."
*/
if (up->pcrlen == 0) { /* full coverage was set */
LIMIT_NETDEBUG(KERN_WARNING "UDPLITE: partial coverage "
"%d while full coverage %d requested\n",
UDP_SKB_CB(skb)->cscov, skb->len);
goto drop;
}
/* The next case involves violating the min. coverage requested
* by the receiver. This is subtle: if receiver wants x and x is
* greater than the buffersize/MTU then receiver will complain
* that it wants x while sender emits packets of smaller size y.
* Therefore the above ...()->partial_cov statement is essential.
*/
if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
LIMIT_NETDEBUG(KERN_WARNING
"UDPLITE: coverage %d too small, need min %d\n",
UDP_SKB_CB(skb)->cscov, up->pcrlen);
goto drop;
}
}
if (sk->sk_filter) {
if (udp_lib_checksum_complete(skb))
goto drop;
}
rc = 0;
bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
rc = __udp_queue_rcv_skb(sk, skb);
else if (sk_add_backlog(sk, skb)) {
bh_unlock_sock(sk);
goto drop;
}
bh_unlock_sock(sk);
return rc;
drop:
UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return -1;
} | 1 | [
"CWE-400"
] | linux-2.6 | c377411f2494a931ff7facdbb3a6839b1266bcf6 | 226,670,685,594,920,600,000,000,000,000,000,000,000 | 102 | net: sk_add_backlog() take rmem_alloc into account
Current socket backlog limit is not enough to really stop DDOS attacks,
because user thread spend many time to process a full backlog each
round, and user might crazy spin on socket lock.
We should add backlog size and receive_queue size (aka rmem_alloc) to
pace writers, and let user run without being slow down too much.
Introduce a sk_rcvqueues_full() helper, to avoid taking socket lock in
stress situations.
Under huge stress from a multiqueue/RPS enabled NIC, a single flow udp
receiver can now process ~200.000 pps (instead of ~100 pps before the
patch) on a 8 core machine.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net> |
static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1))
return -ENOBUFS;
__sk_add_backlog(sk, skb);
sk->sk_backlog.len += skb->truesize;
return 0;
} | 1 | [
"CWE-400"
] | linux-2.6 | c377411f2494a931ff7facdbb3a6839b1266bcf6 | 150,421,577,044,602,550,000,000,000,000,000,000,000 | 9 | net: sk_add_backlog() take rmem_alloc into account
Current socket backlog limit is not enough to really stop DDOS attacks,
because user thread spend many time to process a full backlog each
round, and user might crazy spin on socket lock.
We should add backlog size and receive_queue size (aka rmem_alloc) to
pace writers, and let user run without being slow down too much.
Introduce a sk_rcvqueues_full() helper, to avoid taking socket lock in
stress situations.
Under huge stress from a multiqueue/RPS enabled NIC, a single flow udp
receiver can now process ~200.000 pps (instead of ~100 pps before the
patch) on a 8 core machine.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net> |
void sock_init_data(struct socket *sock, struct sock *sk)
{
skb_queue_head_init(&sk->sk_receive_queue);
skb_queue_head_init(&sk->sk_write_queue);
skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
skb_queue_head_init(&sk->sk_async_wait_queue);
#endif
sk->sk_send_head = NULL;
init_timer(&sk->sk_timer);
sk->sk_allocation = GFP_KERNEL;
sk->sk_rcvbuf = sysctl_rmem_default;
sk->sk_sndbuf = sysctl_wmem_default;
sk->sk_backlog.limit = sk->sk_rcvbuf << 1;
sk->sk_state = TCP_CLOSE;
sk_set_socket(sk, sock);
sock_set_flag(sk, SOCK_ZAPPED);
if (sock) {
sk->sk_type = sock->type;
sk->sk_sleep = &sock->wait;
sock->sk = sk;
} else
sk->sk_sleep = NULL;
spin_lock_init(&sk->sk_dst_lock);
rwlock_init(&sk->sk_callback_lock);
lockdep_set_class_and_name(&sk->sk_callback_lock,
af_callback_keys + sk->sk_family,
af_family_clock_key_strings[sk->sk_family]);
sk->sk_state_change = sock_def_wakeup;
sk->sk_data_ready = sock_def_readable;
sk->sk_write_space = sock_def_write_space;
sk->sk_error_report = sock_def_error_report;
sk->sk_destruct = sock_def_destruct;
sk->sk_sndmsg_page = NULL;
sk->sk_sndmsg_off = 0;
sk->sk_peercred.pid = 0;
sk->sk_peercred.uid = -1;
sk->sk_peercred.gid = -1;
sk->sk_write_pending = 0;
sk->sk_rcvlowat = 1;
sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
sk->sk_stamp = ktime_set(-1L, 0);
/*
* Before updating sk_refcnt, we must commit prior changes to memory
* (Documentation/RCU/rculist_nulls.txt for details)
*/
smp_wmb();
atomic_set(&sk->sk_refcnt, 1);
atomic_set(&sk->sk_drops, 0);
} | 1 | [
"CWE-400"
] | linux-2.6 | c377411f2494a931ff7facdbb3a6839b1266bcf6 | 154,400,006,282,900,740,000,000,000,000,000,000,000 | 62 | net: sk_add_backlog() take rmem_alloc into account
Current socket backlog limit is not enough to really stop DDOS attacks,
because user thread spend many time to process a full backlog each
round, and user might crazy spin on socket lock.
We should add backlog size and receive_queue size (aka rmem_alloc) to
pace writers, and let user run without being slow down too much.
Introduce a sk_rcvqueues_full() helper, to avoid taking socket lock in
stress situations.
Under huge stress from a multiqueue/RPS enabled NIC, a single flow udp
receiver can now process ~200.000 pps (instead of ~100 pps before the
patch) on a 8 core machine.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net> |
SCTP_STATIC int sctp_init_sock(struct sock *sk)
{
struct sctp_endpoint *ep;
struct sctp_sock *sp;
SCTP_DEBUG_PRINTK("sctp_init_sock(sk: %p)\n", sk);
sp = sctp_sk(sk);
/* Initialize the SCTP per socket area. */
switch (sk->sk_type) {
case SOCK_SEQPACKET:
sp->type = SCTP_SOCKET_UDP;
break;
case SOCK_STREAM:
sp->type = SCTP_SOCKET_TCP;
break;
default:
return -ESOCKTNOSUPPORT;
}
/* Initialize default send parameters. These parameters can be
* modified with the SCTP_DEFAULT_SEND_PARAM socket option.
*/
sp->default_stream = 0;
sp->default_ppid = 0;
sp->default_flags = 0;
sp->default_context = 0;
sp->default_timetolive = 0;
sp->default_rcv_context = 0;
sp->max_burst = sctp_max_burst;
/* Initialize default setup parameters. These parameters
* can be modified with the SCTP_INITMSG socket option or
* overridden by the SCTP_INIT CMSG.
*/
sp->initmsg.sinit_num_ostreams = sctp_max_outstreams;
sp->initmsg.sinit_max_instreams = sctp_max_instreams;
sp->initmsg.sinit_max_attempts = sctp_max_retrans_init;
sp->initmsg.sinit_max_init_timeo = sctp_rto_max;
/* Initialize default RTO related parameters. These parameters can
* be modified for with the SCTP_RTOINFO socket option.
*/
sp->rtoinfo.srto_initial = sctp_rto_initial;
sp->rtoinfo.srto_max = sctp_rto_max;
sp->rtoinfo.srto_min = sctp_rto_min;
/* Initialize default association related parameters. These parameters
* can be modified with the SCTP_ASSOCINFO socket option.
*/
sp->assocparams.sasoc_asocmaxrxt = sctp_max_retrans_association;
sp->assocparams.sasoc_number_peer_destinations = 0;
sp->assocparams.sasoc_peer_rwnd = 0;
sp->assocparams.sasoc_local_rwnd = 0;
sp->assocparams.sasoc_cookie_life = sctp_valid_cookie_life;
/* Initialize default event subscriptions. By default, all the
* options are off.
*/
memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe));
/* Default Peer Address Parameters. These defaults can
* be modified via SCTP_PEER_ADDR_PARAMS
*/
sp->hbinterval = sctp_hb_interval;
sp->pathmaxrxt = sctp_max_retrans_path;
sp->pathmtu = 0; // allow default discovery
sp->sackdelay = sctp_sack_timeout;
sp->sackfreq = 2;
sp->param_flags = SPP_HB_ENABLE |
SPP_PMTUD_ENABLE |
SPP_SACKDELAY_ENABLE;
/* If enabled no SCTP message fragmentation will be performed.
* Configure through SCTP_DISABLE_FRAGMENTS socket option.
*/
sp->disable_fragments = 0;
/* Enable Nagle algorithm by default. */
sp->nodelay = 0;
/* Enable by default. */
sp->v4mapped = 1;
/* Auto-close idle associations after the configured
* number of seconds. A value of 0 disables this
* feature. Configure through the SCTP_AUTOCLOSE socket option,
* for UDP-style sockets only.
*/
sp->autoclose = 0;
/* User specified fragmentation limit. */
sp->user_frag = 0;
sp->adaptation_ind = 0;
sp->pf = sctp_get_pf_specific(sk->sk_family);
/* Control variables for partial data delivery. */
atomic_set(&sp->pd_mode, 0);
skb_queue_head_init(&sp->pd_lobby);
sp->frag_interleave = 0;
/* Create a per socket endpoint structure. Even if we
* change the data structure relationships, this may still
* be useful for storing pre-connect address information.
*/
ep = sctp_endpoint_new(sk, GFP_KERNEL);
if (!ep)
return -ENOMEM;
sp->ep = ep;
sp->hmac = NULL;
SCTP_DBG_OBJCNT_INC(sock);
percpu_counter_inc(&sctp_sockets_allocated);
/* Set socket backlog limit. */
sk->sk_backlog.limit = sysctl_sctp_rmem[1];
local_bh_disable();
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
local_bh_enable();
return 0;
} | 1 | [
"CWE-400"
] | linux-2.6 | c377411f2494a931ff7facdbb3a6839b1266bcf6 | 13,262,037,595,885,664,000,000,000,000,000,000,000 | 128 | net: sk_add_backlog() take rmem_alloc into account
Current socket backlog limit is not enough to really stop DDOS attacks,
because user thread spend many time to process a full backlog each
round, and user might crazy spin on socket lock.
We should add backlog size and receive_queue size (aka rmem_alloc) to
pace writers, and let user run without being slow down too much.
Introduce a sk_rcvqueues_full() helper, to avoid taking socket lock in
stress situations.
Under huge stress from a multiqueue/RPS enabled NIC, a single flow udp
receiver can now process ~200.000 pps (instead of ~100 pps before the
patch) on a 8 core machine.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net> |
MonoReflectionMethod*
mono_reflection_bind_generic_method_parameters (MonoReflectionMethod *rmethod, MonoArray *types)
{
MonoClass *klass;
MonoMethod *method, *inflated;
MonoMethodInflated *imethod;
MonoGenericContext tmp_context;
MonoGenericInst *ginst;
MonoType **type_argv;
int count, i;
MONO_ARCH_SAVE_REGS;
if (!strcmp (rmethod->object.vtable->klass->name, "MethodBuilder")) {
#ifndef DISABLE_REFLECTION_EMIT
MonoReflectionMethodBuilder *mb = NULL;
MonoReflectionTypeBuilder *tb;
MonoClass *klass;
mb = (MonoReflectionMethodBuilder *) rmethod;
tb = (MonoReflectionTypeBuilder *) mb->type;
klass = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)tb));
method = methodbuilder_to_mono_method (klass, mb);
#else
g_assert_not_reached ();
method = NULL;
#endif
} else {
method = rmethod->method;
}
klass = method->klass;
if (method->is_inflated)
method = ((MonoMethodInflated *) method)->declaring;
count = mono_method_signature (method)->generic_param_count;
if (count != mono_array_length (types))
return NULL;
type_argv = g_new0 (MonoType *, count);
for (i = 0; i < count; i++) {
MonoReflectionType *garg = mono_array_get (types, gpointer, i);
type_argv [i] = mono_reflection_type_get_handle (garg);
}
ginst = mono_metadata_get_generic_inst (count, type_argv);
g_free (type_argv);
tmp_context.class_inst = klass->generic_class ? klass->generic_class->context.class_inst : NULL;
tmp_context.method_inst = ginst;
inflated = mono_class_inflate_generic_method (method, &tmp_context);
imethod = (MonoMethodInflated *) inflated;
if (method->klass->image->dynamic) {
MonoDynamicImage *image = (MonoDynamicImage*)method->klass->image;
/*
* This table maps metadata structures representing inflated methods/fields
* to the reflection objects representing their generic definitions.
*/
mono_loader_lock ();
mono_g_hash_table_insert (image->generic_def_objects, imethod, rmethod);
mono_loader_unlock ();
}
return mono_method_get_object (mono_object_domain (rmethod), inflated, NULL); | 1 | [
"CWE-20"
] | mono | 4905ef1130feb26c3150b28b97e4a96752e0d399 | 340,166,980,670,027,100,000,000,000,000,000,000,000 | 67 | Handle invalid instantiation of generic methods.
* verify.c: Add new function to internal verifier API to check
method instantiations.
* reflection.c (mono_reflection_bind_generic_method_parameters):
Check the instantiation before returning it.
Fixes #655847 |
MonoReflectionMethod*
mono_reflection_bind_generic_method_parameters (MonoReflectionMethod *rmethod, MonoArray *types)
{
MonoClass *klass;
MonoMethod *method, *inflated;
MonoMethodInflated *imethod;
MonoGenericContext tmp_context;
MonoGenericInst *ginst;
MonoType **type_argv;
int count, i;
MONO_ARCH_SAVE_REGS;
/*FIXME but this no longer should happen*/
if (!strcmp (rmethod->object.vtable->klass->name, "MethodBuilder")) {
#ifndef DISABLE_REFLECTION_EMIT
MonoReflectionMethodBuilder *mb = NULL;
MonoReflectionTypeBuilder *tb;
MonoClass *klass;
mb = (MonoReflectionMethodBuilder *) rmethod;
tb = (MonoReflectionTypeBuilder *) mb->type;
klass = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)tb));
method = methodbuilder_to_mono_method (klass, mb);
#else
g_assert_not_reached ();
method = NULL;
#endif
} else {
method = rmethod->method;
}
klass = method->klass;
if (method->is_inflated)
method = ((MonoMethodInflated *) method)->declaring;
count = mono_method_signature (method)->generic_param_count;
if (count != mono_array_length (types))
return NULL;
type_argv = g_new0 (MonoType *, count);
for (i = 0; i < count; i++) {
MonoReflectionType *garg = mono_array_get (types, gpointer, i);
type_argv [i] = mono_reflection_type_get_handle (garg);
}
ginst = mono_metadata_get_generic_inst (count, type_argv);
g_free (type_argv);
tmp_context.class_inst = klass->generic_class ? klass->generic_class->context.class_inst : NULL;
tmp_context.method_inst = ginst;
inflated = mono_class_inflate_generic_method (method, &tmp_context);
imethod = (MonoMethodInflated *) inflated;
/*FIXME but I think this is no longer necessary*/
if (method->klass->image->dynamic) {
MonoDynamicImage *image = (MonoDynamicImage*)method->klass->image;
/*
* This table maps metadata structures representing inflated methods/fields
* to the reflection objects representing their generic definitions.
*/
mono_loader_lock ();
mono_g_hash_table_insert (image->generic_def_objects, imethod, rmethod);
mono_loader_unlock ();
}
return mono_method_get_object (mono_object_domain (rmethod), inflated, NULL); | 1 | [
"CWE-20"
] | mono | 65292a69c837b8a5f7a392d34db63de592153358 | 161,148,815,879,048,570,000,000,000,000,000,000,000 | 69 | Handle invalid instantiation of generic methods.
* verify.c: Add new function to internal verifier API to check
method instantiations.
* reflection.c (mono_reflection_bind_generic_method_parameters):
Check the instantiation before returning it.
Fixes #655847 |
string_vformat(uschar *buffer, int buflen, char *format, va_list ap)
{
enum { L_NORMAL, L_SHORT, L_LONG, L_LONGLONG, L_LONGDOUBLE };
BOOL yield = TRUE;
int width, precision;
char *fp = format; /* Deliberately not unsigned */
uschar *p = buffer;
uschar *last = buffer + buflen - 1;
string_datestamp_offset = -1; /* Datestamp not inserted */
/* Scan the format and handle the insertions */
while (*fp != 0)
{
int length = L_NORMAL;
int *nptr;
int slen;
char *null = "NULL"; /* ) These variables */
char *item_start, *s; /* ) are deliberately */
char newformat[16]; /* ) not unsigned */
/* Non-% characters just get copied verbatim */
if (*fp != '%')
{
if (p >= last) { yield = FALSE; break; }
*p++ = (uschar)*fp++;
continue;
}
/* Deal with % characters. Pick off the width and precision, for checking
strings, skipping over the flag and modifier characters. */
item_start = fp;
width = precision = -1;
if (strchr("-+ #0", *(++fp)) != NULL)
{
if (*fp == '#') null = "";
fp++;
}
if (isdigit((uschar)*fp))
{
width = *fp++ - '0';
while (isdigit((uschar)*fp)) width = width * 10 + *fp++ - '0';
}
else if (*fp == '*')
{
width = va_arg(ap, int);
fp++;
}
if (*fp == '.')
{
if (*(++fp) == '*')
{
precision = va_arg(ap, int);
fp++;
}
else
{
precision = 0;
while (isdigit((uschar)*fp))
precision = precision*10 + *fp++ - '0';
}
}
/* Skip over 'h', 'L', 'l', and 'll', remembering the item length */
if (*fp == 'h')
{ fp++; length = L_SHORT; }
else if (*fp == 'L')
{ fp++; length = L_LONGDOUBLE; }
else if (*fp == 'l')
{
if (fp[1] == 'l')
{
fp += 2;
length = L_LONGLONG;
}
else
{
fp++;
length = L_LONG;
}
}
/* Handle each specific format type. */
switch (*fp++)
{
case 'n':
nptr = va_arg(ap, int *);
*nptr = p - buffer;
break;
case 'd':
case 'o':
case 'u':
case 'x':
case 'X':
if (p >= last - ((length > L_LONG)? 24 : 12))
{ yield = FALSE; goto END_FORMAT; }
strncpy(newformat, item_start, fp - item_start);
newformat[fp - item_start] = 0;
/* Short int is promoted to int when passing through ..., so we must use
int for va_arg(). */
switch(length)
{
case L_SHORT:
case L_NORMAL: sprintf(CS p, newformat, va_arg(ap, int)); break;
case L_LONG: sprintf(CS p, newformat, va_arg(ap, long int)); break;
case L_LONGLONG: sprintf(CS p, newformat, va_arg(ap, LONGLONG_T)); break;
}
while (*p) p++;
break;
case 'p':
if (p >= last - 24) { yield = FALSE; goto END_FORMAT; }
strncpy(newformat, item_start, fp - item_start);
newformat[fp - item_start] = 0;
sprintf(CS p, newformat, va_arg(ap, void *));
while (*p) p++;
break;
/* %f format is inherently insecure if the numbers that it may be
handed are unknown (e.g. 1e300). However, in Exim, %f is used for
printing load averages, and these are actually stored as integers
(load average * 1000) so the size of the numbers is constrained.
It is also used for formatting sending rates, where the simplicity
of the format prevents overflow. */
case 'f':
case 'e':
case 'E':
case 'g':
case 'G':
if (precision < 0) precision = 6;
if (p >= last - precision - 8) { yield = FALSE; goto END_FORMAT; }
strncpy(newformat, item_start, fp - item_start);
newformat[fp-item_start] = 0;
if (length == L_LONGDOUBLE)
sprintf(CS p, newformat, va_arg(ap, long double));
else
sprintf(CS p, newformat, va_arg(ap, double));
while (*p) p++;
break;
/* String types */
case '%':
if (p >= last) { yield = FALSE; goto END_FORMAT; }
*p++ = '%';
break;
case 'c':
if (p >= last) { yield = FALSE; goto END_FORMAT; }
*p++ = va_arg(ap, int);
break;
case 'D': /* Insert datestamp for log file names */
s = CS tod_stamp(tod_log_datestamp);
string_datestamp_offset = p - buffer; /* Passed back via global */
goto INSERT_STRING;
case 's':
case 'S': /* Forces *lower* case */
s = va_arg(ap, char *);
INSERT_STRING: /* Come to from %D above */
if (s == NULL) s = null;
slen = Ustrlen(s);
/* If the width is specified, check that there is a precision
set; if not, set it to the width to prevent overruns of long
strings. */
if (width >= 0)
{
if (precision < 0) precision = width;
}
/* If a width is not specified and the precision is specified, set
the width to the precision, or the string length if shorted. */
else if (precision >= 0)
{
width = (precision < slen)? precision : slen;
}
/* If neither are specified, set them both to the string length. */
else width = precision = slen;
/* Check string space, and add the string to the buffer if ok. If
not OK, add part of the string (debugging uses this to show as
much as possible). */
if (p >= last - width)
{
yield = FALSE;
width = precision = last - p - 1;
}
sprintf(CS p, "%*.*s", width, precision, s);
if (fp[-1] == 'S')
while (*p) { *p = tolower(*p); p++; }
else
while (*p) p++;
if (!yield) goto END_FORMAT;
break;
/* Some things are never used in Exim; also catches junk. */
default:
strncpy(newformat, item_start, fp - item_start);
newformat[fp-item_start] = 0;
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "string_format: unsupported type "
"in \"%s\" in \"%s\"", newformat, format);
break;
}
}
/* Ensure string is complete; return TRUE if got to the end of the format */
END_FORMAT:
*p = 0;
return yield;
} | 1 | [] | exim | 24c929a27415c7cfc7126c47e4cad39acf3efa6b | 320,005,744,502,620,400,000,000,000,000,000,000,000 | 234 | Buffer overrun fix. fixes: bug #787 |
handle_smtp_call(int *listen_sockets, int listen_socket_count,
  int accept_socket, struct sockaddr *accepted)
{
/* Handle one accepted SMTP connection: identify the caller, enforce the
daemon-level policy limits (total connection count, load average, per-host
connection count), then fork a subprocess to run the SMTP session and any
immediate deliveries. The parent books the child into the smtp_slots table
and returns; every error path drops the connection via ERROR_RETURN.

Arguments:
  listen_sockets       the daemon's listening sockets (closed in the child)
  listen_socket_count  number of entries in listen_sockets
  accept_socket        socket returned by accept() for this connection
  accepted             remote address, as filled in by accept()
*/

pid_t pid;
union sockaddr_46 interface_sockaddr;
EXIM_SOCKLEN_T ifsize = sizeof(interface_sockaddr);
int dup_accept_socket = -1;
int max_for_this_host = 0;
int wfsize = 0;
int wfptr = 0;
int use_log_write_selector = log_write_selector;
uschar *whofrom = NULL;

void *reset_point = store_get(0);

/* Make the address available in ASCII representation, and also fish out
the remote port. */

sender_host_address = host_ntoa(-1, accepted, NULL, &sender_host_port);

DEBUG(D_any) debug_printf("Connection request from %s port %d\n",
  sender_host_address, sender_host_port);

/* Set up the output stream, check the socket has duplicated, and set up the
input stream. These operations fail only in exceptional circumstances. Note
that never_error() won't use smtp_out if it is NULL. */

smtp_out = fdopen(accept_socket, "wb");
if (smtp_out == NULL)
  {
  never_error(US"daemon: fdopen() for smtp_out failed", US"", errno);
  goto ERROR_RETURN;
  }

dup_accept_socket = dup(accept_socket);
if (dup_accept_socket < 0)
  {
  never_error(US"daemon: couldn't dup socket descriptor",
    US"Connection setup failed", errno);
  goto ERROR_RETURN;
  }

smtp_in = fdopen(dup_accept_socket, "rb");
if (smtp_in == NULL)
  {
  never_error(US"daemon: fdopen() for smtp_in failed",
    US"Connection setup failed", errno);
  goto ERROR_RETURN;
  }

/* Get the data for the local interface address. Panic for most errors, but
"connection reset by peer" just means the connection went away. */

if (getsockname(accept_socket, (struct sockaddr *)(&interface_sockaddr),
     &ifsize) < 0)
  {
  log_write(0, LOG_MAIN | ((errno == ECONNRESET)? 0 : LOG_PANIC),
    "getsockname() failed: %s", strerror(errno));
  smtp_printf("421 Local problem: getsockname() failed; please try again later\r\n");
  goto ERROR_RETURN;
  }

interface_address = host_ntoa(-1, &interface_sockaddr, NULL, &interface_port);
DEBUG(D_interface) debug_printf("interface address=%s port=%d\n",
  interface_address, interface_port);

/* Build a string identifying the remote host and, if requested, the port and
the local interface data. This is for logging; at the end of this function the
memory is reclaimed. */

whofrom = string_append(whofrom, &wfsize, &wfptr, 3, "[", sender_host_address, "]");

if ((log_extra_selector & LX_incoming_port) != 0)
  whofrom = string_append(whofrom, &wfsize, &wfptr, 2, ":", string_sprintf("%d",
    sender_host_port));

if ((log_extra_selector & LX_incoming_interface) != 0)
  whofrom = string_append(whofrom, &wfsize, &wfptr, 4, " I=[",
    interface_address, "]:", string_sprintf("%d", interface_port));

whofrom[wfptr] = 0;    /* Terminate the newly-built string */

/* Check maximum number of connections. We do not check for reserved
connections or unacceptable hosts here. That is done in the subprocess because
it might take some time. */

if (smtp_accept_max > 0 && smtp_accept_count >= smtp_accept_max)
  {
  DEBUG(D_any) debug_printf("rejecting SMTP connection: count=%d max=%d\n",
    smtp_accept_count, smtp_accept_max);
  smtp_printf("421 Too many concurrent SMTP connections; "
    "please try again later.\r\n");
  log_write(L_connection_reject,
            LOG_MAIN, "Connection from %s refused: too many connections",
    whofrom);
  goto ERROR_RETURN;
  }

/* If a load limit above which only reserved hosts are acceptable is defined,
get the load average here, and if there are in fact no reserved hosts, do
the test right away (saves a fork). If there are hosts, do the check in the
subprocess because it might take time. */

if (smtp_load_reserve >= 0)
  {
  load_average = OS_GETLOADAVG();
  if (smtp_reserve_hosts == NULL && load_average > smtp_load_reserve)
    {
    DEBUG(D_any) debug_printf("rejecting SMTP connection: load average = %.2f\n",
      (double)load_average/1000.0);
    smtp_printf("421 Too much load; please try again later.\r\n");
    log_write(L_connection_reject,
              LOG_MAIN, "Connection from %s refused: load average = %.2f",
      whofrom, (double)load_average/1000.0);
    goto ERROR_RETURN;
    }
  }

/* Check that one specific host (strictly, IP address) is not hogging
resources. This is done here to prevent a denial of service attack by someone
forcing you to fork lots of times before denying service. The value of
smtp_accept_max_per_host is a string which is expanded. This makes it possible
to provide host-specific limits according to $sender_host address, but because
this is in the daemon mainline, only fast expansions (such as inline address
checks) should be used. The documentation is full of warnings. */

if (smtp_accept_max_per_host != NULL)
  {
  uschar *expanded = expand_string(smtp_accept_max_per_host);
  if (expanded == NULL)
    {
    if (!expand_string_forcedfail)
      log_write(0, LOG_MAIN|LOG_PANIC, "expansion of smtp_accept_max_per_host "
        "failed for %s: %s", whofrom, expand_string_message);
    }
  /* For speed, interpret a decimal number inline here */
  else
    {
    uschar *s = expanded;
    while (isdigit(*s))
      max_for_this_host = max_for_this_host * 10 + *s++ - '0';
    if (*s != 0)
      log_write(0, LOG_MAIN|LOG_PANIC, "expansion of smtp_accept_max_per_host "
        "for %s contains non-digit: %s", whofrom, expanded);
    }
  }

/* If we have fewer connections than max_for_this_host, we can skip the tedious
per host_address checks. Note that at this stage smtp_accept_count contains the
count of *other* connections, not including this one. */

if ((max_for_this_host > 0) &&
    (smtp_accept_count >= max_for_this_host))
  {
  int i;
  int host_accept_count = 0;
  int other_host_count = 0;    /* keep a count of non matches to optimise */

  for (i = 0; i < smtp_accept_max; ++i)
    {
    if (smtp_slots[i].host_address != NULL)
      {
      if (Ustrcmp(sender_host_address, smtp_slots[i].host_address) == 0)
        host_accept_count++;
      else
        other_host_count++;

      /* Testing all these strings is expensive - see if we can drop out
      early, either by hitting the target, or finding there are not enough
      connections left to make the target. */

      if ((host_accept_count >= max_for_this_host) ||
          ((smtp_accept_count - other_host_count) < max_for_this_host))
        break;
      }
    }

  if (host_accept_count >= max_for_this_host)
    {
    DEBUG(D_any) debug_printf("rejecting SMTP connection: too many from this "
      "IP address: count=%d max=%d\n",
      host_accept_count, max_for_this_host);
    smtp_printf("421 Too many concurrent SMTP connections "
      "from this IP address; please try again later.\r\n");
    log_write(L_connection_reject,
              LOG_MAIN, "Connection from %s refused: too many connections "
      "from that IP address", whofrom);
    goto ERROR_RETURN;
    }
  }

/* OK, the connection count checks have been passed. Before we can fork the
accepting process, we must first log the connection if requested. This logging
used to happen in the subprocess, but doing that means that the value of
smtp_accept_count can be out of step by the time it is logged. So we have to do
the logging here and accept the performance cost. Note that smtp_accept_count
hasn't yet been incremented to take account of this connection.

In order to minimize the cost (because this is going to happen for every
connection), do a preliminary selector test here. This saves ploughing through
the generalized logging code each time when the selector is false. If the
selector is set, check whether the host is on the list for logging. If not,
arrange to unset the selector in the subprocess. */

if ((log_write_selector & L_smtp_connection) != 0)
  {
  uschar *list = hosts_connection_nolog;
  if (list != NULL && verify_check_host(&list) == OK)
    use_log_write_selector &= ~L_smtp_connection;
  else
    log_write(L_smtp_connection, LOG_MAIN, "SMTP connection from %s "
      "(TCP/IP connection count = %d)", whofrom, smtp_accept_count + 1);
  }

/* Now we can fork the accepting process; do a lookup tidy, just in case any
expansion above did a lookup. */

search_tidyup();
pid = fork();

/* Handle the child process */

if (pid == 0)
  {
  int i;
  int queue_only_reason = 0;
  int old_pool = store_pool;
  int save_debug_selector = debug_selector;
  BOOL local_queue_only;
  BOOL session_local_queue_only;
#ifdef SA_NOCLDWAIT
  struct sigaction act;
#endif

  smtp_accept_count++;    /* So that it includes this process */

  /* If this process goes on to exec another program (a command run from a
  router or transport, or a ${run...} expansion), that program must not
  inherit the SMTP connection's descriptors, otherwise it can hold the
  connection open after Exim has finished with it. Mark both copies of the
  accepted socket close-on-exec. (fcntl() and FD_CLOEXEC are available via
  exim.h.) */

  (void)fcntl(accept_socket, F_SETFD,
              fcntl(accept_socket, F_GETFD) | FD_CLOEXEC);
  (void)fcntl(dup_accept_socket, F_SETFD,
              fcntl(dup_accept_socket, F_GETFD) | FD_CLOEXEC);

  /* May have been modified for the subprocess */

  log_write_selector = use_log_write_selector;

  /* Get the local interface address into permanent store */

  store_pool = POOL_PERM;
  interface_address = string_copy(interface_address);
  store_pool = old_pool;

  /* Check for a tls-on-connect port */

  if (host_is_tls_on_connect_port(interface_port)) tls_on_connect = TRUE;

  /* Expand smtp_active_hostname if required. We do not do this any earlier,
  because it may depend on the local interface address (indeed, that is most
  likely what it depends on.) */

  smtp_active_hostname = primary_hostname;
  if (raw_active_hostname != NULL)
    {
    uschar *nah = expand_string(raw_active_hostname);
    if (nah == NULL)
      {
      if (!expand_string_forcedfail)
        {
        log_write(0, LOG_MAIN|LOG_PANIC, "failed to expand \"%s\" "
          "(smtp_active_hostname): %s", raw_active_hostname,
          expand_string_message);
        smtp_printf("421 Local configuration error; "
          "please try again later.\r\n");
        mac_smtp_fflush();
        search_tidyup();
        _exit(EXIT_FAILURE);
        }
      }
    else if (nah[0] != 0) smtp_active_hostname = nah;
    }

  /* Initialize the queueing flags */

  queue_check_only();
  session_local_queue_only = queue_only;

  /* Close the listening sockets, and set the SIGCHLD handler to SIG_IGN.
  We also attempt to set things up so that children are automatically reaped,
  but just in case this isn't available, there's a paranoid waitpid() in the
  loop too (except for systems where we are sure it isn't needed). See the more
  extensive comment before the reception loop in exim.c for a fuller
  explanation of this logic. */

  for (i = 0; i < listen_socket_count; i++) (void)close(listen_sockets[i]);

#ifdef SA_NOCLDWAIT
  act.sa_handler = SIG_IGN;
  sigemptyset(&(act.sa_mask));
  act.sa_flags = SA_NOCLDWAIT;
  sigaction(SIGCHLD, &act, NULL);
#else
  signal(SIGCHLD, SIG_IGN);
#endif

  /* Attempt to get an id from the sending machine via the RFC 1413
  protocol. We do this in the sub-process in order not to hold up the
  main process if there is any delay. Then set up the fullhost information
  in case there is no HELO/EHLO.

  If debugging is enabled only for the daemon, we must turn it off while
  finding the id, but turn it on again afterwards so that information about the
  incoming connection is output. */

  if (debug_daemon) debug_selector = 0;
  verify_get_ident(IDENT_PORT);
  host_build_sender_fullhost();
  debug_selector = save_debug_selector;

  DEBUG(D_any)
    debug_printf("Process %d is handling incoming connection from %s\n",
      (int)getpid(), sender_fullhost);

  /* Now disable debugging permanently if it's required only for the daemon
  process. */

  if (debug_daemon) debug_selector = 0;

  /* If there are too many child processes for immediate delivery,
  set the session_local_queue_only flag, which is initialized from the
  configured value and may therefore already be TRUE. Leave logging
  till later so it will have a message id attached. Note that there is no
  possibility of re-calculating this per-message, because the value of
  smtp_accept_count does not change in this subprocess. */

  if (smtp_accept_queue > 0 && smtp_accept_count > smtp_accept_queue)
    {
    session_local_queue_only = TRUE;
    queue_only_reason = 1;
    }

  /* Handle the start of the SMTP session, then loop, accepting incoming
  messages from the SMTP connection. The end will come at the QUIT command,
  when smtp_setup_msg() returns 0. A break in the connection causes the
  process to die (see accept.c).

  NOTE: We do *not* call smtp_log_no_mail() if smtp_start_session() fails,
  because a log line has already been written for all its failure exits
  (usually "connection refused: <reason>") and writing another one is
  unnecessary clutter. */

  if (!smtp_start_session())
    {
    mac_smtp_fflush();
    search_tidyup();
    _exit(EXIT_SUCCESS);
    }

  for (;;)
    {
    int rc;
    message_id[0] = 0;            /* Clear out any previous message_id */
    reset_point = store_get(0);   /* Save current store high water point */

    DEBUG(D_any)
      debug_printf("Process %d is ready for new message\n", (int)getpid());

    /* Smtp_setup_msg() returns 0 on QUIT or if the call is from an
    unacceptable host or if an ACL "drop" command was triggered, -1 on
    connection lost, and +1 on validly reaching DATA. Receive_msg() almost
    always returns TRUE when smtp_input is true; just retry if no message was
    accepted (can happen for invalid message parameters). However, it can yield
    FALSE if the connection was forcibly dropped by the DATA ACL. */

    if ((rc = smtp_setup_msg()) > 0)
      {
      BOOL ok = receive_msg(FALSE);
      search_tidyup();                    /* Close cached databases */
      if (!ok)                            /* Connection was dropped */
        {
        mac_smtp_fflush();
        smtp_log_no_mail();               /* Log no mail if configured */
        _exit(EXIT_SUCCESS);
        }
      if (message_id[0] == 0) continue;   /* No message was accepted */
      }
    else
      {
      mac_smtp_fflush();
      search_tidyup();
      smtp_log_no_mail();                 /* Log no mail if configured */
      _exit((rc == 0)? EXIT_SUCCESS : EXIT_FAILURE);
      }

    /* Show the recipients when debugging */

    DEBUG(D_receive)
      {
      int i;
      if (sender_address != NULL)
        debug_printf("Sender: %s\n", sender_address);
      if (recipients_list != NULL)
        {
        debug_printf("Recipients:\n");
        for (i = 0; i < recipients_count; i++)
          debug_printf("  %s\n", recipients_list[i].address);
        }
      }

    /* A message has been accepted. Clean up any previous delivery processes
    that have completed and are defunct, on systems where they don't go away
    by themselves (see comments when setting SIG_IGN above). On such systems
    (if any) these delivery processes hang around after termination until
    the next message is received. */

#ifndef SIG_IGN_WORKS
    while (waitpid(-1, NULL, WNOHANG) > 0);
#endif

    /* Reclaim the store used in accepting this message */

    store_reset(reset_point);

    /* If queue_only is set or if there are too many incoming connections in
    existence, session_local_queue_only will be TRUE. If it is not, check
    whether we have received too many messages in this session for immediate
    delivery. */

    if (!session_local_queue_only &&
        smtp_accept_queue_per_connection > 0 &&
        receive_messagecount > smtp_accept_queue_per_connection)
      {
      session_local_queue_only = TRUE;
      queue_only_reason = 2;
      }

    /* Initialize local_queue_only from session_local_queue_only. If it is not
    true, and queue_only_load is set, check that the load average is below it.
    If local_queue_only is set by this means, we also set if for the session if
    queue_only_load_latch is true (the default). This means that, once set,
    local_queue_only remains set for any subsequent messages on the same SMTP
    connection. This is a deliberate choice; even though the load average may
    fall, it doesn't seem right to deliver later messages on the same call when
    not delivering earlier ones. However, there are special circumstances such
    as very long-lived connections from scanning appliances where this is not
    the best strategy. In such cases, queue_only_load_latch should be set
    false. */

    local_queue_only = session_local_queue_only;
    if (!local_queue_only && queue_only_load >= 0)
      {
      local_queue_only = (load_average = OS_GETLOADAVG()) > queue_only_load;
      if (local_queue_only)
        {
        queue_only_reason = 3;
        if (queue_only_load_latch) session_local_queue_only = TRUE;
        }
      }

    /* Log the queueing here, when it will get a message id attached, but
    not if queue_only is set (case 0). */

    if (local_queue_only) switch(queue_only_reason)
      {
      case 1:
      log_write(L_delay_delivery,
                LOG_MAIN, "no immediate delivery: too many connections "
                "(%d, max %d)", smtp_accept_count, smtp_accept_queue);
      break;

      case 2:
      log_write(L_delay_delivery,
                LOG_MAIN, "no immediate delivery: more than %d messages "
                "received in one connection", smtp_accept_queue_per_connection);
      break;

      case 3:
      log_write(L_delay_delivery,
                LOG_MAIN, "no immediate delivery: load average %.2f",
                (double)load_average/1000.0);
      break;
      }

    /* If a delivery attempt is required, spin off a new process to handle it.
    If we are not root, we have to re-exec exim unless deliveries are being
    done unprivileged. */

    else if (!queue_only_policy && !deliver_freeze)
      {
      pid_t dpid;

      /* Before forking, ensure that the C output buffer is flushed. Otherwise
      anything in it would get duplicated, leading to duplicate copies of the
      pending output. */

      mac_smtp_fflush();

      if ((dpid = fork()) == 0)
        {
        (void)fclose(smtp_in);
        (void)fclose(smtp_out);

        /* Don't ever molest the parent's SSL connection, but do clean up
        the data structures if necessary. */

#ifdef SUPPORT_TLS
        tls_close(FALSE);
#endif

        /* Reset SIGHUP and SIGCHLD in the child in both cases. */

        signal(SIGHUP,  SIG_DFL);
        signal(SIGCHLD, SIG_DFL);

        if (geteuid() != root_uid && !deliver_drop_privilege)
          {
          signal(SIGALRM, SIG_DFL);
          (void)child_exec_exim(CEE_EXEC_PANIC, FALSE, NULL, FALSE, 2, US"-Mc",
            message_id);
          /* Control does not return here. */
          }

        /* No need to re-exec; SIGALRM remains set to the default handler */

        (void)deliver_message(message_id, FALSE, FALSE);
        search_tidyup();
        _exit(EXIT_SUCCESS);
        }

      if (dpid > 0)
        {
        DEBUG(D_any) debug_printf("forked delivery process %d\n", (int)dpid);
        }
      else
        {
        log_write(0, LOG_MAIN|LOG_PANIC, "daemon: delivery process fork "
          "failed: %s", strerror(errno));
        }
      }
    }
  }

/* Carrying on in the parent daemon process... Can't do much if the fork
failed. Otherwise, keep count of the number of accepting processes and
remember the pid for ticking off when the child completes. */

if (pid < 0)
  {
  never_error(US"daemon: accept process fork failed", US"Fork failed", errno);
  }
else
  {
  int i;
  for (i = 0; i < smtp_accept_max; ++i)
    {
    if (smtp_slots[i].pid <= 0)
      {
      smtp_slots[i].pid = pid;
      if (smtp_accept_max_per_host != NULL)
        smtp_slots[i].host_address = string_copy_malloc(sender_host_address);
      smtp_accept_count++;
      break;
      }
    }
  DEBUG(D_any) debug_printf("%d SMTP accept process%s running\n",
    smtp_accept_count, (smtp_accept_count == 1)? "" : "es");
  }

/* Get here via goto in error cases */

ERROR_RETURN:

/* Close the streams associated with the socket which will also close the
socket fds in this process. We can't do anything if fclose() fails, but
logging brings it to someone's attention. However, "connection reset by peer"
isn't really a problem, so skip that one. On Solaris, a dropped connection can
manifest itself as a broken pipe, so drop that one too. If the streams don't
exist, something went wrong while setting things up. Make sure the socket
descriptors are closed, in order to drop the connection. */

if (smtp_out != NULL)
  {
  if (fclose(smtp_out) != 0 && errno != ECONNRESET && errno != EPIPE)
    log_write(0, LOG_MAIN|LOG_PANIC, "daemon: fclose(smtp_out) failed: %s",
      strerror(errno));
  smtp_out = NULL;
  }
else (void)close(accept_socket);

if (smtp_in != NULL)
  {
  if (fclose(smtp_in) != 0 && errno != ECONNRESET && errno != EPIPE)
    log_write(0, LOG_MAIN|LOG_PANIC, "daemon: fclose(smtp_in) failed: %s",
      strerror(errno));
  smtp_in = NULL;
  }
else (void)close(dup_accept_socket);

/* Release any store used in this process, including the store used for holding
the incoming host address and an expanded active_hostname. */

store_reset(reset_point);
sender_host_address = NULL;
}
"CWE-264"
] | exim | fa32850be0d9e605da1b33305c122f7a59a24650 | 98,286,915,549,784,800,000,000,000,000,000,000,000 | 596 | Set FD_CLOEXEC on SMTP sockets after forking to handle the connection. |
readconf_main(void)
{
int sep = 0;
struct stat statbuf;
uschar *s, *filename;
uschar *list = config_main_filelist;
/* Loop through the possible file names */
while((filename = string_nextinlist(&list, &sep, big_buffer, big_buffer_size))
!= NULL)
{
/* Cut out all the fancy processing unless specifically wanted */
#if defined(CONFIGURE_FILE_USE_NODE) || defined(CONFIGURE_FILE_USE_EUID)
uschar *suffix = filename + Ustrlen(filename);
/* Try for the node-specific file if a node name exists */
#ifdef CONFIGURE_FILE_USE_NODE
struct utsname uts;
if (uname(&uts) >= 0)
{
#ifdef CONFIGURE_FILE_USE_EUID
sprintf(CS suffix, ".%ld.%.256s", (long int)original_euid, uts.nodename);
config_file = Ufopen(filename, "rb");
if (config_file == NULL)
#endif /* CONFIGURE_FILE_USE_EUID */
{
sprintf(CS suffix, ".%.256s", uts.nodename);
config_file = Ufopen(filename, "rb");
}
}
#endif /* CONFIGURE_FILE_USE_NODE */
/* Otherwise, try the generic name, possibly with the euid added */
#ifdef CONFIGURE_FILE_USE_EUID
if (config_file == NULL)
{
sprintf(CS suffix, ".%ld", (long int)original_euid);
config_file = Ufopen(filename, "rb");
}
#endif /* CONFIGURE_FILE_USE_EUID */
/* Finally, try the unadorned name */
if (config_file == NULL)
{
*suffix = 0;
config_file = Ufopen(filename, "rb");
}
#else /* if neither defined */
/* This is the common case when the fancy processing is not included. */
config_file = Ufopen(filename, "rb");
#endif
/* If the file does not exist, continue to try any others. For any other
error, break out (and die). */
if (config_file != NULL || errno != ENOENT) break;
}
/* On success, save the name for verification; config_filename is used when
logging configuration errors (it changes for .included files) whereas
config_main_filename is the name shown by -bP. Failure to open a configuration
file is a serious disaster. */
if (config_file != NULL)
{
config_filename = config_main_filename = string_copy(filename);
}
else
{
if (filename == NULL)
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "non-existent configuration file(s): "
"%s", config_main_filelist);
else
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "%s", string_open_failed(errno,
"configuration file %s", filename));
}
/* Check the status of the file we have opened, unless it was specified on
the command line, in which case privilege was given away at the start. */
if (!config_changed)
{
if (fstat(fileno(config_file), &statbuf) != 0)
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to stat configuration file %s",
big_buffer);
if ((statbuf.st_uid != root_uid /* owner not root */
#ifdef CONFIGURE_OWNER
&& statbuf.st_uid != config_uid /* owner not the special one */
#endif
) || /* or */
(statbuf.st_gid != root_gid /* group not root & */
#ifdef CONFIGURE_GROUP
&& statbuf.st_gid != config_gid /* group not the special one */
#endif
&& (statbuf.st_mode & 020) != 0) || /* group writeable */
/* or */
((statbuf.st_mode & 2) != 0)) /* world writeable */
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Exim configuration file %s has the "
"wrong owner, group, or mode", big_buffer);
}
/* Process the main configuration settings. They all begin with a lower case
letter. If we see something starting with an upper case letter, it is taken as
a macro definition. */
while ((s = get_config_line()) != NULL)
{
if (isupper(s[0])) read_macro_assignment(s);
else if (Ustrncmp(s, "domainlist", 10) == 0)
read_named_list(&domainlist_anchor, &domainlist_count,
MAX_NAMED_LIST, s+10, US"domain list");
else if (Ustrncmp(s, "hostlist", 8) == 0)
read_named_list(&hostlist_anchor, &hostlist_count,
MAX_NAMED_LIST, s+8, US"host list");
else if (Ustrncmp(s, US"addresslist", 11) == 0)
read_named_list(&addresslist_anchor, &addresslist_count,
MAX_NAMED_LIST, s+11, US"address list");
else if (Ustrncmp(s, US"localpartlist", 13) == 0)
read_named_list(&localpartlist_anchor, &localpartlist_count,
MAX_NAMED_LIST, s+13, US"local part list");
else
(void) readconf_handle_option(s, optionlist_config, optionlist_config_size,
NULL, US"main option \"%s\" unknown");
}
/* If local_sender_retain is set, local_from_check must be unset. */
if (local_sender_retain && local_from_check)
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "both local_from_check and "
"local_sender_retain are set; this combination is not allowed");
/* If the timezone string is empty, set it to NULL, implying no TZ variable
wanted. */
if (timezone_string != NULL && *timezone_string == 0) timezone_string = NULL;
/* The max retry interval must not be greater than 24 hours. */
if (retry_interval_max > 24*60*60) retry_interval_max = 24*60*60;
/* remote_max_parallel must be > 0 */
if (remote_max_parallel <= 0) remote_max_parallel = 1;
/* Save the configured setting of freeze_tell, so we can re-instate it at the
start of a new SMTP message. */
freeze_tell_config = freeze_tell;
/* The primary host name may be required for expansion of spool_directory
and log_file_path, so make sure it is set asap. It is obtained from uname(),
but if that yields an unqualified value, make a FQDN by using gethostbyname to
canonize it. Some people like upper case letters in their host names, so we
don't force the case. */
if (primary_hostname == NULL)
{
uschar *hostname;
struct utsname uts;
if (uname(&uts) < 0)
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "uname() failed to yield host name");
hostname = US uts.nodename;
if (Ustrchr(hostname, '.') == NULL)
{
int af = AF_INET;
struct hostent *hostdata;
#if HAVE_IPV6
if (!disable_ipv6 && (dns_ipv4_lookup == NULL ||
match_isinlist(hostname, &dns_ipv4_lookup, 0, NULL, NULL, MCL_DOMAIN,
TRUE, NULL) != OK))
af = AF_INET6;
#else
af = AF_INET;
#endif
for (;;)
{
#if HAVE_IPV6
#if HAVE_GETIPNODEBYNAME
int error_num;
hostdata = getipnodebyname(CS hostname, af, 0, &error_num);
#else
hostdata = gethostbyname2(CS hostname, af);
#endif
#else
hostdata = gethostbyname(CS hostname);
#endif
if (hostdata != NULL)
{
hostname = US hostdata->h_name;
break;
}
if (af == AF_INET) break;
af = AF_INET;
}
}
primary_hostname = string_copy(hostname);
}
/* Set up default value for smtp_active_hostname */
smtp_active_hostname = primary_hostname;
/* If spool_directory wasn't set in the build-time configuration, it must have
got set above. Of course, writing to the log may not work if log_file_path is
not set, but it will at least get to syslog or somewhere, with any luck. */
if (*spool_directory == 0)
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "spool_directory undefined: cannot "
"proceed");
/* Expand the spool directory name; it may, for example, contain the primary
host name. Same comment about failure. */
s = expand_string(spool_directory);
if (s == NULL)
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to expand spool_directory "
"\"%s\": %s", spool_directory, expand_string_message);
spool_directory = s;
/* Expand log_file_path, which must contain "%s" in any component that isn't
the null string or "syslog". It is also allowed to contain one instance of %D.
However, it must NOT contain % followed by anything else. */
if (*log_file_path != 0)
{
uschar *ss, *sss;
int sep = ':'; /* Fixed for log file path */
s = expand_string(log_file_path);
if (s == NULL)
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to expand log_file_path "
"\"%s\": %s", log_file_path, expand_string_message);
ss = s;
while ((sss = string_nextinlist(&ss,&sep,big_buffer,big_buffer_size)) != NULL)
{
uschar *t;
if (sss[0] == 0 || Ustrcmp(sss, "syslog") == 0) continue;
t = Ustrstr(sss, "%s");
if (t == NULL)
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "log_file_path \"%s\" does not "
"contain \"%%s\"", sss);
*t = 'X';
t = Ustrchr(sss, '%');
if (t != NULL)
{
if (t[1] != 'D' || Ustrchr(t+2, '%') != NULL)
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "log_file_path \"%s\" contains "
"unexpected \"%%\" character", s);
}
}
log_file_path = s;
}
/* Interpret syslog_facility into an integer argument for 'ident' param to
openlog(). Default is LOG_MAIL set in globals.c. Allow the user to omit the
leading "log_". */
if (syslog_facility_str != NULL)
{
int i;
uschar *s = syslog_facility_str;
if ((Ustrlen(syslog_facility_str) >= 4) &&
(strncmpic(syslog_facility_str, US"log_", 4) == 0))
s += 4;
for (i = 0; i < syslog_list_size; i++)
{
if (strcmpic(s, syslog_list[i].name) == 0)
{
syslog_facility = syslog_list[i].value;
break;
}
}
if (i >= syslog_list_size)
{
log_write(0, LOG_PANIC_DIE|LOG_CONFIG,
"failed to interpret syslog_facility \"%s\"", syslog_facility_str);
}
}
/* Expand pid_file_path */
if (*pid_file_path != 0)
{
s = expand_string(pid_file_path);
if (s == NULL)
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to expand pid_file_path "
"\"%s\": %s", pid_file_path, expand_string_message);
pid_file_path = s;
}
/* Compile the regex for matching a UUCP-style "From_" line in an incoming
message. */
regex_From = regex_must_compile(uucp_from_pattern, FALSE, TRUE);
/* Unpick the SMTP rate limiting options, if set */
if (smtp_ratelimit_mail != NULL)
{
unpick_ratelimit(smtp_ratelimit_mail, &smtp_rlm_threshold,
&smtp_rlm_base, &smtp_rlm_factor, &smtp_rlm_limit);
}
if (smtp_ratelimit_rcpt != NULL)
{
unpick_ratelimit(smtp_ratelimit_rcpt, &smtp_rlr_threshold,
&smtp_rlr_base, &smtp_rlr_factor, &smtp_rlr_limit);
}
/* The qualify domains default to the primary host name */
if (qualify_domain_sender == NULL)
qualify_domain_sender = primary_hostname;
if (qualify_domain_recipient == NULL)
qualify_domain_recipient = qualify_domain_sender;
/* Setting system_filter_user in the configuration sets the gid as well if a
name is given, but a numerical value does not. */
if (system_filter_uid_set && !system_filter_gid_set)
{
struct passwd *pw = getpwuid(system_filter_uid);
if (pw == NULL)
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Failed to look up uid %ld",
(long int)system_filter_uid);
system_filter_gid = pw->pw_gid;
system_filter_gid_set = TRUE;
}
/* If the errors_reply_to field is set, check that it is syntactically valid
and ensure it contains a domain. */
if (errors_reply_to != NULL)
{
uschar *errmess;
int start, end, domain;
uschar *recipient = parse_extract_address(errors_reply_to, &errmess,
&start, &end, &domain, FALSE);
if (recipient == NULL)
log_write(0, LOG_PANIC_DIE|LOG_CONFIG,
"error in errors_reply_to (%s): %s", errors_reply_to, errmess);
if (domain == 0)
log_write(0, LOG_PANIC_DIE|LOG_CONFIG,
"errors_reply_to (%s) does not contain a domain", errors_reply_to);
}
/* If smtp_accept_queue or smtp_accept_max_per_host is set, then
smtp_accept_max must also be set. */
if (smtp_accept_max == 0 &&
(smtp_accept_queue > 0 || smtp_accept_max_per_host != NULL))
log_write(0, LOG_PANIC_DIE|LOG_CONFIG,
"smtp_accept_max must be set if smtp_accept_queue or "
"smtp_accept_max_per_host is set");
/* Set up the host number if anything is specified. It is an expanded string
so that it can be computed from the host name, for example. We do this last
so as to ensure that everything else is set up before the expansion. */
if (host_number_string != NULL)
{
uschar *end;
uschar *s = expand_string(host_number_string);
long int n = Ustrtol(s, &end, 0);
while (isspace(*end)) end++;
if (*end != 0)
log_write(0, LOG_PANIC_DIE|LOG_CONFIG,
"localhost_number value is not a number: %s", s);
if (n > LOCALHOST_MAX)
log_write(0, LOG_PANIC_DIE|LOG_CONFIG,
"localhost_number is greater than the maximum allowed value (%d)",
LOCALHOST_MAX);
host_number = n;
}
#ifdef SUPPORT_TLS
/* If tls_verify_hosts is set, tls_verify_certificates must also be set */
if ((tls_verify_hosts != NULL || tls_try_verify_hosts != NULL) &&
tls_verify_certificates == NULL)
log_write(0, LOG_PANIC_DIE|LOG_CONFIG,
"tls_%sverify_hosts is set, but tls_verify_certificates is not set",
(tls_verify_hosts != NULL)? "" : "try_");
/* If openssl_options is set, validate it */
if (openssl_options != NULL)
{
# ifdef USE_GNUTLS
log_write(0, LOG_PANIC_DIE|LOG_CONFIG,
"openssl_options is set but we're using GnuTLS");
# else
long dummy;
if (!(tls_openssl_options_parse(openssl_options, &dummy)))
log_write(0, LOG_PANIC_DIE|LOG_CONFIG,
"openssl_options parse error: %s", openssl_options);
# endif
}
#endif
} | 1 | [
"CWE-264"
] | exim | e2f5dc151e2e79058e93924e6d35510557f0535d | 27,874,412,724,630,780,000,000,000,000,000,000,000 | 426 | Check configure file permissions even for non-default files if still privileged
(Bug 1044, CVE-2010-4345) |
/*
 * readconf_main: read and validate Exim's main configuration file.
 *
 * Locates the configuration file (trying node-/euid-specific variants when
 * compiled in), opens it, checks its ownership/permissions, then processes
 * every main-section option line, macro definition, and named list.  On the
 * way out it derives a number of global settings (primary_hostname, spool
 * and log paths, syslog facility, rate limits, qualify domains, etc.).
 *
 * Returns nothing; every failure path panics via log_write(... PANIC_DIE ...)
 * and does not return.  Reads and writes many exim globals (config_file,
 * config_filename, primary_hostname, spool_directory, log_file_path, ...).
 */
readconf_main(void)
{
int sep = 0;
struct stat statbuf;
uschar *s, *filename;
uschar *list = config_main_filelist;
/* Loop through the possible file names */
while((filename = string_nextinlist(&list, &sep, big_buffer, big_buffer_size))
!= NULL)
{
/* Cut out all the fancy processing unless specifically wanted */
#if defined(CONFIGURE_FILE_USE_NODE) || defined(CONFIGURE_FILE_USE_EUID)
uschar *suffix = filename + Ustrlen(filename);
/* Try for the node-specific file if a node name exists */
#ifdef CONFIGURE_FILE_USE_NODE
struct utsname uts;
if (uname(&uts) >= 0)
{
#ifdef CONFIGURE_FILE_USE_EUID
sprintf(CS suffix, ".%ld.%.256s", (long int)original_euid, uts.nodename);
config_file = Ufopen(filename, "rb");
/* Note: this "if" guards the following brace-block only when the EUID
variant is compiled in; otherwise the block runs unconditionally. */
if (config_file == NULL)
#endif /* CONFIGURE_FILE_USE_EUID */
{
sprintf(CS suffix, ".%.256s", uts.nodename);
config_file = Ufopen(filename, "rb");
}
}
#endif /* CONFIGURE_FILE_USE_NODE */
/* Otherwise, try the generic name, possibly with the euid added */
#ifdef CONFIGURE_FILE_USE_EUID
if (config_file == NULL)
{
sprintf(CS suffix, ".%ld", (long int)original_euid);
config_file = Ufopen(filename, "rb");
}
#endif /* CONFIGURE_FILE_USE_EUID */
/* Finally, try the unadorned name */
if (config_file == NULL)
{
*suffix = 0;
config_file = Ufopen(filename, "rb");
}
#else /* if neither defined */
/* This is the common case when the fancy processing is not included. */
config_file = Ufopen(filename, "rb");
#endif
/* If the file does not exist, continue to try any others. For any other
error, break out (and die). */
if (config_file != NULL || errno != ENOENT) break;
}
/* On success, save the name for verification; config_filename is used when
logging configuration errors (it changes for .included files) whereas
config_main_filename is the name shown by -bP. Failure to open a configuration
file is a serious disaster. */
if (config_file != NULL)
{
config_filename = config_main_filename = string_copy(filename);
}
else
{
if (filename == NULL)
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "non-existent configuration file(s): "
"%s", config_main_filelist);
else
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "%s", string_open_failed(errno,
"configuration file %s", filename));
}
/* Check the status of the file we have opened, unless it was specified on
the command line, in which case privilege was given away at the start. */
/* NOTE(review): skipping the permission check whenever config_changed is set,
and accepting exim_uid as owner / exim_gid group-writability below, allowed an
attacker who controls the exim user to feed a malicious config to a still-
privileged exim (CVE-2010-4345).  Upstream later checked non-default files too
and disallowed exim-user/group-writable configs -- confirm against current
exim before relying on this logic. */
if (!config_changed)
{
/* NOTE(review): these two panic messages log big_buffer rather than
filename/config_main_filename; big_buffer happens to hold the last-tried
name here, but that is fragile -- verify. */
if (fstat(fileno(config_file), &statbuf) != 0)
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to stat configuration file %s",
big_buffer);
if ((statbuf.st_uid != root_uid && /* owner not root */
statbuf.st_uid != exim_uid /* owner not exim */
#ifdef CONFIGURE_OWNER
&& statbuf.st_uid != config_uid /* owner not the special one */
#endif
) || /* or */
(statbuf.st_gid != exim_gid /* group not exim & */
#ifdef CONFIGURE_GROUP
&& statbuf.st_gid != config_gid /* group not the special one */
#endif
&& (statbuf.st_mode & 020) != 0) || /* group writeable */
/* or */
((statbuf.st_mode & 2) != 0)) /* world writeable */
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Exim configuration file %s has the "
"wrong owner, group, or mode", big_buffer);
}
/* Process the main configuration settings. They all begin with a lower case
letter. If we see something starting with an upper case letter, it is taken as
a macro definition. */
while ((s = get_config_line()) != NULL)
{
if (isupper(s[0])) read_macro_assignment(s);
else if (Ustrncmp(s, "domainlist", 10) == 0)
read_named_list(&domainlist_anchor, &domainlist_count,
MAX_NAMED_LIST, s+10, US"domain list");
else if (Ustrncmp(s, "hostlist", 8) == 0)
read_named_list(&hostlist_anchor, &hostlist_count,
MAX_NAMED_LIST, s+8, US"host list");
else if (Ustrncmp(s, US"addresslist", 11) == 0)
read_named_list(&addresslist_anchor, &addresslist_count,
MAX_NAMED_LIST, s+11, US"address list");
else if (Ustrncmp(s, US"localpartlist", 13) == 0)
read_named_list(&localpartlist_anchor, &localpartlist_count,
MAX_NAMED_LIST, s+13, US"local part list");
else
(void) readconf_handle_option(s, optionlist_config, optionlist_config_size,
NULL, US"main option \"%s\" unknown");
}
/* If local_sender_retain is set, local_from_check must be unset. */
if (local_sender_retain && local_from_check)
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "both local_from_check and "
"local_sender_retain are set; this combination is not allowed");
/* If the timezone string is empty, set it to NULL, implying no TZ variable
wanted. */
if (timezone_string != NULL && *timezone_string == 0) timezone_string = NULL;
/* The max retry interval must not be greater than 24 hours. */
if (retry_interval_max > 24*60*60) retry_interval_max = 24*60*60;
/* remote_max_parallel must be > 0 */
if (remote_max_parallel <= 0) remote_max_parallel = 1;
/* Save the configured setting of freeze_tell, so we can re-instate it at the
start of a new SMTP message. */
freeze_tell_config = freeze_tell;
/* The primary host name may be required for expansion of spool_directory
and log_file_path, so make sure it is set asap. It is obtained from uname(),
but if that yields an unqualified value, make a FQDN by using gethostbyname to
canonize it. Some people like upper case letters in their host names, so we
don't force the case. */
if (primary_hostname == NULL)
{
uschar *hostname;
struct utsname uts;
if (uname(&uts) < 0)
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "uname() failed to yield host name");
hostname = US uts.nodename;
if (Ustrchr(hostname, '.') == NULL)
{
int af = AF_INET;
struct hostent *hostdata;
#if HAVE_IPV6
if (!disable_ipv6 && (dns_ipv4_lookup == NULL ||
match_isinlist(hostname, &dns_ipv4_lookup, 0, NULL, NULL, MCL_DOMAIN,
TRUE, NULL) != OK))
af = AF_INET6;
#else
af = AF_INET;
#endif
/* Try IPv6 first (when enabled), then fall back to IPv4 once. */
for (;;)
{
#if HAVE_IPV6
#if HAVE_GETIPNODEBYNAME
int error_num;
hostdata = getipnodebyname(CS hostname, af, 0, &error_num);
#else
hostdata = gethostbyname2(CS hostname, af);
#endif
#else
hostdata = gethostbyname(CS hostname);
#endif
if (hostdata != NULL)
{
hostname = US hostdata->h_name;
break;
}
if (af == AF_INET) break;
af = AF_INET;
}
}
primary_hostname = string_copy(hostname);
}
/* Set up default value for smtp_active_hostname */
smtp_active_hostname = primary_hostname;
/* If spool_directory wasn't set in the build-time configuration, it must have
got set above. Of course, writing to the log may not work if log_file_path is
not set, but it will at least get to syslog or somewhere, with any luck. */
if (*spool_directory == 0)
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "spool_directory undefined: cannot "
"proceed");
/* Expand the spool directory name; it may, for example, contain the primary
host name. Same comment about failure. */
s = expand_string(spool_directory);
if (s == NULL)
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to expand spool_directory "
"\"%s\": %s", spool_directory, expand_string_message);
spool_directory = s;
/* Expand log_file_path, which must contain "%s" in any component that isn't
the null string or "syslog". It is also allowed to contain one instance of %D.
However, it must NOT contain % followed by anything else. */
if (*log_file_path != 0)
{
uschar *ss, *sss;
int sep = ':'; /* Fixed for log file path */
s = expand_string(log_file_path);
if (s == NULL)
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to expand log_file_path "
"\"%s\": %s", log_file_path, expand_string_message);
ss = s;
while ((sss = string_nextinlist(&ss,&sep,big_buffer,big_buffer_size)) != NULL)
{
uschar *t;
if (sss[0] == 0 || Ustrcmp(sss, "syslog") == 0) continue;
t = Ustrstr(sss, "%s");
if (t == NULL)
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "log_file_path \"%s\" does not "
"contain \"%%s\"", sss);
/* Blank out the mandatory %s so the scan below only sees any extra %
occurrences. */
*t = 'X';
t = Ustrchr(sss, '%');
if (t != NULL)
{
if (t[1] != 'D' || Ustrchr(t+2, '%') != NULL)
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "log_file_path \"%s\" contains "
"unexpected \"%%\" character", s);
}
}
log_file_path = s;
}
/* Interpret syslog_facility into an integer argument for 'ident' param to
openlog(). Default is LOG_MAIL set in globals.c. Allow the user to omit the
leading "log_". */
if (syslog_facility_str != NULL)
{
int i;
uschar *s = syslog_facility_str;
if ((Ustrlen(syslog_facility_str) >= 4) &&
(strncmpic(syslog_facility_str, US"log_", 4) == 0))
s += 4;
for (i = 0; i < syslog_list_size; i++)
{
if (strcmpic(s, syslog_list[i].name) == 0)
{
syslog_facility = syslog_list[i].value;
break;
}
}
if (i >= syslog_list_size)
{
log_write(0, LOG_PANIC_DIE|LOG_CONFIG,
"failed to interpret syslog_facility \"%s\"", syslog_facility_str);
}
}
/* Expand pid_file_path */
if (*pid_file_path != 0)
{
s = expand_string(pid_file_path);
if (s == NULL)
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "failed to expand pid_file_path "
"\"%s\": %s", pid_file_path, expand_string_message);
pid_file_path = s;
}
/* Compile the regex for matching a UUCP-style "From_" line in an incoming
message. */
regex_From = regex_must_compile(uucp_from_pattern, FALSE, TRUE);
/* Unpick the SMTP rate limiting options, if set */
if (smtp_ratelimit_mail != NULL)
{
unpick_ratelimit(smtp_ratelimit_mail, &smtp_rlm_threshold,
&smtp_rlm_base, &smtp_rlm_factor, &smtp_rlm_limit);
}
if (smtp_ratelimit_rcpt != NULL)
{
unpick_ratelimit(smtp_ratelimit_rcpt, &smtp_rlr_threshold,
&smtp_rlr_base, &smtp_rlr_factor, &smtp_rlr_limit);
}
/* The qualify domains default to the primary host name */
if (qualify_domain_sender == NULL)
qualify_domain_sender = primary_hostname;
if (qualify_domain_recipient == NULL)
qualify_domain_recipient = qualify_domain_sender;
/* Setting system_filter_user in the configuration sets the gid as well if a
name is given, but a numerical value does not. */
if (system_filter_uid_set && !system_filter_gid_set)
{
struct passwd *pw = getpwuid(system_filter_uid);
if (pw == NULL)
log_write(0, LOG_MAIN|LOG_PANIC_DIE, "Failed to look up uid %ld",
(long int)system_filter_uid);
system_filter_gid = pw->pw_gid;
system_filter_gid_set = TRUE;
}
/* If the errors_reply_to field is set, check that it is syntactically valid
and ensure it contains a domain. */
if (errors_reply_to != NULL)
{
uschar *errmess;
int start, end, domain;
uschar *recipient = parse_extract_address(errors_reply_to, &errmess,
&start, &end, &domain, FALSE);
if (recipient == NULL)
log_write(0, LOG_PANIC_DIE|LOG_CONFIG,
"error in errors_reply_to (%s): %s", errors_reply_to, errmess);
if (domain == 0)
log_write(0, LOG_PANIC_DIE|LOG_CONFIG,
"errors_reply_to (%s) does not contain a domain", errors_reply_to);
}
/* If smtp_accept_queue or smtp_accept_max_per_host is set, then
smtp_accept_max must also be set. */
if (smtp_accept_max == 0 &&
(smtp_accept_queue > 0 || smtp_accept_max_per_host != NULL))
log_write(0, LOG_PANIC_DIE|LOG_CONFIG,
"smtp_accept_max must be set if smtp_accept_queue or "
"smtp_accept_max_per_host is set");
/* Set up the host number if anything is specified. It is an expanded string
so that it can be computed from the host name, for example. We do this last
so as to ensure that everything else is set up before the expansion. */
if (host_number_string != NULL)
{
uschar *end;
/* NOTE(review): expand_string() may return NULL on failure (as handled for
spool_directory above); here the result is passed straight to Ustrtol --
verify that a failed expansion cannot reach this point. */
uschar *s = expand_string(host_number_string);
long int n = Ustrtol(s, &end, 0);
while (isspace(*end)) end++;
if (*end != 0)
log_write(0, LOG_PANIC_DIE|LOG_CONFIG,
"localhost_number value is not a number: %s", s);
if (n > LOCALHOST_MAX)
log_write(0, LOG_PANIC_DIE|LOG_CONFIG,
"localhost_number is greater than the maximum allowed value (%d)",
LOCALHOST_MAX);
host_number = n;
}
#ifdef SUPPORT_TLS
/* If tls_verify_hosts is set, tls_verify_certificates must also be set */
if ((tls_verify_hosts != NULL || tls_try_verify_hosts != NULL) &&
tls_verify_certificates == NULL)
log_write(0, LOG_PANIC_DIE|LOG_CONFIG,
"tls_%sverify_hosts is set, but tls_verify_certificates is not set",
(tls_verify_hosts != NULL)? "" : "try_");
/* If openssl_options is set, validate it */
if (openssl_options != NULL)
{
# ifdef USE_GNUTLS
log_write(0, LOG_PANIC_DIE|LOG_CONFIG,
"openssl_options is set but we're using GnuTLS");
# else
long dummy;
if (!(tls_openssl_options_parse(openssl_options, &dummy)))
log_write(0, LOG_PANIC_DIE|LOG_CONFIG,
"openssl_options parse error: %s", openssl_options);
# endif
}
#endif
}
"CWE-264"
] | exim | c1d94452b1b7f3620ee3cc9aa197ad98821de79f | 280,784,835,408,161,930,000,000,000,000,000,000,000 | 427 | Don't allow a configure file which is writeable by the Exim user or group
(Bug 1044, CVE-2010-4345) |
/*
 * xmlXPathCompOpEvalPositionalPredicate:
 * Filter the node set @set through a positional predicate, keeping only
 * the nodes whose predicate-match position lies in [@minPos, @maxPos].
 * If op->ch1 is set, an inner (non-positional) predicate is applied first.
 *
 * Returns the number of nodes remaining in @set (0 on error or when the
 * set is emptied).  On error the set is cleared.  The caller's context
 * (node, doc, contextSize, proximityPosition) is restored before return.
 *
 * Fix: in the expression-error branch, the object popped off the value
 * stack may be contextObj itself (when the expression consumed or returned
 * the context object).  The old code released it unconditionally and then
 * released contextObj again at evaluation_exit -- a double free
 * (CVE-2010-4494).  Only release and re-pop when the two differ.
 */
static int
xmlXPathCompOpEvalPositionalPredicate(xmlXPathParserContextPtr ctxt,
                                      xmlXPathStepOpPtr op,
                                      xmlNodeSetPtr set,
                                      int contextSize,
                                      int minPos,
                                      int maxPos,
                                      int hasNsNodes)
{
    if (op->ch1 != -1) {
        xmlXPathCompExprPtr comp = ctxt->comp;
        if (comp->steps[op->ch1].op != XPATH_OP_PREDICATE) {
            /*
             * TODO: raise an internal error.
             */
        }
        contextSize = xmlXPathCompOpEvalPredicate(ctxt,
            &comp->steps[op->ch1], set, contextSize, hasNsNodes);
        CHECK_ERROR0;
        if (contextSize <= 0)
            return(0);
    }
    /*
     * Check if the node set contains a sufficient number of nodes for
     * the requested range.
     */
    if (contextSize < minPos) {
        xmlXPathNodeSetClear(set, hasNsNodes);
        return(0);
    }
    if (op->ch2 == -1) {
        /*
         * TODO: Can this ever happen?
         */
        return (contextSize);
    } else {
        xmlDocPtr oldContextDoc;
        int i, pos = 0, newContextSize = 0, contextPos = 0, res;
        xmlXPathStepOpPtr exprOp;
        xmlXPathObjectPtr contextObj = NULL, exprRes = NULL;
        xmlNodePtr oldContextNode, contextNode = NULL;
        xmlXPathContextPtr xpctxt = ctxt->context;

#ifdef LIBXML_XPTR_ENABLED
        /*
         * URGENT TODO: Check the following:
         * We don't expect location sets if evaluating prediates, right?
         * Only filters should expect location sets, right?
         */
#endif /* LIBXML_XPTR_ENABLED */

        /*
         * Save old context.
         */
        oldContextNode = xpctxt->node;
        oldContextDoc = xpctxt->doc;
        /*
         * Get the expression of this predicate.
         */
        exprOp = &ctxt->comp->steps[op->ch2];
        for (i = 0; i < set->nodeNr; i++) {
            if (set->nodeTab[i] == NULL)
                continue;

            contextNode = set->nodeTab[i];
            xpctxt->node = contextNode;
            xpctxt->contextSize = contextSize;
            xpctxt->proximityPosition = ++contextPos;

            /*
             * Initialize the new set.
             * Also set the xpath document in case things like
             * key() evaluation are attempted on the predicate
             */
            if ((contextNode->type != XML_NAMESPACE_DECL) &&
                (contextNode->doc != NULL))
                xpctxt->doc = contextNode->doc;
            /*
             * Evaluate the predicate expression with 1 context node
             * at a time; this node is packaged into a node set; this
             * node set is handed over to the evaluation mechanism.
             */
            if (contextObj == NULL)
                contextObj = xmlXPathCacheNewNodeSet(xpctxt, contextNode);
            else
                xmlXPathNodeSetAddUnique(contextObj->nodesetval,
                    contextNode);

            valuePush(ctxt, contextObj);
            res = xmlXPathCompOpEvalToBoolean(ctxt, exprOp, 1);

            if ((ctxt->error != XPATH_EXPRESSION_OK) || (res == -1)) {
                xmlXPathObjectPtr tmp;
                /* pop the result if any */
                tmp = valuePop(ctxt);
                if (tmp != contextObj) {
                    /*
                     * Free up the result, then pop off contextObj, which
                     * will be freed later.  When tmp == contextObj the
                     * expression consumed/returned the context object
                     * itself; releasing it here and again at
                     * evaluation_exit would be a double free.
                     */
                    xmlXPathReleaseObject(xpctxt, tmp);
                    valuePop(ctxt);
                }
                goto evaluation_error;
            }

            if (res)
                pos++;

            if (res && (pos >= minPos) && (pos <= maxPos)) {
                /*
                 * Fits in the requested range.
                 */
                newContextSize++;
                if (minPos == maxPos) {
                    /*
                     * Only 1 node was requested.
                     */
                    if (contextNode->type == XML_NAMESPACE_DECL) {
                        /*
                         * As always: take care of those nasty
                         * namespace nodes.
                         */
                        set->nodeTab[i] = NULL;
                    }
                    xmlXPathNodeSetClear(set, hasNsNodes);
                    set->nodeNr = 1;
                    set->nodeTab[0] = contextNode;
                    goto evaluation_exit;
                }
                if (pos == maxPos) {
                    /*
                     * We are done.
                     */
                    xmlXPathNodeSetClearFromPos(set, i +1, hasNsNodes);
                    goto evaluation_exit;
                }
            } else {
                /*
                 * Remove the entry from the initial node set.
                 */
                set->nodeTab[i] = NULL;
                if (contextNode->type == XML_NAMESPACE_DECL)
                    xmlXPathNodeSetFreeNs((xmlNsPtr) contextNode);
            }
            if (exprRes != NULL) {
                xmlXPathReleaseObject(ctxt->context, exprRes);
                exprRes = NULL;
            }
            if (ctxt->value == contextObj) {
                /*
                 * Don't free the temporary XPath object holding the
                 * context node, in order to avoid massive recreation
                 * inside this loop.
                 */
                valuePop(ctxt);
                xmlXPathNodeSetClear(contextObj->nodesetval, hasNsNodes);
            } else {
                /*
                 * The object was lost in the evaluation machinery.
                 * Can this happen? Maybe in case of internal-errors.
                 */
                contextObj = NULL;
            }
        }
        goto evaluation_exit;

evaluation_error:
        xmlXPathNodeSetClear(set, hasNsNodes);
        newContextSize = 0;

evaluation_exit:
        if (contextObj != NULL) {
            if (ctxt->value == contextObj)
                valuePop(ctxt);
            xmlXPathReleaseObject(xpctxt, contextObj);
        }
        if (exprRes != NULL)
            xmlXPathReleaseObject(ctxt->context, exprRes);
        /*
         * Reset/invalidate the context.
         */
        xpctxt->node = oldContextNode;
        xpctxt->doc = oldContextDoc;
        xpctxt->contextSize = -1;
        xpctxt->proximityPosition = -1;
        return(newContextSize);
    }
}
"CWE-415"
] | libxml2 | df83c17e5a2646bd923f75e5e507bc80d73c9722 | 3,326,770,072,555,090,600,000,000,000,000,000,000 | 184 | Fix a potential freeing error in XPath |
/*
 * tfm_load_file:
 * Parse a TeX Font Metric (.tfm) file into @info.
 *
 * The whole file is slurped into a word-aligned buffer; the header counts
 * (lf, lh, bc, ec, nw, nh, nd, ne, ...) are validated against each other
 * and against the real file size BEFORE any table is dereferenced, so a
 * corrupt header cannot drive reads outside the buffer.
 *
 * Returns 0 on success, -1 on any error (bad file, I/O failure).
 *
 * Fixes vs. the previous revision:
 *  - fstat() failure no longer leaks the open FILE*.
 *  - header validated before widths[0]/heights[0]/depths[0] are read,
 *    and the advertised word count must fit inside the file.
 *  - family-name copy clamps to 63 bytes (was Max(), a buffer overflow
 *    into info->family for names longer than 63 bytes).
 *  - per-char table indices are clamped to the table sizes.
 */
int tfm_load_file(const char *filename, TFMInfo *info)
{
    int     lf, lh, bc, ec, nw, nh, nd, ne;
    int     i, n;
    Uchar   *tfm;
    Uchar   *ptr;
    struct stat st;
    int     size;
    FILE    *in;
    Int32   *cb;
    Int32   *charinfo;
    Int32   *widths;
    Int32   *heights;
    Int32   *depths;
    Uint32  checksum;

    in = fopen(filename, "rb");
    if(in == NULL)
        return -1;
    tfm = NULL;
    DEBUG((DBG_FONTS, "(mt) reading TFM file `%s'\n",
           filename));
    /* We read the entire TFM file into core */
    if(fstat(fileno(in), &st) < 0)
        goto error;     /* was `return -1', which leaked `in' */
    if(st.st_size == 0)
        goto bad_tfm;
    /* allocate a word-aligned buffer to hold the file */
    size = 4 * ROUND(st.st_size, 4);
    if(size != st.st_size)
        mdvi_warning(_("Warning: TFM file `%s' has suspicious size\n"),
                     filename);
    tfm = (Uchar *)mdvi_malloc(size);
    if(fread(tfm, st.st_size, 1, in) != 1)
        goto error;
    /* we don't need this anymore */
    fclose(in);
    in = NULL;
    /* not a checksum, but serves a similar purpose */
    checksum = 0;

    ptr = tfm;
    /* get the counters */
    lf = muget2(ptr);
    lh = muget2(ptr); checksum += 6 + lh;
    bc = muget2(ptr);
    ec = muget2(ptr); checksum += ec - bc + 1;
    nw = muget2(ptr); checksum += nw;
    nh = muget2(ptr); checksum += nh;
    nd = muget2(ptr); checksum += nd;
    checksum += muget2(ptr); /* skip italics correction count */
    checksum += muget2(ptr); /* skip lig/kern table size */
    checksum += muget2(ptr); /* skip kern table size */
    ne = muget2(ptr); checksum += ne;
    checksum += muget2(ptr); /* skip # of font parameters */

    /* Validate the header BEFORE deriving any table pointer: the section
     * sizes must add up to the advertised word count `lf', `lf' words must
     * actually be present in the file, and the char range must be sane.
     * Otherwise the pointers below could point outside the buffer. */
    if(checksum != lf || (long)lf * 4 > (long)st.st_size ||
       bc - 1 > ec || ec > 255 || ne > 256)
        goto bad_tfm;

    size = ec - bc + 1;
    cb = (Int32 *)tfm; cb += 6 + lh;
    charinfo = cb;     cb += size;
    widths   = cb;     cb += nw;
    heights  = cb;     cb += nh;
    depths   = cb;
    /* the first width/height/depth entry must be zero by definition */
    if(widths[0] || heights[0] || depths[0])
        goto bad_tfm;

    /* from this point on, no error checking is done */

    /* now we're at the header */
    /* get the checksum */
    info->checksum = muget4(ptr);
    /* get the design size */
    info->design = muget4(ptr);
    /* get the coding scheme */
    if(lh > 2) {
        /* get the coding scheme */
        i = n = msget1(ptr);
        if(n < 0 || n > 39) {
            mdvi_warning(_("%s: font coding scheme truncated to 40 bytes\n"),
                         filename);
            n = 39;
        }
        memcpy(info->coding, ptr, n);
        info->coding[n] = 0;
        ptr += i;
    } else
        strcpy(info->coding, "FontSpecific");
    /* get the font family */
    if(lh > 12) {
        n = msget1(ptr);
        if(n > 0) {
            /* clamp to the capacity of info->family; the previous code
             * used Max() here and overflowed on long names */
            i = Min(n, 63);
            memcpy(info->family, ptr, i);
            info->family[i] = 0;
        } else
            strcpy(info->family, "unspecified");
        ptr += n;
    }
    /* now we don't read from `ptr' anymore */

    info->loc = bc;
    info->hic = ec;
    info->type = DviFontTFM;

    /* allocate characters */
    info->chars = xnalloc(TFMChar, size);

#ifdef WORD_LITTLE_ENDIAN
    /* byte-swap the three arrays at once (they are consecutive in memory) */
    swap_array((Uint32 *)widths, nw + nh + nd);
#endif
    /* get the relevant data */
    ptr = (Uchar *)charinfo;
    for(i = bc; i <= ec; ptr += 3, i++) {
        int ndx;

        ndx = (int)*ptr; ptr++;
        /* a corrupt charinfo entry must not index past the width table */
        if(ndx >= nw)
            ndx = 0;
        info->chars[i-bc].advance = widths[ndx];
        /* TFM files lack this information */
        info->chars[i-bc].left = 0;
        info->chars[i-bc].right = widths[ndx];
        info->chars[i-bc].present = (ndx != 0);
        if(ndx) {
            ndx = ((*ptr >> 4) & 0xf);
            if(ndx >= nh)
                ndx = 0;
            info->chars[i-bc].height = heights[ndx];
            ndx = (*ptr & 0xf);
            if(ndx >= nd)
                ndx = 0;
            info->chars[i-bc].depth = depths[ndx];
        }
    }

    /* free everything */
    mdvi_free(tfm);

    return 0;

bad_tfm:
    mdvi_error(_("%s: File corrupted, or not a TFM file\n"), filename);
error:
    if(tfm) mdvi_free(tfm);
    if(in) fclose(in);
    return -1;
}
"CWE-20"
] | evince | d4139205b010ed06310d14284e63114e88ec6de2 | 299,620,427,221,326,900,000,000,000,000,000,000,000 | 148 | backends: Fix several security issues in the dvi-backend.
See CVE-2010-2640, CVE-2010-2641, CVE-2010-2642 and CVE-2010-2643. |
/*
 * pk_load_font:
 * Scan a PK (packed bitmap) font file and fill in font->chars with the
 * location and geometry of every glyph packet; the raster data itself is
 * loaded lazily later (each char records its file offset).
 *
 * Returns 0 on success, -1 on error (font->chars freed and NULLed).
 *
 * Fixes vs. the previous revision:
 *  - the NODEBUG preamble-skip used `in', a variable that does not exist
 *    in this function (the stream is `p').
 *  - the character code read from the file (32-bit in the long form) is
 *    range-checked before it is used to size the chars[] array.
 *  - a PK file defining no characters is rejected; previously the final
 *    memmove() was passed a negative (i.e. huge unsigned) byte count.
 */
static int pk_load_font(DviParams *unused, DviFont *font)
{
    int     i;
    int     flag_byte;
    int     loc, hic, maxch;
    Int32   checksum;
    FILE    *p;
#ifndef NODEBUG
    char    s[256];
#endif
    long    alpha, beta, z;

    font->chars = xnalloc(DviFontChar, 256);
    p = font->in;
    memzero(font->chars, 256 * sizeof(DviFontChar));
    for(i = 0; i < 256; i++)
        font->chars[i].offset = 0;

    /* check the preamble */
    loc = fuget1(p); hic = fuget1(p);
    if(loc != PK_PRE || hic != PK_ID)
        goto badpk;
    i = fuget1(p);
#ifndef NODEBUG
    for(loc = 0; loc < i; loc++)
        s[loc] = fuget1(p);
    s[loc] = 0;
    DEBUG((DBG_FONTS, "(pk) %s: %s\n", font->fontname, s));
#else
    fseek(p, (long)i, SEEK_CUR);    /* was fseek(in, ...): `in' undefined */
#endif
    /* get the design size */
    font->design = fuget4(p);
    /* get the checksum */
    checksum = fuget4(p);
    if(checksum && font->checksum && font->checksum != checksum) {
        mdvi_warning(_("%s: checksum mismatch (expected %u, got %u)\n"),
                     font->fontname, font->checksum, checksum);
    } else if(!font->checksum)
        font->checksum = checksum;
    /* skip pixel per point ratios */
    fuget4(p);
    fuget4(p);
    if(feof(p))
        goto badpk;

    /* now start reading the font */
    loc = 256; hic = -1; maxch = 256;

    /* initialize alpha and beta for TFM width computation */
    TFMPREPARE(font->scale, z, alpha, beta);

    while((flag_byte = fuget1(p)) != PK_POST) {
        if(feof(p))
            break;
        if(flag_byte >= PK_CMD_START) {
            switch(flag_byte) {
            case PK_X1:
            case PK_X2:
            case PK_X3:
            case PK_X4: {
#ifndef NODEBUG
                char *t;
                int n;

                i = fugetn(p, flag_byte - PK_X1 + 1);
                if(i < 256)
                    t = &s[0];
                else
                    t = mdvi_malloc(i + 1);
                for(n = 0; n < i; n++)
                    t[n] = fuget1(p);
                t[n] = 0;
                DEBUG((DBG_SPECIAL, "(pk) %s: Special \"%s\"\n",
                       font->fontname, t));
                if(t != &s[0])
                    mdvi_free(t);
#else
                i = fugetn(p, flag_byte - PK_X1 + 1);
                while(i-- > 0)
                    fuget1(p);
#endif
                break;
            }
            case PK_Y:
                i = fuget4(p);
                DEBUG((DBG_SPECIAL, "(pk) %s: MF special %u\n",
                       font->fontname, (unsigned)i));
                break;
            case PK_POST:
            case PK_NOOP:
                break;
            case PK_PRE:
                mdvi_error(_("%s: unexpected preamble\n"), font->fontname);
                goto error;
            }
        } else {
            int pl;
            int cc;
            int w, h;
            int x, y;
            int offset;
            long tfm;

            switch(flag_byte & 0x7) {
            case 7:
                /* long form: 32-bit packet length and char code */
                pl = fuget4(p);
                cc = fuget4(p);
                offset = ftell(p) + pl;
                tfm = fuget4(p);
                fsget4(p); /* skip dx */
                fsget4(p); /* skip dy */
                w = fuget4(p);
                h = fuget4(p);
                x = fsget4(p);
                y = fsget4(p);
                break;
            case 4:
            case 5:
            case 6:
                /* extended short form */
                pl = (flag_byte % 4) * 65536 + fuget2(p);
                cc = fuget1(p);
                offset = ftell(p) + pl;
                tfm = fuget3(p);
                fsget2(p); /* skip dx */
                /* dy assumed 0 */
                w = fuget2(p);
                h = fuget2(p);
                x = fsget2(p);
                y = fsget2(p);
                break;
            default:
                /* short form */
                pl = (flag_byte % 4) * 256 + fuget1(p);
                cc = fuget1(p);
                offset = ftell(p) + pl;
                tfm = fuget3(p);
                fsget1(p); /* skip dx */
                /* dy assumed 0 */
                w = fuget1(p);
                h = fuget1(p);
                x = fsget1(p);
                y = fsget1(p);
            }
            if(feof(p))
                break;

            /* sanity-check the character code before it drives the size
             * of the chars[] array below */
            if(cc < 0 || cc > 65535) {
                mdvi_error(_("%s: unexpected character code %d\n"),
                           font->fontname, cc);
                goto error;
            }

            if(cc < loc)
                loc = cc;
            if(cc > hic)
                hic = cc;
            if(cc > maxch) {
                font->chars = xresize(font->chars,
                                      DviFontChar, cc + 16);
                for(i = maxch; i < cc + 16; i++)
                    font->chars[i].offset = 0;
                maxch = cc + 16;
            }

            font->chars[cc].code = cc;
            font->chars[cc].flags = flag_byte;
            font->chars[cc].offset = ftell(p);
            font->chars[cc].width = w;
            font->chars[cc].height = h;
            font->chars[cc].glyph.data = NULL;
            font->chars[cc].x = x;
            font->chars[cc].y = y;
            font->chars[cc].glyph.x = x;
            font->chars[cc].glyph.y = y;
            font->chars[cc].glyph.w = w;
            font->chars[cc].glyph.h = h;
            font->chars[cc].grey.data = NULL;
            font->chars[cc].shrunk.data = NULL;
            font->chars[cc].tfmwidth = TFMSCALE(z, tfm, alpha, beta);
            font->chars[cc].loaded = 0;
            /* skip the raster data; it is loaded on demand */
            fseek(p, (long)offset, SEEK_SET);
        }
    }
    if(flag_byte != PK_POST) {
        mdvi_error(_("%s: unexpected end of file (no postamble)\n"),
                   font->fontname);
        goto error;
    }
    while((flag_byte = fuget1(p)) != EOF) {
        if(flag_byte != PK_NOOP) {
            mdvi_error(_("invalid PK file! (junk in postamble)\n"));
            goto error;
        }
    }

    /* a font without characters is corrupt: the memmove below would be
     * handed a negative (huge unsigned) size */
    if(hic < loc)
        goto badpk;

    /* resize font char data */
    if(loc > 0 || hic < maxch-1) {
        memmove(font->chars, font->chars + loc,
                (hic - loc + 1) * sizeof(DviFontChar));
        font->chars = xresize(font->chars,
                              DviFontChar, hic - loc + 1);
    }
    font->loc = loc;
    font->hic = hic;
    return 0;

badpk:
    mdvi_error(_("%s: File corrupted, or not a PK file\n"), font->fontname);
error:
    mdvi_free(font->chars);
    font->chars = NULL;
    font->loc = font->hic = 0;
    return -1;
}
"CWE-20"
] | evince | d4139205b010ed06310d14284e63114e88ec6de2 | 238,884,600,401,543,050,000,000,000,000,000,000,000 | 206 | backends: Fix several security issues in the dvi-backend.
See CVE-2010-2640, CVE-2010-2641, CVE-2010-2642 and CVE-2010-2643. |
/*
 * special:
 * Handle the DVI xxx1..xxx4 (\special) opcodes: read the length-prefixed
 * argument string from the DVI stream and hand it to mdvi_do_special().
 *
 * Returns 0 on success, -1 if the encoded length is invalid.
 *
 * Fix: the length comes straight from the (untrusted) DVI file; a negative
 * value would wrap `arg + 1' in the allocation below and let dread()
 * write far past the buffer.  Reject it instead.
 */
int special(DviContext *dvi, int opcode)
{
    char    *s;
    Int32   arg;

    arg = dugetn(dvi, opcode - DVI_XXX1 + 1);
    if(arg < 0) {
        mdvi_error(_("malformed special length %d\n"), (int)arg);
        return -1;
    }
    s = mdvi_malloc(arg + 1);
    dread(dvi, s, arg);
    s[arg] = 0;
    mdvi_do_special(dvi, s);
    SHOWCMD((dvi, "XXXX", opcode - DVI_XXX1 + 1,
             "[%s]", s));
    mdvi_free(s);
    return 0;
}
"CWE-20"
] | evince | d4139205b010ed06310d14284e63114e88ec6de2 | 144,021,900,882,273,980,000,000,000,000,000,000,000 | 15 | backends: Fix several security issues in the dvi-backend.
See CVE-2010-2640, CVE-2010-2641, CVE-2010-2642 and CVE-2010-2643. |
/*
 * token:
 * Return the next delimiter-separated token from `stream' in the file-scope
 * buffer `ident', or NULL at end of file.  Delimiters are space, tab, comma,
 * semicolon, colon and `lineterm'; a leading run of them is skipped, and a
 * trailing delimiter other than ':' is pushed back.
 *
 * Fix: the copy loop is bounded by the capacity of `ident'; previously an
 * overlong token in the input file overflowed the buffer (CVE-2010-2642
 * class).  A token longer than the buffer is split, not overflowed.
 * NOTE(review): this assumes `ident' is declared as a true array in this
 * translation unit so that sizeof yields its capacity -- confirm.
 */
static char *token(FILE *stream)
{
    int ch, idx;

    /* skip over white space */
    while ((ch = fgetc(stream)) == ' ' || ch == lineterm ||
           ch == ',' || ch == '\t' || ch == ';');

    idx = 0;
    while (idx < (int)sizeof(ident) - 1 &&
           ch != EOF && ch != ' ' && ch != lineterm
           && ch != '\t' && ch != ':' && ch != ';')
    {
        ident[idx++] = ch;
        ch = fgetc(stream);
    } /* while */

    if (ch == EOF && idx < 1) return ((char *)NULL);
    if (idx >= 1 && ch != ':' ) ungetc(ch, stream);
    if (idx < 1 ) ident[idx++] = ch;   /* single-character token */
    ident[idx] = 0;

    return(ident);    /* returns pointer to the token */
} /* token */
"CWE-20"
] | evince | d4139205b010ed06310d14284e63114e88ec6de2 | 160,514,501,259,155,110,000,000,000,000,000,000,000 | 24 | backends: Fix several security issues in the dvi-backend.
See CVE-2010-2640, CVE-2010-2641, CVE-2010-2642 and CVE-2010-2643. |
/*
 * vf_load_font:
 * Load a virtual font (.vf / .ovf) file: reference every subfont defined
 * in the preamble, then record each character's DVI macro (terminated with
 * DVI_EOP) in one contiguous buffer pointed to by font->private.
 *
 * Returns 0 on success, -1 on error.
 *
 * Fixes vs. the previous revision:
 *  - `name' was leaked when font_reference() failed;
 *  - the fread() of the subfont name is checked;
 *  - the character code read from the file (32-bit in the 242 form) is
 *    range-checked before it is used to size the chars[] array.
 */
static int vf_load_font(DviParams *params, DviFont *font)
{
    FILE    *p;
    Uchar   *macros;
    int     msize;
    int     mlen;
    Int32   checksum;
    long    alpha, beta, z;
    int     op;
    int     i;
    int     nchars;
    int     loc, hic;
    DviFontRef *last;

    macros = NULL;
    msize = mlen = 0;
    p = font->in;

    if(fuget1(p) != 247 || fuget1(p) != 202)
        goto badvf;
    mlen = fuget1(p);
    fseek(p, (long)mlen, SEEK_CUR);
    checksum = fuget4(p);
    if(checksum && font->checksum && checksum != font->checksum) {
        mdvi_warning(_("%s: Checksum mismatch (expected %u, got %u)\n"),
                     font->fontname, font->checksum, checksum);
    } else if(!font->checksum)
        font->checksum = checksum;
    font->design = fuget4(p);

    /* read all the fonts in the preamble */
    last = NULL;

    /* initialize alpha, beta and z for TFM width computation */
    TFMPREPARE(font->scale, z, alpha, beta);

    op = fuget1(p);
    while(op >= DVI_FNT_DEF1 && op <= DVI_FNT_DEF4) {
        DviFontRef *ref;
        Int32   scale, design;
        Uint32  checksum;
        int     id;
        int     n;
        int     hdpi;
        int     vdpi;
        char    *name;

        /* process fnt_def commands */
        id = fugetn(p, op - DVI_FNT_DEF1 + 1);
        checksum = fuget4(p);
        scale = fuget4(p);
        design = fuget4(p);

        /* scale this font according to our parent's scale */
        scale = TFMSCALE(scale, z, alpha, beta);
        design = FROUND(params->tfm_conv * design);

        /* compute the resolution */
        hdpi = FROUND(params->mag * params->dpi * scale / design);
        vdpi = FROUND(params->mag * params->vdpi * scale / design);
        n = fuget1(p) + fuget1(p);
        name = mdvi_malloc(n + 1);
        if(fread(name, 1, n, p) != (size_t)n) {
            /* truncated file: don't use a half-read name */
            mdvi_free(name);
            goto error;
        }
        name[n] = 0;
        DEBUG((DBG_FONTS, "(vf) %s: defined font `%s' at %.1fpt (%dx%d dpi)\n",
               font->fontname, name,
               (double)scale / (params->tfm_conv * 0x100000), hdpi, vdpi));

        /* get the font */
        ref = font_reference(params, id, name, checksum, hdpi, vdpi, scale);
        if(ref == NULL) {
            mdvi_error(_("(vf) %s: could not load font `%s'\n"),
                       font->fontname, name);
            mdvi_free(name);    /* was leaked on this path */
            goto error;
        }
        mdvi_free(name);
        if(last == NULL)
            font->subfonts = last = ref;
        else
            last->next = ref;
        ref->next = NULL;
        op = fuget1(p);
    }
    if(op >= DVI_FNT_DEF1 && op <= DVI_FNT_DEF4)
        goto error;

    /* This function correctly reads both .vf and .ovf files */

    font->chars = xnalloc(DviFontChar, 256);
    for(i = 0; i < 256; i++)
        font->chars[i].offset = 0;
    nchars = 256;
    loc = -1; hic = -1;
    /* now read the characters themselves */
    while(op <= 242) {
        int     pl;
        Int32   cc;
        Int32   tfm;

        if(op == 242) {
            /* long form: 32-bit length and character code */
            pl = fuget4(p);
            cc = fuget4(p);
            tfm = fuget4(p);
        } else {
            pl = op;
            cc = fuget1(p);
            tfm = fuget3(p);
        }
        /* sanity-check the character code before it drives the size of
         * the chars[] array below */
        if(cc < 0 || cc > 65535) {
            mdvi_error(_("(vf) %s: unexpected character %d\n"),
                       font->fontname, cc);
            goto error;
        }
        if(loc < 0 || cc < loc)
            loc = cc;
        if(hic < 0 || cc > hic)
            hic = cc;
        if(cc >= nchars) {
            font->chars = xresize(font->chars,
                                  DviFontChar, cc + 16);
            for(i = nchars; i < cc + 16; i++)
                font->chars[i].offset = 0;
            nchars = cc + 16;
        }
        if(font->chars[cc].offset) {
            mdvi_error(_("(vf) %s: character %d redefined\n"),
                       font->fontname, cc);
            goto error;
        }
        DEBUG((DBG_GLYPHS, "(vf) %s: defined character %d (macro length %d)\n",
               font->fontname, cc, pl));
        font->chars[cc].width = pl + 1;
        font->chars[cc].code = cc;
        font->chars[cc].tfmwidth = TFMSCALE(tfm, z, alpha, beta);
        font->chars[cc].offset = mlen;
        font->chars[cc].loaded = 1;
        if(mlen + pl + 1 > msize) {
            msize = mlen + pl + 256;
            macros = xresize(macros, Uchar, msize);
        }
        if(pl && fread(macros + mlen, 1, pl, p) != pl)
            break;
        macros[mlen+pl] = DVI_EOP;
        mlen += pl + 1;
        op = fuget1(p);
    }
    if(op != 248) {
        mdvi_error(_("(vf) %s: no postamble\n"), font->fontname);
        goto error;
    }

    /* make macro memory just big enough */
    if(msize > mlen) {
        macros = xresize(macros, Uchar, mlen);
        msize = mlen;
    }

    DEBUG((DBG_FONTS|DBG_GLYPHS,
           "(vf) %s: macros use %d bytes\n", font->fontname, msize));

    if(loc > 0 || hic < nchars-1) {
        memmove(font->chars, font->chars + loc,
                (hic - loc + 1) * sizeof(DviFontChar));
        font->chars = xresize(font->chars,
                              DviFontChar, hic - loc + 1);
    }
    font->loc = loc;
    font->hic = hic;
    font->private = macros;
    return 0;

badvf:
    mdvi_error(_("%s: File corrupted, or not a VF file.\n"), font->fontname);
error:
    /* NOTE(review): assumes font->chars is NULL when we error out before
     * allocating it -- i.e. the caller zero-initializes the DviFont. */
    if(font->chars)
        mdvi_free(font->chars);
    if(macros)
        mdvi_free(macros);
    return -1;
}
"CWE-20"
] | evince | d4139205b010ed06310d14284e63114e88ec6de2 | 42,137,652,941,757,020,000,000,000,000,000,000,000 | 179 | backends: Fix several security issues in the dvi-backend.
See CVE-2010-2640, CVE-2010-2641, CVE-2010-2642 and CVE-2010-2643. |
/*
 * Read one mDNS datagram from an IPv6 socket.
 *
 * On success returns a freshly allocated AvahiDnsPacket and fills in the
 * optional out-parameters (source address/port, destination address,
 * receiving interface index, hop limit).  Returns NULL on failure.
 *
 * Fix (CVE-2011-1002): for corrupt packets FIONREAD reports a size of 0
 * (see rhbz #607297).  The old code bailed out BEFORE calling recvmsg(),
 * which left the zero-length datagram queued on the socket, so the
 * caller's poll loop saw the fd as permanently readable and spun at 100%
 * CPU.  The zero-size check is now performed only after the datagram has
 * been consumed.
 */
AvahiDnsPacket *avahi_recv_dns_packet_ipv6(
        int fd,
        AvahiIPv6Address *ret_src_address,
        uint16_t *ret_src_port,
        AvahiIPv6Address *ret_dst_address,
        AvahiIfIndex *ret_iface,
        uint8_t *ret_ttl) {

    AvahiDnsPacket *p = NULL;
    struct msghdr msg;
    struct iovec io;
    size_t aux[1024 / sizeof(size_t)];   /* size_t-aligned cmsg buffer */
    ssize_t l;
    int ms;
    struct cmsghdr *cmsg;
    int found_ttl = 0, found_iface = 0;
    struct sockaddr_in6 sa;

    assert(fd >= 0);

    if (ioctl(fd, FIONREAD, &ms) < 0) {
        avahi_log_warn("ioctl(): %s", strerror(errno));
        goto fail;
    }

    if (ms < 0) {
        avahi_log_warn("FIONREAD returned negative value.");
        goto fail;
    }

    p = avahi_dns_packet_new(ms + AVAHI_DNS_PACKET_EXTRA_SIZE);

    io.iov_base = AVAHI_DNS_PACKET_DATA(p);
    io.iov_len = p->max_size;

    memset(&msg, 0, sizeof(msg));
    msg.msg_name = (struct sockaddr*) &sa;
    msg.msg_namelen = sizeof(sa);
    msg.msg_iov = &io;
    msg.msg_iovlen = 1;
    msg.msg_control = aux;
    msg.msg_controllen = sizeof(aux);
    msg.msg_flags = 0;

    if ((l = recvmsg(fd, &msg, 0)) < 0) {
        /* Linux returns EAGAIN when an invalid IP packet has been
           received. We suppress warnings in this case because this might
           create quite a bit of log traffic on machines with unstable
           links. (See #60) */
        if (errno != EAGAIN)
            avahi_log_warn("recvmsg(): %s", strerror(errno));
        goto fail;
    }

    /* For corrupt packets FIONREAD returns zero size (See rhbz #607297).
       Bail out only now that recvmsg() has drained the datagram from the
       socket, otherwise the fd stays readable forever. */
    if (!ms)
        goto fail;

    assert(!(msg.msg_flags & MSG_CTRUNC));
    assert(!(msg.msg_flags & MSG_TRUNC));

    p->size = (size_t) l;

    if (ret_src_port)
        *ret_src_port = avahi_port_from_sockaddr((struct sockaddr*) &sa);

    if (ret_src_address) {
        AvahiAddress a;
        avahi_address_from_sockaddr((struct sockaddr*) &sa, &a);
        *ret_src_address = a.data.ipv6;
    }

    for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
        if (cmsg->cmsg_level == IPPROTO_IPV6) {
            switch (cmsg->cmsg_type) {
                case IPV6_HOPLIMIT:
                    if (ret_ttl)
                        *ret_ttl = (uint8_t) (*(int *) CMSG_DATA(cmsg));
                    found_ttl = 1;
                    break;

                case IPV6_PKTINFO: {
                    struct in6_pktinfo *i = (struct in6_pktinfo*) CMSG_DATA(cmsg);

                    if (ret_iface)
                        *ret_iface = i->ipi6_ifindex;

                    if (ret_dst_address)
                        memcpy(ret_dst_address->address, i->ipi6_addr.s6_addr, 16);

                    found_iface = 1;
                    break;
                }

                default:
                    avahi_log_warn("Unhandled cmsg_type : %d", cmsg->cmsg_type);
                    break;
            }
        }
    }

    assert(found_iface);
    assert(found_ttl);

    return p;

fail:
    if (p)
        avahi_dns_packet_free(p);

    return NULL;
}
"CWE-399"
] | avahi | 46109dfec75534fe270c0ab902576f685d5ab3a6 | 154,873,054,250,953,480,000,000,000,000,000,000,000 | 121 | socket: Still read corrupt packets from the sockets
Else, we end up with an infinite loop with 100% CPU.
http://www.avahi.org/ticket/325
https://bugzilla.redhat.com/show_bug.cgi?id=667187 |
AvahiDnsPacket *avahi_recv_dns_packet_ipv4(
int fd,
AvahiIPv4Address *ret_src_address,
uint16_t *ret_src_port,
AvahiIPv4Address *ret_dst_address,
AvahiIfIndex *ret_iface,
uint8_t *ret_ttl) {
AvahiDnsPacket *p= NULL;
struct msghdr msg;
struct iovec io;
size_t aux[1024 / sizeof(size_t)]; /* for alignment on ia64 ! */
ssize_t l;
struct cmsghdr *cmsg;
int found_addr = 0;
int ms;
struct sockaddr_in sa;
assert(fd >= 0);
if (ioctl(fd, FIONREAD, &ms) < 0) {
avahi_log_warn("ioctl(): %s", strerror(errno));
goto fail;
}
if (ms < 0) {
avahi_log_warn("FIONREAD returned negative value.");
goto fail;
}
/* For corrupt packets FIONREAD returns zero size (See rhbz #607297) */
if (!ms)
goto fail;
p = avahi_dns_packet_new(ms + AVAHI_DNS_PACKET_EXTRA_SIZE);
io.iov_base = AVAHI_DNS_PACKET_DATA(p);
io.iov_len = p->max_size;
memset(&msg, 0, sizeof(msg));
msg.msg_name = &sa;
msg.msg_namelen = sizeof(sa);
msg.msg_iov = &io;
msg.msg_iovlen = 1;
msg.msg_control = aux;
msg.msg_controllen = sizeof(aux);
msg.msg_flags = 0;
if ((l = recvmsg(fd, &msg, 0)) < 0) {
/* Linux returns EAGAIN when an invalid IP packet has been
received. We suppress warnings in this case because this might
create quite a bit of log traffic on machines with unstable
links. (See #60) */
if (errno != EAGAIN)
avahi_log_warn("recvmsg(): %s", strerror(errno));
goto fail;
}
if (sa.sin_addr.s_addr == INADDR_ANY) {
/* Linux 2.4 behaves very strangely sometimes! */
goto fail;
}
assert(!(msg.msg_flags & MSG_CTRUNC));
assert(!(msg.msg_flags & MSG_TRUNC));
p->size = (size_t) l;
if (ret_src_port)
*ret_src_port = avahi_port_from_sockaddr((struct sockaddr*) &sa);
if (ret_src_address) {
AvahiAddress a;
avahi_address_from_sockaddr((struct sockaddr*) &sa, &a);
*ret_src_address = a.data.ipv4;
}
if (ret_ttl)
*ret_ttl = 255;
if (ret_iface)
*ret_iface = AVAHI_IF_UNSPEC;
for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
if (cmsg->cmsg_level == IPPROTO_IP) {
switch (cmsg->cmsg_type) {
#ifdef IP_RECVTTL
case IP_RECVTTL:
#endif
case IP_TTL:
if (ret_ttl)
*ret_ttl = (uint8_t) (*(int *) CMSG_DATA(cmsg));
break;
#ifdef IP_PKTINFO
case IP_PKTINFO: {
struct in_pktinfo *i = (struct in_pktinfo*) CMSG_DATA(cmsg);
if (ret_iface)
*ret_iface = (int) i->ipi_ifindex;
if (ret_dst_address)
ret_dst_address->address = i->ipi_addr.s_addr;
found_addr = 1;
break;
}
#endif
#ifdef IP_RECVIF
case IP_RECVIF: {
struct sockaddr_dl *sdl = (struct sockaddr_dl *) CMSG_DATA (cmsg);
if (ret_iface)
#ifdef __sun
*ret_iface = *(uint_t*) sdl;
#else
*ret_iface = (int) sdl->sdl_index;
#endif
break;
}
#endif
#ifdef IP_RECVDSTADDR
case IP_RECVDSTADDR:
if (ret_dst_address)
memcpy(&ret_dst_address->address, CMSG_DATA (cmsg), 4);
found_addr = 1;
break;
#endif
default:
avahi_log_warn("Unhandled cmsg_type : %d", cmsg->cmsg_type);
break;
}
}
}
assert(found_addr);
return p;
fail:
if (p)
avahi_dns_packet_free(p);
return NULL;
} | 1 | [
"CWE-399"
] | avahi | 46109dfec75534fe270c0ab902576f685d5ab3a6 | 85,362,802,703,699,340,000,000,000,000,000,000,000 | 156 | socket: Still read corrupt packets from the sockets
Else, we end up with an infinite loop with 100% CPU.
http://www.avahi.org/ticket/325
https://bugzilla.redhat.com/show_bug.cgi?id=667187 |
strip_leading_slashes (char *name, int strip_leading)
{
int s = strip_leading;
char *p, *n;
for (p = n = name; *p; p++)
{
if (ISSLASH (*p))
{
while (ISSLASH (p[1]))
p++;
if (strip_leading < 0 || --s >= 0)
n = p+1;
}
}
if ((strip_leading < 0 || s <= 0) && *n)
{
memmove (name, n, strlen (n) + 1);
return true;
}
else
return false;
} | 1 | [
"CWE-22"
] | patch | 685a78b6052f4df6eac6d625a545cfb54a6ac0e1 | 264,885,369,498,121,160,000,000,000,000,000,000,000 | 23 | Do not let a malicious patch create files above current directory
This addresses CVE-2010-4651, reported by Jakub Wilk.
https://bugzilla.redhat.com/show_bug.cgi?id=CVE-2010-4651
* src/util.c (strip_leading_slashes): Reject absolute file names
and file names containing a component of "..".
* tests/bad-filenames: New file. Test for this.
* tests/Makefile.am (TESTS): Add it.
Improvements by Andreas Gruenbacher. |
int mainloop(CLIENT *client) {
struct nbd_request request;
struct nbd_reply reply;
gboolean go_on=TRUE;
#ifdef DODBG
int i = 0;
#endif
negotiate(client);
DEBUG("Entering request loop!\n");
reply.magic = htonl(NBD_REPLY_MAGIC);
reply.error = 0;
while (go_on) {
char buf[BUFSIZE];
size_t len;
#ifdef DODBG
i++;
printf("%d: ", i);
#endif
if (client->server->timeout)
alarm(client->server->timeout);
readit(client->net, &request, sizeof(request));
request.from = ntohll(request.from);
request.type = ntohl(request.type);
if (request.type==NBD_CMD_DISC) {
msg2(LOG_INFO, "Disconnect request received.");
if (client->difmap) g_free(client->difmap) ;
if (client->difffile>=0) {
close(client->difffile);
unlink(client->difffilename);
free(client->difffilename);
}
go_on=FALSE;
continue;
}
len = ntohl(request.len);
if (request.magic != htonl(NBD_REQUEST_MAGIC))
err("Not enough magic.");
if (len > BUFSIZE)
err("Request too big!");
#ifdef DODBG
printf("%s from %Lu (%Lu) len %d, ", request.type ? "WRITE" :
"READ", (unsigned long long)request.from,
(unsigned long long)request.from / 512, len);
#endif
memcpy(reply.handle, request.handle, sizeof(reply.handle));
if ((request.from + len) > (OFFT_MAX)) {
DEBUG("[Number too large!]");
ERROR(client, reply);
continue;
}
if (((ssize_t)((off_t)request.from + len) > client->exportsize) ||
((client->server->flags & F_READONLY) && request.type)) {
DEBUG("[RANGE!]");
ERROR(client, reply);
continue;
}
if (request.type==NBD_CMD_WRITE) {
DEBUG("wr: net->buf, ");
readit(client->net, buf, len);
DEBUG("buf->exp, ");
if ((client->server->flags & F_AUTOREADONLY) ||
expwrite(request.from, buf, len,
client)) {
DEBUG("Write failed: %m" );
ERROR(client, reply);
continue;
}
SEND(client->net, reply);
DEBUG("OK!\n");
continue;
}
/* READ */
DEBUG("exp->buf, ");
if (expread(request.from, buf + sizeof(struct nbd_reply), len, client)) {
DEBUG("Read failed: %m");
ERROR(client, reply);
continue;
}
DEBUG("buf->net, ");
memcpy(buf, &reply, sizeof(struct nbd_reply));
writeit(client->net, buf, len + sizeof(struct nbd_reply));
DEBUG("OK!\n");
}
return 0;
} | 1 | [
"CWE-119"
] | nbd | 4ed24fe0d64c7cc9963c57b52cad1555ad7c6b60 | 72,590,078,856,250,560,000,000,000,000,000,000,000 | 92 | r134: CVE-2005-3534 |
int mainloop(CLIENT *client) {
struct nbd_request request;
struct nbd_reply reply;
gboolean go_on=TRUE;
#ifdef DODBG
int i = 0;
#endif
negotiate(client->net, client, NULL);
DEBUG("Entering request loop!\n");
reply.magic = htonl(NBD_REPLY_MAGIC);
reply.error = 0;
while (go_on) {
char buf[BUFSIZE];
size_t len;
#ifdef DODBG
i++;
printf("%d: ", i);
#endif
readit(client->net, &request, sizeof(request));
request.from = ntohll(request.from);
request.type = ntohl(request.type);
if (request.type==NBD_CMD_DISC) {
msg2(LOG_INFO, "Disconnect request received.");
if (client->server->flags & F_COPYONWRITE) {
if (client->difmap) g_free(client->difmap) ;
close(client->difffile);
unlink(client->difffilename);
free(client->difffilename);
}
go_on=FALSE;
continue;
}
len = ntohl(request.len);
if (request.magic != htonl(NBD_REQUEST_MAGIC))
err("Not enough magic.");
if (len > BUFSIZE + sizeof(struct nbd_reply))
err("Request too big!");
#ifdef DODBG
printf("%s from %llu (%llu) len %d, ", request.type ? "WRITE" :
"READ", (unsigned long long)request.from,
(unsigned long long)request.from / 512, len);
#endif
memcpy(reply.handle, request.handle, sizeof(reply.handle));
if ((request.from + len) > (OFFT_MAX)) {
DEBUG("[Number too large!]");
ERROR(client, reply, EINVAL);
continue;
}
if (((ssize_t)((off_t)request.from + len) > client->exportsize)) {
DEBUG("[RANGE!]");
ERROR(client, reply, EINVAL);
continue;
}
if (request.type==NBD_CMD_WRITE) {
DEBUG("wr: net->buf, ");
readit(client->net, buf, len);
DEBUG("buf->exp, ");
if ((client->server->flags & F_READONLY) ||
(client->server->flags & F_AUTOREADONLY)) {
DEBUG("[WRITE to READONLY!]");
ERROR(client, reply, EPERM);
continue;
}
if (expwrite(request.from, buf, len, client)) {
DEBUG("Write failed: %m" );
ERROR(client, reply, errno);
continue;
}
SEND(client->net, reply);
DEBUG("OK!\n");
continue;
}
/* READ */
DEBUG("exp->buf, ");
if (expread(request.from, buf + sizeof(struct nbd_reply), len, client)) {
DEBUG("Read failed: %m");
ERROR(client, reply, errno);
continue;
}
DEBUG("buf->net, ");
memcpy(buf, &reply, sizeof(struct nbd_reply));
writeit(client->net, buf, len + sizeof(struct nbd_reply));
DEBUG("OK!\n");
}
return 0;
} | 1 | [
"CWE-119",
"CWE-787"
] | nbd | 3ef52043861ab16352d49af89e048ba6339d6df8 | 168,268,959,423,393,980,000,000,000,000,000,000,000 | 93 | Fix buffer size checking
Yes, this means we've re-introduced CVE-2005-3534. Sigh. |
xfs_ioc_fsgeometry_v1(
xfs_mount_t *mp,
void __user *arg)
{
xfs_fsop_geom_v1_t fsgeo;
int error;
error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3);
if (error)
return -error;
if (copy_to_user(arg, &fsgeo, sizeof(fsgeo)))
return -XFS_ERROR(EFAULT);
return 0;
} | 1 | [
"CWE-200"
] | linux-2.6 | af24ee9ea8d532e16883251a6684dfa1be8eec29 | 308,237,029,051,335,170,000,000,000,000,000,000,000 | 15 | xfs: zero proper structure size for geometry calls
Commit 493f3358cb289ccf716c5a14fa5bb52ab75943e5 added this call to
xfs_fs_geometry() in order to avoid passing kernel stack data back
to user space:
+ memset(geo, 0, sizeof(*geo));
Unfortunately, one of the callers of that function passes the
address of a smaller data type, cast to fit the type that
xfs_fs_geometry() requires. As a result, this can happen:
Kernel panic - not syncing: stack-protector: Kernel stack is corrupted
in: f87aca93
Pid: 262, comm: xfs_fsr Not tainted 2.6.38-rc6-493f3358cb2+ #1
Call Trace:
[<c12991ac>] ? panic+0x50/0x150
[<c102ed71>] ? __stack_chk_fail+0x10/0x18
[<f87aca93>] ? xfs_ioc_fsgeometry_v1+0x56/0x5d [xfs]
Fix this by fixing that one caller to pass the right type and then
copy out the subset it is interested in.
Note: This patch is an alternative to one originally proposed by
Eric Sandeen.
Reported-by: Jeffrey Hundstad <jeffrey.hundstad@mnsu.edu>
Signed-off-by: Alex Elder <aelder@sgi.com>
Reviewed-by: Eric Sandeen <sandeen@redhat.com>
Tested-by: Jeffrey Hundstad <jeffrey.hundstad@mnsu.edu> |
_hb_buffer_add_output_glyph_ids( HB_Buffer buffer,
HB_UShort num_in,
HB_UShort num_out,
const GlyphID *glyph_data,
HB_UShort component,
HB_UShort ligID )
{
HB_Error error;
HB_UShort i;
HB_UInt properties;
HB_UInt cluster;
error = hb_buffer_ensure( buffer, buffer->out_pos + num_out );
if ( error )
return error;
if ( !buffer->separate_out )
{
error = hb_buffer_duplicate_out_buffer( buffer );
if ( error )
return error;
}
properties = buffer->in_string[buffer->in_pos].properties;
cluster = buffer->in_string[buffer->in_pos].cluster;
if ( component == 0xFFFF )
component = buffer->in_string[buffer->in_pos].component;
if ( ligID == 0xFFFF )
ligID = buffer->in_string[buffer->in_pos].ligID;
for ( i = 0; i < num_out; i++ )
{
HB_GlyphItem item = &buffer->out_string[buffer->out_pos + i];
item->gindex = glyph_data[i];
item->properties = properties;
item->cluster = cluster;
item->component = component;
item->ligID = ligID;
item->gproperty = HB_GLYPH_PROPERTY_UNKNOWN;
}
buffer->in_pos += num_in;
buffer->out_pos += num_out;
buffer->out_length = buffer->out_pos;
return HB_Err_Ok;
} | 1 | [] | pango | 336bb3201096bdd0494d29926dd44e8cca8bed26 | 201,553,425,437,025,600,000,000,000,000,000,000,000 | 49 | [HB] Remove all references to the old code! |
hb_buffer_ensure( HB_Buffer buffer,
HB_UInt size )
{
HB_UInt new_allocated = buffer->allocated;
if (size > new_allocated)
{
HB_Error error;
while (size > new_allocated)
new_allocated += (new_allocated >> 1) + 8;
if ( buffer->positions )
{
if ( REALLOC_ARRAY( buffer->positions, new_allocated, HB_PositionRec ) )
return error;
}
if ( REALLOC_ARRAY( buffer->in_string, new_allocated, HB_GlyphItemRec ) )
return error;
if ( buffer->separate_out )
{
if ( REALLOC_ARRAY( buffer->alt_string, new_allocated, HB_GlyphItemRec ) )
return error;
buffer->out_string = buffer->alt_string;
}
else
{
buffer->out_string = buffer->in_string;
if ( buffer->alt_string )
{
if ( REALLOC_ARRAY( buffer->alt_string, new_allocated, HB_GlyphItemRec ) )
return error;
}
}
buffer->allocated = new_allocated;
}
return HB_Err_Ok;
} | 1 | [] | pango | 336bb3201096bdd0494d29926dd44e8cca8bed26 | 128,240,973,658,443,860,000,000,000,000,000,000,000 | 44 | [HB] Remove all references to the old code! |
set_unicode_charmap (FT_Face face)
{
int charmap;
for (charmap = 0; charmap < face->num_charmaps; charmap++)
if (face->charmaps[charmap]->encoding == ft_encoding_unicode)
{
HB_Error error = FT_Set_Charmap(face, face->charmaps[charmap]);
return error == HB_Err_Ok;
}
return FALSE;
} | 1 | [] | pango | 336bb3201096bdd0494d29926dd44e8cca8bed26 | 146,936,864,701,422,160,000,000,000,000,000,000,000 | 13 | [HB] Remove all references to the old code! |
release_buffer (HB_Buffer buffer, gboolean free_buffer)
{
if (G_LIKELY (!free_buffer))
{
hb_buffer_clear (buffer);
G_UNLOCK (cached_buffer);
}
else
hb_buffer_free (buffer);
} | 1 | [] | pango | 336bb3201096bdd0494d29926dd44e8cca8bed26 | 306,292,585,713,761,760,000,000,000,000,000,000,000 | 10 | [HB] Remove all references to the old code! |
acquire_buffer (gboolean *free_buffer)
{
HB_Buffer buffer;
if (G_LIKELY (G_TRYLOCK (cached_buffer)))
{
if (G_UNLIKELY (!cached_buffer))
hb_buffer_new (&cached_buffer);
buffer = cached_buffer;
*free_buffer = FALSE;
}
else
{
hb_buffer_new (&buffer);
*free_buffer = TRUE;
}
return buffer;
} | 1 | [] | pango | 336bb3201096bdd0494d29926dd44e8cca8bed26 | 136,535,772,702,542,390,000,000,000,000,000,000,000 | 20 | [HB] Remove all references to the old code! |
_hb_buffer_next_glyph ( HB_Buffer buffer )
{
HB_Error error;
if ( buffer->separate_out )
{
error = hb_buffer_ensure( buffer, buffer->out_pos + 1 );
if ( error )
return error;
buffer->out_string[buffer->out_pos] = buffer->in_string[buffer->in_pos];
}
buffer->in_pos++;
buffer->out_pos++;
buffer->out_length = buffer->out_pos;
return HB_Err_Ok;
} | 1 | [] | pango | 336bb3201096bdd0494d29926dd44e8cca8bed26 | 214,351,697,270,207,900,000,000,000,000,000,000,000 | 19 | [HB] Remove all references to the old code! |
_hb_buffer_clear_output( HB_Buffer buffer )
{
buffer->out_length = 0;
buffer->out_pos = 0;
buffer->out_string = buffer->in_string;
buffer->separate_out = FALSE;
} | 1 | [] | pango | 336bb3201096bdd0494d29926dd44e8cca8bed26 | 70,132,925,757,040,090,000,000,000,000,000,000,000 | 7 | [HB] Remove all references to the old code! |
_hb_buffer_add_output_glyph( HB_Buffer buffer,
HB_UInt glyph_index,
HB_UShort component,
HB_UShort ligID )
{
HB_UShort glyph_data = glyph_index;
return _hb_buffer_add_output_glyphs ( buffer, 1, 1,
&glyph_data, component, ligID );
} | 1 | [] | pango | 336bb3201096bdd0494d29926dd44e8cca8bed26 | 185,065,478,222,829,970,000,000,000,000,000,000,000 | 10 | [HB] Remove all references to the old code! |
_hb_buffer_add_output_glyphs( HB_Buffer buffer,
HB_UShort num_in,
HB_UShort num_out,
HB_UShort *glyph_data,
HB_UShort component,
HB_UShort ligID )
{
HB_Error error;
HB_UShort i;
HB_UInt properties;
HB_UInt cluster;
error = hb_buffer_ensure( buffer, buffer->out_pos + num_out );
if ( error )
return error;
if ( !buffer->separate_out )
{
error = hb_buffer_duplicate_out_buffer( buffer );
if ( error )
return error;
}
properties = buffer->in_string[buffer->in_pos].properties;
cluster = buffer->in_string[buffer->in_pos].cluster;
if ( component == 0xFFFF )
component = buffer->in_string[buffer->in_pos].component;
if ( ligID == 0xFFFF )
ligID = buffer->in_string[buffer->in_pos].ligID;
for ( i = 0; i < num_out; i++ )
{
HB_GlyphItem item = &buffer->out_string[buffer->out_pos + i];
item->gindex = glyph_data[i];
item->properties = properties;
item->cluster = cluster;
item->component = component;
item->ligID = ligID;
item->gproperty = HB_GLYPH_PROPERTY_UNKNOWN;
}
buffer->in_pos += num_in;
buffer->out_pos += num_out;
buffer->out_length = buffer->out_pos;
return HB_Err_Ok;
} | 1 | [] | pango | 336bb3201096bdd0494d29926dd44e8cca8bed26 | 260,531,919,655,114,740,000,000,000,000,000,000,000 | 49 | [HB] Remove all references to the old code! |
hb_buffer_new( HB_Buffer *pbuffer )
{
HB_Buffer buffer;
HB_Error error;
if ( ALLOC( buffer, sizeof( HB_BufferRec ) ) )
return error;
buffer->allocated = 0;
buffer->in_string = NULL;
buffer->alt_string = NULL;
buffer->positions = NULL;
hb_buffer_clear( buffer );
*pbuffer = buffer;
return HB_Err_Ok;
} | 1 | [] | pango | 336bb3201096bdd0494d29926dd44e8cca8bed26 | 17,822,031,719,098,954,000,000,000,000,000,000,000 | 19 | [HB] Remove all references to the old code! |
_pango_ot_info_position (const PangoOTInfo *info,
const PangoOTRuleset *ruleset,
PangoOTBuffer *buffer)
{
unsigned int i;
_hb_buffer_clear_positions (buffer->buffer);
hb_ot_layout_set_direction (info->layout,
buffer->rtl);
hb_ot_layout_set_hinting (info->layout,
buffer->font->is_hinted);
hb_ot_layout_set_scale (info->layout,
info->face->size->metrics.x_scale,
info->face->size->metrics.y_scale);
hb_ot_layout_set_ppem (info->layout,
info->face->size->metrics.x_ppem,
info->face->size->metrics.y_ppem);
for (i = 0; i < ruleset->rules->len; i++)
{
PangoOTRule *rule = &g_array_index (ruleset->rules, PangoOTRule, i);
hb_ot_layout_feature_mask_t mask;
unsigned int lookup_count, j;
if (rule->table_type != PANGO_OT_TABLE_GPOS)
continue;
mask = rule->property_bit;
lookup_count = hb_ot_layout_feature_get_lookup_count (info->layout,
HB_OT_LAYOUT_TABLE_TYPE_GPOS,
rule->feature_index);
for (j = 0; j < lookup_count; j++)
{
unsigned int lookup_index;
lookup_index = hb_ot_layout_feature_get_lookup_index (info->layout,
HB_OT_LAYOUT_TABLE_TYPE_GPOS,
rule->feature_index,
j);
hb_ot_layout_position_lookup (info->layout,
buffer->buffer,
lookup_index,
rule->property_bit);
}
}
{
HB_UInt i, j;
HB_Position positions = buffer->buffer->positions;
/* First handle all left-to-right connections */
for (j = 0; j < buffer->buffer->in_length; j++)
{
if (positions[j].cursive_chain > 0)
positions[j].y_pos += positions[j - positions[j].cursive_chain].y_pos;
}
/* Then handle all right-to-left connections */
for (i = buffer->buffer->in_length; i > 0; i--)
{
j = i - 1;
if (positions[j].cursive_chain < 0)
positions[j].y_pos += positions[j - positions[j].cursive_chain].y_pos;
}
}
buffer->applied_gpos = TRUE;
} | 1 | [] | pango | 336bb3201096bdd0494d29926dd44e8cca8bed26 | 310,930,926,522,775,530,000,000,000,000,000,000,000 | 72 | [HB] Remove all references to the old code! |
hb_buffer_add_glyph( HB_Buffer buffer,
HB_UInt glyph_index,
HB_UInt properties,
HB_UInt cluster )
{
HB_Error error;
HB_GlyphItem glyph;
error = hb_buffer_ensure( buffer, buffer->in_length + 1 );
if ( error )
return error;
glyph = &buffer->in_string[buffer->in_length];
glyph->gindex = glyph_index;
glyph->properties = properties;
glyph->cluster = cluster;
glyph->component = 0;
glyph->ligID = 0;
glyph->gproperty = HB_GLYPH_PROPERTY_UNKNOWN;
buffer->in_length++;
return HB_Err_Ok;
} | 1 | [] | pango | 336bb3201096bdd0494d29926dd44e8cca8bed26 | 75,903,311,502,410,780,000,000,000,000,000,000,000 | 24 | [HB] Remove all references to the old code! |
_hb_buffer_replace_glyph( HB_Buffer buffer,
HB_UInt glyph_index )
{
if ( !buffer->separate_out )
{
buffer->out_string[buffer->out_pos].gindex = glyph_index;
buffer->in_pos++;
buffer->out_pos++;
buffer->out_length = buffer->out_pos;
}
else
{
return _hb_buffer_add_output_glyph( buffer, glyph_index, 0xFFFF, 0xFFFF );
}
return HB_Err_Ok;
} | 1 | [] | pango | 336bb3201096bdd0494d29926dd44e8cca8bed26 | 162,395,777,182,231,330,000,000,000,000,000,000,000 | 18 | [HB] Remove all references to the old code! |
get_glyph_class (gunichar charcode,
HB_UShort *class)
{
/* For characters mapped into the Arabic Presentation forms, using properties
* derived as we apply GSUB substitutions will be more reliable
*/
if ((charcode >= 0xFB50 && charcode <= 0xFDFF) || /* Arabic Presentation Forms-A */
(charcode >= 0xFE70 && charcode <= 0XFEFF)) /* Arabic Presentation Forms-B */
return FALSE;
switch ((int) g_unichar_type (charcode))
{
case G_UNICODE_COMBINING_MARK:
case G_UNICODE_ENCLOSING_MARK:
case G_UNICODE_NON_SPACING_MARK:
*class = 3; /* Mark glyph (non-spacing combining glyph) */
return TRUE;
case G_UNICODE_UNASSIGNED:
case G_UNICODE_PRIVATE_USE:
return FALSE; /* Unknown, don't assign a class; classes get
* propagated during GSUB application */
default:
*class = 1; /* Base glyph (single character, spacing glyph) */
return TRUE;
}
} | 1 | [] | pango | 336bb3201096bdd0494d29926dd44e8cca8bed26 | 95,198,079,724,196,330,000,000,000,000,000,000,000 | 26 | [HB] Remove all references to the old code! |
_hb_buffer_allocate_ligid( HB_Buffer buffer )
{
return ++buffer->max_ligID;
} | 1 | [] | pango | 336bb3201096bdd0494d29926dd44e8cca8bed26 | 326,918,235,780,216,000,000,000,000,000,000,000,000 | 4 | [HB] Remove all references to the old code! |
hb_buffer_duplicate_out_buffer( HB_Buffer buffer )
{
if ( !buffer->alt_string )
{
HB_Error error;
if ( ALLOC_ARRAY( buffer->alt_string, buffer->allocated, HB_GlyphItemRec ) )
return error;
}
buffer->out_string = buffer->alt_string;
memcpy( buffer->out_string, buffer->in_string, buffer->out_length * sizeof (buffer->out_string[0]) );
buffer->separate_out = TRUE;
return HB_Err_Ok;
} | 1 | [] | pango | 336bb3201096bdd0494d29926dd44e8cca8bed26 | 80,948,619,183,144,920,000,000,000,000,000,000,000 | 16 | [HB] Remove all references to the old code! |
_hb_buffer_swap( HB_Buffer buffer )
{
HB_GlyphItem tmp_string;
int tmp_length;
int tmp_pos;
if ( buffer->separate_out )
{
tmp_string = buffer->in_string;
buffer->in_string = buffer->out_string;
buffer->out_string = tmp_string;
buffer->alt_string = buffer->out_string;
}
tmp_length = buffer->in_length;
buffer->in_length = buffer->out_length;
buffer->out_length = tmp_length;
tmp_pos = buffer->in_pos;
buffer->in_pos = buffer->out_pos;
buffer->out_pos = tmp_pos;
} | 1 | [] | pango | 336bb3201096bdd0494d29926dd44e8cca8bed26 | 311,922,746,781,343,040,000,000,000,000,000,000,000 | 22 | [HB] Remove all references to the old code! |
hb_buffer_free( HB_Buffer buffer )
{
FREE( buffer->in_string );
FREE( buffer->alt_string );
buffer->out_string = NULL;
FREE( buffer->positions );
FREE( buffer );
} | 1 | [] | pango | 336bb3201096bdd0494d29926dd44e8cca8bed26 | 303,476,735,294,025,560,000,000,000,000,000,000,000 | 8 | [HB] Remove all references to the old code! |
_hb_buffer_clear_positions( HB_Buffer buffer )
{
_hb_buffer_clear_output (buffer);
if ( !buffer->positions )
{
HB_Error error;
if ( ALLOC_ARRAY( buffer->positions, buffer->allocated, HB_PositionRec ) )
return error;
}
memset (buffer->positions, 0, sizeof (buffer->positions[0]) * buffer->in_length);
return HB_Err_Ok;
} | 1 | [] | pango | 336bb3201096bdd0494d29926dd44e8cca8bed26 | 294,341,651,951,332,700,000,000,000,000,000,000,000 | 16 | [HB] Remove all references to the old code! |
hb_buffer_clear( HB_Buffer buffer )
{
buffer->in_length = 0;
buffer->out_length = 0;
buffer->in_pos = 0;
buffer->out_pos = 0;
buffer->out_string = buffer->in_string;
buffer->separate_out = FALSE;
buffer->max_ligID = 0;
} | 1 | [] | pango | 336bb3201096bdd0494d29926dd44e8cca8bed26 | 136,695,699,471,913,200,000,000,000,000,000,000,000 | 10 | [HB] Remove all references to the old code! |
static char *linetoken(FILE *stream)
{
int ch, idx;
while ((ch = fgetc(stream)) == ' ' || ch == '\t' );
idx = 0;
while (ch != EOF && ch != lineterm)
{
ident[idx++] = ch;
ch = fgetc(stream);
} /* while */
ungetc(ch, stream);
ident[idx] = 0;
return(ident); /* returns pointer to the token */
} /* linetoken */ | 1 | [] | evince | 439c5070022eab6cef7266aab47f978058012c72 | 33,353,991,592,269,273,000,000,000,000,000,000,000 | 19 | backends: Fix another security issue in the dvi-backend
This is similar to one of the fixes from d4139205.
https://bugzilla.gnome.org/show_bug.cgi?id=640923 |
static char *token(FILE *stream)
{
int ch, idx;
/* skip over white space */
while ((ch = fgetc(stream)) == ' ' || ch == lineterm ||
ch == ',' || ch == '\t' || ch == ';');
idx = 0;
while (ch != EOF && ch != ' ' && ch != lineterm
&& ch != '\t' && ch != ':' && ch != ';' && idx < MAX_NAME)
{
ident[idx++] = ch;
ch = fgetc(stream);
} /* while */
if (ch == EOF && idx < 1) return ((char *)NULL);
if (idx >= 1 && ch != ':' ) ungetc(ch, stream);
if (idx < 1 ) ident[idx++] = ch; /* single-character token */
ident[idx] = 0;
return(ident); /* returns pointer to the token */
} /* token */ | 1 | [] | evince | efadec4ffcdde3373f6f4ca0eaac98dc963c4fd5 | 123,780,927,118,347,540,000,000,000,000,000,000,000 | 24 | dvi: Another fix for buffer overwrite in dvi-backend
https://bugzilla.gnome.org/show_bug.cgi?id=643882 |
static char *linetoken(FILE *stream)
{
int ch, idx;
while ((ch = fgetc(stream)) == ' ' || ch == '\t' );
idx = 0;
while (ch != EOF && ch != lineterm && idx < MAX_NAME)
{
ident[idx++] = ch;
ch = fgetc(stream);
} /* while */
ungetc(ch, stream);
ident[idx] = 0;
return(ident); /* returns pointer to the token */
} /* linetoken */ | 1 | [] | evince | efadec4ffcdde3373f6f4ca0eaac98dc963c4fd5 | 127,373,773,716,473,140,000,000,000,000,000,000,000 | 19 | dvi: Another fix for buffer overwrite in dvi-backend
https://bugzilla.gnome.org/show_bug.cgi?id=643882 |
vba_read_project_strings(int fd, int big_endian)
{
unsigned char *buf = NULL;
uint16_t buflen = 0;
int ret = 0;
for(;;) {
off_t offset;
uint16_t length;
char *name;
if(!read_uint16(fd, &length, big_endian))
break;
if (length < 6) {
lseek(fd, -2, SEEK_CUR);
break;
}
if(length > buflen) {
unsigned char *newbuf = (unsigned char *)cli_realloc(buf, length);
if(newbuf == NULL) {
if(buf)
free(buf);
return 0;
}
buflen = length;
buf = newbuf;
}
offset = lseek(fd, 0, SEEK_CUR);
if(cli_readn(fd, buf, length) != (int)length) {
cli_dbgmsg("read name failed - rewinding\n");
lseek(fd, offset, SEEK_SET);
break;
}
name = get_unicode_name((const char *)buf, length, big_endian);
cli_dbgmsg("length: %d, name: %s\n", length, (name) ? name : "[null]");
if((name == NULL) || (memcmp("*\\", name, 2) != 0) ||
(strchr("ghcd", name[2]) == NULL)) {
/* Not a string */
lseek(fd, -(length+2), SEEK_CUR);
if(name)
free(name);
break;
}
free(name);
if(!read_uint16(fd, &length, big_endian)) {
if(buf)
free(buf);
break;
}
ret++;
if ((length != 0) && (length != 65535)) {
lseek(fd, -2, SEEK_CUR);
continue;
}
offset = lseek(fd, 10, SEEK_CUR);
cli_dbgmsg("offset: %lu\n", (unsigned long)offset);
vba56_test_middle(fd);
}
if(buf)
free(buf);
return ret;
} | 1 | [
"CWE-399"
] | clamav-devel | d21fb8d975f8c9688894a8cef4d50d977022e09f | 143,502,632,929,613,950,000,000,000,000,000,000,000 | 69 | libclamav/vba_extract.c: fix error path double free (bb#2486) |
static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
unsigned int hdr_off, unsigned int sg,
unsigned int off)
{
/* Do not send cong updates to loopback */
if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
}
BUG_ON(hdr_off || sg || off);
rds_inc_init(&rm->m_inc, conn, conn->c_laddr);
/* For the embedded inc. Matching put is in loop_inc_free() */
rds_message_addref(rm);
rds_recv_incoming(conn, conn->c_laddr, conn->c_faddr, &rm->m_inc,
GFP_KERNEL, KM_USER0);
rds_send_drop_acked(conn, be64_to_cpu(rm->m_inc.i_hdr.h_sequence),
NULL);
rds_inc_put(&rm->m_inc);
return sizeof(struct rds_header) + be32_to_cpu(rm->m_inc.i_hdr.h_len);
} | 1 | [] | linux-2.6 | 6094628bfd94323fc1cea05ec2c6affd98c18f7f | 26,909,808,047,240,740,000,000,000,000,000,000,000 | 26 | rds: prevent BUG_ON triggering on congestion map updates
Recently had this bug halt reported to me:
kernel BUG at net/rds/send.c:329!
Oops: Exception in kernel mode, sig: 5 [#1]
SMP NR_CPUS=1024 NUMA pSeries
Modules linked in: rds sunrpc ipv6 dm_mirror dm_region_hash dm_log ibmveth sg
ext4 jbd2 mbcache sd_mod crc_t10dif ibmvscsic scsi_transport_srp scsi_tgt
dm_mod [last unloaded: scsi_wait_scan]
NIP: d000000003ca68f4 LR: d000000003ca67fc CTR: d000000003ca8770
REGS: c000000175cab980 TRAP: 0700 Not tainted (2.6.32-118.el6.ppc64)
MSR: 8000000000029032 <EE,ME,CE,IR,DR> CR: 44000022 XER: 00000000
TASK = c00000017586ec90[1896] 'krdsd' THREAD: c000000175ca8000 CPU: 0
GPR00: 0000000000000150 c000000175cabc00 d000000003cb7340 0000000000002030
GPR04: ffffffffffffffff 0000000000000030 0000000000000000 0000000000000030
GPR08: 0000000000000001 0000000000000001 c0000001756b1e30 0000000000010000
GPR12: d000000003caac90 c000000000fa2500 c0000001742b2858 c0000001742b2a00
GPR16: c0000001742b2a08 c0000001742b2820 0000000000000001 0000000000000001
GPR20: 0000000000000040 c0000001742b2814 c000000175cabc70 0800000000000000
GPR24: 0000000000000004 0200000000000000 0000000000000000 c0000001742b2860
GPR28: 0000000000000000 c0000001756b1c80 d000000003cb68e8 c0000001742b27b8
NIP [d000000003ca68f4] .rds_send_xmit+0x4c4/0x8a0 [rds]
LR [d000000003ca67fc] .rds_send_xmit+0x3cc/0x8a0 [rds]
Call Trace:
[c000000175cabc00] [d000000003ca67fc] .rds_send_xmit+0x3cc/0x8a0 [rds]
(unreliable)
[c000000175cabd30] [d000000003ca7e64] .rds_send_worker+0x54/0x100 [rds]
[c000000175cabdb0] [c0000000000b475c] .worker_thread+0x1dc/0x3c0
[c000000175cabed0] [c0000000000baa9c] .kthread+0xbc/0xd0
[c000000175cabf90] [c000000000032114] .kernel_thread+0x54/0x70
Instruction dump:
4bfffd50 60000000 60000000 39080001 935f004c f91f0040 41820024 813d017c
7d094a78 7d290074 7929d182 394a0020 <0b090000> 40e2ff68 4bffffa4 39200000
Kernel panic - not syncing: Fatal exception
Call Trace:
[c000000175cab560] [c000000000012e04] .show_stack+0x74/0x1c0 (unreliable)
[c000000175cab610] [c0000000005a365c] .panic+0x80/0x1b4
[c000000175cab6a0] [c00000000002fbcc] .die+0x21c/0x2a0
[c000000175cab750] [c000000000030000] ._exception+0x110/0x220
[c000000175cab910] [c000000000004b9c] program_check_common+0x11c/0x180
Signed-off-by: David S. Miller <davem@davemloft.net> |
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
unsigned int hdr_off, unsigned int sg, unsigned int off)
{
struct rds_ib_connection *ic = conn->c_transport_data;
struct ib_device *dev = ic->i_cm_id->device;
struct rds_ib_send_work *send = NULL;
struct rds_ib_send_work *first;
struct rds_ib_send_work *prev;
struct ib_send_wr *failed_wr;
struct scatterlist *scat;
u32 pos;
u32 i;
u32 work_alloc;
u32 credit_alloc = 0;
u32 posted;
u32 adv_credits = 0;
int send_flags = 0;
int bytes_sent = 0;
int ret;
int flow_controlled = 0;
int nr_sig = 0;
BUG_ON(off % RDS_FRAG_SIZE);
BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));
/* Do not send cong updates to IB loopback */
if (conn->c_loopback
&& rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
}
/* FIXME we may overallocate here */
if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
i = 1;
else
i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);
work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
if (work_alloc == 0) {
set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
rds_ib_stats_inc(s_ib_tx_ring_full);
ret = -ENOMEM;
goto out;
}
if (ic->i_flowctl) {
credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
adv_credits += posted;
if (credit_alloc < work_alloc) {
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
work_alloc = credit_alloc;
flow_controlled = 1;
}
if (work_alloc == 0) {
set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
rds_ib_stats_inc(s_ib_tx_throttle);
ret = -ENOMEM;
goto out;
}
}
/* map the message the first time we see it */
if (!ic->i_data_op) {
if (rm->data.op_nents) {
rm->data.op_count = ib_dma_map_sg(dev,
rm->data.op_sg,
rm->data.op_nents,
DMA_TO_DEVICE);
rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
if (rm->data.op_count == 0) {
rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
ret = -ENOMEM; /* XXX ? */
goto out;
}
} else {
rm->data.op_count = 0;
}
rds_message_addref(rm);
ic->i_data_op = &rm->data;
/* Finalize the header */
if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;
/* If it has a RDMA op, tell the peer we did it. This is
* used by the peer to release use-once RDMA MRs. */
if (rm->rdma.op_active) {
struct rds_ext_header_rdma ext_hdr;
ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
rds_message_add_extension(&rm->m_inc.i_hdr,
RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
}
if (rm->m_rdma_cookie) {
rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
rds_rdma_cookie_key(rm->m_rdma_cookie),
rds_rdma_cookie_offset(rm->m_rdma_cookie));
}
/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
* we should not do this unless we have a chance of at least
* sticking the header into the send ring. Which is why we
* should call rds_ib_ring_alloc first. */
rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
rds_message_make_checksum(&rm->m_inc.i_hdr);
/*
* Update adv_credits since we reset the ACK_REQUIRED bit.
*/
if (ic->i_flowctl) {
rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
adv_credits += posted;
BUG_ON(adv_credits > 255);
}
}
/* Sometimes you want to put a fence between an RDMA
* READ and the following SEND.
* We could either do this all the time
* or when requested by the user. Right now, we let
* the application choose.
*/
if (rm->rdma.op_active && rm->rdma.op_fence)
send_flags = IB_SEND_FENCE;
/* Each frag gets a header. Msgs may be 0 bytes */
send = &ic->i_sends[pos];
first = send;
prev = NULL;
scat = &ic->i_data_op->op_sg[sg];
i = 0;
do {
unsigned int len = 0;
/* Set up the header */
send->s_wr.send_flags = send_flags;
send->s_wr.opcode = IB_WR_SEND;
send->s_wr.num_sge = 1;
send->s_wr.next = NULL;
send->s_queued = jiffies;
send->s_op = NULL;
send->s_sge[0].addr = ic->i_send_hdrs_dma
+ (pos * sizeof(struct rds_header));
send->s_sge[0].length = sizeof(struct rds_header);
memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));
/* Set up the data, if present */
if (i < work_alloc
&& scat != &rm->data.op_sg[rm->data.op_count]) {
len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
send->s_wr.num_sge = 2;
send->s_sge[1].addr = ib_sg_dma_address(dev, scat) + off;
send->s_sge[1].length = len;
bytes_sent += len;
off += len;
if (off == ib_sg_dma_len(dev, scat)) {
scat++;
off = 0;
}
}
rds_ib_set_wr_signal_state(ic, send, 0);
/*
* Always signal the last one if we're stopping due to flow control.
*/
if (ic->i_flowctl && flow_controlled && i == (work_alloc-1))
send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
if (send->s_wr.send_flags & IB_SEND_SIGNALED)
nr_sig++;
rdsdebug("send %p wr %p num_sge %u next %p\n", send,
&send->s_wr, send->s_wr.num_sge, send->s_wr.next);
if (ic->i_flowctl && adv_credits) {
struct rds_header *hdr = &ic->i_send_hdrs[pos];
/* add credit and redo the header checksum */
hdr->h_credit = adv_credits;
rds_message_make_checksum(hdr);
adv_credits = 0;
rds_ib_stats_inc(s_ib_tx_credit_updates);
}
if (prev)
prev->s_wr.next = &send->s_wr;
prev = send;
pos = (pos + 1) % ic->i_send_ring.w_nr;
send = &ic->i_sends[pos];
i++;
} while (i < work_alloc
&& scat != &rm->data.op_sg[rm->data.op_count]);
/* Account the RDS header in the number of bytes we sent, but just once.
* The caller has no concept of fragmentation. */
if (hdr_off == 0)
bytes_sent += sizeof(struct rds_header);
/* if we finished the message then send completion owns it */
if (scat == &rm->data.op_sg[rm->data.op_count]) {
prev->s_op = ic->i_data_op;
prev->s_wr.send_flags |= IB_SEND_SOLICITED;
ic->i_data_op = NULL;
}
/* Put back wrs & credits we didn't use */
if (i < work_alloc) {
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
work_alloc = i;
}
if (ic->i_flowctl && i < credit_alloc)
rds_ib_send_add_credits(conn, credit_alloc - i);
if (nr_sig)
atomic_add(nr_sig, &ic->i_signaled_sends);
/* XXX need to worry about failed_wr and partial sends. */
failed_wr = &first->s_wr;
ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
first, &first->s_wr, ret, failed_wr);
BUG_ON(failed_wr != &first->s_wr);
if (ret) {
printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
"returned %d\n", &conn->c_faddr, ret);
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
rds_ib_sub_signaled(ic, nr_sig);
if (prev->s_op) {
ic->i_data_op = prev->s_op;
prev->s_op = NULL;
}
rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
goto out;
}
ret = bytes_sent;
out:
BUG_ON(adv_credits);
return ret;
} | 1 | [] | linux-2.6 | 6094628bfd94323fc1cea05ec2c6affd98c18f7f | 160,272,378,570,640,800,000,000,000,000,000,000,000 | 253 | rds: prevent BUG_ON triggering on congestion map updates
Recently had this bug halt reported to me:
kernel BUG at net/rds/send.c:329!
Oops: Exception in kernel mode, sig: 5 [#1]
SMP NR_CPUS=1024 NUMA pSeries
Modules linked in: rds sunrpc ipv6 dm_mirror dm_region_hash dm_log ibmveth sg
ext4 jbd2 mbcache sd_mod crc_t10dif ibmvscsic scsi_transport_srp scsi_tgt
dm_mod [last unloaded: scsi_wait_scan]
NIP: d000000003ca68f4 LR: d000000003ca67fc CTR: d000000003ca8770
REGS: c000000175cab980 TRAP: 0700 Not tainted (2.6.32-118.el6.ppc64)
MSR: 8000000000029032 <EE,ME,CE,IR,DR> CR: 44000022 XER: 00000000
TASK = c00000017586ec90[1896] 'krdsd' THREAD: c000000175ca8000 CPU: 0
GPR00: 0000000000000150 c000000175cabc00 d000000003cb7340 0000000000002030
GPR04: ffffffffffffffff 0000000000000030 0000000000000000 0000000000000030
GPR08: 0000000000000001 0000000000000001 c0000001756b1e30 0000000000010000
GPR12: d000000003caac90 c000000000fa2500 c0000001742b2858 c0000001742b2a00
GPR16: c0000001742b2a08 c0000001742b2820 0000000000000001 0000000000000001
GPR20: 0000000000000040 c0000001742b2814 c000000175cabc70 0800000000000000
GPR24: 0000000000000004 0200000000000000 0000000000000000 c0000001742b2860
GPR28: 0000000000000000 c0000001756b1c80 d000000003cb68e8 c0000001742b27b8
NIP [d000000003ca68f4] .rds_send_xmit+0x4c4/0x8a0 [rds]
LR [d000000003ca67fc] .rds_send_xmit+0x3cc/0x8a0 [rds]
Call Trace:
[c000000175cabc00] [d000000003ca67fc] .rds_send_xmit+0x3cc/0x8a0 [rds]
(unreliable)
[c000000175cabd30] [d000000003ca7e64] .rds_send_worker+0x54/0x100 [rds]
[c000000175cabdb0] [c0000000000b475c] .worker_thread+0x1dc/0x3c0
[c000000175cabed0] [c0000000000baa9c] .kthread+0xbc/0xd0
[c000000175cabf90] [c000000000032114] .kernel_thread+0x54/0x70
Instruction dump:
4bfffd50 60000000 60000000 39080001 935f004c f91f0040 41820024 813d017c
7d094a78 7d290074 7929d182 394a0020 <0b090000> 40e2ff68 4bffffa4 39200000
Kernel panic - not syncing: Fatal exception
Call Trace:
[c000000175cab560] [c000000000012e04] .show_stack+0x74/0x1c0 (unreliable)
[c000000175cab610] [c0000000005a365c] .panic+0x80/0x1b4
[c000000175cab6a0] [c00000000002fbcc] .die+0x21c/0x2a0
[c000000175cab750] [c000000000030000] ._exception+0x110/0x220
[c000000175cab910] [c000000000004b9c] program_check_common+0x11c/0x180
Signed-off-by: David S. Miller <davem@davemloft.net> |
static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_fmt, const char *fmt,...)
{
struct kmem_cache *slab;
va_list args;
va_start(args, fmt);
vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args);
va_end(args);
slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0,
SLAB_HWCACHE_ALIGN, NULL);
return slab;
} | 1 | [
"CWE-476"
] | linux-2.6 | 8ed030dd0aa400d18c63861c2c6deb7c38f4edde | 320,108,053,811,713,200,000,000,000,000,000,000,000 | 13 | dccp: fix bug in cache allocation
This fixes a bug introduced in commit de4ef86cfce60d2250111f34f8a084e769f23b16
("dccp: fix dccp rmmod when kernel configured to use slub", 17 Jan): the
vsnprintf used sizeof(slab_name_fmt), which became truncated to 4 bytes, since
slab_name_fmt is now a 4-byte pointer and no longer a 32-character array.
This lead to error messages such as
FATAL: Error inserting dccp: No buffer space available
>> kernel: [ 1456.341501] kmem_cache_create: duplicate cache cci
generated due to the truncation after the 3rd character.
Fixed for the moment by introducing a symbolic constant. Tested to fix the bug.
Signed-off-by: Gerrit Renker <gerrit@erg.abdn.ac.uk>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net> |
xsltGenerateIdFunction(xmlXPathParserContextPtr ctxt, int nargs){
xmlNodePtr cur = NULL;
unsigned long val;
xmlChar str[20];
if (nargs == 0) {
cur = ctxt->context->node;
} else if (nargs == 1) {
xmlXPathObjectPtr obj;
xmlNodeSetPtr nodelist;
int i, ret;
if ((ctxt->value == NULL) || (ctxt->value->type != XPATH_NODESET)) {
ctxt->error = XPATH_INVALID_TYPE;
xsltTransformError(xsltXPathGetTransformContext(ctxt), NULL, NULL,
"generate-id() : invalid arg expecting a node-set\n");
return;
}
obj = valuePop(ctxt);
nodelist = obj->nodesetval;
if ((nodelist == NULL) || (nodelist->nodeNr <= 0)) {
xmlXPathFreeObject(obj);
valuePush(ctxt, xmlXPathNewCString(""));
return;
}
cur = nodelist->nodeTab[0];
for (i = 1;i < nodelist->nodeNr;i++) {
ret = xmlXPathCmpNodes(cur, nodelist->nodeTab[i]);
if (ret == -1)
cur = nodelist->nodeTab[i];
}
xmlXPathFreeObject(obj);
} else {
xsltTransformError(xsltXPathGetTransformContext(ctxt), NULL, NULL,
"generate-id() : invalid number of args %d\n", nargs);
ctxt->error = XPATH_INVALID_ARITY;
return;
}
/*
* Okay this is ugly but should work, use the NodePtr address
* to forge the ID
*/
val = (unsigned long)((char *)cur - (char *)0);
val /= sizeof(xmlNode);
sprintf((char *)str, "id%ld", val);
valuePush(ctxt, xmlXPathNewString(str));
} | 1 | [
"CWE-200"
] | libxslt | ecb6bcb8d1b7e44842edde3929f412d46b40c89f | 218,814,172,311,437,360,000,000,000,000,000,000,000 | 47 | Fix generate-id() to not expose object addresses
As pointed out by Chris Evans <scarybeasts@gmail.com> it's better
security wise to not expose object addresses directly, use a diff
w.r.t. the document root own address to avoid this
* libxslt/functions.c: fix IDs generation code |
int tpm_open(struct inode *inode, struct file *file)
{
int minor = iminor(inode);
struct tpm_chip *chip = NULL, *pos;
rcu_read_lock();
list_for_each_entry_rcu(pos, &tpm_chip_list, list) {
if (pos->vendor.miscdev.minor == minor) {
chip = pos;
get_device(chip->dev);
break;
}
}
rcu_read_unlock();
if (!chip)
return -ENODEV;
if (test_and_set_bit(0, &chip->is_open)) {
dev_dbg(chip->dev, "Another process owns this TPM\n");
put_device(chip->dev);
return -EBUSY;
}
chip->data_buffer = kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL);
if (chip->data_buffer == NULL) {
clear_bit(0, &chip->is_open);
put_device(chip->dev);
return -ENOMEM;
}
atomic_set(&chip->data_pending, 0);
file->private_data = chip;
return 0;
} | 1 | [
"CWE-200"
] | linux | 1309d7afbed112f0e8e90be9af975550caa0076b | 298,741,035,352,776,100,000,000,000,000,000,000,000 | 36 | char/tpm: Fix unitialized usage of data buffer
This patch fixes information leakage to the userspace by initializing
the data buffer to zero.
Reported-by: Peter Huewe <huewe.external@infineon.com>
Signed-off-by: Peter Huewe <huewe.external@infineon.com>
Signed-off-by: Marcel Selhorst <m.selhorst@sirrix.com>
[ Also removed the silly "* sizeof(u8)". If that isn't 1, we have way
deeper problems than a simple multiplication can fix. - Linus ]
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> |
_save_user_settings (GdmSessionWorker *worker,
const char *home_dir)
{
GError *error;
if (!gdm_session_settings_is_loaded (worker->priv->user_settings)) {
return;
}
error = NULL;
if (!gdm_session_settings_save (worker->priv->user_settings,
home_dir, &error)) {
g_warning ("could not save session and language settings: %s",
error->message);
g_error_free (error);
}
} | 1 | [] | gdm | c25ef9245be4e0be2126ef3d075df4401949b570 | 306,929,041,855,611,160,000,000,000,000,000,000,000 | 17 | Store the face and dmrc files in a cache. Refer to bug #565151. |
gdm_session_settings_load (GdmSessionSettings *settings,
const char *home_directory,
GError **error)
{
GKeyFile *key_file;
GError *load_error;
gboolean is_loaded;
char *session_name;
char *language_name;
char *layout_name;
char *filename;
g_return_val_if_fail (settings != NULL, FALSE);
g_return_val_if_fail (home_directory != NULL, FALSE);
g_return_val_if_fail (!gdm_session_settings_is_loaded (settings), FALSE);
filename = g_build_filename (home_directory, ".dmrc", NULL);
is_loaded = FALSE;
key_file = g_key_file_new ();
load_error = NULL;
if (!g_key_file_load_from_file (key_file, filename,
G_KEY_FILE_NONE, &load_error)) {
g_propagate_error (error, load_error);
goto out;
}
session_name = g_key_file_get_string (key_file, "Desktop", "Session",
&load_error);
if (session_name != NULL) {
gdm_session_settings_set_session_name (settings, session_name);
g_free (session_name);
} else if (g_error_matches (load_error, G_KEY_FILE_ERROR, G_KEY_FILE_ERROR_KEY_NOT_FOUND)) {
g_error_free (load_error);
load_error = NULL;
} else {
g_propagate_error (error, load_error);
goto out;
}
language_name = g_key_file_get_string (key_file, "Desktop", "Language",
&load_error);
if (language_name != NULL) {
gdm_session_settings_set_language_name (settings, language_name);
g_free (language_name);
} else if (g_error_matches (load_error, G_KEY_FILE_ERROR, G_KEY_FILE_ERROR_KEY_NOT_FOUND)) {
g_error_free (load_error);
load_error = NULL;
} else {
g_propagate_error (error, load_error);
goto out;
}
layout_name = g_key_file_get_string (key_file, "Desktop", "Layout",
&load_error);
if (layout_name != NULL) {
gdm_session_settings_set_layout_name (settings, layout_name);
g_free (layout_name);
} else if (g_error_matches (load_error, G_KEY_FILE_ERROR, G_KEY_FILE_ERROR_KEY_NOT_FOUND)) {
g_error_free (load_error);
load_error = NULL;
} else {
g_propagate_error (error, load_error);
goto out;
}
is_loaded = TRUE;
out:
g_key_file_free (key_file);
g_free (filename);
return is_loaded;
} | 1 | [] | gdm | c25ef9245be4e0be2126ef3d075df4401949b570 | 244,182,928,452,744,940,000,000,000,000,000,000,000 | 76 | Store the face and dmrc files in a cache. Refer to bug #565151. |
gdm_user_render_icon (GdmUser *user,
gint icon_size)
{
GdkPixbuf *pixbuf;
GdkPixbuf *framed;
char *path;
char *tmp;
gboolean res;
g_return_val_if_fail (GDM_IS_USER (user), NULL);
g_return_val_if_fail (icon_size > 12, NULL);
path = NULL;
pixbuf = render_icon_from_home (user, icon_size);
if (pixbuf != NULL) {
goto out;
}
/* Try ${GlobalFaceDir}/${username} */
path = g_build_filename (GLOBAL_FACEDIR, user->user_name, NULL);
res = check_user_file (path,
user->uid,
MAX_FILE_SIZE,
RELAX_GROUP,
RELAX_OTHER);
if (res) {
pixbuf = gdk_pixbuf_new_from_file_at_size (path,
icon_size,
icon_size,
NULL);
} else {
pixbuf = NULL;
}
g_free (path);
if (pixbuf != NULL) {
goto out;
}
/* Finally, ${GlobalFaceDir}/${username}.png */
tmp = g_strconcat (user->user_name, ".png", NULL);
path = g_build_filename (GLOBAL_FACEDIR, tmp, NULL);
g_free (tmp);
res = check_user_file (path,
user->uid,
MAX_FILE_SIZE,
RELAX_GROUP,
RELAX_OTHER);
if (res) {
pixbuf = gdk_pixbuf_new_from_file_at_size (path,
icon_size,
icon_size,
NULL);
} else {
pixbuf = NULL;
}
g_free (path);
out:
if (pixbuf != NULL) {
framed = frame_pixbuf (pixbuf);
if (framed != NULL) {
g_object_unref (pixbuf);
pixbuf = framed;
}
}
return pixbuf;
} | 1 | [] | gdm | c25ef9245be4e0be2126ef3d075df4401949b570 | 220,037,533,270,129,500,000,000,000,000,000,000,000 | 70 | Store the face and dmrc files in a cache. Refer to bug #565151. |
gdm_session_worker_start_user_session (GdmSessionWorker *worker,
GError **error)
{
struct passwd *passwd_entry;
pid_t session_pid;
int error_code;
g_debug ("GdmSessionWorker: querying pam for user environment");
gdm_session_worker_update_environment_from_pam (worker);
register_ck_session (worker);
passwd_entry = getpwnam (worker->priv->username);
#ifdef HAVE_LOGINDEVPERM
/*
* Only do logindevperm processing if /dev/console or
* a device associated with a VT
*/
if (worker->priv->display_device != NULL &&
(strncmp (worker->priv->display_device, "/dev/vt/", strlen ("/dev/vt/")) == 0 ||
strcmp (worker->priv->display_device, "/dev/console") == 0)) {
g_debug ("Logindevperm login for user %s, device %s",
worker->priv->username,
worker->priv->display_device);
(void) di_devperm_login (worker->priv->display_device,
passwd_entry->pw_uid,
passwd_entry->pw_gid,
NULL);
}
#endif /* HAVE_LOGINDEVPERM */
g_debug ("GdmSessionWorker: opening user session with program '%s'",
worker->priv->arguments[0]);
error_code = PAM_SUCCESS;
session_pid = fork ();
if (session_pid < 0) {
g_set_error (error,
GDM_SESSION_WORKER_ERROR,
GDM_SESSION_WORKER_ERROR_OPENING_SESSION,
"%s", g_strerror (errno));
error_code = PAM_ABORT;
goto out;
}
if (session_pid == 0) {
char **environment;
char *home_dir;
int fd;
if (setuid (worker->priv->uid) < 0) {
g_debug ("GdmSessionWorker: could not reset uid - %s", g_strerror (errno));
_exit (1);
}
if (setsid () < 0) {
g_debug ("GdmSessionWorker: could not set pid '%u' as leader of new session and process group - %s",
(guint) getpid (), g_strerror (errno));
_exit (2);
}
environment = gdm_session_worker_get_environment (worker);
g_assert (geteuid () == getuid ());
home_dir = g_hash_table_lookup (worker->priv->environment,
"HOME");
if ((home_dir == NULL) || g_chdir (home_dir) < 0) {
g_chdir ("/");
}
fd = open ("/dev/null", O_RDWR);
dup2 (fd, STDIN_FILENO);
close (fd);
fd = _open_session_log (home_dir);
dup2 (fd, STDOUT_FILENO);
dup2 (fd, STDERR_FILENO);
close (fd);
_save_user_settings (worker, home_dir);
gdm_session_execute (worker->priv->arguments[0],
worker->priv->arguments,
environment,
TRUE);
g_debug ("GdmSessionWorker: child '%s' could not be started - %s",
worker->priv->arguments[0],
g_strerror (errno));
g_strfreev (environment);
_exit (127);
}
worker->priv->child_pid = session_pid;
g_debug ("GdmSessionWorker: session opened creating reply...");
g_assert (sizeof (GPid) <= sizeof (int));
g_debug ("GdmSessionWorker: state SESSION_STARTED");
worker->priv->state = GDM_SESSION_WORKER_STATE_SESSION_STARTED;
gdm_session_worker_watch_child (worker);
out:
if (error_code != PAM_SUCCESS) {
gdm_session_worker_uninitialize_pam (worker, error_code);
return FALSE;
}
return TRUE;
} | 1 | [] | gdm | c25ef9245be4e0be2126ef3d075df4401949b570 | 223,008,347,016,847,300,000,000,000,000,000,000,000 | 117 | Store the face and dmrc files in a cache. Refer to bug #565151. |
_change_user (GdmSessionWorker *worker,
uid_t uid,
gid_t gid)
{
gboolean ret;
ret = FALSE;
#ifdef THE_MAN_PAGE_ISNT_LYING
/* pam_setcred wants to be called as the authenticated user
* but pam_open_session needs to be called as super-user.
*
* Set the real uid and gid to the user and give the user a
* temporary super-user effective id.
*/
if (setreuid (uid, GDM_SESSION_ROOT_UID) < 0) {
return FALSE;
}
#endif
worker->priv->uid = uid;
if (setgid (gid) < 0) {
return FALSE;
}
if (initgroups (worker->priv->username, gid) < 0) {
return FALSE;
}
return TRUE;
} | 1 | [] | gdm | c25ef9245be4e0be2126ef3d075df4401949b570 | 55,887,595,463,552,430,000,000,000,000,000,000,000 | 31 | Store the face and dmrc files in a cache. Refer to bug #565151. |
attempt_to_load_user_settings (GdmSessionWorker *worker,
const char *username)
{
struct passwd *passwd_entry;
uid_t old_uid;
gid_t old_gid;
old_uid = geteuid ();
old_gid = getegid ();
passwd_entry = getpwnam (username);
/* User input isn't a valid username
*/
if (passwd_entry == NULL) {
return;
}
/* We may get called late in the pam conversation after
* the user has already been authenticated. This could
* happen if for instance, the user's home directory isn't
* available until late in the pam conversation so user
* settings couldn't get loaded until late in the conversation.
* If we get called late the seteuid/setgid calls here will fail,
* but that's okay, because we'll already be the uid/gid we want
* to be.
*/
setegid (passwd_entry->pw_gid);
seteuid (passwd_entry->pw_uid);
gdm_session_settings_load (worker->priv->user_settings,
passwd_entry->pw_dir,
NULL);
seteuid (old_uid);
setegid (old_gid);
} | 1 | [] | gdm | c25ef9245be4e0be2126ef3d075df4401949b570 | 170,513,602,404,851,200,000,000,000,000,000,000,000 | 37 | Store the face and dmrc files in a cache. Refer to bug #565151. |
render_icon_from_home (GdmUser *user,
int icon_size)
{
GdkPixbuf *retval;
char *path;
gboolean is_local;
gboolean is_autofs;
gboolean res;
char *filesystem_type;
is_local = FALSE;
/* special case: look at parent of home to detect autofs
this is so we don't try to trigger an automount */
path = g_path_get_dirname (user->home_dir);
filesystem_type = get_filesystem_type (path);
is_autofs = (filesystem_type != NULL && strcmp (filesystem_type, "autofs") == 0);
g_free (filesystem_type);
g_free (path);
if (is_autofs) {
return NULL;
}
/* now check that home dir itself is local */
filesystem_type = get_filesystem_type (user->home_dir);
is_local = ((filesystem_type != NULL) &&
(strcmp (filesystem_type, "nfs") != 0) &&
(strcmp (filesystem_type, "afs") != 0) &&
(strcmp (filesystem_type, "autofs") != 0) &&
(strcmp (filesystem_type, "unknown") != 0) &&
(strcmp (filesystem_type, "ncpfs") != 0));
g_free (filesystem_type);
/* only look at local home directories so we don't try to
read from remote (e.g. NFS) volumes */
if (! is_local) {
return NULL;
}
/* First, try "~/.face" */
path = g_build_filename (user->home_dir, ".face", NULL);
res = check_user_file (path,
user->uid,
MAX_FILE_SIZE,
RELAX_GROUP,
RELAX_OTHER);
if (res) {
retval = gdk_pixbuf_new_from_file_at_size (path,
icon_size,
icon_size,
NULL);
} else {
retval = NULL;
}
g_free (path);
/* Next, try "~/.face.icon" */
if (retval == NULL) {
path = g_build_filename (user->home_dir,
".face.icon",
NULL);
res = check_user_file (path,
user->uid,
MAX_FILE_SIZE,
RELAX_GROUP,
RELAX_OTHER);
if (res) {
retval = gdk_pixbuf_new_from_file_at_size (path,
icon_size,
icon_size,
NULL);
} else {
retval = NULL;
}
g_free (path);
}
/* Still nothing, try the user's personal GDM config */
if (retval == NULL) {
path = g_build_filename (user->home_dir,
".gnome",
"gdm",
NULL);
res = check_user_file (path,
user->uid,
MAX_FILE_SIZE,
RELAX_GROUP,
RELAX_OTHER);
if (res) {
GKeyFile *keyfile;
char *icon_path;
keyfile = g_key_file_new ();
g_key_file_load_from_file (keyfile,
path,
G_KEY_FILE_NONE,
NULL);
icon_path = g_key_file_get_string (keyfile,
"face",
"picture",
NULL);
res = check_user_file (icon_path,
user->uid,
MAX_FILE_SIZE,
RELAX_GROUP,
RELAX_OTHER);
if (icon_path && res) {
retval = gdk_pixbuf_new_from_file_at_size (path,
icon_size,
icon_size,
NULL);
} else {
retval = NULL;
}
g_free (icon_path);
g_key_file_free (keyfile);
} else {
retval = NULL;
}
g_free (path);
}
return retval;
} | 1 | [] | gdm | c25ef9245be4e0be2126ef3d075df4401949b570 | 99,492,356,848,916,130,000,000,000,000,000,000,000 | 129 | Store the face and dmrc files in a cache. Refer to bug #565151. |
gdm_session_worker_uninitialize_pam (GdmSessionWorker *worker,
int status)
{
g_debug ("GdmSessionWorker: uninitializing PAM");
if (worker->priv->pam_handle == NULL)
return;
if (worker->priv->state >= GDM_SESSION_WORKER_STATE_SESSION_OPENED) {
pam_close_session (worker->priv->pam_handle, 0);
gdm_session_auditor_report_logout (worker->priv->auditor);
#ifdef HAVE_LOGINDEVPERM
/*
* Only do logindevperm processing if /dev/console or
* a device associated with a VT
*/
if (worker->priv->display_device != NULL &&
(strncmp (worker->priv->display_device, "/dev/vt/", strlen ("/dev/vt/")) == 0 ||
strcmp (worker->priv->display_device, "/dev/console") == 0)) {
g_debug ("Logindevperm logout for user %s, device %s",
worker->priv->username,
worker->priv->display_device);
(void) di_devperm_logout (worker->priv->display_device);
}
#endif /* HAVE_LOGINDEVPERM */
} else {
void *p;
if ((pam_get_item (worker->priv->pam_handle, PAM_USER, &p)) == PAM_SUCCESS) {
gdm_session_auditor_set_username (worker->priv->auditor, (const char *)p);
}
gdm_session_auditor_report_login_failure (worker->priv->auditor,
status,
pam_strerror (worker->priv->pam_handle, status));
}
if (worker->priv->state >= GDM_SESSION_WORKER_STATE_ACCREDITED) {
pam_setcred (worker->priv->pam_handle, PAM_DELETE_CRED);
}
pam_end (worker->priv->pam_handle, status);
worker->priv->pam_handle = NULL;
gdm_session_worker_stop_auditor (worker);
g_debug ("GdmSessionWorker: state NONE");
worker->priv->state = GDM_SESSION_WORKER_STATE_NONE;
} | 1 | [] | gdm | c25ef9245be4e0be2126ef3d075df4401949b570 | 209,889,407,189,912,480,000,000,000,000,000,000,000 | 51 | Store the face and dmrc files in a cache. Refer to bug #565151. |
check_user_file (const char *filename,
uid_t user,
gssize max_file_size,
gboolean relax_group,
gboolean relax_other)
{
struct stat fileinfo;
if (max_file_size < 0) {
max_file_size = G_MAXSIZE;
}
/* Exists/Readable? */
if (stat (filename, &fileinfo) < 0) {
return FALSE;
}
/* Is a regular file */
if (G_UNLIKELY (!S_ISREG (fileinfo.st_mode))) {
return FALSE;
}
/* Owned by user? */
if (G_UNLIKELY (fileinfo.st_uid != user)) {
return FALSE;
}
/* Group not writable or relax_group? */
if (G_UNLIKELY ((fileinfo.st_mode & S_IWGRP) == S_IWGRP && !relax_group)) {
return FALSE;
}
/* Other not writable or relax_other? */
if (G_UNLIKELY ((fileinfo.st_mode & S_IWOTH) == S_IWOTH && !relax_other)) {
return FALSE;
}
/* Size is kosher? */
if (G_UNLIKELY (fileinfo.st_size > max_file_size)) {
return FALSE;
}
return TRUE;
} | 1 | [] | gdm | c25ef9245be4e0be2126ef3d075df4401949b570 | 110,109,748,805,067,890,000,000,000,000,000,000,000 | 44 | Store the face and dmrc files in a cache. Refer to bug #565151. |
network_init ()
{
#ifdef HAVE_GNUTLS
char *ca_path, *ca_path2;
gnutls_global_init ();
gnutls_certificate_allocate_credentials (&gnutls_xcred);
ca_path = string_expand_home (CONFIG_STRING(config_network_gnutls_ca_file));
if (ca_path)
{
ca_path2 = string_replace (ca_path, "%h", weechat_home);
if (ca_path2)
{
gnutls_certificate_set_x509_trust_file (gnutls_xcred, ca_path2,
GNUTLS_X509_FMT_PEM);
free (ca_path2);
}
free (ca_path);
}
gnutls_certificate_client_set_retrieve_function (gnutls_xcred,
&hook_connect_gnutls_set_certificates);
network_init_ok = 1;
#endif
#ifdef HAVE_GCRYPT
gcry_check_version (GCRYPT_VERSION);
gcry_control (GCRYCTL_DISABLE_SECMEM, 0);
gcry_control (GCRYCTL_INITIALIZATION_FINISHED, 0);
#endif
} | 1 | [
"CWE-20"
] | weechat | c265cad1c95b84abfd4e8d861f25926ef13b5d91 | 85,114,822,597,843,440,000,000,000,000,000,000,000 | 30 | Fix verification of SSL certificates by calling gnutls verify callback (patch #7459) |
irc_server_gnutls_callback (void *data, gnutls_session_t tls_session,
const gnutls_datum_t *req_ca, int nreq,
const gnutls_pk_algorithm_t *pk_algos,
int pk_algos_len, gnutls_retr_st *answer)
{
struct t_irc_server *server;
gnutls_retr_st tls_struct;
gnutls_x509_crt_t cert_temp;
const gnutls_datum_t *cert_list;
gnutls_datum_t filedatum;
unsigned int cert_list_len, status;
time_t cert_time;
char *cert_path0, *cert_path1, *cert_path2, *cert_str, *hostname;
const char *weechat_dir;
int rc, ret, i, j, hostname_match;
#if LIBGNUTLS_VERSION_NUMBER >= 0x010706
gnutls_datum_t cinfo;
int rinfo;
#endif
/* make C compiler happy */
(void) req_ca;
(void) nreq;
(void) pk_algos;
(void) pk_algos_len;
rc = 0;
if (!data)
return -1;
server = (struct t_irc_server *) data;
hostname = server->current_address;
hostname_match = 0;
weechat_printf (server->buffer,
_("gnutls: connected using %d-bit Diffie-Hellman shared "
"secret exchange"),
IRC_SERVER_OPTION_INTEGER (server,
IRC_SERVER_OPTION_SSL_DHKEY_SIZE));
if (gnutls_certificate_verify_peers2 (tls_session, &status) < 0)
{
weechat_printf (server->buffer,
_("%sgnutls: error while checking peer's certificate"),
weechat_prefix ("error"));
rc = -1;
}
else
{
/* some checks */
if (status & GNUTLS_CERT_INVALID)
{
weechat_printf (server->buffer,
_("%sgnutls: peer's certificate is NOT trusted"),
weechat_prefix ("error"));
rc = -1;
}
else
{
weechat_printf (server->buffer,
_("gnutls: peer's certificate is trusted"));
}
if (status & GNUTLS_CERT_SIGNER_NOT_FOUND)
{
weechat_printf (server->buffer,
_("%sgnutls: peer's certificate issuer is unknown"),
weechat_prefix ("error"));
rc = -1;
}
if (status & GNUTLS_CERT_REVOKED)
{
weechat_printf (server->buffer,
_("%sgnutls: the certificate has been revoked"),
weechat_prefix ("error"));
rc = -1;
}
/* check certificates */
if (gnutls_x509_crt_init (&cert_temp) >= 0)
{
cert_list = gnutls_certificate_get_peers (tls_session, &cert_list_len);
if (cert_list)
{
weechat_printf (server->buffer,
NG_("gnutls: receiving %d certificate",
"gnutls: receiving %d certificates",
cert_list_len),
cert_list_len);
for (i = 0, j = (int) cert_list_len; i < j; i++)
{
if (gnutls_x509_crt_import (cert_temp, &cert_list[i], GNUTLS_X509_FMT_DER) >= 0)
{
/* checking if hostname matches in the first certificate */
if (i == 0 && gnutls_x509_crt_check_hostname (cert_temp, hostname) != 0)
{
hostname_match = 1;
}
#if LIBGNUTLS_VERSION_NUMBER >= 0x010706
/* displaying infos about certificate */
#if LIBGNUTLS_VERSION_NUMBER < 0x020400
rinfo = gnutls_x509_crt_print (cert_temp, GNUTLS_X509_CRT_ONELINE, &cinfo);
#else
rinfo = gnutls_x509_crt_print (cert_temp, GNUTLS_CRT_PRINT_ONELINE, &cinfo);
#endif
if (rinfo == 0)
{
weechat_printf (server->buffer,
_(" - certificate[%d] info:"), i + 1);
weechat_printf (server->buffer,
" - %s", cinfo.data);
gnutls_free (cinfo.data);
}
#endif
/* check expiration date */
cert_time = gnutls_x509_crt_get_expiration_time (cert_temp);
if (cert_time < time(NULL))
{
weechat_printf (server->buffer,
_("%sgnutls: certificate has expired"),
weechat_prefix ("error"));
rc = -1;
}
/* check expiration date */
cert_time = gnutls_x509_crt_get_activation_time (cert_temp);
if (cert_time > time(NULL))
{
weechat_printf (server->buffer,
_("%sgnutls: certificate is not yet activated"),
weechat_prefix ("error"));
rc = -1;
}
}
}
if (hostname_match == 0)
{
weechat_printf (server->buffer,
_("%sgnutls: the hostname in the "
"certificate does NOT match \"%s\""),
weechat_prefix ("error"), hostname);
rc = -1;
}
}
}
}
/* using client certificate if it exists */
cert_path0 = (char *) IRC_SERVER_OPTION_STRING(server,
IRC_SERVER_OPTION_SSL_CERT);
if (cert_path0 && cert_path0[0])
{
weechat_dir = weechat_info_get ("weechat_dir", "");
cert_path1 = weechat_string_replace (cert_path0, "%h", weechat_dir);
cert_path2 = (cert_path1) ?
weechat_string_expand_home (cert_path1) : NULL;
if (cert_path2)
{
cert_str = weechat_file_get_content (cert_path2);
if (cert_str)
{
weechat_printf (server->buffer,
_("gnutls: sending one certificate"));
filedatum.data = (unsigned char *) cert_str;
filedatum.size = strlen (cert_str);
/* certificate */
gnutls_x509_crt_init (&server->tls_cert);
gnutls_x509_crt_import (server->tls_cert, &filedatum,
GNUTLS_X509_FMT_PEM);
/* key */
gnutls_x509_privkey_init (&server->tls_cert_key);
ret = gnutls_x509_privkey_import (server->tls_cert_key,
&filedatum,
GNUTLS_X509_FMT_PEM);
if (ret < 0)
{
ret = gnutls_x509_privkey_import_pkcs8 (server->tls_cert_key,
&filedatum,
GNUTLS_X509_FMT_PEM,
NULL,
GNUTLS_PKCS_PLAIN);
}
if (ret < 0)
{
weechat_printf (server->buffer,
_("%sgnutls: invalid certificate \"%s\", "
"error: %s"),
weechat_prefix ("error"), cert_path2,
gnutls_strerror (ret));
rc = -1;
}
else
{
tls_struct.type = GNUTLS_CRT_X509;
tls_struct.ncerts = 1;
tls_struct.deinit_all = 0;
tls_struct.cert.x509 = &server->tls_cert;
tls_struct.key.x509 = server->tls_cert_key;
#if LIBGNUTLS_VERSION_NUMBER >= 0x010706
/* client certificate info */
#if LIBGNUTLS_VERSION_NUMBER < 0x020400
rinfo = gnutls_x509_crt_print (server->tls_cert,
GNUTLS_X509_CRT_ONELINE,
&cinfo);
#else
rinfo = gnutls_x509_crt_print (server->tls_cert,
GNUTLS_CRT_PRINT_ONELINE,
&cinfo);
#endif
if (rinfo == 0)
{
weechat_printf (server->buffer,
_(" - client certificate info (%s):"),
cert_path2);
weechat_printf (server->buffer, " - %s", cinfo.data);
gnutls_free (cinfo.data);
}
#endif
memcpy (answer, &tls_struct, sizeof (gnutls_retr_st));
free (cert_str);
}
}
else
{
weechat_printf (server->buffer,
_("%sgnutls: unable to read certifcate \"%s\""),
weechat_prefix ("error"), cert_path2);
}
}
if (cert_path1)
free (cert_path1);
if (cert_path2)
free (cert_path2);
}
/* an error should stop the handshake unless the user doesn't care */
if ((rc == -1)
&& (IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_SSL_VERIFY) == 0))
{
rc = 0;
}
return rc;
} | 1 | [
"CWE-20"
] | weechat | c265cad1c95b84abfd4e8d861f25926ef13b5d91 | 126,645,762,687,895,800,000,000,000,000,000,000,000 | 247 | Fix verification of SSL certificates by calling gnutls verify callback (patch #7459) |
hook_connect_gnutls_set_certificates (gnutls_session_t tls_session,
                                      const gnutls_datum_t *req_ca, int nreq,
                                      const gnutls_pk_algorithm_t *pk_algos,
                                      int pk_algos_len,
                                      gnutls_retr_st *answer)
{
    struct t_hook *hook;
    int rc = -1;

    /*
     * Locate the (single) connect hook whose stored gnutls session pointer
     * matches this TLS session, then delegate to its gnutls callback so it
     * can fill in the certificate "answer" structure.  Returns the
     * callback's result, or -1 when no matching hook is found.
     */
    for (hook = weechat_hooks[HOOK_TYPE_CONNECT]; hook; hook = hook->next_hook)
    {
        if (!hook->deleted
            && HOOK_CONNECT(hook, gnutls_sess)
            && (*(HOOK_CONNECT(hook, gnutls_sess)) == tls_session))
        {
            rc = (int) (HOOK_CONNECT(hook, gnutls_cb))
                (hook->callback_data, tls_session, req_ca, nreq,
                 pk_algos, pk_algos_len, answer);
            break;
        }
    }

    return rc;
}
"CWE-20"
] | weechat | c265cad1c95b84abfd4e8d861f25926ef13b5d91 | 248,931,123,191,713,030,000,000,000,000,000,000,000 | 28 | Fix verification of SSL certificates by calling gnutls verify callback (patch #7459) |
/*
 * ARPT_SO_SET_REPLACE handler: copy a replacement table from userspace,
 * validate/translate it, and swap it in.  Returns 0 on success or a
 * negative errno.
 */
static int do_replace(struct net *net, const void __user *user,
		      unsigned int len)
{
	int ret;
	struct arpt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct arpt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	/* tmp.name comes straight from userspace and may lack a NUL
	 * terminator; force one so later "%s" uses (e.g. in
	 * request_module()) cannot leak adjacent kernel stack memory. */
	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	duprintf("arp_tables: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
"CWE-200"
] | linux-2.6 | 42eab94fff18cb1091d3501cd284d6bd6cc9c143 | 334,420,326,663,364,100,000,000,000,000,000,000,000 | 47 | netfilter: arp_tables: fix infoleak to userspace
Structures ipt_replace, compat_ipt_replace, and xt_get_revision are
copied from userspace. Fields of these structs that are
zero-terminated strings are not checked. When they are used as argument
to a format string containing "%s" in request_module(), some sensitive
information is leaked to userspace via argument of spawned modprobe
process.
The first bug was introduced before the git epoch; the second is
introduced by 6b7d31fc (v2.6.15-rc1); the third is introduced by
6b7d31fc (v2.6.15-rc1). To trigger the bug one should have
CAP_NET_ADMIN.
Signed-off-by: Vasiliy Kulikov <segoon@openwall.com>
Signed-off-by: Patrick McHardy <kaber@trash.net> |
/*
 * 32-bit-compat variant of the ARPT_SO_SET_REPLACE handler: copy a
 * compat-layout replacement table from userspace, translate it to the
 * native layout, and swap it in.  Returns 0 on success or a negative
 * errno.
 */
static int compat_do_replace(struct net *net, void __user *user,
			     unsigned int len)
{
	int ret;
	struct compat_arpt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct arpt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	/* tmp.name comes straight from userspace and may lack a NUL
	 * terminator; force one so later "%s" uses cannot leak adjacent
	 * kernel stack memory to userspace. */
	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
"CWE-200"
] | linux-2.6 | 42eab94fff18cb1091d3501cd284d6bd6cc9c143 | 156,237,724,386,010,550,000,000,000,000,000,000,000 | 51 | netfilter: arp_tables: fix infoleak to userspace
Structures ipt_replace, compat_ipt_replace, and xt_get_revision are
copied from userspace. Fields of these structs that are
zero-terminated strings are not checked. When they are used as argument
to a format string containing "%s" in request_module(), some sensitive
information is leaked to userspace via argument of spawned modprobe
process.
The first bug was introduced before the git epoch; the second is
introduced by 6b7d31fc (v2.6.15-rc1); the third is introduced by
6b7d31fc (v2.6.15-rc1). To trigger the bug one should have
CAP_NET_ADMIN.
Signed-off-by: Vasiliy Kulikov <segoon@openwall.com>
Signed-off-by: Patrick McHardy <kaber@trash.net> |
/*
 * getsockopt() dispatcher for arp_tables (CAP_NET_ADMIN required).
 * Handles table info/entry queries and target revision lookups.
 */
static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case ARPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case ARPT_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		/* rev.name is userspace-supplied and may lack a NUL
		 * terminator; force one before it is used as a "%s"
		 * argument to request_module(), otherwise adjacent stack
		 * bytes would leak into the spawned modprobe's argv. */
		rev.name[sizeof(rev.name)-1] = 0;

		try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name,
							 rev.revision, 1, &ret),
					"arpt_%s", rev.name);
		break;
	}

	default:
		duprintf("do_arpt_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
"CWE-200"
] | linux-2.6 | 42eab94fff18cb1091d3501cd284d6bd6cc9c143 | 27,359,996,194,193,083,000,000,000,000,000,000,000 | 41 | netfilter: arp_tables: fix infoleak to userspace
Structures ipt_replace, compat_ipt_replace, and xt_get_revision are
copied from userspace. Fields of these structs that are
zero-terminated strings are not checked. When they are used as argument
to a format string containing "%s" in request_module(), some sensitive
information is leaked to userspace via argument of spawned modprobe
process.
The first bug was introduced before the git epoch; the second is
introduced by 6b7d31fc (v2.6.15-rc1); the third is introduced by
6b7d31fc (v2.6.15-rc1). To trigger the bug one should have
CAP_NET_ADMIN.
Signed-off-by: Vasiliy Kulikov <segoon@openwall.com>
Signed-off-by: Patrick McHardy <kaber@trash.net> |
do_replace(struct net *net, const void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	/*
	 * IPT_SO_SET_REPLACE handler: copy a replacement table from
	 * userspace, validate/translate it, and swap it in.
	 */
	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	/* tmp.name comes straight from userspace and may lack a NUL
	 * terminator; force one so later "%s" uses (e.g. in
	 * request_module()) cannot leak adjacent kernel stack memory. */
	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	duprintf("Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
"CWE-200"
] | linux-2.6 | 78b79876761b86653df89c48a7010b5cbd41a84a | 281,108,937,860,126,430,000,000,000,000,000,000,000 | 46 | netfilter: ip_tables: fix infoleak to userspace
Structures ipt_replace, compat_ipt_replace, and xt_get_revision are
copied from userspace. Fields of these structs that are
zero-terminated strings are not checked. When they are used as argument
to a format string containing "%s" in request_module(), some sensitive
information is leaked to userspace via argument of spawned modprobe
process.
The first and the third bugs were introduced before the git epoch; the
second was introduced in 2722971c (v2.6.17-rc1). To trigger the bug
one should have CAP_NET_ADMIN.
Signed-off-by: Vasiliy Kulikov <segoon@openwall.com>
Signed-off-by: Patrick McHardy <kaber@trash.net> |