CVE ID | CVE Page | CWE ID | codeLink | commit_id | commit_message | func_after | func_before | lang | project | vul |
---|---|---|---|---|---|---|---|---|---|---|
CVE-2018-17467 | https://www.cvedetails.com/cve/CVE-2018-17467/ | CWE-20 | https://github.com/chromium/chromium/commit/7da6c3419fd172405bcece1ae4ec6ec8316cd345 | 7da6c3419fd172405bcece1ae4ec6ec8316cd345 | Start rendering timer after first navigation
Currently the new content rendering timer in the browser process,
which clears an old page's contents 4 seconds after a navigation if the
new page doesn't draw in that time, is not set on the first navigation
for a top-level frame.
This is problematic because content can exist before the first
navigation, for instance if it was created by a javascript: URL.
This CL removes the code that skips the timer activation on the first
navigation.
Bug: 844881
Change-Id: I19b3ad1ff62c69ded3a5f7b1c0afde191aaf4584
Reviewed-on: https://chromium-review.googlesource.com/1188589
Reviewed-by: Fady Samuel <fsamuel@chromium.org>
Reviewed-by: ccameron <ccameron@chromium.org>
Commit-Queue: Ken Buchanan <kenrb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#586913} | void SimulateGesturePinchUpdateEvent(float scale,
float anchorX,
float anchorY,
int modifiers) {
SimulateGestureEventCore(SyntheticWebGestureEventBuilder::BuildPinchUpdate(
scale, anchorX, anchorY, modifiers,
blink::kWebGestureDeviceTouchscreen));
}
| void SimulateGesturePinchUpdateEvent(float scale,
float anchorX,
float anchorY,
int modifiers) {
SimulateGestureEventCore(SyntheticWebGestureEventBuilder::BuildPinchUpdate(
scale, anchorX, anchorY, modifiers,
blink::kWebGestureDeviceTouchscreen));
}
| C | Chrome | 0 |
CVE-2017-7864 | https://www.cvedetails.com/cve/CVE-2017-7864/ | CWE-787 | https://git.savannah.gnu.org/cgit/freetype/freetype2.git/commit/?id=e6699596af5c5d6f0ae0ea06e19df87dce088df8 | e6699596af5c5d6f0ae0ea06e19df87dce088df8 | null | tt_size_run_prep( TT_Size size,
FT_Bool pedantic )
{
TT_Face face = (TT_Face)size->root.face;
TT_ExecContext exec;
FT_Error error;
exec = size->context;
error = TT_Load_Context( exec, face, size );
if ( error )
return error;
exec->callTop = 0;
exec->top = 0;
exec->instruction_trap = FALSE;
exec->pedantic_hinting = pedantic;
TT_Set_CodeRange( exec,
tt_coderange_cvt,
face->cvt_program,
(FT_Long)face->cvt_program_size );
TT_Clear_CodeRange( exec, tt_coderange_glyph );
if ( face->cvt_program_size > 0 )
{
TT_Goto_CodeRange( exec, tt_coderange_cvt, 0 );
FT_TRACE4(( "Executing `prep' table.\n" ));
error = face->interpreter( exec );
#ifdef FT_DEBUG_LEVEL_TRACE
if ( error )
FT_TRACE4(( " interpretation failed with error code 0x%x\n",
error ));
#endif
}
else
error = FT_Err_Ok;
size->cvt_ready = error;
/* UNDOCUMENTED! The MS rasterizer doesn't allow the following */
/* graphics state variables to be modified by the CVT program. */
exec->GS.dualVector.x = 0x4000;
exec->GS.dualVector.y = 0;
exec->GS.projVector.x = 0x4000;
exec->GS.projVector.y = 0x0;
exec->GS.freeVector.x = 0x4000;
exec->GS.freeVector.y = 0x0;
exec->GS.rp0 = 0;
exec->GS.rp1 = 0;
exec->GS.rp2 = 0;
exec->GS.gep0 = 1;
exec->GS.gep1 = 1;
exec->GS.gep2 = 1;
exec->GS.loop = 1;
/* save as default graphics state */
size->GS = exec->GS;
TT_Save_Context( exec, size );
return error;
}
| tt_size_run_prep( TT_Size size,
FT_Bool pedantic )
{
TT_Face face = (TT_Face)size->root.face;
TT_ExecContext exec;
FT_Error error;
exec = size->context;
error = TT_Load_Context( exec, face, size );
if ( error )
return error;
exec->callTop = 0;
exec->top = 0;
exec->instruction_trap = FALSE;
exec->pedantic_hinting = pedantic;
TT_Set_CodeRange( exec,
tt_coderange_cvt,
face->cvt_program,
(FT_Long)face->cvt_program_size );
TT_Clear_CodeRange( exec, tt_coderange_glyph );
if ( face->cvt_program_size > 0 )
{
TT_Goto_CodeRange( exec, tt_coderange_cvt, 0 );
FT_TRACE4(( "Executing `prep' table.\n" ));
error = face->interpreter( exec );
#ifdef FT_DEBUG_LEVEL_TRACE
if ( error )
FT_TRACE4(( " interpretation failed with error code 0x%x\n",
error ));
#endif
}
else
error = FT_Err_Ok;
size->cvt_ready = error;
/* UNDOCUMENTED! The MS rasterizer doesn't allow the following */
/* graphics state variables to be modified by the CVT program. */
exec->GS.dualVector.x = 0x4000;
exec->GS.dualVector.y = 0;
exec->GS.projVector.x = 0x4000;
exec->GS.projVector.y = 0x0;
exec->GS.freeVector.x = 0x4000;
exec->GS.freeVector.y = 0x0;
exec->GS.rp0 = 0;
exec->GS.rp1 = 0;
exec->GS.rp2 = 0;
exec->GS.gep0 = 1;
exec->GS.gep1 = 1;
exec->GS.gep2 = 1;
exec->GS.loop = 1;
/* save as default graphics state */
size->GS = exec->GS;
TT_Save_Context( exec, size );
return error;
}
| C | savannah | 0 |
CVE-2017-9051 | https://www.cvedetails.com/cve/CVE-2017-9051/ | CWE-476 | https://github.com/libav/libav/commit/fe6eea99efac66839052af547426518efd970b24 | fe6eea99efac66839052af547426518efd970b24 | nsvdec: don't ignore the return value of av_get_packet()
Fixes invalid reads with corrupted files.
CC: libav-stable@libav.org
Bug-Id: 1039 | static int nsv_parse_NSVf_header(AVFormatContext *s)
{
NSVContext *nsv = s->priv_data;
AVIOContext *pb = s->pb;
unsigned int av_unused file_size;
unsigned int size;
int64_t duration;
int strings_size;
int table_entries;
int table_entries_used;
nsv->state = NSV_UNSYNC; /* in case we fail */
size = avio_rl32(pb);
if (size < 28)
return -1;
nsv->NSVf_end = size;
file_size = (uint32_t)avio_rl32(pb);
av_log(s, AV_LOG_TRACE, "NSV NSVf chunk_size %u\n", size);
av_log(s, AV_LOG_TRACE, "NSV NSVf file_size %u\n", file_size);
nsv->duration = duration = avio_rl32(pb); /* in ms */
av_log(s, AV_LOG_TRACE, "NSV NSVf duration %"PRId64" ms\n", duration);
strings_size = avio_rl32(pb);
table_entries = avio_rl32(pb);
table_entries_used = avio_rl32(pb);
av_log(s, AV_LOG_TRACE, "NSV NSVf info-strings size: %d, table entries: %d, bis %d\n",
strings_size, table_entries, table_entries_used);
if (pb->eof_reached)
return -1;
av_log(s, AV_LOG_TRACE, "NSV got header; filepos %"PRId64"\n", avio_tell(pb));
if (strings_size > 0) {
char *strings; /* last byte will be '\0' to play safe with str*() */
char *p, *endp;
char *token, *value;
char quote;
p = strings = av_mallocz((size_t)strings_size + 1);
if (!p)
return AVERROR(ENOMEM);
endp = strings + strings_size;
avio_read(pb, strings, strings_size);
while (p < endp) {
while (*p == ' ')
p++; /* strip out spaces */
if (p >= endp-2)
break;
token = p;
p = strchr(p, '=');
if (!p || p >= endp-2)
break;
*p++ = '\0';
quote = *p++;
value = p;
p = strchr(p, quote);
if (!p || p >= endp)
break;
*p++ = '\0';
av_log(s, AV_LOG_TRACE, "NSV NSVf INFO: %s='%s'\n", token, value);
av_dict_set(&s->metadata, token, value, 0);
}
av_free(strings);
}
if (pb->eof_reached)
return -1;
av_log(s, AV_LOG_TRACE, "NSV got infos; filepos %"PRId64"\n", avio_tell(pb));
if (table_entries_used > 0) {
int i;
nsv->index_entries = table_entries_used;
if((unsigned)table_entries_used >= UINT_MAX / sizeof(uint32_t))
return -1;
nsv->nsvs_file_offset = av_malloc((unsigned)table_entries_used * sizeof(uint32_t));
if (!nsv->nsvs_file_offset)
return AVERROR(ENOMEM);
for(i=0;i<table_entries_used;i++)
nsv->nsvs_file_offset[i] = avio_rl32(pb) + size;
if(table_entries > table_entries_used &&
avio_rl32(pb) == MKTAG('T','O','C','2')) {
nsv->nsvs_timestamps = av_malloc((unsigned)table_entries_used*sizeof(uint32_t));
if (!nsv->nsvs_timestamps)
return AVERROR(ENOMEM);
for(i=0;i<table_entries_used;i++) {
nsv->nsvs_timestamps[i] = avio_rl32(pb);
}
}
}
av_log(s, AV_LOG_TRACE, "NSV got index; filepos %"PRId64"\n", avio_tell(pb));
avio_seek(pb, nsv->base_offset + size, SEEK_SET); /* required for dumbdriving-271.nsv (2 extra bytes) */
if (pb->eof_reached)
return -1;
nsv->state = NSV_HAS_READ_NSVF;
return 0;
}
| static int nsv_parse_NSVf_header(AVFormatContext *s)
{
NSVContext *nsv = s->priv_data;
AVIOContext *pb = s->pb;
unsigned int av_unused file_size;
unsigned int size;
int64_t duration;
int strings_size;
int table_entries;
int table_entries_used;
nsv->state = NSV_UNSYNC; /* in case we fail */
size = avio_rl32(pb);
if (size < 28)
return -1;
nsv->NSVf_end = size;
file_size = (uint32_t)avio_rl32(pb);
av_log(s, AV_LOG_TRACE, "NSV NSVf chunk_size %u\n", size);
av_log(s, AV_LOG_TRACE, "NSV NSVf file_size %u\n", file_size);
nsv->duration = duration = avio_rl32(pb); /* in ms */
av_log(s, AV_LOG_TRACE, "NSV NSVf duration %"PRId64" ms\n", duration);
strings_size = avio_rl32(pb);
table_entries = avio_rl32(pb);
table_entries_used = avio_rl32(pb);
av_log(s, AV_LOG_TRACE, "NSV NSVf info-strings size: %d, table entries: %d, bis %d\n",
strings_size, table_entries, table_entries_used);
if (pb->eof_reached)
return -1;
av_log(s, AV_LOG_TRACE, "NSV got header; filepos %"PRId64"\n", avio_tell(pb));
if (strings_size > 0) {
char *strings; /* last byte will be '\0' to play safe with str*() */
char *p, *endp;
char *token, *value;
char quote;
p = strings = av_mallocz((size_t)strings_size + 1);
if (!p)
return AVERROR(ENOMEM);
endp = strings + strings_size;
avio_read(pb, strings, strings_size);
while (p < endp) {
while (*p == ' ')
p++; /* strip out spaces */
if (p >= endp-2)
break;
token = p;
p = strchr(p, '=');
if (!p || p >= endp-2)
break;
*p++ = '\0';
quote = *p++;
value = p;
p = strchr(p, quote);
if (!p || p >= endp)
break;
*p++ = '\0';
av_log(s, AV_LOG_TRACE, "NSV NSVf INFO: %s='%s'\n", token, value);
av_dict_set(&s->metadata, token, value, 0);
}
av_free(strings);
}
if (pb->eof_reached)
return -1;
av_log(s, AV_LOG_TRACE, "NSV got infos; filepos %"PRId64"\n", avio_tell(pb));
if (table_entries_used > 0) {
int i;
nsv->index_entries = table_entries_used;
if((unsigned)table_entries_used >= UINT_MAX / sizeof(uint32_t))
return -1;
nsv->nsvs_file_offset = av_malloc((unsigned)table_entries_used * sizeof(uint32_t));
if (!nsv->nsvs_file_offset)
return AVERROR(ENOMEM);
for(i=0;i<table_entries_used;i++)
nsv->nsvs_file_offset[i] = avio_rl32(pb) + size;
if(table_entries > table_entries_used &&
avio_rl32(pb) == MKTAG('T','O','C','2')) {
nsv->nsvs_timestamps = av_malloc((unsigned)table_entries_used*sizeof(uint32_t));
if (!nsv->nsvs_timestamps)
return AVERROR(ENOMEM);
for(i=0;i<table_entries_used;i++) {
nsv->nsvs_timestamps[i] = avio_rl32(pb);
}
}
}
av_log(s, AV_LOG_TRACE, "NSV got index; filepos %"PRId64"\n", avio_tell(pb));
avio_seek(pb, nsv->base_offset + size, SEEK_SET); /* required for dumbdriving-271.nsv (2 extra bytes) */
if (pb->eof_reached)
return -1;
nsv->state = NSV_HAS_READ_NSVF;
return 0;
}
| C | libav | 0 |
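The function above is unchanged context from the same demuxer; the actual fix lives in the packet-reading path. A minimal sketch of the pattern the commit message names, using a hypothetical wrapper (not the verbatim libav diff):

```c
#include <libavformat/avformat.h>

/* av_get_packet() returns the number of bytes read, or a negative AVERROR.
 * Ignoring that result lets later code index past the bytes actually read. */
static int read_payload(AVFormatContext *s, AVPacket *pkt, int size)
{
    int ret = av_get_packet(s->pb, pkt, size);
    if (ret < 0)
        return ret;  /* propagate the I/O error instead of ignoring it */
    /* On a short read, use the returned count (and pkt->size), never `size`. */
    return ret;
}
```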
CVE-2013-7421 | https://www.cvedetails.com/cve/CVE-2013-7421/ | CWE-264 | https://github.com/torvalds/linux/commit/5d26a105b5a73e5635eae0629b42fa0a90e07b7b | 5d26a105b5a73e5635eae0629b42fa0a90e07b7b | crypto: prefix module autoloading with "crypto-"
This prefixes all crypto module loading with "crypto-" so we never run
the risk of exposing module auto-loading to userspace via a crypto API,
as demonstrated by Mathias Krause:
https://lkml.org/lkml/2013/3/4/70
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> | struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
{
struct crypto_alg *alg;
struct crypto_alg *larval;
int ok;
if (!((type | mask) & CRYPTO_ALG_TESTED)) {
type |= CRYPTO_ALG_TESTED;
mask |= CRYPTO_ALG_TESTED;
}
larval = crypto_larval_lookup(name, type, mask);
if (IS_ERR(larval) || !crypto_is_larval(larval))
return larval;
ok = crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval);
if (ok == NOTIFY_STOP)
alg = crypto_larval_wait(larval);
else {
crypto_mod_put(larval);
alg = ERR_PTR(-ENOENT);
}
crypto_larval_kill(larval);
return alg;
}
| struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
{
struct crypto_alg *alg;
struct crypto_alg *larval;
int ok;
if (!((type | mask) & CRYPTO_ALG_TESTED)) {
type |= CRYPTO_ALG_TESTED;
mask |= CRYPTO_ALG_TESTED;
}
larval = crypto_larval_lookup(name, type, mask);
if (IS_ERR(larval) || !crypto_is_larval(larval))
return larval;
ok = crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval);
if (ok == NOTIFY_STOP)
alg = crypto_larval_wait(larval);
else {
crypto_mod_put(larval);
alg = ERR_PTR(-ENOENT);
}
crypto_larval_kill(larval);
return alg;
}
| C | linux | 0 |
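The commit message describes a namespacing scheme rather than a change to the lookup function shown above. A hedged sketch of the two halves of that scheme (the macro name matches the commit; the request helper is illustrative):

```c
#include <linux/crypto.h>
#include <linux/kmod.h>
#include <linux/module.h>

/* Requester side: ask for a namespaced module name, so a userspace-supplied
 * algorithm string can only load modules that registered a crypto alias. */
static void crypto_request_alg_module(const char *name)
{
	/* before the fix this was effectively request_module("%s", name) */
	request_module("crypto-%s", name);
}

/* Provider side: a cipher module opts in by registering the prefixed alias
 * (MODULE_ALIAS_CRYPTO was introduced by this commit). */
MODULE_ALIAS_CRYPTO("aes");	/* makes the module loadable as "crypto-aes" */
```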
CVE-2018-16427 | https://www.cvedetails.com/cve/CVE-2018-16427/ | CWE-125 | https://github.com/OpenSC/OpenSC/pull/1447/commits/8fe377e93b4b56060e5bbfb6f3142ceaeca744fa | 8fe377e93b4b56060e5bbfb6f3142ceaeca744fa | fixed out of bounds reads
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting and suggesting security fixes. | static int coolkey_initialize(sc_card_t *card)
{
int r;
coolkey_private_data_t *priv = NULL;
coolkey_life_cycle_t life_cycle;
coolkey_object_info_t object_info;
int combined_processed = 0;
/* already found? */
if (card->drv_data) {
return SC_SUCCESS;
}
sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE,"Coolkey Applet found");
priv = coolkey_new_private_data();
if (priv == NULL) {
r = SC_ERROR_OUT_OF_MEMORY;
goto cleanup;
}
r = coolkey_get_life_cycle(card, &life_cycle);
if (r < 0) {
goto cleanup;
}
/* Select a coolkey read the coolkey objects out */
r = coolkey_select_applet(card);
if (r < 0) {
goto cleanup;
}
priv->protocol_version_major = life_cycle.protocol_version_major;
priv->protocol_version_minor = life_cycle.protocol_version_minor;
priv->pin_count = life_cycle.pin_count;
priv->life_cycle = life_cycle.life_cycle;
/* walk down the list of objects and read them off the token */
for(r=coolkey_list_object(card, COOLKEY_LIST_RESET, &object_info); r >= 0;
r= coolkey_list_object(card, COOLKEY_LIST_NEXT, &object_info)) {
unsigned long object_id = bebytes2ulong(object_info.object_id);
unsigned short object_len = bebytes2ulong(object_info.object_length);
/* also look at the ACL... */
/* the combined object is a single object that can store the other objects.
* most coolkeys provisioned by TPS has a single combined object that is
* compressed greatly increasing the effectiveness of compress (since lots
* of certs on the token share the same Subject and Issuer DN's). We now
* process it separately so that we can have both combined objects managed
* by TPS and user managed certs on the same token */
if (object_id == COOLKEY_COMBINED_OBJECT_ID) {
u8 *object = malloc(object_len);
if (object == NULL) {
r = SC_ERROR_OUT_OF_MEMORY;
break;
}
r = coolkey_read_object(card, COOLKEY_COMBINED_OBJECT_ID, 0, object, object_len,
priv->nonce, sizeof(priv->nonce));
if (r < 0) {
free(object);
break;
}
r = coolkey_process_combined_object(card, priv, object, r);
free(object);
if (r != SC_SUCCESS) {
break;
}
combined_processed = 1;
continue;
}
r = coolkey_add_object(priv, object_id, NULL, object_len, 0);
if (r != SC_SUCCESS)
sc_log(card->ctx, "coolkey_add_object() returned %d", r);
}
if (r != SC_ERROR_FILE_END_REACHED) {
goto cleanup;
}
/* if we didn't pull the cuid from the combined object, then grab it now */
if (!combined_processed) {
global_platform_cplc_data_t cplc_data;
/* select the card manager, because a card with applet only will have
already selected the coolkey applet */
r = gp_select_card_manager(card);
if (r < 0) {
goto cleanup;
}
r = coolkey_get_cplc_data(card, &cplc_data);
if (r < 0) {
goto cleanup;
}
coolkey_make_cuid_from_cplc(&priv->cuid, &cplc_data);
priv->token_name = (u8 *)strdup("COOLKEY");
if (priv->token_name == NULL) {
r= SC_ERROR_OUT_OF_MEMORY;
goto cleanup;
}
priv->token_name_length = sizeof("COOLKEY")-1;
}
card->drv_data = priv;
return SC_SUCCESS;
cleanup:
if (priv) {
coolkey_free_private_data(priv);
}
return r;
}
| static int coolkey_initialize(sc_card_t *card)
{
int r;
coolkey_private_data_t *priv = NULL;
coolkey_life_cycle_t life_cycle;
coolkey_object_info_t object_info;
int combined_processed = 0;
/* already found? */
if (card->drv_data) {
return SC_SUCCESS;
}
sc_debug(card->ctx, SC_LOG_DEBUG_VERBOSE,"Coolkey Applet found");
priv = coolkey_new_private_data();
if (priv == NULL) {
r = SC_ERROR_OUT_OF_MEMORY;
goto cleanup;
}
r = coolkey_get_life_cycle(card, &life_cycle);
if (r < 0) {
goto cleanup;
}
/* Select a coolkey read the coolkey objects out */
r = coolkey_select_applet(card);
if (r < 0) {
goto cleanup;
}
priv->protocol_version_major = life_cycle.protocol_version_major;
priv->protocol_version_minor = life_cycle.protocol_version_minor;
priv->pin_count = life_cycle.pin_count;
priv->life_cycle = life_cycle.life_cycle;
/* walk down the list of objects and read them off the token */
for(r=coolkey_list_object(card, COOLKEY_LIST_RESET, &object_info); r >= 0;
r= coolkey_list_object(card, COOLKEY_LIST_NEXT, &object_info)) {
unsigned long object_id = bebytes2ulong(object_info.object_id);
unsigned short object_len = bebytes2ulong(object_info.object_length);
/* also look at the ACL... */
/* the combined object is a single object that can store the other objects.
* most coolkeys provisioned by TPS has a single combined object that is
* compressed greatly increasing the effectiveness of compress (since lots
* of certs on the token share the same Subject and Issuer DN's). We now
* process it separately so that we can have both combined objects managed
* by TPS and user managed certs on the same token */
if (object_id == COOLKEY_COMBINED_OBJECT_ID) {
u8 *object = malloc(object_len);
if (object == NULL) {
r = SC_ERROR_OUT_OF_MEMORY;
break;
}
r = coolkey_read_object(card, COOLKEY_COMBINED_OBJECT_ID, 0, object, object_len,
priv->nonce, sizeof(priv->nonce));
if (r < 0) {
free(object);
break;
}
r = coolkey_process_combined_object(card, priv, object, r);
free(object);
if (r != SC_SUCCESS) {
break;
}
combined_processed = 1;
continue;
}
r = coolkey_add_object(priv, object_id, NULL, object_len, 0);
if (r != SC_SUCCESS)
sc_log(card->ctx, "coolkey_add_object() returned %d", r);
}
if (r != SC_ERROR_FILE_END_REACHED) {
goto cleanup;
}
/* if we didn't pull the cuid from the combined object, then grab it now */
if (!combined_processed) {
global_platform_cplc_data_t cplc_data;
/* select the card manager, because a card with applet only will have
already selected the coolkey applet */
r = gp_select_card_manager(card);
if (r < 0) {
goto cleanup;
}
r = coolkey_get_cplc_data(card, &cplc_data);
if (r < 0) {
goto cleanup;
}
coolkey_make_cuid_from_cplc(&priv->cuid, &cplc_data);
priv->token_name = (u8 *)strdup("COOLKEY");
if (priv->token_name == NULL) {
r= SC_ERROR_OUT_OF_MEMORY;
goto cleanup;
}
priv->token_name_length = sizeof("COOLKEY")-1;
}
card->drv_data = priv;
return SC_SUCCESS;
cleanup:
if (priv) {
coolkey_free_private_data(priv);
}
return r;
}
| C | OpenSC | 0 |
CVE-2018-1000040 | https://www.cvedetails.com/cve/CVE-2018-1000040/ | CWE-20 | http://git.ghostscript.com/?p=mupdf.git;a=commitdiff;h=83d4dae44c71816c084a635550acc1a51529b881;hp=f597300439e62f5e921f0d7b1e880b5c1a1f1607 | 83d4dae44c71816c084a635550acc1a51529b881 | null | clamp_lab_icc(const fz_colorspace *cs, const float *src, float *dst)
{
dst[0] = (fz_clamp(src[0], 0, 100)) / 100.0f;
dst[1] = (fz_clamp(src[1], -128, 127) + 128.0f) / 256;
dst[2] = (fz_clamp(src[2], -128, 127) + 128.0f) / 256;
}
| clamp_lab_icc(const fz_colorspace *cs, const float *src, float *dst)
{
dst[0] = (fz_clamp(src[0], 0, 100)) / 100.0f;
dst[1] = (fz_clamp(src[1], -128, 127) + 128.0f) / 256;
dst[2] = (fz_clamp(src[2], -128, 127) + 128.0f) / 256;
}
| C | ghostscript | 0 |
CVE-2018-6033 | https://www.cvedetails.com/cve/CVE-2018-6033/ | CWE-20 | https://github.com/chromium/chromium/commit/a8d6ae61d266d8bc44c3dd2d08bda32db701e359 | a8d6ae61d266d8bc44c3dd2d08bda32db701e359 | Downloads : Fixed an issue of opening incorrect download file
When one download overwrites another completed download, calling download.open in the old download causes the new download to open, which could be dangerous and undesirable. In this CL, we are trying to avoid this by blocking the opening of the old download.
Bug: 793620
Change-Id: Ic948175756700ad7c08489c3cc347330daedb6f8
Reviewed-on: https://chromium-review.googlesource.com/826477
Reviewed-by: David Trainor <dtrainor@chromium.org>
Reviewed-by: Xing Liu <xingliu@chromium.org>
Reviewed-by: John Abd-El-Malek <jam@chromium.org>
Commit-Queue: Shakti Sahu <shaktisahu@chromium.org>
Cr-Commit-Position: refs/heads/master@{#525810} | bool ChromeDownloadManagerDelegate::IsOpenInBrowserPreferreredForFile(
const base::FilePath& path) {
#if defined(OS_WIN) || defined(OS_LINUX) || defined(OS_MACOSX)
if (path.MatchesExtension(FILE_PATH_LITERAL(".pdf"))) {
return !download_prefs_->ShouldOpenPdfInSystemReader();
}
#endif
#if !defined(OS_ANDROID) && !defined(OS_CHROMEOS) && BUILDFLAG(ENABLE_PLUGINS)
if (path.MatchesExtension(FILE_PATH_LITERAL(".pdf")) ||
path.MatchesExtension(FILE_PATH_LITERAL(".htm")) ||
path.MatchesExtension(FILE_PATH_LITERAL(".html")) ||
path.MatchesExtension(FILE_PATH_LITERAL(".shtm")) ||
path.MatchesExtension(FILE_PATH_LITERAL(".shtml")) ||
path.MatchesExtension(FILE_PATH_LITERAL(".svg")) ||
path.MatchesExtension(FILE_PATH_LITERAL(".xht")) ||
path.MatchesExtension(FILE_PATH_LITERAL(".xhtm")) ||
path.MatchesExtension(FILE_PATH_LITERAL(".xhtml")) ||
path.MatchesExtension(FILE_PATH_LITERAL(".xsl")) ||
path.MatchesExtension(FILE_PATH_LITERAL(".xslt"))) {
return true;
}
#endif
return false;
}
| bool ChromeDownloadManagerDelegate::IsOpenInBrowserPreferreredForFile(
const base::FilePath& path) {
#if defined(OS_WIN) || defined(OS_LINUX) || defined(OS_MACOSX)
if (path.MatchesExtension(FILE_PATH_LITERAL(".pdf"))) {
return !download_prefs_->ShouldOpenPdfInSystemReader();
}
#endif
#if !defined(OS_ANDROID) && !defined(OS_CHROMEOS) && BUILDFLAG(ENABLE_PLUGINS)
if (path.MatchesExtension(FILE_PATH_LITERAL(".pdf")) ||
path.MatchesExtension(FILE_PATH_LITERAL(".htm")) ||
path.MatchesExtension(FILE_PATH_LITERAL(".html")) ||
path.MatchesExtension(FILE_PATH_LITERAL(".shtm")) ||
path.MatchesExtension(FILE_PATH_LITERAL(".shtml")) ||
path.MatchesExtension(FILE_PATH_LITERAL(".svg")) ||
path.MatchesExtension(FILE_PATH_LITERAL(".xht")) ||
path.MatchesExtension(FILE_PATH_LITERAL(".xhtm")) ||
path.MatchesExtension(FILE_PATH_LITERAL(".xhtml")) ||
path.MatchesExtension(FILE_PATH_LITERAL(".xsl")) ||
path.MatchesExtension(FILE_PATH_LITERAL(".xslt"))) {
return true;
}
#endif
return false;
}
| C | Chrome | 0 |
CVE-2016-4568 | https://www.cvedetails.com/cve/CVE-2016-4568/ | CWE-119 | https://github.com/torvalds/linux/commit/2c1f6951a8a82e6de0d82b1158b5e493fc6c54ab | 2c1f6951a8a82e6de0d82b1158b5e493fc6c54ab | [media] videobuf2-v4l2: Verify planes array in buffer dequeueing
When a buffer is being dequeued using VIDIOC_DQBUF IOCTL, the exact buffer
which will be dequeued is not known until the buffer has been removed from
the queue. The number of planes is specific to a buffer, not to the queue.
This does lead to the situation where multi-plane buffers may be requested
and queued with n planes, but VIDIOC_DQBUF IOCTL may be passed an argument
struct with fewer planes.
__fill_v4l2_buffer() however uses the number of planes from the dequeued
videobuf2 buffer, overwriting kernel memory (the m.planes array allocated
in video_usercopy() in v4l2-ioctl.c) if the user provided fewer
planes than the dequeued buffer had. Oops!
Fixes: b0e0e1f83de3 ("[media] media: videobuf2: Prepare to divide videobuf2")
Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Acked-by: Hans Verkuil <hans.verkuil@cisco.com>
Cc: stable@vger.kernel.org # for v4.4 and later
Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com> | static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
return 0;
/* Is memory for copying plane information present? */
if (b->m.planes == NULL) {
dprintk(1, "multi-planar buffer passed but "
"planes array not provided\n");
return -EINVAL;
}
if (b->length < vb->num_planes || b->length > VB2_MAX_PLANES) {
dprintk(1, "incorrect planes array length, "
"expected %d, got %d\n", vb->num_planes, b->length);
return -EINVAL;
}
return 0;
}
| static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
return 0;
/* Is memory for copying plane information present? */
if (b->m.planes == NULL) {
dprintk(1, "multi-planar buffer passed but "
"planes array not provided\n");
return -EINVAL;
}
if (b->length < vb->num_planes || b->length > VB2_MAX_PLANES) {
dprintk(1, "incorrect planes array length, "
"expected %d, got %d\n", vb->num_planes, b->length);
return -EINVAL;
}
return 0;
}
| C | linux | 0 |
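__verify_planes_array() above is the helper that performs the check; per the commit message, the fix is to run it against the specific buffer being dequeued. A hedged sketch of the wiring, consistent with the commit description (the exact hook point in videobuf2-core is not shown in this row):

```c
/* Expose the check as a vb2 buffer op so the core can validate the
 * caller-supplied planes array against the buffer actually removed from
 * the queue, before __fill_v4l2_buffer() copies plane info into it. */
static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
{
	return __verify_planes_array(vb, pb);
}

static const struct vb2_buf_ops v4l2_buf_ops = {
	.verify_planes_array	= __verify_planes_array_core,	/* new hook */
	/* .fill_user_buffer, .fill_vb2_buffer, .copy_timestamp, ... */
};
```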
CVE-2013-0839 | https://www.cvedetails.com/cve/CVE-2013-0839/ | CWE-399 | https://github.com/chromium/chromium/commit/dd3b6fe574edad231c01c78e4647a74c38dc4178 | dd3b6fe574edad231c01c78e4647a74c38dc4178 | Remove parent* arg from GDataEntry ctor.
* Remove static FromDocumentEntry from GDataEntry, GDataFile, GDataDirectory. Replace with InitFromDocumentEntry.
* Move common code from GDataFile::InitFromDocumentEntry and GDataDirectory::InitFromDocumentEntry to GDataEntry::InitFromDocumentEntry.
* Add GDataDirectoryService::FromDocumentEntry and use this everywhere.
* Make ctors of GDataFile, GDataDirectory private, so these must be created by GDataDirectoryService's CreateGDataFile and
CreateGDataDirectory. Make GDataEntry ctor protected.
BUG=141494
TEST=unit tests.
Review URL: https://chromiumcodereview.appspot.com/10854083
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@151008 0039d316-1c4b-4281-b951-d872f2087c98 | GDataWapiFeedProcessor::~GDataWapiFeedProcessor() {
}
| GDataWapiFeedProcessor::~GDataWapiFeedProcessor() {
}
| C | Chrome | 0 |
CVE-2014-7822 | https://www.cvedetails.com/cve/CVE-2014-7822/ | CWE-264 | https://github.com/torvalds/linux/commit/8d0207652cbe27d1f962050737848e5ad4671958 | 8d0207652cbe27d1f962050737848e5ad4671958 | ->splice_write() via ->write_iter()
iter_file_splice_write() - a ->splice_write() instance that gathers the
pipe buffers, builds a bio_vec-based iov_iter covering those and feeds
it to ->write_iter(). A bunch of simple cases coverted to that...
[AV: fixed the braino spotted by Cyrill]
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk> | static struct block_device *bd_acquire(struct inode *inode)
{
struct block_device *bdev;
spin_lock(&bdev_lock);
bdev = inode->i_bdev;
if (bdev) {
ihold(bdev->bd_inode);
spin_unlock(&bdev_lock);
return bdev;
}
spin_unlock(&bdev_lock);
bdev = bdget(inode->i_rdev);
if (bdev) {
spin_lock(&bdev_lock);
if (!inode->i_bdev) {
/*
* We take an additional reference to bd_inode,
* and it's released in clear_inode() of inode.
* So, we can access it via ->i_mapping always
* without igrab().
*/
ihold(bdev->bd_inode);
inode->i_bdev = bdev;
inode->i_mapping = bdev->bd_inode->i_mapping;
list_add(&inode->i_devices, &bdev->bd_inodes);
}
spin_unlock(&bdev_lock);
}
return bdev;
}
| static struct block_device *bd_acquire(struct inode *inode)
{
struct block_device *bdev;
spin_lock(&bdev_lock);
bdev = inode->i_bdev;
if (bdev) {
ihold(bdev->bd_inode);
spin_unlock(&bdev_lock);
return bdev;
}
spin_unlock(&bdev_lock);
bdev = bdget(inode->i_rdev);
if (bdev) {
spin_lock(&bdev_lock);
if (!inode->i_bdev) {
/*
* We take an additional reference to bd_inode,
* and it's released in clear_inode() of inode.
* So, we can access it via ->i_mapping always
* without igrab().
*/
ihold(bdev->bd_inode);
inode->i_bdev = bdev;
inode->i_mapping = bdev->bd_inode->i_mapping;
list_add(&inode->i_devices, &bdev->bd_inodes);
}
spin_unlock(&bdev_lock);
}
return bdev;
}
| C | linux | 0 |
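bd_acquire() above is incidental context; the commit message itself describes the new helper's data flow. A heavily simplified, hedged sketch of that flow (gather_pipe_buffers() is invented here, and the iov_iter direction flags vary across kernel versions):

```c
/* Sketch only: gather pipe buffers into a bio_vec array, wrap them in a
 * bvec-based iov_iter (no copying), and issue one vectored ->write_iter(). */
static ssize_t splice_write_sketch(struct pipe_inode_info *pipe,
				   struct file *out, loff_t *ppos, size_t len)
{
	struct bio_vec bvec[16];
	struct iov_iter from;
	struct kiocb kiocb;
	int n = gather_pipe_buffers(pipe, bvec, 16, &len);	/* hypothetical */

	iov_iter_bvec(&from, WRITE, bvec, n, len);
	init_sync_kiocb(&kiocb, out);
	kiocb.ki_pos = *ppos;
	return out->f_op->write_iter(&kiocb, &from);
}
```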
CVE-2015-1221 | https://www.cvedetails.com/cve/CVE-2015-1221/ | null | https://github.com/chromium/chromium/commit/a69c7b5d863dacbb08bfaa04359e3bc0bb4470dc | a69c7b5d863dacbb08bfaa04359e3bc0bb4470dc | Make TypingCommand::insertText() to take SelectionInDOMTree instead of VisibleSelection
This patch makes |TypingCommand::insertText()| to take |SelectionInDOMTree|
instead of |VisibleSelection| to reduce usage of |VisibleSelection| for
improving code health.
BUG=657237
TEST=n/a
Review-Url: https://codereview.chromium.org/2733183002
Cr-Commit-Position: refs/heads/master@{#455368} | void Editor::reappliedEditing(UndoStep* cmd) {
EventQueueScope scope;
dispatchEditableContentChangedEvents(cmd->startingRootEditableElement(),
cmd->endingRootEditableElement());
dispatchInputEventEditableContentChanged(
cmd->startingRootEditableElement(), cmd->endingRootEditableElement(),
InputEvent::InputType::HistoryRedo, nullAtom,
InputEvent::EventIsComposing::NotComposing);
frame().document()->updateStyleAndLayoutIgnorePendingStylesheets();
const VisibleSelection& newSelection =
correctedVisibleSelection(cmd->endingSelection());
DCHECK(newSelection.isValidFor(*frame().document())) << newSelection;
changeSelectionAfterCommand(
newSelection.asSelection(),
FrameSelection::CloseTyping | FrameSelection::ClearTypingStyle);
m_lastEditCommand = nullptr;
m_undoStack->registerUndoStep(cmd);
respondToChangedContents(newSelection.start());
}
| void Editor::reappliedEditing(UndoStep* cmd) {
EventQueueScope scope;
dispatchEditableContentChangedEvents(cmd->startingRootEditableElement(),
cmd->endingRootEditableElement());
dispatchInputEventEditableContentChanged(
cmd->startingRootEditableElement(), cmd->endingRootEditableElement(),
InputEvent::InputType::HistoryRedo, nullAtom,
InputEvent::EventIsComposing::NotComposing);
frame().document()->updateStyleAndLayoutIgnorePendingStylesheets();
const VisibleSelection& newSelection =
correctedVisibleSelection(cmd->endingSelection());
DCHECK(newSelection.isValidFor(*frame().document())) << newSelection;
changeSelectionAfterCommand(
newSelection.asSelection(),
FrameSelection::CloseTyping | FrameSelection::ClearTypingStyle);
m_lastEditCommand = nullptr;
m_undoStack->registerUndoStep(cmd);
respondToChangedContents(newSelection.start());
}
| C | Chrome | 0 |
CVE-2013-0886 | https://www.cvedetails.com/cve/CVE-2013-0886/ | null | https://github.com/chromium/chromium/commit/18d67244984a574ba2dd8779faabc0e3e34f4b76 | 18d67244984a574ba2dd8779faabc0e3e34f4b76 | Implement TextureImageTransportSurface using texture mailbox
This has a couple of advantages:
- allow tearing down and recreating the UI parent context without
losing the renderer contexts
- do not require a context to be able to generate textures when
creating the GLSurfaceHandle
- clearer ownership semantics that potentially allows for more
robust and easier lost context handling/thumbnailing/etc., since a texture is at
any given time owned by either: UI parent, mailbox, or
TextureImageTransportSurface
- simplify frontbuffer protection logic;
the frontbuffer textures are now owned by RWHV where they are refcounted
The TextureImageTransportSurface informs RenderWidgetHostView of the
mailbox names for the front- and backbuffer textures by
associating them with a surface_handle (1 or 2) in the AcceleratedSurfaceNew message.
During SwapBuffers() or PostSubBuffer() cycles, it then uses
produceTextureCHROMIUM() and consumeTextureCHROMIUM()
to transfer ownership between renderer and browser compositor.
RWHV sends back the surface_handle of the buffer being returned with the Swap ACK
(or 0 if no buffer is being returned in which case TextureImageTransportSurface will
allocate a new texture - note that this could be used to
simply keep textures for thumbnailing).
BUG=154815,139616
TBR=sky@chromium.org
Review URL: https://chromiumcodereview.appspot.com/11194042
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@171569 0039d316-1c4b-4281-b951-d872f2087c98 | void BrowserPluginGuest::UpdateRect(
RenderViewHost* render_view_host,
const ViewHostMsg_UpdateRect_Params& params) {
if (!params.needs_ack)
return;
if (((auto_size_enabled_ && InAutoSizeBounds(params.view_size)) ||
(params.view_size.width() == damage_view_size().width() &&
params.view_size.height() == damage_view_size().height())) &&
params.scale_factor == damage_buffer_scale_factor()) {
TransportDIB* dib = render_view_host->GetProcess()->
GetTransportDIB(params.bitmap);
if (dib) {
#if defined(OS_WIN)
size_t guest_damage_buffer_size = params.bitmap_rect.width() *
params.bitmap_rect.height() * 4;
size_t embedder_damage_buffer_size = damage_buffer_size_;
#else
size_t guest_damage_buffer_size = dib->size();
size_t embedder_damage_buffer_size = damage_buffer_->size();
#endif
void* guest_memory = dib->memory();
void* embedder_memory = damage_buffer_->memory();
size_t size = std::min(guest_damage_buffer_size,
embedder_damage_buffer_size);
memcpy(embedder_memory, guest_memory, size);
}
}
BrowserPluginMsg_UpdateRect_Params relay_params;
#if defined(OS_MACOSX)
relay_params.damage_buffer_identifier = damage_buffer_->id();
#elif defined(OS_WIN)
relay_params.damage_buffer_identifier = remote_damage_buffer_handle_;
#else
relay_params.damage_buffer_identifier = damage_buffer_->handle();
#endif
relay_params.bitmap_rect = params.bitmap_rect;
relay_params.scroll_delta = params.scroll_delta;
relay_params.scroll_rect = params.scroll_rect;
relay_params.copy_rects = params.copy_rects;
relay_params.view_size = params.view_size;
relay_params.scale_factor = params.scale_factor;
relay_params.is_resize_ack = ViewHostMsg_UpdateRect_Flags::is_resize_ack(
params.flags);
int message_id = pending_update_counter_++;
pending_updates_.AddWithID(render_view_host, message_id);
SendMessageToEmbedder(new BrowserPluginMsg_UpdateRect(embedder_routing_id(),
instance_id(),
message_id,
relay_params));
}
| void BrowserPluginGuest::UpdateRect(
RenderViewHost* render_view_host,
const ViewHostMsg_UpdateRect_Params& params) {
if (!params.needs_ack)
return;
if (((auto_size_enabled_ && InAutoSizeBounds(params.view_size)) ||
(params.view_size.width() == damage_view_size().width() &&
params.view_size.height() == damage_view_size().height())) &&
params.scale_factor == damage_buffer_scale_factor()) {
TransportDIB* dib = render_view_host->GetProcess()->
GetTransportDIB(params.bitmap);
if (dib) {
#if defined(OS_WIN)
size_t guest_damage_buffer_size = params.bitmap_rect.width() *
params.bitmap_rect.height() * 4;
size_t embedder_damage_buffer_size = damage_buffer_size_;
#else
size_t guest_damage_buffer_size = dib->size();
size_t embedder_damage_buffer_size = damage_buffer_->size();
#endif
void* guest_memory = dib->memory();
void* embedder_memory = damage_buffer_->memory();
size_t size = std::min(guest_damage_buffer_size,
embedder_damage_buffer_size);
memcpy(embedder_memory, guest_memory, size);
}
}
BrowserPluginMsg_UpdateRect_Params relay_params;
#if defined(OS_MACOSX)
relay_params.damage_buffer_identifier = damage_buffer_->id();
#elif defined(OS_WIN)
relay_params.damage_buffer_identifier = remote_damage_buffer_handle_;
#else
relay_params.damage_buffer_identifier = damage_buffer_->handle();
#endif
relay_params.bitmap_rect = params.bitmap_rect;
relay_params.scroll_delta = params.scroll_delta;
relay_params.scroll_rect = params.scroll_rect;
relay_params.copy_rects = params.copy_rects;
relay_params.view_size = params.view_size;
relay_params.scale_factor = params.scale_factor;
relay_params.is_resize_ack = ViewHostMsg_UpdateRect_Flags::is_resize_ack(
params.flags);
int message_id = pending_update_counter_++;
pending_updates_.AddWithID(render_view_host, message_id);
SendMessageToEmbedder(new BrowserPluginMsg_UpdateRect(embedder_routing_id(),
instance_id(),
message_id,
relay_params));
}
| C | Chrome | 0 |
CVE-2015-1191 | https://www.cvedetails.com/cve/CVE-2015-1191/ | CWE-22 | https://github.com/madler/pigz/commit/fdad1406b3ec809f4954ff7cdf9e99eb18c2458f | fdad1406b3ec809f4954ff7cdf9e99eb18c2458f | When decompressing with -N or -NT, strip any path from header name.
This uses the path of the compressed file combined with the name
from the header as the name of the decompressed output file. Any
path information in the header name is stripped. This avoids a
possible vulnerability where absolute or descending paths are put
in the gzip header. | local void writen(int desc, unsigned char *buf, size_t len)
{
ssize_t ret;
while (len) {
ret = write(desc, buf, len);
if (ret < 1) {
complain("write error code %d", errno);
bail("write error on ", g.outf);
}
buf += ret;
len -= ret;
}
}
| local void writen(int desc, unsigned char *buf, size_t len)
{
ssize_t ret;
while (len) {
ret = write(desc, buf, len);
if (ret < 1) {
complain("write error code %d", errno);
bail("write error on ", g.outf);
}
buf += ret;
len -= ret;
}
}
| C | pigz | 0 |
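writen() above is unchanged context; the path-stripping the commit message describes fits in a tiny helper. A hedged sketch (pigz's actual helper may differ in detail; `local` is pigz's alias for `static`):

```c
#include <string.h>

#define local static	/* pigz convention */

/* Keep only the basename of the name stored in the gzip header, so a
 * crafted header like "/etc/passwd" or "../../x" cannot steer where the
 * -N/-NT output file is written. */
local char *justname(char *path)
{
    char *slash = strrchr(path, '/');
    return slash == NULL ? path : slash + 1;
}
```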
CVE-2016-4998 | https://www.cvedetails.com/cve/CVE-2016-4998/ | CWE-119 | https://github.com/torvalds/linux/commit/6e94e0cfb0887e4013b3b930fa6ab1fe6bb6ba91 | 6e94e0cfb0887e4013b3b930fa6ab1fe6bb6ba91 | netfilter: x_tables: make sure e->next_offset covers remaining blob size
Otherwise this function may read data beyond the ruleset blob.
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> | static void cleanup_entry(struct ip6t_entry *e, struct net *net)
{
struct xt_tgdtor_param par;
struct xt_entry_target *t;
struct xt_entry_match *ematch;
/* Cleanup all matches */
xt_ematch_foreach(ematch, e)
cleanup_match(ematch, net);
t = ip6t_get_target(e);
par.net = net;
par.target = t->u.kernel.target;
par.targinfo = t->data;
par.family = NFPROTO_IPV6;
if (par.target->destroy != NULL)
par.target->destroy(&par);
module_put(par.target->me);
xt_percpu_counter_free(e->counters.pcnt);
}
| static void cleanup_entry(struct ip6t_entry *e, struct net *net)
{
struct xt_tgdtor_param par;
struct xt_entry_target *t;
struct xt_entry_match *ematch;
/* Cleanup all matches */
xt_ematch_foreach(ematch, e)
cleanup_match(ematch, net);
t = ip6t_get_target(e);
par.net = net;
par.target = t->u.kernel.target;
par.targinfo = t->data;
par.family = NFPROTO_IPV6;
if (par.target->destroy != NULL)
par.target->destroy(&par);
module_put(par.target->me);
xt_percpu_counter_free(e->counters.pcnt);
}
| C | linux | 0 |
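cleanup_entry() above is unchanged context; the check named in the commit message belongs in entry validation. A hedged sketch of the bound (the real diff sits in the entry-size checking path of ip_tables/ip6_tables):

```c
#include <linux/errno.h>
#include <linux/netfilter_ipv6/ip6_tables.h>

/* e->next_offset comes from userspace; reject values that are too small
 * (entries would overlap) or that point past the end of the ruleset blob. */
static int check_next_offset(const struct ip6t_entry *e,
			     const unsigned char *limit)
{
	if (e->next_offset < sizeof(struct ip6t_entry) +
			     sizeof(struct xt_entry_target))
		return -EINVAL;
	if ((const unsigned char *)e + e->next_offset > limit)
		return -EINVAL;
	return 0;
}
```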
CVE-2015-8324 | https://www.cvedetails.com/cve/CVE-2015-8324/ | null | https://github.com/torvalds/linux/commit/744692dc059845b2a3022119871846e74d4f6e11 | 744692dc059845b2a3022119871846e74d4f6e11 | ext4: use ext4_get_block_write in buffer write
Allocate uninitialized extent before ext4 buffer write and
convert the extent to initialized after io completes.
The purpose is to make sure an extent can only be marked
initialized after it has been written with new data so
we can safely drop the i_mutex lock in ext4 DIO read without
exposing stale data. This helps to improve multi-thread DIO
read performance on high-speed disks.
Skip the nobh and data=journal mount cases to make things simple for now.
Signed-off-by: Jiaying Zhang <jiayingz@google.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu> | long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
{
handle_t *handle;
ext4_lblk_t block;
loff_t new_size;
unsigned int max_blocks;
int ret = 0;
int ret2 = 0;
int retries = 0;
struct buffer_head map_bh;
unsigned int credits, blkbits = inode->i_blkbits;
/*
* currently supporting (pre)allocate mode for extent-based
* files _only_
*/
if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
return -EOPNOTSUPP;
/* preallocation to directories is currently not supported */
if (S_ISDIR(inode->i_mode))
return -ENODEV;
block = offset >> blkbits;
/*
* We can't just convert len to max_blocks because
* If blocksize = 4096 offset = 3072 and len = 2048
*/
max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
- block;
/*
* credits to insert 1 extent into extent tree
*/
credits = ext4_chunk_trans_blocks(inode, max_blocks);
mutex_lock(&inode->i_mutex);
retry:
while (ret >= 0 && ret < max_blocks) {
block = block + ret;
max_blocks = max_blocks - ret;
handle = ext4_journal_start(inode, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
break;
}
map_bh.b_state = 0;
ret = ext4_get_blocks(handle, inode, block,
max_blocks, &map_bh,
EXT4_GET_BLOCKS_CREATE_UNINIT_EXT);
if (ret <= 0) {
#ifdef EXT4FS_DEBUG
WARN_ON(ret <= 0);
printk(KERN_ERR "%s: ext4_ext_get_blocks "
"returned error inode#%lu, block=%u, "
"max_blocks=%u", __func__,
inode->i_ino, block, max_blocks);
#endif
ext4_mark_inode_dirty(handle, inode);
ret2 = ext4_journal_stop(handle);
break;
}
if ((block + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
blkbits) >> blkbits))
new_size = offset + len;
else
new_size = (block + ret) << blkbits;
ext4_falloc_update_inode(inode, mode, new_size,
buffer_new(&map_bh));
ext4_mark_inode_dirty(handle, inode);
ret2 = ext4_journal_stop(handle);
if (ret2)
break;
}
if (ret == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries)) {
ret = 0;
goto retry;
}
mutex_unlock(&inode->i_mutex);
return ret > 0 ? ret2 : ret;
}
| long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
{
handle_t *handle;
ext4_lblk_t block;
loff_t new_size;
unsigned int max_blocks;
int ret = 0;
int ret2 = 0;
int retries = 0;
struct buffer_head map_bh;
unsigned int credits, blkbits = inode->i_blkbits;
/*
* currently supporting (pre)allocate mode for extent-based
* files _only_
*/
if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
return -EOPNOTSUPP;
/* preallocation to directories is currently not supported */
if (S_ISDIR(inode->i_mode))
return -ENODEV;
block = offset >> blkbits;
/*
* We can't just convert len to max_blocks because
* If blocksize = 4096 offset = 3072 and len = 2048
*/
max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
- block;
/*
* credits to insert 1 extent into extent tree
*/
credits = ext4_chunk_trans_blocks(inode, max_blocks);
mutex_lock(&inode->i_mutex);
retry:
while (ret >= 0 && ret < max_blocks) {
block = block + ret;
max_blocks = max_blocks - ret;
handle = ext4_journal_start(inode, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
break;
}
map_bh.b_state = 0;
ret = ext4_get_blocks(handle, inode, block,
max_blocks, &map_bh,
EXT4_GET_BLOCKS_CREATE_UNINIT_EXT);
if (ret <= 0) {
#ifdef EXT4FS_DEBUG
WARN_ON(ret <= 0);
printk(KERN_ERR "%s: ext4_ext_get_blocks "
"returned error inode#%lu, block=%u, "
"max_blocks=%u", __func__,
inode->i_ino, block, max_blocks);
#endif
ext4_mark_inode_dirty(handle, inode);
ret2 = ext4_journal_stop(handle);
break;
}
if ((block + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
blkbits) >> blkbits))
new_size = offset + len;
else
new_size = (block + ret) << blkbits;
ext4_falloc_update_inode(inode, mode, new_size,
buffer_new(&map_bh));
ext4_mark_inode_dirty(handle, inode);
ret2 = ext4_journal_stop(handle);
if (ret2)
break;
}
if (ret == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries)) {
ret = 0;
goto retry;
}
mutex_unlock(&inode->i_mutex);
return ret > 0 ? ret2 : ret;
}
| C | linux | 0 |
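The fallocate path above is tangential; the ordering the commit message describes is the interesting part. A toy model with invented names (not the ext4 code) that captures the invariant:

```c
/* Invariant: an extent stays UNWRITTEN from allocation until the write I/O
 * completes, so a concurrent read returns zeroes, never stale disk blocks.
 * Only the end-io handler flips the state to INITIALIZED. */
enum extent_state { EXTENT_UNWRITTEN, EXTENT_INITIALIZED };

struct extent { enum extent_state state; };

static void write_begin(struct extent *ex)      /* buffer write starts    */
{
	ex->state = EXTENT_UNWRITTEN;           /* space reserved, no data */
}

static int read_is_real_data(const struct extent *ex)
{
	return ex->state == EXTENT_INITIALIZED; /* otherwise read zeroes   */
}

static void write_end_io(struct extent *ex)     /* after data hits disk   */
{
	ex->state = EXTENT_INITIALIZED;         /* now safe to expose      */
}
```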
CVE-2018-16427 | https://www.cvedetails.com/cve/CVE-2018-16427/ | CWE-125 | https://github.com/OpenSC/OpenSC/pull/1447/commits/8fe377e93b4b56060e5bbfb6f3142ceaeca744fa | 8fe377e93b4b56060e5bbfb6f3142ceaeca744fa | fixed out of bounds reads
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting and suggesting security fixes. | authentic_match_card(struct sc_card *card)
{
struct sc_context *ctx = card->ctx;
int i;
sc_log_hex(ctx, "try to match card with ATR", card->atr.value, card->atr.len);
i = _sc_match_atr(card, authentic_known_atrs, &card->type);
if (i < 0) {
sc_log(ctx, "card not matched");
return 0;
}
sc_log(ctx, "'%s' card matched", authentic_known_atrs[i].name);
return 1;
}
| authentic_match_card(struct sc_card *card)
{
struct sc_context *ctx = card->ctx;
int i;
sc_log_hex(ctx, "try to match card with ATR", card->atr.value, card->atr.len);
i = _sc_match_atr(card, authentic_known_atrs, &card->type);
if (i < 0) {
sc_log(ctx, "card not matched");
return 0;
}
sc_log(ctx, "'%s' card matched", authentic_known_atrs[i].name);
return 1;
}
| C | OpenSC | 0 |
CVE-2016-1690 | https://www.cvedetails.com/cve/CVE-2016-1690/ | null | https://github.com/chromium/chromium/commit/9de81f45c73a8f9f215fc234a6adfe087b0eab74 | 9de81f45c73a8f9f215fc234a6adfe087b0eab74 | Remove WeakPtrFactory from PasswordAutofillAgent
Unlike in AutofillAgent, the factory is no longer used in PAA.
R=dvadym@chromium.org
BUG=609010,609007,608100,608101,433486
Review-Url: https://codereview.chromium.org/1945723003
Cr-Commit-Position: refs/heads/master@{#391475} | void PasswordAutofillAgent::FrameClosing() {
for (auto const& iter : web_input_to_password_info_) {
password_to_username_.erase(iter.second.password_field);
}
web_input_to_password_info_.clear();
provisionally_saved_form_.reset();
nonscript_modified_values_.clear();
}
| void PasswordAutofillAgent::FrameClosing() {
for (auto const& iter : web_input_to_password_info_) {
password_to_username_.erase(iter.second.password_field);
}
web_input_to_password_info_.clear();
provisionally_saved_form_.reset();
nonscript_modified_values_.clear();
}
| C | Chrome | 0 |
CVE-2017-7177 | https://www.cvedetails.com/cve/CVE-2017-7177/ | CWE-358 | https://github.com/inliniac/suricata/commit/4a04f814b15762eb446a5ead4d69d021512df6f8 | 4a04f814b15762eb446a5ead4d69d021512df6f8 | defrag - take protocol into account during re-assembly
The IP protocol was not being used to match fragments with
their packets allowing a carefully constructed packet
with a different protocol to be matched, allowing re-assembly
to complete, creating a packet that would not be re-assembled
by the destination host. | IPV6BuildTestPacket(uint8_t proto, uint32_t id, uint16_t off, int mf,
    const char content, int content_len)
{
Packet *p = NULL;
uint8_t *pcontent;
IPV6Hdr ip6h;
p = SCCalloc(1, sizeof(*p) + default_packet_size);
if (unlikely(p == NULL))
return NULL;
PACKET_INITIALIZE(p);
gettimeofday(&p->ts, NULL);
ip6h.s_ip6_nxt = 44;
ip6h.s_ip6_hlim = 2;
/* Source and dest address - very bogus addresses. */
ip6h.s_ip6_src[0] = 0x01010101;
ip6h.s_ip6_src[1] = 0x01010101;
ip6h.s_ip6_src[2] = 0x01010101;
ip6h.s_ip6_src[3] = 0x01010101;
ip6h.s_ip6_dst[0] = 0x02020202;
ip6h.s_ip6_dst[1] = 0x02020202;
ip6h.s_ip6_dst[2] = 0x02020202;
ip6h.s_ip6_dst[3] = 0x02020202;
/* copy content_len crap, we need full length */
PacketCopyData(p, (uint8_t *)&ip6h, sizeof(IPV6Hdr));
p->ip6h = (IPV6Hdr *)GET_PKT_DATA(p);
IPV6_SET_RAW_VER(p->ip6h, 6);
/* Fragmentation header. */
IPV6FragHdr *fh = (IPV6FragHdr *)(GET_PKT_DATA(p) + sizeof(IPV6Hdr));
fh->ip6fh_nxt = proto;
fh->ip6fh_ident = htonl(id);
fh->ip6fh_offlg = htons((off << 3) | mf);
DecodeIPV6FragHeader(p, (uint8_t *)fh, 8, 8 + content_len, 0);
pcontent = SCCalloc(1, content_len);
if (unlikely(pcontent == NULL))
return NULL;
memset(pcontent, content, content_len);
PacketCopyDataOffset(p, sizeof(IPV6Hdr) + sizeof(IPV6FragHdr), pcontent, content_len);
SET_PKT_LEN(p, sizeof(IPV6Hdr) + sizeof(IPV6FragHdr) + content_len);
SCFree(pcontent);
p->ip6h->s_ip6_plen = htons(sizeof(IPV6FragHdr) + content_len);
SET_IPV6_SRC_ADDR(p, &p->src);
SET_IPV6_DST_ADDR(p, &p->dst);
/* Self test. */
if (IPV6_GET_VER(p) != 6)
goto error;
if (IPV6_GET_NH(p) != 44)
goto error;
if (IPV6_GET_PLEN(p) != sizeof(IPV6FragHdr) + content_len)
goto error;
return p;
error:
fprintf(stderr, "Error building test packet.\n");
if (p != NULL)
SCFree(p);
return NULL;
}
| IPV6BuildTestPacket(uint32_t id, uint16_t off, int mf, const char content,
int content_len)
{
Packet *p = NULL;
uint8_t *pcontent;
IPV6Hdr ip6h;
p = SCCalloc(1, sizeof(*p) + default_packet_size);
if (unlikely(p == NULL))
return NULL;
PACKET_INITIALIZE(p);
gettimeofday(&p->ts, NULL);
ip6h.s_ip6_nxt = 44;
ip6h.s_ip6_hlim = 2;
/* Source and dest address - very bogus addresses. */
ip6h.s_ip6_src[0] = 0x01010101;
ip6h.s_ip6_src[1] = 0x01010101;
ip6h.s_ip6_src[2] = 0x01010101;
ip6h.s_ip6_src[3] = 0x01010101;
ip6h.s_ip6_dst[0] = 0x02020202;
ip6h.s_ip6_dst[1] = 0x02020202;
ip6h.s_ip6_dst[2] = 0x02020202;
ip6h.s_ip6_dst[3] = 0x02020202;
/* copy content_len crap, we need full length */
PacketCopyData(p, (uint8_t *)&ip6h, sizeof(IPV6Hdr));
p->ip6h = (IPV6Hdr *)GET_PKT_DATA(p);
IPV6_SET_RAW_VER(p->ip6h, 6);
/* Fragmentation header. */
IPV6FragHdr *fh = (IPV6FragHdr *)(GET_PKT_DATA(p) + sizeof(IPV6Hdr));
fh->ip6fh_nxt = IPPROTO_ICMP;
fh->ip6fh_ident = htonl(id);
fh->ip6fh_offlg = htons((off << 3) | mf);
DecodeIPV6FragHeader(p, (uint8_t *)fh, 8, 8 + content_len, 0);
pcontent = SCCalloc(1, content_len);
if (unlikely(pcontent == NULL))
return NULL;
memset(pcontent, content, content_len);
PacketCopyDataOffset(p, sizeof(IPV6Hdr) + sizeof(IPV6FragHdr), pcontent, content_len);
SET_PKT_LEN(p, sizeof(IPV6Hdr) + sizeof(IPV6FragHdr) + content_len);
SCFree(pcontent);
p->ip6h->s_ip6_plen = htons(sizeof(IPV6FragHdr) + content_len);
SET_IPV6_SRC_ADDR(p, &p->src);
SET_IPV6_DST_ADDR(p, &p->dst);
/* Self test. */
if (IPV6_GET_VER(p) != 6)
goto error;
if (IPV6_GET_NH(p) != 44)
goto error;
if (IPV6_GET_PLEN(p) != sizeof(IPV6FragHdr) + content_len)
goto error;
return p;
error:
fprintf(stderr, "Error building test packet.\n");
if (p != NULL)
SCFree(p);
return NULL;
}
| C | suricata | 1 |
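The diff in this row is one line (the test-packet builder now stamps the caller's protocol into the fragment header instead of hardcoding IPPROTO_ICMP), but the underlying fix is in fragment matching. A hedged sketch of the match condition with an illustrative key struct, not Suricata's actual tracker layout:

```c
#include <stdint.h>

struct frag_key {
	uint32_t src, dst;	/* simplified: real code compares full addrs */
	uint32_t id;
	uint8_t  proto;
};

/* A fragment only joins a reassembly tracker if the IP protocol matches
 * too; otherwise a crafted fragment with the same (src, dst, id) but a
 * different protocol would be merged into the wrong packet. */
static int frag_key_match(const struct frag_key *a, const struct frag_key *b)
{
	return a->src == b->src && a->dst == b->dst &&
	       a->id == b->id && a->proto == b->proto;	/* added condition */
}
```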
CVE-2019-5797 | null | null | https://github.com/chromium/chromium/commit/ba169c14aa9cc2efd708a878ae21ff34f3898fe0 | ba169c14aa9cc2efd708a878ae21ff34f3898fe0 | Fixing BadMessageCallback usage by SessionStorage
TBR: jam@chromium.org
Bug: 916523
Change-Id: I027cc818cfba917906844ad2ec0edd7fa4761bd1
Reviewed-on: https://chromium-review.googlesource.com/c/1401604
Commit-Queue: Daniel Murphy <dmurph@chromium.org>
Reviewed-by: Marijn Kruisselbrink <mek@chromium.org>
Reviewed-by: Ken Rockot <rockot@google.com>
Cr-Commit-Position: refs/heads/master@{#621772} | int StoragePartitionImpl::GenerateQuotaClientMask(uint32_t remove_mask) {
int quota_client_mask = 0;
if (remove_mask & StoragePartition::REMOVE_DATA_MASK_FILE_SYSTEMS)
quota_client_mask |= storage::QuotaClient::kFileSystem;
if (remove_mask & StoragePartition::REMOVE_DATA_MASK_WEBSQL)
quota_client_mask |= storage::QuotaClient::kDatabase;
if (remove_mask & StoragePartition::REMOVE_DATA_MASK_APPCACHE)
quota_client_mask |= storage::QuotaClient::kAppcache;
if (remove_mask & StoragePartition::REMOVE_DATA_MASK_INDEXEDDB)
quota_client_mask |= storage::QuotaClient::kIndexedDatabase;
if (remove_mask & StoragePartition::REMOVE_DATA_MASK_SERVICE_WORKERS)
quota_client_mask |= storage::QuotaClient::kServiceWorker;
if (remove_mask & StoragePartition::REMOVE_DATA_MASK_CACHE_STORAGE)
quota_client_mask |= storage::QuotaClient::kServiceWorkerCache;
if (remove_mask & StoragePartition::REMOVE_DATA_MASK_BACKGROUND_FETCH)
quota_client_mask |= storage::QuotaClient::kBackgroundFetch;
return quota_client_mask;
}
| int StoragePartitionImpl::GenerateQuotaClientMask(uint32_t remove_mask) {
int quota_client_mask = 0;
if (remove_mask & StoragePartition::REMOVE_DATA_MASK_FILE_SYSTEMS)
quota_client_mask |= storage::QuotaClient::kFileSystem;
if (remove_mask & StoragePartition::REMOVE_DATA_MASK_WEBSQL)
quota_client_mask |= storage::QuotaClient::kDatabase;
if (remove_mask & StoragePartition::REMOVE_DATA_MASK_APPCACHE)
quota_client_mask |= storage::QuotaClient::kAppcache;
if (remove_mask & StoragePartition::REMOVE_DATA_MASK_INDEXEDDB)
quota_client_mask |= storage::QuotaClient::kIndexedDatabase;
if (remove_mask & StoragePartition::REMOVE_DATA_MASK_SERVICE_WORKERS)
quota_client_mask |= storage::QuotaClient::kServiceWorker;
if (remove_mask & StoragePartition::REMOVE_DATA_MASK_CACHE_STORAGE)
quota_client_mask |= storage::QuotaClient::kServiceWorkerCache;
if (remove_mask & StoragePartition::REMOVE_DATA_MASK_BACKGROUND_FETCH)
quota_client_mask |= storage::QuotaClient::kBackgroundFetch;
return quota_client_mask;
}
| C | Chrome | 0 |
null | null | null | https://github.com/chromium/chromium/commit/4da7eefcaad044a6f919947a2a0e3d4fed87834c | 4da7eefcaad044a6f919947a2a0e3d4fed87834c | [Qt] Remove QOpenGL specific code from GraphicsSurfaceGLX.
https://bugs.webkit.org/show_bug.cgi?id=100492
This patch removes most of the QOpenGLContext related code
from GraphicsSurfaceGLX. This allows sharing almost all
GraphicsSurfaceGLX code with EFL, by relying on pure GLX.
Patch by Zeno Albisser <zeno@webkit.org> on 2012-10-26
Reviewed by Kenneth Rohde Christiansen.
* platform/graphics/surfaces/qt/GraphicsSurfaceGLX.cpp:
(WebCore::OffScreenRootWindow::get):
(WebCore::OffScreenRootWindow::~OffScreenRootWindow):
(OffScreenRootWindow):
(WebCore):
(WebCore::GraphicsSurfacePrivate::GraphicsSurfacePrivate):
(WebCore::GraphicsSurfacePrivate::createSurface):
(WebCore::GraphicsSurfacePrivate::makeCurrent):
(WebCore::GraphicsSurfacePrivate::doneCurrent):
(WebCore::GraphicsSurfacePrivate::swapBuffers):
(WebCore::GraphicsSurfacePrivate::copyFromTexture):
(GraphicsSurfacePrivate):
(WebCore::resolveGLMethods):
git-svn-id: svn://svn.chromium.org/blink/trunk@132628 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | QWindow* get(Display* dpy)
| QWindow* get(Display* dpy)
{
if (!window) {
window = new QWindow;
window->setGeometry(QRect(-1, -1, 1, 1));
window->create();
XSetWindowAttributes attributes;
attributes.override_redirect = true;
XChangeWindowAttributes(dpy, window->handle()->winId(), X11OverrideRedirect, &attributes);
window->show();
}
return window;
}
| C | Chrome | 1 |
CVE-2018-14361 | https://www.cvedetails.com/cve/CVE-2018-14361/ | CWE-20 | https://github.com/neomutt/neomutt/commit/9e927affe3a021175f354af5fa01d22657c20585 | 9e927affe3a021175f354af5fa01d22657c20585 | Add alloc fail check in nntp_fetch_headers | static int nntp_mbox_open(struct Context *ctx)
{
struct NntpServer *nserv = NULL;
struct NntpData *nntp_data = NULL;
char buf[HUGE_STRING];
char server[LONG_STRING];
char *group = NULL;
int rc;
void *hc = NULL;
anum_t first, last, count = 0;
struct Url url;
mutt_str_strfcpy(buf, ctx->path, sizeof(buf));
if (url_parse(&url, buf) < 0 || !url.host || !url.path ||
!(url.scheme == U_NNTP || url.scheme == U_NNTPS))
{
url_free(&url);
mutt_error(_("%s is an invalid newsgroup specification!"), ctx->path);
return -1;
}
group = url.path;
url.path = strchr(url.path, '\0');
url_tostring(&url, server, sizeof(server), 0);
nserv = nntp_select_server(server, true);
url_free(&url);
if (!nserv)
return -1;
CurrentNewsSrv = nserv;
/* find news group data structure */
nntp_data = mutt_hash_find(nserv->groups_hash, group);
if (!nntp_data)
{
nntp_newsrc_close(nserv);
mutt_error(_("Newsgroup %s not found on the server."), group);
return -1;
}
mutt_bit_unset(ctx->rights, MUTT_ACL_INSERT);
if (!nntp_data->newsrc_ent && !nntp_data->subscribed && !SaveUnsubscribed)
ctx->readonly = true;
/* select newsgroup */
mutt_message(_("Selecting %s..."), group);
buf[0] = '\0';
if (nntp_query(nntp_data, buf, sizeof(buf)) < 0)
{
nntp_newsrc_close(nserv);
return -1;
}
/* newsgroup not found, remove it */
if (mutt_str_strncmp("411", buf, 3) == 0)
{
mutt_error(_("Newsgroup %s has been removed from the server."), nntp_data->group);
if (!nntp_data->deleted)
{
nntp_data->deleted = true;
nntp_active_save_cache(nserv);
}
if (nntp_data->newsrc_ent && !nntp_data->subscribed && !SaveUnsubscribed)
{
FREE(&nntp_data->newsrc_ent);
nntp_data->newsrc_len = 0;
nntp_delete_group_cache(nntp_data);
nntp_newsrc_update(nserv);
}
}
/* parse newsgroup info */
else
{
if (sscanf(buf, "211 " ANUM " " ANUM " " ANUM, &count, &first, &last) != 3)
{
nntp_newsrc_close(nserv);
mutt_error("GROUP: %s", buf);
return -1;
}
nntp_data->first_message = first;
nntp_data->last_message = last;
nntp_data->deleted = false;
/* get description if empty */
if (NntpLoadDescription && !nntp_data->desc)
{
if (get_description(nntp_data, NULL, NULL) < 0)
{
nntp_newsrc_close(nserv);
return -1;
}
if (nntp_data->desc)
nntp_active_save_cache(nserv);
}
}
time(&nserv->check_time);
ctx->data = nntp_data;
if (!nntp_data->bcache && (nntp_data->newsrc_ent || nntp_data->subscribed || SaveUnsubscribed))
nntp_data->bcache = mutt_bcache_open(&nserv->conn->account, nntp_data->group);
/* strip off extra articles if adding context is greater than $nntp_context */
first = nntp_data->first_message;
if (NntpContext && nntp_data->last_message - first + 1 > NntpContext)
first = nntp_data->last_message - NntpContext + 1;
nntp_data->last_loaded = first ? first - 1 : 0;
count = nntp_data->first_message;
nntp_data->first_message = first;
nntp_bcache_update(nntp_data);
nntp_data->first_message = count;
#ifdef USE_HCACHE
hc = nntp_hcache_open(nntp_data);
nntp_hcache_update(nntp_data, hc);
#endif
if (!hc)
{
mutt_bit_unset(ctx->rights, MUTT_ACL_WRITE);
mutt_bit_unset(ctx->rights, MUTT_ACL_DELETE);
}
nntp_newsrc_close(nserv);
rc = nntp_fetch_headers(ctx, hc, first, nntp_data->last_message, 0);
#ifdef USE_HCACHE
mutt_hcache_close(hc);
#endif
if (rc < 0)
return -1;
nntp_data->last_loaded = nntp_data->last_message;
nserv->newsrc_modified = false;
return 0;
}
| static int nntp_mbox_open(struct Context *ctx)
{
struct NntpServer *nserv = NULL;
struct NntpData *nntp_data = NULL;
char buf[HUGE_STRING];
char server[LONG_STRING];
char *group = NULL;
int rc;
void *hc = NULL;
anum_t first, last, count = 0;
struct Url url;
mutt_str_strfcpy(buf, ctx->path, sizeof(buf));
if (url_parse(&url, buf) < 0 || !url.host || !url.path ||
!(url.scheme == U_NNTP || url.scheme == U_NNTPS))
{
url_free(&url);
mutt_error(_("%s is an invalid newsgroup specification!"), ctx->path);
return -1;
}
group = url.path;
url.path = strchr(url.path, '\0');
url_tostring(&url, server, sizeof(server), 0);
nserv = nntp_select_server(server, true);
url_free(&url);
if (!nserv)
return -1;
CurrentNewsSrv = nserv;
/* find news group data structure */
nntp_data = mutt_hash_find(nserv->groups_hash, group);
if (!nntp_data)
{
nntp_newsrc_close(nserv);
mutt_error(_("Newsgroup %s not found on the server."), group);
return -1;
}
mutt_bit_unset(ctx->rights, MUTT_ACL_INSERT);
if (!nntp_data->newsrc_ent && !nntp_data->subscribed && !SaveUnsubscribed)
ctx->readonly = true;
/* select newsgroup */
mutt_message(_("Selecting %s..."), group);
buf[0] = '\0';
if (nntp_query(nntp_data, buf, sizeof(buf)) < 0)
{
nntp_newsrc_close(nserv);
return -1;
}
/* newsgroup not found, remove it */
if (mutt_str_strncmp("411", buf, 3) == 0)
{
mutt_error(_("Newsgroup %s has been removed from the server."), nntp_data->group);
if (!nntp_data->deleted)
{
nntp_data->deleted = true;
nntp_active_save_cache(nserv);
}
if (nntp_data->newsrc_ent && !nntp_data->subscribed && !SaveUnsubscribed)
{
FREE(&nntp_data->newsrc_ent);
nntp_data->newsrc_len = 0;
nntp_delete_group_cache(nntp_data);
nntp_newsrc_update(nserv);
}
}
/* parse newsgroup info */
else
{
if (sscanf(buf, "211 " ANUM " " ANUM " " ANUM, &count, &first, &last) != 3)
{
nntp_newsrc_close(nserv);
mutt_error("GROUP: %s", buf);
return -1;
}
nntp_data->first_message = first;
nntp_data->last_message = last;
nntp_data->deleted = false;
/* get description if empty */
if (NntpLoadDescription && !nntp_data->desc)
{
if (get_description(nntp_data, NULL, NULL) < 0)
{
nntp_newsrc_close(nserv);
return -1;
}
if (nntp_data->desc)
nntp_active_save_cache(nserv);
}
}
time(&nserv->check_time);
ctx->data = nntp_data;
if (!nntp_data->bcache && (nntp_data->newsrc_ent || nntp_data->subscribed || SaveUnsubscribed))
nntp_data->bcache = mutt_bcache_open(&nserv->conn->account, nntp_data->group);
/* strip off extra articles if adding context is greater than $nntp_context */
first = nntp_data->first_message;
if (NntpContext && nntp_data->last_message - first + 1 > NntpContext)
first = nntp_data->last_message - NntpContext + 1;
nntp_data->last_loaded = first ? first - 1 : 0;
count = nntp_data->first_message;
nntp_data->first_message = first;
nntp_bcache_update(nntp_data);
nntp_data->first_message = count;
#ifdef USE_HCACHE
hc = nntp_hcache_open(nntp_data);
nntp_hcache_update(nntp_data, hc);
#endif
if (!hc)
{
mutt_bit_unset(ctx->rights, MUTT_ACL_WRITE);
mutt_bit_unset(ctx->rights, MUTT_ACL_DELETE);
}
nntp_newsrc_close(nserv);
rc = nntp_fetch_headers(ctx, hc, first, nntp_data->last_message, 0);
#ifdef USE_HCACHE
mutt_hcache_close(hc);
#endif
if (rc < 0)
return -1;
nntp_data->last_loaded = nntp_data->last_message;
nserv->newsrc_modified = false;
return 0;
}
| C | neomutt | 0 |
CVE-2016-3819 | https://www.cvedetails.com/cve/CVE-2016-3819/ | CWE-119 | https://android.googlesource.com/platform/frameworks/av/+/590d1729883f700ab905cdc9ad850f3ddd7e1f56 | 590d1729883f700ab905cdc9ad850f3ddd7e1f56 | Fix potential overflow
Bug: 28533562
Change-Id: I798ab24caa4c81f3ba564cad7c9ee019284fb702
| u32 h264bsdResetDpb(
dpbStorage_t *dpb,
u32 picSizeInMbs,
u32 dpbSize,
u32 maxRefFrames,
u32 maxFrameNum,
u32 noReordering)
{
/* Code */
ASSERT(picSizeInMbs);
ASSERT(maxRefFrames <= MAX_NUM_REF_PICS);
ASSERT(maxRefFrames <= dpbSize);
ASSERT(maxFrameNum);
ASSERT(dpbSize);
h264bsdFreeDpb(dpb);
return h264bsdInitDpb(dpb, picSizeInMbs, dpbSize, maxRefFrames,
maxFrameNum, noReordering);
}
| u32 h264bsdResetDpb(
dpbStorage_t *dpb,
u32 picSizeInMbs,
u32 dpbSize,
u32 maxRefFrames,
u32 maxFrameNum,
u32 noReordering)
{
/* Code */
ASSERT(picSizeInMbs);
ASSERT(maxRefFrames <= MAX_NUM_REF_PICS);
ASSERT(maxRefFrames <= dpbSize);
ASSERT(maxFrameNum);
ASSERT(dpbSize);
h264bsdFreeDpb(dpb);
return h264bsdInitDpb(dpb, picSizeInMbs, dpbSize, maxRefFrames,
maxFrameNum, noReordering);
}
| C | Android | 0 |
CVE-2013-1929 | https://www.cvedetails.com/cve/CVE-2013-1929/ | CWE-119 | https://github.com/torvalds/linux/commit/715230a44310a8cf66fbfb5a46f9a62a9b2de424 | 715230a44310a8cf66fbfb5a46f9a62a9b2de424 | tg3: fix length overflow in VPD firmware parsing
Commit 184b89044fb6e2a74611dafa69b1dce0d98612c6 ("tg3: Use VPD fw version
when present") introduced VPD parsing that contained a potential length
overflow.
Limit the hardware's reported firmware string length (max 255 bytes) to
stay inside the driver's firmware string length (32 bytes). On overflow,
truncate the formatted firmware string instead of potentially overwriting
portions of the tg3 struct.
http://cansecwest.com/slides/2013/PrivateCore%20CSW%202013.pdf
Signed-off-by: Kees Cook <keescook@chromium.org>
Reported-by: Oded Horovitz <oded@privatecore.com>
Reported-by: Brad Spengler <spender@grsecurity.net>
Cc: stable@vger.kernel.org
Cc: Matt Carlson <mcarlson@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
bool need_vaux = false;
/* The GPIOs do something completely different on 57765. */
if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
return;
if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
tg3_asic_rev(tp) == ASIC_REV_5719 ||
tg3_asic_rev(tp) == ASIC_REV_5720) {
tg3_frob_aux_power_5717(tp, include_wol ?
tg3_flag(tp, WOL_ENABLE) != 0 : 0);
return;
}
if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
struct net_device *dev_peer;
dev_peer = pci_get_drvdata(tp->pdev_peer);
/* remove_one() may have been run on the peer. */
if (dev_peer) {
struct tg3 *tp_peer = netdev_priv(dev_peer);
if (tg3_flag(tp_peer, INIT_COMPLETE))
return;
if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
tg3_flag(tp_peer, ENABLE_ASF))
need_vaux = true;
}
}
if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
tg3_flag(tp, ENABLE_ASF))
need_vaux = true;
if (need_vaux)
tg3_pwrsrc_switch_to_vaux(tp);
else
tg3_pwrsrc_die_with_vmain(tp);
}
| static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
bool need_vaux = false;
/* The GPIOs do something completely different on 57765. */
if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
return;
if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
tg3_asic_rev(tp) == ASIC_REV_5719 ||
tg3_asic_rev(tp) == ASIC_REV_5720) {
tg3_frob_aux_power_5717(tp, include_wol ?
tg3_flag(tp, WOL_ENABLE) != 0 : 0);
return;
}
if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
struct net_device *dev_peer;
dev_peer = pci_get_drvdata(tp->pdev_peer);
/* remove_one() may have been run on the peer. */
if (dev_peer) {
struct tg3 *tp_peer = netdev_priv(dev_peer);
if (tg3_flag(tp_peer, INIT_COMPLETE))
return;
if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
tg3_flag(tp_peer, ENABLE_ASF))
need_vaux = true;
}
}
if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
tg3_flag(tp, ENABLE_ASF))
need_vaux = true;
if (need_vaux)
tg3_pwrsrc_switch_to_vaux(tp);
else
tg3_pwrsrc_die_with_vmain(tp);
}
| C | linux | 0 |
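A note on the fix pattern in the tg3 record above: clamping a hardware-reported length (up to 255 bytes) to the driver's 32-byte destination and letting snprintf() truncate can be sketched in plain user-space C. The names format_fw_ver and FW_VER_LEN are illustrative stand-ins, not the actual tg3 symbols.

#include <stdio.h>
#include <string.h>

#define FW_VER_LEN 32	/* driver-side buffer, mirroring tg3's 32-byte field */

/* Format a firmware string from a VPD blob whose reported length is not
 * trusted.  "%.*s" reads at most n bytes, and n never exceeds the room
 * left for the terminating NUL. */
static void format_fw_ver(char fw_ver[FW_VER_LEN],
                          const char *vpd_data, size_t vpd_len)
{
	size_t n = vpd_len < FW_VER_LEN - 1 ? vpd_len : FW_VER_LEN - 1;

	snprintf(fw_ver, FW_VER_LEN, "%.*s", (int)n, vpd_data);
}

int main(void)
{
	char fw_ver[FW_VER_LEN];
	char vpd[255];

	memset(vpd, 'A', sizeof(vpd));	/* worst case: 255 untrusted bytes */
	format_fw_ver(fw_ver, vpd, sizeof(vpd));
	printf("%zu bytes kept\n", strlen(fw_ver));	/* 31, not 255 */
	return 0;
}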
CVE-2013-6378 | https://www.cvedetails.com/cve/CVE-2013-6378/ | CWE-189 | https://github.com/torvalds/linux/commit/a497e47d4aec37aaf8f13509f3ef3d1f6a717d88 | a497e47d4aec37aaf8f13509f3ef3d1f6a717d88 | libertas: potential oops in debugfs
If we do a zero size allocation then it will oops. Also we can't be
sure the user passes us a NUL terminated string so I've added a
terminator.
This code can only be triggered by root.
Reported-by: Nico Golde <nico@ngolde.de>
Reported-by: Fabian Yamaguchi <fabs@goesec.de>
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Acked-by: Dan Williams <dcbw@redhat.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com> | static ssize_t lbs_lowsnr_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
return lbs_threshold_read(TLV_TYPE_SNR_LOW, CMD_SUBSCRIBE_SNR_LOW,
file, userbuf, count, ppos);
}
| static ssize_t lbs_lowsnr_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
return lbs_threshold_read(TLV_TYPE_SNR_LOW, CMD_SUBSCRIBE_SNR_LOW,
file, userbuf, count, ppos);
}
| C | linux | 0 |
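The libertas record above names two distinct problems: a zero-size allocation that oopses, and a user buffer that is never NUL-terminated. Below is a kernel-style sketch of the usual fix for both, assuming a kernel that provides memdup_user_nul(); process_command() is a hypothetical consumer, not a real libertas function, and the fragment is not buildable outside a kernel tree.

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>

static ssize_t process_command(const char *cmd);	/* hypothetical */

static ssize_t demo_debugfs_write(struct file *file, const char __user *ubuf,
                                  size_t count, loff_t *ppos)
{
	char *buf;
	ssize_t ret;

	if (!count)
		return -EINVAL;	/* a 0-byte allocation would oops later */

	buf = memdup_user_nul(ubuf, count);	/* copies and appends '\0' */
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	ret = process_command(buf);
	kfree(buf);
	return ret < 0 ? ret : count;
}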
CVE-2018-17205 | https://www.cvedetails.com/cve/CVE-2018-17205/ | CWE-617 | https://github.com/openvswitch/ovs/commit/0befd1f3745055c32940f5faf9559be6a14395e6 | 0befd1f3745055c32940f5faf9559be6a14395e6 | ofproto: Fix OVS crash when reverting old flows in bundle commit
During bundle commit, flows which are added in the bundle are applied
to ofproto in order. If a flow cannot be added (e.g. its
action is a go-to group id which does not exist), OVS tries to
revert all previous flows which were successfully applied
from the same bundle. This is possible since OVS maintains a list
of old flows which were replaced by flows from the bundle.
While reinserting old flows, OVS asserts due to the check on rule
state != RULE_INITIALIZED. That check holds only for new flows; for
old flows the rule state will be RULE_REMOVED, which triggers the
assert and crashes OVS.
The ovs assert check should be modified to != RULE_INSERTED to prevent
any existing rule being re-inserted and allow new rules and old rules
(in case of revert) to get inserted.
Here is an example to trigger the assert:
$ ovs-vsctl add-br br-test -- set Bridge br-test datapath_type=netdev
$ cat flows.txt
flow add table=1,priority=0,in_port=2,actions=NORMAL
flow add table=1,priority=0,in_port=3,actions=NORMAL
$ ovs-ofctl dump-flows -OOpenflow13 br-test
cookie=0x0, duration=2.465s, table=1, n_packets=0, n_bytes=0, priority=0,in_port=2 actions=NORMAL
cookie=0x0, duration=2.465s, table=1, n_packets=0, n_bytes=0, priority=0,in_port=3 actions=NORMAL
$ cat flow-modify.txt
flow modify table=1,priority=0,in_port=2,actions=drop
flow modify table=1,priority=0,in_port=3,actions=group:10
$ ovs-ofctl bundle br-test flow-modify.txt -OOpenflow13
First flow rule will be modified since it is a valid rule. However second
rule is invalid since no group with id 10 exists. Bundle commit tries to
revert (insert) the first rule to old flow which results in ovs_assert at
ofproto_rule_insert__() since old rule->state = RULE_REMOVED.
Signed-off-by: Vishal Deep Ajmera <vishal.deep.ajmera@ericsson.com>
Signed-off-by: Ben Pfaff <blp@ovn.org> | query_tables_desc(struct ofproto *ofproto, struct ofputil_table_desc **descp)
{
struct ofputil_table_desc *table_desc;
size_t i;
table_desc = *descp = xcalloc(ofproto->n_tables, sizeof *table_desc);
for (i = 0; i < ofproto->n_tables; i++) {
struct ofputil_table_desc *td = &table_desc[i];
query_table_desc__(td, ofproto, i);
}
}
| query_tables_desc(struct ofproto *ofproto, struct ofputil_table_desc **descp)
{
struct ofputil_table_desc *table_desc;
size_t i;
table_desc = *descp = xcalloc(ofproto->n_tables, sizeof *table_desc);
for (i = 0; i < ofproto->n_tables; i++) {
struct ofputil_table_desc *td = &table_desc[i];
query_table_desc__(td, ofproto, i);
}
}
| C | ovs | 0 |
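A minimal sketch of the assertion change described in the ovs record above, with everything except the rule state machine stripped away (the real code uses ovs_assert() on struct rule in ofproto.c):

#include <assert.h>

enum rule_state { RULE_INITIALIZED, RULE_INSERTED, RULE_REMOVED };

struct rule { enum rule_state state; };

static void rule_insert(struct rule *rule)
{
    /* before the fix: assert(rule->state == RULE_INITIALIZED);
     * which fires when a bundle revert re-inserts a RULE_REMOVED rule */
    assert(rule->state != RULE_INSERTED);
    rule->state = RULE_INSERTED;
}

int main(void)
{
    struct rule r = { RULE_REMOVED };   /* old flow being put back */
    rule_insert(&r);                    /* accepted; no abort */
    return 0;
}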
CVE-2011-3209 | https://www.cvedetails.com/cve/CVE-2011-3209/ | CWE-189 | https://github.com/torvalds/linux/commit/f8bd2258e2d520dff28c855658bd24bdafb5102d | f8bd2258e2d520dff28c855658bd24bdafb5102d | remove div_long_long_rem
x86 is the only arch right now which provides an optimized
div_long_long_rem, and it has the downside that one has to be very careful that
the divide doesn't overflow.
The API is a little awkward, as the arguments for the unsigned divide are
signed. The signed version also doesn't handle a negative divisor and
produces worse code on 64bit archs.
There is little incentive to keep this API alive, so this converts the few
users to the new API.
Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> | u64 nsec_to_clock_t(u64 x)
{
#if (NSEC_PER_SEC % USER_HZ) == 0
return div_u64(x, NSEC_PER_SEC / USER_HZ);
#elif (USER_HZ % 512) == 0
return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512);
#else
/*
* max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
* overflow after 64.99 years.
* exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
*/
return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
#endif
}
| u64 nsec_to_clock_t(u64 x)
{
#if (NSEC_PER_SEC % USER_HZ) == 0
return div_u64(x, NSEC_PER_SEC / USER_HZ);
#elif (USER_HZ % 512) == 0
return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512);
#else
/*
* max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
* overflow after 64.99 years.
* exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
*/
return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
#endif
}
| C | linux | 0 |
CVE-2013-7421 | https://www.cvedetails.com/cve/CVE-2013-7421/ | CWE-264 | https://github.com/torvalds/linux/commit/5d26a105b5a73e5635eae0629b42fa0a90e07b7b | 5d26a105b5a73e5635eae0629b42fa0a90e07b7b | crypto: prefix module autoloading with "crypto-"
This prefixes all crypto module loading with "crypto-" so we never run
the risk of exposing module auto-loading to userspace via a crypto API,
as demonstrated by Mathias Krause:
https://lkml.org/lkml/2013/3/4/70
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> | static int crc32_setkey(struct crypto_shash *hash, const u8 *key,
unsigned int keylen)
{
u32 *mctx = crypto_shash_ctx(hash);
if (keylen != sizeof(u32)) {
crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
*mctx = le32_to_cpup((__le32 *)key);
return 0;
}
| static int crc32_setkey(struct crypto_shash *hash, const u8 *key,
unsigned int keylen)
{
u32 *mctx = crypto_shash_ctx(hash);
if (keylen != sizeof(u32)) {
crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
*mctx = le32_to_cpup((__le32 *)key);
return 0;
}
| C | linux | 0 |
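The aliasing scheme the record above describes, as a kernel fragment. The macro shape is reproduced from memory of the upstream patch and is not buildable standalone; the point is that the crypto API resolves algorithms only through the prefixed alias, so a crafted userspace request can no longer autoload arbitrary modules:

#define MODULE_ALIAS_CRYPTO(name)			\
		__MODULE_INFO(alias, alias_userspace, name);	\
		__MODULE_INFO(alias, alias_crypto, "crypto-" name)

/* a cipher module declares both aliases in one go: */
MODULE_ALIAS_CRYPTO("aes");

/* while the loader side requests only the prefixed form:
 *
 *	request_module("crypto-%s", alg_name);
 */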
CVE-2018-6138 | https://www.cvedetails.com/cve/CVE-2018-6138/ | CWE-20 | https://github.com/chromium/chromium/commit/0aca6bc05a263ea9eafee515fc6ba14da94c1964 | 0aca6bc05a263ea9eafee515fc6ba14da94c1964 | [Extensions] Restrict tabs.captureVisibleTab()
Modify the permissions for tabs.captureVisibleTab(). Instead of just
checking for <all_urls> and assuming it's safe, do the following:
- If the page is a "normal" web page (e.g., http/https), allow the
capture if the extension has activeTab granted or <all_urls>.
- If the page is a file page (file:///), allow the capture if the
extension has file access *and* either of the <all_urls> or
activeTab permissions.
- If the page is a chrome:// page, allow the capture only if the
extension has activeTab granted.
Bug: 810220
Change-Id: I1e2f71281e2f331d641ba0e435df10d66d721304
Reviewed-on: https://chromium-review.googlesource.com/981195
Commit-Queue: Devlin <rdevlin.cronin@chromium.org>
Reviewed-by: Karan Bhatia <karandeepb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#548891} | bool PermissionsData::HasWithheldImpliedAllHosts() const {
base::AutoLock auto_lock(runtime_lock_);
return !withheld_permissions_unsafe_->explicit_hosts().is_empty() ||
!withheld_permissions_unsafe_->scriptable_hosts().is_empty();
}
| bool PermissionsData::HasWithheldImpliedAllHosts() const {
base::AutoLock auto_lock(runtime_lock_);
return !withheld_permissions_unsafe_->explicit_hosts().is_empty() ||
!withheld_permissions_unsafe_->scriptable_hosts().is_empty();
}
| C | Chrome | 0 |
CVE-2011-3956 | https://www.cvedetails.com/cve/CVE-2011-3956/ | CWE-264 | https://github.com/chromium/chromium/commit/04915c26ea193247b8a29aa24bfa34578ef5d39e | 04915c26ea193247b8a29aa24bfa34578ef5d39e | [Qt] Remove an unnecessary masking from swapBgrToRgb()
https://bugs.webkit.org/show_bug.cgi?id=103630
Reviewed by Zoltan Herczeg.
Get rid of a masking command in swapBgrToRgb() to speed up a little bit.
* platform/graphics/qt/GraphicsContext3DQt.cpp:
(WebCore::swapBgrToRgb):
git-svn-id: svn://svn.chromium.org/blink/trunk@136375 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | void GraphicsContext3DPrivate::initializeANGLE()
{
ShBuiltInResources ANGLEResources;
ShInitBuiltInResources(&ANGLEResources);
m_context->getIntegerv(GraphicsContext3D::MAX_VERTEX_ATTRIBS, &ANGLEResources.MaxVertexAttribs);
m_context->getIntegerv(GraphicsContext3D::MAX_VERTEX_UNIFORM_VECTORS, &ANGLEResources.MaxVertexUniformVectors);
m_context->getIntegerv(GraphicsContext3D::MAX_VARYING_VECTORS, &ANGLEResources.MaxVaryingVectors);
m_context->getIntegerv(GraphicsContext3D::MAX_VERTEX_TEXTURE_IMAGE_UNITS, &ANGLEResources.MaxVertexTextureImageUnits);
m_context->getIntegerv(GraphicsContext3D::MAX_COMBINED_TEXTURE_IMAGE_UNITS, &ANGLEResources.MaxCombinedTextureImageUnits);
m_context->getIntegerv(GraphicsContext3D::MAX_TEXTURE_IMAGE_UNITS, &ANGLEResources.MaxTextureImageUnits);
m_context->getIntegerv(GraphicsContext3D::MAX_FRAGMENT_UNIFORM_VECTORS, &ANGLEResources.MaxFragmentUniformVectors);
ANGLEResources.MaxDrawBuffers = 1;
Extensions3D* extensions = m_context->getExtensions();
if (extensions->supports("GL_ARB_texture_rectangle"))
ANGLEResources.ARB_texture_rectangle = 1;
m_context->m_compiler.setResources(ANGLEResources);
}
| void GraphicsContext3DPrivate::initializeANGLE()
{
ShBuiltInResources ANGLEResources;
ShInitBuiltInResources(&ANGLEResources);
m_context->getIntegerv(GraphicsContext3D::MAX_VERTEX_ATTRIBS, &ANGLEResources.MaxVertexAttribs);
m_context->getIntegerv(GraphicsContext3D::MAX_VERTEX_UNIFORM_VECTORS, &ANGLEResources.MaxVertexUniformVectors);
m_context->getIntegerv(GraphicsContext3D::MAX_VARYING_VECTORS, &ANGLEResources.MaxVaryingVectors);
m_context->getIntegerv(GraphicsContext3D::MAX_VERTEX_TEXTURE_IMAGE_UNITS, &ANGLEResources.MaxVertexTextureImageUnits);
m_context->getIntegerv(GraphicsContext3D::MAX_COMBINED_TEXTURE_IMAGE_UNITS, &ANGLEResources.MaxCombinedTextureImageUnits);
m_context->getIntegerv(GraphicsContext3D::MAX_TEXTURE_IMAGE_UNITS, &ANGLEResources.MaxTextureImageUnits);
m_context->getIntegerv(GraphicsContext3D::MAX_FRAGMENT_UNIFORM_VECTORS, &ANGLEResources.MaxFragmentUniformVectors);
ANGLEResources.MaxDrawBuffers = 1;
Extensions3D* extensions = m_context->getExtensions();
if (extensions->supports("GL_ARB_texture_rectangle"))
ANGLEResources.ARB_texture_rectangle = 1;
m_context->m_compiler.setResources(ANGLEResources);
}
| C | Chrome | 0 |
CVE-2013-2871 | https://www.cvedetails.com/cve/CVE-2013-2871/ | CWE-20 | https://github.com/chromium/chromium/commit/bb9cfb0aba25f4b13e57bdd4a9fac80ba071e7b9 | bb9cfb0aba25f4b13e57bdd4a9fac80ba071e7b9 | Setting input.x-webkit-speech should not cause focus change
In r150866, we introduced element()->focus() in destroyShadowSubtree()
to retain focus on <input> when its type attribute gets changed.
But when x-webkit-speech attribute is changed, the element is detached
before calling destroyShadowSubtree() and element()->focus() failed
This patch moves detach() after destroyShadowSubtree() to fix the
problem.
BUG=243818
TEST=fast/forms/input-type-change-focusout.html
NOTRY=true
Review URL: https://chromiumcodereview.appspot.com/16084005
git-svn-id: svn://svn.chromium.org/blink/trunk@151444 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | void BaseMultipleFieldsDateAndTimeInputType::didFocusOnControl()
{
element()->setFocus(true);
}
| void BaseMultipleFieldsDateAndTimeInputType::didFocusOnControl()
{
element()->setFocus(true);
}
| C | Chrome | 0 |
CVE-2016-9317 | https://www.cvedetails.com/cve/CVE-2016-9317/ | CWE-20 | https://github.com/libgd/libgd/commit/1846f48e5fcdde996e7c27a4bbac5d0aef183e4b | 1846f48e5fcdde996e7c27a4bbac5d0aef183e4b | Fix #340: System frozen
gdImageCreate() doesn't check for oversized images and as such is prone
to DoS vulnerabilities. We fix that by applying the same overflow check
that is already in place for gdImageCreateTrueColor().
CVE-2016-9317 | BGD_DECLARE(int) gdImagePaletteToTrueColor(gdImagePtr src)
{
unsigned int y;
unsigned int yy;
if (src == NULL) {
return 0;
}
if (src->trueColor == 1) {
return 1;
} else {
unsigned int x;
const unsigned int sy = gdImageSY(src);
const unsigned int sx = gdImageSX(src);
src->tpixels = (int **) gdMalloc(sizeof(int *) * sy);
if (src->tpixels == NULL) {
return 0;
}
for (y = 0; y < sy; y++) {
const unsigned char *src_row = src->pixels[y];
int * dst_row;
/* no need to calloc it, we overwrite all pxl anyway */
src->tpixels[y] = (int *) gdMalloc(sx * sizeof(int));
if (src->tpixels[y] == NULL) {
goto clean_on_error;
}
dst_row = src->tpixels[y];
for (x = 0; x < sx; x++) {
const unsigned char c = *(src_row + x);
if (c == src->transparent) {
*(dst_row + x) = gdTrueColorAlpha(0, 0, 0, 127);
} else {
*(dst_row + x) = gdTrueColorAlpha(src->red[c], src->green[c], src->blue[c], src->alpha[c]);
}
}
}
}
/* free old palette buffer (y is sy) */
for (yy = 0; yy < y; yy++) {
gdFree(src->pixels[yy]);
}
gdFree(src->pixels);
src->trueColor = 1;
src->pixels = NULL;
src->alphaBlendingFlag = 0;
src->saveAlphaFlag = 1;
if (src->transparent >= 0) {
const unsigned char c = src->transparent;
src->transparent = gdTrueColorAlpha(src->red[c], src->green[c], src->blue[c], src->alpha[c]);
}
return 1;
clean_on_error:
/* free new true color buffer (y is not allocated, have failed) */
for (yy = 0; yy < y; yy++) {
gdFree(src->tpixels[yy]);
}
gdFree(src->tpixels);
return 0;
}
| BGD_DECLARE(int) gdImagePaletteToTrueColor(gdImagePtr src)
{
unsigned int y;
unsigned int yy;
if (src == NULL) {
return 0;
}
if (src->trueColor == 1) {
return 1;
} else {
unsigned int x;
const unsigned int sy = gdImageSY(src);
const unsigned int sx = gdImageSX(src);
src->tpixels = (int **) gdMalloc(sizeof(int *) * sy);
if (src->tpixels == NULL) {
return 0;
}
for (y = 0; y < sy; y++) {
const unsigned char *src_row = src->pixels[y];
int * dst_row;
/* no need to calloc it, we overwrite all pxl anyway */
src->tpixels[y] = (int *) gdMalloc(sx * sizeof(int));
if (src->tpixels[y] == NULL) {
goto clean_on_error;
}
dst_row = src->tpixels[y];
for (x = 0; x < sx; x++) {
const unsigned char c = *(src_row + x);
if (c == src->transparent) {
*(dst_row + x) = gdTrueColorAlpha(0, 0, 0, 127);
} else {
*(dst_row + x) = gdTrueColorAlpha(src->red[c], src->green[c], src->blue[c], src->alpha[c]);
}
}
}
}
/* free old palette buffer (y is sy) */
for (yy = 0; yy < y; yy++) {
gdFree(src->pixels[yy]);
}
gdFree(src->pixels);
src->trueColor = 1;
src->pixels = NULL;
src->alphaBlendingFlag = 0;
src->saveAlphaFlag = 1;
if (src->transparent >= 0) {
const unsigned char c = src->transparent;
src->transparent = gdTrueColorAlpha(src->red[c], src->green[c], src->blue[c], src->alpha[c]);
}
return 1;
clean_on_error:
/* free new true color buffer (y is not allocated, have failed) */
for (yy = 0; yy < y; yy++) {
gdFree(src->tpixels[yy]);
}
gdFree(src->tpixels);
return 0;
}
| C | libgd | 0 |
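The guard the libgd record above refers to can be demonstrated standalone. overflow2() below is a portable stand-in for libgd's helper of the same name; the fix adds this kind of multiplication check to gdImageCreate(), mirroring what gdImageCreateTrueColor() already did:

#include <limits.h>
#include <stdio.h>

/* does a*b overflow a signed int? */
static int overflow2(int a, int b)
{
	if (a <= 0 || b <= 0)
		return 1;	/* non-positive dimensions are invalid too */
	return a > INT_MAX / b;
}

int main(void)
{
	/* 2147483647 x 2 pixels: accepted before the fix, rejected after */
	printf("huge image overflows:  %d\n", overflow2(2147483647, 2));
	printf("640 x 480 overflows:   %d\n", overflow2(640, 480));
	return 0;
}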
CVE-2019-14459 | https://www.cvedetails.com/cve/CVE-2019-14459/ | CWE-190 | https://github.com/phaag/nfdump/commit/3b006ededaf351f1723aea6c727c9edd1b1fff9b | 3b006ededaf351f1723aea6c727c9edd1b1fff9b | Fix potential unsigned integer underflow | static void Process_ipfix_option_templates(exporter_ipfix_domain_t *exporter, void *option_template_flowset, FlowSource_t *fs) {
uint8_t *DataPtr;
uint32_t size_left, size_required, i;
uint16_t id, field_count, scope_field_count, offset;
uint16_t offset_std_sampler_interval, offset_std_sampler_algorithm, found_std_sampling;
i = 0; // keep compiler happy
size_left = GET_FLOWSET_LENGTH(option_template_flowset) - 4; // -4 for flowset header -> id and length
if ( size_left < 6 ) {
LogError("Process_ipfix: [%u] option template length error: size left %u too small for an options template",
exporter->info.id, size_left);
return;
}
DataPtr = option_template_flowset + 4;
id = GET_OPTION_TEMPLATE_ID(DataPtr);
field_count = GET_OPTION_TEMPLATE_FIELD_COUNT(DataPtr);
scope_field_count = GET_OPTION_TEMPLATE_SCOPE_FIELD_COUNT(DataPtr);
DataPtr += 6;
size_left -= 6;
dbg_printf("Decode Option Template. id: %u, field count: %u, scope field count: %u\n",
id, field_count, scope_field_count);
if ( scope_field_count == 0 ) {
LogError("Process_ipfx: [%u] scope field count error: length must not be zero",
exporter->info.id);
dbg_printf("scope field count error: length must not be zero\n");
return;
}
size_required = 2 * field_count * sizeof(uint16_t);
dbg_printf("Size left: %u, size required: %u\n", size_left, size_required);
if ( size_left < size_required ) {
LogError("Process_ipfix: [%u] option template length error: size left %u too small for %u scopes length and %u options length",
exporter->info.id, size_left, field_count, scope_field_count);
dbg_printf("option template length error: size left %u too small for field_count %u\n",
size_left, field_count);
return;
}
if ( scope_field_count == 0 ) {
LogError("Process_ipfxi: [%u] scope field count error: length must not be zero",
exporter->info.id);
return;
}
offset_std_sampler_interval = 0;
offset_std_sampler_algorithm = 0;
found_std_sampling = 0;
offset = 0;
for ( i=0; i<scope_field_count; i++ ) {
uint16_t id, length;
int Enterprise;
if ( size_left && size_left < 4 ) {
LogError("Process_ipfix [%u] Template size error at %s line %u" ,
exporter->info.id, __FILE__, __LINE__, strerror (errno));
return;
}
id = Get_val16(DataPtr); DataPtr += 2;
length = Get_val16(DataPtr); DataPtr += 2;
size_left -= 4;
Enterprise = id & 0x8000 ? 1 : 0;
if ( Enterprise ) {
size_required += 4;
if ( size_left < 4 ) {
LogError("Process_ipfix: [%u] option template length error: size left %u too small",
exporter->info.id, size_left);
dbg_printf("option template length error: size left %u too small\n", size_left);
return;
}
DataPtr += 4;
size_left -= 4;
dbg_printf(" [%i] Enterprise: 1, scope id: %u, scope length %u enterprise value: %u\n",
i, id, length, Get_val32(DataPtr));
} else {
dbg_printf(" [%i] Enterprise: 0, scope id: %u, scope length %u\n", i, id, length);
}
offset += length;
}
for ( ;i<field_count; i++ ) {
uint32_t enterprise_value;
uint16_t id, length;
int Enterprise;
UNUSED(enterprise_value);
id = Get_val16(DataPtr); DataPtr += 2;
length = Get_val16(DataPtr); DataPtr += 2;
size_left -= 4;
Enterprise = id & 0x8000 ? 1 : 0;
if ( Enterprise ) {
size_required += 4;
if ( size_left < 4 ) {
LogError("Process_ipfix: [%u] option template length error: size left %u too",
exporter->info.id, size_left);
dbg_printf("option template length error: size left %u too small\n", size_left);
return;
}
enterprise_value = Get_val32(DataPtr);
DataPtr += 4;
size_left -= 4;
dbg_printf(" [%i] Enterprise: 1, option id: %u, option length %u enterprise value: %u\n",
i, id, length, enterprise_value);
} else {
dbg_printf(" [%i] Enterprise: 0, option id: %u, option length %u\n", i, id, length);
}
switch (id) {
case IPFIX_samplingInterval: // legacy #34
case IPFIX_samplingPacketInterval: // #305
if ( length == 4 ) {
offset_std_sampler_interval = offset;
found_std_sampling++;
dbg_printf(" 4 byte sampling interval option at offset: %u\n", offset);
} else {
LogError("Process_ipfix: [%u] option template error: sampling option lenth != 4 bytes: %u",
exporter->info.id, length);
}
break;
case IPFIX_samplingAlgorithm: // legacy #35
case IPFIX_selectorAlgorithm: // #304
if ( length == 1 ) {
offset_std_sampler_algorithm = offset;
dbg_printf(" 1 byte sampling algorithm option at offset: %u\n", offset);
found_std_sampling++;
} else {
LogError("Process_ipfix: [%u] option template error: algorithm option lenth != 1 byte: %u",
exporter->info.id, length);
}
break;
}
offset += length;
}
if ( offset_std_sampler_interval ) {
dbg_printf("[%u] Std sampling interval found. offset: %u\n",
exporter->info.id, offset_std_sampler_interval);
if ( offset_std_sampler_algorithm )
dbg_printf("[%u] Std sampling algorithm found. offset: %u\n",
exporter->info.id, offset_std_sampler_algorithm);
InsertStdSamplerOffset(fs, id, offset_std_sampler_interval, offset_std_sampler_algorithm);
dbg_printf("\n");
}
processed_records++;
} // End of Process_ipfix_option_templates
| static void Process_ipfix_option_templates(exporter_ipfix_domain_t *exporter, void *option_template_flowset, FlowSource_t *fs) {
uint8_t *DataPtr;
uint32_t size_left, size_required, i;
uint16_t id, field_count, scope_field_count, offset;
uint16_t offset_std_sampler_interval, offset_std_sampler_algorithm, found_std_sampling;
i = 0; // keep compiler happy
size_left = GET_FLOWSET_LENGTH(option_template_flowset) - 4; // -4 for flowset header -> id and length
if ( size_left < 6 ) {
LogError("Process_ipfix: [%u] option template length error: size left %u too small for an options template",
exporter->info.id, size_left);
return;
}
DataPtr = option_template_flowset + 4;
id = GET_OPTION_TEMPLATE_ID(DataPtr);
field_count = GET_OPTION_TEMPLATE_FIELD_COUNT(DataPtr);
scope_field_count = GET_OPTION_TEMPLATE_SCOPE_FIELD_COUNT(DataPtr);
DataPtr += 6;
size_left -= 6;
dbg_printf("Decode Option Template. id: %u, field count: %u, scope field count: %u\n",
id, field_count, scope_field_count);
if ( scope_field_count == 0 ) {
LogError("Process_ipfx: [%u] scope field count error: length must not be zero",
exporter->info.id);
dbg_printf("scope field count error: length must not be zero\n");
return;
}
size_required = 2 * field_count * sizeof(uint16_t);
dbg_printf("Size left: %u, size required: %u\n", size_left, size_required);
if ( size_left < size_required ) {
LogError("Process_ipfix: [%u] option template length error: size left %u too small for %u scopes length and %u options length",
exporter->info.id, size_left, field_count, scope_field_count);
dbg_printf("option template length error: size left %u too small for field_count %u\n",
size_left, field_count);
return;
}
if ( scope_field_count == 0 ) {
LogError("Process_ipfxi: [%u] scope field count error: length must not be zero",
exporter->info.id);
return;
}
offset_std_sampler_interval = 0;
offset_std_sampler_algorithm = 0;
found_std_sampling = 0;
offset = 0;
for ( i=0; i<scope_field_count; i++ ) {
uint16_t id, length;
int Enterprise;
if ( size_left && size_left < 4 ) {
LogError("Process_ipfix [%u] Template size error at %s line %u" ,
exporter->info.id, __FILE__, __LINE__, strerror (errno));
return;
}
id = Get_val16(DataPtr); DataPtr += 2;
length = Get_val16(DataPtr); DataPtr += 2;
size_left -= 4;
Enterprise = id & 0x8000 ? 1 : 0;
if ( Enterprise ) {
size_required += 4;
if ( size_left < 4 ) {
LogError("Process_ipfix: [%u] option template length error: size left %u too small",
exporter->info.id, size_left);
dbg_printf("option template length error: size left %u too small\n", size_left);
return;
}
DataPtr += 4;
size_left -= 4;
dbg_printf(" [%i] Enterprise: 1, scope id: %u, scope length %u enterprise value: %u\n",
i, id, length, Get_val32(DataPtr));
} else {
dbg_printf(" [%i] Enterprise: 0, scope id: %u, scope length %u\n", i, id, length);
}
offset += length;
}
for ( ;i<field_count; i++ ) {
uint32_t enterprise_value;
uint16_t id, length;
int Enterprise;
UNUSED(enterprise_value);
id = Get_val16(DataPtr); DataPtr += 2;
length = Get_val16(DataPtr); DataPtr += 2;
size_left -= 4;
Enterprise = id & 0x8000 ? 1 : 0;
if ( Enterprise ) {
size_required += 4;
if ( size_left < 4 ) {
LogError("Process_ipfix: [%u] option template length error: size left %u too",
exporter->info.id, size_left);
dbg_printf("option template length error: size left %u too small\n", size_left);
return;
}
enterprise_value = Get_val32(DataPtr);
DataPtr += 4;
size_left -= 4;
dbg_printf(" [%i] Enterprise: 1, option id: %u, option length %u enterprise value: %u\n",
i, id, length, enterprise_value);
} else {
dbg_printf(" [%i] Enterprise: 0, option id: %u, option length %u\n", i, id, length);
}
switch (id) {
case IPFIX_samplingInterval: // legacy #34
case IPFIX_samplingPacketInterval: // #305
if ( length == 4 ) {
offset_std_sampler_interval = offset;
found_std_sampling++;
dbg_printf(" 4 byte sampling interval option at offset: %u\n", offset);
} else {
LogError("Process_ipfix: [%u] option template error: sampling option lenth != 4 bytes: %u",
exporter->info.id, length);
}
break;
case IPFIX_samplingAlgorithm: // legacy #35
case IPFIX_selectorAlgorithm: // #304
if ( length == 1 ) {
offset_std_sampler_algorithm = offset;
dbg_printf(" 1 byte sampling algorithm option at offset: %u\n", offset);
found_std_sampling++;
} else {
LogError("Process_ipfix: [%u] option template error: algorithm option lenth != 1 byte: %u",
exporter->info.id, length);
}
break;
}
offset += length;
}
if ( offset_std_sampler_interval ) {
dbg_printf("[%u] Std sampling interval found. offset: %u\n",
exporter->info.id, offset_std_sampler_interval);
if ( offset_std_sampler_algorithm )
dbg_printf("[%u] Std sampling algorithm found. offset: %u\n",
exporter->info.id, offset_std_sampler_algorithm);
InsertStdSamplerOffset(fs, id, offset_std_sampler_interval, offset_std_sampler_algorithm);
dbg_printf("\n");
}
processed_records++;
} // End of Process_ipfix_option_templates
| C | nfdump | 0 |
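The bug class the nfdump record above fixes is easy to reproduce in isolation: subtracting from an unsigned byte counter without checking that enough bytes remain wraps it to a huge value, defeating every later "size_left < n" test. Check-before-subtract is the fix:

#include <stdint.h>
#include <stdio.h>

static int consume(uint32_t *size_left, uint32_t need)
{
	if (*size_left < need)
		return -1;	/* truncated record: stop parsing */
	*size_left -= need;
	return 0;
}

int main(void)
{
	uint32_t size_left = 2;

	printf("unchecked 2 - 4 wraps to %u\n", size_left - 4);
	printf("checked consume returns  %d\n", consume(&size_left, 4));
	return 0;
}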
CVE-2017-2633 | https://www.cvedetails.com/cve/CVE-2017-2633/ | CWE-125 | https://git.qemu.org/?p=qemu.git;a=commitdiff;h=9f64916da20eea67121d544698676295bbb105a7 | 9f64916da20eea67121d544698676295bbb105a7 | null | int vnc_zywrle_send_framebuffer_update(VncState *vs, int x, int y, int w, int h)
{
vs->zrle.type = VNC_ENCODING_ZYWRLE;
return zrle_send_framebuffer_update(vs, x, y, w, h);
}
| int vnc_zywrle_send_framebuffer_update(VncState *vs, int x, int y, int w, int h)
{
vs->zrle.type = VNC_ENCODING_ZYWRLE;
return zrle_send_framebuffer_update(vs, x, y, w, h);
}
| C | qemu | 0 |
CVE-2017-11664 | https://www.cvedetails.com/cve/CVE-2017-11664/ | CWE-125 | https://github.com/Mindwerks/wildmidi/commit/660b513d99bced8783a4a5984ac2f742c74ebbdd | 660b513d99bced8783a4a5984ac2f742c74ebbdd | Add a new size parameter to _WM_SetupMidiEvent() so that it knows
where to stop reading, and adjust its users properly. Fixes bug #175
(CVE-2017-11661, CVE-2017-11662, CVE-2017-11663, CVE-2017-11664.) | static int midi_setup_marker(struct _mdi *mdi, char * text) {
MIDI_EVENT_SDEBUG(__FUNCTION__,0, text);
strip_text(text);
_WM_CheckEventMemoryPool(mdi);
mdi->events[mdi->event_count].do_event = *_WM_do_meta_marker;
mdi->events[mdi->event_count].event_data.channel = 0;
mdi->events[mdi->event_count].event_data.data.string = text;
mdi->events[mdi->event_count].samples_to_next = 0;
mdi->event_count++;
return (0);
}
| static int midi_setup_marker(struct _mdi *mdi, char * text) {
MIDI_EVENT_SDEBUG(__FUNCTION__,0, text);
strip_text(text);
_WM_CheckEventMemoryPool(mdi);
mdi->events[mdi->event_count].do_event = *_WM_do_meta_marker;
mdi->events[mdi->event_count].event_data.channel = 0;
mdi->events[mdi->event_count].event_data.data.string = text;
mdi->events[mdi->event_count].samples_to_next = 0;
mdi->event_count++;
return (0);
}
| C | wildmidi | 0 |
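A sketch of the interface change the wildmidi record above describes: the event parser takes the number of bytes left in the buffer and validates every read against it. The names and the one-/two-data-byte split are illustrative, not the real _WM_SetupMidiEvent() internals:

#include <stdint.h>
#include <stddef.h>

static int setup_midi_event(const uint8_t *data, size_t size,
                            uint8_t *status_out)
{
	if (size < 1)
		return -1;		/* nothing left to read */

	uint8_t status = data[0];
	if (status < 0x80)
		return -1;		/* running status not handled here */

	/* program change (0xC0) and channel pressure (0xD0) carry one
	 * data byte; the other channel voice messages carry two */
	size_t need = ((status & 0xf0) == 0xc0 ||
	               (status & 0xf0) == 0xd0) ? 1 : 2;
	if (size < 1 + need)
		return -1;		/* would read past the buffer */

	*status_out = status;
	return (int)(1 + need);		/* bytes consumed */
}

int main(void)
{
	const uint8_t note_on[] = { 0x90, 0x3c };	/* velocity missing */
	uint8_t status;
	/* the truncated event is rejected instead of over-read */
	return setup_midi_event(note_on, sizeof(note_on), &status) < 0 ? 0 : 1;
}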
CVE-2018-17407 | https://www.cvedetails.com/cve/CVE-2018-17407/ | CWE-119 | https://github.com/TeX-Live/texlive-source/commit/6ed0077520e2b0da1fd060c7f88db7b2e6068e4c | 6ed0077520e2b0da1fd060c7f88db7b2e6068e4c | writet1 protection against buffer overflow
git-svn-id: svn://tug.org/texlive/trunk/Build/source@48697 c570f23f-e606-0410-a88d-b1316a301751 | static int t1_getbyte(void)
{
int c = t1_getchar();
if (t1_pfa)
return c;
if (t1_block_length == 0) {
if (c != 128)
pdftex_fail("invalid marker");
c = t1_getchar();
if (c == 3) {
while (!t1_eof())
t1_getchar();
return EOF;
}
t1_block_length = t1_getchar() & 0xff;
t1_block_length |= (t1_getchar() & 0xff) << 8;
t1_block_length |= (t1_getchar() & 0xff) << 16;
t1_block_length |= (t1_getchar() & 0xff) << 24;
c = t1_getchar();
}
t1_block_length--;
return c;
}
| static int t1_getbyte(void)
{
int c = t1_getchar();
if (t1_pfa)
return c;
if (t1_block_length == 0) {
if (c != 128)
pdftex_fail("invalid marker");
c = t1_getchar();
if (c == 3) {
while (!t1_eof())
t1_getchar();
return EOF;
}
t1_block_length = t1_getchar() & 0xff;
t1_block_length |= (t1_getchar() & 0xff) << 8;
t1_block_length |= (t1_getchar() & 0xff) << 16;
t1_block_length |= (t1_getchar() & 0xff) << 24;
c = t1_getchar();
}
t1_block_length--;
return c;
}
| C | texlive-source | 0 |
CVE-2014-1690 | https://www.cvedetails.com/cve/CVE-2014-1690/ | CWE-119 | https://github.com/torvalds/linux/commit/2690d97ade05c5325cbf7c72b94b90d265659886 | 2690d97ade05c5325cbf7c72b94b90d265659886 | netfilter: nf_nat: fix access to uninitialized buffer in IRC NAT helper
Commit 5901b6be885e attempted to introduce IPv6 support into
IRC NAT helper. By doing so, the following code seemed to be removed
by accident:
ip = ntohl(exp->master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip);
sprintf(buffer, "%u %u", ip, port);
pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n", buffer, &ip, port);
This leads to the fact that buffer[] was left uninitialized and
contained some stack value. When we call nf_nat_mangle_tcp_packet(),
we call strlen(buffer) on excatly this uninitialized buffer. If we
are unlucky and the skb has enough tailroom, we overwrite resp. leak
contents with values that sit on our stack into the packet and send
that out to the receiver.
Since the rather informal DCC spec [1] does not seem to specify
IPv6 support right now, we log such occurences so that admins can
act accordingly, and drop the packet. I've looked into XChat source,
and IPv6 is not supported there: addresses are in u32 and print
via %u format string.
Therefore, restore old behaviour as in IPv4, use snprintf(). The
IRC helper does not support IPv6 by now. By this, we can safely use
strlen(buffer) in nf_nat_mangle_tcp_packet() and prevent a buffer
overflow. Also simplify some code as we now have ct variable anyway.
[1] http://www.irchelp.org/irchelp/rfc/ctcpspec.html
Fixes: 5901b6be885e ("netfilter: nf_nat: support IPv6 in IRC NAT helper")
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Cc: Harald Welte <laforge@gnumonks.org>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> | static int __init nf_nat_irc_init(void)
{
BUG_ON(nf_nat_irc_hook != NULL);
RCU_INIT_POINTER(nf_nat_irc_hook, help);
return 0;
}
| static int __init nf_nat_irc_init(void)
{
BUG_ON(nf_nat_irc_hook != NULL);
RCU_INIT_POINTER(nf_nat_irc_hook, help);
return 0;
}
| C | linux | 0 |
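The restored behaviour from the nf_nat_irc record above, reduced to plain user-space C: the DCC "<ip> <port>" string must be formatted into the stack buffer before anything calls strlen() on it (the in-kernel helper differs, of course):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buffer[sizeof("4294967295 65535")];	/* worst-case size */
	uint32_t ip = ntohl(inet_addr("192.0.2.1"));
	uint16_t port = 6667;

	snprintf(buffer, sizeof(buffer), "%u %u", ip, port);

	/* with the buffer initialized, strlen() is well defined */
	printf("payload (%zu bytes): %s\n", strlen(buffer), buffer);
	return 0;
}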
CVE-2018-17470 | https://www.cvedetails.com/cve/CVE-2018-17470/ | CWE-119 | https://github.com/chromium/chromium/commit/385508dc888ef15d272cdd2705b17996abc519d6 | 385508dc888ef15d272cdd2705b17996abc519d6 | Implement immutable texture base/max level clamping
It seems some drivers fail to handle that gracefully, so let's always clamp
to be on the safe side.
BUG=877874
TEST=test case in the bug, gpu_unittests
R=kbr@chromium.org
Cq-Include-Trybots: luci.chromium.try:android_optional_gpu_tests_rel;luci.chromium.try:linux_optional_gpu_tests_rel;luci.chromium.try:mac_optional_gpu_tests_rel;luci.chromium.try:win_optional_gpu_tests_rel
Change-Id: I6d93cb9389ea70525df4604112223604577582a2
Reviewed-on: https://chromium-review.googlesource.com/1194994
Reviewed-by: Kenneth Russell <kbr@chromium.org>
Commit-Queue: Zhenyao Mo <zmo@chromium.org>
Cr-Commit-Position: refs/heads/master@{#587264} | void BackTexture::Copy() {
DCHECK_NE(id(), 0u);
ScopedGLErrorSuppressor suppressor("BackTexture::Copy",
decoder_->state_.GetErrorState());
ScopedTextureBinder binder(&decoder_->state_, id(), Target());
api()->glCopyTexSubImage2DFn(Target(),
0, // level
0, 0, 0, 0, size_.width(), size_.height());
}
| void BackTexture::Copy() {
DCHECK_NE(id(), 0u);
ScopedGLErrorSuppressor suppressor("BackTexture::Copy",
decoder_->state_.GetErrorState());
ScopedTextureBinder binder(&decoder_->state_, id(), Target());
api()->glCopyTexSubImage2DFn(Target(),
0, // level
0, 0, 0, 0, size_.width(), size_.height());
}
| C | Chrome | 0 |
null | null | null | https://github.com/chromium/chromium/commit/3a353ebdb7753a3fbeb401c4c0e0f3358ccbb90b | 3a353ebdb7753a3fbeb401c4c0e0f3358ccbb90b | Support pausing media when a context is frozen.
Media is resumed when the context is unpaused. This feature will be used
for bfcache and pausing iframes feature policy.
BUG=907125
Change-Id: Ic3925ea1a4544242b7bf0b9ad8c9cb9f63976bbd
Reviewed-on: https://chromium-review.googlesource.com/c/1410126
Commit-Queue: Dave Tapuska <dtapuska@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Mounir Lamouri <mlamouri@chromium.org>
Cr-Commit-Position: refs/heads/master@{#623319} | void HTMLMediaElement::NetworkStateChanged() {
SetNetworkState(GetWebMediaPlayer()->GetNetworkState());
}
| void HTMLMediaElement::NetworkStateChanged() {
SetNetworkState(GetWebMediaPlayer()->GetNetworkState());
}
| C | Chrome | 0 |
CVE-2014-1738 | https://www.cvedetails.com/cve/CVE-2014-1738/ | CWE-264 | https://github.com/torvalds/linux/commit/2145e15e0557a01b9195d1c7199a1b92cb9be81f | 2145e15e0557a01b9195d1c7199a1b92cb9be81f | floppy: don't write kernel-only members to FDRAWCMD ioctl output
Do not leak kernel-only floppy_raw_cmd structure members to userspace.
This includes the linked-list pointer and the pointer to the allocated
DMA space.
Signed-off-by: Matthew Daley <mattd@bugfuzz.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> | static int user_reset_fdc(int drive, int arg, bool interruptible)
{
int ret;
if (lock_fdc(drive, interruptible))
return -EINTR;
if (arg == FD_RESET_ALWAYS)
FDCS->reset = 1;
if (FDCS->reset) {
cont = &reset_cont;
ret = wait_til_done(reset_fdc, interruptible);
if (ret == -EINTR)
return -EINTR;
}
process_fd_request();
return 0;
}
| static int user_reset_fdc(int drive, int arg, bool interruptible)
{
int ret;
if (lock_fdc(drive, interruptible))
return -EINTR;
if (arg == FD_RESET_ALWAYS)
FDCS->reset = 1;
if (FDCS->reset) {
cont = &reset_cont;
ret = wait_til_done(reset_fdc, interruptible);
if (ret == -EINTR)
return -EINTR;
}
process_fd_request();
return 0;
}
| C | linux | 0 |
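One way to realize the rule stated in the floppy record above — kernel-only members must never cross the user boundary — is to scrub them before the copy. The actual fix copies the user-visible field ranges instead; this is a simplified kernel-style sketch, not buildable standalone:

#include <linux/errno.h>
#include <linux/fd.h>
#include <linux/uaccess.h>

static int raw_cmd_copyout_one(const struct floppy_raw_cmd *ptr,
                               void __user *param)
{
	struct floppy_raw_cmd cmd = *ptr;

	/* scrub the kernel-only members before they leave the kernel */
	cmd.next = NULL;
	cmd.kernel_data = NULL;

	if (copy_to_user(param, &cmd, sizeof(cmd)))
		return -EFAULT;
	return 0;
}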
CVE-2011-2517 | https://www.cvedetails.com/cve/CVE-2011-2517/ | CWE-119 | https://github.com/torvalds/linux/commit/208c72f4fe44fe09577e7975ba0e7fa0278f3d03 | 208c72f4fe44fe09577e7975ba0e7fa0278f3d03 | nl80211: fix check for valid SSID size in scan operations
In both trigger_scan and sched_scan operations, we were checking for
the SSID length before assigning the value correctly. Since the
memory was just kzalloc'ed, the check was always failing and SSIDs with
more than 32 characters were allowed to go through.
This was causing a buffer overflow when copying the actual SSID to the
proper place.
This bug has been there since 2.6.29-rc4.
Cc: stable@kernel.org
Signed-off-by: Luciano Coelho <coelho@ti.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com> | void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
struct net_device *netdev, u16 reason,
const u8 *ie, size_t ie_len, bool from_ap)
{
struct sk_buff *msg;
void *hdr;
msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!msg)
return;
hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_DISCONNECT);
if (!hdr) {
nlmsg_free(msg);
return;
}
NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
if (from_ap && reason)
NLA_PUT_U16(msg, NL80211_ATTR_REASON_CODE, reason);
if (from_ap)
NLA_PUT_FLAG(msg, NL80211_ATTR_DISCONNECTED_BY_AP);
if (ie)
NLA_PUT(msg, NL80211_ATTR_IE, ie_len, ie);
if (genlmsg_end(msg, hdr) < 0) {
nlmsg_free(msg);
return;
}
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, GFP_KERNEL);
return;
nla_put_failure:
genlmsg_cancel(msg, hdr);
nlmsg_free(msg);
}
| void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
struct net_device *netdev, u16 reason,
const u8 *ie, size_t ie_len, bool from_ap)
{
struct sk_buff *msg;
void *hdr;
msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!msg)
return;
hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_DISCONNECT);
if (!hdr) {
nlmsg_free(msg);
return;
}
NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
if (from_ap && reason)
NLA_PUT_U16(msg, NL80211_ATTR_REASON_CODE, reason);
if (from_ap)
NLA_PUT_FLAG(msg, NL80211_ATTR_DISCONNECTED_BY_AP);
if (ie)
NLA_PUT(msg, NL80211_ATTR_IE, ie_len, ie);
if (genlmsg_end(msg, hdr) < 0) {
nlmsg_free(msg);
return;
}
genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
nl80211_mlme_mcgrp.id, GFP_KERNEL);
return;
nla_put_failure:
genlmsg_cancel(msg, hdr);
nlmsg_free(msg);
}
| C | linux | 0 |
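The ordering bug in the nl80211 record above, as a standalone sketch: validate the attribute length against the fixed-size destination before copying, rather than against a still-zeroed field of a freshly kzalloc'ed request. The struct is illustrative:

#include <stddef.h>
#include <string.h>

#define IEEE80211_MAX_SSID_LEN 32

struct ssid_req {
	unsigned char ssid[IEEE80211_MAX_SSID_LEN];
	unsigned char ssid_len;
};

static int fill_ssid(struct ssid_req *req,
                     const unsigned char *attr, size_t attr_len)
{
	if (attr_len > sizeof(req->ssid))
		return -1;	/* reject instead of overflowing req->ssid */

	req->ssid_len = (unsigned char)attr_len;
	memcpy(req->ssid, attr, attr_len);
	return 0;
}

int main(void)
{
	struct ssid_req req = { {0}, 0 };
	unsigned char oversized[33] = { 0 };

	/* a 33-byte SSID is refused instead of smashing the struct */
	return fill_ssid(&req, oversized, sizeof(oversized)) == -1 ? 0 : 1;
}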
CVE-2017-13053 | https://www.cvedetails.com/cve/CVE-2017-13053/ | CWE-125 | https://github.com/the-tcpdump-group/tcpdump/commit/bd4e697ebd6c8457efa8f28f6831fc929b88a014 | bd4e697ebd6c8457efa8f28f6831fc929b88a014 | CVE-2017-13053/BGP: fix VPN route target bounds checks
decode_rt_routing_info() didn't check bounds before fetching 4 octets of
the origin AS field and could over-read the input buffer, put it right.
It also fetched the varying number of octets of the route target field
from 4 octets lower than the correct offset, put it right.
It also used the same temporary buffer explicitly through as_printf()
and implicitly through bgp_vpn_rd_print() so the end result of snprintf()
was not what was originally intended.
This fixes a buffer over-read discovered by Bhargava Shastry,
SecT/TU Berlin.
Add a test using the capture file supplied by the reporter(s). | bgp_open_print(netdissect_options *ndo,
const u_char *dat, int length)
{
struct bgp_open bgpo;
struct bgp_opt bgpopt;
const u_char *opt;
int i;
ND_TCHECK2(dat[0], BGP_OPEN_SIZE);
memcpy(&bgpo, dat, BGP_OPEN_SIZE);
ND_PRINT((ndo, "\n\t Version %d, ", bgpo.bgpo_version));
ND_PRINT((ndo, "my AS %s, ",
as_printf(ndo, astostr, sizeof(astostr), ntohs(bgpo.bgpo_myas))));
ND_PRINT((ndo, "Holdtime %us, ", ntohs(bgpo.bgpo_holdtime)));
ND_PRINT((ndo, "ID %s", ipaddr_string(ndo, &bgpo.bgpo_id)));
ND_PRINT((ndo, "\n\t Optional parameters, length: %u", bgpo.bgpo_optlen));
/* some little sanity checking */
if (length < bgpo.bgpo_optlen+BGP_OPEN_SIZE)
return;
/* ugly! */
opt = &((const struct bgp_open *)dat)->bgpo_optlen;
opt++;
i = 0;
while (i < bgpo.bgpo_optlen) {
ND_TCHECK2(opt[i], BGP_OPT_SIZE);
memcpy(&bgpopt, &opt[i], BGP_OPT_SIZE);
if (i + 2 + bgpopt.bgpopt_len > bgpo.bgpo_optlen) {
ND_PRINT((ndo, "\n\t Option %d, length: %u", bgpopt.bgpopt_type, bgpopt.bgpopt_len));
break;
}
ND_PRINT((ndo, "\n\t Option %s (%u), length: %u",
tok2str(bgp_opt_values,"Unknown",
bgpopt.bgpopt_type),
bgpopt.bgpopt_type,
bgpopt.bgpopt_len));
/* now let's decode the options we know*/
switch(bgpopt.bgpopt_type) {
case BGP_OPT_CAP:
bgp_capabilities_print(ndo, &opt[i+BGP_OPT_SIZE],
bgpopt.bgpopt_len);
break;
case BGP_OPT_AUTH:
default:
ND_PRINT((ndo, "\n\t no decoder for option %u",
bgpopt.bgpopt_type));
break;
}
i += BGP_OPT_SIZE + bgpopt.bgpopt_len;
}
return;
trunc:
ND_PRINT((ndo, "[|BGP]"));
}
| bgp_open_print(netdissect_options *ndo,
const u_char *dat, int length)
{
struct bgp_open bgpo;
struct bgp_opt bgpopt;
const u_char *opt;
int i;
ND_TCHECK2(dat[0], BGP_OPEN_SIZE);
memcpy(&bgpo, dat, BGP_OPEN_SIZE);
ND_PRINT((ndo, "\n\t Version %d, ", bgpo.bgpo_version));
ND_PRINT((ndo, "my AS %s, ",
as_printf(ndo, astostr, sizeof(astostr), ntohs(bgpo.bgpo_myas))));
ND_PRINT((ndo, "Holdtime %us, ", ntohs(bgpo.bgpo_holdtime)));
ND_PRINT((ndo, "ID %s", ipaddr_string(ndo, &bgpo.bgpo_id)));
ND_PRINT((ndo, "\n\t Optional parameters, length: %u", bgpo.bgpo_optlen));
/* some little sanity checking */
if (length < bgpo.bgpo_optlen+BGP_OPEN_SIZE)
return;
/* ugly! */
opt = &((const struct bgp_open *)dat)->bgpo_optlen;
opt++;
i = 0;
while (i < bgpo.bgpo_optlen) {
ND_TCHECK2(opt[i], BGP_OPT_SIZE);
memcpy(&bgpopt, &opt[i], BGP_OPT_SIZE);
if (i + 2 + bgpopt.bgpopt_len > bgpo.bgpo_optlen) {
ND_PRINT((ndo, "\n\t Option %d, length: %u", bgpopt.bgpopt_type, bgpopt.bgpopt_len));
break;
}
ND_PRINT((ndo, "\n\t Option %s (%u), length: %u",
tok2str(bgp_opt_values,"Unknown",
bgpopt.bgpopt_type),
bgpopt.bgpopt_type,
bgpopt.bgpopt_len));
/* now let's decode the options we know*/
switch(bgpopt.bgpopt_type) {
case BGP_OPT_CAP:
bgp_capabilities_print(ndo, &opt[i+BGP_OPT_SIZE],
bgpopt.bgpopt_len);
break;
case BGP_OPT_AUTH:
default:
ND_PRINT((ndo, "\n\t no decoder for option %u",
bgpopt.bgpopt_type));
break;
}
i += BGP_OPT_SIZE + bgpopt.bgpopt_len;
}
return;
trunc:
ND_PRINT((ndo, "[|BGP]"));
}
| C | tcpdump | 0 |
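A portable sketch of the two fixes named in the tcpdump record above: verify that 4 octets are actually available before reading the origin AS, and take the variable-length route target from the offset just past that field. extract_be32() stands in for tcpdump's EXTRACT_32BITS():

#include <stdint.h>
#include <stddef.h>

static uint32_t extract_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

static int decode_rt_origin_as(const uint8_t *pptr, const uint8_t *end,
                               uint32_t *origin_as)
{
	if (end - pptr < 4)
		return -1;	/* truncated: do not over-read */

	*origin_as = extract_be32(pptr);
	/* the route target bytes follow at pptr + 4, not at pptr */
	return 0;
}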
CVE-2019-11599 | https://www.cvedetails.com/cve/CVE-2019-11599/ | CWE-362 | https://github.com/torvalds/linux/commit/04f5866e41fb70690e28397487d8bd8eea7d712a | 04f5866e41fb70690e28397487d8bd8eea7d712a | coredump: fix race condition between mmget_not_zero()/get_task_mm() and core dumping
The core dumping code has always run without holding the mmap_sem for
writing, despite that is the only way to ensure that the entire vma
layout will not change from under it. Only using some signal
serialization on the processes belonging to the mm is not nearly enough.
This was pointed out earlier. For example in Hugh's post from Jul 2017:
https://lkml.kernel.org/r/alpine.LSU.2.11.1707191716030.2055@eggly.anvils
"Not strictly relevant here, but a related note: I was very surprised
to discover, only quite recently, how handle_mm_fault() may be called
without down_read(mmap_sem) - when core dumping. That seems a
misguided optimization to me, which would also be nice to correct"
In particular because the growsdown and growsup can move the
vm_start/vm_end the various loops the core dump does around the vma will
not be consistent if page faults can happen concurrently.
Pretty much all users calling mmget_not_zero()/get_task_mm() and then
taking the mmap_sem had the potential to introduce unexpected side
effects in the core dumping code.
Adding mmap_sem for writing around the ->core_dump invocation is a
viable long term fix, but it requires removing all copy user and page
faults and to replace them with get_dump_page() for all binary formats
which is not suitable as a short term fix.
For the time being this solution manually covers the places that can
confuse the core dump either by altering the vma layout or the vma flags
while it runs. Once ->core_dump runs under mmap_sem for writing the
function mmget_still_valid() can be dropped.
Allowing mmap_sem protected sections to run in parallel with the
coredump provides some minor parallelism advantage to the swapoff code
(which seems to be safe enough by never mangling any vma field and can
keep doing swapins in parallel to the core dumping) and to some other
corner case.
In order to facilitate the backporting I added "Fixes: 86039bd3b4e6";
however, the side effect of this same race condition in /proc/pid/mem
should be reproducible since before 2.6.12-rc2, so I couldn't add any
other "Fixes:" because there's no hash beyond the git genesis commit.
Because find_extend_vma() is the only location outside of the process
context that could modify the "mm" structures under mmap_sem for
reading, by adding the mmget_still_valid() check to it, all other cases
that take the mmap_sem for reading don't need the new check after
mmget_not_zero()/get_task_mm(). The expand_stack() in page fault
context also doesn't need the new check, because all tasks under core
dumping are frozen.
Link: http://lkml.kernel.org/r/20190325224949.11068-1-aarcange@redhat.com
Fixes: 86039bd3b4e6 ("userfaultfd: add new syscall to provide memory externalization")
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reported-by: Jann Horn <jannh@google.com>
Suggested-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: Jann Horn <jannh@google.com>
Acked-by: Jason Gunthorpe <jgg@mellanox.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> | static inline void clear_soft_dirty(struct vm_area_struct *vma,
unsigned long addr, pte_t *pte)
{
/*
* The soft-dirty tracker uses #PF-s to catch writes
* to pages, so write-protect the pte as well. See the
* Documentation/admin-guide/mm/soft-dirty.rst for full description
* of how soft-dirty works.
*/
pte_t ptent = *pte;
if (pte_present(ptent)) {
pte_t old_pte;
old_pte = ptep_modify_prot_start(vma, addr, pte);
ptent = pte_wrprotect(old_pte);
ptent = pte_clear_soft_dirty(ptent);
ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
} else if (is_swap_pte(ptent)) {
ptent = pte_swp_clear_soft_dirty(ptent);
set_pte_at(vma->vm_mm, addr, pte, ptent);
}
}
| static inline void clear_soft_dirty(struct vm_area_struct *vma,
unsigned long addr, pte_t *pte)
{
/*
* The soft-dirty tracker uses #PF-s to catch writes
* to pages, so write-protect the pte as well. See the
* Documentation/admin-guide/mm/soft-dirty.rst for full description
* of how soft-dirty works.
*/
pte_t ptent = *pte;
if (pte_present(ptent)) {
pte_t old_pte;
old_pte = ptep_modify_prot_start(vma, addr, pte);
ptent = pte_wrprotect(old_pte);
ptent = pte_clear_soft_dirty(ptent);
ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
} else if (is_swap_pte(ptent)) {
ptent = pte_swp_clear_soft_dirty(ptent);
set_pte_at(vma->vm_mm, addr, pte, ptent);
}
}
| C | linux | 0 |
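The helper the coredump record above introduces, as a kernel fragment whose shape follows the upstream commit: an mm stays valid for mmap_sem-read-side VMA modifications only while no core dump is in flight, which the dumper signals through mm->core_state. Surrounding kernel types are assumed:

static inline bool mmget_still_valid(struct mm_struct *mm)
{
	return likely(!mm->core_state);
}

/* typical call site, e.g. find_extend_vma() or the userfaultfd paths:
 *
 *	down_read(&mm->mmap_sem);
 *	if (!mmget_still_valid(mm))
 *		goto out_unlock;	// a dump is running: leave VMAs alone
 */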
CVE-2018-16513 | https://www.cvedetails.com/cve/CVE-2018-16513/ | CWE-704 | http://git.ghostscript.com/?p=ghostpdl.git;a=commit;h=b326a71659b7837d3acde954b18bda1a6f5e9498 | b326a71659b7837d3acde954b18bda1a6f5e9498 | null | static int threecomponent(i_ctx_t * i_ctx_p, ref *space, int *n)
{
*n = 3;
return 0;
}
| static int threecomponent(i_ctx_t * i_ctx_p, ref *space, int *n)
{
*n = 3;
return 0;
}
| C | ghostscript | 0 |
CVE-2016-10746 | https://www.cvedetails.com/cve/CVE-2016-10746/ | CWE-254 | https://github.com/libvirt/libvirt/commit/506e9d6c2d4baaf580d489fff0690c0ff2ff588f | 506e9d6c2d4baaf580d489fff0690c0ff2ff588f | virDomainGetTime: Deny on RO connections
We have a policy that if an API may end up talking to a guest agent
it should require an RW connection. We don't obey the rule in
virDomainGetTime().
Signed-off-by: Michal Privoznik <mprivozn@redhat.com> | virDomainIsPersistent(virDomainPtr dom)
{
VIR_DOMAIN_DEBUG(dom);
virResetLastError();
virCheckDomainReturn(dom, -1);
if (dom->conn->driver->domainIsPersistent) {
int ret;
ret = dom->conn->driver->domainIsPersistent(dom);
if (ret < 0)
goto error;
return ret;
}
virReportUnsupportedError();
error:
virDispatchError(dom->conn);
return -1;
}
| virDomainIsPersistent(virDomainPtr dom)
{
VIR_DOMAIN_DEBUG(dom);
virResetLastError();
virCheckDomainReturn(dom, -1);
if (dom->conn->driver->domainIsPersistent) {
int ret;
ret = dom->conn->driver->domainIsPersistent(dom);
if (ret < 0)
goto error;
return ret;
}
virReportUnsupportedError();
error:
virDispatchError(dom->conn);
return -1;
}
| C | libvirt | 0 |
CVE-2016-5842 | https://www.cvedetails.com/cve/CVE-2016-5842/ | CWE-125 | https://github.com/ImageMagick/ImageMagick/commit/d8ab7f046587f2e9f734b687ba7e6e10147c294b | d8ab7f046587f2e9f734b687ba7e6e10147c294b | Improve checking of EXIF profile to prevent integer overflow (bug report from Ibrahim el-sayed) | MagickExport char *RemoveImageProperty(Image *image,
const char *property)
{
char
*value;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image->filename);
if (image->properties == (void *) NULL)
return((char *) NULL);
value=(char *) RemoveNodeFromSplayTree((SplayTreeInfo *) image->properties,
property);
return(value);
}
| MagickExport char *RemoveImageProperty(Image *image,
const char *property)
{
char
*value;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image->filename);
if (image->properties == (void *) NULL)
return((char *) NULL);
value=(char *) RemoveNodeFromSplayTree((SplayTreeInfo *) image->properties,
property);
return(value);
}
| C | ImageMagick | 0 |
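A generic sketch of the hardening the ImageMagick record above implies for an EXIF parser: when both the offset and the component size come from the file, "offset + size" can wrap, so compare against the remaining space instead. Illustrative only, not the actual ImageMagick routine:

#include <stddef.h>

static int exif_range_ok(size_t profile_len, size_t offset, size_t size)
{
	if (offset > profile_len)
		return 0;
	/* the subtraction cannot underflow after the check above, and it
	 * avoids the wrap-prone "offset + size > profile_len" form */
	return size <= profile_len - offset;
}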
CVE-2017-13006 | https://www.cvedetails.com/cve/CVE-2017-13006/ | CWE-125 | https://github.com/the-tcpdump-group/tcpdump/commit/cc4a7391c616be7a64ed65742ef9ed3f106eb165 | cc4a7391c616be7a64ed65742ef9ed3f106eb165 | CVE-2017-13006/L2TP: Check whether an AVP's content exceeds the AVP length.
It's not good enough to check whether all the data specified by the AVP
length was captured - you also have to check whether that length is
large enough for all the required data in the AVP.
This fixes a buffer over-read discovered by Yannick Formaggio.
Add a test using the capture file supplied by the reporter(s). | l2tp_accm_print(netdissect_options *ndo, const u_char *dat, u_int length)
{
const uint16_t *ptr = (const uint16_t *)dat;
uint16_t val_h, val_l;
if (length < 2) {
ND_PRINT((ndo, "AVP too short"));
return;
}
ptr++; /* skip "Reserved" */
length -= 2;
if (length < 4) {
ND_PRINT((ndo, "AVP too short"));
return;
}
val_h = EXTRACT_16BITS(ptr); ptr++; length -= 2;
val_l = EXTRACT_16BITS(ptr); ptr++; length -= 2;
ND_PRINT((ndo, "send=%08x ", (val_h<<16) + val_l));
if (length < 4) {
ND_PRINT((ndo, "AVP too short"));
return;
}
val_h = EXTRACT_16BITS(ptr); ptr++;
val_l = EXTRACT_16BITS(ptr); ptr++;
ND_PRINT((ndo, "recv=%08x ", (val_h<<16) + val_l));
}
| l2tp_accm_print(netdissect_options *ndo, const u_char *dat)
{
const uint16_t *ptr = (const uint16_t *)dat;
uint16_t val_h, val_l;
ptr++; /* skip "Reserved" */
val_h = EXTRACT_16BITS(ptr); ptr++;
val_l = EXTRACT_16BITS(ptr); ptr++;
ND_PRINT((ndo, "send=%08x ", (val_h<<16) + val_l));
val_h = EXTRACT_16BITS(ptr); ptr++;
val_l = EXTRACT_16BITS(ptr); ptr++;
ND_PRINT((ndo, "recv=%08x ", (val_h<<16) + val_l));
}
| C | tcpdump | 1 |
CVE-2013-2921 | https://www.cvedetails.com/cve/CVE-2013-2921/ | CWE-399 | https://github.com/chromium/chromium/commit/1228817ab04a14df53b5a8446085f9c03bf6e964 | 1228817ab04a14df53b5a8446085f9c03bf6e964 | repairs CopyFromCompositingSurface in HighDPI
This CL removes the DIP=>Pixel transform in
DelegatedFrameHost::CopyFromCompositingSurface(), because said
transformation seems to be happening later in the copy logic
and is currently being applied twice.
BUG=397708
Review URL: https://codereview.chromium.org/421293002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@286414 0039d316-1c4b-4281-b951-d872f2087c98 | void DelegatedFrameHost::OnLostResources() {
RenderWidgetHostImpl* host = client_->GetHost();
if (frame_provider_.get() || !surface_id_.is_null())
EvictDelegatedFrame();
idle_frame_subscriber_textures_.clear();
yuv_readback_pipeline_.reset();
host->ScheduleComposite();
}
| void DelegatedFrameHost::OnLostResources() {
RenderWidgetHostImpl* host = client_->GetHost();
if (frame_provider_.get() || !surface_id_.is_null())
EvictDelegatedFrame();
idle_frame_subscriber_textures_.clear();
yuv_readback_pipeline_.reset();
host->ScheduleComposite();
}
| C | Chrome | 0 |
CVE-2017-7376 | https://www.cvedetails.com/cve/CVE-2017-7376/ | CWE-119 | https://android.googlesource.com/platform/external/libxml2/+/51e0cb2e5ec18eaf6fb331bc573ff27b743898f4 | 51e0cb2e5ec18eaf6fb331bc573ff27b743898f4 | DO NOT MERGE: Use correct limit for port values
no upstream report yet, add it here when we have it
issue found & patch by nmehta@
Bug: 36555370
Change-Id: Ibf1efea554b95f514e23e939363d608021de4614
(cherry picked from commit b62884fb49fe92081e414966d9b5fe58250ae53c)
| xmlBuildRelativeURI (const xmlChar * URI, const xmlChar * base)
{
xmlChar *val = NULL;
int ret;
int ix;
int pos = 0;
int nbslash = 0;
int len;
xmlURIPtr ref = NULL;
xmlURIPtr bas = NULL;
xmlChar *bptr, *uptr, *vptr;
int remove_path = 0;
if ((URI == NULL) || (*URI == 0))
return NULL;
/*
* First parse URI into a standard form
*/
ref = xmlCreateURI ();
if (ref == NULL)
return NULL;
/* If URI not already in "relative" form */
if (URI[0] != '.') {
ret = xmlParseURIReference (ref, (const char *) URI);
if (ret != 0)
goto done; /* Error in URI, return NULL */
} else
ref->path = (char *)xmlStrdup(URI);
/*
* Next parse base into the same standard form
*/
if ((base == NULL) || (*base == 0)) {
val = xmlStrdup (URI);
goto done;
}
bas = xmlCreateURI ();
if (bas == NULL)
goto done;
if (base[0] != '.') {
ret = xmlParseURIReference (bas, (const char *) base);
if (ret != 0)
goto done; /* Error in base, return NULL */
} else
bas->path = (char *)xmlStrdup(base);
/*
* If the scheme / server on the URI differs from the base,
* just return the URI
*/
if ((ref->scheme != NULL) &&
((bas->scheme == NULL) ||
(xmlStrcmp ((xmlChar *)bas->scheme, (xmlChar *)ref->scheme)) ||
(xmlStrcmp ((xmlChar *)bas->server, (xmlChar *)ref->server)))) {
val = xmlStrdup (URI);
goto done;
}
if (xmlStrEqual((xmlChar *)bas->path, (xmlChar *)ref->path)) {
val = xmlStrdup(BAD_CAST "");
goto done;
}
if (bas->path == NULL) {
val = xmlStrdup((xmlChar *)ref->path);
goto done;
}
if (ref->path == NULL) {
ref->path = (char *) "/";
remove_path = 1;
}
/*
* At this point (at last!) we can compare the two paths
*
* First we take care of the special case where either of the
* two path components may be missing (bug 316224)
*/
if (bas->path == NULL) {
if (ref->path != NULL) {
uptr = (xmlChar *) ref->path;
if (*uptr == '/')
uptr++;
/* exception characters from xmlSaveUri */
val = xmlURIEscapeStr(uptr, BAD_CAST "/;&=+$,");
}
goto done;
}
bptr = (xmlChar *)bas->path;
if (ref->path == NULL) {
for (ix = 0; bptr[ix] != 0; ix++) {
if (bptr[ix] == '/')
nbslash++;
}
uptr = NULL;
len = 1; /* this is for a string terminator only */
} else {
/*
* Next we compare the two strings and find where they first differ
*/
if ((ref->path[pos] == '.') && (ref->path[pos+1] == '/'))
pos += 2;
if ((*bptr == '.') && (bptr[1] == '/'))
bptr += 2;
else if ((*bptr == '/') && (ref->path[pos] != '/'))
bptr++;
while ((bptr[pos] == ref->path[pos]) && (bptr[pos] != 0))
pos++;
if (bptr[pos] == ref->path[pos]) {
val = xmlStrdup(BAD_CAST "");
goto done; /* (I can't imagine why anyone would do this) */
}
/*
* In URI, "back up" to the last '/' encountered. This will be the
* beginning of the "unique" suffix of URI
*/
ix = pos;
if ((ref->path[ix] == '/') && (ix > 0))
ix--;
else if ((ref->path[ix] == 0) && (ix > 1) && (ref->path[ix - 1] == '/'))
ix -= 2;
for (; ix > 0; ix--) {
if (ref->path[ix] == '/')
break;
}
if (ix == 0) {
uptr = (xmlChar *)ref->path;
} else {
ix++;
uptr = (xmlChar *)&ref->path[ix];
}
/*
* In base, count the number of '/' from the differing point
*/
if (bptr[pos] != ref->path[pos]) {/* check for trivial URI == base */
for (; bptr[ix] != 0; ix++) {
if (bptr[ix] == '/')
nbslash++;
}
}
len = xmlStrlen (uptr) + 1;
}
if (nbslash == 0) {
if (uptr != NULL)
/* exception characters from xmlSaveUri */
val = xmlURIEscapeStr(uptr, BAD_CAST "/;&=+$,");
goto done;
}
/*
* Allocate just enough space for the returned string -
* length of the remainder of the URI, plus enough space
* for the "../" groups, plus one for the terminator
*/
val = (xmlChar *) xmlMalloc (len + 3 * nbslash);
if (val == NULL) {
xmlURIErrMemory("building relative URI\n");
goto done;
}
vptr = val;
/*
* Put in as many "../" as needed
*/
for (; nbslash>0; nbslash--) {
*vptr++ = '.';
*vptr++ = '.';
*vptr++ = '/';
}
/*
* Finish up with the end of the URI
*/
if (uptr != NULL) {
if ((vptr > val) && (len > 0) &&
(uptr[0] == '/') && (vptr[-1] == '/')) {
memcpy (vptr, uptr + 1, len - 1);
vptr[len - 2] = 0;
} else {
memcpy (vptr, uptr, len);
vptr[len - 1] = 0;
}
} else {
vptr[len - 1] = 0;
}
/* escape the freshly-built path */
vptr = val;
/* exception characters from xmlSaveUri */
val = xmlURIEscapeStr(vptr, BAD_CAST "/;&=+$,");
xmlFree(vptr);
done:
/*
* Free the working variables
*/
if (remove_path != 0)
ref->path = NULL;
if (ref != NULL)
xmlFreeURI (ref);
if (bas != NULL)
xmlFreeURI (bas);
return val;
}
| xmlBuildRelativeURI (const xmlChar * URI, const xmlChar * base)
{
xmlChar *val = NULL;
int ret;
int ix;
int pos = 0;
int nbslash = 0;
int len;
xmlURIPtr ref = NULL;
xmlURIPtr bas = NULL;
xmlChar *bptr, *uptr, *vptr;
int remove_path = 0;
if ((URI == NULL) || (*URI == 0))
return NULL;
/*
* First parse URI into a standard form
*/
ref = xmlCreateURI ();
if (ref == NULL)
return NULL;
/* If URI not already in "relative" form */
if (URI[0] != '.') {
ret = xmlParseURIReference (ref, (const char *) URI);
if (ret != 0)
goto done; /* Error in URI, return NULL */
} else
ref->path = (char *)xmlStrdup(URI);
/*
* Next parse base into the same standard form
*/
if ((base == NULL) || (*base == 0)) {
val = xmlStrdup (URI);
goto done;
}
bas = xmlCreateURI ();
if (bas == NULL)
goto done;
if (base[0] != '.') {
ret = xmlParseURIReference (bas, (const char *) base);
if (ret != 0)
goto done; /* Error in base, return NULL */
} else
bas->path = (char *)xmlStrdup(base);
/*
* If the scheme / server on the URI differs from the base,
* just return the URI
*/
if ((ref->scheme != NULL) &&
((bas->scheme == NULL) ||
(xmlStrcmp ((xmlChar *)bas->scheme, (xmlChar *)ref->scheme)) ||
(xmlStrcmp ((xmlChar *)bas->server, (xmlChar *)ref->server)))) {
val = xmlStrdup (URI);
goto done;
}
if (xmlStrEqual((xmlChar *)bas->path, (xmlChar *)ref->path)) {
val = xmlStrdup(BAD_CAST "");
goto done;
}
if (bas->path == NULL) {
val = xmlStrdup((xmlChar *)ref->path);
goto done;
}
if (ref->path == NULL) {
ref->path = (char *) "/";
remove_path = 1;
}
/*
* At this point (at last!) we can compare the two paths
*
* First we take care of the special case where either of the
* two path components may be missing (bug 316224)
*/
if (bas->path == NULL) {
if (ref->path != NULL) {
uptr = (xmlChar *) ref->path;
if (*uptr == '/')
uptr++;
/* exception characters from xmlSaveUri */
val = xmlURIEscapeStr(uptr, BAD_CAST "/;&=+$,");
}
goto done;
}
bptr = (xmlChar *)bas->path;
if (ref->path == NULL) {
for (ix = 0; bptr[ix] != 0; ix++) {
if (bptr[ix] == '/')
nbslash++;
}
uptr = NULL;
len = 1; /* this is for a string terminator only */
} else {
/*
* Next we compare the two strings and find where they first differ
*/
if ((ref->path[pos] == '.') && (ref->path[pos+1] == '/'))
pos += 2;
if ((*bptr == '.') && (bptr[1] == '/'))
bptr += 2;
else if ((*bptr == '/') && (ref->path[pos] != '/'))
bptr++;
while ((bptr[pos] == ref->path[pos]) && (bptr[pos] != 0))
pos++;
if (bptr[pos] == ref->path[pos]) {
val = xmlStrdup(BAD_CAST "");
goto done; /* (I can't imagine why anyone would do this) */
}
/*
* In URI, "back up" to the last '/' encountered. This will be the
* beginning of the "unique" suffix of URI
*/
ix = pos;
if ((ref->path[ix] == '/') && (ix > 0))
ix--;
else if ((ref->path[ix] == 0) && (ix > 1) && (ref->path[ix - 1] == '/'))
ix -= 2;
for (; ix > 0; ix--) {
if (ref->path[ix] == '/')
break;
}
if (ix == 0) {
uptr = (xmlChar *)ref->path;
} else {
ix++;
uptr = (xmlChar *)&ref->path[ix];
}
/*
* In base, count the number of '/' from the differing point
*/
if (bptr[pos] != ref->path[pos]) {/* check for trivial URI == base */
for (; bptr[ix] != 0; ix++) {
if (bptr[ix] == '/')
nbslash++;
}
}
len = xmlStrlen (uptr) + 1;
}
if (nbslash == 0) {
if (uptr != NULL)
/* exception characters from xmlSaveUri */
val = xmlURIEscapeStr(uptr, BAD_CAST "/;&=+$,");
goto done;
}
/*
* Allocate just enough space for the returned string -
* length of the remainder of the URI, plus enough space
* for the "../" groups, plus one for the terminator
*/
val = (xmlChar *) xmlMalloc (len + 3 * nbslash);
if (val == NULL) {
xmlURIErrMemory("building relative URI\n");
goto done;
}
vptr = val;
/*
* Put in as many "../" as needed
*/
for (; nbslash>0; nbslash--) {
*vptr++ = '.';
*vptr++ = '.';
*vptr++ = '/';
}
/*
* Finish up with the end of the URI
*/
if (uptr != NULL) {
if ((vptr > val) && (len > 0) &&
(uptr[0] == '/') && (vptr[-1] == '/')) {
memcpy (vptr, uptr + 1, len - 1);
vptr[len - 2] = 0;
} else {
memcpy (vptr, uptr, len);
vptr[len - 1] = 0;
}
} else {
vptr[len - 1] = 0;
}
/* escape the freshly-built path */
vptr = val;
/* exception characters from xmlSaveUri */
val = xmlURIEscapeStr(vptr, BAD_CAST "/;&=+$,");
xmlFree(vptr);
done:
/*
* Free the working variables
*/
if (remove_path != 0)
ref->path = NULL;
if (ref != NULL)
xmlFreeURI (ref);
if (bas != NULL)
xmlFreeURI (bas);
return val;
}
| C | Android | 0 |
CVE-2017-8831 | https://www.cvedetails.com/cve/CVE-2017-8831/ | CWE-125 | https://github.com/stoth68000/media-tree/commit/354dd3924a2e43806774953de536257548b5002c | 354dd3924a2e43806774953de536257548b5002c | [PATCH] saa7164: Bug - Double fetch PCIe access condition
Avoid a double fetch by reusing the values from the prior transfer.
Originally reported via https://bugzilla.kernel.org/show_bug.cgi?id=195559
Thanks to Pengfei Wang <wpengfeinudt@gmail.com> for reporting.
Signed-off-by: Steven Toth <stoth@kernellabs.com> | int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
void *buf, int peekonly)
{
struct tmComResBusInfo *bus = &dev->bus;
u32 bytes_to_read, write_distance, curr_grp, curr_gwp,
new_grp, buf_size, space_rem;
struct tmComResInfo msg_tmp;
int ret = SAA_ERR_BAD_PARAMETER;
saa7164_bus_verify(dev);
if (msg == NULL)
return ret;
if (msg->size > dev->bus.m_wMaxReqSize) {
printk(KERN_ERR "%s() Exceeded dev->bus.m_wMaxReqSize\n",
__func__);
return ret;
}
if ((peekonly == 0) && (msg->size > 0) && (buf == NULL)) {
printk(KERN_ERR
"%s() Missing msg buf, size should be %d bytes\n",
__func__, msg->size);
return ret;
}
mutex_lock(&bus->lock);
/* Peek the bus to see if a msg exists, if it's not what we're expecting
* then return cleanly else read the message from the bus.
*/
curr_gwp = saa7164_readl(bus->m_dwGetWritePos);
curr_grp = saa7164_readl(bus->m_dwGetReadPos);
if (curr_gwp == curr_grp) {
ret = SAA_ERR_EMPTY;
goto out;
}
bytes_to_read = sizeof(*msg);
/* Calculate write distance to current read position */
write_distance = 0;
if (curr_gwp >= curr_grp)
/* Write doesn't wrap around the ring */
write_distance = curr_gwp - curr_grp;
else
/* Write wraps around the ring */
write_distance = curr_gwp + bus->m_dwSizeGetRing - curr_grp;
if (bytes_to_read > write_distance) {
printk(KERN_ERR "%s() No message/response found\n", __func__);
ret = SAA_ERR_INVALID_COMMAND;
goto out;
}
/* Calculate the new read position */
new_grp = curr_grp + bytes_to_read;
if (new_grp > bus->m_dwSizeGetRing) {
/* Ring wraps */
new_grp -= bus->m_dwSizeGetRing;
space_rem = bus->m_dwSizeGetRing - curr_grp;
memcpy_fromio(&msg_tmp, bus->m_pdwGetRing + curr_grp, space_rem);
memcpy_fromio((u8 *)&msg_tmp + space_rem, bus->m_pdwGetRing,
bytes_to_read - space_rem);
} else {
/* No wrapping */
memcpy_fromio(&msg_tmp, bus->m_pdwGetRing + curr_grp, bytes_to_read);
}
/* Convert from little endian to CPU */
msg_tmp.size = le16_to_cpu((__force __le16)msg_tmp.size);
msg_tmp.command = le32_to_cpu((__force __le32)msg_tmp.command);
msg_tmp.controlselector = le16_to_cpu((__force __le16)msg_tmp.controlselector);
memcpy(msg, &msg_tmp, sizeof(*msg));
/* No need to update the read positions, because this was a peek */
/* If the caller specifically want to peek, return */
if (peekonly) {
goto peekout;
}
/* Check if the command/response matches what is expected */
if ((msg_tmp.id != msg->id) || (msg_tmp.command != msg->command) ||
(msg_tmp.controlselector != msg->controlselector) ||
(msg_tmp.seqno != msg->seqno) || (msg_tmp.size != msg->size)) {
printk(KERN_ERR "%s() Unexpected msg miss-match\n", __func__);
saa7164_bus_dumpmsg(dev, msg, buf);
saa7164_bus_dumpmsg(dev, &msg_tmp, NULL);
ret = SAA_ERR_INVALID_COMMAND;
goto out;
}
/* Get the actual command and response from the bus */
buf_size = msg->size;
bytes_to_read = sizeof(*msg) + msg->size;
/* Calculate write distance to current read position */
write_distance = 0;
if (curr_gwp >= curr_grp)
/* Write doesn't wrap around the ring */
write_distance = curr_gwp - curr_grp;
else
/* Write wraps around the ring */
write_distance = curr_gwp + bus->m_dwSizeGetRing - curr_grp;
if (bytes_to_read > write_distance) {
printk(KERN_ERR "%s() Invalid bus state, missing msg or mangled ring, faulty H/W / bad code?\n",
__func__);
ret = SAA_ERR_INVALID_COMMAND;
goto out;
}
/* Calculate the new read position */
new_grp = curr_grp + bytes_to_read;
if (new_grp > bus->m_dwSizeGetRing) {
/* Ring wraps */
new_grp -= bus->m_dwSizeGetRing;
space_rem = bus->m_dwSizeGetRing - curr_grp;
if (space_rem < sizeof(*msg)) {
if (buf)
memcpy_fromio(buf, bus->m_pdwGetRing + sizeof(*msg) -
space_rem, buf_size);
} else if (space_rem == sizeof(*msg)) {
if (buf)
memcpy_fromio(buf, bus->m_pdwGetRing, buf_size);
} else {
/* Additional data wraps around the ring */
if (buf) {
memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp +
sizeof(*msg), space_rem - sizeof(*msg));
memcpy_fromio(buf + space_rem - sizeof(*msg),
bus->m_pdwGetRing, bytes_to_read -
space_rem);
}
}
} else {
/* No wrapping */
if (buf)
memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg),
buf_size);
}
/* Update the read positions, adjusting the ring */
saa7164_writel(bus->m_dwGetReadPos, new_grp);
peekout:
ret = SAA_OK;
out:
mutex_unlock(&bus->lock);
saa7164_bus_verify(dev);
return ret;
}
| int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
void *buf, int peekonly)
{
struct tmComResBusInfo *bus = &dev->bus;
u32 bytes_to_read, write_distance, curr_grp, curr_gwp,
new_grp, buf_size, space_rem;
struct tmComResInfo msg_tmp;
int ret = SAA_ERR_BAD_PARAMETER;
saa7164_bus_verify(dev);
if (msg == NULL)
return ret;
if (msg->size > dev->bus.m_wMaxReqSize) {
printk(KERN_ERR "%s() Exceeded dev->bus.m_wMaxReqSize\n",
__func__);
return ret;
}
if ((peekonly == 0) && (msg->size > 0) && (buf == NULL)) {
printk(KERN_ERR
"%s() Missing msg buf, size should be %d bytes\n",
__func__, msg->size);
return ret;
}
mutex_lock(&bus->lock);
/* Peek the bus to see if a msg exists, if it's not what we're expecting
* then return cleanly else read the message from the bus.
*/
curr_gwp = saa7164_readl(bus->m_dwGetWritePos);
curr_grp = saa7164_readl(bus->m_dwGetReadPos);
if (curr_gwp == curr_grp) {
ret = SAA_ERR_EMPTY;
goto out;
}
bytes_to_read = sizeof(*msg);
/* Calculate write distance to current read position */
write_distance = 0;
if (curr_gwp >= curr_grp)
/* Write doesn't wrap around the ring */
write_distance = curr_gwp - curr_grp;
else
/* Write wraps around the ring */
write_distance = curr_gwp + bus->m_dwSizeGetRing - curr_grp;
if (bytes_to_read > write_distance) {
printk(KERN_ERR "%s() No message/response found\n", __func__);
ret = SAA_ERR_INVALID_COMMAND;
goto out;
}
/* Calculate the new read position */
new_grp = curr_grp + bytes_to_read;
if (new_grp > bus->m_dwSizeGetRing) {
/* Ring wraps */
new_grp -= bus->m_dwSizeGetRing;
space_rem = bus->m_dwSizeGetRing - curr_grp;
memcpy_fromio(&msg_tmp, bus->m_pdwGetRing + curr_grp, space_rem);
memcpy_fromio((u8 *)&msg_tmp + space_rem, bus->m_pdwGetRing,
bytes_to_read - space_rem);
} else {
/* No wrapping */
memcpy_fromio(&msg_tmp, bus->m_pdwGetRing + curr_grp, bytes_to_read);
}
/* Convert from little endian to CPU */
msg_tmp.size = le16_to_cpu((__force __le16)msg_tmp.size);
msg_tmp.command = le32_to_cpu((__force __le32)msg_tmp.command);
msg_tmp.controlselector = le16_to_cpu((__force __le16)msg_tmp.controlselector);
/* No need to update the read positions, because this was a peek */
/* If the caller specifically want to peek, return */
if (peekonly) {
memcpy(msg, &msg_tmp, sizeof(*msg));
goto peekout;
}
/* Check if the command/response matches what is expected */
if ((msg_tmp.id != msg->id) || (msg_tmp.command != msg->command) ||
(msg_tmp.controlselector != msg->controlselector) ||
(msg_tmp.seqno != msg->seqno) || (msg_tmp.size != msg->size)) {
printk(KERN_ERR "%s() Unexpected msg miss-match\n", __func__);
saa7164_bus_dumpmsg(dev, msg, buf);
saa7164_bus_dumpmsg(dev, &msg_tmp, NULL);
ret = SAA_ERR_INVALID_COMMAND;
goto out;
}
/* Get the actual command and response from the bus */
buf_size = msg->size;
bytes_to_read = sizeof(*msg) + msg->size;
/* Calculate write distance to current read position */
write_distance = 0;
if (curr_gwp >= curr_grp)
/* Write doesn't wrap around the ring */
write_distance = curr_gwp - curr_grp;
else
/* Write wraps around the ring */
write_distance = curr_gwp + bus->m_dwSizeGetRing - curr_grp;
if (bytes_to_read > write_distance) {
printk(KERN_ERR "%s() Invalid bus state, missing msg or mangled ring, faulty H/W / bad code?\n",
__func__);
ret = SAA_ERR_INVALID_COMMAND;
goto out;
}
/* Calculate the new read position */
new_grp = curr_grp + bytes_to_read;
if (new_grp > bus->m_dwSizeGetRing) {
/* Ring wraps */
new_grp -= bus->m_dwSizeGetRing;
space_rem = bus->m_dwSizeGetRing - curr_grp;
if (space_rem < sizeof(*msg)) {
/* msg wraps around the ring */
memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, space_rem);
memcpy_fromio((u8 *)msg + space_rem, bus->m_pdwGetRing,
sizeof(*msg) - space_rem);
if (buf)
memcpy_fromio(buf, bus->m_pdwGetRing + sizeof(*msg) -
space_rem, buf_size);
} else if (space_rem == sizeof(*msg)) {
memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
if (buf)
memcpy_fromio(buf, bus->m_pdwGetRing, buf_size);
} else {
/* Additional data wraps around the ring */
memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
if (buf) {
memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp +
sizeof(*msg), space_rem - sizeof(*msg));
memcpy_fromio(buf + space_rem - sizeof(*msg),
bus->m_pdwGetRing, bytes_to_read -
space_rem);
}
}
} else {
/* No wrapping */
memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
if (buf)
memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg),
buf_size);
}
/* Convert from little endian to CPU */
msg->size = le16_to_cpu((__force __le16)msg->size);
msg->command = le32_to_cpu((__force __le32)msg->command);
msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector);
/* Update the read positions, adjusting the ring */
saa7164_writel(bus->m_dwGetReadPos, new_grp);
peekout:
ret = SAA_OK;
out:
mutex_unlock(&bus->lock);
saa7164_bus_verify(dev);
return ret;
}
| C | media-tree | 1 |
CVE-2016-5094 | https://www.cvedetails.com/cve/CVE-2016-5094/ | CWE-190 | https://github.com/php/php-src/commit/0da8b8b801f9276359262f1ef8274c7812d3dfda?w=1 | 0da8b8b801f9276359262f1ef8274c7812d3dfda?w=1 | Fix bug #72135 - don't create strings with lengths outside int range | static inline int unicode_cp_is_allowed(unsigned uni_cp, int document_type)
{
/* XML 1.0 HTML 4.01 HTML 5
* 0x09..0x0A 0x09..0x0A 0x09..0x0A
* 0x0D 0x0D 0x0C..0x0D
* 0x0020..0xD7FF 0x20..0x7E 0x20..0x7E
* 0x00A0..0xD7FF 0x00A0..0xD7FF
* 0xE000..0xFFFD 0xE000..0x10FFFF 0xE000..0xFDCF
* 0x010000..0x10FFFF 0xFDF0..0x10FFFF (*)
*
* (*) exclude code points where ((code & 0xFFFF) >= 0xFFFE)
*
* References:
* XML 1.0: <http://www.w3.org/TR/REC-xml/#charsets>
* HTML 4.01: <http://www.w3.org/TR/1999/PR-html40-19990824/sgml/sgmldecl.html>
* HTML 5: <http://dev.w3.org/html5/spec/Overview.html#preprocessing-the-input-stream>
*
* Not sure this is the relevant part for HTML 5, though. I opted to
* disallow the characters that would result in a parse error when
* preprocessing of the input stream. See also section 8.1.3.
*
* It's unclear if XHTML 1.0 allows C1 characters. I'll opt to apply to
* XHTML 1.0 the same rules as for XML 1.0.
* See <http://cmsmcq.com/2007/C1.xml>.
*/
switch (document_type) {
case ENT_HTML_DOC_HTML401:
return (uni_cp >= 0x20 && uni_cp <= 0x7E) ||
(uni_cp == 0x0A || uni_cp == 0x09 || uni_cp == 0x0D) ||
(uni_cp >= 0xA0 && uni_cp <= 0xD7FF) ||
(uni_cp >= 0xE000 && uni_cp <= 0x10FFFF);
case ENT_HTML_DOC_HTML5:
return (uni_cp >= 0x20 && uni_cp <= 0x7E) ||
(uni_cp >= 0x09 && uni_cp <= 0x0D && uni_cp != 0x0B) || /* form feed U+0C allowed */
(uni_cp >= 0xA0 && uni_cp <= 0xD7FF) ||
(uni_cp >= 0xE000 && uni_cp <= 0x10FFFF &&
((uni_cp & 0xFFFF) < 0xFFFE) && /* last two of each plane (nonchars) disallowed */
(uni_cp < 0xFDD0 || uni_cp > 0xFDEF)); /* U+FDD0-U+FDEF (nonchars) disallowed */
case ENT_HTML_DOC_XHTML:
case ENT_HTML_DOC_XML1:
return (uni_cp >= 0x20 && uni_cp <= 0xD7FF) ||
(uni_cp == 0x0A || uni_cp == 0x09 || uni_cp == 0x0D) ||
(uni_cp >= 0xE000 && uni_cp <= 0x10FFFF && uni_cp != 0xFFFE && uni_cp != 0xFFFF);
default:
return 1;
}
}
| static inline int unicode_cp_is_allowed(unsigned uni_cp, int document_type)
{
/* XML 1.0 HTML 4.01 HTML 5
* 0x09..0x0A 0x09..0x0A 0x09..0x0A
* 0x0D 0x0D 0x0C..0x0D
* 0x0020..0xD7FF 0x20..0x7E 0x20..0x7E
* 0x00A0..0xD7FF 0x00A0..0xD7FF
* 0xE000..0xFFFD 0xE000..0x10FFFF 0xE000..0xFDCF
* 0x010000..0x10FFFF 0xFDF0..0x10FFFF (*)
*
* (*) exclude code points where ((code & 0xFFFF) >= 0xFFFE)
*
* References:
* XML 1.0: <http://www.w3.org/TR/REC-xml/#charsets>
* HTML 4.01: <http://www.w3.org/TR/1999/PR-html40-19990824/sgml/sgmldecl.html>
* HTML 5: <http://dev.w3.org/html5/spec/Overview.html#preprocessing-the-input-stream>
*
* Not sure this is the relevant part for HTML 5, though. I opted to
* disallow the characters that would result in a parse error when
* preprocessing of the input stream. See also section 8.1.3.
*
* It's unclear if XHTML 1.0 allows C1 characters. I'll opt to apply to
* XHTML 1.0 the same rules as for XML 1.0.
* See <http://cmsmcq.com/2007/C1.xml>.
*/
switch (document_type) {
case ENT_HTML_DOC_HTML401:
return (uni_cp >= 0x20 && uni_cp <= 0x7E) ||
(uni_cp == 0x0A || uni_cp == 0x09 || uni_cp == 0x0D) ||
(uni_cp >= 0xA0 && uni_cp <= 0xD7FF) ||
(uni_cp >= 0xE000 && uni_cp <= 0x10FFFF);
case ENT_HTML_DOC_HTML5:
return (uni_cp >= 0x20 && uni_cp <= 0x7E) ||
(uni_cp >= 0x09 && uni_cp <= 0x0D && uni_cp != 0x0B) || /* form feed U+0C allowed */
(uni_cp >= 0xA0 && uni_cp <= 0xD7FF) ||
(uni_cp >= 0xE000 && uni_cp <= 0x10FFFF &&
((uni_cp & 0xFFFF) < 0xFFFE) && /* last two of each plane (nonchars) disallowed */
(uni_cp < 0xFDD0 || uni_cp > 0xFDEF)); /* U+FDD0-U+FDEF (nonchars) disallowed */
case ENT_HTML_DOC_XHTML:
case ENT_HTML_DOC_XML1:
return (uni_cp >= 0x20 && uni_cp <= 0xD7FF) ||
(uni_cp == 0x0A || uni_cp == 0x09 || uni_cp == 0x0D) ||
(uni_cp >= 0xE000 && uni_cp <= 0x10FFFF && uni_cp != 0xFFFE && uni_cp != 0xFFFF);
default:
return 1;
}
}
| C | php-src | 1 |
CVE-2016-3156 | https://www.cvedetails.com/cve/CVE-2016-3156/ | CWE-399 | https://github.com/torvalds/linux/commit/fbd40ea0180a2d328c5adc61414dc8bab9335ce2 | fbd40ea0180a2d328c5adc61414dc8bab9335ce2 | ipv4: Don't do expensive useless work during inetdev destroy.
When an inetdev is destroyed, every address assigned to the interface
is removed. And in this scenerio we do two pointless things which can
be very expensive if the number of assigned interfaces is large:
1) Address promotion. We are deleting all addresses, so there is no
point in doing this.
2) A full nf conntrack table purge for every address. We only need to
do this once, as is already caught by the existing
masq_dev_notifier so masq_inet_event() can skip this.
Reported-by: Solar Designer <solar@openwall.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Tested-by: Cyrill Gorcunov <gorcunov@openvz.org> | static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
int destroy, struct nlmsghdr *nlh, u32 portid)
{
struct in_ifaddr *promote = NULL;
struct in_ifaddr *ifa, *ifa1 = *ifap;
struct in_ifaddr *last_prim = in_dev->ifa_list;
struct in_ifaddr *prev_prom = NULL;
int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);
ASSERT_RTNL();
if (in_dev->dead)
goto no_promotions;
/* 1. Deleting primary ifaddr forces deletion all secondaries
* unless alias promotion is set
**/
if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) {
struct in_ifaddr **ifap1 = &ifa1->ifa_next;
while ((ifa = *ifap1) != NULL) {
if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
ifa1->ifa_scope <= ifa->ifa_scope)
last_prim = ifa;
if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
ifa1->ifa_mask != ifa->ifa_mask ||
!inet_ifa_match(ifa1->ifa_address, ifa)) {
ifap1 = &ifa->ifa_next;
prev_prom = ifa;
continue;
}
if (!do_promote) {
inet_hash_remove(ifa);
*ifap1 = ifa->ifa_next;
rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid);
blocking_notifier_call_chain(&inetaddr_chain,
NETDEV_DOWN, ifa);
inet_free_ifa(ifa);
} else {
promote = ifa;
break;
}
}
}
/* On promotion all secondaries from subnet are changing
* the primary IP, we must remove all their routes silently
* and later to add them back with new prefsrc. Do this
* while all addresses are on the device list.
*/
for (ifa = promote; ifa; ifa = ifa->ifa_next) {
if (ifa1->ifa_mask == ifa->ifa_mask &&
inet_ifa_match(ifa1->ifa_address, ifa))
fib_del_ifaddr(ifa, ifa1);
}
no_promotions:
/* 2. Unlink it */
*ifap = ifa1->ifa_next;
inet_hash_remove(ifa1);
/* 3. Announce address deletion */
/* Send message first, then call notifier.
At first sight, FIB update triggered by notifier
will refer to already deleted ifaddr, that could confuse
netlink listeners. It is not true: look, gated sees
that route deleted and if it still thinks that ifaddr
is valid, it will try to restore deleted routes... Grr.
So that, this order is correct.
*/
rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid);
blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
if (promote) {
struct in_ifaddr *next_sec = promote->ifa_next;
if (prev_prom) {
prev_prom->ifa_next = promote->ifa_next;
promote->ifa_next = last_prim->ifa_next;
last_prim->ifa_next = promote;
}
promote->ifa_flags &= ~IFA_F_SECONDARY;
rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid);
blocking_notifier_call_chain(&inetaddr_chain,
NETDEV_UP, promote);
for (ifa = next_sec; ifa; ifa = ifa->ifa_next) {
if (ifa1->ifa_mask != ifa->ifa_mask ||
!inet_ifa_match(ifa1->ifa_address, ifa))
continue;
fib_add_ifaddr(ifa);
}
}
if (destroy)
inet_free_ifa(ifa1);
}
| static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
int destroy, struct nlmsghdr *nlh, u32 portid)
{
struct in_ifaddr *promote = NULL;
struct in_ifaddr *ifa, *ifa1 = *ifap;
struct in_ifaddr *last_prim = in_dev->ifa_list;
struct in_ifaddr *prev_prom = NULL;
int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);
ASSERT_RTNL();
/* 1. Deleting primary ifaddr forces deletion all secondaries
* unless alias promotion is set
**/
if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) {
struct in_ifaddr **ifap1 = &ifa1->ifa_next;
while ((ifa = *ifap1) != NULL) {
if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
ifa1->ifa_scope <= ifa->ifa_scope)
last_prim = ifa;
if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
ifa1->ifa_mask != ifa->ifa_mask ||
!inet_ifa_match(ifa1->ifa_address, ifa)) {
ifap1 = &ifa->ifa_next;
prev_prom = ifa;
continue;
}
if (!do_promote) {
inet_hash_remove(ifa);
*ifap1 = ifa->ifa_next;
rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid);
blocking_notifier_call_chain(&inetaddr_chain,
NETDEV_DOWN, ifa);
inet_free_ifa(ifa);
} else {
promote = ifa;
break;
}
}
}
/* On promotion all secondaries from subnet are changing
* the primary IP, we must remove all their routes silently
* and later to add them back with new prefsrc. Do this
* while all addresses are on the device list.
*/
for (ifa = promote; ifa; ifa = ifa->ifa_next) {
if (ifa1->ifa_mask == ifa->ifa_mask &&
inet_ifa_match(ifa1->ifa_address, ifa))
fib_del_ifaddr(ifa, ifa1);
}
/* 2. Unlink it */
*ifap = ifa1->ifa_next;
inet_hash_remove(ifa1);
/* 3. Announce address deletion */
/* Send message first, then call notifier.
At first sight, FIB update triggered by notifier
will refer to already deleted ifaddr, that could confuse
netlink listeners. It is not true: look, gated sees
that route deleted and if it still thinks that ifaddr
is valid, it will try to restore deleted routes... Grr.
So that, this order is correct.
*/
rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid);
blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
if (promote) {
struct in_ifaddr *next_sec = promote->ifa_next;
if (prev_prom) {
prev_prom->ifa_next = promote->ifa_next;
promote->ifa_next = last_prim->ifa_next;
last_prim->ifa_next = promote;
}
promote->ifa_flags &= ~IFA_F_SECONDARY;
rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid);
blocking_notifier_call_chain(&inetaddr_chain,
NETDEV_UP, promote);
for (ifa = next_sec; ifa; ifa = ifa->ifa_next) {
if (ifa1->ifa_mask != ifa->ifa_mask ||
!inet_ifa_match(ifa1->ifa_address, ifa))
continue;
fib_add_ifaddr(ifa);
}
}
if (destroy)
inet_free_ifa(ifa1);
}
| C | linux | 1 |
CVE-2014-9904 | https://www.cvedetails.com/cve/CVE-2014-9904/ | null | https://github.com/torvalds/linux/commit/6217e5ede23285ddfee10d2e4ba0cc2d4c046205 | 6217e5ede23285ddfee10d2e4ba0cc2d4c046205 | ALSA: compress: fix an integer overflow check
I previously added an integer overflow check here but looking at it now,
it's still buggy.
The bug happens in snd_compr_allocate_buffer(). We multiply
".fragments" and ".fragment_size" and that doesn't overflow but then we
save it in an unsigned int so it truncates the high bits away and we
allocate a smaller than expected size.
Fixes: b35cc8225845 ('ALSA: compress_core: integer overflow in snd_compr_allocate_buffer()')
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Takashi Iwai <tiwai@suse.de> | static ssize_t snd_compr_write(struct file *f, const char __user *buf,
size_t count, loff_t *offset)
{
struct snd_compr_file *data = f->private_data;
struct snd_compr_stream *stream;
size_t avail;
int retval;
if (snd_BUG_ON(!data))
return -EFAULT;
stream = &data->stream;
mutex_lock(&stream->device->lock);
/* write is allowed when stream is running or has been steup */
if (stream->runtime->state != SNDRV_PCM_STATE_SETUP &&
stream->runtime->state != SNDRV_PCM_STATE_RUNNING) {
mutex_unlock(&stream->device->lock);
return -EBADFD;
}
avail = snd_compr_get_avail(stream);
pr_debug("avail returned %ld\n", (unsigned long)avail);
/* calculate how much we can write to buffer */
if (avail > count)
avail = count;
if (stream->ops->copy) {
char __user* cbuf = (char __user*)buf;
retval = stream->ops->copy(stream, cbuf, avail);
} else {
retval = snd_compr_write_data(stream, buf, avail);
}
if (retval > 0)
stream->runtime->total_bytes_available += retval;
/* while initiating the stream, write should be called before START
* call, so in setup move state */
if (stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
pr_debug("stream prepared, Houston we are good to go\n");
}
mutex_unlock(&stream->device->lock);
return retval;
}
| static ssize_t snd_compr_write(struct file *f, const char __user *buf,
size_t count, loff_t *offset)
{
struct snd_compr_file *data = f->private_data;
struct snd_compr_stream *stream;
size_t avail;
int retval;
if (snd_BUG_ON(!data))
return -EFAULT;
stream = &data->stream;
mutex_lock(&stream->device->lock);
/* write is allowed when stream is running or has been steup */
if (stream->runtime->state != SNDRV_PCM_STATE_SETUP &&
stream->runtime->state != SNDRV_PCM_STATE_RUNNING) {
mutex_unlock(&stream->device->lock);
return -EBADFD;
}
avail = snd_compr_get_avail(stream);
pr_debug("avail returned %ld\n", (unsigned long)avail);
/* calculate how much we can write to buffer */
if (avail > count)
avail = count;
if (stream->ops->copy) {
char __user* cbuf = (char __user*)buf;
retval = stream->ops->copy(stream, cbuf, avail);
} else {
retval = snd_compr_write_data(stream, buf, avail);
}
if (retval > 0)
stream->runtime->total_bytes_available += retval;
/* while initiating the stream, write should be called before START
* call, so in setup move state */
if (stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
pr_debug("stream prepared, Houston we are good to go\n");
}
mutex_unlock(&stream->device->lock);
return retval;
}
| C | linux | 0 |
CVE-2015-8952 | https://www.cvedetails.com/cve/CVE-2015-8952/ | CWE-19 | https://github.com/torvalds/linux/commit/82939d7999dfc1f1998c4b1c12e2f19edbdff272 | 82939d7999dfc1f1998c4b1c12e2f19edbdff272 | ext4: convert to mbcache2
The conversion is generally straightforward. The only tricky part is
that xattr block corresponding to found mbcache entry can get freed
before we get buffer lock for that block. So we have to check whether
the entry is still valid after getting buffer lock.
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Theodore Ts'o <tytso@mit.edu> | ext4_xattr_set(struct inode *inode, int name_index, const char *name,
const void *value, size_t value_len, int flags)
{
handle_t *handle;
int error, retries = 0;
int credits = ext4_jbd2_credits_xattr(inode);
retry:
handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
if (IS_ERR(handle)) {
error = PTR_ERR(handle);
} else {
int error2;
error = ext4_xattr_set_handle(handle, inode, name_index, name,
value, value_len, flags);
error2 = ext4_journal_stop(handle);
if (error == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry;
if (error == 0)
error = error2;
}
return error;
}
| ext4_xattr_set(struct inode *inode, int name_index, const char *name,
const void *value, size_t value_len, int flags)
{
handle_t *handle;
int error, retries = 0;
int credits = ext4_jbd2_credits_xattr(inode);
retry:
handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
if (IS_ERR(handle)) {
error = PTR_ERR(handle);
} else {
int error2;
error = ext4_xattr_set_handle(handle, inode, name_index, name,
value, value_len, flags);
error2 = ext4_journal_stop(handle);
if (error == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry;
if (error == 0)
error = error2;
}
return error;
}
| C | linux | 0 |
CVE-2016-0850 | https://www.cvedetails.com/cve/CVE-2016-0850/ | CWE-264 | https://android.googlesource.com/platform/external/bluetooth/bluedroid/+/c677ee92595335233eb0e7b59809a1a94e7a678a | c677ee92595335233eb0e7b59809a1a94e7a678a | DO NOT MERGE Remove Porsche car-kit pairing workaround
Bug: 26551752
Change-Id: I14c5e3fcda0849874c8a94e48aeb7d09585617e1
| void btm_io_capabilities_rsp (UINT8 *p)
{
tBTM_SEC_DEV_REC *p_dev_rec;
tBTM_SP_IO_RSP evt_data;
STREAM_TO_BDADDR (evt_data.bd_addr, p);
STREAM_TO_UINT8 (evt_data.io_cap, p);
STREAM_TO_UINT8 (evt_data.oob_data, p);
STREAM_TO_UINT8 (evt_data.auth_req, p);
/* Allocate a new device record or reuse the oldest one */
p_dev_rec = btm_find_or_alloc_dev (evt_data.bd_addr);
/* If no security is in progress, this indicates incoming security */
if (btm_cb.pairing_state == BTM_PAIR_STATE_IDLE)
{
memcpy (btm_cb.pairing_bda, evt_data.bd_addr, BD_ADDR_LEN);
btm_sec_change_pairing_state (BTM_PAIR_STATE_INCOMING_SSP);
/* Make sure we reset the trusted mask to help against attacks */
BTM_SEC_CLR_TRUSTED_DEVICE(p_dev_rec->trusted_mask);
/* work around for FW bug */
btm_inq_stop_on_ssp();
}
/* Notify L2CAP to increase timeout */
l2c_pin_code_request (evt_data.bd_addr);
/* We must have a device record here.
* Use the connecting device's CoD for the connection */
/* coverity[uninit_use_in_call]
Event uninit_use_in_call: Using uninitialized element of array "evt_data.bd_addr" in call to function "memcmp"
FALSE-POSITIVE error from Coverity test-tool. evt_data.bd_addr is set at the beginning with: STREAM_TO_BDADDR (evt_data.bd_addr, p);
*/
if (!memcmp (evt_data.bd_addr, btm_cb.connecting_bda, BD_ADDR_LEN))
memcpy (p_dev_rec->dev_class, btm_cb.connecting_dc, DEV_CLASS_LEN);
/* peer sets dedicated bonding bit and we did not initiate dedicated bonding */
if (btm_cb.pairing_state == BTM_PAIR_STATE_INCOMING_SSP /* peer initiated bonding */
&& (evt_data.auth_req & BTM_AUTH_DD_BOND) ) /* and dedicated bonding bit is set */
{
btm_cb.pairing_flags |= BTM_PAIR_FLAGS_PEER_STARTED_DD;
}
/* save the IO capability in the device record */
p_dev_rec->rmt_io_caps = evt_data.io_cap;
p_dev_rec->rmt_auth_req = evt_data.auth_req;
if (btm_cb.api.p_sp_callback)
(*btm_cb.api.p_sp_callback) (BTM_SP_IO_RSP_EVT, (tBTM_SP_EVT_DATA *)&evt_data);
}
| void btm_io_capabilities_rsp (UINT8 *p)
{
tBTM_SEC_DEV_REC *p_dev_rec;
tBTM_SP_IO_RSP evt_data;
STREAM_TO_BDADDR (evt_data.bd_addr, p);
STREAM_TO_UINT8 (evt_data.io_cap, p);
STREAM_TO_UINT8 (evt_data.oob_data, p);
STREAM_TO_UINT8 (evt_data.auth_req, p);
/* Allocate a new device record or reuse the oldest one */
p_dev_rec = btm_find_or_alloc_dev (evt_data.bd_addr);
/* If no security is in progress, this indicates incoming security */
if (btm_cb.pairing_state == BTM_PAIR_STATE_IDLE)
{
memcpy (btm_cb.pairing_bda, evt_data.bd_addr, BD_ADDR_LEN);
btm_sec_change_pairing_state (BTM_PAIR_STATE_INCOMING_SSP);
/* Make sure we reset the trusted mask to help against attacks */
BTM_SEC_CLR_TRUSTED_DEVICE(p_dev_rec->trusted_mask);
/* work around for FW bug */
btm_inq_stop_on_ssp();
}
/* Notify L2CAP to increase timeout */
l2c_pin_code_request (evt_data.bd_addr);
/* We must have a device record here.
* Use the connecting device's CoD for the connection */
/* coverity[uninit_use_in_call]
Event uninit_use_in_call: Using uninitialized element of array "evt_data.bd_addr" in call to function "memcmp"
FALSE-POSITIVE error from Coverity test-tool. evt_data.bd_addr is set at the beginning with: STREAM_TO_BDADDR (evt_data.bd_addr, p);
*/
if (!memcmp (evt_data.bd_addr, btm_cb.connecting_bda, BD_ADDR_LEN))
memcpy (p_dev_rec->dev_class, btm_cb.connecting_dc, DEV_CLASS_LEN);
/* peer sets dedicated bonding bit and we did not initiate dedicated bonding */
if (btm_cb.pairing_state == BTM_PAIR_STATE_INCOMING_SSP /* peer initiated bonding */
&& (evt_data.auth_req & BTM_AUTH_DD_BOND) ) /* and dedicated bonding bit is set */
{
btm_cb.pairing_flags |= BTM_PAIR_FLAGS_PEER_STARTED_DD;
}
/* save the IO capability in the device record */
p_dev_rec->rmt_io_caps = evt_data.io_cap;
p_dev_rec->rmt_auth_req = evt_data.auth_req;
if (btm_cb.api.p_sp_callback)
(*btm_cb.api.p_sp_callback) (BTM_SP_IO_RSP_EVT, (tBTM_SP_EVT_DATA *)&evt_data);
}
| C | Android | 0 |
CVE-2017-0380 | https://www.cvedetails.com/cve/CVE-2017-0380/ | CWE-532 | https://github.com/torproject/tor/commit/09ea89764a4d3a907808ed7d4fe42abfe64bd486 | 09ea89764a4d3a907808ed7d4fe42abfe64bd486 | Fix log-uninitialized-stack bug in rend_service_intro_established.
Fixes bug 23490; bugfix on 0.2.7.2-alpha.
TROVE-2017-008
CVE-2017-0380 | rend_service_add_filenames_to_list(smartlist_t *lst, const rend_service_t *s)
{
tor_assert(lst);
tor_assert(s);
tor_assert(s->directory);
smartlist_add(lst, rend_service_path(s, private_key_fname));
smartlist_add(lst, rend_service_path(s, hostname_fname));
smartlist_add(lst, rend_service_path(s, client_keys_fname));
smartlist_add(lst, rend_service_sos_poison_path(s));
}
| rend_service_add_filenames_to_list(smartlist_t *lst, const rend_service_t *s)
{
tor_assert(lst);
tor_assert(s);
tor_assert(s->directory);
smartlist_add(lst, rend_service_path(s, private_key_fname));
smartlist_add(lst, rend_service_path(s, hostname_fname));
smartlist_add(lst, rend_service_path(s, client_keys_fname));
smartlist_add(lst, rend_service_sos_poison_path(s));
}
| C | tor | 0 |
CVE-2012-2880 | https://www.cvedetails.com/cve/CVE-2012-2880/ | CWE-362 | https://github.com/chromium/chromium/commit/fcd3a7a671ecf2d5f46ea34787d27507a914d2f5 | fcd3a7a671ecf2d5f46ea34787d27507a914d2f5 | [Sync] Cleanup all tab sync enabling logic now that its on by default.
BUG=none
TEST=
Review URL: https://chromiumcodereview.appspot.com/10443046
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@139462 0039d316-1c4b-4281-b951-d872f2087c98 | SyncBackendHost::Status ProfileSyncService::QueryDetailedSyncStatus() {
if (backend_.get() && backend_initialized_) {
return backend_->GetDetailedStatus();
} else {
SyncBackendHost::Status status;
status.sync_protocol_error = last_actionable_error_;
return status;
}
}
| SyncBackendHost::Status ProfileSyncService::QueryDetailedSyncStatus() {
if (backend_.get() && backend_initialized_) {
return backend_->GetDetailedStatus();
} else {
SyncBackendHost::Status status;
status.sync_protocol_error = last_actionable_error_;
return status;
}
}
| C | Chrome | 0 |
CVE-2014-3157 | https://www.cvedetails.com/cve/CVE-2014-3157/ | CWE-119 | https://github.com/chromium/chromium/commit/96e8ffb4e805c7266a2fc1fbe0e470052019bad9 | 96e8ffb4e805c7266a2fc1fbe0e470052019bad9 | Replicate FFmpeg's video frame allocation strategy.
This should avoid accidental overreads and overwrites due to our
VideoFrame's not being as large as FFmpeg expects.
BUG=368980
TEST=new regression test
Review URL: https://codereview.chromium.org/270193002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@268831 0039d316-1c4b-4281-b951-d872f2087c98 | void FFmpegVideoDecoder::ReleaseFFmpegResources() {
codec_context_.reset();
av_frame_.reset();
}
| void FFmpegVideoDecoder::ReleaseFFmpegResources() {
codec_context_.reset();
av_frame_.reset();
}
| C | Chrome | 0 |
CVE-2013-2857 | https://www.cvedetails.com/cve/CVE-2013-2857/ | CWE-416 | https://github.com/chromium/chromium/commit/7d085fbb43b21e959900b94f191588fd10546a94 | 7d085fbb43b21e959900b94f191588fd10546a94 | Error event was fired synchronously blowing away the input element from underneath. Remove the FIXME and fire it asynchronously using errorEventSender().
BUG=240124
Review URL: https://chromiumcodereview.appspot.com/14741011
git-svn-id: svn://svn.chromium.org/blink/trunk@150232 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | ImageLoader::ImageLoader(Element* element)
: m_element(element)
, m_image(0)
, m_derefElementTimer(this, &ImageLoader::timerFired)
, m_hasPendingBeforeLoadEvent(false)
, m_hasPendingLoadEvent(false)
, m_hasPendingErrorEvent(false)
, m_imageComplete(true)
, m_loadManually(false)
, m_elementIsProtected(false)
{
}
| ImageLoader::ImageLoader(Element* element)
: m_element(element)
, m_image(0)
, m_derefElementTimer(this, &ImageLoader::timerFired)
, m_hasPendingBeforeLoadEvent(false)
, m_hasPendingLoadEvent(false)
, m_hasPendingErrorEvent(false)
, m_imageComplete(true)
, m_loadManually(false)
, m_elementIsProtected(false)
{
}
| C | Chrome | 0 |
null | null | null | https://github.com/chromium/chromium/commit/9ad7483d8e7c20e9f1a5a08d00150fb51899f14c | 9ad7483d8e7c20e9f1a5a08d00150fb51899f14c | Shutdown Timebomb - In canary, get a callstack if it takes longer than
10 minutes. In Dev, get callstack if it takes longer than 20 minutes.
In Beta (50 minutes) and Stable (100 minutes) it is same as before.
BUG=519321
R=asvitkine@chromium.org
Review URL: https://codereview.chromium.org/1409333005
Cr-Commit-Position: refs/heads/master@{#355586} | void ThreadWatcherList::WakeUpAll() {
DCHECK(WatchDogThread::CurrentlyOnWatchDogThread());
if (!g_thread_watcher_list_)
return;
for (RegistrationList::iterator it =
g_thread_watcher_list_->registered_.begin();
g_thread_watcher_list_->registered_.end() != it;
++it)
it->second->WakeUp();
}
| void ThreadWatcherList::WakeUpAll() {
DCHECK(WatchDogThread::CurrentlyOnWatchDogThread());
if (!g_thread_watcher_list_)
return;
for (RegistrationList::iterator it =
g_thread_watcher_list_->registered_.begin();
g_thread_watcher_list_->registered_.end() != it;
++it)
it->second->WakeUp();
}
| C | Chrome | 0 |
CVE-2017-13030 | https://www.cvedetails.com/cve/CVE-2017-13030/ | CWE-125 | https://github.com/the-tcpdump-group/tcpdump/commit/5dc1860d8267b1e0cb78c9ffa2a40bea2fdb3ddc | 5dc1860d8267b1e0cb78c9ffa2a40bea2fdb3ddc | CVE-2017-13030/PIM: Redo bounds checks and add length checks.
Use ND_TCHECK macros to do bounds checking, and add length checks before
the bounds checks.
Add a bounds check that the review process found was missing.
This fixes a buffer over-read discovered by Bhargava Shastry,
SecT/TU Berlin.
Add a test using the capture file supplied by the reporter(s), modified
so the capture file won't be rejected as an invalid capture.
Update one test output file to reflect the changes. | pimv1_join_prune_print(netdissect_options *ndo,
register const u_char *bp, register u_int len)
{
int ngroups, njoin, nprune;
int njp;
/* If it's a single group and a single source, use 1-line output. */
if (ND_TTEST2(bp[0], 30) && bp[11] == 1 &&
((njoin = EXTRACT_16BITS(&bp[20])) + EXTRACT_16BITS(&bp[22])) == 1) {
int hold;
ND_PRINT((ndo, " RPF %s ", ipaddr_string(ndo, bp)));
hold = EXTRACT_16BITS(&bp[6]);
if (hold != 180) {
ND_PRINT((ndo, "Hold "));
unsigned_relts_print(ndo, hold);
}
ND_PRINT((ndo, "%s (%s/%d, %s", njoin ? "Join" : "Prune",
ipaddr_string(ndo, &bp[26]), bp[25] & 0x3f,
ipaddr_string(ndo, &bp[12])));
if (EXTRACT_32BITS(&bp[16]) != 0xffffffff)
ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[16])));
ND_PRINT((ndo, ") %s%s %s",
(bp[24] & 0x01) ? "Sparse" : "Dense",
(bp[25] & 0x80) ? " WC" : "",
(bp[25] & 0x40) ? "RP" : "SPT"));
return;
}
if (len < sizeof(struct in_addr))
goto trunc;
ND_TCHECK2(bp[0], sizeof(struct in_addr));
if (ndo->ndo_vflag > 1)
ND_PRINT((ndo, "\n"));
ND_PRINT((ndo, " Upstream Nbr: %s", ipaddr_string(ndo, bp)));
bp += 4;
len -= 4;
if (len < 4)
goto trunc;
ND_TCHECK2(bp[2], 2);
if (ndo->ndo_vflag > 1)
ND_PRINT((ndo, "\n"));
ND_PRINT((ndo, " Hold time: "));
unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[2]));
if (ndo->ndo_vflag < 2)
return;
bp += 4;
len -= 4;
if (len < 4)
goto trunc;
ND_TCHECK2(bp[0], 4);
ngroups = bp[3];
bp += 4;
len -= 4;
while (ngroups--) {
/*
* XXX - does the address have length "addrlen" and the
* mask length "maddrlen"?
*/
if (len < 4)
goto trunc;
ND_TCHECK2(bp[0], sizeof(struct in_addr));
ND_PRINT((ndo, "\n\tGroup: %s", ipaddr_string(ndo, bp)));
bp += 4;
len -= 4;
if (len < 4)
goto trunc;
ND_TCHECK2(bp[0], sizeof(struct in_addr));
if (EXTRACT_32BITS(&bp[0]) != 0xffffffff)
ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[0])));
bp += 4;
len -= 4;
if (len < 4)
goto trunc;
ND_TCHECK2(bp[0], 4);
njoin = EXTRACT_16BITS(&bp[0]);
nprune = EXTRACT_16BITS(&bp[2]);
ND_PRINT((ndo, " joined: %d pruned: %d", njoin, nprune));
bp += 4;
len -= 4;
for (njp = 0; njp < (njoin + nprune); njp++) {
const char *type;
if (njp < njoin)
type = "Join ";
else
type = "Prune";
if (len < 6)
goto trunc;
ND_TCHECK2(bp[0], 6);
ND_PRINT((ndo, "\n\t%s %s%s%s%s/%d", type,
(bp[0] & 0x01) ? "Sparse " : "Dense ",
(bp[1] & 0x80) ? "WC " : "",
(bp[1] & 0x40) ? "RP " : "SPT ",
ipaddr_string(ndo, &bp[2]),
bp[1] & 0x3f));
bp += 6;
len -= 6;
}
}
return;
trunc:
ND_PRINT((ndo, "[|pim]"));
return;
}
| pimv1_join_prune_print(netdissect_options *ndo,
register const u_char *bp, register u_int len)
{
int ngroups, njoin, nprune;
int njp;
/* If it's a single group and a single source, use 1-line output. */
if (ND_TTEST2(bp[0], 30) && bp[11] == 1 &&
((njoin = EXTRACT_16BITS(&bp[20])) + EXTRACT_16BITS(&bp[22])) == 1) {
int hold;
ND_PRINT((ndo, " RPF %s ", ipaddr_string(ndo, bp)));
hold = EXTRACT_16BITS(&bp[6]);
if (hold != 180) {
ND_PRINT((ndo, "Hold "));
unsigned_relts_print(ndo, hold);
}
ND_PRINT((ndo, "%s (%s/%d, %s", njoin ? "Join" : "Prune",
ipaddr_string(ndo, &bp[26]), bp[25] & 0x3f,
ipaddr_string(ndo, &bp[12])));
if (EXTRACT_32BITS(&bp[16]) != 0xffffffff)
ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[16])));
ND_PRINT((ndo, ") %s%s %s",
(bp[24] & 0x01) ? "Sparse" : "Dense",
(bp[25] & 0x80) ? " WC" : "",
(bp[25] & 0x40) ? "RP" : "SPT"));
return;
}
ND_TCHECK2(bp[0], sizeof(struct in_addr));
if (ndo->ndo_vflag > 1)
ND_PRINT((ndo, "\n"));
ND_PRINT((ndo, " Upstream Nbr: %s", ipaddr_string(ndo, bp)));
ND_TCHECK2(bp[6], 2);
if (ndo->ndo_vflag > 1)
ND_PRINT((ndo, "\n"));
ND_PRINT((ndo, " Hold time: "));
unsigned_relts_print(ndo, EXTRACT_16BITS(&bp[6]));
if (ndo->ndo_vflag < 2)
return;
bp += 8;
len -= 8;
ND_TCHECK2(bp[0], 4);
ngroups = bp[3];
bp += 4;
len -= 4;
while (ngroups--) {
/*
* XXX - does the address have length "addrlen" and the
* mask length "maddrlen"?
*/
ND_TCHECK2(bp[0], sizeof(struct in_addr));
ND_PRINT((ndo, "\n\tGroup: %s", ipaddr_string(ndo, bp)));
ND_TCHECK2(bp[4], sizeof(struct in_addr));
if (EXTRACT_32BITS(&bp[4]) != 0xffffffff)
ND_PRINT((ndo, "/%s", ipaddr_string(ndo, &bp[4])));
ND_TCHECK2(bp[8], 4);
njoin = EXTRACT_16BITS(&bp[8]);
nprune = EXTRACT_16BITS(&bp[10]);
ND_PRINT((ndo, " joined: %d pruned: %d", njoin, nprune));
bp += 12;
len -= 12;
for (njp = 0; njp < (njoin + nprune); njp++) {
const char *type;
if (njp < njoin)
type = "Join ";
else
type = "Prune";
ND_TCHECK2(bp[0], 6);
ND_PRINT((ndo, "\n\t%s %s%s%s%s/%d", type,
(bp[0] & 0x01) ? "Sparse " : "Dense ",
(bp[1] & 0x80) ? "WC " : "",
(bp[1] & 0x40) ? "RP " : "SPT ",
ipaddr_string(ndo, &bp[2]), bp[1] & 0x3f));
bp += 6;
len -= 6;
}
}
return;
trunc:
ND_PRINT((ndo, "[|pim]"));
return;
}
| C | tcpdump | 1 |
CVE-2018-14017 | https://www.cvedetails.com/cve/CVE-2018-14017/ | CWE-125 | https://github.com/radare/radare2/commit/eb0fb72b3c5307ec8e33effb6bf947e38cfdffe8 | eb0fb72b3c5307ec8e33effb6bf947e38cfdffe8 | Fix #10498 - Crash in fuzzed java file | static char *convert_string(const char *bytes, ut32 len) {
ut32 idx = 0, pos = 0;
ut32 str_sz = 32 * len + 1;
char *cpy_buffer = len > 0 ? malloc (str_sz) : NULL;
if (!cpy_buffer) {
return cpy_buffer;
}
memset (cpy_buffer, 0, str_sz);
while (idx < len && pos < len) {
if (dso_json_char_needs_hexing (bytes[idx])) {
if (pos + 2 < len) {
free (cpy_buffer);
return NULL;
}
sprintf (cpy_buffer + pos, "\\x%02x", bytes[idx]);
pos += 4;
} else {
cpy_buffer[pos] = bytes[idx];
pos++;
}
idx++;
}
return cpy_buffer;
}
| static char *convert_string(const char *bytes, ut32 len) {
ut32 idx = 0, pos = 0;
ut32 str_sz = 32 * len + 1;
char *cpy_buffer = len > 0 ? malloc (str_sz) : NULL;
if (!cpy_buffer) {
return cpy_buffer;
}
memset (cpy_buffer, 0, str_sz);
while (idx < len && pos < len) {
if (dso_json_char_needs_hexing (bytes[idx])) {
if (pos + 2 < len) {
free (cpy_buffer);
return NULL;
}
sprintf (cpy_buffer + pos, "\\x%02x", bytes[idx]);
pos += 4;
} else {
cpy_buffer[pos] = bytes[idx];
pos++;
}
idx++;
}
return cpy_buffer;
}
| C | radare2 | 0 |
CVE-2011-1019 | https://www.cvedetails.com/cve/CVE-2011-1019/ | CWE-264 | https://github.com/torvalds/linux/commit/8909c9ad8ff03611c9c96c9a92656213e4bb495b | 8909c9ad8ff03611c9c96c9a92656213e4bb495b | net: don't allow CAP_NET_ADMIN to load non-netdev kernel modules
Since a8f80e8ff94ecba629542d9b4b5f5a8ee3eb565c any process with
CAP_NET_ADMIN may load any module from /lib/modules/. This doesn't mean
that CAP_NET_ADMIN is a superset of CAP_SYS_MODULE as modules are
limited to /lib/modules/**. However, CAP_NET_ADMIN capability shouldn't
allow anybody load any module not related to networking.
This patch restricts an ability of autoloading modules to netdev modules
with explicit aliases. This fixes CVE-2011-1019.
Arnd Bergmann suggested to leave untouched the old pre-v2.6.32 behavior
of loading netdev modules by name (without any prefix) for processes
with CAP_SYS_MODULE to maintain the compatibility with network scripts
that use autoloading netdev modules by aliases like "eth0", "wlan0".
Currently there are only three users of the feature in the upstream
kernel: ipip, ip_gre and sit.
root@albatros:~# capsh --drop=$(seq -s, 0 11),$(seq -s, 13 34) --
root@albatros:~# grep Cap /proc/$$/status
CapInh: 0000000000000000
CapPrm: fffffff800001000
CapEff: fffffff800001000
CapBnd: fffffff800001000
root@albatros:~# modprobe xfs
FATAL: Error inserting xfs
(/lib/modules/2.6.38-rc6-00001-g2bf4ca3/kernel/fs/xfs/xfs.ko): Operation not permitted
root@albatros:~# lsmod | grep xfs
root@albatros:~# ifconfig xfs
xfs: error fetching interface information: Device not found
root@albatros:~# lsmod | grep xfs
root@albatros:~# lsmod | grep sit
root@albatros:~# ifconfig sit
sit: error fetching interface information: Device not found
root@albatros:~# lsmod | grep sit
root@albatros:~# ifconfig sit0
sit0 Link encap:IPv6-in-IPv4
NOARP MTU:1480 Metric:1
root@albatros:~# lsmod | grep sit
sit 10457 0
tunnel4 2957 1 sit
For CAP_SYS_MODULE module loading is still relaxed:
root@albatros:~# grep Cap /proc/$$/status
CapInh: 0000000000000000
CapPrm: ffffffffffffffff
CapEff: ffffffffffffffff
CapBnd: ffffffffffffffff
root@albatros:~# ifconfig xfs
xfs: error fetching interface information: Device not found
root@albatros:~# lsmod | grep xfs
xfs 745319 0
Reference: https://lkml.org/lkml/2011/2/24/203
Signed-off-by: Vasiliy Kulikov <segoon@openwall.com>
Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Kees Cook <kees.cook@canonical.com>
Signed-off-by: James Morris <jmorris@namei.org> | int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq)
{
const struct net_device_ops *ops = dev->netdev_ops;
int rc = NETDEV_TX_OK;
if (likely(!skb->next)) {
int features;
/*
* If device doesnt need skb->dst, release it right now while
* its hot in this cpu cache
*/
if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
skb_dst_drop(skb);
if (!list_empty(&ptype_all))
dev_queue_xmit_nit(skb, dev);
skb_orphan_try(skb);
features = netif_skb_features(skb);
if (vlan_tx_tag_present(skb) &&
!(features & NETIF_F_HW_VLAN_TX)) {
skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
if (unlikely(!skb))
goto out;
skb->vlan_tci = 0;
}
if (netif_needs_gso(skb, features)) {
if (unlikely(dev_gso_segment(skb, features)))
goto out_kfree_skb;
if (skb->next)
goto gso;
} else {
if (skb_needs_linearize(skb, features) &&
__skb_linearize(skb))
goto out_kfree_skb;
/* If packet is not checksummed and device does not
* support checksumming for this protocol, complete
* checksumming here.
*/
if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb_set_transport_header(skb,
skb_checksum_start_offset(skb));
if (!(features & NETIF_F_ALL_CSUM) &&
skb_checksum_help(skb))
goto out_kfree_skb;
}
}
rc = ops->ndo_start_xmit(skb, dev);
trace_net_dev_xmit(skb, rc);
if (rc == NETDEV_TX_OK)
txq_trans_update(txq);
return rc;
}
gso:
do {
struct sk_buff *nskb = skb->next;
skb->next = nskb->next;
nskb->next = NULL;
/*
* If device doesnt need nskb->dst, release it right now while
* its hot in this cpu cache
*/
if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
skb_dst_drop(nskb);
rc = ops->ndo_start_xmit(nskb, dev);
trace_net_dev_xmit(nskb, rc);
if (unlikely(rc != NETDEV_TX_OK)) {
if (rc & ~NETDEV_TX_MASK)
goto out_kfree_gso_skb;
nskb->next = skb->next;
skb->next = nskb;
return rc;
}
txq_trans_update(txq);
if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
return NETDEV_TX_BUSY;
} while (skb->next);
out_kfree_gso_skb:
if (likely(skb->next == NULL))
skb->destructor = DEV_GSO_CB(skb)->destructor;
out_kfree_skb:
kfree_skb(skb);
out:
return rc;
}
| int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq)
{
const struct net_device_ops *ops = dev->netdev_ops;
int rc = NETDEV_TX_OK;
if (likely(!skb->next)) {
int features;
/*
* If device doesnt need skb->dst, release it right now while
* its hot in this cpu cache
*/
if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
skb_dst_drop(skb);
if (!list_empty(&ptype_all))
dev_queue_xmit_nit(skb, dev);
skb_orphan_try(skb);
features = netif_skb_features(skb);
if (vlan_tx_tag_present(skb) &&
!(features & NETIF_F_HW_VLAN_TX)) {
skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
if (unlikely(!skb))
goto out;
skb->vlan_tci = 0;
}
if (netif_needs_gso(skb, features)) {
if (unlikely(dev_gso_segment(skb, features)))
goto out_kfree_skb;
if (skb->next)
goto gso;
} else {
if (skb_needs_linearize(skb, features) &&
__skb_linearize(skb))
goto out_kfree_skb;
/* If packet is not checksummed and device does not
* support checksumming for this protocol, complete
* checksumming here.
*/
if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb_set_transport_header(skb,
skb_checksum_start_offset(skb));
if (!(features & NETIF_F_ALL_CSUM) &&
skb_checksum_help(skb))
goto out_kfree_skb;
}
}
rc = ops->ndo_start_xmit(skb, dev);
trace_net_dev_xmit(skb, rc);
if (rc == NETDEV_TX_OK)
txq_trans_update(txq);
return rc;
}
gso:
do {
struct sk_buff *nskb = skb->next;
skb->next = nskb->next;
nskb->next = NULL;
/*
* If device doesnt need nskb->dst, release it right now while
* its hot in this cpu cache
*/
if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
skb_dst_drop(nskb);
rc = ops->ndo_start_xmit(nskb, dev);
trace_net_dev_xmit(nskb, rc);
if (unlikely(rc != NETDEV_TX_OK)) {
if (rc & ~NETDEV_TX_MASK)
goto out_kfree_gso_skb;
nskb->next = skb->next;
skb->next = nskb;
return rc;
}
txq_trans_update(txq);
if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
return NETDEV_TX_BUSY;
} while (skb->next);
out_kfree_gso_skb:
if (likely(skb->next == NULL))
skb->destructor = DEV_GSO_CB(skb)->destructor;
out_kfree_skb:
kfree_skb(skb);
out:
return rc;
}
| C | linux | 0 |
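The module-loading hardening described in the commit message above boils down to a capability gate in front of request_module(). A condensed sketch in the spirit of the upstream dev_load() change (error handling trimmed; treat it as illustrative, not the full patch):

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;
	int no_module;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	no_module = !dev;
	/* CAP_NET_ADMIN may only trigger modules that opted in via a
	 * "netdev-" alias; arbitrary module names now require
	 * CAP_SYS_MODULE */
	if (no_module && capable(CAP_NET_ADMIN))
		no_module = request_module("netdev-%s", name);
	if (no_module && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}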
CVE-2017-11719 | https://www.cvedetails.com/cve/CVE-2017-11719/ | CWE-125 | https://github.com/FFmpeg/FFmpeg/commit/296debd213bd6dce7647cedd34eb64e5b94cdc92 | 296debd213bd6dce7647cedd34eb64e5b94cdc92 | avcodec/dnxhddec: Move mb height check out of non hr branch
Fixes: out of array access
Fixes: poc.dnxhd
Found-by: Bingchang, Liu@VARAS of IIE
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc> | static av_cold int dnxhd_decode_close(AVCodecContext *avctx)
{
DNXHDContext *ctx = avctx->priv_data;
ff_free_vlc(&ctx->ac_vlc);
ff_free_vlc(&ctx->dc_vlc);
ff_free_vlc(&ctx->run_vlc);
av_freep(&ctx->rows);
return 0;
}
| static av_cold int dnxhd_decode_close(AVCodecContext *avctx)
{
DNXHDContext *ctx = avctx->priv_data;
ff_free_vlc(&ctx->ac_vlc);
ff_free_vlc(&ctx->dc_vlc);
ff_free_vlc(&ctx->run_vlc);
av_freep(&ctx->rows);
return 0;
}
| C | FFmpeg | 0 |
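The dnxhddec change above is a bounds-check hoist: a macroblock-height limit that used to run only in the non-HR branch now runs for every profile, so HR bitstreams can no longer index past the per-row arrays. Schematically (the 68-row limit matches the decoder; the surrounding field names are simplified):

/* before: the limit guarded only the non-HR branch */
if (!is_hr_profile) {
	if (ctx->mb_height > 68)
		return AVERROR_INVALIDDATA;
	/* ... */
}

/* after: validate unconditionally, before any rows[] access */
if (ctx->mb_height > 68)
	return AVERROR_INVALIDDATA;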
CVE-2016-1620 | https://www.cvedetails.com/cve/CVE-2016-1620/ | null | https://github.com/chromium/chromium/commit/b90c7c8c335a2e2a4abdd7bde17a44f92c8b3a54 | b90c7c8c335a2e2a4abdd7bde17a44f92c8b3a54 | Fix GPU process fallback logic.
1. In GpuProcessHost::OnProcessCrashed() record the process crash first.
This means the GPU mode fallback will happen before a new GPU process
is started.
2. Don't call FallBackToNextGpuMode() if GPU process initialization
fails for an unsandboxed GPU process. The unsandboxed GPU is only
used to collect information, and its failure doesn't indicate a need
to change GPU modes.
Bug: 869419
Change-Id: I8bd0a03268f0ea8809f3df8458d4e6a92db9391f
Reviewed-on: https://chromium-review.googlesource.com/1157164
Reviewed-by: Zhenyao Mo <zmo@chromium.org>
Commit-Queue: kylechar <kylechar@chromium.org>
Cr-Commit-Position: refs/heads/master@{#579625} | void GpuProcessHost::DidCreateContextSuccessfully() {
#if defined(OS_ANDROID)
hardware_accelerated_recent_crash_count_ = 0;
#endif
}
| void GpuProcessHost::DidCreateContextSuccessfully() {
#if defined(OS_ANDROID)
hardware_accelerated_recent_crash_count_ = 0;
#endif
}
| C | Chrome | 0 |
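Point 1 of the GPU commit above is purely an ordering change: the crash must be recorded first, because recording it is what can trigger the fallback to the next GPU mode, and the replacement process should be launched in that new mode. In outline (function names simplified):

/* before: new process launched while still in the known-bad mode */
static void on_gpu_process_crashed(int exit_code)
{
	maybe_launch_replacement_process();
	record_process_crash();		/* fallback decided too late */
}

/* after: record first, so FallBackToNextGpuMode() runs beforehand */
static void on_gpu_process_crashed(int exit_code)
{
	record_process_crash();		/* may switch GPU mode */
	maybe_launch_replacement_process();
}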
CVE-2012-0045 | https://www.cvedetails.com/cve/CVE-2012-0045/ | null | https://github.com/torvalds/linux/commit/c2226fc9e87ba3da060e47333657cd6616652b84 | c2226fc9e87ba3da060e47333657cd6616652b84 | KVM: x86: fix missing checks in syscall emulation
On hosts without this patch, 32bit guests will crash (and 64bit guests
may behave in a wrong way), for example by simply executing the
following nasm demo application:
[bits 32]
global _start
SECTION .text
_start: syscall
(I tested it with winxp and linux - both always crashed)
Disassembly of section .text:
00000000 <_start>:
0: 0f 05 syscall
The reason seems to be a missing "invalid opcode" trap (int6) for the
syscall opcode "0f05", which is not available on Intel CPUs
outside long mode, nor on some AMD CPUs in legacy mode.
(depending on CPU vendor, MSR_EFER and cpuid)
Because the previously mentioned OSs may not set up the corresponding
syscall target registers (STAR, LSTAR, CSTAR), they remain
NULL, and (non-trapping) syscalls lead to multiple
faults and finally crashes.
Depending on the architecture (AMD or Intel) the guest claims to be,
various checks following the vendor's documentation
are implemented to overcome the current issue and behave
like the physical CPUs.
[mtosatti: cleanup/beautify code]
Signed-off-by: Stephan Baerwolf <stephan.baerwolf@tu-ilmenau.de>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> | static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
{
struct fetch_cache *fc = &ctxt->fetch;
int rc;
int size, cur_size;
if (ctxt->_eip == fc->end) {
unsigned long linear;
struct segmented_address addr = { .seg = VCPU_SREG_CS,
.ea = ctxt->_eip };
cur_size = fc->end - fc->start;
size = min(15UL - cur_size,
PAGE_SIZE - offset_in_page(ctxt->_eip));
rc = __linearize(ctxt, addr, size, false, true, &linear);
if (unlikely(rc != X86EMUL_CONTINUE))
return rc;
rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
size, &ctxt->exception);
if (unlikely(rc != X86EMUL_CONTINUE))
return rc;
fc->end += size;
}
*dest = fc->data[ctxt->_eip - fc->start];
ctxt->_eip++;
return X86EMUL_CONTINUE;
}
| static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
{
struct fetch_cache *fc = &ctxt->fetch;
int rc;
int size, cur_size;
if (ctxt->_eip == fc->end) {
unsigned long linear;
struct segmented_address addr = { .seg = VCPU_SREG_CS,
.ea = ctxt->_eip };
cur_size = fc->end - fc->start;
size = min(15UL - cur_size,
PAGE_SIZE - offset_in_page(ctxt->_eip));
rc = __linearize(ctxt, addr, size, false, true, &linear);
if (unlikely(rc != X86EMUL_CONTINUE))
return rc;
rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
size, &ctxt->exception);
if (unlikely(rc != X86EMUL_CONTINUE))
return rc;
fc->end += size;
}
*dest = fc->data[ctxt->_eip - fc->start];
ctxt->_eip++;
return X86EMUL_CONTINUE;
}
| C | linux | 0 |
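The missing #UD described in the syscall-emulation commit above amounts to a vendor/mode gate evaluated before SYSCALL is emulated. A condensed sketch of that gate (vendor_is_amd() is a stand-in for the explicit CPUID vendor-string comparisons in the real patch):

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	u32 eax = 0x00000000, ebx, ecx, edx;

	/* in 64-bit (long) mode SYSCALL is architectural on both vendors */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	/* otherwise, behave like the vendor the guest's CPUID claims */
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
	if (vendor_is_amd(ebx, ecx, edx))	/* "AuthenticAMD" */
		return true;			/* legacy-mode SYSCALL ok */

	return false;				/* "GenuineIntel": raise #UD */
}

/* in em_syscall(): */
if (!em_syscall_is_enabled(ctxt))
	return emulate_ud(ctxt);		/* the missing int6 trap */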
CVE-2018-1999014 | https://www.cvedetails.com/cve/CVE-2018-1999014/ | CWE-125 | https://github.com/FFmpeg/FFmpeg/commit/bab0716c7f4793ec42e05a5aa7e80d82a0dd4e75 | bab0716c7f4793ec42e05a5aa7e80d82a0dd4e75 | avformat/mxfdec: Fix av_log context
Fixes: out of array access
Fixes: mxf-crash-1c2e59bf07a34675bfb3ada5e1ec22fa9f38f923
Found-by: Paul Ch <paulcher@icloud.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc> | static MXFStructuralComponent* mxf_resolve_essence_group_choice(MXFContext *mxf, MXFEssenceGroup *essence_group)
{
MXFStructuralComponent *component = NULL;
MXFPackage *package = NULL;
MXFDescriptor *descriptor = NULL;
int i;
if (!essence_group || !essence_group->structural_components_count)
return NULL;
/* essence groups contains multiple representations of the same media,
this return the first components with a valid Descriptor typically index 0 */
for (i =0; i < essence_group->structural_components_count; i++){
component = mxf_resolve_strong_ref(mxf, &essence_group->structural_components_refs[i], SourceClip);
if (!component)
continue;
if (!(package = mxf_resolve_source_package(mxf, component->source_package_ul, component->source_package_uid)))
continue;
descriptor = mxf_resolve_strong_ref(mxf, &package->descriptor_ref, Descriptor);
if (descriptor)
return component;
}
return NULL;
}
| static MXFStructuralComponent* mxf_resolve_essence_group_choice(MXFContext *mxf, MXFEssenceGroup *essence_group)
{
MXFStructuralComponent *component = NULL;
MXFPackage *package = NULL;
MXFDescriptor *descriptor = NULL;
int i;
if (!essence_group || !essence_group->structural_components_count)
return NULL;
/* essence groups contains multiple representations of the same media,
this return the first components with a valid Descriptor typically index 0 */
for (i =0; i < essence_group->structural_components_count; i++){
component = mxf_resolve_strong_ref(mxf, &essence_group->structural_components_refs[i], SourceClip);
if (!component)
continue;
if (!(package = mxf_resolve_source_package(mxf, component->source_package_ul, component->source_package_uid)))
continue;
descriptor = mxf_resolve_strong_ref(mxf, &package->descriptor_ref, Descriptor);
if (descriptor)
return component;
}
return NULL;
}
| C | FFmpeg | 0 |
CVE-2017-8924 | https://www.cvedetails.com/cve/CVE-2017-8924/ | CWE-191 | https://github.com/torvalds/linux/commit/654b404f2a222f918af9b0cd18ad469d0c941a8e | 654b404f2a222f918af9b0cd18ad469d0c941a8e | USB: serial: io_ti: fix information leak in completion handler
Add missing sanity check to the bulk-in completion handler to avoid an
integer underflow that can be triggered by a malicious device.
This avoids leaking 128 kB of memory content from after the URB transfer
buffer to user space.
Fixes: 8c209e6782ca ("USB: make actual_length in struct urb field u32")
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Cc: stable <stable@vger.kernel.org> # 2.6.30
Signed-off-by: Johan Hovold <johan@kernel.org> | static int edge_suspend(struct usb_serial *serial, pm_message_t message)
{
struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
cancel_delayed_work_sync(&edge_serial->heartbeat_work);
return 0;
}
| static int edge_suspend(struct usb_serial *serial, pm_message_t message)
{
struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
cancel_delayed_work_sync(&edge_serial->heartbeat_work);
return 0;
}
| C | linux | 0 |
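The io_ti leak above is a textbook unsigned underflow: the bulk-in completion handler subtracted a 2-byte header from urb->actual_length without first checking that those two bytes arrived, so a short packet produced a near-4 GiB length. A minimal guard, sketched against a generic completion handler (process_rx_data() and resubmit_urb() are placeholders):

static void bulk_in_callback(struct urb *urb)
{
	u32 length;

	if (urb->status)
		goto exit;

	/* a packet shorter than its 2-byte header must be dropped:
	 * (u32)(actual_length - 2) would wrap and the copy below
	 * would stream kernel memory to user space */
	if (urb->actual_length < 2)
		goto exit;

	length = urb->actual_length - 2;
	process_rx_data(urb->transfer_buffer + 2, length);
exit:
	resubmit_urb(urb);
}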
CVE-2013-2915 | https://www.cvedetails.com/cve/CVE-2013-2915/ | null | https://github.com/chromium/chromium/commit/b12eb22a27110f49a2ad54b9e4ffd0ccb6cf9ce9 | b12eb22a27110f49a2ad54b9e4ffd0ccb6cf9ce9 | Delete unneeded pending entries in DidFailProvisionalLoad to prevent a spoof.
BUG=280512
BUG=278899
TEST=See bug for repro steps.
Review URL: https://chromiumcodereview.appspot.com/23978003
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@222146 0039d316-1c4b-4281-b951-d872f2087c98 | void NavigationControllerImpl::RendererDidNavigateNewSubframe(
const ViewHostMsg_FrameNavigate_Params& params) {
if (PageTransitionCoreTypeIs(params.transition,
PAGE_TRANSITION_AUTO_SUBFRAME)) {
DiscardNonCommittedEntriesInternal();
return;
}
DCHECK(GetLastCommittedEntry()) << "ClassifyNavigation should guarantee "
<< "that a last committed entry exists.";
NavigationEntryImpl* new_entry = new NavigationEntryImpl(
*NavigationEntryImpl::FromNavigationEntry(GetLastCommittedEntry()));
new_entry->SetPageID(params.page_id);
InsertOrReplaceEntry(new_entry, false);
}
| void NavigationControllerImpl::RendererDidNavigateNewSubframe(
const ViewHostMsg_FrameNavigate_Params& params) {
if (PageTransitionCoreTypeIs(params.transition,
PAGE_TRANSITION_AUTO_SUBFRAME)) {
DiscardNonCommittedEntriesInternal();
return;
}
DCHECK(GetLastCommittedEntry()) << "ClassifyNavigation should guarantee "
<< "that a last committed entry exists.";
NavigationEntryImpl* new_entry = new NavigationEntryImpl(
*NavigationEntryImpl::FromNavigationEntry(GetLastCommittedEntry()));
new_entry->SetPageID(params.page_id);
InsertOrReplaceEntry(new_entry, false);
}
| C | Chrome | 0 |
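The spoof fixed above relies on one invariant: a pending NavigationEntry, whose URL the omnibox is already displaying, must not survive a failed provisional load. A rough sketch of the rule, not the exact Chromium call sequence:

/* on DidFailProvisionalLoad: drop the entry the address bar shows,
 * so it falls back to the last *committed* URL */
static void did_fail_provisional_load(struct nav_controller *nc)
{
	if (nc->pending_entry != NULL)
		discard_non_committed_entries(nc);
}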
CVE-2018-20067 | https://www.cvedetails.com/cve/CVE-2018-20067/ | CWE-254 | https://github.com/chromium/chromium/commit/a7d715ae5b654d1f98669fd979a00282a7229044 | a7d715ae5b654d1f98669fd979a00282a7229044 | Prevent renderer initiated back navigation to cancel a browser one.
Renderer-initiated back/forward navigations must not be able to cancel an ongoing
browser-initiated navigation unless they are user initiated.
Note: 'normal' renderer initiated navigation uses the
FrameHost::BeginNavigation() path. Code similar to this patch exists
in NavigatorImpl::OnBeginNavigation().
Test:
-----
Added: NavigationBrowserTest.
* HistoryBackInBeforeUnload
* HistoryBackInBeforeUnloadAfterSetTimeout
* HistoryBackCancelPendingNavigationNoUserGesture
* HistoryBackCancelPendingNavigationUserGesture
Fixed:
* (WPT) .../the-history-interface/traverse_the_history_2.html
* (WPT) .../the-history-interface/traverse_the_history_3.html
* (WPT) .../the-history-interface/traverse_the_history_4.html
* (WPT) .../the-history-interface/traverse_the_history_5.html
Bug: 879965
Change-Id: I1a9bfaaea1ffc219e6c32f6e676b660e746c578c
Reviewed-on: https://chromium-review.googlesource.com/1209744
Commit-Queue: Arthur Sonzogni <arthursonzogni@chromium.org>
Reviewed-by: Daniel Cheng <dcheng@chromium.org>
Reviewed-by: Mustaq Ahmed <mustaq@chromium.org>
Reviewed-by: Camille Lamy <clamy@chromium.org>
Reviewed-by: Charlie Reis <creis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#592823} | ukm::SourceId WebContentsImpl::GetUkmSourceIdForLastCommittedSource() const {
return last_committed_source_id_;
}
| ukm::SourceId WebContentsImpl::GetUkmSourceIdForLastCommittedSource() const {
return last_committed_source_id_;
}
| C | Chrome | 0 |
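The policy stated in the back-navigation commit above reduces to an early-out in the renderer-initiated history path. Sketch (names illustrative; the real check lives in the browser-side navigation code):

static void go_to_offset_from_renderer(struct nav_controller *nc,
				       int offset, bool has_user_gesture)
{
	/* history.back() from script, without a user gesture, must not
	 * cancel an ongoing browser-initiated navigation */
	if (!has_user_gesture && has_pending_browser_navigation(nc))
		return;

	go_to_offset(nc, offset);
}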
CVE-2012-0045 | https://www.cvedetails.com/cve/CVE-2012-0045/ | null | https://github.com/torvalds/linux/commit/c2226fc9e87ba3da060e47333657cd6616652b84 | c2226fc9e87ba3da060e47333657cd6616652b84 | KVM: x86: fix missing checks in syscall emulation
On hosts without this patch, 32bit guests will crash (and 64bit guests
may behave in a wrong way), for example by simply executing the
following nasm demo application:
[bits 32]
global _start
SECTION .text
_start: syscall
(I tested it with winxp and linux - both always crashed)
Disassembly of section .text:
00000000 <_start>:
0: 0f 05 syscall
The reason seems to be a missing "invalid opcode" trap (int6) for the
syscall opcode "0f05", which is not available on Intel CPUs
outside long mode, nor on some AMD CPUs in legacy mode.
(depending on CPU vendor, MSR_EFER and cpuid)
Because the previously mentioned OSs may not set up the corresponding
syscall target registers (STAR, LSTAR, CSTAR), they remain
NULL, and (non-trapping) syscalls lead to multiple
faults and finally crashes.
Depending on the architecture (AMD or Intel) the guest claims to be,
various checks following the vendor's documentation
are implemented to overcome the current issue and behave
like the physical CPUs.
[mtosatti: cleanup/beautify code]
Signed-off-by: Stephan Baerwolf <stephan.baerwolf@tu-ilmenau.de>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> | static int em_adc(struct x86_emulate_ctxt *ctxt)
{
emulate_2op_SrcV(ctxt, "adc");
return X86EMUL_CONTINUE;
}
| static int em_adc(struct x86_emulate_ctxt *ctxt)
{
emulate_2op_SrcV(ctxt, "adc");
return X86EMUL_CONTINUE;
}
| C | linux | 0 |
CVE-2012-5112 | https://www.cvedetails.com/cve/CVE-2012-5112/ | CWE-399 | https://github.com/chromium/chromium/commit/d65b01ca819881a507b5e60c25a2f9caff58cd57 | d65b01ca819881a507b5e60c25a2f9caff58cd57 | Wipe out QuotaThreadTask.
This is a one of a series of refactoring patches for QuotaManager.
http://codereview.chromium.org/10872054/
http://codereview.chromium.org/10917060/
BUG=139270
Review URL: https://chromiumcodereview.appspot.com/10919070
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@154987 0039d316-1c4b-4281-b951-d872f2087c98 | void QuotaManagerProxy::NotifyStorageModified(
QuotaClient::ID client_id,
const GURL& origin,
StorageType type,
int64 delta) {
if (!io_thread_->BelongsToCurrentThread()) {
io_thread_->PostTask(
FROM_HERE,
base::Bind(&QuotaManagerProxy::NotifyStorageModified, this, client_id,
origin, type, delta));
return;
}
if (manager_)
manager_->NotifyStorageModified(client_id, origin, type, delta);
}
| void QuotaManagerProxy::NotifyStorageModified(
QuotaClient::ID client_id,
const GURL& origin,
StorageType type,
int64 delta) {
if (!io_thread_->BelongsToCurrentThread()) {
io_thread_->PostTask(
FROM_HERE,
base::Bind(&QuotaManagerProxy::NotifyStorageModified, this, client_id,
origin, type, delta));
return;
}
if (manager_)
manager_->NotifyStorageModified(client_id, origin, type, delta);
}
| C | Chrome | 0 |
null | null | null | https://github.com/chromium/chromium/commit/d1a59e4e845a01d7d7b80ef184b672752a9eae4d | d1a59e4e845a01d7d7b80ef184b672752a9eae4d | Fixing cross-process postMessage replies on more than two iterations.
When two frames are replying to each other using event.source across processes,
after the first two replies, things break down. The root cause is that in
RenderViewImpl::GetFrameByMappedID, the lookup was incorrect. It is now
properly searching for the remote frame id and returning the local one.
BUG=153445
Review URL: https://chromiumcodereview.appspot.com/11040015
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@159924 0039d316-1c4b-4281-b951-d872f2087c98 | WebKit::WebPlugin* RenderViewImpl::CreatePluginReplacement(
const FilePath& file_path) {
return content::GetContentClient()->renderer()->CreatePluginReplacement(
this, file_path);
}
| WebKit::WebPlugin* RenderViewImpl::CreatePluginReplacement(
const FilePath& file_path) {
return content::GetContentClient()->renderer()->CreatePluginReplacement(
this, file_path);
}
| C | Chrome | 0 |
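The root cause named in the postMessage commit above is a lookup keyed on the wrong side of a (local id, remote id) frame map. A schematic before/after:

struct id_pair { int local_id; int remote_id; };

/* broken: matched the *local* column against a *remote* id, so after
 * the first two replies the lookup resolved to the wrong frame */
int remote_to_local_bad(const struct id_pair *m, int n, int remote_id)
{
	for (int i = 0; i < n; i++)
		if (m[i].local_id == remote_id)
			return m[i].remote_id;
	return -1;
}

/* fixed: search by the remote id, return the local one */
int remote_to_local_ok(const struct id_pair *m, int n, int remote_id)
{
	for (int i = 0; i < n; i++)
		if (m[i].remote_id == remote_id)
			return m[i].local_id;
	return -1;
}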
CVE-2016-10196 | https://www.cvedetails.com/cve/CVE-2016-10196/ | CWE-119 | https://github.com/libevent/libevent/commit/329acc18a0768c21ba22522f01a5c7f46cacc4d5 | 329acc18a0768c21ba22522f01a5c7f46cacc4d5 | evutil_parse_sockaddr_port(): fix buffer overflow
@asn-the-goblin-slayer:
"Length between '[' and ']' is cast to signed 32 bit integer on line 1815. Is
the length is more than 2<<31 (INT_MAX), len will hold a negative value.
Consequently, it will pass the check at line 1816. Segfault happens at line
1819.
Generate a resolv.conf with generate-resolv.conf, then compile and run
poc.c. See entry-functions.txt for functions in tor that might be
vulnerable.
Please credit 'Guido Vranken' for this discovery through the Tor bug bounty
program."
Reproducer for gdb (https://gist.github.com/azat/be2b0d5e9417ba0dfe2c):
start
p (1ULL<<31)+1ULL
# $1 = 2147483649
p malloc(sizeof(struct sockaddr))
# $2 = (void *) 0x646010
p malloc(sizeof(int))
# $3 = (void *) 0x646030
p malloc($1)
# $4 = (void *) 0x7fff76a2a010
p memset($4, 1, $1)
# $5 = 1990369296
p (char *)$4
# $6 = 0x7fff76a2a010 '\001' <repeats 200 times>...
set $6[0]='['
set $6[$1]=']'
p evutil_parse_sockaddr_port($4, $2, $3)
# $7 = -1
Before:
$ gdb bin/http-connect < gdb
(gdb) $1 = 2147483649
(gdb) (gdb) $2 = (void *) 0x646010
(gdb) (gdb) $3 = (void *) 0x646030
(gdb) (gdb) $4 = (void *) 0x7fff76a2a010
(gdb) (gdb) $5 = 1990369296
(gdb) (gdb) $6 = 0x7fff76a2a010 '\001' <repeats 200 times>...
(gdb) (gdb) (gdb) (gdb)
Program received signal SIGSEGV, Segmentation fault.
__memcpy_sse2_unaligned () at memcpy-sse2-unaligned.S:36
After:
$ gdb bin/http-connect < gdb
(gdb) $1 = 2147483649
(gdb) (gdb) $2 = (void *) 0x646010
(gdb) (gdb) $3 = (void *) 0x646030
(gdb) (gdb) $4 = (void *) 0x7fff76a2a010
(gdb) (gdb) $5 = 1990369296
(gdb) (gdb) $6 = 0x7fff76a2a010 '\001' <repeats 200 times>...
(gdb) (gdb) (gdb) (gdb) $7 = -1
(gdb) (gdb) quit
Fixes: #318 | evutil_hex_char_to_int_(char c)
{
switch(c)
{
case '0': return 0;
case '1': return 1;
case '2': return 2;
case '3': return 3;
case '4': return 4;
case '5': return 5;
case '6': return 6;
case '7': return 7;
case '8': return 8;
case '9': return 9;
case 'A': case 'a': return 10;
case 'B': case 'b': return 11;
case 'C': case 'c': return 12;
case 'D': case 'd': return 13;
case 'E': case 'e': return 14;
case 'F': case 'f': return 15;
}
return -1;
}
| evutil_hex_char_to_int_(char c)
{
switch(c)
{
case '0': return 0;
case '1': return 1;
case '2': return 2;
case '3': return 3;
case '4': return 4;
case '5': return 5;
case '6': return 6;
case '7': return 7;
case '8': return 8;
case '9': return 9;
case 'A': case 'a': return 10;
case 'B': case 'b': return 11;
case 'C': case 'c': return 12;
case 'D': case 'd': return 13;
case 'E': case 'e': return 14;
case 'F': case 'f': return 15;
}
return -1;
}
| C | libevent | 0 |
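The libevent report above is easy to reproduce in miniature: the bracketed-address length was held in a signed int, so a >2 GiB input made it negative, the size guard passed, and memcpy() received the negative value as an enormous size_t. A standalone before/after of just the length handling:

#include <string.h>

#define BUFLEN 128

/* broken: for inputs over 2 GiB, len goes negative, the guard passes,
 * and memcpy() sees (size_t)len, i.e. a gigantic copy */
int copy_bracketed_bad(const char *s, const char *rbracket, char *buf)
{
	int len = (int)(rbracket - (s + 1));
	if (len > BUFLEN - 1)
		return -1;
	memcpy(buf, s + 1, len);
	buf[len] = '\0';
	return 0;
}

/* fixed: keep the length unsigned so the range check is meaningful */
int copy_bracketed_ok(const char *s, const char *rbracket, char *buf)
{
	size_t len = (size_t)(rbracket - (s + 1));
	if (len > BUFLEN - 1)
		return -1;
	memcpy(buf, s + 1, len);
	buf[len] = '\0';
	return 0;
}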
CVE-2012-2890 | https://www.cvedetails.com/cve/CVE-2012-2890/ | CWE-399 | https://github.com/chromium/chromium/commit/a6f7726de20450074a01493e4e85409ce3f2595a | a6f7726de20450074a01493e4e85409ce3f2595a | Unreviewed, rolling out r147402.
http://trac.webkit.org/changeset/147402
https://bugs.webkit.org/show_bug.cgi?id=112903
Source/WebCore:
* dom/Document.cpp:
(WebCore::Document::processHttpEquiv):
* loader/DocumentLoader.cpp:
(WebCore::DocumentLoader::responseReceived):
LayoutTests:
* http/tests/security/XFrameOptions/x-frame-options-deny-expected.txt:
* http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag-expected.txt:
* http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag-in-body-expected.txt:
* http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag-in-body.html:
* http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag-parent-same-origin-deny-expected.txt:
* http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag-parent-same-origin-deny.html:
* http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag.html:
* http/tests/security/XFrameOptions/x-frame-options-deny.html:
* http/tests/security/XFrameOptions/x-frame-options-multiple-headers-sameorigin-deny-expected.txt:
* http/tests/security/XFrameOptions/x-frame-options-multiple-headers-sameorigin-deny.html:
* http/tests/security/XFrameOptions/x-frame-options-parent-same-origin-deny-expected.txt:
* http/tests/security/XFrameOptions/x-frame-options-parent-same-origin-deny.html:
* platform/chromium/http/tests/security/XFrameOptions/x-frame-options-deny-expected.txt:
* platform/chromium/http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag-expected.txt:
* platform/chromium/http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag-in-body-expected.txt:
* platform/chromium/http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag-parent-same-origin-deny-expected.txt:
* platform/chromium/http/tests/security/XFrameOptions/x-frame-options-multiple-headers-sameorigin-deny-expected.txt:
* platform/chromium/http/tests/security/XFrameOptions/x-frame-options-parent-same-origin-deny-expected.txt:
git-svn-id: svn://svn.chromium.org/blink/trunk@147450 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | void Document::didAddWheelEventHandler()
{
++m_wheelEventHandlerCount;
Frame* mainFrame = page() ? page()->mainFrame() : 0;
if (mainFrame)
mainFrame->notifyChromeClientWheelEventHandlerCountChanged();
wheelEventHandlerCountChanged(this);
}
| void Document::didAddWheelEventHandler()
{
++m_wheelEventHandlerCount;
Frame* mainFrame = page() ? page()->mainFrame() : 0;
if (mainFrame)
mainFrame->notifyChromeClientWheelEventHandlerCountChanged();
wheelEventHandlerCountChanged(this);
}
| C | Chrome | 0 |
CVE-2016-2449 | https://www.cvedetails.com/cve/CVE-2016-2449/ | CWE-264 | https://android.googlesource.com/platform/frameworks/av/+/b04aee833c5cfb6b31b8558350feb14bb1a0f353 | b04aee833c5cfb6b31b8558350feb14bb1a0f353 | Camera3Device: Validate template ID
Validate template ID before creating a default request.
Bug: 26866110
Bug: 27568958
Change-Id: Ifda457024f1d5c2b1382f189c1a8d5fda852d30d
| void Camera3Device::notifyError(const camera3_error_msg_t &msg,
NotificationListener *listener) {
static const ICameraDeviceCallbacks::CameraErrorCode
halErrorMap[CAMERA3_MSG_NUM_ERRORS] = {
ICameraDeviceCallbacks::ERROR_CAMERA_INVALID_ERROR,
ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE,
ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
ICameraDeviceCallbacks::ERROR_CAMERA_RESULT,
ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER
};
ICameraDeviceCallbacks::CameraErrorCode errorCode =
((msg.error_code >= 0) &&
(msg.error_code < CAMERA3_MSG_NUM_ERRORS)) ?
halErrorMap[msg.error_code] :
ICameraDeviceCallbacks::ERROR_CAMERA_INVALID_ERROR;
int streamId = 0;
if (msg.error_stream != NULL) {
Camera3Stream *stream =
Camera3Stream::cast(msg.error_stream);
streamId = stream->getId();
}
ALOGV("Camera %d: %s: HAL error, frame %d, stream %d: %d",
mId, __FUNCTION__, msg.frame_number,
streamId, msg.error_code);
CaptureResultExtras resultExtras;
switch (errorCode) {
case ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE:
SET_ERR("Camera HAL reported serious device error");
break;
case ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST:
case ICameraDeviceCallbacks::ERROR_CAMERA_RESULT:
case ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER:
{
Mutex::Autolock l(mInFlightLock);
ssize_t idx = mInFlightMap.indexOfKey(msg.frame_number);
if (idx >= 0) {
InFlightRequest &r = mInFlightMap.editValueAt(idx);
r.requestStatus = msg.error_code;
resultExtras = r.resultExtras;
} else {
resultExtras.frameNumber = msg.frame_number;
ALOGE("Camera %d: %s: cannot find in-flight request on "
"frame %" PRId64 " error", mId, __FUNCTION__,
resultExtras.frameNumber);
}
}
if (listener != NULL) {
listener->notifyError(errorCode, resultExtras);
} else {
ALOGE("Camera %d: %s: no listener available", mId, __FUNCTION__);
}
break;
default:
SET_ERR("Unknown error message from HAL: %d", msg.error_code);
break;
}
}
| void Camera3Device::notifyError(const camera3_error_msg_t &msg,
NotificationListener *listener) {
static const ICameraDeviceCallbacks::CameraErrorCode
halErrorMap[CAMERA3_MSG_NUM_ERRORS] = {
ICameraDeviceCallbacks::ERROR_CAMERA_INVALID_ERROR,
ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE,
ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
ICameraDeviceCallbacks::ERROR_CAMERA_RESULT,
ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER
};
ICameraDeviceCallbacks::CameraErrorCode errorCode =
((msg.error_code >= 0) &&
(msg.error_code < CAMERA3_MSG_NUM_ERRORS)) ?
halErrorMap[msg.error_code] :
ICameraDeviceCallbacks::ERROR_CAMERA_INVALID_ERROR;
int streamId = 0;
if (msg.error_stream != NULL) {
Camera3Stream *stream =
Camera3Stream::cast(msg.error_stream);
streamId = stream->getId();
}
ALOGV("Camera %d: %s: HAL error, frame %d, stream %d: %d",
mId, __FUNCTION__, msg.frame_number,
streamId, msg.error_code);
CaptureResultExtras resultExtras;
switch (errorCode) {
case ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE:
SET_ERR("Camera HAL reported serious device error");
break;
case ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST:
case ICameraDeviceCallbacks::ERROR_CAMERA_RESULT:
case ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER:
{
Mutex::Autolock l(mInFlightLock);
ssize_t idx = mInFlightMap.indexOfKey(msg.frame_number);
if (idx >= 0) {
InFlightRequest &r = mInFlightMap.editValueAt(idx);
r.requestStatus = msg.error_code;
resultExtras = r.resultExtras;
} else {
resultExtras.frameNumber = msg.frame_number;
ALOGE("Camera %d: %s: cannot find in-flight request on "
"frame %" PRId64 " error", mId, __FUNCTION__,
resultExtras.frameNumber);
}
}
if (listener != NULL) {
listener->notifyError(errorCode, resultExtras);
} else {
ALOGE("Camera %d: %s: no listener available", mId, __FUNCTION__);
}
break;
default:
SET_ERR("Unknown error message from HAL: %d", msg.error_code);
break;
}
}
| C | Android | 0 |
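The Camera3Device fix above is a plain range check on a previously trusted index: templateId selects an entry from a fixed table of default request templates, so it must be validated before the lookup. Sketch (CAMERA3_TEMPLATE_COUNT is the HAL's template-count constant; the table name is illustrative):

int create_default_request(int template_id, camera_metadata_t **out)
{
	/* reject out-of-range template IDs before indexing the table */
	if (template_id <= 0 || template_id >= CAMERA3_TEMPLATE_COUNT)
		return -EINVAL;		/* BAD_VALUE in the Android code */

	*out = default_templates[template_id];
	return 0;
}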
CVE-2017-6001 | https://www.cvedetails.com/cve/CVE-2017-6001/ | CWE-362 | https://github.com/torvalds/linux/commit/321027c1fe77f892f4ea07846aeae08cefbbb290 | 321027c1fe77f892f4ea07846aeae08cefbbb290 | perf/core: Fix concurrent sys_perf_event_open() vs. 'move_group' race
Di Shen reported a race between two concurrent sys_perf_event_open()
calls where both try and move the same pre-existing software group
into a hardware context.
The problem is exactly that described in commit:
f63a8daa5812 ("perf: Fix event->ctx locking")
... where, while we wait for a ctx->mutex acquisition, the event->ctx
relation can have changed under us.
That very same commit failed to recognise sys_perf_event_context() as an
external access vector to the events and thereby didn't apply the
established locking rules correctly.
So while one sys_perf_event_open() call is stuck waiting on
mutex_lock_double(), the other (which owns said locks) moves the group
about. So by the time the former sys_perf_event_open() acquires the
locks, the context we've acquired is stale (and possibly dead).
Apply the established locking rules as per perf_event_ctx_lock_nested()
to the mutex_lock_double() for the 'move_group' case. This obviously means
we need to validate state after we acquire the locks.
Reported-by: Di Shen (Keen Lab)
Tested-by: John Dias <joaodias@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Min Chong <mchong@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Fixes: f63a8daa5812 ("perf: Fix event->ctx locking")
Link: http://lkml.kernel.org/r/20170106131444.GZ3174@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org> | static int __perf_install_in_context(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
struct perf_event_context *task_ctx = cpuctx->task_ctx;
bool reprogram = true;
int ret = 0;
raw_spin_lock(&cpuctx->ctx.lock);
if (ctx->task) {
raw_spin_lock(&ctx->lock);
task_ctx = ctx;
reprogram = (ctx->task == current);
/*
* If the task is running, it must be running on this CPU,
* otherwise we cannot reprogram things.
*
* If its not running, we don't care, ctx->lock will
* serialize against it becoming runnable.
*/
if (task_curr(ctx->task) && !reprogram) {
ret = -ESRCH;
goto unlock;
}
WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx);
} else if (task_ctx) {
raw_spin_lock(&task_ctx->lock);
}
if (reprogram) {
ctx_sched_out(ctx, cpuctx, EVENT_TIME);
add_event_to_ctx(event, ctx);
ctx_resched(cpuctx, task_ctx);
} else {
add_event_to_ctx(event, ctx);
}
unlock:
perf_ctx_unlock(cpuctx, task_ctx);
return ret;
}
| static int __perf_install_in_context(void *info)
{
struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
struct perf_event_context *task_ctx = cpuctx->task_ctx;
bool reprogram = true;
int ret = 0;
raw_spin_lock(&cpuctx->ctx.lock);
if (ctx->task) {
raw_spin_lock(&ctx->lock);
task_ctx = ctx;
reprogram = (ctx->task == current);
/*
* If the task is running, it must be running on this CPU,
* otherwise we cannot reprogram things.
*
* If its not running, we don't care, ctx->lock will
* serialize against it becoming runnable.
*/
if (task_curr(ctx->task) && !reprogram) {
ret = -ESRCH;
goto unlock;
}
WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx);
} else if (task_ctx) {
raw_spin_lock(&task_ctx->lock);
}
if (reprogram) {
ctx_sched_out(ctx, cpuctx, EVENT_TIME);
add_event_to_ctx(event, ctx);
ctx_resched(cpuctx, task_ctx);
} else {
add_event_to_ctx(event, ctx);
}
unlock:
perf_ctx_unlock(cpuctx, task_ctx);
return ret;
}
| C | linux | 0 |
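The perf race above follows the locking rule the message cites: any pointer sampled before mutex_lock_double() must be revalidated after it, since the group can be moved while we sleep on the mutexes. The retry shape, heavily condensed from the upstream fix:

again:
	mutex_lock_double(&gctx->mutex, &ctx->mutex);

	/* gctx was read before we slept; if a concurrent
	 * sys_perf_event_open() moved the group meanwhile, the context
	 * we locked is stale: drop it and retry */
	if (group_leader->ctx != gctx) {
		mutex_unlock(&ctx->mutex);
		mutex_unlock(&gctx->mutex);
		put_ctx(gctx);
		gctx = READ_ONCE(group_leader->ctx);
		get_ctx(gctx);
		goto again;
	}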
CVE-2012-2880 | https://www.cvedetails.com/cve/CVE-2012-2880/ | CWE-362 | https://github.com/chromium/chromium/commit/fcd3a7a671ecf2d5f46ea34787d27507a914d2f5 | fcd3a7a671ecf2d5f46ea34787d27507a914d2f5 | [Sync] Cleanup all tab sync enabling logic now that it's on by default.
BUG=none
TEST=
Review URL: https://chromiumcodereview.appspot.com/10443046
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@139462 0039d316-1c4b-4281-b951-d872f2087c98 | void SyncManager::SetEncryptionPassphrase(const std::string& passphrase,
bool is_explicit) {
DCHECK(thread_checker_.CalledOnValidThread());
data_->SetEncryptionPassphrase(passphrase, is_explicit);
}
| void SyncManager::SetEncryptionPassphrase(const std::string& passphrase,
bool is_explicit) {
DCHECK(thread_checker_.CalledOnValidThread());
data_->SetEncryptionPassphrase(passphrase, is_explicit);
}
| C | Chrome | 0 |
CVE-2011-1799 | https://www.cvedetails.com/cve/CVE-2011-1799/ | CWE-20 | https://github.com/chromium/chromium/commit/5fd35e5359c6345b8709695cd71fba307318e6aa | 5fd35e5359c6345b8709695cd71fba307318e6aa | Source/WebCore: Fix for bug 64046 - Wrong image height in absolutely positioned div in
relatively positioned parent with bottom padding.
https://bugs.webkit.org/show_bug.cgi?id=64046
Patch by Kulanthaivel Palanichamy <kulanthaivel@codeaurora.org> on 2011-07-21
Reviewed by David Hyatt.
Test: fast/css/absolute-child-with-percent-height-inside-relative-parent.html
* rendering/RenderBox.cpp:
(WebCore::RenderBox::availableLogicalHeightUsing):
LayoutTests: Test to cover absolutely positioned child with percentage height
in relatively positioned parent with bottom padding.
https://bugs.webkit.org/show_bug.cgi?id=64046
Patch by Kulanthaivel Palanichamy <kulanthaivel@codeaurora.org> on 2011-07-21
Reviewed by David Hyatt.
* fast/css/absolute-child-with-percent-height-inside-relative-parent-expected.txt: Added.
* fast/css/absolute-child-with-percent-height-inside-relative-parent.html: Added.
git-svn-id: svn://svn.chromium.org/blink/trunk@91533 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | void RenderBox::addLayoutOverflow(const LayoutRect& rect)
{
LayoutRect clientBox = clientBoxRect();
if (clientBox.contains(rect) || rect.isEmpty())
return;
LayoutRect overflowRect(rect);
if (hasOverflowClip() || isRenderView()) {
bool hasTopOverflow = !style()->isLeftToRightDirection() && !isHorizontalWritingMode();
bool hasLeftOverflow = !style()->isLeftToRightDirection() && isHorizontalWritingMode();
if (!hasTopOverflow)
overflowRect.shiftYEdgeTo(max(overflowRect.y(), clientBox.y()));
else
overflowRect.shiftMaxYEdgeTo(min(overflowRect.maxY(), clientBox.maxY()));
if (!hasLeftOverflow)
overflowRect.shiftXEdgeTo(max(overflowRect.x(), clientBox.x()));
else
overflowRect.shiftMaxXEdgeTo(min(overflowRect.maxX(), clientBox.maxX()));
if (clientBox.contains(overflowRect) || overflowRect.isEmpty())
return;
}
if (!m_overflow)
m_overflow = adoptPtr(new RenderOverflow(clientBox, borderBoxRect()));
m_overflow->addLayoutOverflow(overflowRect);
}
| void RenderBox::addLayoutOverflow(const LayoutRect& rect)
{
LayoutRect clientBox = clientBoxRect();
if (clientBox.contains(rect) || rect.isEmpty())
return;
LayoutRect overflowRect(rect);
if (hasOverflowClip() || isRenderView()) {
bool hasTopOverflow = !style()->isLeftToRightDirection() && !isHorizontalWritingMode();
bool hasLeftOverflow = !style()->isLeftToRightDirection() && isHorizontalWritingMode();
if (!hasTopOverflow)
overflowRect.shiftYEdgeTo(max(overflowRect.y(), clientBox.y()));
else
overflowRect.shiftMaxYEdgeTo(min(overflowRect.maxY(), clientBox.maxY()));
if (!hasLeftOverflow)
overflowRect.shiftXEdgeTo(max(overflowRect.x(), clientBox.x()));
else
overflowRect.shiftMaxXEdgeTo(min(overflowRect.maxX(), clientBox.maxX()));
if (clientBox.contains(overflowRect) || overflowRect.isEmpty())
return;
}
if (!m_overflow)
m_overflow = adoptPtr(new RenderOverflow(clientBox, borderBoxRect()));
m_overflow->addLayoutOverflow(overflowRect);
}
| C | Chrome | 0 |
null | null | null | https://github.com/chromium/chromium/commit/a03d4448faf2c40f4ef444a88cb9aace5b98e8c4 | a03d4448faf2c40f4ef444a88cb9aace5b98e8c4 | Introduce background.scripts feature for extension manifests.
This optimizes for the common use case where background pages
just include a reference to one or more script files and no
additional HTML.
BUG=107791
Review URL: http://codereview.chromium.org/9150008
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@117110 0039d316-1c4b-4281-b951-d872f2087c98 | void TestingAutomationProvider::GoBack(
DictionaryValue* args,
IPC::Message* reply_message) {
if (SendErrorIfModalDialogActive(this, reply_message))
return;
WebContents* web_contents;
std::string error;
if (!GetTabFromJSONArgs(args, &web_contents, &error)) {
AutomationJSONReply(this, reply_message).SendError(error);
return;
}
NavigationController& controller = web_contents->GetController();
if (!controller.CanGoBack()) {
DictionaryValue dict;
dict.SetBoolean("did_go_back", false);
AutomationJSONReply(this, reply_message).SendSuccess(&dict);
return;
}
new NavigationNotificationObserver(&controller, this, reply_message,
1, false, true);
controller.GoBack();
}
| void TestingAutomationProvider::GoBack(
DictionaryValue* args,
IPC::Message* reply_message) {
if (SendErrorIfModalDialogActive(this, reply_message))
return;
WebContents* web_contents;
std::string error;
if (!GetTabFromJSONArgs(args, &web_contents, &error)) {
AutomationJSONReply(this, reply_message).SendError(error);
return;
}
NavigationController& controller = web_contents->GetController();
if (!controller.CanGoBack()) {
DictionaryValue dict;
dict.SetBoolean("did_go_back", false);
AutomationJSONReply(this, reply_message).SendSuccess(&dict);
return;
}
new NavigationNotificationObserver(&controller, this, reply_message,
1, false, true);
controller.GoBack();
}
| C | Chrome | 0 |
null | null | null | https://github.com/chromium/chromium/commit/87c15175997b0103166020d79fe9048dcf4025f4 | 87c15175997b0103166020d79fe9048dcf4025f4 | Add support for horizontal mouse wheel messages in Windows Desktop Aura.
This is simply a matter of recognizing the WM_MOUSEHWHEEL message as a valid mouse wheel message.
Tested this on web pages with horizontal scrollbars and it works well.
BUG=332797
R=sky@chromium.org, sky
Review URL: https://codereview.chromium.org/140653006
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@245651 0039d316-1c4b-4281-b951-d872f2087c98 | base::NativeEvent CreateNoopEvent() {
MSG event = { NULL };
event.message = WM_USER + 310;
return event;
}
| base::NativeEvent CreateNoopEvent() {
MSG event = { NULL };
event.message = WM_USER + 310;
return event;
}
| C | Chrome | 0 |
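As the horizontal-wheel commit above says, the change is one extra case: Windows delivers horizontal ticks as WM_MOUSEHWHEEL, a sibling of WM_MOUSEWHEEL, and the event classifier simply has to accept both. Sketch:

#include <stdbool.h>
#include <windows.h>

static bool is_mouse_wheel_message(const MSG *msg)
{
	return msg->message == WM_MOUSEWHEEL ||
	       msg->message == WM_MOUSEHWHEEL;	/* the added case */
}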
CVE-2018-9508 | https://www.cvedetails.com/cve/CVE-2018-9508/ | CWE-125 | https://android.googlesource.com/platform/system/bt/+/e8bbf5b0889790cf8616f4004867f0ff656f0551 | e8bbf5b0889790cf8616f4004867f0ff656f0551 | DO NOT MERGE Fix OOB read before buffer length check
Bug: 111936834
Test: manual
Change-Id: Ib98528fb62db0d724ebd9112d071e367f78e369d
(cherry picked from commit 4548f34c90803c6544f6bed03399f2eabeab2a8e)
| void smp_process_secure_connection_oob_data(tSMP_CB* p_cb,
tSMP_INT_DATA* p_data) {
SMP_TRACE_DEBUG("%s", __func__);
tSMP_SC_OOB_DATA* p_sc_oob_data = &p_cb->sc_oob_data;
if (p_sc_oob_data->loc_oob_data.present) {
memcpy(p_cb->local_random, p_sc_oob_data->loc_oob_data.randomizer,
sizeof(p_cb->local_random));
} else {
SMP_TRACE_EVENT("%s: local OOB randomizer is absent", __func__);
memset(p_cb->local_random, 0, sizeof(p_cb->local_random));
}
if (!p_sc_oob_data->peer_oob_data.present) {
SMP_TRACE_EVENT("%s: peer OOB data is absent", __func__);
memset(p_cb->peer_random, 0, sizeof(p_cb->peer_random));
} else {
memcpy(p_cb->peer_random, p_sc_oob_data->peer_oob_data.randomizer,
sizeof(p_cb->peer_random));
memcpy(p_cb->remote_commitment, p_sc_oob_data->peer_oob_data.commitment,
sizeof(p_cb->remote_commitment));
uint8_t reason = SMP_CONFIRM_VALUE_ERR;
/* check commitment */
if (!smp_check_commitment(p_cb)) {
p_cb->failure = reason;
smp_sm_event(p_cb, SMP_AUTH_CMPL_EVT, &reason);
return;
}
if (p_cb->peer_oob_flag != SMP_OOB_PRESENT) {
/* the peer doesn't have local randomiser */
SMP_TRACE_EVENT(
"%s: peer didn't receive local OOB data, set local randomizer to 0",
__func__);
memset(p_cb->local_random, 0, sizeof(p_cb->local_random));
}
}
print128(p_cb->local_random, (const uint8_t*)"local OOB randomizer");
print128(p_cb->peer_random, (const uint8_t*)"peer OOB randomizer");
smp_start_nonce_generation(p_cb);
}
| void smp_process_secure_connection_oob_data(tSMP_CB* p_cb,
tSMP_INT_DATA* p_data) {
SMP_TRACE_DEBUG("%s", __func__);
tSMP_SC_OOB_DATA* p_sc_oob_data = &p_cb->sc_oob_data;
if (p_sc_oob_data->loc_oob_data.present) {
memcpy(p_cb->local_random, p_sc_oob_data->loc_oob_data.randomizer,
sizeof(p_cb->local_random));
} else {
SMP_TRACE_EVENT("%s: local OOB randomizer is absent", __func__);
memset(p_cb->local_random, 0, sizeof(p_cb->local_random));
}
if (!p_sc_oob_data->peer_oob_data.present) {
SMP_TRACE_EVENT("%s: peer OOB data is absent", __func__);
memset(p_cb->peer_random, 0, sizeof(p_cb->peer_random));
} else {
memcpy(p_cb->peer_random, p_sc_oob_data->peer_oob_data.randomizer,
sizeof(p_cb->peer_random));
memcpy(p_cb->remote_commitment, p_sc_oob_data->peer_oob_data.commitment,
sizeof(p_cb->remote_commitment));
uint8_t reason = SMP_CONFIRM_VALUE_ERR;
/* check commitment */
if (!smp_check_commitment(p_cb)) {
p_cb->failure = reason;
smp_sm_event(p_cb, SMP_AUTH_CMPL_EVT, &reason);
return;
}
if (p_cb->peer_oob_flag != SMP_OOB_PRESENT) {
/* the peer doesn't have local randomiser */
SMP_TRACE_EVENT(
"%s: peer didn't receive local OOB data, set local randomizer to 0",
__func__);
memset(p_cb->local_random, 0, sizeof(p_cb->local_random));
}
}
print128(p_cb->local_random, (const uint8_t*)"local OOB randomizer");
print128(p_cb->peer_random, (const uint8_t*)"peer OOB randomizer");
smp_start_nonce_generation(p_cb);
}
| C | Android | 0 |
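The SMP fix above restates the basic parse-order rule: validate the received length against what you are about to read before touching the payload. A generic sketch (the field layout is illustrative, not the exact SMP command):

#include <stdbool.h>
#include <stdint.h>

static bool parse_fixed_cmd(const uint8_t *p, uint16_t len)
{
	const uint16_t expected = 6;	/* fixed-size body (example) */

	if (len < expected)		/* length check must come first */
		return false;		/* or the reads below go OOB */

	uint8_t io_cap = p[0];
	uint8_t oob_flag = p[1];
	/* ... remaining fields ... */
	(void)io_cap;
	(void)oob_flag;
	return true;
}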
CVE-2016-2476 | https://www.cvedetails.com/cve/CVE-2016-2476/ | CWE-119 | https://android.googlesource.com/platform/frameworks/av/+/94d9e646454f6246bf823b6897bd6aea5f08eda3 | 94d9e646454f6246bf823b6897bd6aea5f08eda3 | Fix initialization of AAC presentation struct
Otherwise the new size checks trip on this.
Bug: 27207275
Change-Id: I1f8f01097e3a88ff041b69279a6121be842f1766
| void ACodec::PortDescription::addBuffer(
IOMX::buffer_id id, const sp<ABuffer> &buffer) {
mBufferIDs.push_back(id);
mBuffers.push_back(buffer);
}
| void ACodec::PortDescription::addBuffer(
IOMX::buffer_id id, const sp<ABuffer> &buffer) {
mBufferIDs.push_back(id);
mBuffers.push_back(buffer);
}
| C | Android | 0 |
CVE-2016-3074 | https://www.cvedetails.com/cve/CVE-2016-3074/ | CWE-189 | https://github.com/libgd/libgd/commit/2bb97f407c1145c850416a3bfbcc8cf124e68a19 | 2bb97f407c1145c850416a3bfbcc8cf124e68a19 | gd2: handle corrupt images better (CVE-2016-3074)
Make sure we do some range checking on corrupted chunks.
Thanks to Hans Jerry Illikainen <hji@dyntopia.com> for an in-depth report
and reproducer information. Made for easy test case writing :). | BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2 (FILE * inFile)
{
_noLibzError();
return NULL;
}
| BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2 (FILE * inFile)
{
_noLibzError();
return NULL;
}
| C | libgd | 0 |
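For the GD2 fix above, "range checking on corrupted chunks" means treating every size and offset read from the file's chunk index as hostile before it drives an allocation or a seek. A sketch of the kind of guards involved (field names follow the GD2 chunk index; the exact set of checks in the patch is larger):

int chunk_max = 0;

for (i = 0; i < nc; i++) {
	if (chunkIdx[i].offset <= 0 || chunkIdx[i].size <= 0)
		goto fail;			/* corrupt entry */
	if (chunkIdx[i].size > chunk_max)
		chunk_max = chunkIdx[i].size;
}
if (chunk_max <= 0)
	goto fail;				/* nothing sane to decode */

chunkBuf = gdCalloc(chunk_max, 1);		/* bounded allocation */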
CVE-2015-8324 | https://www.cvedetails.com/cve/CVE-2015-8324/ | null | https://github.com/torvalds/linux/commit/744692dc059845b2a3022119871846e74d4f6e11 | 744692dc059845b2a3022119871846e74d4f6e11 | ext4: use ext4_get_block_write in buffer write
Allocate an uninitialized extent before an ext4 buffer write, and
convert the extent to initialized after I/O completes.
The purpose is to make sure an extent can only be marked
initialized after it has been written with new data so
we can safely drop the i_mutex lock in ext4 DIO read without
exposing stale data. This helps to improve multi-thread DIO
read performance on high-speed disks.
Skip the nobh and data=journal mount cases to make things simple for now.
Signed-off-by: Jiaying Zhang <jiayingz@google.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu> | static void ext4_clear_inode(struct inode *inode)
{
ext4_discard_preallocations(inode);
if (EXT4_JOURNAL(inode))
jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal,
&EXT4_I(inode)->jinode);
}
| static void ext4_clear_inode(struct inode *inode)
{
ext4_discard_preallocations(inode);
if (EXT4_JOURNAL(inode))
jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal,
&EXT4_I(inode)->jinode);
}
| C | linux | 0 |
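The ext4 commit above describes a two-phase pattern: map the blocks as an unwritten (uninitialized) extent at write time, and flip them to initialized only when the I/O has actually completed, so a concurrent lockless DIO read can never see stale disk contents. A conceptual sketch, with names following ext4 but the flow greatly simplified (an assumption-level illustration, not the patch itself):

/* write path: allocate, but mark the extent unwritten */
ret = ext4_map_blocks(handle, inode, &map,
		      EXT4_GET_BLOCKS_IO_CREATE_EXT);

/* I/O completion (workqueue): only now expose the data */
ext4_convert_unwritten_extents(inode, offset, size);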
CVE-2017-18222 | https://www.cvedetails.com/cve/CVE-2017-18222/ | CWE-119 | https://github.com/torvalds/linux/commit/412b65d15a7f8a93794653968308fc100f2aa87c | 412b65d15a7f8a93794653968308fc100f2aa87c | net: hns: fix ethtool_get_strings overflow in hns driver
hns_get_sset_count() returns HNS_NET_STATS_CNT and the data space allocated
is not enough for ethtool_get_strings(), which will cause random memory
corruption.
When SLAB and DEBUG_SLAB are both enabled, memory corruptions like
the following can be observed without this patch:
[ 43.115200] Slab corruption (Not tainted): Acpi-ParseExt start=ffff801fb0b69030, len=80
[ 43.115206] Redzone: 0x9f911029d006462/0x5f78745f31657070.
[ 43.115208] Last user: [<5f7272655f746b70>](0x5f7272655f746b70)
[ 43.115214] 010: 70 70 65 31 5f 74 78 5f 70 6b 74 00 6b 6b 6b 6b ppe1_tx_pkt.kkkk
[ 43.115217] 030: 70 70 65 31 5f 74 78 5f 70 6b 74 5f 6f 6b 00 6b ppe1_tx_pkt_ok.k
[ 43.115218] Next obj: start=ffff801fb0b69098, len=80
[ 43.115220] Redzone: 0x706d655f6f666966/0x9f911029d74e35b.
[ 43.115229] Last user: [<ffff0000084b11b0>](acpi_os_release_object+0x28/0x38)
[ 43.115231] 000: 74 79 00 6b 6b 6b 6b 6b 70 70 65 31 5f 74 78 5f ty.kkkkkppe1_tx_
[ 43.115232] 010: 70 6b 74 5f 65 72 72 5f 63 73 75 6d 5f 66 61 69 pkt_err_csum_fai
Signed-off-by: Timmy Li <lixiaoping3@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | static int hns_xgmac_get_regs_count(void)
{
return HNS_XGMAC_DUMP_NUM;
}
| static int hns_xgmac_get_regs_count(void)
{
return HNS_XGMAC_DUMP_NUM;
}
| C | linux | 0 |
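The hns overflow above is a broken ethtool contract: the buffer passed to get_strings() is sized from get_sset_count(), so a driver must emit exactly that many ETH_GSTRING_LEN entries, never more. The invariant, sketched for a generic driver:

static int my_get_sset_count(struct net_device *dev, int stringset)
{
	return (stringset == ETH_SS_STATS) ? MY_STATS_CNT : -EOPNOTSUPP;
}

static void my_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;
	/* write exactly MY_STATS_CNT names: the buffer was sized by
	 * my_get_sset_count(), one ETH_GSTRING_LEN slot each */
	for (i = 0; i < MY_STATS_CNT; i++)
		strlcpy(data + i * ETH_GSTRING_LEN,
			my_stats[i].name, ETH_GSTRING_LEN);
}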
CVE-2011-2789 | https://www.cvedetails.com/cve/CVE-2011-2789/ | CWE-399 | https://github.com/chromium/chromium/commit/55ef04e135edaa9abfbf3647634b11ed57dc49e9 | 55ef04e135edaa9abfbf3647634b11ed57dc49e9 | Maintain a map of all resources in the resource tracker and clear instance back pointers when needed,
BUG=85808
Review URL: http://codereview.chromium.org/7196001
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@89746 0039d316-1c4b-4281-b951-d872f2087c98 | PPB_URLLoader_API* PPB_URLLoader_Impl::AsPPB_URLLoader_API() {
return this;
}
| PPB_URLLoader_API* PPB_URLLoader_Impl::AsPPB_URLLoader_API() {
return this;
}
| C | Chrome | 0 |
CVE-2017-18203 | https://www.cvedetails.com/cve/CVE-2017-18203/ | CWE-362 | https://github.com/torvalds/linux/commit/b9a41d21dceadf8104812626ef85dc56ee8a60ed | b9a41d21dceadf8104812626ef85dc56ee8a60ed | dm: fix race between dm_get_from_kobject() and __dm_destroy()
The following BUG_ON was hit when testing repeated creation and removal of
DM devices:
kernel BUG at drivers/md/dm.c:2919!
CPU: 7 PID: 750 Comm: systemd-udevd Not tainted 4.1.44
Call Trace:
[<ffffffff81649e8b>] dm_get_from_kobject+0x34/0x3a
[<ffffffff81650ef1>] dm_attr_show+0x2b/0x5e
[<ffffffff817b46d1>] ? mutex_lock+0x26/0x44
[<ffffffff811df7f5>] sysfs_kf_seq_show+0x83/0xcf
[<ffffffff811de257>] kernfs_seq_show+0x23/0x25
[<ffffffff81199118>] seq_read+0x16f/0x325
[<ffffffff811de994>] kernfs_fop_read+0x3a/0x13f
[<ffffffff8117b625>] __vfs_read+0x26/0x9d
[<ffffffff8130eb59>] ? security_file_permission+0x3c/0x44
[<ffffffff8117bdb8>] ? rw_verify_area+0x83/0xd9
[<ffffffff8117be9d>] vfs_read+0x8f/0xcf
[<ffffffff81193e34>] ? __fdget_pos+0x12/0x41
[<ffffffff8117c686>] SyS_read+0x4b/0x76
[<ffffffff817b606e>] system_call_fastpath+0x12/0x71
The bug can be easily triggered if an extra delay (e.g. 10ms) is added
between the test of DMF_FREEING & DMF_DELETING and dm_get() in
dm_get_from_kobject().
To fix it, we need to ensure the test of DMF_FREEING & DMF_DELETING and
dm_get() are done in an atomic way, so _minor_lock is used.
The other callers of dm_get() have also been checked to be OK: some
callers invoke dm_get() under _minor_lock, some callers invoke it under
_hash_lock, and dm_start_request() invoke it after increasing
md->open_count.
Cc: stable@vger.kernel.org
Signed-off-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com> | void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
BUG_ON(bio->bi_opf & REQ_PREFLUSH);
BUG_ON(bi_size > *tio->len_ptr);
BUG_ON(n_sectors > bi_size);
*tio->len_ptr -= bi_size - n_sectors;
bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
| void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
BUG_ON(bio->bi_opf & REQ_PREFLUSH);
BUG_ON(bi_size > *tio->len_ptr);
BUG_ON(n_sectors > bi_size);
*tio->len_ptr -= bi_size - n_sectors;
bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
| C | linux | 0 |
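Per the dm commit above, the fix makes the DMF flag test and the reference grab a single atomic step under _minor_lock, so dm_get() can no longer run after __dm_destroy() has set DMF_FREEING. The fixed function, condensed:

struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);

	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}
	dm_get(md);
out:
	spin_unlock(&_minor_lock);
	return md;
}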
CVE-2019-14763 | https://www.cvedetails.com/cve/CVE-2019-14763/ | CWE-189 | https://github.com/torvalds/linux/commit/c91815b596245fd7da349ecc43c8def670d2269e | c91815b596245fd7da349ecc43c8def670d2269e | usb: dwc3: gadget: never call ->complete() from ->ep_queue()
This is a requirement which has always existed but, somehow, wasn't
reflected in the documentation and problems weren't found until now
when Tuba Yavuz found a possible deadlock happening between dwc3 and
f_hid. She described the situation as follows:
spin_lock_irqsave(&hidg->write_spinlock, flags); // first acquire
/* we our function has been disabled by host */
if (!hidg->req) {
free_ep_req(hidg->in_ep, hidg->req);
goto try_again;
}
[...]
status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC);
=>
[...]
=> usb_gadget_giveback_request
=>
f_hidg_req_complete
=>
spin_lock_irqsave(&hidg->write_spinlock, flags); // second acquire
Note that this happens because dwc3 would call ->complete() on a
failed usb_ep_queue() due to failed Start Transfer command. This is,
anyway, a theoretical situation because dwc3 currently uses "No
Response Update Transfer" command for Bulk and Interrupt endpoints.
It's still good to make this case impossible to happen even if the "No
Reponse Update Transfer" command is changed.
Reported-by: Tuba Yavuz <tuba@ece.ufl.edu>
Signed-off-by: Felipe Balbi <felipe.balbi@linux.intel.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | int dwc3_gadget_resume(struct dwc3 *dwc)
{
int ret;
if (!dwc->gadget_driver)
return 0;
ret = __dwc3_gadget_start(dwc);
if (ret < 0)
goto err0;
ret = dwc3_gadget_run_stop(dwc, true, false);
if (ret < 0)
goto err1;
return 0;
err1:
__dwc3_gadget_stop(dwc);
err0:
return ret;
}
| int dwc3_gadget_resume(struct dwc3 *dwc)
{
int ret;
if (!dwc->gadget_driver)
return 0;
ret = __dwc3_gadget_start(dwc);
if (ret < 0)
goto err0;
ret = dwc3_gadget_run_stop(dwc, true, false);
if (ret < 0)
goto err1;
return 0;
err1:
__dwc3_gadget_stop(dwc);
err0:
return ret;
}
| C | linux | 0 |
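The dwc3 rule above (->ep_queue() must never invoke ->complete()) turns the failure path of a Start Transfer into "unlink and return the error" instead of "give the request back", since giveback runs the gadget driver's completion callback under whatever lock the caller already holds. A condensed sketch of the ep_queue failure path (an illustration of the rule, not the literal diff):

/* inside the ->ep_queue() path, after queuing 'req' */
ret = __dwc3_gadget_kick_transfer(dep);
if (ret && ret != -EBUSY) {
	/* previously: dwc3_gadget_giveback(dep, req, ret) ran here and
	 * re-entered the gadget driver via req->complete() */
	list_del(&req->list);
	return ret;	/* usb_ep_queue() fails; no callback fires */
}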
CVE-2016-5219 | https://www.cvedetails.com/cve/CVE-2016-5219/ | CWE-416 | https://github.com/chromium/chromium/commit/a4150b688a754d3d10d2ca385155b1c95d77d6ae | a4150b688a754d3d10d2ca385155b1c95d77d6ae | Add GL_PROGRAM_COMPLETION_QUERY_CHROMIUM
This makes querying GL_COMPLETION_STATUS_KHR on programs much
cheaper by minimizing the round-trip to the GPU thread.
Bug: 881152, 957001
Change-Id: Iadfa798af29225e752c710ca5c25f50b3dd3101a
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1586630
Commit-Queue: Kenneth Russell <kbr@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Geoff Lang <geofflang@chromium.org>
Reviewed-by: Kenneth Russell <kbr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#657568} | void GLES2Implementation::BufferSubData(GLenum target,
GLintptr offset,
GLsizeiptr size,
const void* data) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBufferSubData("
<< GLES2Util::GetStringBufferTarget(target) << ", "
<< offset << ", " << size << ", "
<< static_cast<const void*>(data) << ")");
BufferSubDataHelper(target, offset, size, data);
CheckGLError();
}
| void GLES2Implementation::BufferSubData(GLenum target,
GLintptr offset,
GLsizeiptr size,
const void* data) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBufferSubData("
<< GLES2Util::GetStringBufferTarget(target) << ", "
<< offset << ", " << size << ", "
<< static_cast<const void*>(data) << ")");
BufferSubDataHelper(target, offset, size, data);
CheckGLError();
}
| C | Chrome | 0 |
CVE-2013-0250 | https://www.cvedetails.com/cve/CVE-2013-0250/ | null | https://github.com/corosync/corosync/commit/b3f456a8ceefac6e9f2e9acc2ea0c159d412b595 | b3f456a8ceefac6e9f2e9acc2ea0c159d412b595 | totemcrypto: fix hmac key initialization
Signed-off-by: Fabio M. Di Nitto <fdinitto@redhat.com>
Reviewed-by: Jan Friesse <jfriesse@redhat.com> | static int init_nss_db(struct crypto_instance *instance)
{
if ((!cipher_to_nss[instance->crypto_cipher_type]) &&
(!hash_to_nss[instance->crypto_hash_type])) {
return 0;
}
if (NSS_NoDB_Init(".") != SECSuccess) {
log_printf(instance->log_level_security, "NSS DB initialization failed (err %d)",
PR_GetError());
return -1;
}
return 0;
}
| static int init_nss_db(struct crypto_instance *instance)
{
if ((!cipher_to_nss[instance->crypto_cipher_type]) &&
(!hash_to_nss[instance->crypto_hash_type])) {
return 0;
}
if (NSS_NoDB_Init(".") != SECSuccess) {
log_printf(instance->log_level_security, "NSS DB initialization failed (err %d)",
PR_GetError());
return -1;
}
return 0;
}
| C | corosync | 0 |
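For the corosync record above, the general shape of initializing an NSS HMAC key is worth seeing: the SECItem handed to PK11_ImportSymKey() must describe the real private key (buffer and true length), or the HMAC is computed over the wrong key material. A generic sketch, not the literal corosync diff:

SECItem key_item = {
	.type = siBuffer,
	.data = instance->private_key,
	.len  = instance->private_key_len,	/* the actual key length */
};

PK11SymKey *hmac_key = PK11_ImportSymKey(slot,
					 CKM_SHA_1_HMAC,  /* example mechanism */
					 PK11_OriginUnwrap,
					 CKA_SIGN,
					 &key_item, NULL);
if (hmac_key == NULL)
	return -1;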
CVE-2016-10030 | https://www.cvedetails.com/cve/CVE-2016-10030/ | CWE-284 | https://github.com/SchedMD/slurm/commit/92362a92fffe60187df61f99ab11c249d44120ee | 92362a92fffe60187df61f99ab11c249d44120ee | Fix security issue in _prolog_error().
Fix security issue caused by insecure file path handling triggered by
the failure of a Prolog script. To exploit this, a user needs to
anticipate or cause the Prolog to fail for their job.
(This commit is slightly different from the fix to the 15.08 branch.)
CVE-2016-10030. | _get_step_list(void)
{
char tmp[64];
char *step_list = NULL;
List steps;
ListIterator i;
step_loc_t *stepd;
steps = stepd_available(conf->spooldir, conf->node_name);
i = list_iterator_create(steps);
while ((stepd = list_next(i))) {
int fd;
fd = stepd_connect(stepd->directory, stepd->nodename,
stepd->jobid, stepd->stepid,
&stepd->protocol_version);
if (fd == -1)
continue;
if (stepd_state(fd, stepd->protocol_version)
== SLURMSTEPD_NOT_RUNNING) {
debug("stale domain socket for stepd %u.%u ",
stepd->jobid, stepd->stepid);
close(fd);
continue;
}
close(fd);
if (step_list)
xstrcat(step_list, ", ");
if (stepd->stepid == NO_VAL) {
snprintf(tmp, sizeof(tmp), "%u",
stepd->jobid);
xstrcat(step_list, tmp);
} else {
snprintf(tmp, sizeof(tmp), "%u.%u",
stepd->jobid, stepd->stepid);
xstrcat(step_list, tmp);
}
}
list_iterator_destroy(i);
FREE_NULL_LIST(steps);
if (step_list == NULL)
xstrcat(step_list, "NONE");
return step_list;
}
| _get_step_list(void)
{
char tmp[64];
char *step_list = NULL;
List steps;
ListIterator i;
step_loc_t *stepd;
steps = stepd_available(conf->spooldir, conf->node_name);
i = list_iterator_create(steps);
while ((stepd = list_next(i))) {
int fd;
fd = stepd_connect(stepd->directory, stepd->nodename,
stepd->jobid, stepd->stepid,
&stepd->protocol_version);
if (fd == -1)
continue;
if (stepd_state(fd, stepd->protocol_version)
== SLURMSTEPD_NOT_RUNNING) {
debug("stale domain socket for stepd %u.%u ",
stepd->jobid, stepd->stepid);
close(fd);
continue;
}
close(fd);
if (step_list)
xstrcat(step_list, ", ");
if (stepd->stepid == NO_VAL) {
snprintf(tmp, sizeof(tmp), "%u",
stepd->jobid);
xstrcat(step_list, tmp);
} else {
snprintf(tmp, sizeof(tmp), "%u.%u",
stepd->jobid, stepd->stepid);
xstrcat(step_list, tmp);
}
}
list_iterator_destroy(i);
FREE_NULL_LIST(steps);
if (step_list == NULL)
xstrcat(step_list, "NONE");
return step_list;
}
| C | slurm | 0 |
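For the slurmd record above, "insecure file path handling" in _prolog_error() means a root daemon created a log file along a path the job owner can influence. The standard hardening, sketched generically (this is the usual pattern, not necessarily the exact SchedMD patch):

#include <fcntl.h>
#include <sys/stat.h>

/* create the prolog-failure log without following attacker-planted
 * symlinks and without clobbering an existing file */
int open_error_file_safely(const char *path)
{
	return open(path, O_CREAT | O_EXCL | O_WRONLY | O_NOFOLLOW,
		    S_IRUSR | S_IWUSR);
}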
null | null | null | https://github.com/chromium/chromium/commit/cfcce91cfc1a14a91a672ad0f052b86a5c1768a3 | cfcce91cfc1a14a91a672ad0f052b86a5c1768a3 | Fix indents. I am used to webkit style, so did it wrong in the last commit.
BUG=68244
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@70637 0039d316-1c4b-4281-b951-d872f2087c98 | static void FoldChannels(Format* buf_out,
int sample_count,
const float volume,
int channels) {
Format* buf_in = buf_out;
const int center_volume = static_cast<int>(volume * 0.707f * 65536);
const int fixed_volume = static_cast<int>(volume * 65536);
for (int i = 0; i < sample_count; ++i) {
int center = static_cast<int>(buf_in[kChannel_C] - bias);
int left = static_cast<int>(buf_in[kChannel_L] - bias);
int right = static_cast<int>(buf_in[kChannel_R] - bias);
center = ScaleChannel<Fixed>(center, center_volume);
left = ScaleChannel<Fixed>(left, fixed_volume);
right = ScaleChannel<Fixed>(right, fixed_volume);
buf_out[0] = static_cast<Format>(
AddChannel<Fixed, min_value, max_value>(left, center) + bias);
buf_out[1] = static_cast<Format>(
AddChannel<Fixed, min_value, max_value>(right, center) + bias);
buf_out += 2;
buf_in += channels;
}
}
| static void FoldChannels(Format* buf_out,
int sample_count,
const float volume,
int channels) {
Format* buf_in = buf_out;
const int center_volume = static_cast<int>(volume * 0.707f * 65536);
const int fixed_volume = static_cast<int>(volume * 65536);
for (int i = 0; i < sample_count; ++i) {
int center = static_cast<int>(buf_in[kChannel_C] - bias);
int left = static_cast<int>(buf_in[kChannel_L] - bias);
int right = static_cast<int>(buf_in[kChannel_R] - bias);
center = ScaleChannel<Fixed>(center, center_volume);
left = ScaleChannel<Fixed>(left, fixed_volume);
right = ScaleChannel<Fixed>(right, fixed_volume);
buf_out[0] = static_cast<Format>(
AddChannel<Fixed, min_value, max_value>(left, center) + bias);
buf_out[1] = static_cast<Format>(
AddChannel<Fixed, min_value, max_value>(right, center) + bias);
buf_out += 2;
buf_in += channels;
}
}
| C | Chrome | 0 |
CVE-2012-2896 | https://www.cvedetails.com/cve/CVE-2012-2896/ | CWE-189 | https://github.com/chromium/chromium/commit/3aad1a37affb1ab70d1897f2b03eb8c077264984 | 3aad1a37affb1ab70d1897f2b03eb8c077264984 | Fix SafeAdd and SafeMultiply
BUG=145648,145544
Review URL: https://chromiumcodereview.appspot.com/10916165
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@155478 0039d316-1c4b-4281-b951-d872f2087c98 | bool Texture::AllocateStorage(const gfx::Size& size, GLenum format) {
DCHECK_NE(id_, 0u);
ScopedGLErrorSuppressor suppressor(decoder_);
ScopedTexture2DBinder binder(decoder_, id_);
WrappedTexImage2D(GL_TEXTURE_2D,
0, // mip level
format,
size.width(),
size.height(),
0, // border
format,
GL_UNSIGNED_BYTE,
NULL);
size_ = size;
bool success = glGetError() == GL_NO_ERROR;
if (success) {
uint32 image_size = 0;
GLES2Util::ComputeImageDataSizes(
size.width(), size.height(), format, GL_UNSIGNED_BYTE, 4, &image_size,
NULL, NULL);
memory_tracker_.UpdateMemRepresented(image_size);
TRACE_BACKBUFFER_MEMORY_TOTAL(decoder_);
}
return success;
}
| bool Texture::AllocateStorage(const gfx::Size& size, GLenum format) {
DCHECK_NE(id_, 0u);
ScopedGLErrorSuppressor suppressor(decoder_);
ScopedTexture2DBinder binder(decoder_, id_);
WrappedTexImage2D(GL_TEXTURE_2D,
0, // mip level
format,
size.width(),
size.height(),
0, // border
format,
GL_UNSIGNED_BYTE,
NULL);
size_ = size;
bool success = glGetError() == GL_NO_ERROR;
if (success) {
uint32 image_size = 0;
GLES2Util::ComputeImageDataSizes(
size.width(), size.height(), format, GL_UNSIGNED_BYTE, 4, &image_size,
NULL, NULL);
memory_tracker_.UpdateMemRepresented(image_size);
TRACE_BACKBUFFER_MEMORY_TOTAL(decoder_);
}
return success;
}
| C | Chrome | 0 |
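The commit message above names the patched helpers but not the defect; the CWE-189 tag marks this row as an integer-overflow bug. As a hedged illustration only (the names match the commit, but Chromium's real helpers are C++ templates, and this is not their code), overflow-checked add and multiply in C typically look like this:

#include <stdbool.h>
#include <stdint.h>

/* Reject the operation instead of wrapping modulo 2^32; the result is
 * written only when the check passes. */
static bool SafeAdd(uint32_t a, uint32_t b, uint32_t *dst)
{
	if (a > UINT32_MAX - b)
		return false;	/* a + b would wrap */
	*dst = a + b;
	return true;
}

static bool SafeMultiply(uint32_t a, uint32_t b, uint32_t *dst)
{
	if (a != 0 && b > UINT32_MAX / a)
		return false;	/* a * b would wrap */
	*dst = a * b;
	return true;
}

A size pipeline such as the ComputeImageDataSizes() call in the row above would chain checks of this shape and fail the texture allocation rather than compute a short image_size.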
CVE-2017-15423 | https://www.cvedetails.com/cve/CVE-2017-15423/ | CWE-310 | https://github.com/chromium/chromium/commit/a263d1cf62a9c75be6aaafdec88aacfcef1e8fd2 | a263d1cf62a9c75be6aaafdec88aacfcef1e8fd2 | Roll src/third_party/boringssl/src 664e99a64..696c13bd6
https://boringssl.googlesource.com/boringssl/+log/664e99a6486c293728097c661332f92bf2d847c6..696c13bd6ab78011adfe7b775519c8b7cc82b604
BUG=778101
Change-Id: I8dda4f3db952597148e3c7937319584698d00e1c
Reviewed-on: https://chromium-review.googlesource.com/747941
Reviewed-by: Avi Drissman <avi@chromium.org>
Reviewed-by: David Benjamin <davidben@chromium.org>
Commit-Queue: Steven Valdez <svaldez@chromium.org>
Cr-Commit-Position: refs/heads/master@{#513774} | void RenderThreadImpl::OnRendererHidden() {
blink::MainThreadIsolate()->IsolateInBackgroundNotification();
if (!GetContentClient()->renderer()->RunIdleHandlerWhenWidgetsHidden())
return;
renderer_scheduler_->SetRendererHidden(true);
ScheduleIdleHandler(kInitialIdleHandlerDelayMs);
}
| void RenderThreadImpl::OnRendererHidden() {
blink::MainThreadIsolate()->IsolateInBackgroundNotification();
if (!GetContentClient()->renderer()->RunIdleHandlerWhenWidgetsHidden())
return;
renderer_scheduler_->SetRendererHidden(true);
ScheduleIdleHandler(kInitialIdleHandlerDelayMs);
}
| C | Chrome | 0 |
CVE-2018-12232 | https://www.cvedetails.com/cve/CVE-2018-12232/ | CWE-362 | https://github.com/torvalds/linux/commit/6d8c50dcb029872b298eea68cc6209c866fd3e14 | 6d8c50dcb029872b298eea68cc6209c866fd3e14 | socket: close race condition between sock_close() and sockfs_setattr()
fchownat() doesn't even hold refcnt of fd until it figures out
fd is really needed (otherwise it is ignored) and releases it after
it resolves the path. This means sock_close() could race with
sockfs_setattr(), which leads to a NULL pointer dereference
since typically we set sock->sk to NULL in ->release().
As pointed out by Al, this is unique to sockfs. So we can fix this
in socket layer by acquiring inode_lock in sock_close() and
checking against NULL in sockfs_setattr().
sock_release() is called in many places; only the sock_close() path
path matters here. And fortunately, this should not affect normal
sock_close() as it is only called when the last fd refcnt is gone.
It only affects sock_close() with a parallel sockfs_setattr() in
progress, which is not common.
Fixes: 86741ec25462 ("net: core: Add a UID field to struct sock.")
Reported-by: shankarapailoor <shankarapailoor@gmail.com>
Cc: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
Cc: Lorenzo Colitti <lorenzo@google.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
struct socket *sock;
struct sock *sk;
void __user *argp = (void __user *)arg;
int pid, err;
struct net *net;
sock = file->private_data;
sk = sock->sk;
net = sock_net(sk);
if (unlikely(cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15))) {
struct ifreq ifr;
bool need_copyout;
if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
return -EFAULT;
err = dev_ioctl(net, cmd, &ifr, &need_copyout);
if (!err && need_copyout)
if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
return -EFAULT;
} else
#ifdef CONFIG_WEXT_CORE
if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
err = wext_handle_ioctl(net, cmd, argp);
} else
#endif
switch (cmd) {
case FIOSETOWN:
case SIOCSPGRP:
err = -EFAULT;
if (get_user(pid, (int __user *)argp))
break;
err = f_setown(sock->file, pid, 1);
break;
case FIOGETOWN:
case SIOCGPGRP:
err = put_user(f_getown(sock->file),
(int __user *)argp);
break;
case SIOCGIFBR:
case SIOCSIFBR:
case SIOCBRADDBR:
case SIOCBRDELBR:
err = -ENOPKG;
if (!br_ioctl_hook)
request_module("bridge");
mutex_lock(&br_ioctl_mutex);
if (br_ioctl_hook)
err = br_ioctl_hook(net, cmd, argp);
mutex_unlock(&br_ioctl_mutex);
break;
case SIOCGIFVLAN:
case SIOCSIFVLAN:
err = -ENOPKG;
if (!vlan_ioctl_hook)
request_module("8021q");
mutex_lock(&vlan_ioctl_mutex);
if (vlan_ioctl_hook)
err = vlan_ioctl_hook(net, argp);
mutex_unlock(&vlan_ioctl_mutex);
break;
case SIOCADDDLCI:
case SIOCDELDLCI:
err = -ENOPKG;
if (!dlci_ioctl_hook)
request_module("dlci");
mutex_lock(&dlci_ioctl_mutex);
if (dlci_ioctl_hook)
err = dlci_ioctl_hook(cmd, argp);
mutex_unlock(&dlci_ioctl_mutex);
break;
case SIOCGSKNS:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
break;
err = open_related_ns(&net->ns, get_net_ns);
break;
default:
err = sock_do_ioctl(net, sock, cmd, arg);
break;
}
return err;
}
| static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
struct socket *sock;
struct sock *sk;
void __user *argp = (void __user *)arg;
int pid, err;
struct net *net;
sock = file->private_data;
sk = sock->sk;
net = sock_net(sk);
if (unlikely(cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15))) {
struct ifreq ifr;
bool need_copyout;
if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
return -EFAULT;
err = dev_ioctl(net, cmd, &ifr, &need_copyout);
if (!err && need_copyout)
if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
return -EFAULT;
} else
#ifdef CONFIG_WEXT_CORE
if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
err = wext_handle_ioctl(net, cmd, argp);
} else
#endif
switch (cmd) {
case FIOSETOWN:
case SIOCSPGRP:
err = -EFAULT;
if (get_user(pid, (int __user *)argp))
break;
err = f_setown(sock->file, pid, 1);
break;
case FIOGETOWN:
case SIOCGPGRP:
err = put_user(f_getown(sock->file),
(int __user *)argp);
break;
case SIOCGIFBR:
case SIOCSIFBR:
case SIOCBRADDBR:
case SIOCBRDELBR:
err = -ENOPKG;
if (!br_ioctl_hook)
request_module("bridge");
mutex_lock(&br_ioctl_mutex);
if (br_ioctl_hook)
err = br_ioctl_hook(net, cmd, argp);
mutex_unlock(&br_ioctl_mutex);
break;
case SIOCGIFVLAN:
case SIOCSIFVLAN:
err = -ENOPKG;
if (!vlan_ioctl_hook)
request_module("8021q");
mutex_lock(&vlan_ioctl_mutex);
if (vlan_ioctl_hook)
err = vlan_ioctl_hook(net, argp);
mutex_unlock(&vlan_ioctl_mutex);
break;
case SIOCADDDLCI:
case SIOCDELDLCI:
err = -ENOPKG;
if (!dlci_ioctl_hook)
request_module("dlci");
mutex_lock(&dlci_ioctl_mutex);
if (dlci_ioctl_hook)
err = dlci_ioctl_hook(cmd, argp);
mutex_unlock(&dlci_ioctl_mutex);
break;
case SIOCGSKNS:
err = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
break;
err = open_related_ns(&net->ns, get_net_ns);
break;
default:
err = sock_do_ioctl(net, sock, cmd, arg);
break;
}
return err;
}
| C | linux | 0 |
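The commit message spells out both halves of the fix: serialize ->release() against sockfs_setattr() with the inode lock, and treat a NULL sock->sk as "already released". A condensed sketch of that shape, simplified from net/socket.c (not the verbatim mainline diff; the surrounding kernel types are assumed from context):

/* setattr side: sock->sk may have been cleared by a concurrent close. */
static int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	int err = simple_setattr(d_inode(dentry), iattr);

	if (!err && (iattr->ia_valid & ATTR_UID)) {
		struct socket *sock = SOCKET_I(d_inode(dentry));

		if (sock->sk)
			sock->sk->sk_uid = iattr->ia_uid;
		else
			err = -ENOENT;	/* ->release() already ran */
	}
	return err;
}

/* close side: hold the inode lock across ->release() so the NULL check
 * above cannot race with sock->sk going away. */
static void __sock_release(struct socket *sock, struct inode *inode)
{
	if (sock->ops) {
		if (inode)
			inode_lock(inode);
		sock->ops->release(sock);	/* typically sets sock->sk = NULL */
		if (inode)
			inode_unlock(inode);
		sock->ops = NULL;
	}
}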
CVE-2013-2871 | https://www.cvedetails.com/cve/CVE-2013-2871/ | CWE-20 | https://github.com/chromium/chromium/commit/bb9cfb0aba25f4b13e57bdd4a9fac80ba071e7b9 | bb9cfb0aba25f4b13e57bdd4a9fac80ba071e7b9 | Setting input.x-webkit-speech should not cause focus change
In r150866, we introduced element()->focus() in destroyShadowSubtree()
to retain focus on <input> when its type attribute gets changed.
But when x-webkit-speech attribute is changed, the element is detached
before calling destroyShadowSubtree(), and element()->focus() failed.
This patch moves detach() after destroyShadowSubtree() to fix the
problem.
BUG=243818
TEST=fast/forms/input-type-change-focusout.html
NOTRY=true
Review URL: https://chromiumcodereview.appspot.com/16084005
git-svn-id: svn://svn.chromium.org/blink/trunk@151444 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | String HTMLInputElement::defaultToolTip() const
{
return m_inputType->defaultToolTip();
}
| String HTMLInputElement::defaultToolTip() const
{
return m_inputType->defaultToolTip();
}
| C | Chrome | 0 |
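The fix here is purely a call-ordering change: tear down the shadow subtree (which may call element()->focus()) while the element is still attached, and only then detach. A schematic C-style sketch with hypothetical helper names; the real change lives in Blink's C++ HTMLInputElement::updateType(), which is far larger:

/* Hypothetical, schematic shape of the reordering; none of these helper
 * names exist in Blink. */
void update_type(struct element *e)
{
	/* Before the patch, detach ran first, so the focus() call inside the
	 * teardown hit a detached (unfocusable) element and silently failed. */
	destroy_shadow_subtree(e);	/* may call element_focus(e) */
	if (element_attached(e))
		detach_element(e);	/* moved after the teardown */
	create_shadow_subtree(e);
}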
CVE-2017-17052 | https://www.cvedetails.com/cve/CVE-2017-17052/ | CWE-416 | https://github.com/torvalds/linux/commit/2b7e8665b4ff51c034c55df3cff76518d1a9ee3a | 2b7e8665b4ff51c034c55df3cff76518d1a9ee3a | fork: fix incorrect fput of ->exe_file causing use-after-free
Commit 7c051267931a ("mm, fork: make dup_mmap wait for mmap_sem for
write killable") made it possible to kill a forking task while it is
waiting to acquire its ->mmap_sem for write, in dup_mmap().
However, it was overlooked that this introduced an new error path before
a reference is taken on the mm_struct's ->exe_file. Since the
->exe_file of the new mm_struct was already set to the old ->exe_file by
the memcpy() in dup_mm(), it was possible for the mmput() in the error
path of dup_mm() to drop a reference to ->exe_file which was never
taken.
This caused the struct file to later be freed prematurely.
Fix it by updating mm_init() to NULL out the ->exe_file, in the same
place it clears other things like the list of mmaps.
This bug was found by syzkaller. It can be reproduced using the
following C program:
#define _GNU_SOURCE
#include <pthread.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>
static void *mmap_thread(void *_arg)
{
for (;;) {
mmap(NULL, 0x1000000, PROT_READ,
MAP_POPULATE|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
}
}
static void *fork_thread(void *_arg)
{
usleep(rand() % 10000);
fork();
}
int main(void)
{
fork();
fork();
fork();
for (;;) {
if (fork() == 0) {
pthread_t t;
pthread_create(&t, NULL, mmap_thread, NULL);
pthread_create(&t, NULL, fork_thread, NULL);
usleep(rand() % 10000);
syscall(__NR_exit_group, 0);
}
wait(NULL);
}
}
No special kernel config options are needed. It usually causes a NULL
pointer dereference in __remove_shared_vm_struct() during exit, or in
dup_mmap() (which is usually inlined into copy_process()) during fork.
Both are due to a vm_area_struct's ->vm_file being used after it's
already been freed.
Google Bug Id: 64772007
Link: http://lkml.kernel.org/r/20170823211408.31198-1-ebiggers3@gmail.com
Fixes: 7c051267931a ("mm, fork: make dup_mmap wait for mmap_sem for write killable")
Signed-off-by: Eric Biggers <ebiggers@google.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: <stable@vger.kernel.org> [v4.7+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> | int sysctl_max_threads(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table t;
int ret;
int threads = max_threads;
int min = MIN_THREADS;
int max = MAX_THREADS;
t = *table;
t.data = &threads;
t.extra1 = &min;
t.extra2 = &max;
ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
if (ret || !write)
return ret;
set_max_threads(threads);
return 0;
}
| int sysctl_max_threads(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table t;
int ret;
int threads = max_threads;
int min = MIN_THREADS;
int max = MAX_THREADS;
t = *table;
t.data = &threads;
t.extra1 = &min;
t.extra2 = &max;
ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
if (ret || !write)
return ret;
set_max_threads(threads);
return 0;
}
| C | linux | 0 |
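The message pinpoints the fix: dup_mm() memcpy()s the whole old mm_struct, so mm_init() must reset every pointer the child should not share before any error path can mmput() it. A condensed sketch of the patched initializer, simplified from kernel/fork.c (only a representative subset of the cleared fields is shown):

static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
				 struct user_namespace *user_ns)
{
	mm->mmap = NULL;		/* the "list of mmaps" the message cites */
	mm->mm_rb = RB_ROOT;
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->core_state = NULL;
	mm->map_count = 0;
	/* The fix: drop the stale ->exe_file copied in by dup_mm()'s memcpy(),
	 * so an early mmput() never fput()s a reference that was never taken. */
	RCU_INIT_POINTER(mm->exe_file, NULL);
	return mm;
}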
CVE-2011-1799 | https://www.cvedetails.com/cve/CVE-2011-1799/ | CWE-20 | https://github.com/chromium/chromium/commit/5fd35e5359c6345b8709695cd71fba307318e6aa | 5fd35e5359c6345b8709695cd71fba307318e6aa | Source/WebCore: Fix for bug 64046 - Wrong image height in absolutely positioned div in
relatively positioned parent with bottom padding.
https://bugs.webkit.org/show_bug.cgi?id=64046
Patch by Kulanthaivel Palanichamy <kulanthaivel@codeaurora.org> on 2011-07-21
Reviewed by David Hyatt.
Test: fast/css/absolute-child-with-percent-height-inside-relative-parent.html
* rendering/RenderBox.cpp:
(WebCore::RenderBox::availableLogicalHeightUsing):
LayoutTests: Test to cover absolutely positioned child with percentage height
in relatively positioned parent with bottom padding.
https://bugs.webkit.org/show_bug.cgi?id=64046
Patch by Kulanthaivel Palanichamy <kulanthaivel@codeaurora.org> on 2011-07-21
Reviewed by David Hyatt.
* fast/css/absolute-child-with-percent-height-inside-relative-parent-expected.txt: Added.
* fast/css/absolute-child-with-percent-height-inside-relative-parent.html: Added.
git-svn-id: svn://svn.chromium.org/blink/trunk@91533 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | LayoutPoint RenderBox::flipForWritingMode(const RenderBox* child, const LayoutPoint& point, FlippingAdjustment adjustment) const
{
if (!style()->isFlippedBlocksWritingMode())
return point;
if (isHorizontalWritingMode())
return LayoutPoint(point.x(), point.y() + height() - child->height() - child->y() - (adjustment == ParentToChildFlippingAdjustment ? child->y() : 0));
return LayoutPoint(point.x() + width() - child->width() - child->x() - (adjustment == ParentToChildFlippingAdjustment ? child->x() : 0), point.y());
}
| LayoutPoint RenderBox::flipForWritingMode(const RenderBox* child, const LayoutPoint& point, FlippingAdjustment adjustment) const
{
if (!style()->isFlippedBlocksWritingMode())
return point;
if (isHorizontalWritingMode())
return LayoutPoint(point.x(), point.y() + height() - child->height() - child->y() - (adjustment == ParentToChildFlippingAdjustment ? child->y() : 0));
return LayoutPoint(point.x() + width() - child->width() - child->x() - (adjustment == ParentToChildFlippingAdjustment ? child->x() : 0), point.y());
}
| C | Chrome | 0 |
CVE-2018-14358 | https://www.cvedetails.com/cve/CVE-2018-14358/ | CWE-119 | https://github.com/neomutt/neomutt/commit/1b0f0d0988e6df4e32e9f4bf8780846ea95d4485 | 1b0f0d0988e6df4e32e9f4bf8780846ea95d4485 | Don't overflow stack buffer in msg_parse_fetch | static void generate_seqset(struct Buffer *b, struct ImapData *idata,
unsigned int msn_begin, unsigned int msn_end)
{
int chunks = 0;
int state = 0; /* 1: single msn, 2: range of msn */
unsigned int msn, range_begin, range_end;
for (msn = msn_begin; msn <= msn_end + 1; msn++)
{
if (msn <= msn_end && !idata->msn_index[msn - 1])
{
switch (state)
{
case 1: /* single: convert to a range */
state = 2;
/* fallthrough */
case 2: /* extend range ending */
range_end = msn;
break;
default:
state = 1;
range_begin = msn;
break;
}
}
else if (state)
{
if (chunks++)
mutt_buffer_addch(b, ',');
if (chunks == 150)
break;
if (state == 1)
mutt_buffer_printf(b, "%u", range_begin);
else if (state == 2)
mutt_buffer_printf(b, "%u:%u", range_begin, range_end);
state = 0;
}
}
/* Too big. Just query the whole range then. */
if (chunks == 150 || mutt_str_strlen(b->data) > 500)
{
b->dptr = b->data;
mutt_buffer_printf(b, "%u:%u", msn_begin, msn_end);
}
}
| static void generate_seqset(struct Buffer *b, struct ImapData *idata,
unsigned int msn_begin, unsigned int msn_end)
{
int chunks = 0;
int state = 0; /* 1: single msn, 2: range of msn */
unsigned int msn, range_begin, range_end;
for (msn = msn_begin; msn <= msn_end + 1; msn++)
{
if (msn <= msn_end && !idata->msn_index[msn - 1])
{
switch (state)
{
case 1: /* single: convert to a range */
state = 2;
/* fallthrough */
case 2: /* extend range ending */
range_end = msn;
break;
default:
state = 1;
range_begin = msn;
break;
}
}
else if (state)
{
if (chunks++)
mutt_buffer_addch(b, ',');
if (chunks == 150)
break;
if (state == 1)
mutt_buffer_printf(b, "%u", range_begin);
else if (state == 2)
mutt_buffer_printf(b, "%u:%u", range_begin, range_end);
state = 0;
}
}
/* Too big. Just query the whole range then. */
if (chunks == 150 || mutt_str_strlen(b->data) > 500)
{
b->dptr = b->data;
mutt_buffer_printf(b, "%u:%u", msn_begin, msn_end);
}
}
| C | neomutt | 0 |
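The message gives only the symptom, a stack buffer overflow in msg_parse_fetch(), and the row's function is unrelated to the fix. As an illustration of the usual remedy, here is a bounded copy of a quoted IMAP atom into a fixed buffer; the helper name and buffer contract are hypothetical, not neomutt's code (assumes buflen >= 1):

#include <stddef.h>

static const char *copy_quoted_atom(const char *s, char *buf, size_t buflen)
{
	size_t n = 0;

	while (*s && *s != '"')
	{
		if (n < buflen - 1)	/* stop writing once buf is full... */
			buf[n++] = *s;
		s++;			/* ...but keep scanning to the quote */
	}
	buf[n] = '\0';
	return s;		/* caller resumes parsing at the close quote */
}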
CVE-2016-8654 | https://www.cvedetails.com/cve/CVE-2016-8654/ | CWE-119 | https://github.com/mdadams/jasper/commit/4a59cfaf9ab3d48fca4a15c0d2674bf7138e3d1a | 4a59cfaf9ab3d48fca4a15c0d2674bf7138e3d1a | Fixed a buffer overrun problem in the QMFB code in the JPC codec
that was caused by a buffer being allocated with a size that was too small
in some cases.
Added a new regression test case. | void jpc_qmfb_join_colres(jpc_fix_t *a, int numrows, int numcols,
int stride, int parity)
{
int bufsize = JPC_CEILDIVPOW2(numrows, 1);
jpc_fix_t joinbuf[QMFB_JOINBUFSIZE * JPC_QMFB_COLGRPSIZE];
jpc_fix_t *buf = joinbuf;
jpc_fix_t *srcptr;
jpc_fix_t *dstptr;
register jpc_fix_t *srcptr2;
register jpc_fix_t *dstptr2;
register int n;
register int i;
int hstartcol;
/* Allocate memory for the join buffer from the heap. */
if (bufsize > QMFB_JOINBUFSIZE) {
if (!(buf = jas_alloc3(bufsize, numcols, sizeof(jpc_fix_t)))) {
/* We have no choice but to commit suicide. */
abort();
}
}
hstartcol = (numrows + 1 - parity) >> 1;
/* Save the samples from the lowpass channel. */
n = hstartcol;
srcptr = &a[0];
dstptr = buf;
while (n-- > 0) {
dstptr2 = dstptr;
srcptr2 = srcptr;
for (i = 0; i < numcols; ++i) {
*dstptr2 = *srcptr2;
++dstptr2;
++srcptr2;
}
srcptr += stride;
dstptr += numcols;
}
/* Copy the samples from the highpass channel into place. */
srcptr = &a[hstartcol * stride];
dstptr = &a[(1 - parity) * stride];
n = numrows - hstartcol;
while (n-- > 0) {
dstptr2 = dstptr;
srcptr2 = srcptr;
for (i = 0; i < numcols; ++i) {
*dstptr2 = *srcptr2;
++dstptr2;
++srcptr2;
}
dstptr += 2 * stride;
srcptr += stride;
}
/* Copy the samples from the lowpass channel into place. */
srcptr = buf;
dstptr = &a[parity * stride];
n = hstartcol;
while (n-- > 0) {
dstptr2 = dstptr;
srcptr2 = srcptr;
for (i = 0; i < numcols; ++i) {
*dstptr2 = *srcptr2;
++dstptr2;
++srcptr2;
}
dstptr += 2 * stride;
srcptr += numcols;
}
/* If the join buffer was allocated on the heap, free this memory. */
if (buf != joinbuf) {
jas_free(buf);
}
}
| void jpc_qmfb_join_colres(jpc_fix_t *a, int numrows, int numcols,
int stride, int parity)
{
int bufsize = JPC_CEILDIVPOW2(numrows, 1);
jpc_fix_t joinbuf[QMFB_JOINBUFSIZE * JPC_QMFB_COLGRPSIZE];
jpc_fix_t *buf = joinbuf;
jpc_fix_t *srcptr;
jpc_fix_t *dstptr;
register jpc_fix_t *srcptr2;
register jpc_fix_t *dstptr2;
register int n;
register int i;
int hstartcol;
/* Allocate memory for the join buffer from the heap. */
if (bufsize > QMFB_JOINBUFSIZE) {
if (!(buf = jas_alloc3(bufsize, numcols, sizeof(jpc_fix_t)))) {
/* We have no choice but to commit suicide. */
abort();
}
}
hstartcol = (numrows + 1 - parity) >> 1;
/* Save the samples from the lowpass channel. */
n = hstartcol;
srcptr = &a[0];
dstptr = buf;
while (n-- > 0) {
dstptr2 = dstptr;
srcptr2 = srcptr;
for (i = 0; i < numcols; ++i) {
*dstptr2 = *srcptr2;
++dstptr2;
++srcptr2;
}
srcptr += stride;
dstptr += numcols;
}
/* Copy the samples from the highpass channel into place. */
srcptr = &a[hstartcol * stride];
dstptr = &a[(1 - parity) * stride];
n = numrows - hstartcol;
while (n-- > 0) {
dstptr2 = dstptr;
srcptr2 = srcptr;
for (i = 0; i < numcols; ++i) {
*dstptr2 = *srcptr2;
++dstptr2;
++srcptr2;
}
dstptr += 2 * stride;
srcptr += stride;
}
/* Copy the samples from the lowpass channel into place. */
srcptr = buf;
dstptr = &a[parity * stride];
n = hstartcol;
while (n-- > 0) {
dstptr2 = dstptr;
srcptr2 = srcptr;
for (i = 0; i < numcols; ++i) {
*dstptr2 = *srcptr2;
++dstptr2;
++srcptr2;
}
dstptr += 2 * stride;
srcptr += numcols;
}
/* If the join buffer was allocated on the heap, free this memory. */
if (buf != joinbuf) {
jas_free(buf);
}
}
| C | jasper | 0 |
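The message says the overrun came from an undersized allocation, and the fixed code above sizes its heap fallback with jas_alloc3(bufsize, numcols, sizeof(jpc_fix_t)), accounting for all three factors. A sketch of what an overflow-checked three-factor allocator of that kind can look like (a guess at the contract, not JasPer's actual jas_alloc3 source):

#include <stdint.h>
#include <stdlib.h>

/* Return NULL instead of allocating a short buffer when n1 * n2 * sz
 * would wrap around SIZE_MAX. */
static void *alloc3_checked(size_t n1, size_t n2, size_t sz)
{
	if (n2 != 0 && n1 > SIZE_MAX / n2)
		return NULL;
	if (sz != 0 && n1 * n2 > SIZE_MAX / sz)
		return NULL;
	return malloc(n1 * n2 * sz);
}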