CVE ID | CVE Page | CWE ID | codeLink | commit_id | commit_message | func_after | func_before | lang | project | vul |
---|---|---|---|---|---|---|---|---|---|---|
CVE-2013-4542 | https://www.cvedetails.com/cve/CVE-2013-4542/ | CWE-119 | https://git.qemu.org/?p=qemu.git;a=commitdiff;h=3c3ce981423e0d6c18af82ee62f1850c2cda5976 | 3c3ce981423e0d6c18af82ee62f1850c2cda5976 | null | static void virtio_scsi_bad_req(void)
{
error_report("wrong size for virtio-scsi headers");
exit(1);
}
| static void virtio_scsi_bad_req(void)
{
error_report("wrong size for virtio-scsi headers");
exit(1);
}
| C | qemu | 0 |
null | null | null | https://github.com/chromium/chromium/commit/5c9d37f8055700c36b4c9006b0d4d81f4f961a06 | 5c9d37f8055700c36b4c9006b0d4d81f4f961a06 | 2010-07-26 Tony Gentilcore <tonyg@chromium.org>
Reviewed by Darin Fisher.
Move DocumentLoadTiming struct to a new file
https://bugs.webkit.org/show_bug.cgi?id=42917
Also makes DocumentLoadTiming Noncopyable.
No new tests because no new functionality.
* GNUmakefile.am:
* WebCore.gypi:
* WebCore.vcproj/WebCore.vcproj:
* WebCore.xcodeproj/project.pbxproj:
* loader/DocumentLoadTiming.h: Added.
(WebCore::DocumentLoadTiming::DocumentLoadTiming):
* loader/DocumentLoader.h:
* loader/FrameLoader.cpp:
* loader/FrameLoaderTypes.h:
* loader/MainResourceLoader.cpp:
* page/Timing.cpp:
git-svn-id: svn://svn.chromium.org/blink/trunk@64051 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | void FrameLoader::loadWithDocumentLoader(DocumentLoader* loader, FrameLoadType type, PassRefPtr<FormState> prpFormState)
{
ASSERT(m_client->hasWebView());
ASSERT(m_frame->view());
if (m_pageDismissalEventBeingDispatched)
return;
policyChecker()->setLoadType(type);
RefPtr<FormState> formState = prpFormState;
bool isFormSubmission = formState;
const KURL& newURL = loader->request().url();
if (shouldScrollToAnchor(isFormSubmission, policyChecker()->loadType(), newURL)) {
RefPtr<DocumentLoader> oldDocumentLoader = m_documentLoader;
NavigationAction action(newURL, policyChecker()->loadType(), isFormSubmission);
oldDocumentLoader->setTriggeringAction(action);
policyChecker()->stopCheck();
policyChecker()->checkNavigationPolicy(loader->request(), oldDocumentLoader.get(), formState,
callContinueFragmentScrollAfterNavigationPolicy, this);
} else {
if (Frame* parent = m_frame->tree()->parent())
loader->setOverrideEncoding(parent->loader()->documentLoader()->overrideEncoding());
policyChecker()->stopCheck();
setPolicyDocumentLoader(loader);
if (loader->triggeringAction().isEmpty())
loader->setTriggeringAction(NavigationAction(newURL, policyChecker()->loadType(), isFormSubmission));
if (Element* ownerElement = m_frame->document()->ownerElement()) {
if (!ownerElement->dispatchBeforeLoadEvent(loader->request().url().string())) {
continueLoadAfterNavigationPolicy(loader->request(), formState, false);
return;
}
}
policyChecker()->checkNavigationPolicy(loader->request(), loader, formState,
callContinueLoadAfterNavigationPolicy, this);
}
}
| void FrameLoader::loadWithDocumentLoader(DocumentLoader* loader, FrameLoadType type, PassRefPtr<FormState> prpFormState)
{
ASSERT(m_client->hasWebView());
ASSERT(m_frame->view());
if (m_pageDismissalEventBeingDispatched)
return;
policyChecker()->setLoadType(type);
RefPtr<FormState> formState = prpFormState;
bool isFormSubmission = formState;
const KURL& newURL = loader->request().url();
if (shouldScrollToAnchor(isFormSubmission, policyChecker()->loadType(), newURL)) {
RefPtr<DocumentLoader> oldDocumentLoader = m_documentLoader;
NavigationAction action(newURL, policyChecker()->loadType(), isFormSubmission);
oldDocumentLoader->setTriggeringAction(action);
policyChecker()->stopCheck();
policyChecker()->checkNavigationPolicy(loader->request(), oldDocumentLoader.get(), formState,
callContinueFragmentScrollAfterNavigationPolicy, this);
} else {
if (Frame* parent = m_frame->tree()->parent())
loader->setOverrideEncoding(parent->loader()->documentLoader()->overrideEncoding());
policyChecker()->stopCheck();
setPolicyDocumentLoader(loader);
if (loader->triggeringAction().isEmpty())
loader->setTriggeringAction(NavigationAction(newURL, policyChecker()->loadType(), isFormSubmission));
if (Element* ownerElement = m_frame->document()->ownerElement()) {
if (!ownerElement->dispatchBeforeLoadEvent(loader->request().url().string())) {
continueLoadAfterNavigationPolicy(loader->request(), formState, false);
return;
}
}
policyChecker()->checkNavigationPolicy(loader->request(), loader, formState,
callContinueLoadAfterNavigationPolicy, this);
}
}
| C | Chrome | 0 |
CVE-2015-1352 | https://www.cvedetails.com/cve/CVE-2015-1352/ | null | https://git.php.net/?p=php-src.git;a=commit;h=124fb22a13fafa3648e4e15b4f207c7096d8155e | 124fb22a13fafa3648e4e15b4f207c7096d8155e | null | PHP_FUNCTION(pg_port)
{
php_pgsql_get_link_info(INTERNAL_FUNCTION_PARAM_PASSTHRU,PHP_PG_PORT);
}
| PHP_FUNCTION(pg_port)
{
php_pgsql_get_link_info(INTERNAL_FUNCTION_PARAM_PASSTHRU,PHP_PG_PORT);
}
| C | php | 0 |
CVE-2019-15296 | https://www.cvedetails.com/cve/CVE-2019-15296/ | CWE-119 | https://github.com/knik0/faad2/commit/942c3e0aee748ea6fe97cb2c1aa5893225316174 | 942c3e0aee748ea6fe97cb2c1aa5893225316174 | Fix a couple buffer overflows
https://hackerone.com/reports/502816
https://hackerone.com/reports/507858
https://github.com/videolan/vlc/blob/master/contrib/src/faad2/faad2-fix-overflows.patch | int8_t GASpecificConfig(bitfile *ld, mp4AudioSpecificConfig *mp4ASC,
program_config *pce_out)
{
program_config pce;
/* 1024 or 960 */
mp4ASC->frameLengthFlag = faad_get1bit(ld
DEBUGVAR(1,138,"GASpecificConfig(): FrameLengthFlag"));
#ifndef ALLOW_SMALL_FRAMELENGTH
if (mp4ASC->frameLengthFlag == 1)
return -3;
#endif
mp4ASC->dependsOnCoreCoder = faad_get1bit(ld
DEBUGVAR(1,139,"GASpecificConfig(): DependsOnCoreCoder"));
if (mp4ASC->dependsOnCoreCoder == 1)
{
mp4ASC->coreCoderDelay = (uint16_t)faad_getbits(ld, 14
DEBUGVAR(1,140,"GASpecificConfig(): CoreCoderDelay"));
}
mp4ASC->extensionFlag = faad_get1bit(ld DEBUGVAR(1,141,"GASpecificConfig(): ExtensionFlag"));
if (mp4ASC->channelsConfiguration == 0)
{
if (program_config_element(&pce, ld))
return -3;
if (pce_out != NULL)
memcpy(pce_out, &pce, sizeof(program_config));
/*
if (pce.num_valid_cc_elements)
return -3;
*/
}
#ifdef ERROR_RESILIENCE
if (mp4ASC->extensionFlag == 1)
{
/* Error resilience not supported yet */
if (mp4ASC->objectTypeIndex >= ER_OBJECT_START)
{
mp4ASC->aacSectionDataResilienceFlag = faad_get1bit(ld
DEBUGVAR(1,144,"GASpecificConfig(): aacSectionDataResilienceFlag"));
mp4ASC->aacScalefactorDataResilienceFlag = faad_get1bit(ld
DEBUGVAR(1,145,"GASpecificConfig(): aacScalefactorDataResilienceFlag"));
mp4ASC->aacSpectralDataResilienceFlag = faad_get1bit(ld
DEBUGVAR(1,146,"GASpecificConfig(): aacSpectralDataResilienceFlag"));
}
/* 1 bit: extensionFlag3 */
faad_getbits(ld, 1);
}
#endif
return 0;
}
| int8_t GASpecificConfig(bitfile *ld, mp4AudioSpecificConfig *mp4ASC,
program_config *pce_out)
{
program_config pce;
/* 1024 or 960 */
mp4ASC->frameLengthFlag = faad_get1bit(ld
DEBUGVAR(1,138,"GASpecificConfig(): FrameLengthFlag"));
#ifndef ALLOW_SMALL_FRAMELENGTH
if (mp4ASC->frameLengthFlag == 1)
return -3;
#endif
mp4ASC->dependsOnCoreCoder = faad_get1bit(ld
DEBUGVAR(1,139,"GASpecificConfig(): DependsOnCoreCoder"));
if (mp4ASC->dependsOnCoreCoder == 1)
{
mp4ASC->coreCoderDelay = (uint16_t)faad_getbits(ld, 14
DEBUGVAR(1,140,"GASpecificConfig(): CoreCoderDelay"));
}
mp4ASC->extensionFlag = faad_get1bit(ld DEBUGVAR(1,141,"GASpecificConfig(): ExtensionFlag"));
if (mp4ASC->channelsConfiguration == 0)
{
if (program_config_element(&pce, ld))
return -3;
if (pce_out != NULL)
memcpy(pce_out, &pce, sizeof(program_config));
/*
if (pce.num_valid_cc_elements)
return -3;
*/
}
#ifdef ERROR_RESILIENCE
if (mp4ASC->extensionFlag == 1)
{
/* Error resilience not supported yet */
if (mp4ASC->objectTypeIndex >= ER_OBJECT_START)
{
mp4ASC->aacSectionDataResilienceFlag = faad_get1bit(ld
DEBUGVAR(1,144,"GASpecificConfig(): aacSectionDataResilienceFlag"));
mp4ASC->aacScalefactorDataResilienceFlag = faad_get1bit(ld
DEBUGVAR(1,145,"GASpecificConfig(): aacScalefactorDataResilienceFlag"));
mp4ASC->aacSpectralDataResilienceFlag = faad_get1bit(ld
DEBUGVAR(1,146,"GASpecificConfig(): aacSpectralDataResilienceFlag"));
}
/* 1 bit: extensionFlag3 */
faad_getbits(ld, 1);
}
#endif
return 0;
}
| C | faad2 | 0 |
CVE-2015-8617 | https://www.cvedetails.com/cve/CVE-2015-8617/ | CWE-134 | https://github.com/php/php-src/commit/b101a6bbd4f2181c360bd38e7683df4a03cba83e | b101a6bbd4f2181c360bd38e7683df4a03cba83e | Use format string | ZEND_API void zend_timeout(int dummy) /* {{{ */
{
if (zend_on_timeout) {
#ifdef ZEND_SIGNALS
/*
We got here because we got a timeout signal, so we are in a signal handler
at this point. However, we want to be able to timeout any user-supplied
shutdown functions, so pretend we are not in a signal handler while we are
calling these
*/
SIGG(running) = 0;
#endif
zend_on_timeout(EG(timeout_seconds));
}
zend_error_noreturn(E_ERROR, "Maximum execution time of %pd second%s exceeded", EG(timeout_seconds), EG(timeout_seconds) == 1 ? "" : "s");
}
/* }}} */
| ZEND_API void zend_timeout(int dummy) /* {{{ */
{
if (zend_on_timeout) {
#ifdef ZEND_SIGNALS
/*
We got here because we got a timeout signal, so we are in a signal handler
at this point. However, we want to be able to timeout any user-supplied
shutdown functions, so pretend we are not in a signal handler while we are
calling these
*/
SIGG(running) = 0;
#endif
zend_on_timeout(EG(timeout_seconds));
}
zend_error_noreturn(E_ERROR, "Maximum execution time of %pd second%s exceeded", EG(timeout_seconds), EG(timeout_seconds) == 1 ? "" : "s");
}
/* }}} */
| C | php-src | 0 |
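*Note:* the one-line commit message above ("Use format string"), together with the CWE-134 tag, points at the classic format-string fix pattern. The sketch below illustrates that general pattern only, not PHP's actual change; `report` and `user_msg` are hypothetical names.

```c
#include <stdio.h>

/* Classic CWE-134 fix: never pass attacker-influenced text as the
 * format argument itself. Illustrative only. */
void report(const char *user_msg)
{
    /* printf(user_msg);       -- vulnerable: %n/%s in user_msg is interpreted */
    printf("%s", user_msg);  /* safe: user_msg is treated as data, not a format */
}
```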
CVE-2016-9137 | https://www.cvedetails.com/cve/CVE-2016-9137/ | CWE-416 | https://git.php.net/?p=php-src.git;a=commit;h=0e6fe3a4c96be2d3e88389a5776f878021b4c59f | 0e6fe3a4c96be2d3e88389a5776f878021b4c59f | null | ZEND_API int zend_update_static_property(zend_class_entry *scope, const char *name, int name_length, zval *value TSRMLS_DC) /* {{{ */
{
zval **property;
zend_class_entry *old_scope = EG(scope);
EG(scope) = scope;
property = zend_std_get_static_property(scope, name, name_length, 0, NULL TSRMLS_CC);
EG(scope) = old_scope;
if (!property) {
return FAILURE;
} else {
if (*property != value) {
if (PZVAL_IS_REF(*property)) {
zval_dtor(*property);
Z_TYPE_PP(property) = Z_TYPE_P(value);
(*property)->value = value->value;
if (Z_REFCOUNT_P(value) > 0) {
zval_copy_ctor(*property);
} else {
efree(value);
}
} else {
zval *garbage = *property;
Z_ADDREF_P(value);
if (PZVAL_IS_REF(value)) {
SEPARATE_ZVAL(&value);
}
*property = value;
zval_ptr_dtor(&garbage);
}
}
return SUCCESS;
}
}
/* }}} */
| ZEND_API int zend_update_static_property(zend_class_entry *scope, const char *name, int name_length, zval *value TSRMLS_DC) /* {{{ */
{
zval **property;
zend_class_entry *old_scope = EG(scope);
EG(scope) = scope;
property = zend_std_get_static_property(scope, name, name_length, 0, NULL TSRMLS_CC);
EG(scope) = old_scope;
if (!property) {
return FAILURE;
} else {
if (*property != value) {
if (PZVAL_IS_REF(*property)) {
zval_dtor(*property);
Z_TYPE_PP(property) = Z_TYPE_P(value);
(*property)->value = value->value;
if (Z_REFCOUNT_P(value) > 0) {
zval_copy_ctor(*property);
} else {
efree(value);
}
} else {
zval *garbage = *property;
Z_ADDREF_P(value);
if (PZVAL_IS_REF(value)) {
SEPARATE_ZVAL(&value);
}
*property = value;
zval_ptr_dtor(&garbage);
}
}
return SUCCESS;
}
}
/* }}} */
| C | php | 0 |
CVE-2016-4071 | https://www.cvedetails.com/cve/CVE-2016-4071/ | CWE-20 | https://git.php.net/?p=php-src.git;a=commit;h=6e25966544fb1d2f3d7596e060ce9c9269bbdcf8 | 6e25966544fb1d2f3d7596e060ce9c9269bbdcf8 | null | static int netsnmp_session_gen_sec_key(struct snmp_session *s, char *pass)
{
int snmp_errno;
s->securityPrivKeyLen = USM_PRIV_KU_LEN;
if ((snmp_errno = generate_Ku(s->securityAuthProto, s->securityAuthProtoLen,
(u_char *)pass, strlen(pass),
s->securityPrivKey, &(s->securityPrivKeyLen)))) {
php_error_docref(NULL, E_WARNING, "Error generating a key for privacy pass phrase '%s': %s", pass, snmp_api_errstring(snmp_errno));
return (-2);
}
return (0);
}
| static int netsnmp_session_gen_sec_key(struct snmp_session *s, char *pass)
{
int snmp_errno;
s->securityPrivKeyLen = USM_PRIV_KU_LEN;
if ((snmp_errno = generate_Ku(s->securityAuthProto, s->securityAuthProtoLen,
(u_char *)pass, strlen(pass),
s->securityPrivKey, &(s->securityPrivKeyLen)))) {
php_error_docref(NULL, E_WARNING, "Error generating a key for privacy pass phrase '%s': %s", pass, snmp_api_errstring(snmp_errno));
return (-2);
}
return (0);
}
| C | php | 0 |
null | null | null | https://github.com/chromium/chromium/commit/6c5d779aaf0dec9628da8a20751e95fd09554b14 | 6c5d779aaf0dec9628da8a20751e95fd09554b14 | Move the cancellation of blocked requests code from ResourceDispatcherHost::~ResourceDispatcherHost() to ResourceDispatcherHost::OnShutdown().
This causes the requests to be cancelled on the IO thread rather than the UI thread, which is important since cancellation may delete the URLRequest (and URLRequests should not outlive the IO thread).
BUG=39243
Review URL: http://codereview.chromium.org/1213004
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@42575 0039d316-1c4b-4281-b951-d872f2087c98 | void ResourceDispatcherHost::AddObserver(Observer* obs) {
observer_list_.AddObserver(obs);
}
| void ResourceDispatcherHost::AddObserver(Observer* obs) {
observer_list_.AddObserver(obs);
}
| C | Chrome | 0 |
CVE-2011-2830 | https://www.cvedetails.com/cve/CVE-2011-2830/ | CWE-399 | https://github.com/chromium/chromium/commit/08b630e66e042af3fe80015509b3238c2679ea40 | 08b630e66e042af3fe80015509b3238c2679ea40 | PopupMenuClient::multiple() should be const
https://bugs.webkit.org/show_bug.cgi?id=76771
Patch by Benjamin Poulain <bpoulain@apple.com> on 2012-01-21
Reviewed by Kent Tamura.
* platform/PopupMenuClient.h:
(WebCore::PopupMenuClient::multiple):
* rendering/RenderMenuList.cpp:
(WebCore::RenderMenuList::multiple):
* rendering/RenderMenuList.h:
git-svn-id: svn://svn.chromium.org/blink/trunk@105570 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | String RenderMenuList::itemToolTip(unsigned listIndex) const
{
const Vector<HTMLElement*>& listItems = toHTMLSelectElement(node())->listItems();
if (listIndex >= listItems.size())
return String();
return listItems[listIndex]->title();
}
| String RenderMenuList::itemToolTip(unsigned listIndex) const
{
const Vector<HTMLElement*>& listItems = toHTMLSelectElement(node())->listItems();
if (listIndex >= listItems.size())
return String();
return listItems[listIndex]->title();
}
| C | Chrome | 0 |
CVE-2018-15910 | https://www.cvedetails.com/cve/CVE-2018-15910/ | CWE-704 | http://git.ghostscript.com/?p=ghostpdl.git;a=commit;h=c3476dde7743761a4e1d39a631716199b696b880 | c3476dde7743761a4e1d39a631716199b696b880 | null | pdf_dsc_process(gx_device_pdf * pdev, const gs_param_string_array * pma)
{
/*
* The Adobe "Distiller Parameters" documentation says that Distiller
* looks at DSC comments, but it doesn't say which ones. We look at
* the ones that we see how to map directly to obvious PDF constructs.
*/
int code = 0;
uint i;
/*
* If ParseDSCComments is false, all DSC comments are ignored, even if
* ParseDSCComentsForDocInfo or PreserveEPSInfo is true.
*/
if (!pdev->ParseDSCComments)
return 0;
for (i = 0; i + 1 < pma->size && code >= 0; i += 2) {
const gs_param_string *pkey = &pma->data[i];
gs_param_string *pvalue = (gs_param_string *)&pma->data[i + 1];
const char *key;
int newsize;
/*
* %%For, %%Creator, and %%Title are recognized only if either
* ParseDSCCommentsForDocInfo or PreserveEPSInfo is true.
* The other DSC comments are always recognized.
*
* Acrobat Distiller sets CreationDate and ModDate to the current
* time, not the value of %%CreationDate. We think this is wrong,
* but we do the same -- we ignore %%CreationDate here.
*/
if (pdf_key_eq(pkey, "Creator") && pdev->CompatibilityLevel <= 1.7) {
key = "/Creator";
newsize = unescape_octals(pdev, (char *)pvalue->data, pvalue->size);
code = cos_dict_put_c_key_string(pdev->Info, key,
pvalue->data, newsize);
continue;
} else if (pdf_key_eq(pkey, "Title") && pdev->CompatibilityLevel <= 1.7) {
key = "/Title";
newsize = unescape_octals(pdev, (char *)pvalue->data, pvalue->size);
code = cos_dict_put_c_key_string(pdev->Info, key,
pvalue->data, newsize);
continue;
} else if (pdf_key_eq(pkey, "For") && pdev->CompatibilityLevel <= 1.7) {
key = "/Author";
newsize = unescape_octals(pdev, (char *)pvalue->data, pvalue->size);
code = cos_dict_put_c_key_string(pdev->Info, key,
pvalue->data, newsize);
continue;
} else {
pdf_page_dsc_info_t *ppdi;
char scan_buf[200]; /* arbitrary */
if ((ppdi = &pdev->doc_dsc_info,
pdf_key_eq(pkey, "Orientation")) ||
(ppdi = &pdev->page_dsc_info,
pdf_key_eq(pkey, "PageOrientation"))
) {
if (pvalue->size == 1 && pvalue->data[0] >= '0' &&
pvalue->data[0] <= '3'
)
ppdi->orientation = pvalue->data[0] - '0';
else
ppdi->orientation = -1;
} else if ((ppdi = &pdev->doc_dsc_info,
pdf_key_eq(pkey, "ViewingOrientation")) ||
(ppdi = &pdev->page_dsc_info,
pdf_key_eq(pkey, "PageViewingOrientation"))
) {
gs_matrix mat;
int orient;
if(pvalue->size >= sizeof(scan_buf) - 1)
continue; /* error */
memcpy(scan_buf, pvalue->data, pvalue->size);
scan_buf[pvalue->size] = 0;
if (sscanf(scan_buf, "[%g %g %g %g]",
&mat.xx, &mat.xy, &mat.yx, &mat.yy) != 4
)
continue; /* error */
for (orient = 0; orient < 4; ++orient) {
if (mat.xx == 1 && mat.xy == 0 && mat.yx == 0 && mat.yy == 1)
break;
gs_matrix_rotate(&mat, -90.0, &mat);
}
if (orient == 4) /* error */
orient = -1;
ppdi->viewing_orientation = orient;
} else {
gs_rect box;
if (pdf_key_eq(pkey, "EPSF")) {
pdev->is_EPS = (pvalue->size >= 1 && pvalue->data[0] != '0');
continue;
}
/*
* We only parse the BoundingBox for the sake of
* AutoPositionEPSFiles.
*/
if (pdf_key_eq(pkey, "BoundingBox"))
ppdi = &pdev->doc_dsc_info;
else if (pdf_key_eq(pkey, "PageBoundingBox"))
ppdi = &pdev->page_dsc_info;
else
continue;
if(pvalue->size >= sizeof(scan_buf) - 1)
continue; /* error */
memcpy(scan_buf, pvalue->data, pvalue->size);
scan_buf[pvalue->size] = 0;
if (sscanf(scan_buf, "[%lg %lg %lg %lg]",
&box.p.x, &box.p.y, &box.q.x, &box.q.y) != 4
)
continue; /* error */
ppdi->bounding_box = box;
}
continue;
}
}
return code;
}
| pdf_dsc_process(gx_device_pdf * pdev, const gs_param_string_array * pma)
{
/*
* The Adobe "Distiller Parameters" documentation says that Distiller
* looks at DSC comments, but it doesn't say which ones. We look at
* the ones that we see how to map directly to obvious PDF constructs.
*/
int code = 0;
uint i;
/*
* If ParseDSCComments is false, all DSC comments are ignored, even if
* ParseDSCComentsForDocInfo or PreserveEPSInfo is true.
*/
if (!pdev->ParseDSCComments)
return 0;
for (i = 0; i + 1 < pma->size && code >= 0; i += 2) {
const gs_param_string *pkey = &pma->data[i];
gs_param_string *pvalue = (gs_param_string *)&pma->data[i + 1];
const char *key;
int newsize;
/*
* %%For, %%Creator, and %%Title are recognized only if either
* ParseDSCCommentsForDocInfo or PreserveEPSInfo is true.
* The other DSC comments are always recognized.
*
* Acrobat Distiller sets CreationDate and ModDate to the current
* time, not the value of %%CreationDate. We think this is wrong,
* but we do the same -- we ignore %%CreationDate here.
*/
if (pdf_key_eq(pkey, "Creator") && pdev->CompatibilityLevel <= 1.7) {
key = "/Creator";
newsize = unescape_octals(pdev, (char *)pvalue->data, pvalue->size);
code = cos_dict_put_c_key_string(pdev->Info, key,
pvalue->data, newsize);
continue;
} else if (pdf_key_eq(pkey, "Title") && pdev->CompatibilityLevel <= 1.7) {
key = "/Title";
newsize = unescape_octals(pdev, (char *)pvalue->data, pvalue->size);
code = cos_dict_put_c_key_string(pdev->Info, key,
pvalue->data, newsize);
continue;
} else if (pdf_key_eq(pkey, "For") && pdev->CompatibilityLevel <= 1.7) {
key = "/Author";
newsize = unescape_octals(pdev, (char *)pvalue->data, pvalue->size);
code = cos_dict_put_c_key_string(pdev->Info, key,
pvalue->data, newsize);
continue;
} else {
pdf_page_dsc_info_t *ppdi;
char scan_buf[200]; /* arbitrary */
if ((ppdi = &pdev->doc_dsc_info,
pdf_key_eq(pkey, "Orientation")) ||
(ppdi = &pdev->page_dsc_info,
pdf_key_eq(pkey, "PageOrientation"))
) {
if (pvalue->size == 1 && pvalue->data[0] >= '0' &&
pvalue->data[0] <= '3'
)
ppdi->orientation = pvalue->data[0] - '0';
else
ppdi->orientation = -1;
} else if ((ppdi = &pdev->doc_dsc_info,
pdf_key_eq(pkey, "ViewingOrientation")) ||
(ppdi = &pdev->page_dsc_info,
pdf_key_eq(pkey, "PageViewingOrientation"))
) {
gs_matrix mat;
int orient;
if(pvalue->size >= sizeof(scan_buf) - 1)
continue; /* error */
memcpy(scan_buf, pvalue->data, pvalue->size);
scan_buf[pvalue->size] = 0;
if (sscanf(scan_buf, "[%g %g %g %g]",
&mat.xx, &mat.xy, &mat.yx, &mat.yy) != 4
)
continue; /* error */
for (orient = 0; orient < 4; ++orient) {
if (mat.xx == 1 && mat.xy == 0 && mat.yx == 0 && mat.yy == 1)
break;
gs_matrix_rotate(&mat, -90.0, &mat);
}
if (orient == 4) /* error */
orient = -1;
ppdi->viewing_orientation = orient;
} else {
gs_rect box;
if (pdf_key_eq(pkey, "EPSF")) {
pdev->is_EPS = (pvalue->size >= 1 && pvalue->data[0] != '0');
continue;
}
/*
* We only parse the BoundingBox for the sake of
* AutoPositionEPSFiles.
*/
if (pdf_key_eq(pkey, "BoundingBox"))
ppdi = &pdev->doc_dsc_info;
else if (pdf_key_eq(pkey, "PageBoundingBox"))
ppdi = &pdev->page_dsc_info;
else
continue;
if(pvalue->size >= sizeof(scan_buf) - 1)
continue; /* error */
memcpy(scan_buf, pvalue->data, pvalue->size);
scan_buf[pvalue->size] = 0;
if (sscanf(scan_buf, "[%lg %lg %lg %lg]",
&box.p.x, &box.p.y, &box.q.x, &box.q.y) != 4
)
continue; /* error */
ppdi->bounding_box = box;
}
continue;
}
}
return code;
}
| C | ghostscript | 0 |
CVE-2018-6794 | https://www.cvedetails.com/cve/CVE-2018-6794/ | CWE-693 | https://github.com/OISF/suricata/pull/3202/commits/e1ef57c848bbe4e567d5d4b66d346a742e3f77a1 | e1ef57c848bbe4e567d5d4b66d346a742e3f77a1 | stream: still inspect packets dropped by stream
The detect engine would bypass packets that are set as dropped. This
seems sane, as these packets are going to be dropped anyway.
However, it lead to the following corner case: stream events that
triggered the drop could not be matched on the rules. The packet
with the event wouldn't make it to the detect engine due to the bypass.
This patch changes the logic to not bypass DROP packets anymore.
Packets that are dropped by the stream engine will set the no payload
inspection flag, so avoid needless cost. | static inline void DetectRunGetRuleGroup(
const DetectEngineCtx *de_ctx,
Packet * const p, Flow * const pflow,
DetectRunScratchpad *scratch)
{
const SigGroupHead *sgh = NULL;
if (pflow) {
bool use_flow_sgh = false;
/* Get the stored sgh from the flow (if any). Make sure we're not using
* the sgh for icmp error packets part of the same stream. */
if (IP_GET_IPPROTO(p) == pflow->proto) { /* filter out icmp */
PACKET_PROFILING_DETECT_START(p, PROF_DETECT_GETSGH);
if ((p->flowflags & FLOW_PKT_TOSERVER) && (pflow->flags & FLOW_SGH_TOSERVER)) {
sgh = pflow->sgh_toserver;
SCLogDebug("sgh = pflow->sgh_toserver; => %p", sgh);
use_flow_sgh = true;
} else if ((p->flowflags & FLOW_PKT_TOCLIENT) && (pflow->flags & FLOW_SGH_TOCLIENT)) {
sgh = pflow->sgh_toclient;
SCLogDebug("sgh = pflow->sgh_toclient; => %p", sgh);
use_flow_sgh = true;
}
PACKET_PROFILING_DETECT_END(p, PROF_DETECT_GETSGH);
}
if (!(use_flow_sgh)) {
PACKET_PROFILING_DETECT_START(p, PROF_DETECT_GETSGH);
sgh = SigMatchSignaturesGetSgh(de_ctx, p);
PACKET_PROFILING_DETECT_END(p, PROF_DETECT_GETSGH);
/* HACK: prevent the wrong sgh (or NULL) from being stored in the
* flow's sgh pointers */
if (PKT_IS_ICMPV4(p) && ICMPV4_DEST_UNREACH_IS_VALID(p)) {
; /* no-op */
} else {
/* store the found sgh (or NULL) in the flow to save us
* from looking it up again for the next packet.
* Also run other tasks */
DetectRunPostGetFirstRuleGroup(p, pflow, sgh);
}
}
} else { /* p->flags & PKT_HAS_FLOW */
/* no flow */
PACKET_PROFILING_DETECT_START(p, PROF_DETECT_GETSGH);
sgh = SigMatchSignaturesGetSgh(de_ctx, p);
PACKET_PROFILING_DETECT_END(p, PROF_DETECT_GETSGH);
}
scratch->sgh = sgh;
}
| static inline void DetectRunGetRuleGroup(
const DetectEngineCtx *de_ctx,
Packet * const p, Flow * const pflow,
DetectRunScratchpad *scratch)
{
const SigGroupHead *sgh = NULL;
if (pflow) {
bool use_flow_sgh = false;
/* Get the stored sgh from the flow (if any). Make sure we're not using
* the sgh for icmp error packets part of the same stream. */
if (IP_GET_IPPROTO(p) == pflow->proto) { /* filter out icmp */
PACKET_PROFILING_DETECT_START(p, PROF_DETECT_GETSGH);
if ((p->flowflags & FLOW_PKT_TOSERVER) && (pflow->flags & FLOW_SGH_TOSERVER)) {
sgh = pflow->sgh_toserver;
SCLogDebug("sgh = pflow->sgh_toserver; => %p", sgh);
use_flow_sgh = true;
} else if ((p->flowflags & FLOW_PKT_TOCLIENT) && (pflow->flags & FLOW_SGH_TOCLIENT)) {
sgh = pflow->sgh_toclient;
SCLogDebug("sgh = pflow->sgh_toclient; => %p", sgh);
use_flow_sgh = true;
}
PACKET_PROFILING_DETECT_END(p, PROF_DETECT_GETSGH);
}
if (!(use_flow_sgh)) {
PACKET_PROFILING_DETECT_START(p, PROF_DETECT_GETSGH);
sgh = SigMatchSignaturesGetSgh(de_ctx, p);
PACKET_PROFILING_DETECT_END(p, PROF_DETECT_GETSGH);
/* HACK: prevent the wrong sgh (or NULL) from being stored in the
* flow's sgh pointers */
if (PKT_IS_ICMPV4(p) && ICMPV4_DEST_UNREACH_IS_VALID(p)) {
; /* no-op */
} else {
/* store the found sgh (or NULL) in the flow to save us
* from looking it up again for the next packet.
* Also run other tasks */
DetectRunPostGetFirstRuleGroup(p, pflow, sgh);
}
}
} else { /* p->flags & PKT_HAS_FLOW */
/* no flow */
PACKET_PROFILING_DETECT_START(p, PROF_DETECT_GETSGH);
sgh = SigMatchSignaturesGetSgh(de_ctx, p);
PACKET_PROFILING_DETECT_END(p, PROF_DETECT_GETSGH);
}
scratch->sgh = sgh;
}
| C | suricata | 0 |
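*Note:* the commit message above describes a behavioral change, not just a refactor: packets dropped by the stream engine are no longer bypassed by the detect engine; instead only payload inspection is disabled, so stream events on the packet can still match rules. A minimal sketch of that logic, assuming simplified flag and field names rather than Suricata's exact ones:

```c
#include <stdint.h>

#define ACTION_DROP              (1u << 0)
#define PKT_NOPAYLOAD_INSPECTION (1u << 1)

struct packet { uint32_t action; uint32_t flags; };

/* Keep dropped packets on the detection path, but skip payload cost. */
static void prepare_for_detect(struct packet *p)
{
    if (p->action & ACTION_DROP)
        p->flags |= PKT_NOPAYLOAD_INSPECTION; /* events still inspected */
}
```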
CVE-2014-1713 | https://www.cvedetails.com/cve/CVE-2014-1713/ | CWE-399 | https://github.com/chromium/chromium/commit/f85a87ec670ad0fce9d98d90c9a705b72a288154 | f85a87ec670ad0fce9d98d90c9a705b72a288154 | document.location bindings fix
BUG=352374
R=jochen@chromium.org
Review URL: https://codereview.chromium.org/196343011
git-svn-id: svn://svn.chromium.org/blink/trunk@169176 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | static void conditionalConditionCustomVoidMethodMethodCallback(const v8::FunctionCallbackInfo<v8::Value>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMMethod");
V8TestObjectPython::conditionalConditionCustomVoidMethodMethodCustom(info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
| static void conditionalConditionCustomVoidMethodMethodCallback(const v8::FunctionCallbackInfo<v8::Value>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMMethod");
V8TestObjectPython::conditionalConditionCustomVoidMethodMethodCustom(info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
| C | Chrome | 0 |
CVE-2016-1670 | https://www.cvedetails.com/cve/CVE-2016-1670/ | CWE-362 | https://github.com/chromium/chromium/commit/1af4fada49c4f3890f16daac31d38379a9d782b2 | 1af4fada49c4f3890f16daac31d38379a9d782b2 | Block a compromised renderer from reusing request ids.
BUG=578882
Review URL: https://codereview.chromium.org/1608573002
Cr-Commit-Position: refs/heads/master@{#372547} | void ResourceDispatcherHostImpl::OnRenderViewHostCreated(int child_id,
int route_id) {
scheduler_->OnClientCreated(child_id, route_id);
}
| void ResourceDispatcherHostImpl::OnRenderViewHostCreated(int child_id,
int route_id) {
scheduler_->OnClientCreated(child_id, route_id);
}
| C | Chrome | 0 |
CVE-2012-5112 | https://www.cvedetails.com/cve/CVE-2012-5112/ | CWE-399 | https://github.com/chromium/chromium/commit/d65b01ca819881a507b5e60c25a2f9caff58cd57 | d65b01ca819881a507b5e60c25a2f9caff58cd57 | Wipe out QuotaThreadTask.
This is a one of a series of refactoring patches for QuotaManager.
http://codereview.chromium.org/10872054/
http://codereview.chromium.org/10917060/
BUG=139270
Review URL: https://chromiumcodereview.appspot.com/10919070
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@154987 0039d316-1c4b-4281-b951-d872f2087c98 | QuotaManagerProxy::QuotaManagerProxy(
QuotaManager* manager, base::SingleThreadTaskRunner* io_thread)
: manager_(manager), io_thread_(io_thread) {
}
| QuotaManagerProxy::QuotaManagerProxy(
QuotaManager* manager, base::SingleThreadTaskRunner* io_thread)
: manager_(manager), io_thread_(io_thread) {
}
| C | Chrome | 0 |
CVE-2015-8215 | https://www.cvedetails.com/cve/CVE-2015-8215/ | CWE-20 | https://github.com/torvalds/linux/commit/77751427a1ff25b27d47a4c36b12c3c8667855ac | 77751427a1ff25b27d47a4c36b12c3c8667855ac | ipv6: addrconf: validate new MTU before applying it
Currently we don't check if the new MTU is valid or not and this allows
one to configure a smaller than minimum allowed by RFCs or even bigger
than interface own MTU, which is a problem as it may lead to packet
drops.
If you have a daemon like NetworkManager running, this may be exploited
by remote attackers by forging RA packets with an invalid MTU, possibly
leading to a DoS. (NetworkManager currently only validates for values
too small, but not for too big ones.)
The fix is just to make sure the new value is valid. That is, between
IPV6_MIN_MTU and interface's MTU.
Note that similar check is already performed at
ndisc_router_discovery(), for when kernel itself parses the RA.
Signed-off-by: Marcelo Ricardo Leitner <mleitner@redhat.com>
Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: David S. Miller <davem@davemloft.net> | static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
int plen,
const struct net_device *dev,
u32 flags, u32 noflags)
{
struct fib6_node *fn;
struct rt6_info *rt = NULL;
struct fib6_table *table;
table = fib6_get_table(dev_net(dev), RT6_TABLE_PREFIX);
if (table == NULL)
return NULL;
read_lock_bh(&table->tb6_lock);
fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0);
if (!fn)
goto out;
for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
if (rt->dst.dev->ifindex != dev->ifindex)
continue;
if ((rt->rt6i_flags & flags) != flags)
continue;
if ((rt->rt6i_flags & noflags) != 0)
continue;
dst_hold(&rt->dst);
break;
}
out:
read_unlock_bh(&table->tb6_lock);
return rt;
}
| static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
int plen,
const struct net_device *dev,
u32 flags, u32 noflags)
{
struct fib6_node *fn;
struct rt6_info *rt = NULL;
struct fib6_table *table;
table = fib6_get_table(dev_net(dev), RT6_TABLE_PREFIX);
if (table == NULL)
return NULL;
read_lock_bh(&table->tb6_lock);
fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0);
if (!fn)
goto out;
for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
if (rt->dst.dev->ifindex != dev->ifindex)
continue;
if ((rt->rt6i_flags & flags) != flags)
continue;
if ((rt->rt6i_flags & noflags) != 0)
continue;
dst_hold(&rt->dst);
break;
}
out:
read_unlock_bh(&table->tb6_lock);
return rt;
}
| C | linux | 0 |
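*Note:* the fix described in this record amounts to a range check before applying an RA-supplied MTU: the new value must lie between IPV6_MIN_MTU (1280, per the IPv6 specification) and the interface's own MTU. A minimal sketch of that validation, with illustrative function and parameter names rather than the actual addrconf code paths:

```c
/* Accept an RA-supplied MTU only if it is neither smaller than the
 * IPv6 minimum nor larger than the link's MTU. Illustrative names. */
#define IPV6_MIN_MTU 1280

static int ip6_mtu_is_valid(unsigned int new_mtu, unsigned int dev_mtu)
{
    return new_mtu >= IPV6_MIN_MTU && new_mtu <= dev_mtu;
}
```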
CVE-2016-9294 | https://www.cvedetails.com/cve/CVE-2016-9294/ | CWE-476 | http://git.ghostscript.com/?p=mujs.git;a=commit;h=5008105780c0b0182ea6eda83ad5598f225be3ee | 5008105780c0b0182ea6eda83ad5598f225be3ee | null | static void addlocal(JF, js_Ast *ident, int reuse)
{
const char *name = ident->string;
if (J->strict) {
if (!strcmp(name, "arguments"))
jsC_error(J, ident, "redefining 'arguments' is not allowed in strict mode");
if (!strcmp(name, "eval"))
jsC_error(J, ident, "redefining 'eval' is not allowed in strict mode");
}
if (reuse || J->strict) {
int i;
for (i = 0; i < F->varlen; ++i) {
if (!strcmp(F->vartab[i], name)) {
if (reuse)
return;
if (J->strict)
jsC_error(J, ident, "duplicate formal parameter '%s'", name);
}
}
}
if (F->varlen >= F->varcap) {
F->varcap = F->varcap ? F->varcap * 2 : 16;
F->vartab = js_realloc(J, F->vartab, F->varcap * sizeof *F->vartab);
}
F->vartab[F->varlen++] = name;
}
| static void addlocal(JF, js_Ast *ident, int reuse)
{
const char *name = ident->string;
if (J->strict) {
if (!strcmp(name, "arguments"))
jsC_error(J, ident, "redefining 'arguments' is not allowed in strict mode");
if (!strcmp(name, "eval"))
jsC_error(J, ident, "redefining 'eval' is not allowed in strict mode");
}
if (reuse || J->strict) {
int i;
for (i = 0; i < F->varlen; ++i) {
if (!strcmp(F->vartab[i], name)) {
if (reuse)
return;
if (J->strict)
jsC_error(J, ident, "duplicate formal parameter '%s'", name);
}
}
}
if (F->varlen >= F->varcap) {
F->varcap = F->varcap ? F->varcap * 2 : 16;
F->vartab = js_realloc(J, F->vartab, F->varcap * sizeof *F->vartab);
}
F->vartab[F->varlen++] = name;
}
| C | ghostscript | 0 |
CVE-2017-5009 | https://www.cvedetails.com/cve/CVE-2017-5009/ | CWE-119 | https://github.com/chromium/chromium/commit/1c40f9042ae2d6ee7483d72998aabb5e73b2ff60 | 1c40f9042ae2d6ee7483d72998aabb5e73b2ff60 | DevTools: send proper resource type in Network.RequestWillBeSent
This patch plumbs resoure type into the DispatchWillSendRequest
instrumenation. This allows us to report accurate type in
Network.RequestWillBeSent event, instead of "Other", that we report
today.
BUG=765501
R=dgozman
Change-Id: I0134c08b841e8dd247fdc8ff208bfd51e462709c
Reviewed-on: https://chromium-review.googlesource.com/667504
Reviewed-by: Pavel Feldman <pfeldman@chromium.org>
Reviewed-by: Dmitry Gozman <dgozman@chromium.org>
Commit-Queue: Andrey Lushnikov <lushnikov@chromium.org>
Cr-Commit-Position: refs/heads/master@{#507936} | void RecordTiming(const ResourceLoadTiming& timing, TracedValue* value) {
value->SetDouble("requestTime", timing.RequestTime());
value->SetDouble("proxyStart",
timing.CalculateMillisecondDelta(timing.ProxyStart()));
value->SetDouble("proxyEnd",
timing.CalculateMillisecondDelta(timing.ProxyEnd()));
value->SetDouble("dnsStart",
timing.CalculateMillisecondDelta(timing.DnsStart()));
value->SetDouble("dnsEnd", timing.CalculateMillisecondDelta(timing.DnsEnd()));
value->SetDouble("connectStart",
timing.CalculateMillisecondDelta(timing.ConnectStart()));
value->SetDouble("connectEnd",
timing.CalculateMillisecondDelta(timing.ConnectEnd()));
value->SetDouble("sslStart",
timing.CalculateMillisecondDelta(timing.SslStart()));
value->SetDouble("sslEnd", timing.CalculateMillisecondDelta(timing.SslEnd()));
value->SetDouble("workerStart",
timing.CalculateMillisecondDelta(timing.WorkerStart()));
value->SetDouble("workerReady",
timing.CalculateMillisecondDelta(timing.WorkerReady()));
value->SetDouble("sendStart",
timing.CalculateMillisecondDelta(timing.SendStart()));
value->SetDouble("sendEnd",
timing.CalculateMillisecondDelta(timing.SendEnd()));
value->SetDouble("receiveHeadersEnd", timing.CalculateMillisecondDelta(
timing.ReceiveHeadersEnd()));
value->SetDouble("pushStart", timing.PushStart());
value->SetDouble("pushEnd", timing.PushEnd());
}
| void RecordTiming(const ResourceLoadTiming& timing, TracedValue* value) {
value->SetDouble("requestTime", timing.RequestTime());
value->SetDouble("proxyStart",
timing.CalculateMillisecondDelta(timing.ProxyStart()));
value->SetDouble("proxyEnd",
timing.CalculateMillisecondDelta(timing.ProxyEnd()));
value->SetDouble("dnsStart",
timing.CalculateMillisecondDelta(timing.DnsStart()));
value->SetDouble("dnsEnd", timing.CalculateMillisecondDelta(timing.DnsEnd()));
value->SetDouble("connectStart",
timing.CalculateMillisecondDelta(timing.ConnectStart()));
value->SetDouble("connectEnd",
timing.CalculateMillisecondDelta(timing.ConnectEnd()));
value->SetDouble("sslStart",
timing.CalculateMillisecondDelta(timing.SslStart()));
value->SetDouble("sslEnd", timing.CalculateMillisecondDelta(timing.SslEnd()));
value->SetDouble("workerStart",
timing.CalculateMillisecondDelta(timing.WorkerStart()));
value->SetDouble("workerReady",
timing.CalculateMillisecondDelta(timing.WorkerReady()));
value->SetDouble("sendStart",
timing.CalculateMillisecondDelta(timing.SendStart()));
value->SetDouble("sendEnd",
timing.CalculateMillisecondDelta(timing.SendEnd()));
value->SetDouble("receiveHeadersEnd", timing.CalculateMillisecondDelta(
timing.ReceiveHeadersEnd()));
value->SetDouble("pushStart", timing.PushStart());
value->SetDouble("pushEnd", timing.PushEnd());
}
| C | Chrome | 0 |
CVE-2012-2875 | https://www.cvedetails.com/cve/CVE-2012-2875/ | null | https://github.com/chromium/chromium/commit/7d97e1d327f153cce6137ef8b533d45d083802d9 | 7d97e1d327f153cce6137ef8b533d45d083802d9 | Refresh promo notifications as they're fetched
The "guard" existed for notification scheduling was preventing
"turn-off a promo" and "update a promo" scenarios.
Yet I do not believe it was adding any actual safety: if things
on a server backend go wrong, the clients will be affected one
way or the other, and it is better to have an option to shut
the malformed promo down "as quickly as possible" (~in 12-24 hours).
BUG=
TEST=
Review URL: https://chromiumcodereview.appspot.com/10696204
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@146462 0039d316-1c4b-4281-b951-d872f2087c98 | GURL GetPromoResourceURL() {
const std::string promo_server_url = CommandLine::ForCurrentProcess()->
GetSwitchValueASCII(switches::kPromoServerURL);
return promo_server_url.empty() ?
NotificationPromo::PromoServerURL() : GURL(promo_server_url);
}
| GURL GetPromoResourceURL() {
const std::string promo_server_url = CommandLine::ForCurrentProcess()->
GetSwitchValueASCII(switches::kPromoServerURL);
return promo_server_url.empty() ?
NotificationPromo::PromoServerURL() : GURL(promo_server_url);
}
| C | Chrome | 0 |
CVE-2017-9520 | https://www.cvedetails.com/cve/CVE-2017-9520/ | CWE-416 | https://github.com/radare/radare2/commit/f85bc674b2a2256a364fe796351bc1971e106005 | f85bc674b2a2256a364fe796351bc1971e106005 | Fix #7698 - UAF in r_config_set when loading a dex | static bool is_bool(const char *s) {
return !strcasecmp ("true", s) || !strcasecmp ("false", s);
}
| static bool is_bool(const char *s) {
return !strcasecmp ("true", s) || !strcasecmp ("false", s);
}
| C | radare2 | 0 |
CVE-2014-1713 | https://www.cvedetails.com/cve/CVE-2014-1713/ | CWE-399 | https://github.com/chromium/chromium/commit/f85a87ec670ad0fce9d98d90c9a705b72a288154 | f85a87ec670ad0fce9d98d90c9a705b72a288154 | document.location bindings fix
BUG=352374
R=jochen@chromium.org
Review URL: https://codereview.chromium.org/196343011
git-svn-id: svn://svn.chromium.org/blink/trunk@169176 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | static void dateAttributeAttributeSetter(v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info)
{
TestObjectPython* imp = V8TestObjectPython::toNative(info.Holder());
V8TRYCATCH_VOID(double, cppValue, toCoreDate(jsValue));
imp->setDateAttribute(cppValue);
}
| static void dateAttributeAttributeSetter(v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info)
{
TestObjectPython* imp = V8TestObjectPython::toNative(info.Holder());
V8TRYCATCH_VOID(double, cppValue, toCoreDate(jsValue));
imp->setDateAttribute(cppValue);
}
| C | Chrome | 0 |
CVE-2018-6034 | https://www.cvedetails.com/cve/CVE-2018-6034/ | CWE-125 | https://github.com/chromium/chromium/commit/3298d3abf47b3a7a10e44c07d821c68a5c8aa935 | 3298d3abf47b3a7a10e44c07d821c68a5c8aa935 | Tighten about IntRect use in WebGL with overflow detection
BUG=784183
TEST=test case in the bug in ASAN build
R=kbr@chromium.org
Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
Change-Id: Ie25ca328af99de7828e28e6a6e3d775f1bebc43f
Reviewed-on: https://chromium-review.googlesource.com/811826
Reviewed-by: Kenneth Russell <kbr@chromium.org>
Commit-Queue: Zhenyao Mo <zmo@chromium.org>
Cr-Commit-Position: refs/heads/master@{#522213} | void WebGLRenderingContextBase::TexImageHelperImageBitmap(
TexImageFunctionID function_id,
GLenum target,
GLint level,
GLint internalformat,
GLenum format,
GLenum type,
GLint xoffset,
GLint yoffset,
GLint zoffset,
ImageBitmap* bitmap,
const IntRect& source_sub_rect,
GLsizei depth,
GLint unpack_image_height,
ExceptionState& exception_state) {
const char* func_name = GetTexImageFunctionName(function_id);
if (isContextLost())
return;
if (!ValidateImageBitmap(func_name, bitmap, exception_state))
return;
WebGLTexture* texture =
ValidateTexImageBinding(func_name, function_id, target);
if (!texture)
return;
bool selecting_sub_rectangle = false;
if (!ValidateTexImageSubRectangle(func_name, function_id, bitmap,
source_sub_rect, depth, unpack_image_height,
&selecting_sub_rectangle)) {
return;
}
TexImageFunctionType function_type;
if (function_id == kTexImage2D)
function_type = kTexImage;
else
function_type = kTexSubImage;
GLsizei width = source_sub_rect.Width();
GLsizei height = source_sub_rect.Height();
if (!ValidateTexFunc(func_name, function_type, kSourceImageBitmap, target,
level, internalformat, width, height, depth, 0, format,
type, xoffset, yoffset, zoffset))
return;
DCHECK(bitmap->BitmapImage());
if (function_id != kTexSubImage3D && function_id != kTexImage3D &&
bitmap->IsAccelerated() && CanUseTexImageByGPU(format, type) &&
!selecting_sub_rectangle) {
if (function_id == kTexImage2D) {
TexImage2DBase(target, level, internalformat, width, height, 0, format,
type, nullptr);
TexImageByGPU(function_id, texture, target, level, 0, 0, 0, bitmap,
source_sub_rect);
} else if (function_id == kTexSubImage2D) {
TexImageByGPU(function_id, texture, target, level, xoffset, yoffset, 0,
bitmap, source_sub_rect);
}
return;
}
sk_sp<SkImage> sk_image =
bitmap->BitmapImage()->PaintImageForCurrentFrame().GetSkImage();
SkPixmap pixmap;
uint8_t* pixel_data_ptr = nullptr;
scoped_refptr<Uint8Array> pixel_data;
bool peek_succeed = sk_image->peekPixels(&pixmap);
if (peek_succeed) {
pixel_data_ptr = static_cast<uint8_t*>(pixmap.writable_addr());
} else {
pixel_data = bitmap->CopyBitmapData(
bitmap->IsPremultiplied() ? kPremultiplyAlpha : kDontPremultiplyAlpha);
pixel_data_ptr = pixel_data->Data();
}
Vector<uint8_t> data;
bool need_conversion = true;
bool have_peekable_rgba =
(peek_succeed &&
pixmap.colorType() == SkColorType::kRGBA_8888_SkColorType);
bool is_pixel_data_rgba = (have_peekable_rgba || !peek_succeed);
if (is_pixel_data_rgba && format == GL_RGBA && type == GL_UNSIGNED_BYTE &&
!selecting_sub_rectangle && depth == 1) {
need_conversion = false;
} else {
if (type == GL_UNSIGNED_INT_10F_11F_11F_REV) {
type = GL_FLOAT;
}
bool is_pixel_data_bgra =
pixmap.colorType() == SkColorType::kBGRA_8888_SkColorType;
if ((is_pixel_data_bgra &&
!WebGLImageConversion::ExtractImageData(
pixel_data_ptr, WebGLImageConversion::DataFormat::kDataFormatBGRA8,
bitmap->Size(), source_sub_rect, depth, unpack_image_height,
format, type, false, false, data)) ||
(is_pixel_data_rgba &&
!WebGLImageConversion::ExtractImageData(
pixel_data_ptr, WebGLImageConversion::DataFormat::kDataFormatRGBA8,
bitmap->Size(), source_sub_rect, depth, unpack_image_height,
format, type, false, false, data))) {
SynthesizeGLError(GL_INVALID_VALUE, func_name, "bad image data");
return;
}
}
ScopedUnpackParametersResetRestore temporary_reset_unpack(this);
if (function_id == kTexImage2D) {
TexImage2DBase(target, level, internalformat, width, height, 0, format,
type, need_conversion ? data.data() : pixel_data_ptr);
} else if (function_id == kTexSubImage2D) {
ContextGL()->TexSubImage2D(target, level, xoffset, yoffset, width, height,
format, type,
need_conversion ? data.data() : pixel_data_ptr);
} else if (function_id == kTexImage3D) {
ContextGL()->TexImage3D(target, level, internalformat, width, height, depth,
0, format, type,
need_conversion ? data.data() : pixel_data_ptr);
} else {
DCHECK_EQ(function_id, kTexSubImage3D);
ContextGL()->TexSubImage3D(target, level, xoffset, yoffset, zoffset, width,
height, depth, format, type,
need_conversion ? data.data() : pixel_data_ptr);
}
}
| void WebGLRenderingContextBase::TexImageHelperImageBitmap(
TexImageFunctionID function_id,
GLenum target,
GLint level,
GLint internalformat,
GLenum format,
GLenum type,
GLint xoffset,
GLint yoffset,
GLint zoffset,
ImageBitmap* bitmap,
const IntRect& source_sub_rect,
GLsizei depth,
GLint unpack_image_height,
ExceptionState& exception_state) {
const char* func_name = GetTexImageFunctionName(function_id);
if (isContextLost())
return;
if (!ValidateImageBitmap(func_name, bitmap, exception_state))
return;
WebGLTexture* texture =
ValidateTexImageBinding(func_name, function_id, target);
if (!texture)
return;
bool selecting_sub_rectangle = false;
if (!ValidateTexImageSubRectangle(func_name, function_id, bitmap,
source_sub_rect, depth, unpack_image_height,
&selecting_sub_rectangle)) {
return;
}
TexImageFunctionType function_type;
if (function_id == kTexImage2D)
function_type = kTexImage;
else
function_type = kTexSubImage;
GLsizei width = source_sub_rect.Width();
GLsizei height = source_sub_rect.Height();
if (!ValidateTexFunc(func_name, function_type, kSourceImageBitmap, target,
level, internalformat, width, height, depth, 0, format,
type, xoffset, yoffset, zoffset))
return;
DCHECK(bitmap->BitmapImage());
if (function_id != kTexSubImage3D && function_id != kTexImage3D &&
bitmap->IsAccelerated() && CanUseTexImageByGPU(format, type) &&
!selecting_sub_rectangle) {
if (function_id == kTexImage2D) {
TexImage2DBase(target, level, internalformat, width, height, 0, format,
type, nullptr);
TexImageByGPU(function_id, texture, target, level, 0, 0, 0, bitmap,
source_sub_rect);
} else if (function_id == kTexSubImage2D) {
TexImageByGPU(function_id, texture, target, level, xoffset, yoffset, 0,
bitmap, source_sub_rect);
}
return;
}
sk_sp<SkImage> sk_image =
bitmap->BitmapImage()->PaintImageForCurrentFrame().GetSkImage();
SkPixmap pixmap;
uint8_t* pixel_data_ptr = nullptr;
scoped_refptr<Uint8Array> pixel_data;
bool peek_succeed = sk_image->peekPixels(&pixmap);
if (peek_succeed) {
pixel_data_ptr = static_cast<uint8_t*>(pixmap.writable_addr());
} else {
pixel_data = bitmap->CopyBitmapData(
bitmap->IsPremultiplied() ? kPremultiplyAlpha : kDontPremultiplyAlpha);
pixel_data_ptr = pixel_data->Data();
}
Vector<uint8_t> data;
bool need_conversion = true;
bool have_peekable_rgba =
(peek_succeed &&
pixmap.colorType() == SkColorType::kRGBA_8888_SkColorType);
bool is_pixel_data_rgba = (have_peekable_rgba || !peek_succeed);
if (is_pixel_data_rgba && format == GL_RGBA && type == GL_UNSIGNED_BYTE &&
!selecting_sub_rectangle && depth == 1) {
need_conversion = false;
} else {
if (type == GL_UNSIGNED_INT_10F_11F_11F_REV) {
type = GL_FLOAT;
}
bool is_pixel_data_bgra =
pixmap.colorType() == SkColorType::kBGRA_8888_SkColorType;
if ((is_pixel_data_bgra &&
!WebGLImageConversion::ExtractImageData(
pixel_data_ptr, WebGLImageConversion::DataFormat::kDataFormatBGRA8,
bitmap->Size(), source_sub_rect, depth, unpack_image_height,
format, type, false, false, data)) ||
(is_pixel_data_rgba &&
!WebGLImageConversion::ExtractImageData(
pixel_data_ptr, WebGLImageConversion::DataFormat::kDataFormatRGBA8,
bitmap->Size(), source_sub_rect, depth, unpack_image_height,
format, type, false, false, data))) {
SynthesizeGLError(GL_INVALID_VALUE, func_name, "bad image data");
return;
}
}
ScopedUnpackParametersResetRestore temporary_reset_unpack(this);
if (function_id == kTexImage2D) {
TexImage2DBase(target, level, internalformat, width, height, 0, format,
type, need_conversion ? data.data() : pixel_data_ptr);
} else if (function_id == kTexSubImage2D) {
ContextGL()->TexSubImage2D(target, level, xoffset, yoffset, width, height,
format, type,
need_conversion ? data.data() : pixel_data_ptr);
} else if (function_id == kTexImage3D) {
ContextGL()->TexImage3D(target, level, internalformat, width, height, depth,
0, format, type,
need_conversion ? data.data() : pixel_data_ptr);
} else {
DCHECK_EQ(function_id, kTexSubImage3D);
ContextGL()->TexSubImage3D(target, level, xoffset, yoffset, zoffset, width,
height, depth, format, type,
need_conversion ? data.data() : pixel_data_ptr);
}
}
| C | Chrome | 0 |
CVE-2015-8952 | https://www.cvedetails.com/cve/CVE-2015-8952/ | CWE-19 | https://github.com/torvalds/linux/commit/82939d7999dfc1f1998c4b1c12e2f19edbdff272 | 82939d7999dfc1f1998c4b1c12e2f19edbdff272 | ext4: convert to mbcache2
The conversion is generally straightforward. The only tricky part is
that xattr block corresponding to found mbcache entry can get freed
before we get buffer lock for that block. So we have to check whether
the entry is still valid after getting buffer lock.
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Theodore Ts'o <tytso@mit.edu> | static void ext4_xattr_rehash(struct ext4_xattr_header *header,
struct ext4_xattr_entry *entry)
{
struct ext4_xattr_entry *here;
__u32 hash = 0;
ext4_xattr_hash_entry(header, entry);
here = ENTRY(header+1);
while (!IS_LAST_ENTRY(here)) {
if (!here->e_hash) {
/* Block is not shared if an entry's hash value == 0 */
hash = 0;
break;
}
hash = (hash << BLOCK_HASH_SHIFT) ^
(hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
le32_to_cpu(here->e_hash);
here = EXT4_XATTR_NEXT(here);
}
header->h_hash = cpu_to_le32(hash);
}
| static void ext4_xattr_rehash(struct ext4_xattr_header *header,
struct ext4_xattr_entry *entry)
{
struct ext4_xattr_entry *here;
__u32 hash = 0;
ext4_xattr_hash_entry(header, entry);
here = ENTRY(header+1);
while (!IS_LAST_ENTRY(here)) {
if (!here->e_hash) {
/* Block is not shared if an entry's hash value == 0 */
hash = 0;
break;
}
hash = (hash << BLOCK_HASH_SHIFT) ^
(hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
le32_to_cpu(here->e_hash);
here = EXT4_XATTR_NEXT(here);
}
header->h_hash = cpu_to_le32(hash);
}
| C | linux | 0 |
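*Note:* the "tricky part" called out in the commit message is a lookup-versus-free race: an xattr block found through the cache can be freed before the buffer lock is taken, so validity must be re-checked once the lock is held. A self-contained userspace analog of that revalidate-after-lock pattern, with all names illustrative:

```c
#include <pthread.h>
#include <stdbool.h>

struct entry {
    pthread_mutex_t lock;
    bool valid;           /* cleared when the backing block is freed */
};

/* Returns true with entry->lock held iff the entry is still usable;
 * on false, the caller should retry the lookup. */
bool acquire_if_valid(struct entry *e)
{
    pthread_mutex_lock(&e->lock);
    if (e->valid)
        return true;      /* safe to use the entry, then unlock */
    pthread_mutex_unlock(&e->lock);
    return false;         /* raced with a free */
}
```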
CVE-2016-2464 | https://www.cvedetails.com/cve/CVE-2016-2464/ | CWE-20 | https://android.googlesource.com/platform/external/libvpx/+/65c49d5b382de4085ee5668732bcb0f6ecaf7148 | 65c49d5b382de4085ee5668732bcb0f6ecaf7148 | Fix ParseElementHeader to support 0 payload elements
Cherry-pick'ing Change 5c83bbec9a5f6f00a349674ddad85b753d2ea219
from upstream. This fixes regression in some edge cases for mkv
playback.
BUG=26499283
Change-Id: I88de03219a3d941b6b2f251d384e29c36bdd4d9b
| const SeekHead::VoidElement* SeekHead::GetVoidElement(int idx) const {
if (idx < 0)
return 0;
if (idx >= m_void_element_count)
return 0;
return m_void_elements + idx;
}
| const SeekHead::VoidElement* SeekHead::GetVoidElement(int idx) const {
if (idx < 0)
return 0;
if (idx >= m_void_element_count)
return 0;
return m_void_elements + idx;
}
| C | Android | 0 |
CVE-2013-0886 | https://www.cvedetails.com/cve/CVE-2013-0886/ | null | https://github.com/chromium/chromium/commit/18d67244984a574ba2dd8779faabc0e3e34f4b76 | 18d67244984a574ba2dd8779faabc0e3e34f4b76 | Implement TextureImageTransportSurface using texture mailbox
This has a couple of advantages:
- allow tearing down and recreating the UI parent context without
losing the renderer contexts
- do not require a context to be able to generate textures when
creating the GLSurfaceHandle
- clearer ownership semantics that potentially allows for more
robust and easier lost context handling/thumbnailing/etc., since a texture is at
any given time owned by either: UI parent, mailbox, or
TextureImageTransportSurface
- simplify frontbuffer protection logic;
the frontbuffer textures are now owned by RWHV where they are refcounted
The TextureImageTransportSurface informs RenderWidgetHostView of the
mailbox names for the front- and backbuffer textures by
associating them with a surface_handle (1 or 2) in the AcceleratedSurfaceNew message.
During SwapBuffers() or PostSubBuffer() cycles, it then uses
produceTextureCHROMIUM() and consumeTextureCHROMIUM()
to transfer ownership between renderer and browser compositor.
RWHV sends back the surface_handle of the buffer being returned with the Swap ACK
(or 0 if no buffer is being returned in which case TextureImageTransportSurface will
allocate a new texture - note that this could be used to
simply keep textures for thumbnailing).
BUG=154815,139616
TBR=sky@chromium.org
Review URL: https://chromiumcodereview.appspot.com/11194042
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@171569 0039d316-1c4b-4281-b951-d872f2087c98 | BOOL CALLBACK ShowWindowsCallback(HWND window, LPARAM param) {
RenderWidgetHostViewAura* widget =
reinterpret_cast<RenderWidgetHostViewAura*>(param);
HWND parent =
widget->GetNativeView()->GetRootWindow()->GetAcceleratedWidget();
if (GetProp(window, kWidgetOwnerProperty) == widget)
SetParent(window, parent);
return TRUE;
}
| BOOL CALLBACK ShowWindowsCallback(HWND window, LPARAM param) {
RenderWidgetHostViewAura* widget =
reinterpret_cast<RenderWidgetHostViewAura*>(param);
HWND parent =
widget->GetNativeView()->GetRootWindow()->GetAcceleratedWidget();
if (GetProp(window, kWidgetOwnerProperty) == widget)
SetParent(window, parent);
return TRUE;
}
| C | Chrome | 0 |
CVE-2017-11142 | https://www.cvedetails.com/cve/CVE-2017-11142/ | CWE-400 | https://github.com/php/php-src/commit/0f8cf3b8497dc45c010c44ed9e96518e11e19fc3 | 0f8cf3b8497dc45c010c44ed9e96518e11e19fc3 | Fix bug #73807 | PHPAPI void php_register_variable(char *var, char *strval, zval *track_vars_array TSRMLS_DC)
{
php_register_variable_safe(var, strval, strlen(strval), track_vars_array TSRMLS_CC);
}
| PHPAPI void php_register_variable(char *var, char *strval, zval *track_vars_array TSRMLS_DC)
{
php_register_variable_safe(var, strval, strlen(strval), track_vars_array TSRMLS_CC);
}
| C | php-src | 0 |
CVE-2013-4592 | https://www.cvedetails.com/cve/CVE-2013-4592/ | CWE-399 | https://github.com/torvalds/linux/commit/12d6e7538e2d418c08f082b1b44ffa5fb7270ed8 | 12d6e7538e2d418c08f082b1b44ffa5fb7270ed8 | KVM: perform an invalid memslot step for gpa base change
PPC must flush all translations before the new memory slot
is visible.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com> | static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
int need_tlb_flush = 0, idx;
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
/*
* The count increase must become visible at unlock time as no
* spte can be established without taking the mmu_lock and
* count is also read inside the mmu_lock critical section.
*/
kvm->mmu_notifier_count++;
need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
need_tlb_flush |= kvm->tlbs_dirty;
/* we've to flush the tlb before the pages can be freed */
if (need_tlb_flush)
kvm_flush_remote_tlbs(kvm);
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
}
| static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
int need_tlb_flush = 0, idx;
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
/*
* The count increase must become visible at unlock time as no
* spte can be established without taking the mmu_lock and
* count is also read inside the mmu_lock critical section.
*/
kvm->mmu_notifier_count++;
need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
need_tlb_flush |= kvm->tlbs_dirty;
/* we've to flush the tlb before the pages can be freed */
if (need_tlb_flush)
kvm_flush_remote_tlbs(kvm);
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
}
| C | linux | 0 |
CVE-2013-4483 | https://www.cvedetails.com/cve/CVE-2013-4483/ | CWE-189 | https://github.com/torvalds/linux/commit/6062a8dc0517bce23e3c2f7d2fea5e22411269a3 | 6062a8dc0517bce23e3c2f7d2fea5e22411269a3 | ipc,sem: fine grained locking for semtimedop
Introduce finer grained locking for semtimedop, to handle the common case
of a program wanting to manipulate one semaphore from an array with
multiple semaphores.
If the call is a semop manipulating just one semaphore in an array with
multiple semaphores, only take the lock for that semaphore itself.
If the call needs to manipulate multiple semaphores, or another caller is
in a transaction that manipulates multiple semaphores, the sem_array lock
is taken, as well as all the locks for the individual semaphores.
On a 24 CPU system, performance numbers with the semop-multi
test with N threads and N semaphores, look like this:
vanilla Davidlohr's Davidlohr's + Davidlohr's +
threads patches rwlock patches v3 patches
10 610652 726325 1783589 2142206
20 341570 365699 1520453 1977878
30 288102 307037 1498167 2037995
40 290714 305955 1612665 2256484
50 288620 312890 1733453 2650292
60 289987 306043 1649360 2388008
70 291298 306347 1723167 2717486
80 290948 305662 1729545 2763582
90 290996 306680 1736021 2757524
100 292243 306700 1773700 3059159
[davidlohr.bueso@hp.com: do not call sem_lock when bogus sma]
[davidlohr.bueso@hp.com: make refcounter atomic]
Signed-off-by: Rik van Riel <riel@redhat.com>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Davidlohr Bueso <davidlohr.bueso@hp.com>
Cc: Chegu Vinod <chegu_vinod@hp.com>
Cc: Jason Low <jason.low2@hp.com>
Reviewed-by: Michel Lespinasse <walken@google.com>
Cc: Peter Hurley <peter@hurleysoftware.com>
Cc: Stanislav Kinsbursky <skinsbursky@parallels.com>
Tested-by: Emmanuel Benisty <benisty.e@gmail.com>
Tested-by: Sedat Dilek <sedat.dilek@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> | void sem_exit_ns(struct ipc_namespace *ns)
{
free_ipcs(ns, &sem_ids(ns), freeary);
idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
| void sem_exit_ns(struct ipc_namespace *ns)
{
free_ipcs(ns, &sem_ids(ns), freeary);
idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
| C | linux | 0 |
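The commit message above describes the lock-granularity decision in prose; a minimal C sketch of that decision follows. This is a hedged reconstruction, not the patch verbatim: it assumes each struct sem carries its own spinlock and that sma->complex_count counts in-flight multi-semaphore transactions, as the message implies.

static inline void sem_lock_sketch(struct sem_array *sma,
				   struct sembuf *sops, int nsops)
{
	if (nsops == 1 && !sma->complex_count) {
		/* common case: one semaphore touched, no complex op
		 * pending -> take only that semaphore's lock */
		spin_lock(&sma->sem_base[sops->sem_num].lock);
	} else {
		/* multi-semaphore op: take the array-wide lock, then
		 * every per-semaphore lock */
		int i;

		spin_lock(&sma->sem_perm.lock);
		for (i = 0; i < sma->sem_nsems; i++)
			spin_lock(&sma->sem_base[i].lock);
	}
}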
null | null | null | https://github.com/chromium/chromium/commit/511d0a0a31a54e0cc0f15cb1b977dc9f9b20f0d3 | 511d0a0a31a54e0cc0f15cb1b977dc9f9b20f0d3 | Implement new websocket handshake based on draft-hixie-thewebsocketprotocol-76
BUG=none
TEST=net_unittests passes
Review URL: http://codereview.chromium.org/1108002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@42736 0039d316-1c4b-4281-b951-d872f2087c98 | void WebSocket::SkipReadBuffer(int len) {
if (len == 0)
return;
DCHECK_GT(len, 0);
read_consumed_len_ += len;
int remaining = current_read_buf_->offset() - read_consumed_len_;
DCHECK_GE(remaining, 0);
if (remaining < read_consumed_len_ &&
current_read_buf_->RemainingCapacity() < read_consumed_len_) {
memmove(current_read_buf_->StartOfBuffer(),
current_read_buf_->StartOfBuffer() + read_consumed_len_,
remaining);
read_consumed_len_ = 0;
current_read_buf_->set_offset(remaining);
}
}
| void WebSocket::SkipReadBuffer(int len) {
if (len == 0)
return;
DCHECK_GT(len, 0);
read_consumed_len_ += len;
int remaining = current_read_buf_->offset() - read_consumed_len_;
DCHECK_GE(remaining, 0);
if (remaining < read_consumed_len_ &&
current_read_buf_->RemainingCapacity() < read_consumed_len_) {
memmove(current_read_buf_->StartOfBuffer(),
current_read_buf_->StartOfBuffer() + read_consumed_len_,
remaining);
read_consumed_len_ = 0;
current_read_buf_->set_offset(remaining);
}
}
| C | Chrome | 0 |
CVE-2016-5225 | https://www.cvedetails.com/cve/CVE-2016-5225/ | CWE-19 | https://github.com/chromium/chromium/commit/4ac4aff49c4c539bce6d8a0d8800c01324bb6bc0 | 4ac4aff49c4c539bce6d8a0d8800c01324bb6bc0 | Enforce form-action CSP even when form.target is present.
BUG=630332
Review-Url: https://codereview.chromium.org/2464123004
Cr-Commit-Position: refs/heads/master@{#429922} | bool HTMLFormElement::isURLAttribute(const Attribute& attribute) const {
return attribute.name() == actionAttr ||
HTMLElement::isURLAttribute(attribute);
}
| bool HTMLFormElement::isURLAttribute(const Attribute& attribute) const {
return attribute.name() == actionAttr ||
HTMLElement::isURLAttribute(attribute);
}
| C | Chrome | 0 |
CVE-2011-2918 | https://www.cvedetails.com/cve/CVE-2011-2918/ | CWE-399 | https://github.com/torvalds/linux/commit/a8b0ca17b80e92faab46ee7179ba9e99ccb61233 | a8b0ca17b80e92faab46ee7179ba9e99ccb61233 | perf: Remove the nmi parameter from the swevent and overflow interface
The nmi parameter indicated if we could do wakeups from the current
context, if not, we would set some state and self-IPI and let the
resulting interrupt do the wakeup.
For the various event classes:
- hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from
the PMI-tail (ARM etc.)
- tracepoint: nmi=0; since tracepoint could be from NMI context.
- software: nmi=[0,1]; some, like the schedule thing cannot
perform wakeups, and hence need 0.
As one can see, there is very little nmi=1 usage, and the down-side of
not using it is that on some platforms some software events can have a
jiffy delay in wakeup (when arch_irq_work_raise isn't implemented).
The up-side however is that we can remove the nmi parameter and save a
bunch of conditionals in fast paths.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Michael Cree <mcree@orcon.net.nz>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jason Wessel <jason.wessel@windriver.com>
Cc: Don Zickus <dzickus@redhat.com>
Link: http://lkml.kernel.org/n/tip-agjev8eu666tvknpb3iaj0fg@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu> | void __init trap_init(void)
{
extern char except_vec3_generic, except_vec3_r4000;
extern char except_vec4;
unsigned long i;
int rollback;
check_wait();
rollback = (cpu_wait == r4k_wait);
#if defined(CONFIG_KGDB)
if (kgdb_early_setup)
return; /* Already done */
#endif
if (cpu_has_veic || cpu_has_vint) {
unsigned long size = 0x200 + VECTORSPACING*64;
ebase = (unsigned long)
__alloc_bootmem(size, 1 << fls(size), 0);
} else {
ebase = CKSEG0;
if (cpu_has_mips_r2)
ebase += (read_c0_ebase() & 0x3ffff000);
}
per_cpu_trap_init();
/*
* Copy the generic exception handlers to their final destination.
* This will be overriden later as suitable for a particular
* configuration.
*/
set_handler(0x180, &except_vec3_generic, 0x80);
/*
* Setup default vectors
*/
for (i = 0; i <= 31; i++)
set_except_vector(i, handle_reserved);
/*
* Copy the EJTAG debug exception vector handler code to it's final
* destination.
*/
if (cpu_has_ejtag && board_ejtag_handler_setup)
board_ejtag_handler_setup();
/*
* Only some CPUs have the watch exceptions.
*/
if (cpu_has_watch)
set_except_vector(23, handle_watch);
/*
* Initialise interrupt handlers
*/
if (cpu_has_veic || cpu_has_vint) {
int nvec = cpu_has_veic ? 64 : 8;
for (i = 0; i < nvec; i++)
set_vi_handler(i, NULL);
}
else if (cpu_has_divec)
set_handler(0x200, &except_vec4, 0x8);
/*
* Some CPUs can enable/disable for cache parity detection, but does
* it different ways.
*/
parity_protection_init();
/*
* The Data Bus Errors / Instruction Bus Errors are signaled
* by external hardware. Therefore these two exceptions
* may have board specific handlers.
*/
if (board_be_init)
board_be_init();
set_except_vector(0, rollback ? rollback_handle_int : handle_int);
set_except_vector(1, handle_tlbm);
set_except_vector(2, handle_tlbl);
set_except_vector(3, handle_tlbs);
set_except_vector(4, handle_adel);
set_except_vector(5, handle_ades);
set_except_vector(6, handle_ibe);
set_except_vector(7, handle_dbe);
set_except_vector(8, handle_sys);
set_except_vector(9, handle_bp);
set_except_vector(10, rdhwr_noopt ? handle_ri :
(cpu_has_vtag_icache ?
handle_ri_rdhwr_vivt : handle_ri_rdhwr));
set_except_vector(11, handle_cpu);
set_except_vector(12, handle_ov);
set_except_vector(13, handle_tr);
if (current_cpu_type() == CPU_R6000 ||
current_cpu_type() == CPU_R6000A) {
/*
* The R6000 is the only R-series CPU that features a machine
* check exception (similar to the R4000 cache error) and
* unaligned ldc1/sdc1 exception. The handlers have not been
* written yet. Well, anyway there is no R6000 machine on the
* current list of targets for Linux/MIPS.
* (Duh, crap, there is someone with a triple R6k machine)
*/
}
if (board_nmi_handler_setup)
board_nmi_handler_setup();
if (cpu_has_fpu && !cpu_has_nofpuex)
set_except_vector(15, handle_fpe);
set_except_vector(22, handle_mdmx);
if (cpu_has_mcheck)
set_except_vector(24, handle_mcheck);
if (cpu_has_mipsmt)
set_except_vector(25, handle_mt);
set_except_vector(26, handle_dsp);
if (cpu_has_vce)
/* Special exception: R4[04]00 uses also the divec space. */
memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100);
else if (cpu_has_4kex)
memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80);
else
memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80);
local_flush_icache_range(ebase, ebase + 0x400);
flush_tlb_handlers();
sort_extable(__start___dbe_table, __stop___dbe_table);
cu2_notifier(default_cu2_call, 0x80000000); /* Run last */
}
| void __init trap_init(void)
{
extern char except_vec3_generic, except_vec3_r4000;
extern char except_vec4;
unsigned long i;
int rollback;
check_wait();
rollback = (cpu_wait == r4k_wait);
#if defined(CONFIG_KGDB)
if (kgdb_early_setup)
return; /* Already done */
#endif
if (cpu_has_veic || cpu_has_vint) {
unsigned long size = 0x200 + VECTORSPACING*64;
ebase = (unsigned long)
__alloc_bootmem(size, 1 << fls(size), 0);
} else {
ebase = CKSEG0;
if (cpu_has_mips_r2)
ebase += (read_c0_ebase() & 0x3ffff000);
}
per_cpu_trap_init();
/*
* Copy the generic exception handlers to their final destination.
* This will be overriden later as suitable for a particular
* configuration.
*/
set_handler(0x180, &except_vec3_generic, 0x80);
/*
* Setup default vectors
*/
for (i = 0; i <= 31; i++)
set_except_vector(i, handle_reserved);
/*
* Copy the EJTAG debug exception vector handler code to it's final
* destination.
*/
if (cpu_has_ejtag && board_ejtag_handler_setup)
board_ejtag_handler_setup();
/*
* Only some CPUs have the watch exceptions.
*/
if (cpu_has_watch)
set_except_vector(23, handle_watch);
/*
* Initialise interrupt handlers
*/
if (cpu_has_veic || cpu_has_vint) {
int nvec = cpu_has_veic ? 64 : 8;
for (i = 0; i < nvec; i++)
set_vi_handler(i, NULL);
}
else if (cpu_has_divec)
set_handler(0x200, &except_vec4, 0x8);
/*
* Some CPUs can enable/disable for cache parity detection, but does
* it different ways.
*/
parity_protection_init();
/*
* The Data Bus Errors / Instruction Bus Errors are signaled
* by external hardware. Therefore these two exceptions
* may have board specific handlers.
*/
if (board_be_init)
board_be_init();
set_except_vector(0, rollback ? rollback_handle_int : handle_int);
set_except_vector(1, handle_tlbm);
set_except_vector(2, handle_tlbl);
set_except_vector(3, handle_tlbs);
set_except_vector(4, handle_adel);
set_except_vector(5, handle_ades);
set_except_vector(6, handle_ibe);
set_except_vector(7, handle_dbe);
set_except_vector(8, handle_sys);
set_except_vector(9, handle_bp);
set_except_vector(10, rdhwr_noopt ? handle_ri :
(cpu_has_vtag_icache ?
handle_ri_rdhwr_vivt : handle_ri_rdhwr));
set_except_vector(11, handle_cpu);
set_except_vector(12, handle_ov);
set_except_vector(13, handle_tr);
if (current_cpu_type() == CPU_R6000 ||
current_cpu_type() == CPU_R6000A) {
/*
* The R6000 is the only R-series CPU that features a machine
* check exception (similar to the R4000 cache error) and
* unaligned ldc1/sdc1 exception. The handlers have not been
* written yet. Well, anyway there is no R6000 machine on the
* current list of targets for Linux/MIPS.
* (Duh, crap, there is someone with a triple R6k machine)
*/
}
if (board_nmi_handler_setup)
board_nmi_handler_setup();
if (cpu_has_fpu && !cpu_has_nofpuex)
set_except_vector(15, handle_fpe);
set_except_vector(22, handle_mdmx);
if (cpu_has_mcheck)
set_except_vector(24, handle_mcheck);
if (cpu_has_mipsmt)
set_except_vector(25, handle_mt);
set_except_vector(26, handle_dsp);
if (cpu_has_vce)
/* Special exception: R4[04]00 uses also the divec space. */
memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100);
else if (cpu_has_4kex)
memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80);
else
memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80);
local_flush_icache_range(ebase, ebase + 0x400);
flush_tlb_handlers();
sort_extable(__start___dbe_table, __stop___dbe_table);
cu2_notifier(default_cu2_call, 0x80000000); /* Run last */
}
| C | linux | 0 |
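The interface change the commit message above describes is easier to see as a signature diff than as prose. A hedged reconstruction of the swevent entry point, assuming it matched the kernel of that era:

/* before: every caller had to say whether it was in NMI context */
void perf_sw_event(u32 event_id, u64 nr, int nmi,
		   struct pt_regs *regs, u64 addr);

/* after: the parameter is gone, and the few nmi=1 call sites lose
 * a conditional on their fast path */
void perf_sw_event(u32 event_id, u64 nr,
		   struct pt_regs *regs, u64 addr);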
null | null | null | https://github.com/chromium/chromium/commit/9d02cda7a634fbd6e53d98091f618057f0174387 | 9d02cda7a634fbd6e53d98091f618057f0174387 | Coverity: Fixing pass by value.
CID=101462, 101458, 101437, 101471, 101467
BUG=NONE
TEST=NONE
Review URL: http://codereview.chromium.org/9006023
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@115257 0039d316-1c4b-4281-b951-d872f2087c98 | void FreeCopyOfAddrinfo(struct addrinfo* info) {
DCHECK(info);
if (info->ai_canonname)
free(info->ai_canonname); // Allocated by strdup.
if (info->ai_addr)
delete [] reinterpret_cast<char*>(info->ai_addr);
struct addrinfo* next = info->ai_next;
delete info;
if (next)
FreeCopyOfAddrinfo(next);
}
| void FreeCopyOfAddrinfo(struct addrinfo* info) {
DCHECK(info);
if (info->ai_canonname)
free(info->ai_canonname); // Allocated by strdup.
if (info->ai_addr)
delete [] reinterpret_cast<char*>(info->ai_addr);
struct addrinfo* next = info->ai_next;
delete info;
if (next)
FreeCopyOfAddrinfo(next);
}
| C | Chrome | 0 |
CVE-2017-15951 | https://www.cvedetails.com/cve/CVE-2017-15951/ | CWE-20 | https://github.com/torvalds/linux/commit/363b02dab09b3226f3bd1420dad9c72b79a42a76 | 363b02dab09b3226f3bd1420dad9c72b79a42a76 | KEYS: Fix race between updating and finding a negative key
Consolidate KEY_FLAG_INSTANTIATED, KEY_FLAG_NEGATIVE and the rejection
error into one field such that:
(1) The instantiation state can be modified/read atomically.
(2) The error can be accessed atomically with the state.
(3) The error isn't stored unioned with the payload pointers.
This deals with the problem that the state is spread over three different
objects (two bits and a separate variable) and reading or updating them
atomically isn't practical, given that not only can uninstantiated keys
change into instantiated or rejected keys, but rejected keys can also turn
into instantiated keys - and someone accessing the key might not be using
any locking.
The main side effect of this problem is that what was held in the payload
may change, depending on the state. For instance, you might observe the
key to be in the rejected state. You then read the cached error, but if
the key semaphore wasn't locked, the key might've become instantiated
between the two reads - and you might now have something in hand that isn't
actually an error code.
The state is now KEY_IS_UNINSTANTIATED, KEY_IS_POSITIVE or a negative error
code if the key is negatively instantiated. The key_is_instantiated()
function is replaced with key_is_positive() to avoid confusion as negative
keys are also 'instantiated'.
Additionally, barriering is included:
(1) Order payload-set before state-set during instantiation.
(2) Order state-read before payload-read when using the key.
Further separate barriering is necessary if RCU is being used to access the
payload content after reading the payload pointers.
Fixes: 146aa8b1453b ("KEYS: Merge the type-specific data with the payload data")
Cc: stable@vger.kernel.org # v4.4+
Reported-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Eric Biggers <ebiggers@google.com> | struct key *key_get_instantiation_authkey(key_serial_t target_id)
{
char description[16];
struct keyring_search_context ctx = {
.index_key.type = &key_type_request_key_auth,
.index_key.description = description,
.cred = current_cred(),
.match_data.cmp = key_default_cmp,
.match_data.raw_data = description,
.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
.flags = KEYRING_SEARCH_DO_STATE_CHECK,
};
struct key *authkey;
key_ref_t authkey_ref;
sprintf(description, "%x", target_id);
authkey_ref = search_process_keyrings(&ctx);
if (IS_ERR(authkey_ref)) {
authkey = ERR_CAST(authkey_ref);
if (authkey == ERR_PTR(-EAGAIN))
authkey = ERR_PTR(-ENOKEY);
goto error;
}
authkey = key_ref_to_ptr(authkey_ref);
if (test_bit(KEY_FLAG_REVOKED, &authkey->flags)) {
key_put(authkey);
authkey = ERR_PTR(-EKEYREVOKED);
}
error:
return authkey;
}
| struct key *key_get_instantiation_authkey(key_serial_t target_id)
{
char description[16];
struct keyring_search_context ctx = {
.index_key.type = &key_type_request_key_auth,
.index_key.description = description,
.cred = current_cred(),
.match_data.cmp = key_default_cmp,
.match_data.raw_data = description,
.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
.flags = KEYRING_SEARCH_DO_STATE_CHECK,
};
struct key *authkey;
key_ref_t authkey_ref;
sprintf(description, "%x", target_id);
authkey_ref = search_process_keyrings(&ctx);
if (IS_ERR(authkey_ref)) {
authkey = ERR_CAST(authkey_ref);
if (authkey == ERR_PTR(-EAGAIN))
authkey = ERR_PTR(-ENOKEY);
goto error;
}
authkey = key_ref_to_ptr(authkey_ref);
if (test_bit(KEY_FLAG_REVOKED, &authkey->flags)) {
key_put(authkey);
authkey = ERR_PTR(-EKEYREVOKED);
}
error:
return authkey;
}
| C | linux | 0 |
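The barriering rules (1) and (2) in the commit message above amount to a release/acquire pair around the consolidated state field. A hedged sketch of the read side, assuming the state lives in a single int as described:

/* Read the instantiation state with acquire semantics so that a
 * subsequent payload read cannot be reordered before it (rule (2)
 * in the commit message above). */
static inline int key_read_state(const struct key *key)
{
	return smp_load_acquire(&key->state);
}

/* Replaces the old key_is_instantiated(): negative keys are also
 * "instantiated", so the meaningful test is positivity. */
static inline bool key_is_positive(const struct key *key)
{
	return key_read_state(key) == KEY_IS_POSITIVE;
}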
CVE-2013-4244 | https://www.cvedetails.com/cve/CVE-2013-4244/ | CWE-119 | https://github.com/vadz/libtiff/commit/ce6841d9e41d621ba23cf18b190ee6a23b2cc833 | ce6841d9e41d621ba23cf18b190ee6a23b2cc833 | fix possible OOB write in gif2tiff.c | makegamtab(float gam)
{
int i;
for(i=0; i<256; i++)
gamtab[i] = (unsigned short) (IMAX*pow(i/255.0,gam)+0.5);
}
| makegamtab(float gam)
{
int i;
for(i=0; i<256; i++)
gamtab[i] = (unsigned short) (IMAX*pow(i/255.0,gam)+0.5);
}
| C | libtiff | 0 |
CVE-2013-2858 | https://www.cvedetails.com/cve/CVE-2013-2858/ | CWE-416 | https://github.com/chromium/chromium/commit/828eab2216a765dea92575c290421c115b8ad028 | 828eab2216a765dea92575c290421c115b8ad028 | Added daily UMA for non-data-reduction-proxy data usage when the proxy is enabled.
BUG=325325
Review URL: https://codereview.chromium.org/106113002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@239897 0039d316-1c4b-4281-b951-d872f2087c98 | void ObserveKeychainEvents() {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
net::CertDatabase::GetInstance()->SetMessageLoopForKeychainEvents();
}
| void ObserveKeychainEvents() {
DCHECK(BrowserThread::CurrentlyOn(BrowserThread::UI));
net::CertDatabase::GetInstance()->SetMessageLoopForKeychainEvents();
}
| C | Chrome | 0 |
CVE-2016-5221 | https://www.cvedetails.com/cve/CVE-2016-5221/ | CWE-190 | https://github.com/chromium/chromium/commit/2a1d9fff62718d7175bf47c7903dda127ee0228c | 2a1d9fff62718d7175bf47c7903dda127ee0228c | [SendTabToSelf] Added logic to display an infobar for the feature.
This CL is one of many to come. It covers:
* Creation of the infobar from the SendTabToSelfInfoBarController
* Plumbed the call to create the infobar to the native code.
* Open the link when user taps on the link
In follow-up CLs, the following will be done:
* Instantiate the InfobarController in the ChromeActivity
* Listen for Model changes in the Controller
Bug: 949233,963193
Change-Id: I5df1359debb5f0f35c32c2df3b691bf9129cdeb8
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1604406
Reviewed-by: Tommy Nyquist <nyquist@chromium.org>
Reviewed-by: Avi Drissman <avi@chromium.org>
Reviewed-by: Mikel Astiz <mastiz@chromium.org>
Reviewed-by: sebsg <sebsg@chromium.org>
Reviewed-by: Jeffrey Cohen <jeffreycohen@chromium.org>
Reviewed-by: Matthew Jones <mdjones@chromium.org>
Commit-Queue: Tanya Gupta <tgupta@chromium.org>
Cr-Commit-Position: refs/heads/master@{#660854} | static void JNI_SendTabToSelfAndroidBridge_DismissEntry(
JNIEnv* env,
const JavaParamRef<jobject>& j_profile,
const JavaParamRef<jstring>& j_guid) {
SendTabToSelfModel* model = GetModel(j_profile);
if (model->IsReady()) {
const std::string guid = ConvertJavaStringToUTF8(env, j_guid);
model->DismissEntry(guid);
}
}
| static void JNI_SendTabToSelfAndroidBridge_DismissEntry(
JNIEnv* env,
const JavaParamRef<jobject>& j_profile,
const JavaParamRef<jstring>& j_guid) {
SendTabToSelfModel* model = GetModel(j_profile);
if (model->IsReady()) {
const std::string guid = ConvertJavaStringToUTF8(env, j_guid);
model->DismissEntry(guid);
}
}
| C | Chrome | 0 |
CVE-2018-1000039 | https://www.cvedetails.com/cve/CVE-2018-1000039/ | CWE-416 | http://git.ghostscript.com/?p=mupdf.git;a=commitdiff;h=4dcc6affe04368461310a21238f7e1871a752a05;hp=8ec561d1bccc46e9db40a9f61310cd8b3763914e | 4dcc6affe04368461310a21238f7e1871a752a05 | null | static void pdf_run_Ts(fz_context *ctx, pdf_processor *proc, float rise)
{
pdf_run_processor *pr = (pdf_run_processor *)proc;
pdf_gstate *gstate = pr->gstate + pr->gtop;
gstate->text.rise = rise;
}
| static void pdf_run_Ts(fz_context *ctx, pdf_processor *proc, float rise)
{
pdf_run_processor *pr = (pdf_run_processor *)proc;
pdf_gstate *gstate = pr->gstate + pr->gtop;
gstate->text.rise = rise;
}
| C | ghostscript | 0 |
CVE-2015-9016 | https://www.cvedetails.com/cve/CVE-2015-9016/ | CWE-362 | https://github.com/torvalds/linux/commit/0048b4837affd153897ed1222283492070027aa9 | 0048b4837affd153897ed1222283492070027aa9 | blk-mq: fix race between timeout and freeing request
Inside timeout handler, blk_mq_tag_to_rq() is called
to retrieve the request from one tag. This way is obviously
wrong because the request can be freed any time and some
fiedds of the request can't be trusted, then kernel oops
might be triggered[1].
Currently wrt. blk_mq_tag_to_rq(), the only special case is
that the flush request can share same tag with the request
cloned from, and the two requests can't be active at the same
time, so this patch fixes the above issue by updating tags->rqs[tag]
with the active request(either flush rq or the request cloned
from) of the tag.
Also blk_mq_tag_to_rq() gets much simplified with this patch.
Given blk_mq_tag_to_rq() is mainly for drivers and the caller must
make sure the request can't be freed, so in bt_for_each() this
helper is replaced with tags->rqs[tag].
[1] kernel oops log
[ 439.696220] BUG: unable to handle kernel NULL pointer dereference at 0000000000000158^M
[ 439.697162] IP: [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e^M
[ 439.700653] PGD 7ef765067 PUD 7ef764067 PMD 0 ^M
[ 439.700653] Oops: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC ^M
[ 439.700653] Dumping ftrace buffer:^M
[ 439.700653] (ftrace buffer empty)^M
[ 439.700653] Modules linked in: nbd ipv6 kvm_intel kvm serio_raw^M
[ 439.700653] CPU: 6 PID: 2779 Comm: stress-ng-sigfd Not tainted 4.2.0-rc5-next-20150805+ #265^M
[ 439.730500] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011^M
[ 439.730500] task: ffff880605308000 ti: ffff88060530c000 task.ti: ffff88060530c000^M
[ 439.730500] RIP: 0010:[<ffffffff812d89ba>] [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e^M
[ 439.730500] RSP: 0018:ffff880819203da0 EFLAGS: 00010283^M
[ 439.730500] RAX: ffff880811b0e000 RBX: ffff8800bb465f00 RCX: 0000000000000002^M
[ 439.730500] RDX: 0000000000000000 RSI: 0000000000000202 RDI: 0000000000000000^M
[ 439.730500] RBP: ffff880819203db0 R08: 0000000000000002 R09: 0000000000000000^M
[ 439.730500] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000202^M
[ 439.730500] R13: ffff880814104800 R14: 0000000000000002 R15: ffff880811a2ea00^M
[ 439.730500] FS: 00007f165b3f5740(0000) GS:ffff880819200000(0000) knlGS:0000000000000000^M
[ 439.730500] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b^M
[ 439.730500] CR2: 0000000000000158 CR3: 00000007ef766000 CR4: 00000000000006e0^M
[ 439.730500] Stack:^M
[ 439.730500] 0000000000000008 ffff8808114eed90 ffff880819203e00 ffffffff812dc104^M
[ 439.755663] ffff880819203e40 ffffffff812d9f5e 0000020000000000 ffff8808114eed80^M
[ 439.755663] Call Trace:^M
[ 439.755663] <IRQ> ^M
[ 439.755663] [<ffffffff812dc104>] bt_for_each+0x6e/0xc8^M
[ 439.755663] [<ffffffff812d9f5e>] ? blk_mq_rq_timed_out+0x6a/0x6a^M
[ 439.755663] [<ffffffff812d9f5e>] ? blk_mq_rq_timed_out+0x6a/0x6a^M
[ 439.755663] [<ffffffff812dc1b3>] blk_mq_tag_busy_iter+0x55/0x5e^M
[ 439.755663] [<ffffffff812d88b4>] ? blk_mq_bio_to_request+0x38/0x38^M
[ 439.755663] [<ffffffff812d8911>] blk_mq_rq_timer+0x5d/0xd4^M
[ 439.755663] [<ffffffff810a3e10>] call_timer_fn+0xf7/0x284^M
[ 439.755663] [<ffffffff810a3d1e>] ? call_timer_fn+0x5/0x284^M
[ 439.755663] [<ffffffff812d88b4>] ? blk_mq_bio_to_request+0x38/0x38^M
[ 439.755663] [<ffffffff810a46d6>] run_timer_softirq+0x1ce/0x1f8^M
[ 439.755663] [<ffffffff8104c367>] __do_softirq+0x181/0x3a4^M
[ 439.755663] [<ffffffff8104c76e>] irq_exit+0x40/0x94^M
[ 439.755663] [<ffffffff81031482>] smp_apic_timer_interrupt+0x33/0x3e^M
[ 439.755663] [<ffffffff815559a4>] apic_timer_interrupt+0x84/0x90^M
[ 439.755663] <EOI> ^M
[ 439.755663] [<ffffffff81554350>] ? _raw_spin_unlock_irq+0x32/0x4a^M
[ 439.755663] [<ffffffff8106a98b>] finish_task_switch+0xe0/0x163^M
[ 439.755663] [<ffffffff8106a94d>] ? finish_task_switch+0xa2/0x163^M
[ 439.755663] [<ffffffff81550066>] __schedule+0x469/0x6cd^M
[ 439.755663] [<ffffffff8155039b>] schedule+0x82/0x9a^M
[ 439.789267] [<ffffffff8119b28b>] signalfd_read+0x186/0x49a^M
[ 439.790911] [<ffffffff8106d86a>] ? wake_up_q+0x47/0x47^M
[ 439.790911] [<ffffffff811618c2>] __vfs_read+0x28/0x9f^M
[ 439.790911] [<ffffffff8117a289>] ? __fget_light+0x4d/0x74^M
[ 439.790911] [<ffffffff811620a7>] vfs_read+0x7a/0xc6^M
[ 439.790911] [<ffffffff8116292b>] SyS_read+0x49/0x7f^M
[ 439.790911] [<ffffffff81554c17>] entry_SYSCALL_64_fastpath+0x12/0x6f^M
[ 439.790911] Code: 48 89 e5 e8 a9 b8 e7 ff 5d c3 0f 1f 44 00 00 55 89
f2 48 89 e5 41 54 41 89 f4 53 48 8b 47 60 48 8b 1c d0 48 8b 7b 30 48 8b
53 38 <48> 8b 87 58 01 00 00 48 85 c0 75 09 48 8b 97 88 0c 00 00 eb 10
^M
[ 439.790911] RIP [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e^M
[ 439.790911] RSP <ffff880819203da0>^M
[ 439.790911] CR2: 0000000000000158^M
[ 439.790911] ---[ end trace d40af58949325661 ]---^M
Cc: <stable@vger.kernel.org>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com> | static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
{
struct request *rqa = container_of(a, struct request, queuelist);
struct request *rqb = container_of(b, struct request, queuelist);
return !(rqa->mq_ctx < rqb->mq_ctx ||
(rqa->mq_ctx == rqb->mq_ctx &&
blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}
| static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
{
struct request *rqa = container_of(a, struct request, queuelist);
struct request *rqb = container_of(b, struct request, queuelist);
return !(rqa->mq_ctx < rqb->mq_ctx ||
(rqa->mq_ctx == rqb->mq_ctx &&
blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}
| C | linux | 0 |
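Once tags->rqs[tag] is guaranteed to hold the currently active request for the tag (flush rq or the rq it was cloned from), the lookup the commit message calls "much simplified" reduces to an array index. A hedged sketch of the post-patch helper:

/* Valid only because the patch keeps tags->rqs[tag] pointing at
 * the active request; the caller must still ensure the request
 * cannot be freed while it looks at it. */
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
{
	return tags->rqs[tag];
}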
CVE-2016-8669 | https://www.cvedetails.com/cve/CVE-2016-8669/ | CWE-369 | https://git.qemu.org/?p=qemu.git;a=commit;h=3592fe0c919cf27a81d8e9f9b4f269553418bb01 | 3592fe0c919cf27a81d8e9f9b4f269553418bb01 | null | static void serial_update_irq(SerialState *s)
{
uint8_t tmp_iir = UART_IIR_NO_INT;
if ((s->ier & UART_IER_RLSI) && (s->lsr & UART_LSR_INT_ANY)) {
tmp_iir = UART_IIR_RLSI;
} else if ((s->ier & UART_IER_RDI) && s->timeout_ipending) {
/* Note that(s->ier & UART_IER_RDI) can mask this interrupt,
* this is not in the specification but is observed on existing
* hardware. */
tmp_iir = UART_IIR_CTI;
} else if ((s->ier & UART_IER_RDI) && (s->lsr & UART_LSR_DR) &&
(!(s->fcr & UART_FCR_FE) ||
s->recv_fifo.num >= s->recv_fifo_itl)) {
tmp_iir = UART_IIR_RDI;
} else if ((s->ier & UART_IER_THRI) && s->thr_ipending) {
tmp_iir = UART_IIR_THRI;
} else if ((s->ier & UART_IER_MSI) && (s->msr & UART_MSR_ANY_DELTA)) {
tmp_iir = UART_IIR_MSI;
}
s->iir = tmp_iir | (s->iir & 0xF0);
if (tmp_iir != UART_IIR_NO_INT) {
qemu_irq_raise(s->irq);
} else {
qemu_irq_lower(s->irq);
}
}
| static void serial_update_irq(SerialState *s)
{
uint8_t tmp_iir = UART_IIR_NO_INT;
if ((s->ier & UART_IER_RLSI) && (s->lsr & UART_LSR_INT_ANY)) {
tmp_iir = UART_IIR_RLSI;
} else if ((s->ier & UART_IER_RDI) && s->timeout_ipending) {
/* Note that(s->ier & UART_IER_RDI) can mask this interrupt,
* this is not in the specification but is observed on existing
* hardware. */
tmp_iir = UART_IIR_CTI;
} else if ((s->ier & UART_IER_RDI) && (s->lsr & UART_LSR_DR) &&
(!(s->fcr & UART_FCR_FE) ||
s->recv_fifo.num >= s->recv_fifo_itl)) {
tmp_iir = UART_IIR_RDI;
} else if ((s->ier & UART_IER_THRI) && s->thr_ipending) {
tmp_iir = UART_IIR_THRI;
} else if ((s->ier & UART_IER_MSI) && (s->msr & UART_MSR_ANY_DELTA)) {
tmp_iir = UART_IIR_MSI;
}
s->iir = tmp_iir | (s->iir & 0xF0);
if (tmp_iir != UART_IIR_NO_INT) {
qemu_irq_raise(s->irq);
} else {
qemu_irq_lower(s->irq);
}
}
| C | qemu | 0 |
CVE-2011-3087 | https://www.cvedetails.com/cve/CVE-2011-3087/ | null | https://github.com/chromium/chromium/commit/58436a1770176ece2c02b28a57bba2a89db5d58b | 58436a1770176ece2c02b28a57bba2a89db5d58b | Use a new scheme for swapping out RenderViews.
BUG=118664
TEST=none
Review URL: http://codereview.chromium.org/9720004
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@127986 0039d316-1c4b-4281-b951-d872f2087c98 | TestWebKitPlatformSupport::~TestWebKitPlatformSupport() {
}
| TestWebKitPlatformSupport::~TestWebKitPlatformSupport() {
}
| C | Chrome | 0 |
CVE-2017-18218 | https://www.cvedetails.com/cve/CVE-2017-18218/ | CWE-416 | https://github.com/torvalds/linux/commit/27463ad99f738ed93c7c8b3e2e5bc8c4853a2ff2 | 27463ad99f738ed93c7c8b3e2e5bc8c4853a2ff2 | net: hns: Fix a skb used after free bug
skb may be freed inside hns_nic_net_xmit_hw(), which still returns NETDEV_TX_OK,
causing hns_nic_net_xmit to use a freed skb.
BUG: KASAN: use-after-free in hns_nic_net_xmit_hw+0x62c/0x940...
[17659.112635] alloc_debug_processing+0x18c/0x1a0
[17659.117208] __slab_alloc+0x52c/0x560
[17659.120909] kmem_cache_alloc_node+0xac/0x2c0
[17659.125309] __alloc_skb+0x6c/0x260
[17659.128837] tcp_send_ack+0x8c/0x280
[17659.132449] __tcp_ack_snd_check+0x9c/0xf0
[17659.136587] tcp_rcv_established+0x5a4/0xa70
[17659.140899] tcp_v4_do_rcv+0x27c/0x620
[17659.144687] tcp_prequeue_process+0x108/0x170
[17659.149085] tcp_recvmsg+0x940/0x1020
[17659.152787] inet_recvmsg+0x124/0x180
[17659.156488] sock_recvmsg+0x64/0x80
[17659.160012] SyS_recvfrom+0xd8/0x180
[17659.163626] __sys_trace_return+0x0/0x4
[17659.167506] INFO: Freed in kfree_skbmem+0xa0/0xb0 age=23 cpu=1 pid=13
[17659.174000] free_debug_processing+0x1d4/0x2c0
[17659.178486] __slab_free+0x240/0x390
[17659.182100] kmem_cache_free+0x24c/0x270
[17659.186062] kfree_skbmem+0xa0/0xb0
[17659.189587] __kfree_skb+0x28/0x40
[17659.193025] napi_gro_receive+0x168/0x1c0
[17659.197074] hns_nic_rx_up_pro+0x58/0x90
[17659.201038] hns_nic_rx_poll_one+0x518/0xbc0
[17659.205352] hns_nic_common_poll+0x94/0x140
[17659.209576] net_rx_action+0x458/0x5e0
[17659.213363] __do_softirq+0x1b8/0x480
[17659.217062] run_ksoftirqd+0x64/0x80
[17659.220679] smpboot_thread_fn+0x224/0x310
[17659.224821] kthread+0x150/0x170
[17659.228084] ret_from_fork+0x10/0x40
BUG: KASAN: use-after-free in hns_nic_net_xmit+0x8c/0xc0...
[17751.080490] __slab_alloc+0x52c/0x560
[17751.084188] kmem_cache_alloc+0x244/0x280
[17751.088238] __build_skb+0x40/0x150
[17751.091764] build_skb+0x28/0x100
[17751.095115] __alloc_rx_skb+0x94/0x150
[17751.098900] __napi_alloc_skb+0x34/0x90
[17751.102776] hns_nic_rx_poll_one+0x180/0xbc0
[17751.107097] hns_nic_common_poll+0x94/0x140
[17751.111333] net_rx_action+0x458/0x5e0
[17751.115123] __do_softirq+0x1b8/0x480
[17751.118823] run_ksoftirqd+0x64/0x80
[17751.122437] smpboot_thread_fn+0x224/0x310
[17751.126575] kthread+0x150/0x170
[17751.129838] ret_from_fork+0x10/0x40
[17751.133454] INFO: Freed in kfree_skbmem+0xa0/0xb0 age=19 cpu=7 pid=43
[17751.139951] free_debug_processing+0x1d4/0x2c0
[17751.144436] __slab_free+0x240/0x390
[17751.148051] kmem_cache_free+0x24c/0x270
[17751.152014] kfree_skbmem+0xa0/0xb0
[17751.155543] __kfree_skb+0x28/0x40
[17751.159022] napi_gro_receive+0x168/0x1c0
[17751.163074] hns_nic_rx_up_pro+0x58/0x90
[17751.167041] hns_nic_rx_poll_one+0x518/0xbc0
[17751.171358] hns_nic_common_poll+0x94/0x140
[17751.175585] net_rx_action+0x458/0x5e0
[17751.179373] __do_softirq+0x1b8/0x480
[17751.183076] run_ksoftirqd+0x64/0x80
[17751.186691] smpboot_thread_fn+0x224/0x310
[17751.190826] kthread+0x150/0x170
[17751.194093] ret_from_fork+0x10/0x40
Fixes: 13ac695e7ea1 ("net:hns: Add support of Hip06 SoC to the Hislicon Network Subsystem")
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: lipeng <lipeng321@huawei.com>
Reported-by: Jun He <hjat2005@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | static void fill_tso_desc(struct hnae_ring *ring, void *priv,
int size, dma_addr_t dma, int frag_end,
int buf_num, enum hns_desc_type type, int mtu)
{
int frag_buf_num;
int sizeoflast;
int k;
frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
sizeoflast = size % BD_MAX_SEND_SIZE;
sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;
/* when the frag size is bigger than hardware, split this frag */
for (k = 0; k < frag_buf_num; k++)
fill_v2_desc(ring, priv,
(k == frag_buf_num - 1) ?
sizeoflast : BD_MAX_SEND_SIZE,
dma + BD_MAX_SEND_SIZE * k,
frag_end && (k == frag_buf_num - 1) ? 1 : 0,
buf_num,
(type == DESC_TYPE_SKB && !k) ?
DESC_TYPE_SKB : DESC_TYPE_PAGE,
mtu);
}
| static void fill_tso_desc(struct hnae_ring *ring, void *priv,
int size, dma_addr_t dma, int frag_end,
int buf_num, enum hns_desc_type type, int mtu)
{
int frag_buf_num;
int sizeoflast;
int k;
frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
sizeoflast = size % BD_MAX_SEND_SIZE;
sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;
/* when the frag size is bigger than hardware, split this frag */
for (k = 0; k < frag_buf_num; k++)
fill_v2_desc(ring, priv,
(k == frag_buf_num - 1) ?
sizeoflast : BD_MAX_SEND_SIZE,
dma + BD_MAX_SEND_SIZE * k,
frag_end && (k == frag_buf_num - 1) ? 1 : 0,
buf_num,
(type == DESC_TYPE_SKB && !k) ?
DESC_TYPE_SKB : DESC_TYPE_PAGE,
mtu);
}
| C | linux | 0 |
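The bug class above has a standard remedy: once a transmit helper may consume (and free) the skb, the caller must snapshot anything it still needs before the handoff. An illustrative C fragment of that pattern — not the driver's exact diff:

/* Snapshot skb fields BEFORE the call that may free the buffer;
 * never dereference skb afterwards. */
unsigned int len = skb->len;
int ret;

ret = hns_nic_net_xmit_hw(ndev, skb, ring_data);  /* may free skb */
if (ret == NETDEV_TX_OK) {
	ndev->stats.tx_bytes += len;   /* use the snapshot, not skb->len */
	ndev->stats.tx_packets++;
}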
CVE-2012-3552 | https://www.cvedetails.com/cve/CVE-2012-3552/ | CWE-362 | https://github.com/torvalds/linux/commit/f6d8bd051c391c1c0458a30b2a7abcd939329259 | f6d8bd051c391c1c0458a30b2a7abcd939329259 | inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We cant insert an rcu_head in struct ip_options since its included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net> | int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
{
const struct ip_options *sopt;
unsigned char *sptr, *dptr;
int soffset, doffset;
int optlen;
__be32 daddr;
memset(dopt, 0, sizeof(struct ip_options));
sopt = &(IPCB(skb)->opt);
if (sopt->optlen == 0)
return 0;
sptr = skb_network_header(skb);
dptr = dopt->__data;
daddr = skb_rtable(skb)->rt_spec_dst;
if (sopt->rr) {
optlen = sptr[sopt->rr+1];
soffset = sptr[sopt->rr+2];
dopt->rr = dopt->optlen + sizeof(struct iphdr);
memcpy(dptr, sptr+sopt->rr, optlen);
if (sopt->rr_needaddr && soffset <= optlen) {
if (soffset + 3 > optlen)
return -EINVAL;
dptr[2] = soffset + 4;
dopt->rr_needaddr = 1;
}
dptr += optlen;
dopt->optlen += optlen;
}
if (sopt->ts) {
optlen = sptr[sopt->ts+1];
soffset = sptr[sopt->ts+2];
dopt->ts = dopt->optlen + sizeof(struct iphdr);
memcpy(dptr, sptr+sopt->ts, optlen);
if (soffset <= optlen) {
if (sopt->ts_needaddr) {
if (soffset + 3 > optlen)
return -EINVAL;
dopt->ts_needaddr = 1;
soffset += 4;
}
if (sopt->ts_needtime) {
if (soffset + 3 > optlen)
return -EINVAL;
if ((dptr[3]&0xF) != IPOPT_TS_PRESPEC) {
dopt->ts_needtime = 1;
soffset += 4;
} else {
dopt->ts_needtime = 0;
if (soffset + 7 <= optlen) {
__be32 addr;
memcpy(&addr, dptr+soffset-1, 4);
if (inet_addr_type(dev_net(skb_dst(skb)->dev), addr) != RTN_UNICAST) {
dopt->ts_needtime = 1;
soffset += 8;
}
}
}
}
dptr[2] = soffset;
}
dptr += optlen;
dopt->optlen += optlen;
}
if (sopt->srr) {
unsigned char *start = sptr+sopt->srr;
__be32 faddr;
optlen = start[1];
soffset = start[2];
doffset = 0;
if (soffset > optlen)
soffset = optlen + 1;
soffset -= 4;
if (soffset > 3) {
memcpy(&faddr, &start[soffset-1], 4);
for (soffset-=4, doffset=4; soffset > 3; soffset-=4, doffset+=4)
memcpy(&dptr[doffset-1], &start[soffset-1], 4);
/*
* RFC1812 requires to fix illegal source routes.
*/
if (memcmp(&ip_hdr(skb)->saddr,
&start[soffset + 3], 4) == 0)
doffset -= 4;
}
if (doffset > 3) {
memcpy(&start[doffset-1], &daddr, 4);
dopt->faddr = faddr;
dptr[0] = start[0];
dptr[1] = doffset+3;
dptr[2] = 4;
dptr += doffset+3;
dopt->srr = dopt->optlen + sizeof(struct iphdr);
dopt->optlen += doffset+3;
dopt->is_strictroute = sopt->is_strictroute;
}
}
if (sopt->cipso) {
optlen = sptr[sopt->cipso+1];
dopt->cipso = dopt->optlen+sizeof(struct iphdr);
memcpy(dptr, sptr+sopt->cipso, optlen);
dptr += optlen;
dopt->optlen += optlen;
}
while (dopt->optlen & 3) {
*dptr++ = IPOPT_END;
dopt->optlen++;
}
return 0;
}
| int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
{
struct ip_options *sopt;
unsigned char *sptr, *dptr;
int soffset, doffset;
int optlen;
__be32 daddr;
memset(dopt, 0, sizeof(struct ip_options));
sopt = &(IPCB(skb)->opt);
if (sopt->optlen == 0) {
dopt->optlen = 0;
return 0;
}
sptr = skb_network_header(skb);
dptr = dopt->__data;
daddr = skb_rtable(skb)->rt_spec_dst;
if (sopt->rr) {
optlen = sptr[sopt->rr+1];
soffset = sptr[sopt->rr+2];
dopt->rr = dopt->optlen + sizeof(struct iphdr);
memcpy(dptr, sptr+sopt->rr, optlen);
if (sopt->rr_needaddr && soffset <= optlen) {
if (soffset + 3 > optlen)
return -EINVAL;
dptr[2] = soffset + 4;
dopt->rr_needaddr = 1;
}
dptr += optlen;
dopt->optlen += optlen;
}
if (sopt->ts) {
optlen = sptr[sopt->ts+1];
soffset = sptr[sopt->ts+2];
dopt->ts = dopt->optlen + sizeof(struct iphdr);
memcpy(dptr, sptr+sopt->ts, optlen);
if (soffset <= optlen) {
if (sopt->ts_needaddr) {
if (soffset + 3 > optlen)
return -EINVAL;
dopt->ts_needaddr = 1;
soffset += 4;
}
if (sopt->ts_needtime) {
if (soffset + 3 > optlen)
return -EINVAL;
if ((dptr[3]&0xF) != IPOPT_TS_PRESPEC) {
dopt->ts_needtime = 1;
soffset += 4;
} else {
dopt->ts_needtime = 0;
if (soffset + 7 <= optlen) {
__be32 addr;
memcpy(&addr, dptr+soffset-1, 4);
if (inet_addr_type(dev_net(skb_dst(skb)->dev), addr) != RTN_UNICAST) {
dopt->ts_needtime = 1;
soffset += 8;
}
}
}
}
dptr[2] = soffset;
}
dptr += optlen;
dopt->optlen += optlen;
}
if (sopt->srr) {
unsigned char * start = sptr+sopt->srr;
__be32 faddr;
optlen = start[1];
soffset = start[2];
doffset = 0;
if (soffset > optlen)
soffset = optlen + 1;
soffset -= 4;
if (soffset > 3) {
memcpy(&faddr, &start[soffset-1], 4);
for (soffset-=4, doffset=4; soffset > 3; soffset-=4, doffset+=4)
memcpy(&dptr[doffset-1], &start[soffset-1], 4);
/*
* RFC1812 requires to fix illegal source routes.
*/
if (memcmp(&ip_hdr(skb)->saddr,
&start[soffset + 3], 4) == 0)
doffset -= 4;
}
if (doffset > 3) {
memcpy(&start[doffset-1], &daddr, 4);
dopt->faddr = faddr;
dptr[0] = start[0];
dptr[1] = doffset+3;
dptr[2] = 4;
dptr += doffset+3;
dopt->srr = dopt->optlen + sizeof(struct iphdr);
dopt->optlen += doffset+3;
dopt->is_strictroute = sopt->is_strictroute;
}
}
if (sopt->cipso) {
optlen = sptr[sopt->cipso+1];
dopt->cipso = dopt->optlen+sizeof(struct iphdr);
memcpy(dptr, sptr+sopt->cipso, optlen);
dptr += optlen;
dopt->optlen += optlen;
}
while (dopt->optlen & 3) {
*dptr++ = IPOPT_END;
dopt->optlen++;
}
return 0;
}
| C | linux | 1 |
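The reader-side discipline implied by the commit message above (no refcount traffic; copy what you need under the read-side lock) looks roughly like this on a transmit path. A hedged sketch using the renamed field:

/* Dereference inet->inet_opt only under rcu_read_lock() and copy
 * out what is needed before unlocking, since the writer may free
 * the old ip_options_rcu after a grace period. */
struct ip_options_rcu *inet_opt;

rcu_read_lock();
inet_opt = rcu_dereference(inet->inet_opt);
if (inet_opt && inet_opt->opt.srr)
	daddr = inet_opt->opt.faddr;
rcu_read_unlock();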
CVE-2018-6066 | https://www.cvedetails.com/cve/CVE-2018-6066/ | CWE-200 | https://github.com/chromium/chromium/commit/fad67a5b73639d7211b24fd9bdb242e82039b765 | fad67a5b73639d7211b24fd9bdb242e82039b765 | Check CORS using PassesAccessControlCheck() with supplied SecurityOrigin
Partial revert of https://chromium-review.googlesource.com/535694.
Bug: 799477
Change-Id: I878bb9bcb83afaafe8601293db9aa644fc5929b3
Reviewed-on: https://chromium-review.googlesource.com/898427
Commit-Queue: Hiroshige Hayashizaki <hiroshige@chromium.org>
Reviewed-by: Kouhei Ueno <kouhei@chromium.org>
Reviewed-by: Yutaka Hirano <yhirano@chromium.org>
Reviewed-by: Takeshi Yoshino <tyoshino@chromium.org>
Cr-Commit-Position: refs/heads/master@{#535176} | bool ImageResource::ShouldShowPlaceholder() const {
if (RuntimeEnabledFeatures::ClientPlaceholdersForServerLoFiEnabled() &&
(GetResourceRequest().GetPreviewsState() &
WebURLRequest::kServerLoFiOn)) {
return true;
}
switch (placeholder_option_) {
case PlaceholderOption::kShowAndReloadPlaceholderAlways:
case PlaceholderOption::kShowAndDoNotReloadPlaceholder:
return true;
case PlaceholderOption::kReloadPlaceholderOnDecodeError:
case PlaceholderOption::kDoNotReloadPlaceholder:
return false;
}
NOTREACHED();
return false;
}
| bool ImageResource::ShouldShowPlaceholder() const {
if (RuntimeEnabledFeatures::ClientPlaceholdersForServerLoFiEnabled() &&
(GetResourceRequest().GetPreviewsState() &
WebURLRequest::kServerLoFiOn)) {
return true;
}
switch (placeholder_option_) {
case PlaceholderOption::kShowAndReloadPlaceholderAlways:
case PlaceholderOption::kShowAndDoNotReloadPlaceholder:
return true;
case PlaceholderOption::kReloadPlaceholderOnDecodeError:
case PlaceholderOption::kDoNotReloadPlaceholder:
return false;
}
NOTREACHED();
return false;
}
| C | Chrome | 0 |
CVE-2018-6096 | https://www.cvedetails.com/cve/CVE-2018-6096/ | null | https://github.com/chromium/chromium/commit/36f801fdbec07d116a6f4f07bb363f10897d6a51 | 36f801fdbec07d116a6f4f07bb363f10897d6a51 | If a page calls |window.focus()|, kick it out of fullscreen.
BUG=776418, 800056
Change-Id: I1880fe600e4814c073f247c43b1c1ac80c8fc017
Reviewed-on: https://chromium-review.googlesource.com/852378
Reviewed-by: Nasko Oskov <nasko@chromium.org>
Reviewed-by: Philip Jägenstedt <foolip@chromium.org>
Commit-Queue: Avi Drissman <avi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#533790} | bool ChromeClientImpl::RequestPointerLock(LocalFrame* frame) {
LocalFrame& local_root = frame->LocalFrameRoot();
return WebLocalFrameImpl::FromFrame(&local_root)
->FrameWidget()
->Client()
->RequestPointerLock();
}
| bool ChromeClientImpl::RequestPointerLock(LocalFrame* frame) {
LocalFrame& local_root = frame->LocalFrameRoot();
return WebLocalFrameImpl::FromFrame(&local_root)
->FrameWidget()
->Client()
->RequestPointerLock();
}
| C | Chrome | 0 |
CVE-2013-1929 | https://www.cvedetails.com/cve/CVE-2013-1929/ | CWE-119 | https://github.com/torvalds/linux/commit/715230a44310a8cf66fbfb5a46f9a62a9b2de424 | 715230a44310a8cf66fbfb5a46f9a62a9b2de424 | tg3: fix length overflow in VPD firmware parsing
Commit 184b89044fb6e2a74611dafa69b1dce0d98612c6 ("tg3: Use VPD fw version
when present") introduced VPD parsing that contained a potential length
overflow.
Limit the hardware's reported firmware string length (max 255 bytes) to
stay inside the driver's firmware string length (32 bytes). On overflow,
truncate the formatted firmware string instead of potentially overwriting
portions of the tg3 struct.
http://cansecwest.com/slides/2013/PrivateCore%20CSW%202013.pdf
Signed-off-by: Kees Cook <keescook@chromium.org>
Reported-by: Oded Horovitz <oded@privatecore.com>
Reported-by: Brad Spengler <spender@grsecurity.net>
Cc: stable@vger.kernel.org
Cc: Matt Carlson <mcarlson@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
dma_addr_t mapping, u32 len, u32 flags,
u32 mss, u32 vlan)
{
txbd->addr_hi = ((u64) mapping >> 32);
txbd->addr_lo = ((u64) mapping & 0xffffffff);
txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}
| static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
dma_addr_t mapping, u32 len, u32 flags,
u32 mss, u32 vlan)
{
txbd->addr_hi = ((u64) mapping >> 32);
txbd->addr_lo = ((u64) mapping & 0xffffffff);
txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}
| C | linux | 0 |
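The overflow described above (a device-reported string of up to 255 bytes versus a 32-byte driver buffer) is the classic case for a clamped, bounded copy. An illustrative sketch of the truncating fix:

/* Clamp the device-reported length and let a bounded format do the
 * copy, so VPD data can never overrun the 32-byte tp->fw_ver. */
if (len >= sizeof(tp->fw_ver))
	len = sizeof(tp->fw_ver) - 1;
snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, &vpd_data[j]);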
CVE-2012-6638 | https://www.cvedetails.com/cve/CVE-2012-6638/ | CWE-399 | https://github.com/torvalds/linux/commit/fdf5af0daf8019cec2396cdef8fb042d80fe71fa | fdf5af0daf8019cec2396cdef8fb042d80fe71fa | tcp: drop SYN+FIN messages
Denys Fedoryshchenko reported that SYN+FIN attacks were bringing his
linux machines to their limits.
Don't call conn_request() if the TCP flags include the SYN flag
Reported-by: Denys Fedoryshchenko <denys@visp.net.lb>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | static void tcp_update_reordering(struct sock *sk, const int metric,
const int ts)
{
struct tcp_sock *tp = tcp_sk(sk);
if (metric > tp->reordering) {
int mib_idx;
tp->reordering = min(TCP_MAX_REORDERING, metric);
/* This exciting event is worth to be remembered. 8) */
if (ts)
mib_idx = LINUX_MIB_TCPTSREORDER;
else if (tcp_is_reno(tp))
mib_idx = LINUX_MIB_TCPRENOREORDER;
else if (tcp_is_fack(tp))
mib_idx = LINUX_MIB_TCPFACKREORDER;
else
mib_idx = LINUX_MIB_TCPSACKREORDER;
NET_INC_STATS_BH(sock_net(sk), mib_idx);
#if FASTRETRANS_DEBUG > 1
printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
tp->reordering,
tp->fackets_out,
tp->sacked_out,
tp->undo_marker ? tp->undo_retrans : 0);
#endif
tcp_disable_fack(tp);
}
}
| static void tcp_update_reordering(struct sock *sk, const int metric,
const int ts)
{
struct tcp_sock *tp = tcp_sk(sk);
if (metric > tp->reordering) {
int mib_idx;
tp->reordering = min(TCP_MAX_REORDERING, metric);
/* This exciting event is worth to be remembered. 8) */
if (ts)
mib_idx = LINUX_MIB_TCPTSREORDER;
else if (tcp_is_reno(tp))
mib_idx = LINUX_MIB_TCPRENOREORDER;
else if (tcp_is_fack(tp))
mib_idx = LINUX_MIB_TCPFACKREORDER;
else
mib_idx = LINUX_MIB_TCPSACKREORDER;
NET_INC_STATS_BH(sock_net(sk), mib_idx);
#if FASTRETRANS_DEBUG > 1
printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
tp->reordering,
tp->fackets_out,
tp->sacked_out,
tp->undo_marker ? tp->undo_retrans : 0);
#endif
tcp_disable_fack(tp);
}
}
| C | linux | 0 |
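The fix itself is small: in the LISTEN-state dispatch, a segment carrying both SYN and FIN is discarded before conn_request() ever runs. A hedged reconstruction of the added check:

/* Inside the TCP_LISTEN case of tcp_rcv_state_process(): */
if (th->syn) {
	if (th->fin)
		goto discard;   /* SYN+FIN: drop instead of
				 * calling conn_request() */
	if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
		return 1;
	/* normal SYN handling continues */
}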
CVE-2017-17862 | https://www.cvedetails.com/cve/CVE-2017-17862/ | CWE-20 | https://github.com/torvalds/linux/commit/c131187db2d3fa2f8bf32fdf4e9a4ef805168467 | c131187db2d3fa2f8bf32fdf4e9a4ef805168467 | bpf: fix branch pruning logic
when the verifier detects that register contains a runtime constant
and it's compared with another constant it will prune exploration
of the branch that is guaranteed not to be taken at runtime.
This is all correct, but malicious program may be constructed
in such a way that it always has a constant comparison and
the other branch is never taken under any conditions.
In this case such path through the program will not be explored
by the verifier. It won't be taken at run-time either, but since
all instructions are JITed the malicious program may cause JITs
to complain about using reserved fields, etc.
To fix the issue we have to track the instructions explored by
the verifier and sanitize instructions that are dead at run time
with NOPs. We cannot reject such dead code, since llvm generates
it for valid C code, since it doesn't do as much data flow
analysis as the verifier does.
Fixes: 17a5267067f3 ("bpf: verifier (add verifier core)")
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> | static __printf(2, 3) void verbose(struct bpf_verifier_env *env,
const char *fmt, ...)
{
struct bpf_verifer_log *log = &env->log;
unsigned int n;
va_list args;
if (!log->level || !log->ubuf || bpf_verifier_log_full(log))
return;
va_start(args, fmt);
n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
va_end(args);
WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
"verifier log line truncated - local buffer too short\n");
n = min(log->len_total - log->len_used - 1, n);
log->kbuf[n] = '\0';
if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
log->len_used += n;
else
log->ubuf = NULL;
}
| static __printf(2, 3) void verbose(struct bpf_verifier_env *env,
const char *fmt, ...)
{
struct bpf_verifer_log *log = &env->log;
unsigned int n;
va_list args;
if (!log->level || !log->ubuf || bpf_verifier_log_full(log))
return;
va_start(args, fmt);
n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
va_end(args);
WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
"verifier log line truncated - local buffer too short\n");
n = min(log->len_total - log->len_used - 1, n);
log->kbuf[n] = '\0';
if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
log->len_used += n;
else
log->ubuf = NULL;
}
| C | linux | 0 |
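Tracking explored instructions and NOP-ing the rest, as the commit message above describes, fits in a short pass. A hedged sketch, assuming a per-instruction `seen` flag that the main verification loop sets as it walks each path:

/* Any instruction the verifier never walked is provably
 * unreachable at run time, so replace it with a harmless
 * mov r0,r0 before the program reaches the JIT. */
static void sanitize_dead_code(struct bpf_verifier_env *env)
{
	struct bpf_insn_aux_data *aux = env->insn_aux_data;
	struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
	struct bpf_insn *insn = env->prog->insnsi;
	int i;

	for (i = 0; i < env->prog->len; i++)
		if (!aux[i].seen)
			memcpy(insn + i, &nop, sizeof(nop));
}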
CVE-2014-3171 | https://www.cvedetails.com/cve/CVE-2014-3171/ | null | https://github.com/chromium/chromium/commit/d10a8dac48d3a9467e81c62cb45208344f4542db | d10a8dac48d3a9467e81c62cb45208344f4542db | Replace further questionable HashMap::add usages in bindings
BUG=390928
R=dcarney@chromium.org
Review URL: https://codereview.chromium.org/411273002
git-svn-id: svn://svn.chromium.org/blink/trunk@178823 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | bool doReadAlgorithmId(blink::WebCryptoAlgorithmId& id)
{
uint32_t rawId;
if (!doReadUint32(&rawId))
return false;
switch (static_cast<CryptoKeyAlgorithmTag>(rawId)) {
case AesCbcTag:
id = blink::WebCryptoAlgorithmIdAesCbc;
return true;
case HmacTag:
id = blink::WebCryptoAlgorithmIdHmac;
return true;
case RsaSsaPkcs1v1_5Tag:
id = blink::WebCryptoAlgorithmIdRsaSsaPkcs1v1_5;
return true;
case Sha1Tag:
id = blink::WebCryptoAlgorithmIdSha1;
return true;
case Sha256Tag:
id = blink::WebCryptoAlgorithmIdSha256;
return true;
case Sha384Tag:
id = blink::WebCryptoAlgorithmIdSha384;
return true;
case Sha512Tag:
id = blink::WebCryptoAlgorithmIdSha512;
return true;
case AesGcmTag:
id = blink::WebCryptoAlgorithmIdAesGcm;
return true;
case RsaOaepTag:
id = blink::WebCryptoAlgorithmIdRsaOaep;
return true;
case AesCtrTag:
id = blink::WebCryptoAlgorithmIdAesCtr;
return true;
case AesKwTag:
id = blink::WebCryptoAlgorithmIdAesKw;
return true;
}
return false;
}
| bool doReadAlgorithmId(blink::WebCryptoAlgorithmId& id)
{
uint32_t rawId;
if (!doReadUint32(&rawId))
return false;
switch (static_cast<CryptoKeyAlgorithmTag>(rawId)) {
case AesCbcTag:
id = blink::WebCryptoAlgorithmIdAesCbc;
return true;
case HmacTag:
id = blink::WebCryptoAlgorithmIdHmac;
return true;
case RsaSsaPkcs1v1_5Tag:
id = blink::WebCryptoAlgorithmIdRsaSsaPkcs1v1_5;
return true;
case Sha1Tag:
id = blink::WebCryptoAlgorithmIdSha1;
return true;
case Sha256Tag:
id = blink::WebCryptoAlgorithmIdSha256;
return true;
case Sha384Tag:
id = blink::WebCryptoAlgorithmIdSha384;
return true;
case Sha512Tag:
id = blink::WebCryptoAlgorithmIdSha512;
return true;
case AesGcmTag:
id = blink::WebCryptoAlgorithmIdAesGcm;
return true;
case RsaOaepTag:
id = blink::WebCryptoAlgorithmIdRsaOaep;
return true;
case AesCtrTag:
id = blink::WebCryptoAlgorithmIdAesCtr;
return true;
case AesKwTag:
id = blink::WebCryptoAlgorithmIdAesKw;
return true;
}
return false;
}
| C | Chrome | 0 |
null | null | null | https://github.com/chromium/chromium/commit/283fb25624bf253d120708152e23cf9143519198 | 283fb25624bf253d120708152e23cf9143519198 | Coverity; Fixing pass by value bugs.
CID=101466, 101464, 101494, 101495, 101496, 101497
BUG=NONE
TEST=NONE
Review URL: http://codereview.chromium.org/8956046
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@115399 0039d316-1c4b-4281-b951-d872f2087c98 | InfoBarDelegate* ExtensionInstallUI::GetNewThemeInstalledInfoBarDelegate(
TabContentsWrapper* tab_contents,
const Extension* new_theme,
const std::string& previous_theme_id,
bool previous_using_native_theme) {
Profile* profile = tab_contents->profile();
return new ThemeInstalledInfoBarDelegate(
tab_contents->infobar_tab_helper(),
profile->GetExtensionService(),
ThemeServiceFactory::GetForProfile(profile),
new_theme,
previous_theme_id,
previous_using_native_theme);
}
| InfoBarDelegate* ExtensionInstallUI::GetNewThemeInstalledInfoBarDelegate(
TabContentsWrapper* tab_contents,
const Extension* new_theme,
const std::string& previous_theme_id,
bool previous_using_native_theme) {
Profile* profile = tab_contents->profile();
return new ThemeInstalledInfoBarDelegate(
tab_contents->infobar_tab_helper(),
profile->GetExtensionService(),
ThemeServiceFactory::GetForProfile(profile),
new_theme,
previous_theme_id,
previous_using_native_theme);
}
| C | Chrome | 0 |
CVE-2018-6198 | https://www.cvedetails.com/cve/CVE-2018-6198/ | CWE-59 | https://github.com/tats/w3m/commit/18dcbadf2771cdb0c18509b14e4e73505b242753 | 18dcbadf2771cdb0c18509b14e4e73505b242753 | Make temporary directory safely when ~/.w3m is unwritable | change_charset(struct parsed_tagarg *arg)
{
Buffer *buf = Currentbuf->linkBuffer[LB_N_INFO];
wc_ces charset;
if (buf == NULL)
return;
delBuffer(Currentbuf);
Currentbuf = buf;
if (Currentbuf->bufferprop & BP_INTERNAL)
return;
charset = Currentbuf->document_charset;
for (; arg; arg = arg->next) {
if (!strcmp(arg->arg, "charset"))
charset = atoi(arg->value);
}
_docCSet(charset);
}
| change_charset(struct parsed_tagarg *arg)
{
Buffer *buf = Currentbuf->linkBuffer[LB_N_INFO];
wc_ces charset;
if (buf == NULL)
return;
delBuffer(Currentbuf);
Currentbuf = buf;
if (Currentbuf->bufferprop & BP_INTERNAL)
return;
charset = Currentbuf->document_charset;
for (; arg; arg = arg->next) {
if (!strcmp(arg->arg, "charset"))
charset = atoi(arg->value);
}
_docCSet(charset);
}
| C | w3m | 0 |
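The one-line commit message above points at the standard CWE-59 remedy: create the fallback directory with mkdtemp(), which yields a unique, owner-only directory and fails instead of following an attacker's pre-planted symlink. An illustrative fragment, not w3m's exact change:

#include <stdlib.h>   /* mkdtemp */

char tmpl[] = "/tmp/w3m-XXXXXX";
char *dir = mkdtemp(tmpl);   /* created 0700, unique name */
if (dir == NULL)
	return;   /* fail closed rather than fall back to a
		   * predictable, symlinkable path */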
CVE-2018-18955 | https://www.cvedetails.com/cve/CVE-2018-18955/ | CWE-20 | https://github.com/torvalds/linux/commit/d2f007dbe7e4c9583eea6eb04d60001e85c6f1bd | d2f007dbe7e4c9583eea6eb04d60001e85c6f1bd | userns: also map extents in the reverse map to kernel IDs
The current logic first clones the extent array and sorts both copies, then
maps the lower IDs of the forward mapping into the lower namespace, but
doesn't map the lower IDs of the reverse mapping.
This means that code in a nested user namespace with >5 extents will see
incorrect IDs. It also breaks some access checks, like
inode_owner_or_capable() and privileged_wrt_inode_uidgid(), so a process
can incorrectly appear to be capable relative to an inode.
To fix it, we have to make sure that the "lower_first" members of extents
in both arrays are translated; and we have to make sure that the reverse
map is sorted *after* the translation (since otherwise the translation can
break the sorting).
This is CVE-2018-18955.
Fixes: 6397fac4915a ("userns: bump idmap limits to 340")
Cc: stable@vger.kernel.org
Signed-off-by: Jann Horn <jannh@google.com>
Tested-by: Eric W. Biederman <ebiederm@xmission.com>
Reviewed-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com> | static int cmp_extents_reverse(const void *a, const void *b)
{
const struct uid_gid_extent *e1 = a;
const struct uid_gid_extent *e2 = b;
if (e1->lower_first < e2->lower_first)
return -1;
if (e1->lower_first > e2->lower_first)
return 1;
return 0;
}
| static int cmp_extents_reverse(const void *a, const void *b)
{
const struct uid_gid_extent *e1 = a;
const struct uid_gid_extent *e2 = b;
if (e1->lower_first < e2->lower_first)
return -1;
if (e1->lower_first > e2->lower_first)
return 1;
return 0;
}
| C | linux | 0 |
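The ordering constraint in the message above — translate first, clone and sort after — can be summarized in a short sketch using the comparator shown in this record. Helper names follow kernel conventions but are illustrative:

/* Map lower_first into parent-namespace IDs FIRST, then clone the
 * array for the reverse map and sort both copies; sorting before
 * translation would leave the reverse map ordered by stale IDs. */
for (i = 0; i < map->nr_extents; i++)
	map->forward[i].lower_first =
		map_id_range_down(parent_map,
				  map->forward[i].lower_first,
				  map->forward[i].count);

memcpy(map->reverse, map->forward,
       map->nr_extents * sizeof(struct uid_gid_extent));
sort(map->forward, map->nr_extents, sizeof(struct uid_gid_extent),
     cmp_extents_forward, NULL);
sort(map->reverse, map->nr_extents, sizeof(struct uid_gid_extent),
     cmp_extents_reverse, NULL);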
CVE-2016-2476 | https://www.cvedetails.com/cve/CVE-2016-2476/ | CWE-119 | https://android.googlesource.com/platform/frameworks/av/+/295c883fe3105b19bcd0f9e07d54c6b589fc5bff | 295c883fe3105b19bcd0f9e07d54c6b589fc5bff | DO NOT MERGE Verify OMX buffer sizes prior to access
Bug: 27207275
Change-Id: I4412825d1ee233d993af0a67708bea54304ff62d
| OMX_ERRORTYPE SoftAVC::setConfig(
OMX_INDEXTYPE index, const OMX_PTR _params) {
switch (index) {
case OMX_IndexConfigVideoIntraVOPRefresh:
{
OMX_CONFIG_INTRAREFRESHVOPTYPE *params =
(OMX_CONFIG_INTRAREFRESHVOPTYPE *)_params;
if (!isValidOMXParam(params)) {
return OMX_ErrorBadParameter;
}
if (params->nPortIndex != kOutputPortIndex) {
return OMX_ErrorBadPortIndex;
}
mKeyFrameRequested = params->IntraRefreshVOP;
return OMX_ErrorNone;
}
case OMX_IndexConfigVideoBitrate:
{
OMX_VIDEO_CONFIG_BITRATETYPE *params =
(OMX_VIDEO_CONFIG_BITRATETYPE *)_params;
if (!isValidOMXParam(params)) {
return OMX_ErrorBadParameter;
}
if (params->nPortIndex != kOutputPortIndex) {
return OMX_ErrorBadPortIndex;
}
if (mBitrate != params->nEncodeBitrate) {
mBitrate = params->nEncodeBitrate;
mBitrateUpdated = true;
}
return OMX_ErrorNone;
}
default:
return SimpleSoftOMXComponent::setConfig(index, _params);
}
}
| OMX_ERRORTYPE SoftAVC::setConfig(
OMX_INDEXTYPE index, const OMX_PTR _params) {
switch (index) {
case OMX_IndexConfigVideoIntraVOPRefresh:
{
OMX_CONFIG_INTRAREFRESHVOPTYPE *params =
(OMX_CONFIG_INTRAREFRESHVOPTYPE *)_params;
if (params->nPortIndex != kOutputPortIndex) {
return OMX_ErrorBadPortIndex;
}
mKeyFrameRequested = params->IntraRefreshVOP;
return OMX_ErrorNone;
}
case OMX_IndexConfigVideoBitrate:
{
OMX_VIDEO_CONFIG_BITRATETYPE *params =
(OMX_VIDEO_CONFIG_BITRATETYPE *)_params;
if (params->nPortIndex != kOutputPortIndex) {
return OMX_ErrorBadPortIndex;
}
if (mBitrate != params->nEncodeBitrate) {
mBitrate = params->nEncodeBitrate;
mBitrateUpdated = true;
}
return OMX_ErrorNone;
}
default:
return SimpleSoftOMXComponent::setConfig(index, _params);
}
}
| C | Android | 1 |
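Each isValidOMXParam() check added in func_after above guards the cast from OMX_PTR: it compares the caller-supplied nSize against the full structure size before any later field is touched. AOSP's real helper is a C++ template; the essence, as a hedged C-style sketch:

/* Every OMX config/param struct begins with an nSize field, so a
 * short buffer is detectable before any later member is read or
 * written. */
#define IS_VALID_OMX_PARAM(p) \
	((p) != NULL && (p)->nSize >= sizeof(*(p)))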
CVE-2016-6303 | https://www.cvedetails.com/cve/CVE-2016-6303/ | CWE-787 | https://git.openssl.org/?p=openssl.git;a=commit;h=55d83bf7c10c7b205fffa23fa7c3977491e56c07 | 55d83bf7c10c7b205fffa23fa7c3977491e56c07 | null | int MDC2_Init(MDC2_CTX *c)
{
c->num = 0;
c->pad_type = 1;
memset(&(c->h[0]), 0x52, MDC2_BLOCK);
memset(&(c->hh[0]), 0x25, MDC2_BLOCK);
return 1;
}
| int MDC2_Init(MDC2_CTX *c)
{
c->num = 0;
c->pad_type = 1;
memset(&(c->h[0]), 0x52, MDC2_BLOCK);
memset(&(c->hh[0]), 0x25, MDC2_BLOCK);
return 1;
}
| C | openssl | 0 |
CVE-2014-3571 | https://www.cvedetails.com/cve/CVE-2014-3571/ | null | https://github.com/openssl/openssl/commit/248385c606620b29ecc96ca9d3603463f879652b | 248385c606620b29ecc96ca9d3603463f879652b | Follow on from CVE-2014-3571. This fixes the code that was the original source
of the crash due to p being NULL. Steve's fix prevents this situation from
occurring - however this is by no means obvious by looking at the code for
dtls1_get_record. This fix just makes things look a bit more sane.
Reviewed-by: Dr Stephen Henson <steve@openssl.org> | int dtls1_get_record(SSL *s)
{
int ssl_major,ssl_minor;
int i,n;
SSL3_RECORD *rr;
unsigned char *p = NULL;
unsigned short version;
DTLS1_BITMAP *bitmap;
unsigned int is_next_epoch;
rr= &(s->s3->rrec);
/* The epoch may have changed. If so, process all the
* pending records. This is a non-blocking operation. */
dtls1_process_buffered_records(s);
/* if we're renegotiating, then there may be buffered records */
if (dtls1_get_processed_record(s))
return 1;
/* get something from the wire */
again:
/* check if we have the header */
if ( (s->rstate != SSL_ST_READ_BODY) ||
(s->packet_length < DTLS1_RT_HEADER_LENGTH))
{
n=ssl3_read_n(s, DTLS1_RT_HEADER_LENGTH, s->s3->rbuf.len, 0);
/* read timeout is handled by dtls1_read_bytes */
if (n <= 0) return(n); /* error or non-blocking */
/* this packet contained a partial record, dump it */
if (s->packet_length != DTLS1_RT_HEADER_LENGTH)
{
s->packet_length = 0;
goto again;
}
s->rstate=SSL_ST_READ_BODY;
p=s->packet;
if (s->msg_callback)
s->msg_callback(0, 0, SSL3_RT_HEADER, p, DTLS1_RT_HEADER_LENGTH, s, s->msg_callback_arg);
/* Pull apart the header into the DTLS1_RECORD */
rr->type= *(p++);
ssl_major= *(p++);
ssl_minor= *(p++);
version=(ssl_major<<8)|ssl_minor;
/* sequence number is 64 bits, with top 2 bytes = epoch */
n2s(p,rr->epoch);
memcpy(&(s->s3->read_sequence[2]), p, 6);
p+=6;
n2s(p,rr->length);
/* Lets check version */
if (!s->first_packet)
{
if (version != s->version)
{
/* unexpected version, silently discard */
rr->length = 0;
s->packet_length = 0;
goto again;
}
}
if ((version & 0xff00) != (s->version & 0xff00))
{
/* wrong version, silently discard record */
rr->length = 0;
s->packet_length = 0;
goto again;
}
if (rr->length > SSL3_RT_MAX_ENCRYPTED_LENGTH)
{
/* record too long, silently discard it */
rr->length = 0;
s->packet_length = 0;
goto again;
}
/* now s->rstate == SSL_ST_READ_BODY */
}
/* s->rstate == SSL_ST_READ_BODY, get and decode the data */
if (rr->length > s->packet_length-DTLS1_RT_HEADER_LENGTH)
{
/* now s->packet_length == DTLS1_RT_HEADER_LENGTH */
i=rr->length;
n=ssl3_read_n(s,i,i,1);
/* this packet contained a partial record, dump it */
if ( n != i)
{
rr->length = 0;
s->packet_length = 0;
goto again;
}
/* now n == rr->length,
* and s->packet_length == DTLS1_RT_HEADER_LENGTH + rr->length */
}
s->rstate=SSL_ST_READ_HEADER; /* set state for later operations */
/* match epochs. NULL means the packet is dropped on the floor */
bitmap = dtls1_get_bitmap(s, rr, &is_next_epoch);
if ( bitmap == NULL)
{
rr->length = 0;
s->packet_length = 0; /* dump this record */
goto again; /* get another record */
}
#ifndef OPENSSL_NO_SCTP
/* Only do replay check if no SCTP bio */
if (!BIO_dgram_is_sctp(SSL_get_rbio(s)))
{
#endif
/* Check whether this is a repeat, or aged record.
* Don't check if we're listening and this message is
* a ClientHello. They can look as if they're replayed,
* since they arrive from different connections and
* would be dropped unnecessarily.
*/
if (!(s->d1->listen && rr->type == SSL3_RT_HANDSHAKE &&
s->packet_length > DTLS1_RT_HEADER_LENGTH &&
s->packet[DTLS1_RT_HEADER_LENGTH] == SSL3_MT_CLIENT_HELLO) &&
!dtls1_record_replay_check(s, bitmap))
{
rr->length = 0;
s->packet_length=0; /* dump this record */
goto again; /* get another record */
}
#ifndef OPENSSL_NO_SCTP
}
#endif
/* just read a 0 length packet */
if (rr->length == 0) goto again;
/* If this record is from the next epoch (either HM or ALERT),
* and a handshake is currently in progress, buffer it since it
* cannot be processed at this time. However, do not buffer
* anything while listening.
*/
if (is_next_epoch)
{
if ((SSL_in_init(s) || s->in_handshake) && !s->d1->listen)
{
dtls1_buffer_record(s, &(s->d1->unprocessed_rcds), rr->seq_num);
}
rr->length = 0;
s->packet_length = 0;
goto again;
}
if (!dtls1_process_record(s))
{
rr->length = 0;
s->packet_length = 0; /* dump this record */
goto again; /* get another record */
}
return(1);
}
| int dtls1_get_record(SSL *s)
{
int ssl_major,ssl_minor;
int i,n;
SSL3_RECORD *rr;
unsigned char *p = NULL;
unsigned short version;
DTLS1_BITMAP *bitmap;
unsigned int is_next_epoch;
rr= &(s->s3->rrec);
/* The epoch may have changed. If so, process all the
* pending records. This is a non-blocking operation. */
dtls1_process_buffered_records(s);
/* if we're renegotiating, then there may be buffered records */
if (dtls1_get_processed_record(s))
return 1;
/* get something from the wire */
again:
/* check if we have the header */
if ( (s->rstate != SSL_ST_READ_BODY) ||
(s->packet_length < DTLS1_RT_HEADER_LENGTH))
{
n=ssl3_read_n(s, DTLS1_RT_HEADER_LENGTH, s->s3->rbuf.len, 0);
/* read timeout is handled by dtls1_read_bytes */
if (n <= 0) return(n); /* error or non-blocking */
/* this packet contained a partial record, dump it */
if (s->packet_length != DTLS1_RT_HEADER_LENGTH)
{
s->packet_length = 0;
goto again;
}
s->rstate=SSL_ST_READ_BODY;
p=s->packet;
if (s->msg_callback)
s->msg_callback(0, 0, SSL3_RT_HEADER, p, DTLS1_RT_HEADER_LENGTH, s, s->msg_callback_arg);
/* Pull apart the header into the DTLS1_RECORD */
rr->type= *(p++);
ssl_major= *(p++);
ssl_minor= *(p++);
version=(ssl_major<<8)|ssl_minor;
/* sequence number is 64 bits, with top 2 bytes = epoch */
n2s(p,rr->epoch);
memcpy(&(s->s3->read_sequence[2]), p, 6);
p+=6;
n2s(p,rr->length);
/* Lets check version */
if (!s->first_packet)
{
if (version != s->version)
{
/* unexpected version, silently discard */
rr->length = 0;
s->packet_length = 0;
goto again;
}
}
if ((version & 0xff00) != (s->version & 0xff00))
{
/* wrong version, silently discard record */
rr->length = 0;
s->packet_length = 0;
goto again;
}
if (rr->length > SSL3_RT_MAX_ENCRYPTED_LENGTH)
{
/* record too long, silently discard it */
rr->length = 0;
s->packet_length = 0;
goto again;
}
/* now s->rstate == SSL_ST_READ_BODY */
}
/* s->rstate == SSL_ST_READ_BODY, get and decode the data */
if (rr->length > s->packet_length-DTLS1_RT_HEADER_LENGTH)
{
/* now s->packet_length == DTLS1_RT_HEADER_LENGTH */
i=rr->length;
n=ssl3_read_n(s,i,i,1);
/* this packet contained a partial record, dump it */
if ( n != i)
{
rr->length = 0;
s->packet_length = 0;
goto again;
}
/* now n == rr->length,
* and s->packet_length == DTLS1_RT_HEADER_LENGTH + rr->length */
}
s->rstate=SSL_ST_READ_HEADER; /* set state for later operations */
/* match epochs. NULL means the packet is dropped on the floor */
bitmap = dtls1_get_bitmap(s, rr, &is_next_epoch);
if ( bitmap == NULL)
{
rr->length = 0;
s->packet_length = 0; /* dump this record */
goto again; /* get another record */
}
#ifndef OPENSSL_NO_SCTP
/* Only do replay check if no SCTP bio */
if (!BIO_dgram_is_sctp(SSL_get_rbio(s)))
{
#endif
/* Check whether this is a repeat, or aged record.
* Don't check if we're listening and this message is
* a ClientHello. They can look as if they're replayed,
* since they arrive from different connections and
* would be dropped unnecessarily.
*/
if (!(s->d1->listen && rr->type == SSL3_RT_HANDSHAKE &&
*p == SSL3_MT_CLIENT_HELLO) &&
!dtls1_record_replay_check(s, bitmap))
{
rr->length = 0;
s->packet_length=0; /* dump this record */
goto again; /* get another record */
}
#ifndef OPENSSL_NO_SCTP
}
#endif
/* just read a 0 length packet */
if (rr->length == 0) goto again;
/* If this record is from the next epoch (either HM or ALERT),
* and a handshake is currently in progress, buffer it since it
* cannot be processed at this time. However, do not buffer
* anything while listening.
*/
if (is_next_epoch)
{
if ((SSL_in_init(s) || s->in_handshake) && !s->d1->listen)
{
dtls1_buffer_record(s, &(s->d1->unprocessed_rcds), rr->seq_num);
}
rr->length = 0;
s->packet_length = 0;
goto again;
}
if (!dtls1_process_record(s))
{
rr->length = 0;
s->packet_length = 0; /* dump this record */
goto again; /* get another record */
}
return(1);
}
| C | openssl | 1 |
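The entire before/after difference in this record reduces to one comparison: func_before dereferences *p for the first handshake byte even when no record body was read, while func_after indexes from the buffer start behind a length check. Distilled, with pkt/pkt_len standing in for s->packet/s->packet_length:

#include <stddef.h>

#define HDR_LEN 13  /* DTLS1_RT_HEADER_LENGTH */

static int first_body_byte_is(const unsigned char *pkt, size_t pkt_len,
                              unsigned char want)
{
    /* before: return *p == want;  -- p can point just past the header */
    return pkt_len > HDR_LEN && pkt[HDR_LEN] == want;
}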
CVE-2018-11363 | https://www.cvedetails.com/cve/CVE-2018-11363/ | CWE-125 | https://github.com/AndreRenaud/PDFGen/commit/ee58aff6918b8bbc3be29b9e3089485ea46ff956 | ee58aff6918b8bbc3be29b9e3089485ea46ff956 | jpeg: Fix another possible buffer overrun
Found via the clang libfuzzer | int pdf_set_font(struct pdf_doc *pdf, const char *font)
{
struct pdf_object *obj;
int last_index = 0;
/* See if we've used this font before */
for (obj = pdf_find_first_object(pdf, OBJ_font); obj; obj = obj->next) {
if (strcmp(obj->font.name, font) == 0)
break;
last_index = obj->font.index;
}
/* Create a new font object if we need it */
if (!obj) {
obj = pdf_add_object(pdf, OBJ_font);
if (!obj)
return pdf->errval;
strncpy(obj->font.name, font, sizeof(obj->font.name));
obj->font.name[sizeof(obj->font.name) - 1] = '\0';
obj->font.index = last_index + 1;
}
pdf->current_font = obj;
return 0;
}
| int pdf_set_font(struct pdf_doc *pdf, const char *font)
{
struct pdf_object *obj;
int last_index = 0;
/* See if we've used this font before */
for (obj = pdf_find_first_object(pdf, OBJ_font); obj; obj = obj->next) {
if (strcmp(obj->font.name, font) == 0)
break;
last_index = obj->font.index;
}
/* Create a new font object if we need it */
if (!obj) {
obj = pdf_add_object(pdf, OBJ_font);
if (!obj)
return pdf->errval;
strncpy(obj->font.name, font, sizeof(obj->font.name));
obj->font.name[sizeof(obj->font.name) - 1] = '\0';
obj->font.index = last_index + 1;
}
pdf->current_font = obj;
return 0;
}
| C | PDFGen | 0 |
CVE-2016-2342 | https://www.cvedetails.com/cve/CVE-2016-2342/ | CWE-119 | https://git.savannah.gnu.org/cgit/quagga.git/commit/?id=a3bc7e9400b214a0f078fdb19596ba54214a1442 | a3bc7e9400b214a0f078fdb19596ba54214a1442 | null | decode_rd_as (u_char *pnt, struct rd_as *rd_as)
{
rd_as->as = (u_int16_t) *pnt++ << 8;
rd_as->as |= (u_int16_t) *pnt++;
rd_as->val = ((u_int32_t) *pnt++ << 24);
rd_as->val |= ((u_int32_t) *pnt++ << 16);
rd_as->val |= ((u_int32_t) *pnt++ << 8);
rd_as->val |= (u_int32_t) *pnt;
}
| decode_rd_as (u_char *pnt, struct rd_as *rd_as)
{
rd_as->as = (u_int16_t) *pnt++ << 8;
rd_as->as |= (u_int16_t) *pnt++;
rd_as->val = ((u_int32_t) *pnt++ << 24);
rd_as->val |= ((u_int32_t) *pnt++ << 16);
rd_as->val |= ((u_int32_t) *pnt++ << 8);
rd_as->val |= (u_int32_t) *pnt;
}
| C | savannah | 0 |
null | null | null | https://github.com/chromium/chromium/commit/b9e2ecab97a8a7f3cce06951ab92a3eaef559206 | b9e2ecab97a8a7f3cce06951ab92a3eaef559206 | Do not discount a MANUAL_SUBFRAME load just because it involved
some redirects.
R=brettw
BUG=21353
TEST=none
Review URL: http://codereview.chromium.org/246073
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@27887 0039d316-1c4b-4281-b951-d872f2087c98 | bool WebFrameLoaderClient::canHandleRequest(const ResourceRequest&) const {
return true;
}
| bool WebFrameLoaderClient::canHandleRequest(const ResourceRequest&) const {
return true;
}
| C | Chrome | 0 |
CVE-2011-3085 | https://www.cvedetails.com/cve/CVE-2011-3085/ | CWE-119 | https://github.com/chromium/chromium/commit/c7e50b5ef454efd6ab9527d795442c213eeb6afa | c7e50b5ef454efd6ab9527d795442c213eeb6afa | [REGRESSION] Refreshed autofill popup renders garbage
https://bugs.webkit.org/show_bug.cgi?id=83255
http://code.google.com/p/chromium/issues/detail?id=118374
The code used to update only the PopupContainer coordinates as if they were the coordinates relative
to the root view. Instead, a WebWidget positioned relative to the screen origin holds the PopupContainer,
so it is the WebWidget that should be positioned in PopupContainer::refresh(), and the PopupContainer's
location should be (0, 0) (and their sizes should always be equal).
Reviewed by Kent Tamura.
No new tests, as the popup appearance is not testable in WebKit.
* platform/chromium/PopupContainer.cpp:
(WebCore::PopupContainer::layoutAndCalculateWidgetRect): Variable renamed.
(WebCore::PopupContainer::showPopup): Use m_originalFrameRect rather than frameRect()
for passing into chromeClient.
(WebCore::PopupContainer::showInRect): Set up the correct frameRect() for the container.
(WebCore::PopupContainer::refresh): Resize the container and position the WebWidget correctly.
* platform/chromium/PopupContainer.h:
(PopupContainer):
git-svn-id: svn://svn.chromium.org/blink/trunk@113418 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | void PopupContainer::paintBorder(GraphicsContext* gc, const IntRect& rect)
{
Color borderColor(127, 157, 185);
gc->setStrokeStyle(NoStroke);
gc->setFillColor(borderColor, ColorSpaceDeviceRGB);
int tx = x();
int ty = y();
gc->drawRect(IntRect(tx, ty, width(), kBorderSize));
gc->drawRect(IntRect(tx, ty, kBorderSize, height()));
gc->drawRect(IntRect(tx, ty + height() - kBorderSize, width(), kBorderSize));
gc->drawRect(IntRect(tx + width() - kBorderSize, ty, kBorderSize, height()));
}
| void PopupContainer::paintBorder(GraphicsContext* gc, const IntRect& rect)
{
Color borderColor(127, 157, 185);
gc->setStrokeStyle(NoStroke);
gc->setFillColor(borderColor, ColorSpaceDeviceRGB);
int tx = x();
int ty = y();
gc->drawRect(IntRect(tx, ty, width(), kBorderSize));
gc->drawRect(IntRect(tx, ty, kBorderSize, height()));
gc->drawRect(IntRect(tx, ty + height() - kBorderSize, width(), kBorderSize));
gc->drawRect(IntRect(tx + width() - kBorderSize, ty, kBorderSize, height()));
}
| C | Chrome | 0 |
CVE-2012-1179 | https://www.cvedetails.com/cve/CVE-2012-1179/ | CWE-264 | https://github.com/torvalds/linux/commit/4a1d704194a441bf83c636004a479e01360ec850 | 4a1d704194a441bf83c636004a479e01360ec850 | mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
commit 1a5a9906d4e8d1976b701f889d8f35d54b928f25 upstream.
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem hold in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables to go away from under them, during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(¤t->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[akpm@linux-foundation.org: checkpatch fixes]
Reported-by: Ulrich Obergfell <uobergfe@redhat.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Jones <davej@redhat.com>
Acked-by: Larry Woodman <lwoodman@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Mark Salter <msalter@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | int free_swap_and_cache(swp_entry_t entry)
{
struct swap_info_struct *p;
struct page *page = NULL;
if (non_swap_entry(entry))
return 1;
p = swap_info_get(entry);
if (p) {
if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) {
page = find_get_page(&swapper_space, entry.val);
if (page && !trylock_page(page)) {
page_cache_release(page);
page = NULL;
}
}
spin_unlock(&swap_lock);
}
if (page) {
/*
* Not mapped elsewhere, or swap space full? Free it!
* Also recheck PageSwapCache now page is locked (above).
*/
if (PageSwapCache(page) && !PageWriteback(page) &&
(!page_mapped(page) || vm_swap_full())) {
delete_from_swap_cache(page);
SetPageDirty(page);
}
unlock_page(page);
page_cache_release(page);
}
return p != NULL;
}
| int free_swap_and_cache(swp_entry_t entry)
{
struct swap_info_struct *p;
struct page *page = NULL;
if (non_swap_entry(entry))
return 1;
p = swap_info_get(entry);
if (p) {
if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) {
page = find_get_page(&swapper_space, entry.val);
if (page && !trylock_page(page)) {
page_cache_release(page);
page = NULL;
}
}
spin_unlock(&swap_lock);
}
if (page) {
/*
* Not mapped elsewhere, or swap space full? Free it!
* Also recheck PageSwapCache now page is locked (above).
*/
if (PageSwapCache(page) && !PageWriteback(page) &&
(!page_mapped(page) || vm_swap_full())) {
delete_from_swap_cache(page);
SetPageDirty(page);
}
unlock_page(page);
page_cache_release(page);
}
return p != NULL;
}
| C | linux | 0 |
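The fix sketched in the message — read the pmd once into a local and stop the compiler from re-reading the live entry — takes roughly this shape; a simplified reconstruction of the helper the commit adds, with the pmd_* predicates assumed from the surrounding kernel code:

static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
	pmd_t pmdval = *pmd;	/* single read; every test uses this copy */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	barrier();		/* pin pmdval in a register/on the stack */
#endif
	if (pmd_none(pmdval))
		return 1;
	if (unlikely(pmd_bad(pmdval))) {
		if (!pmd_trans_huge(pmdval))	/* a hugepmd is not "bad" */
			pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}

Even if the real pmd keeps changing underneath, the caller's decision is made against one stable snapshot, so a hugepmd materializing mid-check can no longer be misclassified and cleared by pmd_clear_bad().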
CVE-2014-3173 | https://www.cvedetails.com/cve/CVE-2014-3173/ | CWE-119 | https://github.com/chromium/chromium/commit/ee7579229ff7e9e5ae28bf53aea069251499d7da | ee7579229ff7e9e5ae28bf53aea069251499d7da | Framebuffer clear() needs to consider the situation some draw buffers are disabled.
This is when we expose DrawBuffers extension.
BUG=376951
TEST=the attached test case, webgl conformance
R=kbr@chromium.org,bajones@chromium.org
Review URL: https://codereview.chromium.org/315283002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@275338 0039d316-1c4b-4281-b951-d872f2087c98 | void GLES2DecoderImpl::BlitFramebufferHelper(GLint srcX0,
GLint srcY0,
GLint srcX1,
GLint srcY1,
GLint dstX0,
GLint dstY0,
GLint dstX1,
GLint dstY1,
GLbitfield mask,
GLenum filter) {
if (feature_info_->feature_flags().is_angle) {
glBlitFramebufferANGLE(
srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
} else if (feature_info_->feature_flags().use_core_framebuffer_multisample) {
glBlitFramebuffer(
srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
} else {
glBlitFramebufferEXT(
srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
}
}
| void GLES2DecoderImpl::BlitFramebufferHelper(GLint srcX0,
GLint srcY0,
GLint srcX1,
GLint srcY1,
GLint dstX0,
GLint dstY0,
GLint dstX1,
GLint dstY1,
GLbitfield mask,
GLenum filter) {
if (feature_info_->feature_flags().is_angle) {
glBlitFramebufferANGLE(
srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
} else if (feature_info_->feature_flags().use_core_framebuffer_multisample) {
glBlitFramebuffer(
srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
} else {
glBlitFramebufferEXT(
srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter);
}
}
| C | Chrome | 0 |
CVE-2016-5204 | https://www.cvedetails.com/cve/CVE-2016-5204/ | CWE-79 | https://github.com/chromium/chromium/commit/e1e67d5d341d82c61cab2c41ff4163f17caf14ae | e1e67d5d341d82c61cab2c41ff4163f17caf14ae | Add boolean to UserInitiatedInfo noting if an input event led to navigation.
Also refactor UkmPageLoadMetricsObserver to use this new boolean to
report the user initiated metric in RecordPageLoadExtraInfoMetrics, so
that it works correctly in the case when the page load failed.
Bug: 925104
Change-Id: Ie08e7d3912cb1da484190d838005e95e57a209ff
Reviewed-on: https://chromium-review.googlesource.com/c/1450460
Commit-Queue: Annie Sullivan <sullivan@chromium.org>
Reviewed-by: Bryan McQuade <bmcquade@chromium.org>
Cr-Commit-Position: refs/heads/master@{#630870} | void MetricsWebContentsObserver::NavigationStopped() {
NotifyPageEndAllLoads(END_STOP, UserInitiatedInfo::NotUserInitiated());
}
| void MetricsWebContentsObserver::NavigationStopped() {
NotifyPageEndAllLoads(END_STOP, UserInitiatedInfo::NotUserInitiated());
}
| C | Chrome | 0 |
CVE-2013-2871 | https://www.cvedetails.com/cve/CVE-2013-2871/ | CWE-20 | https://github.com/chromium/chromium/commit/bb9cfb0aba25f4b13e57bdd4a9fac80ba071e7b9 | bb9cfb0aba25f4b13e57bdd4a9fac80ba071e7b9 | Setting input.x-webkit-speech should not cause focus change
In r150866, we introduced element()->focus() in destroyShadowSubtree()
to retain focus on <input> when its type attribute gets changed.
But when x-webkit-speech attribute is changed, the element is detached
before calling destroyShadowSubtree() and element()->focus() failed
This patch moves detach() after destroyShadowSubtree() to fix the
problem.
BUG=243818
TEST=fast/forms/input-type-change-focusout.html
NOTRY=true
Review URL: https://chromiumcodereview.appspot.com/16084005
git-svn-id: svn://svn.chromium.org/blink/trunk@151444 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | bool HTMLInputElement::isSpeechEnabled() const
{
return m_inputType->shouldRespectSpeechAttribute() && RuntimeEnabledFeatures::speechInputEnabled() && hasAttribute(webkitspeechAttr);
}
| bool HTMLInputElement::isSpeechEnabled() const
{
return m_inputType->shouldRespectSpeechAttribute() && RuntimeEnabledFeatures::speechInputEnabled() && hasAttribute(webkitspeechAttr);
}
| C | Chrome | 0 |
CVE-2018-8087 | https://www.cvedetails.com/cve/CVE-2018-8087/ | CWE-772 | https://github.com/torvalds/linux/commit/0ddcff49b672239dda94d70d0fcf50317a9f4b51 | 0ddcff49b672239dda94d70d0fcf50317a9f4b51 | mac80211_hwsim: fix possible memory leak in hwsim_new_radio_nl()
'hwname' is malloced in hwsim_new_radio_nl() and should be freed
before leaving from the error handling cases, otherwise it will cause
memory leak.
Fixes: ff4dd73dd2b4 ("mac80211_hwsim: check HWSIM_ATTR_RADIO_NAME length")
Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
Reviewed-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
Signed-off-by: Johannes Berg <johannes.berg@intel.com> | static u64 mac80211_hwsim_get_tsf(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mac80211_hwsim_data *data = hw->priv;
return le64_to_cpu(__mac80211_hwsim_get_tsf(data));
}
| static u64 mac80211_hwsim_get_tsf(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mac80211_hwsim_data *data = hw->priv;
return le64_to_cpu(__mac80211_hwsim_get_tsf(data));
}
| C | linux | 0 |
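The leak pattern is simple enough to distill into a self-contained userspace sketch (names illustrative, not the driver's): a duplicate allocated up front must be released on every error return, not just on success.

#include <stdlib.h>
#include <string.h>

static int setup_radio(void) { return -1; }             /* stub: fails */
static void register_radio(char *name) { free(name); }  /* stub: takes ownership */

static int new_radio(const char *name_attr)
{
    char *hwname = name_attr ? strdup(name_attr) : NULL;
    if (name_attr && !hwname)
        return -1;

    if (setup_radio() < 0) {
        free(hwname);   /* the cleanup the patch adds on error paths */
        return -1;
    }
    register_radio(hwname);  /* success: ownership handed off */
    return 0;
}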
CVE-2015-5289 | https://www.cvedetails.com/cve/CVE-2015-5289/ | CWE-119 | https://git.postgresql.org/gitweb/?p=postgresql.git;a=commit;h=08fa47c4850cea32c3116665975bca219fbf2fe6 | 08fa47c4850cea32c3116665975bca219fbf2fe6 | null | IsValidJsonNumber(const char *str, int len)
{
bool numeric_error;
JsonLexContext dummy_lex;
/*
* json_lex_number expects a leading '-' to have been eaten already.
*
* having to cast away the constness of str is ugly, but there's not much
* easy alternative.
*/
if (*str == '-')
{
dummy_lex.input = (char *) str + 1;
dummy_lex.input_length = len - 1;
}
else
{
dummy_lex.input = (char *) str;
dummy_lex.input_length = len;
}
json_lex_number(&dummy_lex, dummy_lex.input, &numeric_error);
return !numeric_error;
}
| IsValidJsonNumber(const char *str, int len)
{
bool numeric_error;
JsonLexContext dummy_lex;
/*
* json_lex_number expects a leading '-' to have been eaten already.
*
* having to cast away the constness of str is ugly, but there's not much
* easy alternative.
*/
if (*str == '-')
{
dummy_lex.input = (char *) str + 1;
dummy_lex.input_length = len - 1;
}
else
{
dummy_lex.input = (char *) str;
dummy_lex.input_length = len;
}
json_lex_number(&dummy_lex, dummy_lex.input, &numeric_error);
return !numeric_error;
}
| C | postgresql | 0 |
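A trivial usage sketch for the validator above (hypothetical caller, not PostgreSQL code):

#include <string.h>

static int looks_like_json_number(const char *s)
{
    /* leading '-' handling happens inside IsValidJsonNumber itself */
    return IsValidJsonNumber(s, (int) strlen(s));
}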
CVE-2016-5219 | https://www.cvedetails.com/cve/CVE-2016-5219/ | CWE-416 | https://github.com/chromium/chromium/commit/a4150b688a754d3d10d2ca385155b1c95d77d6ae | a4150b688a754d3d10d2ca385155b1c95d77d6ae | Add GL_PROGRAM_COMPLETION_QUERY_CHROMIUM
This makes the query of GL_COMPLETION_STATUS_KHR to programs much
cheaper by minimizing the round-trip to the GPU thread.
Bug: 881152, 957001
Change-Id: Iadfa798af29225e752c710ca5c25f50b3dd3101a
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1586630
Commit-Queue: Kenneth Russell <kbr@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Geoff Lang <geofflang@chromium.org>
Reviewed-by: Kenneth Russell <kbr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#657568} | error::Error GLES2DecoderPassthroughImpl::DoTexSubImage3D(GLenum target,
GLint level,
GLint xoffset,
GLint yoffset,
GLint zoffset,
GLsizei width,
GLsizei height,
GLsizei depth,
GLenum format,
GLenum type,
GLsizei image_size,
const void* pixels) {
ScopedUnpackStateButAlignmentReset reset_unpack(
api(), image_size != 0 && feature_info_->gl_version_info().is_es3, true);
api()->glTexSubImage3DRobustANGLEFn(target, level, xoffset, yoffset, zoffset,
width, height, depth, format, type,
image_size, pixels);
ExitCommandProcessingEarly();
return error::kNoError;
}
| error::Error GLES2DecoderPassthroughImpl::DoTexSubImage3D(GLenum target,
GLint level,
GLint xoffset,
GLint yoffset,
GLint zoffset,
GLsizei width,
GLsizei height,
GLsizei depth,
GLenum format,
GLenum type,
GLsizei image_size,
const void* pixels) {
ScopedUnpackStateButAlignmentReset reset_unpack(
api(), image_size != 0 && feature_info_->gl_version_info().is_es3, true);
api()->glTexSubImage3DRobustANGLEFn(target, level, xoffset, yoffset, zoffset,
width, height, depth, format, type,
image_size, pixels);
ExitCommandProcessingEarly();
return error::kNoError;
}
| C | Chrome | 0 |
CVE-2018-9490 | https://www.cvedetails.com/cve/CVE-2018-9490/ | CWE-704 | https://android.googlesource.com/platform/external/v8/+/a24543157ae2cdd25da43e20f4e48a07481e6ceb | a24543157ae2cdd25da43e20f4e48a07481e6ceb | Backport: Fix Object.entries/values with changing elements
Bug: 111274046
Test: m -j proxy_resolver_v8_unittest && adb sync && adb shell \
/data/nativetest64/proxy_resolver_v8_unittest/proxy_resolver_v8_unittest
Change-Id: I705fc512cc5837e9364ed187559cc75d079aa5cb
(cherry picked from commit d8be9a10287afed07705ac8af027d6a46d4def99)
| static uint32_t NumberOfElementsImpl(JSObject* receiver,
FixedArrayBase* backing_store) {
return AccessorClass::GetCapacityImpl(receiver, backing_store);
}
| static uint32_t NumberOfElementsImpl(JSObject* receiver,
FixedArrayBase* backing_store) {
return AccessorClass::GetCapacityImpl(receiver, backing_store);
}
| C | Android | 0 |
CVE-2014-7822 | https://www.cvedetails.com/cve/CVE-2014-7822/ | CWE-264 | https://github.com/torvalds/linux/commit/8d0207652cbe27d1f962050737848e5ad4671958 | 8d0207652cbe27d1f962050737848e5ad4671958 | ->splice_write() via ->write_iter()
iter_file_splice_write() - a ->splice_write() instance that gathers the
pipe buffers, builds a bio_vec-based iov_iter covering those and feeds
it to ->write_iter(). A bunch of simple cases converted to that...
[AV: fixed the braino spotted by Cyrill]
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk> | static ssize_t ubifs_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
int err = update_mctime(file_inode(iocb->ki_filp));
if (err)
return err;
return generic_file_write_iter(iocb, from);
}
| static ssize_t ubifs_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
int err = update_mctime(file_inode(iocb->ki_filp));
if (err)
return err;
return generic_file_write_iter(iocb, from);
}
| C | linux | 0 |
CVE-2015-3834 | https://www.cvedetails.com/cve/CVE-2015-3834/ | CWE-189 | https://android.googlesource.com/platform/frameworks/av/+/c82e31a7039a03dca7b37c65b7890ba5c1e18ced | c82e31a7039a03dca7b37c65b7890ba5c1e18ced | HDCP: buffer over flow check -- DO NOT MERGE
bug: 20222489
Change-Id: I3a64a5999d68ea243d187f12ec7717b7f26d93a3
(cherry picked from commit 532cd7b86a5fdc7b9a30a45d8ae2d16ef7660a72)
| BpHDCPObserver(const sp<IBinder> &impl)
: BpInterface<IHDCPObserver>(impl) {
}
| BpHDCPObserver(const sp<IBinder> &impl)
: BpInterface<IHDCPObserver>(impl) {
}
| C | Android | 0 |
CVE-2017-5044 | https://www.cvedetails.com/cve/CVE-2017-5044/ | CWE-119 | https://github.com/chromium/chromium/commit/62154472bd2c43e1790dd1bd8a527c1db9118d88 | 62154472bd2c43e1790dd1bd8a527c1db9118d88 | bluetooth: Implement getAvailability()
This change implements the getAvailability() method for
navigator.bluetooth as defined in the specification.
Bug: 707640
Change-Id: I9e9b3e7f8ea7f259e975f71cb6d9570e5f04b479
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1651516
Reviewed-by: Chris Harrelson <chrishtr@chromium.org>
Reviewed-by: Giovanni Ortuño Urquidi <ortuno@chromium.org>
Reviewed-by: Kinuko Yasuda <kinuko@chromium.org>
Commit-Queue: Ovidio de Jesús Ruiz-Henríquez <odejesush@chromium.org>
Auto-Submit: Ovidio de Jesús Ruiz-Henríquez <odejesush@chromium.org>
Cr-Commit-Position: refs/heads/master@{#688987} | void FakeCentral::RemoveFakeService(const std::string& identifier,
const std::string& peripheral_address,
RemoveFakeServiceCallback callback) {
FakePeripheral* fake_peripheral = GetFakePeripheral(peripheral_address);
if (!fake_peripheral) {
std::move(callback).Run(false);
return;
}
std::move(callback).Run(fake_peripheral->RemoveFakeService(identifier));
}
| void FakeCentral::RemoveFakeService(const std::string& identifier,
const std::string& peripheral_address,
RemoveFakeServiceCallback callback) {
FakePeripheral* fake_peripheral = GetFakePeripheral(peripheral_address);
if (!fake_peripheral) {
std::move(callback).Run(false);
return;
}
std::move(callback).Run(fake_peripheral->RemoveFakeService(identifier));
}
| C | Chrome | 0 |
CVE-2017-18200 | https://www.cvedetails.com/cve/CVE-2017-18200/ | CWE-20 | https://github.com/torvalds/linux/commit/638164a2718f337ea224b747cf5977ef143166a4 | 638164a2718f337ea224b747cf5977ef143166a4 | f2fs: fix potential panic during fstrim
As Ju Hyung Park reported:
"When 'fstrim' is called for manual trim, a BUG() can be triggered
randomly with this patch.
I'm seeing this issue on both x86 Desktop and arm64 Android phone.
On x86 Desktop, this was caused during Ubuntu boot-up. I have a
cronjob installed which calls 'fstrim -v /' during boot. On arm64
Android, this was caused during GC looping with 1ms gc_min_sleep_time
& gc_max_sleep_time."
Root cause of this issue is that f2fs_wait_discard_bios can only be
used by f2fs_put_super, because during put_super there must be no
other referrers, so it can ignore discard entry's reference count
when removing the entry, otherwise in other caller we will hit bug_on
in __remove_discard_cmd as there may be other issuer added reference
count in discard entry.
Thread A Thread B
- issue_discard_thread
- f2fs_ioc_fitrim
- f2fs_trim_fs
- f2fs_wait_discard_bios
- __issue_discard_cmd
- __submit_discard_cmd
- __wait_discard_cmd
- dc->ref++
- __wait_one_discard_bio
- __wait_discard_cmd
- __remove_discard_cmd
- f2fs_bug_on(sbi, dc->ref)
Fixes: 969d1b180d987c2be02de890d0fff0f66a0e80de
Reported-by: Ju Hyung Park <qkrwngud825@gmail.com>
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org> | static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
/*
* We do the test below only for project quotas. 'usrquota' and
* 'grpquota' mount options are allowed even without quota feature
* to support legacy quotas in quota files.
*/
if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi->sb)) {
f2fs_msg(sbi->sb, KERN_ERR, "Project quota feature not enabled. "
"Cannot enable project quota enforcement.");
return -1;
}
if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA] ||
sbi->s_qf_names[PRJQUOTA]) {
if (test_opt(sbi, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
clear_opt(sbi, USRQUOTA);
if (test_opt(sbi, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
clear_opt(sbi, GRPQUOTA);
if (test_opt(sbi, PRJQUOTA) && sbi->s_qf_names[PRJQUOTA])
clear_opt(sbi, PRJQUOTA);
if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
test_opt(sbi, PRJQUOTA)) {
f2fs_msg(sbi->sb, KERN_ERR, "old and new quota "
"format mixing");
return -1;
}
if (!sbi->s_jquota_fmt) {
f2fs_msg(sbi->sb, KERN_ERR, "journaled quota format "
"not specified");
return -1;
}
}
return 0;
}
| static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
/*
* We do the test below only for project quotas. 'usrquota' and
* 'grpquota' mount options are allowed even without quota feature
* to support legacy quotas in quota files.
*/
if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi->sb)) {
f2fs_msg(sbi->sb, KERN_ERR, "Project quota feature not enabled. "
"Cannot enable project quota enforcement.");
return -1;
}
if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA] ||
sbi->s_qf_names[PRJQUOTA]) {
if (test_opt(sbi, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
clear_opt(sbi, USRQUOTA);
if (test_opt(sbi, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
clear_opt(sbi, GRPQUOTA);
if (test_opt(sbi, PRJQUOTA) && sbi->s_qf_names[PRJQUOTA])
clear_opt(sbi, PRJQUOTA);
if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
test_opt(sbi, PRJQUOTA)) {
f2fs_msg(sbi->sb, KERN_ERR, "old and new quota "
"format mixing");
return -1;
}
if (!sbi->s_jquota_fmt) {
f2fs_msg(sbi->sb, KERN_ERR, "journaled quota format "
"not specified");
return -1;
}
}
return 0;
}
| C | linux | 0 |
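The invariant behind the f2fs_bug_on in the interleaving above can be stated in a few lines (simplified, illustrative types — not f2fs code): an entry may only be torn down by a path that can prove no other issuer still holds a reference.

struct discard_cmd_sketch { int ref; int state; };

static int try_remove_discard_cmd(struct discard_cmd_sketch *dc)
{
    if (dc->ref)     /* a waiter (thread B) bumped ref in __wait_discard_cmd */
        return 0;    /* removing now would trip the BUG_ON; defer */
    dc->state = 0;   /* safe teardown */
    return 1;
}

f2fs_put_super could ignore the count only because no other referrers can exist at that point, which is why restricting f2fs_wait_discard_bios to that caller fixes the panic.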
CVE-2019-11487 | https://www.cvedetails.com/cve/CVE-2019-11487/ | CWE-416 | https://github.com/torvalds/linux/commit/6b3a707736301c2128ca85ce85fb13f60b5e350a | 6b3a707736301c2128ca85ce85fb13f60b5e350a | Merge branch 'page-refs' (page ref overflow)
Merge page ref overflow branch.
Jann Horn reported that he can overflow the page ref count with
sufficient memory (and a filesystem that is intentionally extremely
slow).
Admittedly it's not exactly easy. To have more than four billion
references to a page requires a minimum of 32GB of kernel memory just
for the pointers to the pages, much less any metadata to keep track of
those pointers. Jann needed a total of 140GB of memory and a specially
crafted filesystem that leaves all reads pending (in order to not ever
free the page references and just keep adding more).
Still, we have a fairly straightforward way to limit the two obvious
user-controllable sources of page references: direct-IO like page
references gotten through get_user_pages(), and the splice pipe page
duplication. So let's just do that.
* branch page-refs:
fs: prevent page refcount overflow in pipe_buf_get
mm: prevent get_user_pages() from overflowing page refcount
mm: add 'try_get_page()' helper function
mm: make page ref count overflow check tighter and more explicit | static int link_pipe(struct pipe_inode_info *ipipe,
struct pipe_inode_info *opipe,
size_t len, unsigned int flags)
{
struct pipe_buffer *ibuf, *obuf;
int ret = 0, i = 0, nbuf;
/*
* Potential ABBA deadlock, work around it by ordering lock
* grabbing by pipe info address. Otherwise two different processes
* could deadlock (one doing tee from A -> B, the other from B -> A).
*/
pipe_double_lock(ipipe, opipe);
do {
if (!opipe->readers) {
send_sig(SIGPIPE, current, 0);
if (!ret)
ret = -EPIPE;
break;
}
/*
* If we have iterated all input buffers or ran out of
* output room, break.
*/
if (i >= ipipe->nrbufs || opipe->nrbufs >= opipe->buffers)
break;
ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (ipipe->buffers-1));
nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1);
/*
* Get a reference to this pipe buffer,
* so we can copy the contents over.
*/
if (!pipe_buf_get(ipipe, ibuf)) {
if (ret == 0)
ret = -EFAULT;
break;
}
obuf = opipe->bufs + nbuf;
*obuf = *ibuf;
/*
* Don't inherit the gift flag, we need to
* prevent multiple steals of this page.
*/
obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
pipe_buf_mark_unmergeable(obuf);
if (obuf->len > len)
obuf->len = len;
opipe->nrbufs++;
ret += obuf->len;
len -= obuf->len;
i++;
} while (len);
/*
* return EAGAIN if we have the potential of some data in the
* future, otherwise just return 0
*/
if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
ret = -EAGAIN;
pipe_unlock(ipipe);
pipe_unlock(opipe);
/*
* If we put data in the output pipe, wakeup any potential readers.
*/
if (ret > 0)
wakeup_pipe_readers(opipe);
return ret;
}
| static int link_pipe(struct pipe_inode_info *ipipe,
struct pipe_inode_info *opipe,
size_t len, unsigned int flags)
{
struct pipe_buffer *ibuf, *obuf;
int ret = 0, i = 0, nbuf;
/*
* Potential ABBA deadlock, work around it by ordering lock
* grabbing by pipe info address. Otherwise two different processes
* could deadlock (one doing tee from A -> B, the other from B -> A).
*/
pipe_double_lock(ipipe, opipe);
do {
if (!opipe->readers) {
send_sig(SIGPIPE, current, 0);
if (!ret)
ret = -EPIPE;
break;
}
/*
* If we have iterated all input buffers or ran out of
* output room, break.
*/
if (i >= ipipe->nrbufs || opipe->nrbufs >= opipe->buffers)
break;
ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (ipipe->buffers-1));
nbuf = (opipe->curbuf + opipe->nrbufs) & (opipe->buffers - 1);
/*
* Get a reference to this pipe buffer,
* so we can copy the contents over.
*/
pipe_buf_get(ipipe, ibuf);
obuf = opipe->bufs + nbuf;
*obuf = *ibuf;
/*
* Don't inherit the gift flag, we need to
* prevent multiple steals of this page.
*/
obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
pipe_buf_mark_unmergeable(obuf);
if (obuf->len > len)
obuf->len = len;
opipe->nrbufs++;
ret += obuf->len;
len -= obuf->len;
i++;
} while (len);
/*
* return EAGAIN if we have the potential of some data in the
* future, otherwise just return 0
*/
if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
ret = -EAGAIN;
pipe_unlock(ipipe);
pipe_unlock(opipe);
/*
* If we put data in the output pipe, wakeup any potential readers.
*/
if (ret > 0)
wakeup_pipe_readers(opipe);
return ret;
}
| C | linux | 1 |
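Of the four patches in the merge, the new helper is the easiest to show. Reconstructed from the merge description (kernel context assumed, so this is a sketch rather than a verified drop-in):

static inline __must_check bool try_get_page(struct page *page)
{
	/* refuse to take a reference once the count is no longer a sane
	 * positive value, instead of incrementing and letting it wrap */
	if (WARN_ON_ONCE(page_ref_count(page) <= 0))
		return false;
	page_ref_inc(page);
	return true;
}

The same idea is visible in func_after above: pipe_buf_get() now reports failure, and link_pipe() bails out with -EFAULT instead of duplicating a buffer whose reference could not safely be taken.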
CVE-2016-3156 | https://www.cvedetails.com/cve/CVE-2016-3156/ | CWE-399 | https://github.com/torvalds/linux/commit/fbd40ea0180a2d328c5adc61414dc8bab9335ce2 | fbd40ea0180a2d328c5adc61414dc8bab9335ce2 | ipv4: Don't do expensive useless work during inetdev destroy.
When an inetdev is destroyed, every address assigned to the interface
is removed. And in this scenario we do two pointless things which can
be very expensive if the number of assigned interfaces is large:
1) Address promotion. We are deleting all addresses, so there is no
point in doing this.
2) A full nf conntrack table purge for every address. We only need to
do this once, as is already caught by the existing
masq_dev_notifier so masq_inet_event() can skip this.
Reported-by: Solar Designer <solar@openwall.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Tested-by: Cyrill Gorcunov <gorcunov@openvz.org> | static int masq_device_event(struct notifier_block *this,
unsigned long event,
void *ptr)
{
const struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net *net = dev_net(dev);
if (event == NETDEV_DOWN) {
/* Device was downed. Search entire table for
* conntracks which were associated with that device,
* and forget them.
*/
NF_CT_ASSERT(dev->ifindex != 0);
nf_ct_iterate_cleanup(net, device_cmp,
(void *)(long)dev->ifindex, 0, 0);
}
return NOTIFY_DONE;
}
| static int masq_device_event(struct notifier_block *this,
unsigned long event,
void *ptr)
{
const struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net *net = dev_net(dev);
if (event == NETDEV_DOWN) {
/* Device was downed. Search entire table for
* conntracks which were associated with that device,
* and forget them.
*/
NF_CT_ASSERT(dev->ifindex != 0);
nf_ct_iterate_cleanup(net, device_cmp,
(void *)(long)dev->ifindex, 0, 0);
}
return NOTIFY_DONE;
}
| C | linux | 0 |
CVE-2013-2873 | https://www.cvedetails.com/cve/CVE-2013-2873/ | CWE-416 | https://github.com/chromium/chromium/commit/370bd9b522d2ccd4a3113d6c93d30cdf8ca502ef | 370bd9b522d2ccd4a3113d6c93d30cdf8ca502ef | Protect WebURLLoaderImpl::Context while receiving responses.
A client's didReceiveResponse can cancel a request; by protecting the
Context we avoid a use after free in this case.
Interestingly, we really had very good warning about this problem, see
https://codereview.chromium.org/11900002/ back in January.
R=darin
BUG=241139
Review URL: https://chromiumcodereview.appspot.com/15738007
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@202821 0039d316-1c4b-4281-b951-d872f2087c98 | void WebURLLoaderImpl::Context::OnReceivedCachedMetadata(
const char* data, int len) {
if (client_)
client_->didReceiveCachedMetadata(loader_, data, len);
}
| void WebURLLoaderImpl::Context::OnReceivedCachedMetadata(
const char* data, int len) {
if (client_)
client_->didReceiveCachedMetadata(loader_, data, len);
}
| C | Chrome | 0 |
CVE-2016-8693 | https://www.cvedetails.com/cve/CVE-2016-8693/ | CWE-415 | https://github.com/mdadams/jasper/commit/44a524e367597af58d6265ae2014468b334d0309 | 44a524e367597af58d6265ae2014468b334d0309 | The memory stream interface allows for a buffer size of zero.
The case of a zero-sized buffer was not handled correctly, as it could
lead to a double free.
This problem has now been fixed (hopefully).
One might ask whether a zero-sized buffer should be allowed at all,
but this is a question for another day. | int jas_stream_gobble(jas_stream_t *stream, int n)
{
int m;
m = n;
for (m = n; m > 0; --m) {
if (jas_stream_getc(stream) == EOF) {
return n - m;
}
}
return n;
}
| int jas_stream_gobble(jas_stream_t *stream, int n)
{
int m;
m = n;
for (m = n; m > 0; --m) {
if (jas_stream_getc(stream) == EOF) {
return n - m;
}
}
return n;
}
| C | jasper | 0 |
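The function shown is not where the fix landed; the double free the message describes follows a classic pattern worth one sketch (field names illustrative, not jasper's):

#include <stdlib.h>

struct memstream_sketch { unsigned char *buf; size_t len; int myalloc; };

/* with a zero-sized buffer a grow path can free buf once, and an
 * unconditional teardown can free it again; nulling after free
 * (or tracking ownership) makes the second release harmless */
static void memstream_close(struct memstream_sketch *ms)
{
    if (ms->myalloc && ms->buf) {
        free(ms->buf);
        ms->buf = NULL;
        ms->myalloc = 0;
    }
}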
null | null | null | https://github.com/chromium/chromium/commit/21d4d15a81b030f522fef29a0429f08a70220f68 | 21d4d15a81b030f522fef29a0429f08a70220f68 | Moved guest_view_registry to GuestViewManager and made it an instance map
This change allows for the change towards moving GuestViewManager to components
and implementing an extensions specific GuestViewManager that installs extensions-specific
guest types.
BUG=444869
Review URL: https://codereview.chromium.org/1096623002
Cr-Commit-Position: refs/heads/master@{#325919} | void GuestViewBase::SetUpSizing(const base::DictionaryValue& params) {
bool auto_size_enabled = auto_size_enabled_;
params.GetBoolean(guestview::kAttributeAutoSize, &auto_size_enabled);
int max_height = max_auto_size_.height();
int max_width = max_auto_size_.width();
params.GetInteger(guestview::kAttributeMaxHeight, &max_height);
params.GetInteger(guestview::kAttributeMaxWidth, &max_width);
int min_height = min_auto_size_.height();
int min_width = min_auto_size_.width();
params.GetInteger(guestview::kAttributeMinHeight, &min_height);
params.GetInteger(guestview::kAttributeMinWidth, &min_width);
double element_height = 0.0;
double element_width = 0.0;
params.GetDouble(guestview::kElementHeight, &element_height);
params.GetDouble(guestview::kElementWidth, &element_width);
int normal_height = normal_size_.height();
int normal_width = normal_size_.width();
bool element_size_is_logical = false;
params.GetBoolean(guestview::kElementSizeIsLogical, &element_size_is_logical);
if (element_size_is_logical) {
normal_height = LogicalPixelsToPhysicalPixels(element_height);
normal_width = LogicalPixelsToPhysicalPixels(element_width);
} else {
normal_height = lround(element_height);
normal_width = lround(element_width);
}
SetSizeParams set_size_params;
set_size_params.enable_auto_size.reset(new bool(auto_size_enabled));
set_size_params.min_size.reset(new gfx::Size(min_width, min_height));
set_size_params.max_size.reset(new gfx::Size(max_width, max_height));
set_size_params.normal_size.reset(new gfx::Size(normal_width, normal_height));
SetSize(set_size_params);
}
| void GuestViewBase::SetUpSizing(const base::DictionaryValue& params) {
bool auto_size_enabled = auto_size_enabled_;
params.GetBoolean(guestview::kAttributeAutoSize, &auto_size_enabled);
int max_height = max_auto_size_.height();
int max_width = max_auto_size_.width();
params.GetInteger(guestview::kAttributeMaxHeight, &max_height);
params.GetInteger(guestview::kAttributeMaxWidth, &max_width);
int min_height = min_auto_size_.height();
int min_width = min_auto_size_.width();
params.GetInteger(guestview::kAttributeMinHeight, &min_height);
params.GetInteger(guestview::kAttributeMinWidth, &min_width);
double element_height = 0.0;
double element_width = 0.0;
params.GetDouble(guestview::kElementHeight, &element_height);
params.GetDouble(guestview::kElementWidth, &element_width);
int normal_height = normal_size_.height();
int normal_width = normal_size_.width();
bool element_size_is_logical = false;
params.GetBoolean(guestview::kElementSizeIsLogical, &element_size_is_logical);
if (element_size_is_logical) {
normal_height = LogicalPixelsToPhysicalPixels(element_height);
normal_width = LogicalPixelsToPhysicalPixels(element_width);
} else {
normal_height = lround(element_height);
normal_width = lround(element_width);
}
SetSizeParams set_size_params;
set_size_params.enable_auto_size.reset(new bool(auto_size_enabled));
set_size_params.min_size.reset(new gfx::Size(min_width, min_height));
set_size_params.max_size.reset(new gfx::Size(max_width, max_height));
set_size_params.normal_size.reset(new gfx::Size(normal_width, normal_height));
SetSize(set_size_params);
}
| C | Chrome | 0 |
CVE-2014-9888 | https://www.cvedetails.com/cve/CVE-2014-9888/ | CWE-264 | https://github.com/torvalds/linux/commit/0ea1ec713f04bdfac343c9702b21cd3a7c711826 | 0ea1ec713f04bdfac343c9702b21cd3a7c711826 | ARM: dma-mapping: don't allow DMA mappings to be marked executable
DMA mapping permissions were being derived from pgprot_kernel directly
without using PAGE_KERNEL. This causes them to be marked with executable
permission, which is not what we want. Fix this.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk> | static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
size_t size, enum dma_data_direction dir)
{
unsigned long paddr;
dma_cache_maint_page(page, off, size, dir, dmac_map_area);
paddr = page_to_phys(page) + off;
if (dir == DMA_FROM_DEVICE) {
outer_inv_range(paddr, paddr + size);
} else {
outer_clean_range(paddr, paddr + size);
}
/* FIXME: non-speculating: flush on bidirectional mappings? */
}
| static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
size_t size, enum dma_data_direction dir)
{
unsigned long paddr;
dma_cache_maint_page(page, off, size, dir, dmac_map_area);
paddr = page_to_phys(page) + off;
if (dir == DMA_FROM_DEVICE) {
outer_inv_range(paddr, paddr + size);
} else {
outer_clean_range(paddr, paddr + size);
}
/* FIXME: non-speculating: flush on bidirectional mappings? */
}
| C | linux | 0 |
CVE-2016-5219 | https://www.cvedetails.com/cve/CVE-2016-5219/ | CWE-416 | https://github.com/chromium/chromium/commit/a4150b688a754d3d10d2ca385155b1c95d77d6ae | a4150b688a754d3d10d2ca385155b1c95d77d6ae | Add GL_PROGRAM_COMPLETION_QUERY_CHROMIUM
This makes the query of GL_COMPLETION_STATUS_KHR to programs much
cheaper by minimizing the round-trip to the GPU thread.
Bug: 881152, 957001
Change-Id: Iadfa798af29225e752c710ca5c25f50b3dd3101a
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1586630
Commit-Queue: Kenneth Russell <kbr@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Geoff Lang <geofflang@chromium.org>
Reviewed-by: Kenneth Russell <kbr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#657568} | void GLES2Implementation::TraceEndCHROMIUM() {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glTraceEndCHROMIUM("
<< ")");
if (current_trace_stack_ == 0) {
SetGLError(GL_INVALID_OPERATION, "glTraceEndCHROMIUM",
"missing begin trace");
return;
}
helper_->TraceEndCHROMIUM();
current_trace_stack_--;
}
| void GLES2Implementation::TraceEndCHROMIUM() {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glTraceEndCHROMIUM("
<< ")");
if (current_trace_stack_ == 0) {
SetGLError(GL_INVALID_OPERATION, "glTraceEndCHROMIUM",
"missing begin trace");
return;
}
helper_->TraceEndCHROMIUM();
current_trace_stack_--;
}
| C | Chrome | 0 |
CVE-2016-7915 | https://www.cvedetails.com/cve/CVE-2016-7915/ | CWE-125 | https://github.com/torvalds/linux/commit/50220dead1650609206efe91f0cc116132d59b3f | 50220dead1650609206efe91f0cc116132d59b3f | HID: core: prevent out-of-bound readings
Plugging a Logitech DJ receiver with KASAN activated raises a bunch of
out-of-bound readings.
The fields are allocated up to MAX_USAGE, meaning that potentially, we do
not have enough fields to fit the incoming values.
Add checks and silence KASAN.
Signed-off-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz> | static int hid_device_probe(struct device *dev)
{
struct hid_driver *hdrv = to_hid_driver(dev->driver);
struct hid_device *hdev = to_hid_device(dev);
const struct hid_device_id *id;
int ret = 0;
if (down_interruptible(&hdev->driver_lock))
return -EINTR;
if (down_interruptible(&hdev->driver_input_lock)) {
ret = -EINTR;
goto unlock_driver_lock;
}
hdev->io_started = false;
if (!hdev->driver) {
id = hid_match_device(hdev, hdrv);
if (id == NULL) {
ret = -ENODEV;
goto unlock;
}
hdev->driver = hdrv;
if (hdrv->probe) {
ret = hdrv->probe(hdev, id);
} else { /* default probe */
ret = hid_open_report(hdev);
if (!ret)
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
}
if (ret) {
hid_close_report(hdev);
hdev->driver = NULL;
}
}
unlock:
if (!hdev->io_started)
up(&hdev->driver_input_lock);
unlock_driver_lock:
up(&hdev->driver_lock);
return ret;
}
| static int hid_device_probe(struct device *dev)
{
struct hid_driver *hdrv = to_hid_driver(dev->driver);
struct hid_device *hdev = to_hid_device(dev);
const struct hid_device_id *id;
int ret = 0;
if (down_interruptible(&hdev->driver_lock))
return -EINTR;
if (down_interruptible(&hdev->driver_input_lock)) {
ret = -EINTR;
goto unlock_driver_lock;
}
hdev->io_started = false;
if (!hdev->driver) {
id = hid_match_device(hdev, hdrv);
if (id == NULL) {
ret = -ENODEV;
goto unlock;
}
hdev->driver = hdrv;
if (hdrv->probe) {
ret = hdrv->probe(hdev, id);
} else { /* default probe */
ret = hid_open_report(hdev);
if (!ret)
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
}
if (ret) {
hid_close_report(hdev);
hdev->driver = NULL;
}
}
unlock:
if (!hdev->io_started)
up(&hdev->driver_input_lock);
unlock_driver_lock:
up(&hdev->driver_lock);
return ret;
}
| C | linux | 0 |
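The function in this record is the probe path, not the parsing fix itself. The out-of-bound reads KASAN flagged come from device-supplied indices into arrays sized at report-parse time (up to MAX_USAGE), so the added checks look like this in spirit (simplified, illustrative types):

#define MAX_USAGES_SKETCH 16

struct hid_field_sketch {
    int value[MAX_USAGES_SKETCH];
    unsigned maxusage;   /* slots actually allocated at parse time */
};

static int store_usage_value(struct hid_field_sketch *f,
                             unsigned idx, int val)
{
    if (idx >= f->maxusage)   /* device-controlled index: validate first */
        return -1;
    f->value[idx] = val;
    return 0;
}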
CVE-2013-6634 | https://www.cvedetails.com/cve/CVE-2013-6634/ | CWE-287 | https://github.com/chromium/chromium/commit/50370b3c98047bdc80184ff87a502edc5c597d3a | 50370b3c98047bdc80184ff87a502edc5c597d3a | During redirects in the one click sign in flow, check the current URL
instead of original URL to validate gaia http headers.
BUG=307159
Review URL: https://codereview.chromium.org/77343002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@236563 0039d316-1c4b-4281-b951-d872f2087c98 | bool AreWeShowingSignin(GURL url, signin::Source source, std::string email) {
GURL::Replacements replacements;
replacements.ClearQuery();
GURL clean_login_url =
GaiaUrls::GetInstance()->service_login_url().ReplaceComponents(
replacements);
return (url.ReplaceComponents(replacements) == clean_login_url &&
source != signin::SOURCE_UNKNOWN) ||
(IsValidGaiaSigninRedirectOrResponseURL(url) &&
url.spec().find("ChromeLoginPrompt") != std::string::npos &&
!email.empty());
}
| bool AreWeShowingSignin(GURL url, signin::Source source, std::string email) {
GURL::Replacements replacements;
replacements.ClearQuery();
GURL clean_login_url =
GaiaUrls::GetInstance()->service_login_url().ReplaceComponents(
replacements);
return (url.ReplaceComponents(replacements) == clean_login_url &&
source != signin::SOURCE_UNKNOWN) ||
(IsValidGaiaSigninRedirectOrResponseURL(url) &&
url.spec().find("ChromeLoginPrompt") != std::string::npos &&
!email.empty());
}
| C | Chrome | 0 |
CVE-2019-11922 | https://www.cvedetails.com/cve/CVE-2019-11922/ | CWE-362 | https://github.com/facebook/zstd/pull/1404/commits/3e5cdf1b6a85843e991d7d10f6a2567c15580da0 | 3e5cdf1b6a85843e991d7d10f6a2567c15580da0 | fixed T36302429 | size_t ZSTD_estimateCCtxSize(int compressionLevel)
{
int level;
size_t memBudget = 0;
for (level=1; level<=compressionLevel; level++) {
size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
if (newMB > memBudget) memBudget = newMB;
}
return memBudget;
}
| size_t ZSTD_estimateCCtxSize(int compressionLevel)
{
int level;
size_t memBudget = 0;
for (level=1; level<=compressionLevel; level++) {
size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
if (newMB > memBudget) memBudget = newMB;
}
return memBudget;
}
| C | zstd | 0 |
CVE-2017-11399 | https://www.cvedetails.com/cve/CVE-2017-11399/ | CWE-125 | https://github.com/FFmpeg/FFmpeg/commit/ba4beaf6149f7241c8bd85fe853318c2f6837ad0 | ba4beaf6149f7241c8bd85fe853318c2f6837ad0 | avcodec/apedec: Fix integer overflow
Fixes: out of array access
Fixes: PoC.ape and others
Found-by: Bingchang, Liu@VARAS of IIE
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc> | static void do_apply_filter(APEContext *ctx, int version, APEFilter *f,
int32_t *data, int count, int order, int fracbits)
{
int res;
int absres;
while (count--) {
/* round fixedpoint scalar product */
res = ctx->adsp.scalarproduct_and_madd_int16(f->coeffs,
f->delay - order,
f->adaptcoeffs - order,
order, APESIGN(*data));
res = (res + (1 << (fracbits - 1))) >> fracbits;
res += *data;
*data++ = res;
/* Update the output history */
*f->delay++ = av_clip_int16(res);
if (version < 3980) {
/* Version ??? to < 3.98 files (untested) */
f->adaptcoeffs[0] = (res == 0) ? 0 : ((res >> 28) & 8) - 4;
f->adaptcoeffs[-4] >>= 1;
f->adaptcoeffs[-8] >>= 1;
} else {
/* Version 3.98 and later files */
/* Update the adaption coefficients */
absres = FFABS(res);
if (absres)
*f->adaptcoeffs = APESIGN(res) *
(8 << ((absres > f->avg * 3) + (absres > f->avg * 4 / 3)));
/* equivalent to the following code
if (absres <= f->avg * 4 / 3)
*f->adaptcoeffs = APESIGN(res) * 8;
else if (absres <= f->avg * 3)
*f->adaptcoeffs = APESIGN(res) * 16;
else
*f->adaptcoeffs = APESIGN(res) * 32;
*/
else
*f->adaptcoeffs = 0;
f->avg += (absres - f->avg) / 16;
f->adaptcoeffs[-1] >>= 1;
f->adaptcoeffs[-2] >>= 1;
f->adaptcoeffs[-8] >>= 1;
}
f->adaptcoeffs++;
/* Have we filled the history buffer? */
if (f->delay == f->historybuffer + HISTORY_SIZE + (order * 2)) {
memmove(f->historybuffer, f->delay - (order * 2),
(order * 2) * sizeof(*f->historybuffer));
f->delay = f->historybuffer + order * 2;
f->adaptcoeffs = f->historybuffer + order;
}
}
}
| static void do_apply_filter(APEContext *ctx, int version, APEFilter *f,
int32_t *data, int count, int order, int fracbits)
{
int res;
int absres;
while (count--) {
/* round fixedpoint scalar product */
res = ctx->adsp.scalarproduct_and_madd_int16(f->coeffs,
f->delay - order,
f->adaptcoeffs - order,
order, APESIGN(*data));
res = (res + (1 << (fracbits - 1))) >> fracbits;
res += *data;
*data++ = res;
/* Update the output history */
*f->delay++ = av_clip_int16(res);
if (version < 3980) {
/* Version ??? to < 3.98 files (untested) */
f->adaptcoeffs[0] = (res == 0) ? 0 : ((res >> 28) & 8) - 4;
f->adaptcoeffs[-4] >>= 1;
f->adaptcoeffs[-8] >>= 1;
} else {
/* Version 3.98 and later files */
/* Update the adaption coefficients */
absres = FFABS(res);
if (absres)
*f->adaptcoeffs = APESIGN(res) *
(8 << ((absres > f->avg * 3) + (absres > f->avg * 4 / 3)));
/* equivalent to the following code
if (absres <= f->avg * 4 / 3)
*f->adaptcoeffs = APESIGN(res) * 8;
else if (absres <= f->avg * 3)
*f->adaptcoeffs = APESIGN(res) * 16;
else
*f->adaptcoeffs = APESIGN(res) * 32;
*/
else
*f->adaptcoeffs = 0;
f->avg += (absres - f->avg) / 16;
f->adaptcoeffs[-1] >>= 1;
f->adaptcoeffs[-2] >>= 1;
f->adaptcoeffs[-8] >>= 1;
}
f->adaptcoeffs++;
/* Have we filled the history buffer? */
if (f->delay == f->historybuffer + HISTORY_SIZE + (order * 2)) {
memmove(f->historybuffer, f->delay - (order * 2),
(order * 2) * sizeof(*f->historybuffer));
f->delay = f->historybuffer + order * 2;
f->adaptcoeffs = f->historybuffer + order;
}
}
}
| C | FFmpeg | 0 |
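The usual shape of a fix for this class of bug is to widen the intermediate arithmetic so the fixed-point rounding add cannot overflow and poison a later array index. A sketch of that pattern; whether the actual patch widens this particular expression is an assumption.

#include <stdint.h>

/* Round a fixed-point value to integer without risking 32-bit overflow:
 * perform the rounding add in 64 bits, then narrow after the shift. */
static int32_t fixed_round(int32_t res, int fracbits)
{
	return (int32_t)(((int64_t)res + ((int64_t)1 << (fracbits - 1))) >> fracbits);
}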
CVE-2013-7421 | https://www.cvedetails.com/cve/CVE-2013-7421/ | CWE-264 | https://github.com/torvalds/linux/commit/5d26a105b5a73e5635eae0629b42fa0a90e07b7b | 5d26a105b5a73e5635eae0629b42fa0a90e07b7b | crypto: prefix module autoloading with "crypto-"
This prefixes all crypto module loading with "crypto-" so we never run
the risk of exposing module auto-loading to userspace via a crypto API,
as demonstrated by Mathias Krause:
https://lkml.org/lkml/2013/3/4/70
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> | static void __exit michael_mic_exit(void)
{
crypto_unregister_shash(&alg);
}
| static void __exit michael_mic_exit(void)
{
crypto_unregister_shash(&alg);
}
| C | linux | 0 |
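The mechanism behind this series is a dedicated alias macro that stamps every algorithm module with a "crypto-" prefixed alias, so strings arriving through the crypto API can only ever resolve to crypto modules. A sketch of the idea: MODULE_ALIAS_CRYPTO is the macro the series introduces, but its expansion here is reconstructed and should be treated as approximate.

/* Emit both the legacy alias and the namespaced one, so requests routed
 * through the crypto API load "crypto-<name>" rather than arbitrary "<name>". */
#define MODULE_ALIAS_CRYPTO(name)	\
		__MODULE_INFO(alias, alias_userspace, name);	\
		__MODULE_INFO(alias, alias_crypto, "crypto-" name)

/* Used at the bottom of e.g. crypto/michael_mic.c: */
MODULE_ALIAS_CRYPTO("michael_mic");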
CVE-2017-16532 | https://www.cvedetails.com/cve/CVE-2017-16532/ | CWE-476 | https://github.com/torvalds/linux/commit/7c80f9e4a588f1925b07134bb2e3689335f6c6d8 | 7c80f9e4a588f1925b07134bb2e3689335f6c6d8 | usb: usbtest: fix NULL pointer dereference
If the usbtest driver encounters a device with an IN bulk endpoint but
no OUT bulk endpoint, it will try to dereference a NULL pointer
(out->desc.bEndpointAddress). The problem can be solved by adding a
missing test.
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Reported-by: Andrey Konovalov <andreyknvl@google.com>
Tested-by: Andrey Konovalov <andreyknvl@google.com>
Signed-off-by: Felipe Balbi <felipe.balbi@linux.intel.com> | static unsigned get_maxpacket(struct usb_device *udev, int pipe)
{
struct usb_host_endpoint *ep;
ep = usb_pipe_endpoint(udev, pipe);
return le16_to_cpup(&ep->desc.wMaxPacketSize);
}
| static unsigned get_maxpacket(struct usb_device *udev, int pipe)
{
struct usb_host_endpoint *ep;
ep = usb_pipe_endpoint(udev, pipe);
return le16_to_cpup(&ep->desc.wMaxPacketSize);
}
| C | linux | 0 |
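The missing test the commit message describes sits at the end of endpoint discovery: if the interface offers an IN bulk endpoint but no OUT one, the function must bail out before out->desc is touched. A sketch of that hunk (out comes from the commit message, in is assumed by symmetry, surrounding code is omitted):

	/* An IN bulk endpoint with no matching OUT endpoint must be rejected,
	 * otherwise out->desc.bEndpointAddress dereferences a NULL pointer. */
	if (!in || !out)
		return -EINVAL;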
CVE-2017-15129 | https://www.cvedetails.com/cve/CVE-2017-15129/ | CWE-416 | https://github.com/torvalds/linux/commit/21b5944350052d2583e82dd59b19a9ba94a007f0 | 21b5944350052d2583e82dd59b19a9ba94a007f0 | net: Fix double free and memory corruption in get_net_ns_by_id()
(I can trivially verify that that idr_remove in cleanup_net happens
after the network namespace count has dropped to zero --EWB)
Function get_net_ns_by_id() does not check for net::count
after it has found a peer in netns_ids idr.
It may dereference a peer, after its count has already been
finally decremented. This leads to double free and memory

corruption:
put_net(peer) rtnl_lock()
atomic_dec_and_test(&peer->count) [count=0] ...
__put_net(peer) get_net_ns_by_id(net, id)
spin_lock(&cleanup_list_lock)
list_add(&net->cleanup_list, &cleanup_list)
spin_unlock(&cleanup_list_lock)
queue_work() peer = idr_find(&net->netns_ids, id)
| get_net(peer) [count=1]
| ...
| (use after final put)
v ...
cleanup_net() ...
spin_lock(&cleanup_list_lock) ...
list_replace_init(&cleanup_list, ..) ...
spin_unlock(&cleanup_list_lock) ...
... ...
... put_net(peer)
... atomic_dec_and_test(&peer->count) [count=0]
... spin_lock(&cleanup_list_lock)
... list_add(&net->cleanup_list, &cleanup_list)
... spin_unlock(&cleanup_list_lock)
... queue_work()
... rtnl_unlock()
rtnl_lock() ...
for_each_net(tmp) { ...
id = __peernet2id(tmp, peer) ...
spin_lock_irq(&tmp->nsid_lock) ...
idr_remove(&tmp->netns_ids, id) ...
... ...
net_drop_ns() ...
net_free(peer) ...
} ...
|
v
cleanup_net()
...
(Second free of peer)
Also, put_net() on the right cpu may reorder with left's cpu
list_replace_init(&cleanup_list, ..), and then cleanup_list
will be corrupted.
Since cleanup_net() is executed in worker thread, while
put_net(peer) can happen everywhere, there should be
enough time for concurrent get_net_ns_by_id() to pick
the peer up, and the race does not seem to be unlikely.
The patch fixes the problem in standard way.
(Also, there is possible problem in peernet2id_alloc(), which requires
check for net::count under nsid_lock and maybe_get_net(peer), but
in current stable kernel it's used under rtnl_lock() and it has to be
safe. Openswitch begun to use peernet2id_alloc(), and possibly it should
be fixed too. While this is not in stable kernel yet, so I'll send
a separate message to netdev@ later).
Cc: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Fixes: 0c7aecd4bde4 "netns: add rtnl cmd to add and get peer netns ids"
Reviewed-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Reviewed-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Acked-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | static int register_pernet_operations(struct list_head *list,
struct pernet_operations *ops)
{
int error;
if (ops->id) {
again:
error = ida_get_new_above(&net_generic_ids, MIN_PERNET_OPS_ID, ops->id);
if (error < 0) {
if (error == -EAGAIN) {
ida_pre_get(&net_generic_ids, GFP_KERNEL);
goto again;
}
return error;
}
max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
}
error = __register_pernet_operations(list, ops);
if (error) {
rcu_barrier();
if (ops->id)
ida_remove(&net_generic_ids, *ops->id);
}
return error;
}
| static int register_pernet_operations(struct list_head *list,
struct pernet_operations *ops)
{
int error;
if (ops->id) {
again:
error = ida_get_new_above(&net_generic_ids, MIN_PERNET_OPS_ID, ops->id);
if (error < 0) {
if (error == -EAGAIN) {
ida_pre_get(&net_generic_ids, GFP_KERNEL);
goto again;
}
return error;
}
max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
}
error = __register_pernet_operations(list, ops);
if (error) {
rcu_barrier();
if (ops->id)
ida_remove(&net_generic_ids, *ops->id);
}
return error;
}
| C | linux | 0 |
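The "standard way" referenced above is to take the reference conditionally, so a peer whose count already reached zero can never be revived. A sketch of get_net_ns_by_id() with that guard; maybe_get_net() is the existing helper that bumps net::count only while it is still non-zero, and the exact locking shown is an assumption.

struct net *get_net_ns_by_id(struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	peer = idr_find(&net->netns_ids, id);
	if (peer && !maybe_get_net(peer)) /* count may already be 0 */
		peer = NULL;
	rcu_read_unlock();

	return peer;
}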
CVE-2015-8844 | https://www.cvedetails.com/cve/CVE-2015-8844/ | CWE-20 | https://github.com/torvalds/linux/commit/d2b9d2a5ad5ef04ff978c9923d19730cb05efd55 | d2b9d2a5ad5ef04ff978c9923d19730cb05efd55 | powerpc/tm: Block signal return setting invalid MSR state
Currently we allow both the MSR T and S bits to be set by userspace on
a signal return. Unfortunately this is a reserved configuration and
will cause a TM Bad Thing exception if attempted (via rfid).
This patch checks for this case in both the 32 and 64 bit signals
code. If both T and S are set, we mark the context as invalid.
Found using a syscall fuzzer.
Fixes: 2b0a576d15e0 ("powerpc: Add new transactional memory state to the signal context")
Cc: stable@vger.kernel.org # v3.9+
Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au> | static int save_tm_user_regs(struct pt_regs *regs,
struct mcontext __user *frame,
struct mcontext __user *tm_frame, int sigret)
{
unsigned long msr = regs->msr;
/* Remove TM bits from thread's MSR. The MSR in the sigcontext
* just indicates to userland that we were doing a transaction, but we
* don't want to return in transactional state. This also ensures
* that flush_fp_to_thread won't set TIF_RESTORE_TM again.
*/
regs->msr &= ~MSR_TS_MASK;
/* Make sure floating point registers are stored in regs */
flush_fp_to_thread(current);
/* Save both sets of general registers */
if (save_general_regs(&current->thread.ckpt_regs, frame)
|| save_general_regs(regs, tm_frame))
return 1;
/* Stash the top half of the 64bit MSR into the 32bit MSR word
* of the transactional mcontext. This way we have a backward-compatible
* MSR in the 'normal' (checkpointed) mcontext and additionally one can
* also look at what type of transaction (T or S) was active at the
* time of the signal.
*/
if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
return 1;
#ifdef CONFIG_ALTIVEC
/* save altivec registers */
if (current->thread.used_vr) {
flush_altivec_to_thread(current);
if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
ELF_NVRREG * sizeof(vector128)))
return 1;
if (msr & MSR_VEC) {
if (__copy_to_user(&tm_frame->mc_vregs,
&current->thread.transact_vr,
ELF_NVRREG * sizeof(vector128)))
return 1;
} else {
if (__copy_to_user(&tm_frame->mc_vregs,
&current->thread.vr_state,
ELF_NVRREG * sizeof(vector128)))
return 1;
}
/* set MSR_VEC in the saved MSR value to indicate that
* frame->mc_vregs contains valid data
*/
msr |= MSR_VEC;
}
/* We always copy to/from vrsave, it's 0 if we don't have or don't
* use altivec. Since VSCR only contains 32 bits saved in the least
* significant bits of a vector, we "cheat" and stuff VRSAVE in the
* most significant bits of that same vector. --BenH
*/
if (cpu_has_feature(CPU_FTR_ALTIVEC))
current->thread.vrsave = mfspr(SPRN_VRSAVE);
if (__put_user(current->thread.vrsave,
(u32 __user *)&frame->mc_vregs[32]))
return 1;
if (msr & MSR_VEC) {
if (__put_user(current->thread.transact_vrsave,
(u32 __user *)&tm_frame->mc_vregs[32]))
return 1;
} else {
if (__put_user(current->thread.vrsave,
(u32 __user *)&tm_frame->mc_vregs[32]))
return 1;
}
#endif /* CONFIG_ALTIVEC */
if (copy_fpr_to_user(&frame->mc_fregs, current))
return 1;
if (msr & MSR_FP) {
if (copy_transact_fpr_to_user(&tm_frame->mc_fregs, current))
return 1;
} else {
if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
return 1;
}
#ifdef CONFIG_VSX
/*
* Copy VSR 0-31 upper half from thread_struct to local
* buffer, then write that to userspace. Also set MSR_VSX in
* the saved MSR value to indicate that frame->mc_vregs
* contains valid data
*/
if (current->thread.used_vsr) {
__giveup_vsx(current);
if (copy_vsx_to_user(&frame->mc_vsregs, current))
return 1;
if (msr & MSR_VSX) {
if (copy_transact_vsx_to_user(&tm_frame->mc_vsregs,
current))
return 1;
} else {
if (copy_vsx_to_user(&tm_frame->mc_vsregs, current))
return 1;
}
msr |= MSR_VSX;
}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
/* SPE regs are not checkpointed with TM, so this section is
* simply the same as in save_user_regs().
*/
if (current->thread.used_spe) {
flush_spe_to_thread(current);
if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
ELF_NEVRREG * sizeof(u32)))
return 1;
/* set MSR_SPE in the saved MSR value to indicate that
* frame->mc_vregs contains valid data */
msr |= MSR_SPE;
}
/* We always copy to/from spefscr */
if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
return 1;
#endif /* CONFIG_SPE */
if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
return 1;
if (sigret) {
/* Set up the sigreturn trampoline: li r0,sigret; sc */
if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
|| __put_user(0x44000002UL, &frame->tramp[1]))
return 1;
flush_icache_range((unsigned long) &frame->tramp[0],
(unsigned long) &frame->tramp[2]);
}
return 0;
}
| static int save_tm_user_regs(struct pt_regs *regs,
struct mcontext __user *frame,
struct mcontext __user *tm_frame, int sigret)
{
unsigned long msr = regs->msr;
/* Remove TM bits from thread's MSR. The MSR in the sigcontext
* just indicates to userland that we were doing a transaction, but we
* don't want to return in transactional state. This also ensures
* that flush_fp_to_thread won't set TIF_RESTORE_TM again.
*/
regs->msr &= ~MSR_TS_MASK;
/* Make sure floating point registers are stored in regs */
flush_fp_to_thread(current);
/* Save both sets of general registers */
if (save_general_regs(&current->thread.ckpt_regs, frame)
|| save_general_regs(regs, tm_frame))
return 1;
/* Stash the top half of the 64bit MSR into the 32bit MSR word
* of the transactional mcontext. This way we have a backward-compatible
* MSR in the 'normal' (checkpointed) mcontext and additionally one can
* also look at what type of transaction (T or S) was active at the
* time of the signal.
*/
if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
return 1;
#ifdef CONFIG_ALTIVEC
/* save altivec registers */
if (current->thread.used_vr) {
flush_altivec_to_thread(current);
if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
ELF_NVRREG * sizeof(vector128)))
return 1;
if (msr & MSR_VEC) {
if (__copy_to_user(&tm_frame->mc_vregs,
&current->thread.transact_vr,
ELF_NVRREG * sizeof(vector128)))
return 1;
} else {
if (__copy_to_user(&tm_frame->mc_vregs,
&current->thread.vr_state,
ELF_NVRREG * sizeof(vector128)))
return 1;
}
/* set MSR_VEC in the saved MSR value to indicate that
* frame->mc_vregs contains valid data
*/
msr |= MSR_VEC;
}
/* We always copy to/from vrsave, it's 0 if we don't have or don't
* use altivec. Since VSCR only contains 32 bits saved in the least
* significant bits of a vector, we "cheat" and stuff VRSAVE in the
* most significant bits of that same vector. --BenH
*/
if (cpu_has_feature(CPU_FTR_ALTIVEC))
current->thread.vrsave = mfspr(SPRN_VRSAVE);
if (__put_user(current->thread.vrsave,
(u32 __user *)&frame->mc_vregs[32]))
return 1;
if (msr & MSR_VEC) {
if (__put_user(current->thread.transact_vrsave,
(u32 __user *)&tm_frame->mc_vregs[32]))
return 1;
} else {
if (__put_user(current->thread.vrsave,
(u32 __user *)&tm_frame->mc_vregs[32]))
return 1;
}
#endif /* CONFIG_ALTIVEC */
if (copy_fpr_to_user(&frame->mc_fregs, current))
return 1;
if (msr & MSR_FP) {
if (copy_transact_fpr_to_user(&tm_frame->mc_fregs, current))
return 1;
} else {
if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
return 1;
}
#ifdef CONFIG_VSX
/*
* Copy VSR 0-31 upper half from thread_struct to local
* buffer, then write that to userspace. Also set MSR_VSX in
* the saved MSR value to indicate that frame->mc_vregs
* contains valid data
*/
if (current->thread.used_vsr) {
__giveup_vsx(current);
if (copy_vsx_to_user(&frame->mc_vsregs, current))
return 1;
if (msr & MSR_VSX) {
if (copy_transact_vsx_to_user(&tm_frame->mc_vsregs,
current))
return 1;
} else {
if (copy_vsx_to_user(&tm_frame->mc_vsregs, current))
return 1;
}
msr |= MSR_VSX;
}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
/* SPE regs are not checkpointed with TM, so this section is
* simply the same as in save_user_regs().
*/
if (current->thread.used_spe) {
flush_spe_to_thread(current);
if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
ELF_NEVRREG * sizeof(u32)))
return 1;
/* set MSR_SPE in the saved MSR value to indicate that
* frame->mc_vregs contains valid data */
msr |= MSR_SPE;
}
/* We always copy to/from spefscr */
if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
return 1;
#endif /* CONFIG_SPE */
if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
return 1;
if (sigret) {
/* Set up the sigreturn trampoline: li r0,sigret; sc */
if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
|| __put_user(0x44000002UL, &frame->tramp[1]))
return 1;
flush_icache_range((unsigned long) &frame->tramp[0],
(unsigned long) &frame->tramp[2]);
}
return 0;
}
| C | linux | 0 |
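A sketch of the validity check the commit message describes, applied to the MSR taken from the user-supplied context before it is restored. The two predicate macros exist in the powerpc headers, but treating this as the literal patched hunk would be an assumption.

	/* MSR[TS] = 0b11, both Transactional and Suspended set, is a reserved
	 * encoding; rfid into it raises a TM Bad Thing exception. */
	if (MSR_TM_TRANSACTIONAL(msr) && MSR_TM_SUSPENDED(msr))
		return 1; /* reject: mark the signal context invalid */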
CVE-2011-2918 | https://www.cvedetails.com/cve/CVE-2011-2918/ | CWE-399 | https://github.com/torvalds/linux/commit/a8b0ca17b80e92faab46ee7179ba9e99ccb61233 | a8b0ca17b80e92faab46ee7179ba9e99ccb61233 | perf: Remove the nmi parameter from the swevent and overflow interface
The nmi parameter indicated if we could do wakeups from the current
context; if not, we would set some state and self-IPI and let the
resulting interrupt do the wakeup.
For the various event classes:
- hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from
the PMI-tail (ARM etc.)
- tracepoint: nmi=0; since tracepoint could be from NMI context.
- software: nmi=[0,1]; some, like the schedule thing cannot
perform wakeups, and hence need 0.
As one can see, there is very little nmi=1 usage, and the down-side of
not using it is that on some platforms some software events can have a
jiffy delay in wakeup (when arch_irq_work_raise isn't implemented).
The up-side however is that we can remove the nmi parameter and save a
bunch of conditionals in fast paths.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Michael Cree <mcree@orcon.net.nz>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jason Wessel <jason.wessel@windriver.com>
Cc: Don Zickus <dzickus@redhat.com>
Link: http://lkml.kernel.org/n/tip-agjev8eu666tvknpb3iaj0fg@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu> | fcmp_eq(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n)
{
if (CMP(CMP /*EQ*/) == 0)
regs->sr |= 1;
else
regs->sr &= ~1;
return 0;
}
| fcmp_eq(struct sh_fpu_soft_struct *fregs, struct pt_regs *regs, int m, int n)
{
if (CMP(CMP /*EQ*/) == 0)
regs->sr |= 1;
else
regs->sr &= ~1;
return 0;
}
| C | linux | 0 |
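The shape of the interface change, sketched at a representative call site. perf_event_overflow() is the real entry point; the surrounding handler context is invented for illustration.

	/* Before the change every caller carried an nmi flag:
	 *     perf_event_overflow(event, nmi, &data, regs);
	 * After it the flag is gone and wakeup deferral happens internally,
	 * via irq_work on architectures that need it: */
	perf_event_overflow(event, &data, regs);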
CVE-2017-9798 | https://www.cvedetails.com/cve/CVE-2017-9798/ | CWE-416 | https://github.com/apache/httpd/commit/29afdd2550b3d30a8defece2b95ae81edcf66ac9 | 29afdd2550b3d30a8defece2b95ae81edcf66ac9 | core: Disallow Methods' registration at run time (.htaccess); they may be
used only if registered at init time (httpd.conf).
Calling ap_method_register() in children processes is not the right scope
since it won't be shared for all requests.
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1807655 13f79535-47bb-0310-9956-ffa450edef68 | static const char *set_error_document(cmd_parms *cmd, void *conf_,
const char *errno_str, const char *msg)
{
core_dir_config *conf = conf_;
int error_number, index_number, idx500;
enum { MSG, LOCAL_PATH, REMOTE_PATH } what = MSG;
/* 1st parameter should be a 3 digit number, which we recognize;
* convert it into an array index
*/
error_number = atoi(errno_str);
idx500 = ap_index_of_response(HTTP_INTERNAL_SERVER_ERROR);
if (error_number == HTTP_INTERNAL_SERVER_ERROR) {
index_number = idx500;
}
else if ((index_number = ap_index_of_response(error_number)) == idx500) {
return apr_pstrcat(cmd->pool, "Unsupported HTTP response code ",
errno_str, NULL);
}
/* Heuristic to determine second argument. */
if (ap_strchr_c(msg,' '))
what = MSG;
else if (msg[0] == '/')
what = LOCAL_PATH;
else if (ap_is_url(msg))
what = REMOTE_PATH;
else
what = MSG;
/* The entry should be ignored if it is a full URL for a 401 error */
if (error_number == 401 && what == REMOTE_PATH) {
ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, cmd->server, APLOGNO(00113)
"%s:%d cannot use a full URL in a 401 ErrorDocument "
"directive --- ignoring!", cmd->directive->filename, cmd->directive->line_num);
}
else { /* Store it... */
if (conf->response_code_exprs == NULL) {
conf->response_code_exprs = apr_hash_make(cmd->pool);
}
if (ap_cstr_casecmp(msg, "default") == 0) {
/* special case: ErrorDocument 404 default restores the
* canned server error response
*/
apr_hash_set(conf->response_code_exprs,
apr_pmemdup(cmd->pool, &index_number, sizeof(index_number)),
sizeof(index_number), &errordocument_default);
}
else {
ap_expr_info_t *expr;
const char *expr_err = NULL;
/* hack. Prefix a " if it is a msg; as that is what
* http_protocol.c relies on to distinguish between
* a msg and a (local) path.
*/
const char *response =
(what == MSG) ? apr_pstrcat(cmd->pool, "\"", msg, NULL) :
apr_pstrdup(cmd->pool, msg);
expr = ap_expr_parse_cmd(cmd, response, AP_EXPR_FLAG_STRING_RESULT,
&expr_err, NULL);
if (expr_err) {
return apr_pstrcat(cmd->temp_pool,
"Cannot parse expression in ErrorDocument: ",
expr_err, NULL);
}
apr_hash_set(conf->response_code_exprs,
apr_pmemdup(cmd->pool, &index_number, sizeof(index_number)),
sizeof(index_number), expr);
}
}
return NULL;
}
| static const char *set_error_document(cmd_parms *cmd, void *conf_,
const char *errno_str, const char *msg)
{
core_dir_config *conf = conf_;
int error_number, index_number, idx500;
enum { MSG, LOCAL_PATH, REMOTE_PATH } what = MSG;
/* 1st parameter should be a 3 digit number, which we recognize;
* convert it into an array index
*/
error_number = atoi(errno_str);
idx500 = ap_index_of_response(HTTP_INTERNAL_SERVER_ERROR);
if (error_number == HTTP_INTERNAL_SERVER_ERROR) {
index_number = idx500;
}
else if ((index_number = ap_index_of_response(error_number)) == idx500) {
return apr_pstrcat(cmd->pool, "Unsupported HTTP response code ",
errno_str, NULL);
}
/* Heuristic to determine second argument. */
if (ap_strchr_c(msg,' '))
what = MSG;
else if (msg[0] == '/')
what = LOCAL_PATH;
else if (ap_is_url(msg))
what = REMOTE_PATH;
else
what = MSG;
/* The entry should be ignored if it is a full URL for a 401 error */
if (error_number == 401 && what == REMOTE_PATH) {
ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, cmd->server, APLOGNO(00113)
"%s:%d cannot use a full URL in a 401 ErrorDocument "
"directive --- ignoring!", cmd->directive->filename, cmd->directive->line_num);
}
else { /* Store it... */
if (conf->response_code_exprs == NULL) {
conf->response_code_exprs = apr_hash_make(cmd->pool);
}
if (ap_cstr_casecmp(msg, "default") == 0) {
/* special case: ErrorDocument 404 default restores the
* canned server error response
*/
apr_hash_set(conf->response_code_exprs,
apr_pmemdup(cmd->pool, &index_number, sizeof(index_number)),
sizeof(index_number), &errordocument_default);
}
else {
ap_expr_info_t *expr;
const char *expr_err = NULL;
/* hack. Prefix a " if it is a msg; as that is what
* http_protocol.c relies on to distinguish between
* a msg and a (local) path.
*/
const char *response =
(what == MSG) ? apr_pstrcat(cmd->pool, "\"", msg, NULL) :
apr_pstrdup(cmd->pool, msg);
expr = ap_expr_parse_cmd(cmd, response, AP_EXPR_FLAG_STRING_RESULT,
&expr_err, NULL);
if (expr_err) {
return apr_pstrcat(cmd->temp_pool,
"Cannot parse expression in ErrorDocument: ",
expr_err, NULL);
}
apr_hash_set(conf->response_code_exprs,
apr_pmemdup(cmd->pool, &index_number, sizeof(index_number)),
sizeof(index_number), expr);
}
}
return NULL;
}
| C | httpd | 0 |
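One way to express the intended restriction is to close registration once startup configuration has been parsed, so .htaccess processing inside child processes can no longer grow the shared method table. This is a conceptual sketch only; the flag and its wiring are assumptions, not the actual httpd change.

int do_register(const char *name); /* existing registration path (assumed) */

static int methods_frozen; /* set once httpd.conf parsing completes */

int method_register_checked(const char *name)
{
	if (methods_frozen)
		return -1; /* run-time (.htaccess) scope: not shared, refuse */
	return do_register(name);
}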
null | null | null | https://github.com/chromium/chromium/commit/aa0e1ed74972a619072341b6409dc5cacd2418aa | aa0e1ed74972a619072341b6409dc5cacd2418aa | [BlackBerry] willComposite() and didComposite() are now in InspectorController
https://bugs.webkit.org/show_bug.cgi?id=110343
Patch by Alberto Garcia <albgarcia@rim.com> on 2013-02-21
Reviewed by Carlos Garcia Campos.
This was changed in r142879.
* Api/WebPage.cpp:
(BlackBerry::WebKit::WebPagePrivate::willComposite):
(BlackBerry::WebKit::WebPagePrivate::didComposite):
git-svn-id: svn://svn.chromium.org/blink/trunk@143584 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | void WebPagePrivate::exitFullscreenForNode(Node* node)
{
#if ENABLE(VIDEO)
if (m_fullscreenNode.get()) {
m_client->fullscreenStop();
m_fullscreenNode = 0;
}
if (!node || !node->hasTagName(HTMLNames::videoTag))
return;
MediaPlayer* player = static_cast<HTMLMediaElement*>(node)->player();
if (!player)
return;
MediaPlayerPrivate* mmrPlayer = static_cast<MediaPlayerPrivate*>(player->implementation());
if (!mmrPlayer)
return;
mmrPlayer->setFullscreenWebPageClient(0);
#endif
}
| void WebPagePrivate::exitFullscreenForNode(Node* node)
{
#if ENABLE(VIDEO)
if (m_fullscreenNode.get()) {
m_client->fullscreenStop();
m_fullscreenNode = 0;
}
if (!node || !node->hasTagName(HTMLNames::videoTag))
return;
MediaPlayer* player = static_cast<HTMLMediaElement*>(node)->player();
if (!player)
return;
MediaPlayerPrivate* mmrPlayer = static_cast<MediaPlayerPrivate*>(player->implementation());
if (!mmrPlayer)
return;
mmrPlayer->setFullscreenWebPageClient(0);
#endif
}
| C | Chrome | 0 |
CVE-2016-3835 | https://www.cvedetails.com/cve/CVE-2016-3835/ | CWE-200 | https://android.googlesource.com/platform/hardware/qcom/media/+/7558d03e6498e970b761aa44fff6b2c659202d95 | 7558d03e6498e970b761aa44fff6b2c659202d95 | DO NOT MERGE mm-video-v4l2: venc: add checks before accessing heap pointers
Heap pointers do not point to user virtual addresses in case
of secure session.
Set them to NULL and add checks to avoid accesing them
Bug: 28815329
Bug: 28920116
Change-Id: I94fd5808e753b58654d65e175d3857ef46ffba26
| int omx_venc::async_message_process (void *context, void* message)
{
omx_video* omx = NULL;
struct venc_msg *m_sVenc_msg = NULL;
OMX_BUFFERHEADERTYPE* omxhdr = NULL;
struct venc_buffer *temp_buff = NULL;
if (context == NULL || message == NULL) {
DEBUG_PRINT_ERROR("ERROR: omx_venc::async_message_process invalid i/p params");
return -1;
}
m_sVenc_msg = (struct venc_msg *)message;
omx = reinterpret_cast<omx_video*>(context);
if (m_sVenc_msg->statuscode != VEN_S_SUCCESS) {
DEBUG_PRINT_ERROR("ERROR: async_msg_process() - Error statuscode = %lu",
m_sVenc_msg->statuscode);
if(m_sVenc_msg->msgcode == VEN_MSG_HW_OVERLOAD) {
omx->omx_report_hw_overload();
} else
omx->omx_report_error();
}
DEBUG_PRINT_LOW("omx_venc::async_message_process- msgcode = %lu",
m_sVenc_msg->msgcode);
switch (m_sVenc_msg->msgcode) {
case VEN_MSG_START:
omx->post_event (0,m_sVenc_msg->statuscode,\
OMX_COMPONENT_GENERATE_START_DONE);
break;
case VEN_MSG_STOP:
omx->post_event (0,m_sVenc_msg->statuscode,\
OMX_COMPONENT_GENERATE_STOP_DONE);
break;
case VEN_MSG_RESUME:
omx->post_event (0,m_sVenc_msg->statuscode,\
OMX_COMPONENT_GENERATE_RESUME_DONE);
break;
case VEN_MSG_PAUSE:
omx->post_event (0,m_sVenc_msg->statuscode,\
OMX_COMPONENT_GENERATE_PAUSE_DONE);
break;
case VEN_MSG_FLUSH_INPUT_DONE:
omx->post_event (0,m_sVenc_msg->statuscode,\
OMX_COMPONENT_GENERATE_EVENT_INPUT_FLUSH);
break;
case VEN_MSG_FLUSH_OUPUT_DONE:
omx->post_event (0,m_sVenc_msg->statuscode,\
OMX_COMPONENT_GENERATE_EVENT_OUTPUT_FLUSH);
break;
case VEN_MSG_INPUT_BUFFER_DONE:
omxhdr = (OMX_BUFFERHEADERTYPE* )\
m_sVenc_msg->buf.clientdata;
if (omxhdr == NULL ||
(((OMX_U32)(omxhdr - omx->m_inp_mem_ptr) > omx->m_sInPortDef.nBufferCountActual) &&
((OMX_U32)(omxhdr - omx->meta_buffer_hdr) > omx->m_sInPortDef.nBufferCountActual))) {
omxhdr = NULL;
m_sVenc_msg->statuscode = VEN_S_EFAIL;
}
#ifdef _ANDROID_ICS_
omx->omx_release_meta_buffer(omxhdr);
#endif
omx->post_event ((unsigned long)omxhdr,m_sVenc_msg->statuscode,
OMX_COMPONENT_GENERATE_EBD);
break;
case VEN_MSG_OUTPUT_BUFFER_DONE:
omxhdr = (OMX_BUFFERHEADERTYPE*)m_sVenc_msg->buf.clientdata;
if ( (omxhdr != NULL) &&
((OMX_U32)(omxhdr - omx->m_out_mem_ptr) < omx->m_sOutPortDef.nBufferCountActual)) {
if (m_sVenc_msg->buf.len <= omxhdr->nAllocLen) {
omxhdr->nFilledLen = m_sVenc_msg->buf.len;
omxhdr->nOffset = m_sVenc_msg->buf.offset;
omxhdr->nTimeStamp = m_sVenc_msg->buf.timestamp;
DEBUG_PRINT_LOW("o/p TS = %u", (unsigned int)m_sVenc_msg->buf.timestamp);
omxhdr->nFlags = m_sVenc_msg->buf.flags;
/*Use buffer case*/
if (omx->output_use_buffer && !omx->m_use_output_pmem && !omx->is_secure_session()) {
DEBUG_PRINT_LOW("memcpy() for o/p Heap UseBuffer");
memcpy(omxhdr->pBuffer,
(m_sVenc_msg->buf.ptrbuffer),
m_sVenc_msg->buf.len);
}
} else {
omxhdr->nFilledLen = 0;
}
} else {
omxhdr = NULL;
m_sVenc_msg->statuscode = VEN_S_EFAIL;
}
omx->post_event ((unsigned long)omxhdr,m_sVenc_msg->statuscode,
OMX_COMPONENT_GENERATE_FBD);
break;
case VEN_MSG_NEED_OUTPUT_BUFFER:
break;
#ifndef _MSM8974_
case VEN_MSG_LTRUSE_FAILED:
DEBUG_PRINT_ERROR("LTRUSE Failed!");
omx->post_event (NULL,m_sVenc_msg->statuscode,
OMX_COMPONENT_GENERATE_LTRUSE_FAILED);
break;
#endif
default:
DEBUG_PRINT_HIGH("Unknown msg received : %lu", m_sVenc_msg->msgcode);
break;
}
return 0;
}
| int omx_venc::async_message_process (void *context, void* message)
{
omx_video* omx = NULL;
struct venc_msg *m_sVenc_msg = NULL;
OMX_BUFFERHEADERTYPE* omxhdr = NULL;
struct venc_buffer *temp_buff = NULL;
if (context == NULL || message == NULL) {
DEBUG_PRINT_ERROR("ERROR: omx_venc::async_message_process invalid i/p params");
return -1;
}
m_sVenc_msg = (struct venc_msg *)message;
omx = reinterpret_cast<omx_video*>(context);
if (m_sVenc_msg->statuscode != VEN_S_SUCCESS) {
DEBUG_PRINT_ERROR("ERROR: async_msg_process() - Error statuscode = %lu",
m_sVenc_msg->statuscode);
if(m_sVenc_msg->msgcode == VEN_MSG_HW_OVERLOAD) {
omx->omx_report_hw_overload();
} else
omx->omx_report_error();
}
DEBUG_PRINT_LOW("omx_venc::async_message_process- msgcode = %lu",
m_sVenc_msg->msgcode);
switch (m_sVenc_msg->msgcode) {
case VEN_MSG_START:
omx->post_event (0,m_sVenc_msg->statuscode,\
OMX_COMPONENT_GENERATE_START_DONE);
break;
case VEN_MSG_STOP:
omx->post_event (0,m_sVenc_msg->statuscode,\
OMX_COMPONENT_GENERATE_STOP_DONE);
break;
case VEN_MSG_RESUME:
omx->post_event (0,m_sVenc_msg->statuscode,\
OMX_COMPONENT_GENERATE_RESUME_DONE);
break;
case VEN_MSG_PAUSE:
omx->post_event (0,m_sVenc_msg->statuscode,\
OMX_COMPONENT_GENERATE_PAUSE_DONE);
break;
case VEN_MSG_FLUSH_INPUT_DONE:
omx->post_event (0,m_sVenc_msg->statuscode,\
OMX_COMPONENT_GENERATE_EVENT_INPUT_FLUSH);
break;
case VEN_MSG_FLUSH_OUPUT_DONE:
omx->post_event (0,m_sVenc_msg->statuscode,\
OMX_COMPONENT_GENERATE_EVENT_OUTPUT_FLUSH);
break;
case VEN_MSG_INPUT_BUFFER_DONE:
omxhdr = (OMX_BUFFERHEADERTYPE* )\
m_sVenc_msg->buf.clientdata;
if (omxhdr == NULL ||
(((OMX_U32)(omxhdr - omx->m_inp_mem_ptr) > omx->m_sInPortDef.nBufferCountActual) &&
((OMX_U32)(omxhdr - omx->meta_buffer_hdr) > omx->m_sInPortDef.nBufferCountActual))) {
omxhdr = NULL;
m_sVenc_msg->statuscode = VEN_S_EFAIL;
}
#ifdef _ANDROID_ICS_
omx->omx_release_meta_buffer(omxhdr);
#endif
omx->post_event ((unsigned long)omxhdr,m_sVenc_msg->statuscode,
OMX_COMPONENT_GENERATE_EBD);
break;
case VEN_MSG_OUTPUT_BUFFER_DONE:
omxhdr = (OMX_BUFFERHEADERTYPE*)m_sVenc_msg->buf.clientdata;
if ( (omxhdr != NULL) &&
((OMX_U32)(omxhdr - omx->m_out_mem_ptr) < omx->m_sOutPortDef.nBufferCountActual)) {
if (m_sVenc_msg->buf.len <= omxhdr->nAllocLen) {
omxhdr->nFilledLen = m_sVenc_msg->buf.len;
omxhdr->nOffset = m_sVenc_msg->buf.offset;
omxhdr->nTimeStamp = m_sVenc_msg->buf.timestamp;
DEBUG_PRINT_LOW("o/p TS = %u", (unsigned int)m_sVenc_msg->buf.timestamp);
omxhdr->nFlags = m_sVenc_msg->buf.flags;
/*Use buffer case*/
if (omx->output_use_buffer && !omx->m_use_output_pmem) {
DEBUG_PRINT_LOW("memcpy() for o/p Heap UseBuffer");
memcpy(omxhdr->pBuffer,
(m_sVenc_msg->buf.ptrbuffer),
m_sVenc_msg->buf.len);
}
} else {
omxhdr->nFilledLen = 0;
}
} else {
omxhdr = NULL;
m_sVenc_msg->statuscode = VEN_S_EFAIL;
}
omx->post_event ((unsigned long)omxhdr,m_sVenc_msg->statuscode,
OMX_COMPONENT_GENERATE_FBD);
break;
case VEN_MSG_NEED_OUTPUT_BUFFER:
break;
#ifndef _MSM8974_
case VEN_MSG_LTRUSE_FAILED:
DEBUG_PRINT_ERROR("LTRUSE Failed!");
omx->post_event (NULL,m_sVenc_msg->statuscode,
OMX_COMPONENT_GENERATE_LTRUSE_FAILED);
break;
#endif
default:
DEBUG_PRINT_HIGH("Unknown msg received : %lu", m_sVenc_msg->msgcode);
break;
}
return 0;
}
| C | Android | 1 |
CVE-2018-13785 | https://www.cvedetails.com/cve/CVE-2018-13785/ | CWE-190 | https://github.com/glennrp/libpng/commit/8a05766cb74af05c04c53e6c9d60c13fc4d59bf2 | 8a05766cb74af05c04c53e6c9d60c13fc4d59bf2 | [libpng16] Fix the calculation of row_factor in png_check_chunk_length
(Bug report by Thuan Pham, SourceForge issue #278) | png_zlib_inflate(png_structrp png_ptr, int flush)
{
if (png_ptr->zstream_start && png_ptr->zstream.avail_in > 0)
{
if ((*png_ptr->zstream.next_in >> 4) > 7)
{
png_ptr->zstream.msg = "invalid window size (libpng)";
return Z_DATA_ERROR;
}
png_ptr->zstream_start = 0;
}
return inflate(&png_ptr->zstream, flush);
}
| png_zlib_inflate(png_structrp png_ptr, int flush)
{
if (png_ptr->zstream_start && png_ptr->zstream.avail_in > 0)
{
if ((*png_ptr->zstream.next_in >> 4) > 7)
{
png_ptr->zstream.msg = "invalid window size (libpng)";
return Z_DATA_ERROR;
}
png_ptr->zstream_start = 0;
}
return inflate(&png_ptr->zstream, flush);
}
| C | libpng | 0 |
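A sketch of the corrected row_factor arithmetic in png_check_chunk_length(), computed in size_t so the per-row byte estimate cannot wrap in int for extreme widths. The expression is reconstructed from the commit subject and the IDAT sanity-check context, so its details are assumptions.

	size_t row_factor =
	    (size_t)png_ptr->width
	    * (size_t)png_ptr->channels
	    * (png_ptr->bit_depth > 8 ? 2 : 1)
	    + 1 /* filter byte */
	    + (png_ptr->interlaced ? 6 : 0);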
CVE-2014-3688 | https://www.cvedetails.com/cve/CVE-2014-3688/ | CWE-399 | https://github.com/torvalds/linux/commit/26b87c7881006311828bb0ab271a551a62dcceb4 | 26b87c7881006311828bb0ab271a551a62dcceb4 | net: sctp: fix remote memory pressure from excessive queueing
This scenario is not limited to ASCONF, just taken as one
example triggering the issue. When receiving ASCONF probes
in the form of ...
-------------- INIT[ASCONF; ASCONF_ACK] ------------->
<----------- INIT-ACK[ASCONF; ASCONF_ACK] ------------
-------------------- COOKIE-ECHO -------------------->
<-------------------- COOKIE-ACK ---------------------
---- ASCONF_a; [ASCONF_b; ...; ASCONF_n;] JUNK ------>
[...]
---- ASCONF_m; [ASCONF_o; ...; ASCONF_z;] JUNK ------>
... where ASCONF_a, ASCONF_b, ..., ASCONF_z are good-formed
ASCONFs and have increasing serial numbers, we process such
ASCONF chunk(s) marked with !end_of_packet and !singleton,
since we have not yet reached the SCTP packet end. SCTP does
only do verification on a chunk by chunk basis, as an SCTP
packet is nothing more than just a container of a stream of
chunks which it eats up one by one.
We could run into the case that we receive a packet with a
malformed tail, above marked as trailing JUNK. All previous
chunks are here good-formed, so the stack will eat up all
previous chunks up to this point. In case JUNK does not fit
into a chunk header and there are no more other chunks in
the input queue, or in case JUNK contains a garbage chunk
header, but the encoded chunk length would exceed the skb
tail, or we came here from an entirely different scenario
and the chunk has pdiscard=1 mark (without having had a flush
point), it will happen, that we will excessively queue up
the association's output queue (a correct final chunk may
then turn it into a response flood when flushing the
queue ;)): I ran a simple script with incremental ASCONF
serial numbers and could see the server side consuming
excessive amount of RAM [before/after: up to 2GB and more].
The issue at heart is that the chunk train basically ends
with !end_of_packet and !singleton markers and since commit
2e3216cd54b1 ("sctp: Follow security requirement of responding
with 1 packet") therefore preventing an output queue flush
point in sctp_do_sm() -> sctp_cmd_interpreter() on the input
chunk (chunk = event_arg) even though local_cork is set,
but its precedence has changed since then. In the normal
case, the last chunk with end_of_packet=1 would trigger the
queue flush to accommodate possible outgoing bundling.
In the input queue, sctp_inq_pop() seems to do the right thing
in terms of discarding invalid chunks. So, above JUNK will
not enter the state machine and instead be released and exit
the sctp_assoc_bh_rcv() chunk processing loop. It's simply
the flush point being missing at loop exit. Adding a try-flush
approach on the output queue might not work as the underlying
infrastructure might be long gone at this point due to the
side-effect interpreter run.
One possibility, albeit a bit of a kludge, would be to defer
invalid chunk freeing into the state machine in order to
possibly trigger packet discards and thus indirectly a queue
flush on error. It would surely be better to discard chunks
as in the current, perhaps better controlled environment, but
going back and forth, it's simply architecturally not possible.
I tried various trailing JUNK attack cases and it seems to
look good now.
Joint work with Vlad Yasevich.
Fixes: 2e3216cd54b1 ("sctp: Follow security requirement of responding with 1 packet")
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: Vlad Yasevich <vyasevich@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | static sctp_disposition_t sctp_sf_violation_chunk(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
static const char err_str[] = "The following chunk violates protocol:";
if (!asoc)
return sctp_sf_violation(net, ep, asoc, type, arg, commands);
return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
sizeof(err_str));
}
| static sctp_disposition_t sctp_sf_violation_chunk(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
static const char err_str[] = "The following chunk violates protocol:";
if (!asoc)
return sctp_sf_violation(net, ep, asoc, type, arg, commands);
return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
sizeof(err_str));
}
| C | linux | 0 |
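A sketch of the deferral the message proposes, as it would look inside sctp_inq_pop(): rather than silently freeing a malformed tail, mark the packet for discard and hand it to the state machine, which restores the missing flush point at loop exit. The helper name bogus_chunk_header() is invented; pdiscard is the chunk flag the commit message itself mentions.

	if (bogus_chunk_header(chunk)) {
		chunk->pdiscard = 1; /* whole packet must be discarded */
		return chunk; /* state machine discards and flushes the queue */
	}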
CVE-2010-1152 | https://www.cvedetails.com/cve/CVE-2010-1152/ | CWE-20 | https://github.com/memcached/memcached/commit/75cc83685e103bc8ba380a57468c8f04413033f9 | 75cc83685e103bc8ba380a57468c8f04413033f9 | Issue 102: Piping null to the server will crash it | static enum test_return test_vperror(void) {
int rv = 0;
int oldstderr = dup(STDERR_FILENO);
char tmpl[sizeof(TMP_TEMPLATE)+1];
strncpy(tmpl, TMP_TEMPLATE, sizeof(TMP_TEMPLATE)+1);
int newfile = mkstemp(tmpl);
assert(newfile > 0);
rv = dup2(newfile, STDERR_FILENO);
assert(rv == STDERR_FILENO);
rv = close(newfile);
assert(rv == 0);
errno = EIO;
vperror("Old McDonald had a farm. %s", "EI EIO");
/* Restore stderr */
rv = dup2(oldstderr, STDERR_FILENO);
assert(rv == STDERR_FILENO);
/* Go read the file */
char buf[80] = { 0 };
FILE *efile = fopen(tmpl, "r");
assert(efile);
char *prv = fgets(buf, sizeof(buf), efile);
assert(prv);
fclose(efile);
unlink(tmpl);
char expected[80] = { 0 };
snprintf(expected, sizeof(expected),
"Old McDonald had a farm. EI EIO: %s\n", strerror(EIO));
/*
fprintf(stderr,
"\nExpected: ``%s''"
"\nGot: ``%s''\n", expected, buf);
*/
return strcmp(expected, buf) == 0 ? TEST_PASS : TEST_FAIL;
}
| static enum test_return test_vperror(void) {
int rv = 0;
int oldstderr = dup(STDERR_FILENO);
char tmpl[sizeof(TMP_TEMPLATE)+1];
strncpy(tmpl, TMP_TEMPLATE, sizeof(TMP_TEMPLATE)+1);
int newfile = mkstemp(tmpl);
assert(newfile > 0);
rv = dup2(newfile, STDERR_FILENO);
assert(rv == STDERR_FILENO);
rv = close(newfile);
assert(rv == 0);
errno = EIO;
vperror("Old McDonald had a farm. %s", "EI EIO");
/* Restore stderr */
rv = dup2(oldstderr, STDERR_FILENO);
assert(rv == STDERR_FILENO);
/* Go read the file */
char buf[80] = { 0 };
FILE *efile = fopen(tmpl, "r");
assert(efile);
char *prv = fgets(buf, sizeof(buf), efile);
assert(prv);
fclose(efile);
unlink(tmpl);
char expected[80] = { 0 };
snprintf(expected, sizeof(expected),
"Old McDonald had a farm. EI EIO: %s\n", strerror(EIO));
/*
fprintf(stderr,
"\nExpected: ``%s''"
"\nGot: ``%s''\n", expected, buf);
*/
return strcmp(expected, buf) == 0 ? TEST_PASS : TEST_FAIL;
}
| C | memcached | 0 |
CVE-2016-2070 | https://www.cvedetails.com/cve/CVE-2016-2070/ | CWE-189 | https://github.com/torvalds/linux/commit/8b8a321ff72c785ed5e8b4cf6eda20b35d427390 | 8b8a321ff72c785ed5e8b4cf6eda20b35d427390 | tcp: fix zero cwnd in tcp_cwnd_reduction
Patch 3759824da87b ("tcp: PRR uses CRB mode by default and SS mode
conditionally") introduced a bug that cwnd may become 0 when both
inflight and sndcnt are 0 (cwnd = inflight + sndcnt). This may lead
to a div-by-zero if the connection starts another cwnd reduction
phase by setting tp->prior_cwnd to the current cwnd (0) in
tcp_init_cwnd_reduction().
To prevent this we skip PRR operation when nothing is acked or
sacked. Then cwnd must be positive in all cases as long as ssthresh
is positive:
1) The proportional reduction mode
inflight > ssthresh > 0
2) The reduction bound mode
a) inflight == ssthresh > 0
b) inflight < ssthresh
sndcnt > 0 since newly_acked_sacked > 0 and inflight < ssthresh
Therefore in all cases inflight and sndcnt can not both be 0.
We check invalid tp->prior_cwnd to avoid potential div0 bugs.
In reality this bug is triggered only with a sequence of less common
events. For example, the connection is terminating an ECN-triggered
cwnd reduction with an inflight 0, then it receives reordered/old
ACKs or DSACKs from prior transmission (which acks nothing). Or the
connection is in fast recovery stage that marks everything lost,
but fails to retransmit due to local issues, then receives data
packets from other end which acks nothing.
Fixes: 3759824da87b ("tcp: PRR uses CRB mode by default and SS mode conditionally")
Reported-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
if (before(ack, tp->tlp_high_seq))
return;
if (flag & FLAG_DSACKING_ACK) {
/* This DSACK means original and TLP probe arrived; no loss */
tp->tlp_high_seq = 0;
} else if (after(ack, tp->tlp_high_seq)) {
/* ACK advances: there was a loss, so reduce cwnd. Reset
* tlp_high_seq in tcp_init_cwnd_reduction()
*/
tcp_init_cwnd_reduction(sk);
tcp_set_ca_state(sk, TCP_CA_CWR);
tcp_end_cwnd_reduction(sk);
tcp_try_keep_open(sk);
NET_INC_STATS_BH(sock_net(sk),
LINUX_MIB_TCPLOSSPROBERECOVERY);
} else if (!(flag & (FLAG_SND_UNA_ADVANCED |
FLAG_NOT_DUP | FLAG_DATA_SACKED))) {
/* Pure dupack: original and TLP probe arrived; no loss */
tp->tlp_high_seq = 0;
}
}
| static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
if (before(ack, tp->tlp_high_seq))
return;
if (flag & FLAG_DSACKING_ACK) {
/* This DSACK means original and TLP probe arrived; no loss */
tp->tlp_high_seq = 0;
} else if (after(ack, tp->tlp_high_seq)) {
/* ACK advances: there was a loss, so reduce cwnd. Reset
* tlp_high_seq in tcp_init_cwnd_reduction()
*/
tcp_init_cwnd_reduction(sk);
tcp_set_ca_state(sk, TCP_CA_CWR);
tcp_end_cwnd_reduction(sk);
tcp_try_keep_open(sk);
NET_INC_STATS_BH(sock_net(sk),
LINUX_MIB_TCPLOSSPROBERECOVERY);
} else if (!(flag & (FLAG_SND_UNA_ADVANCED |
FLAG_NOT_DUP | FLAG_DATA_SACKED))) {
/* Pure dupack: original and TLP probe arrived; no loss */
tp->tlp_high_seq = 0;
}
}
| C | linux | 0 |
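The guard described above, as it would read at the top of tcp_cwnd_reduction(). The WARN_ON_ONCE form matches the message's note about checking an invalid tp->prior_cwnd, but the exact wording of the hunk is an assumption.

	struct tcp_sock *tp = tcp_sk(sk);

	/* Nothing acked or sacked: skip PRR so cwnd = inflight + sndcnt can
	 * never come out as 0, and catch a bogus prior_cwnd before it can
	 * divide by zero in the proportional-reduction step. */
	if (newly_acked_sacked <= 0 || WARN_ON_ONCE(!tp->prior_cwnd))
		return;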
CVE-2012-2896 | https://www.cvedetails.com/cve/CVE-2012-2896/ | CWE-189 | https://github.com/chromium/chromium/commit/3aad1a37affb1ab70d1897f2b03eb8c077264984 | 3aad1a37affb1ab70d1897f2b03eb8c077264984 | Fix SafeAdd and SafeMultiply
BUG=145648,145544
Review URL: https://chromiumcodereview.appspot.com/10916165
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@155478 0039d316-1c4b-4281-b951-d872f2087c98 | void GLES2DecoderImpl::DoLinkProgram(GLuint program) {
TRACE_EVENT0("gpu", "GLES2DecoderImpl::DoLinkProgram");
ProgramManager::ProgramInfo* info = GetProgramInfoNotShader(
program, "glLinkProgram");
if (!info) {
return;
}
ShaderTranslator* vertex_translator = NULL;
ShaderTranslator* fragment_translator = NULL;
if (use_shader_translator_) {
vertex_translator = vertex_translator_;
fragment_translator = fragment_translator_;
}
if (info->Link(shader_manager(),
vertex_translator,
fragment_translator,
feature_info_)) {
if (info == current_program_.get()) {
program_manager()->ClearUniforms(info);
}
}
};
| void GLES2DecoderImpl::DoLinkProgram(GLuint program) {
TRACE_EVENT0("gpu", "GLES2DecoderImpl::DoLinkProgram");
ProgramManager::ProgramInfo* info = GetProgramInfoNotShader(
program, "glLinkProgram");
if (!info) {
return;
}
ShaderTranslator* vertex_translator = NULL;
ShaderTranslator* fragment_translator = NULL;
if (use_shader_translator_) {
vertex_translator = vertex_translator_;
fragment_translator = fragment_translator_;
}
if (info->Link(shader_manager(),
vertex_translator,
fragment_translator,
feature_info_)) {
if (info == current_program_.get()) {
program_manager()->ClearUniforms(info);
}
}
};
| C | Chrome | 0 |
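The usual shape of such helpers, rejecting the operation before unsigned arithmetic can wrap. The real Chromium versions are templates; this is a minimal C rendering of the same checks.

#include <stdint.h>

/* Store a+b in *out and return 1, or return 0 if the sum would wrap. */
static int safe_add_u32(uint32_t a, uint32_t b, uint32_t *out)
{
	if (a > UINT32_MAX - b)
		return 0;
	*out = a + b;
	return 1;
}

/* Store a*b in *out and return 1, or return 0 if the product would wrap. */
static int safe_mul_u32(uint32_t a, uint32_t b, uint32_t *out)
{
	if (b != 0 && a > UINT32_MAX / b)
		return 0;
	*out = a * b;
	return 1;
}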
CVE-2018-20067 | https://www.cvedetails.com/cve/CVE-2018-20067/ | CWE-254 | https://github.com/chromium/chromium/commit/a7d715ae5b654d1f98669fd979a00282a7229044 | a7d715ae5b654d1f98669fd979a00282a7229044 | Prevent renderer initiated back navigation to cancel a browser one.
Renderer initiated back/forward navigations must not be able to cancel ongoing
browser initiated navigation if they are not user initiated.
Note: 'normal' renderer initiated navigation uses the
FrameHost::BeginNavigation() path. Code similar to this patch is done
in NavigatorImpl::OnBeginNavigation().
Test:
-----
Added: NavigationBrowserTest.
* HistoryBackInBeforeUnload
* HistoryBackInBeforeUnloadAfterSetTimeout
* HistoryBackCancelPendingNavigationNoUserGesture
* HistoryBackCancelPendingNavigationUserGesture
Fixed:
* (WPT) .../the-history-interface/traverse_the_history_2.html
* (WPT) .../the-history-interface/traverse_the_history_3.html
* (WPT) .../the-history-interface/traverse_the_history_4.html
* (WPT) .../the-history-interface/traverse_the_history_5.html
Bug: 879965
Change-Id: I1a9bfaaea1ffc219e6c32f6e676b660e746c578c
Reviewed-on: https://chromium-review.googlesource.com/1209744
Commit-Queue: Arthur Sonzogni <arthursonzogni@chromium.org>
Reviewed-by: Daniel Cheng <dcheng@chromium.org>
Reviewed-by: Mustaq Ahmed <mustaq@chromium.org>
Reviewed-by: Camille Lamy <clamy@chromium.org>
Reviewed-by: Charlie Reis <creis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#592823} | void RenderViewImpl::UpdateBrowserControlsState(
BrowserControlsState constraints,
BrowserControlsState current,
bool animate) {
if (GetWebWidget()) {
GetWebWidget()->UpdateBrowserControlsState(ContentToCc(constraints),
ContentToCc(current), animate);
}
top_controls_constraints_ = constraints;
}
| void RenderViewImpl::UpdateBrowserControlsState(
BrowserControlsState constraints,
BrowserControlsState current,
bool animate) {
if (GetWebWidget()) {
GetWebWidget()->UpdateBrowserControlsState(ContentToCc(constraints),
ContentToCc(current), animate);
}
top_controls_constraints_ = constraints;
}
| C | Chrome | 0 |
CVE-2011-4930 | https://www.cvedetails.com/cve/CVE-2011-4930/ | CWE-134 | https://htcondor-git.cs.wisc.edu/?p=condor.git;a=commitdiff;h=5e5571d1a431eb3c61977b6dd6ec90186ef79867 | 5e5571d1a431eb3c61977b6dd6ec90186ef79867 | null | GahpServer::Reaper(Service *,int pid,int status)
{
/* This should be much better.... for now, if our Gahp Server
goes away for any reason, we EXCEPT. */
GahpServer *dead_server = NULL;
GahpServer *next_server = NULL;
GahpServersById.startIterations();
while ( GahpServersById.iterate( next_server ) != 0 ) {
if ( pid == next_server->m_gahp_pid ) {
dead_server = next_server;
break;
}
}
std::string buf;
sprintf( buf, "Gahp Server (pid=%d) ", pid );
if( WIFSIGNALED(status) ) {
sprintf_cat( buf, "died due to %s",
daemonCore->GetExceptionString(status) );
} else {
sprintf_cat( buf, "exited with status %d", WEXITSTATUS(status) );
}
if ( dead_server ) {
sprintf_cat( buf, " unexpectedly" );
EXCEPT( "%s", buf.c_str() );
} else {
sprintf_cat( buf, "\n" );
dprintf( D_ALWAYS, "%s", buf.c_str() );
}
}
| GahpServer::Reaper(Service *,int pid,int status)
{
/* This should be much better.... for now, if our Gahp Server
goes away for any reason, we EXCEPT. */
GahpServer *dead_server = NULL;
GahpServer *next_server = NULL;
GahpServersById.startIterations();
while ( GahpServersById.iterate( next_server ) != 0 ) {
if ( pid == next_server->m_gahp_pid ) {
dead_server = next_server;
break;
}
}
std::string buf;
sprintf( buf, "Gahp Server (pid=%d) ", pid );
if( WIFSIGNALED(status) ) {
sprintf_cat( buf, "died due to %s",
daemonCore->GetExceptionString(status) );
} else {
sprintf_cat( buf, "exited with status %d", WEXITSTATUS(status) );
}
if ( dead_server ) {
sprintf_cat( buf, " unexpectedly" );
EXCEPT( buf.c_str() );
} else {
sprintf_cat( buf, "\n" );
dprintf( D_ALWAYS, buf.c_str() );
}
}
| CPP | htcondor | 1 |
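The before/after pair above shows the fix directly: buf holds text partly derived from the dying process, so it must never serve as the format string itself. Reduced to its core:

	/* vulnerable: any '%' conversion hiding in buf gets interpreted
	 *     EXCEPT( buf.c_str() );
	 *     dprintf( D_ALWAYS, buf.c_str() );
	 * fixed: buf is only ever a %s argument */
	EXCEPT( "%s", buf.c_str() );
	dprintf( D_ALWAYS, "%s", buf.c_str() );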
CVE-2018-6096 | https://www.cvedetails.com/cve/CVE-2018-6096/ | null | https://github.com/chromium/chromium/commit/36f801fdbec07d116a6f4f07bb363f10897d6a51 | 36f801fdbec07d116a6f4f07bb363f10897d6a51 | If a page calls |window.focus()|, kick it out of fullscreen.
BUG=776418, 800056
Change-Id: I1880fe600e4814c073f247c43b1c1ac80c8fc017
Reviewed-on: https://chromium-review.googlesource.com/852378
Reviewed-by: Nasko Oskov <nasko@chromium.org>
Reviewed-by: Philip Jägenstedt <foolip@chromium.org>
Commit-Queue: Avi Drissman <avi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#533790} | void RenderFrameImpl::OnDeleteSurroundingText(int before, int after) {
ImeEventGuard guard(GetRenderWidget());
frame_->DeleteSurroundingText(before, after);
}
| void RenderFrameImpl::OnDeleteSurroundingText(int before, int after) {
ImeEventGuard guard(GetRenderWidget());
frame_->DeleteSurroundingText(before, after);
}
| C | Chrome | 0 |
CVE-2011-3896 | https://www.cvedetails.com/cve/CVE-2011-3896/ | CWE-119 | https://github.com/chromium/chromium/commit/5925dff83699508b5e2735afb0297dfb310e159d | 5925dff83699508b5e2735afb0297dfb310e159d | Implement a bubble that appears at the top of the screen when a tab enters
fullscreen mode via webkitRequestFullScreen(), telling the user how to exit
fullscreen.
This is implemented as an NSView rather than an NSWindow because the floating
chrome that appears in presentation mode should overlap the bubble.
Content-initiated fullscreen mode makes use of 'presentation mode' on the Mac:
the mode in which the UI is hidden, accessible by moving the cursor to the top
of the screen. On Snow Leopard, this mode is synonymous with fullscreen mode.
On Lion, however, fullscreen mode does not imply presentation mode: in
non-presentation fullscreen mode, the chrome is permanently shown. It is
possible to switch between presentation mode and fullscreen mode using the
presentation mode UI control.
When a tab initiates fullscreen mode on Lion, we enter presentation mode if not
in presentation mode already. When the user exits fullscreen mode using Chrome
UI (i.e. keyboard shortcuts, menu items, buttons, switching tabs, etc.) we
return the user to the mode they were in before the tab entered fullscreen.
BUG=14471
TEST=Enter fullscreen mode using webkitRequestFullScreen. You should see a bubble pop down from the top of the screen.
Need to test the Lion logic somehow, with no Lion trybots.
BUG=96883
Original review http://codereview.chromium.org/7890056/
TBR=thakis
Review URL: http://codereview.chromium.org/7920024
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@101624 0039d316-1c4b-4281-b951-d872f2087c98 | void Browser::FocusAppMenu() {
UserMetrics::RecordAction(UserMetricsAction("FocusAppMenu"));
window_->FocusAppMenu();
}
| void Browser::FocusAppMenu() {
UserMetrics::RecordAction(UserMetricsAction("FocusAppMenu"));
window_->FocusAppMenu();
}
| C | Chrome | 0 |
CVE-2013-3230 | https://www.cvedetails.com/cve/CVE-2013-3230/ | CWE-200 | https://github.com/torvalds/linux/commit/b860d3cc62877fad02863e2a08efff69a19382d2 | b860d3cc62877fad02863e2a08efff69a19382d2 | l2tp: fix info leak in l2tp_ip6_recvmsg()
The L2TP code for IPv6 fails to initialize the l2tp_conn_id member of
struct sockaddr_l2tpip6 and therefore leaks four bytes kernel stack
in l2tp_ip6_recvmsg() in case msg_name is set.
Initialize l2tp_conn_id with 0 to avoid the info leak.
Signed-off-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | static int l2tp_ip6_recv(struct sk_buff *skb)
{
struct sock *sk;
u32 session_id;
u32 tunnel_id;
unsigned char *ptr, *optr;
struct l2tp_session *session;
struct l2tp_tunnel *tunnel = NULL;
int length;
/* Point to L2TP header */
optr = ptr = skb->data;
if (!pskb_may_pull(skb, 4))
goto discard;
session_id = ntohl(*((__be32 *) ptr));
ptr += 4;
/* RFC3931: L2TP/IP packets have the first 4 bytes containing
* the session_id. If it is 0, the packet is a L2TP control
* frame and the session_id value can be discarded.
*/
if (session_id == 0) {
__skb_pull(skb, 4);
goto pass_up;
}
/* Ok, this is a data packet. Lookup the session. */
session = l2tp_session_find(&init_net, NULL, session_id);
if (session == NULL)
goto discard;
tunnel = session->tunnel;
if (tunnel == NULL)
goto discard;
/* Trace packet contents, if enabled */
if (tunnel->debug & L2TP_MSG_DATA) {
length = min(32u, skb->len);
if (!pskb_may_pull(skb, length))
goto discard;
pr_debug("%s: ip recv\n", tunnel->name);
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
}
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
tunnel->recv_payload_hook);
return 0;
pass_up:
/* Get the tunnel_id from the L2TP header */
if (!pskb_may_pull(skb, 12))
goto discard;
if ((skb->data[0] & 0xc0) != 0xc0)
goto discard;
tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
tunnel = l2tp_tunnel_find(&init_net, tunnel_id);
if (tunnel != NULL)
sk = tunnel->sock;
else {
struct ipv6hdr *iph = ipv6_hdr(skb);
read_lock_bh(&l2tp_ip6_lock);
sk = __l2tp_ip6_bind_lookup(&init_net, &iph->daddr,
0, tunnel_id);
read_unlock_bh(&l2tp_ip6_lock);
}
if (sk == NULL)
goto discard;
sock_hold(sk);
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_put;
nf_reset(skb);
return sk_receive_skb(sk, skb, 1);
discard_put:
sock_put(sk);
discard:
kfree_skb(skb);
return 0;
}
| static int l2tp_ip6_recv(struct sk_buff *skb)
{
struct sock *sk;
u32 session_id;
u32 tunnel_id;
unsigned char *ptr, *optr;
struct l2tp_session *session;
struct l2tp_tunnel *tunnel = NULL;
int length;
/* Point to L2TP header */
optr = ptr = skb->data;
if (!pskb_may_pull(skb, 4))
goto discard;
session_id = ntohl(*((__be32 *) ptr));
ptr += 4;
/* RFC3931: L2TP/IP packets have the first 4 bytes containing
* the session_id. If it is 0, the packet is a L2TP control
* frame and the session_id value can be discarded.
*/
if (session_id == 0) {
__skb_pull(skb, 4);
goto pass_up;
}
/* Ok, this is a data packet. Lookup the session. */
session = l2tp_session_find(&init_net, NULL, session_id);
if (session == NULL)
goto discard;
tunnel = session->tunnel;
if (tunnel == NULL)
goto discard;
/* Trace packet contents, if enabled */
if (tunnel->debug & L2TP_MSG_DATA) {
length = min(32u, skb->len);
if (!pskb_may_pull(skb, length))
goto discard;
pr_debug("%s: ip recv\n", tunnel->name);
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
}
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
tunnel->recv_payload_hook);
return 0;
pass_up:
/* Get the tunnel_id from the L2TP header */
if (!pskb_may_pull(skb, 12))
goto discard;
if ((skb->data[0] & 0xc0) != 0xc0)
goto discard;
tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
tunnel = l2tp_tunnel_find(&init_net, tunnel_id);
if (tunnel != NULL)
sk = tunnel->sock;
else {
struct ipv6hdr *iph = ipv6_hdr(skb);
read_lock_bh(&l2tp_ip6_lock);
sk = __l2tp_ip6_bind_lookup(&init_net, &iph->daddr,
0, tunnel_id);
read_unlock_bh(&l2tp_ip6_lock);
}
if (sk == NULL)
goto discard;
sock_hold(sk);
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_put;
nf_reset(skb);
return sk_receive_skb(sk, skb, 1);
discard_put:
sock_put(sk);
discard:
kfree_skb(skb);
return 0;
}
| C | linux | 0 |
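Note on the record above: the commit message describes the actual fix landing in l2tp_ip6_recvmsg(), while the function stored in this record (l2tp_ip6_recv(), vul=0) is an unchanged sibling from the same file. A sketch of the fix pattern the commit message describes, reconstructed from the kernel's struct sockaddr_l2tpip6 layout; this is an abbreviated, kernel-context excerpt, not the full recvmsg body.

/* Sketch of the fix described above: when copying the peer address
 * into the caller-supplied msg_name, every member of
 * struct sockaddr_l2tpip6 must be written, otherwise the untouched
 * l2tp_conn_id member leaks four bytes of kernel stack to user space.
 * lsa points at msg->msg_name, cast to struct sockaddr_l2tpip6 *.
 */
if (lsa) {
	lsa->l2tp_family = AF_INET6;
	lsa->l2tp_unused = 0;
	lsa->l2tp_addr = ipv6_hdr(skb)->saddr;
	lsa->l2tp_flowinfo = 0;
	lsa->l2tp_scope_id = 0;
	lsa->l2tp_conn_id = 0;	/* the four bytes the commit zeroes */
	if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
		lsa->l2tp_scope_id = IP6CB(skb)->iif;
}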
CVE-2011-2802 | https://www.cvedetails.com/cve/CVE-2011-2802/ | CWE-399 | https://github.com/chromium/chromium/commit/4ab22cfc619ee8ff17a8c50e289ec3b30731ceba | 4ab22cfc619ee8ff17a8c50e289ec3b30731ceba | In chromedriver, add /log url to get the contents of the chromedriver log
remotely. Also add a 'chrome.verbose' boolean startup option.
Remove usage of VLOG(1) in chromedriver. We do not need as complicated
logging as in Chrome.
BUG=85241
TEST=none
Review URL: http://codereview.chromium.org/7104085
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@88591 0039d316-1c4b-4281-b951-d872f2087c98 | void Dispatcher::AddShutdown(const std::string& pattern,
base::WaitableEvent* shutdown_event) {
mg_set_uri_callback(context_, (root_ + pattern).c_str(), &Shutdown,
shutdown_event);
}
| void Dispatcher::AddShutdown(const std::string& pattern,
base::WaitableEvent* shutdown_event) {
mg_set_uri_callback(context_, (root_ + pattern).c_str(), &Shutdown,
shutdown_event);
}
| C | Chrome | 0 |
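Dispatcher::AddShutdown() above registers an endpoint with chromedriver's embedded mongoose server via mg_set_uri_callback(). A hedged sketch of how the /log endpoint from the commit message could be wired up the same way; the SendLog handler, the log_path argument, and the classic mongoose callback signature (connection, request info, user data) are assumptions for illustration, not taken from the chromedriver source.

#include <stdio.h>

/* Hypothetical handler: streams the chromedriver log file back to the
 * HTTP client. mg_printf()/mg_write() follow the embedded-mongoose API
 * of that era; error handling is kept minimal for brevity. */
static void SendLog(struct mg_connection *connection,
                    const struct mg_request_info *request_info,
                    void *user_data) {
  const char *log_path = (const char *)user_data;
  char buf[4096];
  size_t n;
  FILE *f = fopen(log_path, "rb");

  mg_printf(connection,
            "HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\n\r\n");
  if (f == NULL)
    return;
  while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
    mg_write(connection, buf, n);
  fclose(f);
}

/* Registered the same way AddShutdown() registers its pattern: */
mg_set_uri_callback(context, "/log", &SendLog, (void *)log_path);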
CVE-2019-11599 | https://www.cvedetails.com/cve/CVE-2019-11599/ | CWE-362 | https://github.com/torvalds/linux/commit/04f5866e41fb70690e28397487d8bd8eea7d712a | 04f5866e41fb70690e28397487d8bd8eea7d712a | coredump: fix race condition between mmget_not_zero()/get_task_mm() and core dumping
The core dumping code has always run without holding the mmap_sem for
writing, despite that is the only way to ensure that the entire vma
layout will not change from under it. Only using some signal
serialization on the processes belonging to the mm is not nearly enough.
This was pointed out earlier. For example in Hugh's post from Jul 2017:
https://lkml.kernel.org/r/alpine.LSU.2.11.1707191716030.2055@eggly.anvils
"Not strictly relevant here, but a related note: I was very surprised
to discover, only quite recently, how handle_mm_fault() may be called
without down_read(mmap_sem) - when core dumping. That seems a
misguided optimization to me, which would also be nice to correct"
In particular because the growsdown and growsup can move the
vm_start/vm_end the various loops the core dump does around the vma will
not be consistent if page faults can happen concurrently.
Pretty much all users calling mmget_not_zero()/get_task_mm() and then
taking the mmap_sem had the potential to introduce unexpected side
effects in the core dumping code.
Adding mmap_sem for writing around the ->core_dump invocation is a
viable long term fix, but it requires removing all copy user and page
faults and to replace them with get_dump_page() for all binary formats
which is not suitable as a short term fix.
For the time being this solution manually covers the places that can
confuse the core dump either by altering the vma layout or the vma flags
while it runs. Once ->core_dump runs under mmap_sem for writing the
function mmget_still_valid() can be dropped.
Allowing mmap_sem protected sections to run in parallel with the
coredump provides some minor parallelism advantage to the swapoff code
(which seems to be safe enough by never mangling any vma field and can
keep doing swapins in parallel to the core dumping) and to some other
corner case.
In order to facilitate the backporting I added "Fixes: 86039bd3b4e6"
however the side effect of this same race condition in /proc/pid/mem
should be reproducible since before 2.6.12-rc2 so I couldn't add any
other "Fixes:" because there's no hash beyond the git genesis commit.
Because find_extend_vma() is the only location outside of the process
context that could modify the "mm" structures under mmap_sem for
reading, by adding the mmget_still_valid() check to it, all other cases
that take the mmap_sem for reading don't need the new check after
mmget_not_zero()/get_task_mm(). The expand_stack() in page fault
context also doesn't need the new check, because all tasks under core
dumping are frozen.
Link: http://lkml.kernel.org/r/20190325224949.11068-1-aarcange@redhat.com
Fixes: 86039bd3b4e6 ("userfaultfd: add new syscall to provide memory externalization")
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reported-by: Jann Horn <jannh@google.com>
Suggested-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: Jann Horn <jannh@google.com>
Acked-by: Jason Gunthorpe <jgg@mellanox.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> | static int smaps_pte_hole(unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
struct mem_size_stats *mss = walk->private;
mss->swap += shmem_partial_swap_usage(
walk->vma->vm_file->f_mapping, addr, end);
return 0;
}
| static int smaps_pte_hole(unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
struct mem_size_stats *mss = walk->private;
mss->swap += shmem_partial_swap_usage(
walk->vma->vm_file->f_mapping, addr, end);
return 0;
}
| C | linux | 0 |
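The commit message above centers on a mmget_still_valid() check that lets mmap_sem readers detect a core dump in progress after acquiring the mm via mmget_not_zero()/get_task_mm(). A sketch of the helper and its usage pattern as the commit describes them; the one-line body keys off mm->core_state per the commit's approach, and the caller below is illustrative rather than a quote from the patch.

/* A core dump in progress sets mm->core_state, so any code path that
 * pinned the mm with mmget_not_zero()/get_task_mm() and then took
 * mmap_sem for reading can bail out instead of mutating vmas under
 * the dumper. */
static inline bool mmget_still_valid(struct mm_struct *mm)
{
	return likely(!mm->core_state);
}

/* Illustrative caller (assumed, not from the patch): */
down_read(&mm->mmap_sem);
if (!mmget_still_valid(mm))
	goto out;	/* core dump running; leave the vma layout alone */
/* ... work that is safe under mmap_sem held for reading ... */
out:
up_read(&mm->mmap_sem);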