CVE ID | CVE Page | CWE ID | codeLink | commit_id | commit_message | func_after | func_before | lang | project | vul |
---|---|---|---|---|---|---|---|---|---|---|
CVE-2015-5707 | https://www.cvedetails.com/cve/CVE-2015-5707/ | CWE-189 | https://github.com/torvalds/linux/commit/fdc81f45e9f57858da6351836507fbcf1b7583ee | fdc81f45e9f57858da6351836507fbcf1b7583ee | sg_start_req(): use import_iovec()
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk> | sg_start_req(Sg_request *srp, unsigned char *cmd)
{
int res;
struct request *rq;
Sg_fd *sfp = srp->parentfp;
sg_io_hdr_t *hp = &srp->header;
int dxfer_len = (int) hp->dxfer_len;
int dxfer_dir = hp->dxfer_direction;
unsigned int iov_count = hp->iovec_count;
Sg_scatter_hold *req_schp = &srp->data;
Sg_scatter_hold *rsv_schp = &sfp->reserve;
struct request_queue *q = sfp->parentdp->device->request_queue;
struct rq_map_data *md, map_data;
int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
unsigned char *long_cmdp = NULL;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_start_req: dxfer_len=%d\n",
dxfer_len));
if (hp->cmd_len > BLK_MAX_CDB) {
long_cmdp = kzalloc(hp->cmd_len, GFP_KERNEL);
if (!long_cmdp)
return -ENOMEM;
}
/*
* NOTE
*
* With scsi-mq enabled, there are a fixed number of preallocated
* requests equal in number to shost->can_queue. If all of the
* preallocated requests are already in use, then using GFP_ATOMIC with
* blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
* will cause blk_get_request() to sleep until an active command
* completes, freeing up a request. Neither option is ideal, but
* GFP_KERNEL is the better choice to prevent userspace from getting an
* unexpected EWOULDBLOCK.
*
* With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
* does not sleep except under memory pressure.
*/
rq = blk_get_request(q, rw, GFP_KERNEL);
if (IS_ERR(rq)) {
kfree(long_cmdp);
return PTR_ERR(rq);
}
blk_rq_set_block_pc(rq);
if (hp->cmd_len > BLK_MAX_CDB)
rq->cmd = long_cmdp;
memcpy(rq->cmd, cmd, hp->cmd_len);
rq->cmd_len = hp->cmd_len;
srp->rq = rq;
rq->end_io_data = srp;
rq->sense = srp->sense_b;
rq->retries = SG_DEFAULT_RETRIES;
if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
return 0;
if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
!sfp->parentdp->device->host->unchecked_isa_dma &&
blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len))
md = NULL;
else
md = &map_data;
if (md) {
if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
sg_link_reserve(sfp, srp, dxfer_len);
else {
res = sg_build_indirect(req_schp, sfp, dxfer_len);
if (res)
return res;
}
md->pages = req_schp->pages;
md->page_order = req_schp->page_order;
md->nr_entries = req_schp->k_use_sg;
md->offset = 0;
md->null_mapped = hp->dxferp ? 0 : 1;
if (dxfer_dir == SG_DXFER_TO_FROM_DEV)
md->from_user = 1;
else
md->from_user = 0;
}
if (iov_count) {
struct iovec *iov = NULL;
struct iov_iter i;
res = import_iovec(rw, hp->dxferp, iov_count, 0, &iov, &i);
if (res < 0)
return res;
iov_iter_truncate(&i, hp->dxfer_len);
res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
kfree(iov);
} else
res = blk_rq_map_user(q, rq, md, hp->dxferp,
hp->dxfer_len, GFP_ATOMIC);
if (!res) {
srp->bio = rq->bio;
if (!md) {
req_schp->dio_in_use = 1;
hp->info |= SG_INFO_DIRECT_IO;
}
}
return res;
}
| sg_start_req(Sg_request *srp, unsigned char *cmd)
{
int res;
struct request *rq;
Sg_fd *sfp = srp->parentfp;
sg_io_hdr_t *hp = &srp->header;
int dxfer_len = (int) hp->dxfer_len;
int dxfer_dir = hp->dxfer_direction;
unsigned int iov_count = hp->iovec_count;
Sg_scatter_hold *req_schp = &srp->data;
Sg_scatter_hold *rsv_schp = &sfp->reserve;
struct request_queue *q = sfp->parentdp->device->request_queue;
struct rq_map_data *md, map_data;
int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
unsigned char *long_cmdp = NULL;
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
"sg_start_req: dxfer_len=%d\n",
dxfer_len));
if (hp->cmd_len > BLK_MAX_CDB) {
long_cmdp = kzalloc(hp->cmd_len, GFP_KERNEL);
if (!long_cmdp)
return -ENOMEM;
}
/*
* NOTE
*
* With scsi-mq enabled, there are a fixed number of preallocated
* requests equal in number to shost->can_queue. If all of the
* preallocated requests are already in use, then using GFP_ATOMIC with
* blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
* will cause blk_get_request() to sleep until an active command
* completes, freeing up a request. Neither option is ideal, but
* GFP_KERNEL is the better choice to prevent userspace from getting an
* unexpected EWOULDBLOCK.
*
* With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
* does not sleep except under memory pressure.
*/
rq = blk_get_request(q, rw, GFP_KERNEL);
if (IS_ERR(rq)) {
kfree(long_cmdp);
return PTR_ERR(rq);
}
blk_rq_set_block_pc(rq);
if (hp->cmd_len > BLK_MAX_CDB)
rq->cmd = long_cmdp;
memcpy(rq->cmd, cmd, hp->cmd_len);
rq->cmd_len = hp->cmd_len;
srp->rq = rq;
rq->end_io_data = srp;
rq->sense = srp->sense_b;
rq->retries = SG_DEFAULT_RETRIES;
if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
return 0;
if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
!sfp->parentdp->device->host->unchecked_isa_dma &&
blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len))
md = NULL;
else
md = &map_data;
if (md) {
if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
sg_link_reserve(sfp, srp, dxfer_len);
else {
res = sg_build_indirect(req_schp, sfp, dxfer_len);
if (res)
return res;
}
md->pages = req_schp->pages;
md->page_order = req_schp->page_order;
md->nr_entries = req_schp->k_use_sg;
md->offset = 0;
md->null_mapped = hp->dxferp ? 0 : 1;
if (dxfer_dir == SG_DXFER_TO_FROM_DEV)
md->from_user = 1;
else
md->from_user = 0;
}
if (unlikely(iov_count > MAX_UIOVEC))
return -EINVAL;
if (iov_count) {
int size = sizeof(struct iovec) * iov_count;
struct iovec *iov;
struct iov_iter i;
iov = memdup_user(hp->dxferp, size);
if (IS_ERR(iov))
return PTR_ERR(iov);
iov_iter_init(&i, rw, iov, iov_count,
min_t(size_t, hp->dxfer_len,
iov_length(iov, iov_count)));
res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
kfree(iov);
} else
res = blk_rq_map_user(q, rq, md, hp->dxferp,
hp->dxfer_len, GFP_ATOMIC);
if (!res) {
srp->bio = rq->bio;
if (!md) {
req_schp->dio_in_use = 1;
hp->info |= SG_INFO_DIRECT_IO;
}
}
return res;
}
| C | linux | 1 |
CVE-2017-15923 | https://www.cvedetails.com/cve/CVE-2017-15923/ | null | https://cgit.kde.org/konversation.git/commit/?h=1.7&id=6a7f59ee1b9dbc6e5cf9e5f3b306504d02b73ef0 | 6a7f59ee1b9dbc6e5cf9e5f3b306504d02b73ef0 | null | void IRCView::appendServerMessage(const QString& type, const QString& message, bool parseURL)
{
QString serverColor = Preferences::self()->color(Preferences::ServerMessage).name();
m_tabNotification = Konversation::tnfControl;
QString fixed;
if(Preferences::self()->fixedMOTD() && !m_fontDataBase.isFixedPitch(font().family()))
{
if(type == i18n("MOTD"))
fixed=" face=\"" + KGlobalSettings::fixedFont().family() + "\"";
}
QString line;
QChar::Direction dir;
QString text(filter(message, serverColor, 0 , true, parseURL, false, &dir));
bool rtl = (dir == QChar::DirR);
if(rtl)
{
line = RLE;
line += LRE;
line += "<font color=\"" + serverColor + "\"" + fixed + "><b>[</b>%2<b>]</b> %1" + PDF + " %3</font>";
}
else
{
if (!QApplication::isLeftToRight())
line += LRE;
line += "<font color=\"" + serverColor + "\"" + fixed + ">%1 <b>[</b>%2<b>]</b> %3</font>";
}
line = line.arg(timeStamp(), type, text);
emit textToLog(QString("%1\t%2").arg(type, message));
doAppend(line, rtl);
}
| void IRCView::appendServerMessage(const QString& type, const QString& message, bool parseURL)
{
QString serverColor = Preferences::self()->color(Preferences::ServerMessage).name();
m_tabNotification = Konversation::tnfControl;
QString fixed;
if(Preferences::self()->fixedMOTD() && !m_fontDataBase.isFixedPitch(font().family()))
{
if(type == i18n("MOTD"))
fixed=" face=\"" + KGlobalSettings::fixedFont().family() + "\"";
}
QString line;
QChar::Direction dir;
QString text(filter(message, serverColor, 0 , true, parseURL, false, &dir));
bool rtl = (dir == QChar::DirR);
if(rtl)
{
line = RLE;
line += LRE;
line += "<font color=\"" + serverColor + "\"" + fixed + "><b>[</b>%2<b>]</b> %1" + PDF + " %3</font>";
}
else
{
if (!QApplication::isLeftToRight())
line += LRE;
line += "<font color=\"" + serverColor + "\"" + fixed + ">%1 <b>[</b>%2<b>]</b> %3</font>";
}
line = line.arg(timeStamp(), type, text);
emit textToLog(QString("%1\t%2").arg(type, message));
doAppend(line, rtl);
}
| CPP | kde | 0 |
CVE-2011-2350 | https://www.cvedetails.com/cve/CVE-2011-2350/ | CWE-20 | https://github.com/chromium/chromium/commit/b944f670bb7a8a919daac497a4ea0536c954c201 | b944f670bb7a8a919daac497a4ea0536c954c201 | [JSC] Implement a helper method createNotEnoughArgumentsError()
https://bugs.webkit.org/show_bug.cgi?id=85102
Reviewed by Geoffrey Garen.
In bug 84787, kbr@ requested to avoid hard-coding
createTypeError(exec, "Not enough arguments") here and there.
This patch implements createNotEnoughArgumentsError(exec)
and uses it in JSC bindings.
c.f. a corresponding bug for V8 bindings is bug 85097.
Source/JavaScriptCore:
* runtime/Error.cpp:
(JSC::createNotEnoughArgumentsError):
(JSC):
* runtime/Error.h:
(JSC):
Source/WebCore:
Test: bindings/scripts/test/TestObj.idl
* bindings/scripts/CodeGeneratorJS.pm: Modified as described above.
(GenerateArgumentsCountCheck):
* bindings/js/JSDataViewCustom.cpp: Ditto.
(WebCore::getDataViewMember):
(WebCore::setDataViewMember):
* bindings/js/JSDeprecatedPeerConnectionCustom.cpp:
(WebCore::JSDeprecatedPeerConnectionConstructor::constructJSDeprecatedPeerConnection):
* bindings/js/JSDirectoryEntryCustom.cpp:
(WebCore::JSDirectoryEntry::getFile):
(WebCore::JSDirectoryEntry::getDirectory):
* bindings/js/JSSharedWorkerCustom.cpp:
(WebCore::JSSharedWorkerConstructor::constructJSSharedWorker):
* bindings/js/JSWebKitMutationObserverCustom.cpp:
(WebCore::JSWebKitMutationObserverConstructor::constructJSWebKitMutationObserver):
(WebCore::JSWebKitMutationObserver::observe):
* bindings/js/JSWorkerCustom.cpp:
(WebCore::JSWorkerConstructor::constructJSWorker):
* bindings/scripts/test/JS/JSFloat64Array.cpp: Updated run-bindings-tests.
(WebCore::jsFloat64ArrayPrototypeFunctionFoo):
* bindings/scripts/test/JS/JSTestActiveDOMObject.cpp:
(WebCore::jsTestActiveDOMObjectPrototypeFunctionExcitingFunction):
(WebCore::jsTestActiveDOMObjectPrototypeFunctionPostMessage):
* bindings/scripts/test/JS/JSTestCustomNamedGetter.cpp:
(WebCore::jsTestCustomNamedGetterPrototypeFunctionAnotherFunction):
* bindings/scripts/test/JS/JSTestEventTarget.cpp:
(WebCore::jsTestEventTargetPrototypeFunctionItem):
(WebCore::jsTestEventTargetPrototypeFunctionAddEventListener):
(WebCore::jsTestEventTargetPrototypeFunctionRemoveEventListener):
(WebCore::jsTestEventTargetPrototypeFunctionDispatchEvent):
* bindings/scripts/test/JS/JSTestInterface.cpp:
(WebCore::JSTestInterfaceConstructor::constructJSTestInterface):
(WebCore::jsTestInterfacePrototypeFunctionSupplementalMethod2):
* bindings/scripts/test/JS/JSTestMediaQueryListListener.cpp:
(WebCore::jsTestMediaQueryListListenerPrototypeFunctionMethod):
* bindings/scripts/test/JS/JSTestNamedConstructor.cpp:
(WebCore::JSTestNamedConstructorNamedConstructor::constructJSTestNamedConstructor):
* bindings/scripts/test/JS/JSTestObj.cpp:
(WebCore::JSTestObjConstructor::constructJSTestObj):
(WebCore::jsTestObjPrototypeFunctionVoidMethodWithArgs):
(WebCore::jsTestObjPrototypeFunctionIntMethodWithArgs):
(WebCore::jsTestObjPrototypeFunctionObjMethodWithArgs):
(WebCore::jsTestObjPrototypeFunctionMethodWithSequenceArg):
(WebCore::jsTestObjPrototypeFunctionMethodReturningSequence):
(WebCore::jsTestObjPrototypeFunctionMethodThatRequiresAllArgsAndThrows):
(WebCore::jsTestObjPrototypeFunctionSerializedValue):
(WebCore::jsTestObjPrototypeFunctionIdbKey):
(WebCore::jsTestObjPrototypeFunctionOptionsObject):
(WebCore::jsTestObjPrototypeFunctionAddEventListener):
(WebCore::jsTestObjPrototypeFunctionRemoveEventListener):
(WebCore::jsTestObjPrototypeFunctionMethodWithNonOptionalArgAndOptionalArg):
(WebCore::jsTestObjPrototypeFunctionMethodWithNonOptionalArgAndTwoOptionalArgs):
(WebCore::jsTestObjPrototypeFunctionMethodWithCallbackArg):
(WebCore::jsTestObjPrototypeFunctionMethodWithNonCallbackArgAndCallbackArg):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod1):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod2):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod3):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod4):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod5):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod6):
(WebCore::jsTestObjPrototypeFunctionOverloadedMethod7):
(WebCore::jsTestObjConstructorFunctionClassMethod2):
(WebCore::jsTestObjConstructorFunctionOverloadedMethod11):
(WebCore::jsTestObjConstructorFunctionOverloadedMethod12):
(WebCore::jsTestObjPrototypeFunctionMethodWithUnsignedLongArray):
(WebCore::jsTestObjPrototypeFunctionConvert1):
(WebCore::jsTestObjPrototypeFunctionConvert2):
(WebCore::jsTestObjPrototypeFunctionConvert3):
(WebCore::jsTestObjPrototypeFunctionConvert4):
(WebCore::jsTestObjPrototypeFunctionConvert5):
(WebCore::jsTestObjPrototypeFunctionStrictFunction):
* bindings/scripts/test/JS/JSTestSerializedScriptValueInterface.cpp:
(WebCore::JSTestSerializedScriptValueInterfaceConstructor::constructJSTestSerializedScriptValueInterface):
(WebCore::jsTestSerializedScriptValueInterfacePrototypeFunctionAcceptTransferList):
git-svn-id: svn://svn.chromium.org/blink/trunk@115536 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | void setJSTestObjVoidSequenceAttr(ExecState* exec, JSObject* thisObject, JSValue value)
{
JSTestObj* castedThis = jsCast<JSTestObj*>(thisObject);
TestObj* impl = static_cast<TestObj*>(castedThis->impl());
impl->setVoidSequenceAttr(toNativeArray<void>(exec, value));
}
| void setJSTestObjVoidSequenceAttr(ExecState* exec, JSObject* thisObject, JSValue value)
{
JSTestObj* castedThis = jsCast<JSTestObj*>(thisObject);
TestObj* impl = static_cast<TestObj*>(castedThis->impl());
impl->setVoidSequenceAttr(toNativeArray<void>(exec, value));
}
| C | Chrome | 0 |
CVE-2012-2890 | https://www.cvedetails.com/cve/CVE-2012-2890/ | CWE-399 | https://github.com/chromium/chromium/commit/a6f7726de20450074a01493e4e85409ce3f2595a | a6f7726de20450074a01493e4e85409ce3f2595a | Unreviewed, rolling out r147402.
http://trac.webkit.org/changeset/147402
https://bugs.webkit.org/show_bug.cgi?id=112903
Source/WebCore:
* dom/Document.cpp:
(WebCore::Document::processHttpEquiv):
* loader/DocumentLoader.cpp:
(WebCore::DocumentLoader::responseReceived):
LayoutTests:
* http/tests/security/XFrameOptions/x-frame-options-deny-expected.txt:
* http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag-expected.txt:
* http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag-in-body-expected.txt:
* http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag-in-body.html:
* http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag-parent-same-origin-deny-expected.txt:
* http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag-parent-same-origin-deny.html:
* http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag.html:
* http/tests/security/XFrameOptions/x-frame-options-deny.html:
* http/tests/security/XFrameOptions/x-frame-options-multiple-headers-sameorigin-deny-expected.txt:
* http/tests/security/XFrameOptions/x-frame-options-multiple-headers-sameorigin-deny.html:
* http/tests/security/XFrameOptions/x-frame-options-parent-same-origin-deny-expected.txt:
* http/tests/security/XFrameOptions/x-frame-options-parent-same-origin-deny.html:
* platform/chromium/http/tests/security/XFrameOptions/x-frame-options-deny-expected.txt:
* platform/chromium/http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag-expected.txt:
* platform/chromium/http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag-in-body-expected.txt:
* platform/chromium/http/tests/security/XFrameOptions/x-frame-options-deny-meta-tag-parent-same-origin-deny-expected.txt:
* platform/chromium/http/tests/security/XFrameOptions/x-frame-options-multiple-headers-sameorigin-deny-expected.txt:
* platform/chromium/http/tests/security/XFrameOptions/x-frame-options-parent-same-origin-deny-expected.txt:
git-svn-id: svn://svn.chromium.org/blink/trunk@147450 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | void Document::webkitCancelFullScreen()
{
if (!topDocument()->webkitFullscreenElement())
return;
Vector<RefPtr<Element> > replacementFullscreenElementStack;
replacementFullscreenElementStack.append(topDocument()->webkitFullscreenElement());
topDocument()->m_fullScreenElementStack.swap(replacementFullscreenElementStack);
topDocument()->webkitExitFullscreen();
}
| void Document::webkitCancelFullScreen()
{
if (!topDocument()->webkitFullscreenElement())
return;
Vector<RefPtr<Element> > replacementFullscreenElementStack;
replacementFullscreenElementStack.append(topDocument()->webkitFullscreenElement());
topDocument()->m_fullScreenElementStack.swap(replacementFullscreenElementStack);
topDocument()->webkitExitFullscreen();
}
| C | Chrome | 0 |
CVE-2017-11147 | https://www.cvedetails.com/cve/CVE-2017-11147/ | CWE-125 | https://git.php.net/?p=php-src.git;a=commit;h=e5246580a85f031e1a3b8064edbaa55c1643a451 | e5246580a85f031e1a3b8064edbaa55c1643a451 | null | int phar_open_from_filename(char *fname, int fname_len, char *alias, int alias_len, int options, phar_archive_data** pphar, char **error TSRMLS_DC) /* {{{ */
{
php_stream *fp;
char *actual;
int ret, is_data = 0;
if (error) {
*error = NULL;
}
if (!strstr(fname, ".phar")) {
is_data = 1;
}
if (phar_open_parsed_phar(fname, fname_len, alias, alias_len, is_data, options, pphar, error TSRMLS_CC) == SUCCESS) {
return SUCCESS;
} else if (error && *error) {
return FAILURE;
}
#if PHP_API_VERSION < 20100412
if (PG(safe_mode) && (!php_checkuid(fname, NULL, CHECKUID_ALLOW_ONLY_FILE))) {
return FAILURE;
}
#endif
if (php_check_open_basedir(fname TSRMLS_CC)) {
return FAILURE;
}
fp = php_stream_open_wrapper(fname, "rb", IGNORE_URL|STREAM_MUST_SEEK, &actual);
if (!fp) {
if (options & REPORT_ERRORS) {
if (error) {
spprintf(error, 0, "unable to open phar for reading \"%s\"", fname);
}
}
if (actual) {
efree(actual);
}
return FAILURE;
}
if (actual) {
fname = actual;
fname_len = strlen(actual);
}
ret = phar_open_from_fp(fp, fname, fname_len, alias, alias_len, options, pphar, is_data, error TSRMLS_CC);
if (actual) {
efree(actual);
}
return ret;
}
/* }}}*/
| int phar_open_from_filename(char *fname, int fname_len, char *alias, int alias_len, int options, phar_archive_data** pphar, char **error TSRMLS_DC) /* {{{ */
{
php_stream *fp;
char *actual;
int ret, is_data = 0;
if (error) {
*error = NULL;
}
if (!strstr(fname, ".phar")) {
is_data = 1;
}
if (phar_open_parsed_phar(fname, fname_len, alias, alias_len, is_data, options, pphar, error TSRMLS_CC) == SUCCESS) {
return SUCCESS;
} else if (error && *error) {
return FAILURE;
}
#if PHP_API_VERSION < 20100412
if (PG(safe_mode) && (!php_checkuid(fname, NULL, CHECKUID_ALLOW_ONLY_FILE))) {
return FAILURE;
}
#endif
if (php_check_open_basedir(fname TSRMLS_CC)) {
return FAILURE;
}
fp = php_stream_open_wrapper(fname, "rb", IGNORE_URL|STREAM_MUST_SEEK, &actual);
if (!fp) {
if (options & REPORT_ERRORS) {
if (error) {
spprintf(error, 0, "unable to open phar for reading \"%s\"", fname);
}
}
if (actual) {
efree(actual);
}
return FAILURE;
}
if (actual) {
fname = actual;
fname_len = strlen(actual);
}
ret = phar_open_from_fp(fp, fname, fname_len, alias, alias_len, options, pphar, is_data, error TSRMLS_CC);
if (actual) {
efree(actual);
}
return ret;
}
/* }}}*/
| C | php | 0 |
CVE-2017-15127 | https://www.cvedetails.com/cve/CVE-2017-15127/ | null | https://github.com/torvalds/linux/commit/5af10dfd0afc559bb4b0f7e3e8227a1578333995 | 5af10dfd0afc559bb4b0f7e3e8227a1578333995 | userfaultfd: hugetlbfs: remove superfluous page unlock in VM_SHARED case
huge_add_to_page_cache->add_to_page_cache implicitly unlocks the page
before returning in case of errors.
The error returned was -EEXIST by running UFFDIO_COPY on a non-hole
offset of a VM_SHARED hugetlbfs mapping. It was an userland bug that
triggered it and the kernel must cope with it returning -EEXIST from
ioctl(UFFDIO_COPY) as expected.
page dumped because: VM_BUG_ON_PAGE(!PageLocked(page))
kernel BUG at mm/filemap.c:964!
invalid opcode: 0000 [#1] SMP
CPU: 1 PID: 22582 Comm: qemu-system-x86 Not tainted 4.11.11-300.fc26.x86_64 #1
RIP: unlock_page+0x4a/0x50
Call Trace:
hugetlb_mcopy_atomic_pte+0xc0/0x320
mcopy_atomic+0x96f/0xbe0
userfaultfd_ioctl+0x218/0xe90
do_vfs_ioctl+0xa5/0x600
SyS_ioctl+0x79/0x90
entry_SYSCALL_64_fastpath+0x1a/0xa9
Link: http://lkml.kernel.org/r/20170802165145.22628-2-aarcange@redhat.com
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Tested-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Alexey Perevalov <a.perevalov@samsung.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> | static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
struct kobject **hstate_kobjs,
struct attribute_group *hstate_attr_group)
{
int retval;
int hi = hstate_index(h);
hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
if (!hstate_kobjs[hi])
return -ENOMEM;
retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
if (retval)
kobject_put(hstate_kobjs[hi]);
return retval;
}
| static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
struct kobject **hstate_kobjs,
struct attribute_group *hstate_attr_group)
{
int retval;
int hi = hstate_index(h);
hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
if (!hstate_kobjs[hi])
return -ENOMEM;
retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
if (retval)
kobject_put(hstate_kobjs[hi]);
return retval;
}
| C | linux | 0 |
null | null | null | https://github.com/chromium/chromium/commit/df831400bcb63db4259b5858281b1727ba972a2a | df831400bcb63db4259b5858281b1727ba972a2a | WebKit2: Support window bounce when panning.
https://bugs.webkit.org/show_bug.cgi?id=58065
<rdar://problem/9244367>
Reviewed by Adam Roben.
Make gestureDidScroll synchronous, as once we scroll, we need to know
whether or not we are at the beginning or end of the scrollable document.
If we are at either end of the scrollable document, we call the Windows 7
API to bounce the window to give an indication that you are past an end
of the document.
* UIProcess/WebPageProxy.cpp:
(WebKit::WebPageProxy::gestureDidScroll): Pass a boolean for the reply, and return it.
* UIProcess/WebPageProxy.h:
* UIProcess/win/WebView.cpp:
(WebKit::WebView::WebView): Inititalize a new variable.
(WebKit::WebView::onGesture): Once we send the message to scroll, check if have gone to
an end of the document, and if we have, bounce the window.
* UIProcess/win/WebView.h:
* WebProcess/WebPage/WebPage.h:
* WebProcess/WebPage/WebPage.messages.in: GestureDidScroll is now sync.
* WebProcess/WebPage/win/WebPageWin.cpp:
(WebKit::WebPage::gestureDidScroll): When we are done scrolling, check if we have a vertical
scrollbar and if we are at the beginning or the end of the scrollable document.
git-svn-id: svn://svn.chromium.org/blink/trunk@83197 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | void WebView::exitAcceleratedCompositingMode()
{
ASSERT(useNewDrawingArea());
ASSERT_NOT_REACHED();
}
| void WebView::exitAcceleratedCompositingMode()
{
ASSERT(useNewDrawingArea());
ASSERT_NOT_REACHED();
}
| C | Chrome | 0 |
CVE-2017-5023 | https://www.cvedetails.com/cve/CVE-2017-5023/ | CWE-476 | https://github.com/chromium/chromium/commit/03c2e97746a2c471ae136b0c669f8d0c033fe168 | 03c2e97746a2c471ae136b0c669f8d0c033fe168 | Convert DCHECKs to CHECKs for histogram types
When a histogram is looked up by name, there is currently a DCHECK that
verifies the type of the stored histogram matches the expected type.
A mismatch represents a significant problem because the returned
HistogramBase is cast to a Histogram in ValidateRangeChecksum,
potentially causing a crash.
This CL converts the DCHECK to a CHECK to prevent the possibility of
type confusion in release builds.
BUG=651443
R=isherman@chromium.org
Review-Url: https://codereview.chromium.org/2381893003
Cr-Commit-Position: refs/heads/master@{#421929} | HistogramBase* LinearHistogram::FactoryGetWithRangeDescription(
const std::string& name,
Sample minimum,
Sample maximum,
uint32_t bucket_count,
int32_t flags,
const DescriptionPair descriptions[]) {
bool valid_arguments = Histogram::InspectConstructionArguments(
name, &minimum, &maximum, &bucket_count);
DCHECK(valid_arguments);
return Factory(name, minimum, maximum, bucket_count, flags, descriptions)
.Build();
}
| HistogramBase* LinearHistogram::FactoryGetWithRangeDescription(
const std::string& name,
Sample minimum,
Sample maximum,
uint32_t bucket_count,
int32_t flags,
const DescriptionPair descriptions[]) {
bool valid_arguments = Histogram::InspectConstructionArguments(
name, &minimum, &maximum, &bucket_count);
DCHECK(valid_arguments);
return Factory(name, minimum, maximum, bucket_count, flags, descriptions)
.Build();
}
| C | Chrome | 0 |
CVE-2017-9465 | https://www.cvedetails.com/cve/CVE-2017-9465/ | CWE-125 | https://github.com/VirusTotal/yara/commit/992480c30f75943e9cd6245bb2015c7737f9b661 | 992480c30f75943e9cd6245bb2015c7737f9b661 | Fix buffer overrun (issue #678). Add assert for detecting this kind of issues earlier. | void _yr_re_fiber_append(
RE_FIBER_LIST* fiber_list,
RE_FIBER* fiber)
{
assert(fiber->prev == NULL);
assert(fiber->next == NULL);
fiber->prev = fiber_list->tail;
if (fiber_list->tail != NULL)
fiber_list->tail->next = fiber;
fiber_list->tail = fiber;
if (fiber_list->head == NULL)
fiber_list->head = fiber;
assert(fiber_list->tail->next == NULL);
assert(fiber_list->head->prev == NULL);
}
| void _yr_re_fiber_append(
RE_FIBER_LIST* fiber_list,
RE_FIBER* fiber)
{
assert(fiber->prev == NULL);
assert(fiber->next == NULL);
fiber->prev = fiber_list->tail;
if (fiber_list->tail != NULL)
fiber_list->tail->next = fiber;
fiber_list->tail = fiber;
if (fiber_list->head == NULL)
fiber_list->head = fiber;
assert(fiber_list->tail->next == NULL);
assert(fiber_list->head->prev == NULL);
}
| C | yara | 0 |
CVE-2019-15924 | https://www.cvedetails.com/cve/CVE-2019-15924/ | CWE-476 | https://github.com/torvalds/linux/commit/01ca667133d019edc9f0a1f70a272447c84ec41f | 01ca667133d019edc9f0a1f70a272447c84ec41f | fm10k: Fix a potential NULL pointer dereference
Syzkaller report this:
kasan: GPF could be caused by NULL-ptr deref or user memory access
general protection fault: 0000 [#1] SMP KASAN PTI
CPU: 0 PID: 4378 Comm: syz-executor.0 Tainted: G C 5.0.0+ #5
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014
RIP: 0010:__lock_acquire+0x95b/0x3200 kernel/locking/lockdep.c:3573
Code: 00 0f 85 28 1e 00 00 48 81 c4 08 01 00 00 5b 5d 41 5c 41 5d 41 5e 41 5f c3 4c 89 ea 48 b8 00 00 00 00 00 fc ff df 48 c1 ea 03 <80> 3c 02 00 0f 85 cc 24 00 00 49 81 7d 00 e0 de 03 a6 41 bc 00 00
RSP: 0018:ffff8881e3c07a40 EFLAGS: 00010002
RAX: dffffc0000000000 RBX: 0000000000000000 RCX: 0000000000000000
RDX: 0000000000000010 RSI: 0000000000000000 RDI: 0000000000000080
RBP: 0000000000000000 R08: 0000000000000001 R09: 0000000000000000
R10: ffff8881e3c07d98 R11: ffff8881c7f21f80 R12: 0000000000000001
R13: 0000000000000080 R14: 0000000000000000 R15: 0000000000000001
FS: 00007fce2252e700(0000) GS:ffff8881f2400000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007fffc7eb0228 CR3: 00000001e5bea002 CR4: 00000000007606f0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
PKRU: 55555554
Call Trace:
lock_acquire+0xff/0x2c0 kernel/locking/lockdep.c:4211
__mutex_lock_common kernel/locking/mutex.c:925 [inline]
__mutex_lock+0xdf/0x1050 kernel/locking/mutex.c:1072
drain_workqueue+0x24/0x3f0 kernel/workqueue.c:2934
destroy_workqueue+0x23/0x630 kernel/workqueue.c:4319
__do_sys_delete_module kernel/module.c:1018 [inline]
__se_sys_delete_module kernel/module.c:961 [inline]
__x64_sys_delete_module+0x30c/0x480 kernel/module.c:961
do_syscall_64+0x9f/0x450 arch/x86/entry/common.c:290
entry_SYSCALL_64_after_hwframe+0x49/0xbe
RIP: 0033:0x462e99
Code: f7 d8 64 89 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48
RSP: 002b:00007fce2252dc58 EFLAGS: 00000246 ORIG_RAX: 00000000000000b0
RAX: ffffffffffffffda RBX: 000000000073bf00 RCX: 0000000000462e99
RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000020000140
RBP: 0000000000000002 R08: 0000000000000000 R09: 0000000000000000
R10: 0000000000000000 R11: 0000000000000246 R12: 00007fce2252e6bc
R13: 00000000004bcca9 R14: 00000000006f6b48 R15: 00000000ffffffff
If alloc_workqueue fails, it should return -ENOMEM, otherwise may
trigger this NULL pointer dereference while unloading drivers.
Reported-by: Hulk Robot <hulkci@huawei.com>
Fixes: 0a38c17a21a0 ("fm10k: Remove create_workqueue")
Signed-off-by: Yue Haibing <yuehaibing@huawei.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com> | static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
struct fm10k_tx_buffer *first)
{
struct sk_buff *skb = first->skb;
struct fm10k_tx_desc *tx_desc;
union {
struct iphdr *ipv4;
struct ipv6hdr *ipv6;
u8 *raw;
} network_hdr;
u8 *transport_hdr;
__be16 frag_off;
__be16 protocol;
u8 l4_hdr = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL)
goto no_csum;
if (skb->encapsulation) {
protocol = fm10k_tx_encap_offload(skb);
if (!protocol) {
if (skb_checksum_help(skb)) {
dev_warn(tx_ring->dev,
"failed to offload encap csum!\n");
tx_ring->tx_stats.csum_err++;
}
goto no_csum;
}
network_hdr.raw = skb_inner_network_header(skb);
transport_hdr = skb_inner_transport_header(skb);
} else {
protocol = vlan_get_protocol(skb);
network_hdr.raw = skb_network_header(skb);
transport_hdr = skb_transport_header(skb);
}
switch (protocol) {
case htons(ETH_P_IP):
l4_hdr = network_hdr.ipv4->protocol;
break;
case htons(ETH_P_IPV6):
l4_hdr = network_hdr.ipv6->nexthdr;
if (likely((transport_hdr - network_hdr.raw) ==
sizeof(struct ipv6hdr)))
break;
ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
sizeof(struct ipv6hdr),
&l4_hdr, &frag_off);
if (unlikely(frag_off))
l4_hdr = NEXTHDR_FRAGMENT;
break;
default:
break;
}
switch (l4_hdr) {
case IPPROTO_TCP:
case IPPROTO_UDP:
break;
case IPPROTO_GRE:
if (skb->encapsulation)
break;
/* fall through */
default:
if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev,
"partial checksum, version=%d l4 proto=%x\n",
protocol, l4_hdr);
}
skb_checksum_help(skb);
tx_ring->tx_stats.csum_err++;
goto no_csum;
}
/* update TX checksum flag */
first->tx_flags |= FM10K_TX_FLAGS_CSUM;
tx_ring->tx_stats.csum_good++;
no_csum:
/* populate Tx descriptor header size and mss */
tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
tx_desc->hdrlen = 0;
tx_desc->mss = 0;
}
| static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
struct fm10k_tx_buffer *first)
{
struct sk_buff *skb = first->skb;
struct fm10k_tx_desc *tx_desc;
union {
struct iphdr *ipv4;
struct ipv6hdr *ipv6;
u8 *raw;
} network_hdr;
u8 *transport_hdr;
__be16 frag_off;
__be16 protocol;
u8 l4_hdr = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL)
goto no_csum;
if (skb->encapsulation) {
protocol = fm10k_tx_encap_offload(skb);
if (!protocol) {
if (skb_checksum_help(skb)) {
dev_warn(tx_ring->dev,
"failed to offload encap csum!\n");
tx_ring->tx_stats.csum_err++;
}
goto no_csum;
}
network_hdr.raw = skb_inner_network_header(skb);
transport_hdr = skb_inner_transport_header(skb);
} else {
protocol = vlan_get_protocol(skb);
network_hdr.raw = skb_network_header(skb);
transport_hdr = skb_transport_header(skb);
}
switch (protocol) {
case htons(ETH_P_IP):
l4_hdr = network_hdr.ipv4->protocol;
break;
case htons(ETH_P_IPV6):
l4_hdr = network_hdr.ipv6->nexthdr;
if (likely((transport_hdr - network_hdr.raw) ==
sizeof(struct ipv6hdr)))
break;
ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
sizeof(struct ipv6hdr),
&l4_hdr, &frag_off);
if (unlikely(frag_off))
l4_hdr = NEXTHDR_FRAGMENT;
break;
default:
break;
}
switch (l4_hdr) {
case IPPROTO_TCP:
case IPPROTO_UDP:
break;
case IPPROTO_GRE:
if (skb->encapsulation)
break;
/* fall through */
default:
if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev,
"partial checksum, version=%d l4 proto=%x\n",
protocol, l4_hdr);
}
skb_checksum_help(skb);
tx_ring->tx_stats.csum_err++;
goto no_csum;
}
/* update TX checksum flag */
first->tx_flags |= FM10K_TX_FLAGS_CSUM;
tx_ring->tx_stats.csum_good++;
no_csum:
/* populate Tx descriptor header size and mss */
tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
tx_desc->hdrlen = 0;
tx_desc->mss = 0;
}
| C | linux | 0 |
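
The commit message in the record above comes down to a missing allocation check: if `alloc_workqueue()` fails, the init path must return -ENOMEM instead of leaving a NULL workqueue pointer around to be dereferenced later at unload time. The following is a hedged, self-contained userspace sketch of that pattern — the struct, stub, and function names are hypothetical illustrations, not the actual fm10k driver code.

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct workqueue { int dummy; };

/* Stand-in for alloc_workqueue(); may fail and return NULL. */
static struct workqueue *alloc_workqueue_stub(int simulate_failure)
{
    return simulate_failure ? NULL : calloc(1, sizeof(struct workqueue));
}

struct driver_state { struct workqueue *wq; };

/* Init path: propagate -ENOMEM on allocation failure so no later code ever
 * sees a NULL wq. The bug described above kept going and crashed at unload. */
static int driver_init(struct driver_state *st, int simulate_failure)
{
    st->wq = alloc_workqueue_stub(simulate_failure);
    if (!st->wq)
        return -ENOMEM;
    return 0;
}

static void driver_exit(struct driver_state *st)
{
    /* Safe even if init failed, because init never leaves wq dangling. */
    free(st->wq);
    st->wq = NULL;
}

int main(void)
{
    struct driver_state st = { 0 };

    if (driver_init(&st, 1) == -ENOMEM)
        fprintf(stderr, "init failed cleanly: -ENOMEM\n");

    driver_exit(&st);
    return 0;
}
```
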
CVE-2017-5011 | https://www.cvedetails.com/cve/CVE-2017-5011/ | CWE-200 | https://github.com/chromium/chromium/commit/eea3300239f0b53e172a320eb8de59d0bea65f27 | eea3300239f0b53e172a320eb8de59d0bea65f27 | DevTools: move front-end URL handling to DevToolsUIBindingds
BUG=662859
Review-Url: https://codereview.chromium.org/2607833002
Cr-Commit-Position: refs/heads/master@{#440926} | void DevToolsWindow::OpenDevToolsWindow(
scoped_refptr<content::DevToolsAgentHost> agent_host,
Profile* profile) {
if (!profile)
profile = Profile::FromBrowserContext(agent_host->GetBrowserContext());
if (!profile)
return;
std::string type = agent_host->GetType();
bool is_worker = type == DevToolsAgentHost::kTypeServiceWorker ||
type == DevToolsAgentHost::kTypeSharedWorker;
if (!agent_host->GetFrontendURL().empty()) {
bool is_v8_only = type == "node";
DevToolsWindow::OpenExternalFrontend(profile, agent_host->GetFrontendURL(),
agent_host, is_worker, is_v8_only);
return;
}
if (is_worker) {
DevToolsWindow::OpenDevToolsWindowForWorker(profile, agent_host);
return;
}
if (type == content::DevToolsAgentHost::kTypeFrame) {
DevToolsWindow::OpenDevToolsWindowForFrame(profile, agent_host);
return;
}
content::WebContents* web_contents = agent_host->GetWebContents();
if (web_contents)
DevToolsWindow::OpenDevToolsWindow(web_contents);
}
| void DevToolsWindow::OpenDevToolsWindow(
scoped_refptr<content::DevToolsAgentHost> agent_host,
Profile* profile) {
if (!profile)
profile = Profile::FromBrowserContext(agent_host->GetBrowserContext());
if (!profile)
return;
std::string type = agent_host->GetType();
bool is_worker = type == DevToolsAgentHost::kTypeServiceWorker ||
type == DevToolsAgentHost::kTypeSharedWorker;
if (!agent_host->GetFrontendURL().empty()) {
bool is_v8_only = type == "node";
DevToolsWindow::OpenExternalFrontend(profile, agent_host->GetFrontendURL(),
agent_host, is_worker, is_v8_only);
return;
}
if (is_worker) {
DevToolsWindow::OpenDevToolsWindowForWorker(profile, agent_host);
return;
}
if (type == content::DevToolsAgentHost::kTypeFrame) {
DevToolsWindow::OpenDevToolsWindowForFrame(profile, agent_host);
return;
}
content::WebContents* web_contents = agent_host->GetWebContents();
if (web_contents)
DevToolsWindow::OpenDevToolsWindow(web_contents);
}
| C | Chrome | 0 |
CVE-2016-5219 | https://www.cvedetails.com/cve/CVE-2016-5219/ | CWE-416 | https://github.com/chromium/chromium/commit/a4150b688a754d3d10d2ca385155b1c95d77d6ae | a4150b688a754d3d10d2ca385155b1c95d77d6ae | Add GL_PROGRAM_COMPLETION_QUERY_CHROMIUM
This makes the query of GL_COMPLETION_STATUS_KHR to programs much
cheaper by minimizing the round-trip to the GPU thread.
Bug: 881152, 957001
Change-Id: Iadfa798af29225e752c710ca5c25f50b3dd3101a
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1586630
Commit-Queue: Kenneth Russell <kbr@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Geoff Lang <geofflang@chromium.org>
Reviewed-by: Kenneth Russell <kbr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#657568} | error::Error GLES2DecoderPassthroughImpl::DoVertexAttribIPointer(
GLuint indx,
GLint size,
GLenum type,
GLsizei stride,
const void* ptr) {
api()->glVertexAttribIPointerFn(indx, size, type, stride, ptr);
return error::kNoError;
}
| error::Error GLES2DecoderPassthroughImpl::DoVertexAttribIPointer(
GLuint indx,
GLint size,
GLenum type,
GLsizei stride,
const void* ptr) {
api()->glVertexAttribIPointerFn(indx, size, type, stride, ptr);
return error::kNoError;
}
| C | Chrome | 0 |
CVE-2018-20961 | https://www.cvedetails.com/cve/CVE-2018-20961/ | CWE-415 | https://github.com/torvalds/linux/commit/7fafcfdf6377b18b2a726ea554d6e593ba44349f | 7fafcfdf6377b18b2a726ea554d6e593ba44349f | USB: gadget: f_midi: fixing a possible double-free in f_midi
It looks like there is a possibility of a double-free vulnerability on an
error path of the f_midi_set_alt function in the f_midi driver. If the
path is feasible then free_ep_req gets called twice:
req->complete = f_midi_complete;
err = usb_ep_queue(midi->out_ep, req, GFP_ATOMIC);
=> ...
usb_gadget_giveback_request
=>
f_midi_complete (CALLBACK)
(inside f_midi_complete, for various cases of status)
free_ep_req(ep, req); // first kfree
if (err) {
ERROR(midi, "%s: couldn't enqueue request: %d\n",
midi->out_ep->name, err);
free_ep_req(midi->out_ep, req); // second kfree
return err;
}
The double-free possibility was introduced with commit ad0d1a058eac
("usb: gadget: f_midi: fix leak on failed to enqueue out requests").
Found by MOXCAFE tool.
Signed-off-by: Tuba Yavuz <tuba@ece.ufl.edu>
Fixes: ad0d1a058eac ("usb: gadget: f_midi: fix leak on failed to enqueue out requests")
Acked-by: Felipe Balbi <felipe.balbi@linux.intel.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | static inline struct f_midi *func_to_midi(struct usb_function *f)
{
return container_of(f, struct f_midi, func);
}
| static inline struct f_midi *func_to_midi(struct usb_function *f)
{
return container_of(f, struct f_midi, func);
}
| C | linux | 0 |
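
The commit message in the record above traces a double free: `usb_ep_queue()` can fail after its completion callback has already released the request, and the caller's error path then frees it a second time. Below is a hedged, self-contained sketch of the single-owner pattern that avoids this; the names and the ownership flag are hypothetical illustrations, not the actual gadget code. The essential point is that exactly one place is allowed to free the request, and the error path checks who owns it rather than assuming.

```c
#include <stdio.h>
#include <stdlib.h>

struct ep_request {
    char *buf;
};

static void free_ep_request(struct ep_request *req)
{
    free(req->buf);
    free(req);
}

/* Hypothetical enqueue: on error the completion path has ALREADY freed the
 * request (mirroring how the completion callback can run from the enqueue
 * error handling). It reports whether the caller still owns req. */
static int enqueue(struct ep_request *req, int simulate_error, int *caller_owns)
{
    *caller_owns = 0;               /* ownership passes to the queue/callback */
    if (simulate_error) {
        free_ep_request(req);       /* "completion" consumed the request */
        return -1;
    }
    free_ep_request(req);           /* normal completion also consumes it */
    return 0;
}

int main(void)
{
    struct ep_request *req = malloc(sizeof(*req));
    if (!req)
        return 1;
    req->buf = malloc(64);
    if (!req->buf) {
        free(req);
        return 1;
    }

    int caller_owns = 1;
    if (enqueue(req, 1, &caller_owns) < 0) {
        /* The buggy pattern freed req here unconditionally -> double free.
         * Free only when ownership was not already consumed. */
        if (caller_owns)
            free_ep_request(req);
        fprintf(stderr, "enqueue failed without double free\n");
        return 1;
    }
    return 0;
}
```
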
CVE-2014-0049 | https://www.cvedetails.com/cve/CVE-2014-0049/ | CWE-119 | https://github.com/torvalds/linux/commit/a08d3b3b99efd509133946056531cdf8f3a0c09b | a08d3b3b99efd509133946056531cdf8f3a0c09b | kvm: x86: fix emulator buffer overflow (CVE-2014-0049)
The problem occurs when the guest performs a pusha with the stack
address pointing to an mmio address (or an invalid guest physical
address) to start with, but then extending into an ordinary guest
physical address. When doing repeated emulated pushes
emulator_read_write sets mmio_needed to 1 on the first one. On a
later push when the stack points to regular memory,
mmio_nr_fragments is set to 0, but mmio_is_needed is not set to 0.
As a result, KVM exits to userspace, and then returns to
complete_emulated_mmio. In complete_emulated_mmio
vcpu->mmio_cur_fragment is incremented. The termination condition of
vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments is never achieved.
The code bounces back and fourth to userspace incrementing
mmio_cur_fragment past it's buffer. If the guest does nothing else it
eventually leads to a a crash on a memcpy from invalid memory address.
However if a guest code can cause the vm to be destroyed in another
vcpu with excellent timing, then kvm_clear_async_pf_completion_queue
can be used by the guest to control the data that's pointed to by the
call to cancel_work_item, which can be used to gain execution.
Fixes: f78146b0f9230765c6315b2e14f56112513389ad
Signed-off-by: Andrew Honig <ahonig@google.com>
Cc: stable@vger.kernel.org (3.5+)
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | int kvm_dev_ioctl_check_extension(long ext)
{
int r;
switch (ext) {
case KVM_CAP_IRQCHIP:
case KVM_CAP_HLT:
case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
case KVM_CAP_SET_TSS_ADDR:
case KVM_CAP_EXT_CPUID:
case KVM_CAP_EXT_EMUL_CPUID:
case KVM_CAP_CLOCKSOURCE:
case KVM_CAP_PIT:
case KVM_CAP_NOP_IO_DELAY:
case KVM_CAP_MP_STATE:
case KVM_CAP_SYNC_MMU:
case KVM_CAP_USER_NMI:
case KVM_CAP_REINJECT_CONTROL:
case KVM_CAP_IRQ_INJECT_STATUS:
case KVM_CAP_IRQFD:
case KVM_CAP_IOEVENTFD:
case KVM_CAP_PIT2:
case KVM_CAP_PIT_STATE2:
case KVM_CAP_SET_IDENTITY_MAP_ADDR:
case KVM_CAP_XEN_HVM:
case KVM_CAP_ADJUST_CLOCK:
case KVM_CAP_VCPU_EVENTS:
case KVM_CAP_HYPERV:
case KVM_CAP_HYPERV_VAPIC:
case KVM_CAP_HYPERV_SPIN:
case KVM_CAP_PCI_SEGMENT:
case KVM_CAP_DEBUGREGS:
case KVM_CAP_X86_ROBUST_SINGLESTEP:
case KVM_CAP_XSAVE:
case KVM_CAP_ASYNC_PF:
case KVM_CAP_GET_TSC_KHZ:
case KVM_CAP_KVMCLOCK_CTRL:
case KVM_CAP_READONLY_MEM:
case KVM_CAP_HYPERV_TIME:
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
case KVM_CAP_ASSIGN_DEV_IRQ:
case KVM_CAP_PCI_2_3:
#endif
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
case KVM_CAP_VAPIC:
r = !kvm_x86_ops->cpu_has_accelerated_tpr();
break;
case KVM_CAP_NR_VCPUS:
r = KVM_SOFT_MAX_VCPUS;
break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
case KVM_CAP_NR_MEMSLOTS:
r = KVM_USER_MEM_SLOTS;
break;
case KVM_CAP_PV_MMU: /* obsolete */
r = 0;
break;
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
case KVM_CAP_IOMMU:
r = iommu_present(&pci_bus_type);
break;
#endif
case KVM_CAP_MCE:
r = KVM_MAX_MCE_BANKS;
break;
case KVM_CAP_XCRS:
r = cpu_has_xsave;
break;
case KVM_CAP_TSC_CONTROL:
r = kvm_has_tsc_control;
break;
case KVM_CAP_TSC_DEADLINE_TIMER:
r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER);
break;
default:
r = 0;
break;
}
return r;
}
| int kvm_dev_ioctl_check_extension(long ext)
{
int r;
switch (ext) {
case KVM_CAP_IRQCHIP:
case KVM_CAP_HLT:
case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
case KVM_CAP_SET_TSS_ADDR:
case KVM_CAP_EXT_CPUID:
case KVM_CAP_EXT_EMUL_CPUID:
case KVM_CAP_CLOCKSOURCE:
case KVM_CAP_PIT:
case KVM_CAP_NOP_IO_DELAY:
case KVM_CAP_MP_STATE:
case KVM_CAP_SYNC_MMU:
case KVM_CAP_USER_NMI:
case KVM_CAP_REINJECT_CONTROL:
case KVM_CAP_IRQ_INJECT_STATUS:
case KVM_CAP_IRQFD:
case KVM_CAP_IOEVENTFD:
case KVM_CAP_PIT2:
case KVM_CAP_PIT_STATE2:
case KVM_CAP_SET_IDENTITY_MAP_ADDR:
case KVM_CAP_XEN_HVM:
case KVM_CAP_ADJUST_CLOCK:
case KVM_CAP_VCPU_EVENTS:
case KVM_CAP_HYPERV:
case KVM_CAP_HYPERV_VAPIC:
case KVM_CAP_HYPERV_SPIN:
case KVM_CAP_PCI_SEGMENT:
case KVM_CAP_DEBUGREGS:
case KVM_CAP_X86_ROBUST_SINGLESTEP:
case KVM_CAP_XSAVE:
case KVM_CAP_ASYNC_PF:
case KVM_CAP_GET_TSC_KHZ:
case KVM_CAP_KVMCLOCK_CTRL:
case KVM_CAP_READONLY_MEM:
case KVM_CAP_HYPERV_TIME:
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
case KVM_CAP_ASSIGN_DEV_IRQ:
case KVM_CAP_PCI_2_3:
#endif
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
case KVM_CAP_VAPIC:
r = !kvm_x86_ops->cpu_has_accelerated_tpr();
break;
case KVM_CAP_NR_VCPUS:
r = KVM_SOFT_MAX_VCPUS;
break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
case KVM_CAP_NR_MEMSLOTS:
r = KVM_USER_MEM_SLOTS;
break;
case KVM_CAP_PV_MMU: /* obsolete */
r = 0;
break;
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
case KVM_CAP_IOMMU:
r = iommu_present(&pci_bus_type);
break;
#endif
case KVM_CAP_MCE:
r = KVM_MAX_MCE_BANKS;
break;
case KVM_CAP_XCRS:
r = cpu_has_xsave;
break;
case KVM_CAP_TSC_CONTROL:
r = kvm_has_tsc_control;
break;
case KVM_CAP_TSC_DEADLINE_TIMER:
r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER);
break;
default:
r = 0;
break;
}
return r;
}
| C | linux | 0 |
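
The commit message in the record above attributes the crash to a fragment cursor (mmio_cur_fragment) that is advanced past mmio_nr_fragments because the termination condition is never met, after which a memcpy indexes invalid memory. The following is a minimal sketch of the defensive pattern — bounds-check the cursor on every use; the structure layout and function names are hypothetical, not the actual KVM code.

```c
#include <stdio.h>
#include <string.h>

#define MAX_FRAGMENTS 2
#define FRAG_SIZE     8

struct mmio_state {
    unsigned int cur_fragment;             /* advanced as fragments complete */
    unsigned int nr_fragments;             /* number of valid entries below */
    char frags[MAX_FRAGMENTS][FRAG_SIZE];
};

/* Copy the next pending fragment into out. Returns 1 if more remain, 0 when
 * done, -1 if the cursor is out of range. The explicit bound check is the
 * point: without it an overshooting cursor indexes past frags[]. */
static int complete_next_fragment(struct mmio_state *s, char out[FRAG_SIZE])
{
    if (s->cur_fragment >= s->nr_fragments)
        return -1;

    memcpy(out, s->frags[s->cur_fragment], FRAG_SIZE);
    s->cur_fragment++;
    return s->cur_fragment < s->nr_fragments;
}

int main(void)
{
    struct mmio_state s = { .cur_fragment = 0, .nr_fragments = 2 };
    char out[FRAG_SIZE];
    int more;

    memcpy(s.frags[0], "frag-0!", FRAG_SIZE);
    memcpy(s.frags[1], "frag-1!", FRAG_SIZE);

    do {
        more = complete_next_fragment(&s, out);
        if (more < 0)
            break;
        printf("copied %.*s\n", FRAG_SIZE, out);
    } while (more > 0);

    /* A stale cursor is rejected instead of walking off the array. */
    s.cur_fragment = 5;
    printf("out-of-range cursor -> %d\n", complete_next_fragment(&s, out));
    return 0;
}
```
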
CVE-2013-2884 | https://www.cvedetails.com/cve/CVE-2013-2884/ | CWE-399 | https://github.com/chromium/chromium/commit/4ac8bc08e3306f38a5ab3e551aef6ad43753579c | 4ac8bc08e3306f38a5ab3e551aef6ad43753579c | Set Attr.ownerDocument in Element#setAttributeNode()
Attr objects can move across documents by setAttributeNode().
So It needs to reset ownerDocument through TreeScopeAdoptr::adoptIfNeeded().
BUG=248950
TEST=set-attribute-node-from-iframe.html
Review URL: https://chromiumcodereview.appspot.com/17583003
git-svn-id: svn://svn.chromium.org/blink/trunk@152938 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | void Element::focus(bool restorePreviousSelection, FocusDirection direction)
{
if (!inDocument())
return;
Document* doc = document();
if (doc->focusedNode() == this)
return;
if (doc->haveStylesheetsLoaded()) {
doc->updateLayoutIgnorePendingStylesheets();
if (!isFocusable())
return;
}
if (!supportsFocus())
return;
RefPtr<Node> protect;
if (Page* page = doc->page()) {
protect = this;
if (!page->focusController()->setFocusedNode(this, doc->frame(), direction))
return;
}
doc->updateLayoutIgnorePendingStylesheets();
if (!isFocusable()) {
ensureElementRareData()->setNeedsFocusAppearanceUpdateSoonAfterAttach(true);
return;
}
cancelFocusAppearanceUpdate();
updateFocusAppearance(restorePreviousSelection);
}
| void Element::focus(bool restorePreviousSelection, FocusDirection direction)
{
if (!inDocument())
return;
Document* doc = document();
if (doc->focusedNode() == this)
return;
if (doc->haveStylesheetsLoaded()) {
doc->updateLayoutIgnorePendingStylesheets();
if (!isFocusable())
return;
}
if (!supportsFocus())
return;
RefPtr<Node> protect;
if (Page* page = doc->page()) {
protect = this;
if (!page->focusController()->setFocusedNode(this, doc->frame(), direction))
return;
}
doc->updateLayoutIgnorePendingStylesheets();
if (!isFocusable()) {
ensureElementRareData()->setNeedsFocusAppearanceUpdateSoonAfterAttach(true);
return;
}
cancelFocusAppearanceUpdate();
updateFocusAppearance(restorePreviousSelection);
}
| C | Chrome | 0 |
CVE-2018-19489 | https://www.cvedetails.com/cve/CVE-2018-19489/ | CWE-362 | https://git.qemu.org/?p=qemu.git;a=commit;h=1d20398694a3b67a388d955b7a945ba4aa90a8a8 | 1d20398694a3b67a388d955b7a945ba4aa90a8a8 | null | static int coroutine_fn v9fs_xattr_fid_clunk(V9fsPDU *pdu, V9fsFidState *fidp)
{
int retval = 0;
if (fidp->fs.xattr.xattrwalk_fid) {
/* getxattr/listxattr fid */
goto free_value;
}
/*
* if this is fid for setxattr. clunk should
* result in setxattr localcall
*/
if (fidp->fs.xattr.len != fidp->fs.xattr.copied_len) {
/* clunk after partial write */
retval = -EINVAL;
goto free_out;
}
if (fidp->fs.xattr.len) {
retval = v9fs_co_lsetxattr(pdu, &fidp->path, &fidp->fs.xattr.name,
fidp->fs.xattr.value,
fidp->fs.xattr.len,
fidp->fs.xattr.flags);
} else {
retval = v9fs_co_lremovexattr(pdu, &fidp->path, &fidp->fs.xattr.name);
}
free_out:
v9fs_string_free(&fidp->fs.xattr.name);
free_value:
g_free(fidp->fs.xattr.value);
return retval;
}
| static int coroutine_fn v9fs_xattr_fid_clunk(V9fsPDU *pdu, V9fsFidState *fidp)
{
int retval = 0;
if (fidp->fs.xattr.xattrwalk_fid) {
/* getxattr/listxattr fid */
goto free_value;
}
/*
* if this is fid for setxattr. clunk should
* result in setxattr localcall
*/
if (fidp->fs.xattr.len != fidp->fs.xattr.copied_len) {
/* clunk after partial write */
retval = -EINVAL;
goto free_out;
}
if (fidp->fs.xattr.len) {
retval = v9fs_co_lsetxattr(pdu, &fidp->path, &fidp->fs.xattr.name,
fidp->fs.xattr.value,
fidp->fs.xattr.len,
fidp->fs.xattr.flags);
} else {
retval = v9fs_co_lremovexattr(pdu, &fidp->path, &fidp->fs.xattr.name);
}
free_out:
v9fs_string_free(&fidp->fs.xattr.name);
free_value:
g_free(fidp->fs.xattr.value);
return retval;
}
| C | qemu | 0 |
CVE-2012-5152 | https://www.cvedetails.com/cve/CVE-2012-5152/ | CWE-119 | https://github.com/chromium/chromium/commit/802ecdb9cee0d66fe546bdf24e98150f8f716ad8 | 802ecdb9cee0d66fe546bdf24e98150f8f716ad8 | Protect AudioRendererAlgorithm from invalid step sizes.
BUG=165430
TEST=unittests and asan pass.
Review URL: https://codereview.chromium.org/11573023
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@173249 0039d316-1c4b-4281-b951-d872f2087c98 | bool AudioRendererAlgorithm::CanFillBuffer() {
return audio_buffer_.forward_bytes() > 0 && !needs_more_data_;
}
| bool AudioRendererAlgorithm::CanFillBuffer() {
return audio_buffer_.forward_bytes() > 0 && !needs_more_data_;
}
| C | Chrome | 0 |
CVE-2018-16077 | https://www.cvedetails.com/cve/CVE-2018-16077/ | CWE-285 | https://github.com/chromium/chromium/commit/90f878780cce9c4b0475fcea14d91b8f510cce11 | 90f878780cce9c4b0475fcea14d91b8f510cce11 | Prevent sandboxed documents from reusing the default window
Bug: 377995
Change-Id: Iff66c6d214dfd0cb7ea9c80f83afeedfff703541
Reviewed-on: https://chromium-review.googlesource.com/983558
Commit-Queue: Andy Paicu <andypaicu@chromium.org>
Reviewed-by: Daniel Cheng <dcheng@chromium.org>
Cr-Commit-Position: refs/heads/master@{#567663} | LocalFrame* LocalFrame::Create(LocalFrameClient* client,
Page& page,
FrameOwner* owner,
InterfaceRegistry* interface_registry) {
LocalFrame* frame = new LocalFrame(
client, page, owner,
interface_registry ? interface_registry
: InterfaceRegistry::GetEmptyInterfaceRegistry());
PageScheduler* page_scheduler = page.GetPageScheduler();
if (frame->IsMainFrame() && page_scheduler)
page_scheduler->SetIsMainFrameLocal(true);
probe::frameAttachedToParent(frame);
return frame;
}
| LocalFrame* LocalFrame::Create(LocalFrameClient* client,
Page& page,
FrameOwner* owner,
InterfaceRegistry* interface_registry) {
LocalFrame* frame = new LocalFrame(
client, page, owner,
interface_registry ? interface_registry
: InterfaceRegistry::GetEmptyInterfaceRegistry());
PageScheduler* page_scheduler = page.GetPageScheduler();
if (frame->IsMainFrame() && page_scheduler)
page_scheduler->SetIsMainFrameLocal(true);
probe::frameAttachedToParent(frame);
return frame;
}
| C | Chrome | 0 |
CVE-2016-1640 | https://www.cvedetails.com/cve/CVE-2016-1640/ | CWE-17 | https://github.com/chromium/chromium/commit/0a1c15fecb1240ab909e1431b6127410c3b380e0 | 0a1c15fecb1240ab909e1431b6127410c3b380e0 | Make the webstore inline install dialog be tab-modal
Also clean up a few minor lint errors while I'm in here.
BUG=550047
Review URL: https://codereview.chromium.org/1496033003
Cr-Commit-Position: refs/heads/master@{#363925} | void AddResourceIcon(const gfx::ImageSkia* skia_image, void* data) {
views::View* parent = static_cast<views::View*>(data);
views::ImageView* image_view = new views::ImageView();
image_view->SetImage(*skia_image);
parent->AddChildView(image_view);
}
| void AddResourceIcon(const gfx::ImageSkia* skia_image, void* data) {
views::View* parent = static_cast<views::View*>(data);
views::ImageView* image_view = new views::ImageView();
image_view->SetImage(*skia_image);
parent->AddChildView(image_view);
}
| C | Chrome | 0 |
CVE-2015-1274 | https://www.cvedetails.com/cve/CVE-2015-1274/ | CWE-254 | https://github.com/chromium/chromium/commit/d27468a832d5316884bd02f459cbf493697fd7e1 | d27468a832d5316884bd02f459cbf493697fd7e1 | Switch to equalIgnoringASCIICase throughout modules/accessibility
BUG=627682
Review-Url: https://codereview.chromium.org/2793913007
Cr-Commit-Position: refs/heads/master@{#461858} | AXLayoutObject* AXLayoutObject::getUnignoredObjectFromNode(Node& node) const {
if (isDetached())
return nullptr;
AXObject* axObject = axObjectCache().getOrCreate(&node);
if (!axObject)
return nullptr;
if (axObject->isAXLayoutObject() && !axObject->accessibilityIsIgnored())
return toAXLayoutObject(axObject);
return nullptr;
}
| AXLayoutObject* AXLayoutObject::getUnignoredObjectFromNode(Node& node) const {
if (isDetached())
return nullptr;
AXObject* axObject = axObjectCache().getOrCreate(&node);
if (!axObject)
return nullptr;
if (axObject->isAXLayoutObject() && !axObject->accessibilityIsIgnored())
return toAXLayoutObject(axObject);
return nullptr;
}
| C | Chrome | 0 |
CVE-2013-1929 | https://www.cvedetails.com/cve/CVE-2013-1929/ | CWE-119 | https://github.com/torvalds/linux/commit/715230a44310a8cf66fbfb5a46f9a62a9b2de424 | 715230a44310a8cf66fbfb5a46f9a62a9b2de424 | tg3: fix length overflow in VPD firmware parsing
Commit 184b89044fb6e2a74611dafa69b1dce0d98612c6 ("tg3: Use VPD fw version
when present") introduced VPD parsing that contained a potential length
overflow.
Limit the hardware's reported firmware string length (max 255 bytes) to
stay inside the driver's firmware string length (32 bytes). On overflow,
truncate the formatted firmware string instead of potentially overwriting
portions of the tg3 struct.
http://cansecwest.com/slides/2013/PrivateCore%20CSW%202013.pdf
Signed-off-by: Kees Cook <keescook@chromium.org>
Reported-by: Oded Horovitz <oded@privatecore.com>
Reported-by: Brad Spengler <spender@grsecurity.net>
Cc: stable@vger.kernel.org
Cc: Matt Carlson <mcarlson@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | static int tg3_mdio_init(struct tg3 *tp)
{
int i;
u32 reg;
struct phy_device *phydev;
if (tg3_flag(tp, 5717_PLUS)) {
u32 is_serdes;
tp->phy_addr = tp->pci_fn + 1;
if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
else
is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
TG3_CPMU_PHY_STRAP_IS_SERDES;
if (is_serdes)
tp->phy_addr += 7;
} else
tp->phy_addr = TG3_PHY_MII_ADDR;
tg3_mdio_start(tp);
if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
return 0;
tp->mdio_bus = mdiobus_alloc();
if (tp->mdio_bus == NULL)
return -ENOMEM;
tp->mdio_bus->name = "tg3 mdio bus";
snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
(tp->pdev->bus->number << 8) | tp->pdev->devfn);
tp->mdio_bus->priv = tp;
tp->mdio_bus->parent = &tp->pdev->dev;
tp->mdio_bus->read = &tg3_mdio_read;
tp->mdio_bus->write = &tg3_mdio_write;
tp->mdio_bus->reset = &tg3_mdio_reset;
tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
tp->mdio_bus->irq = &tp->mdio_irq[0];
for (i = 0; i < PHY_MAX_ADDR; i++)
tp->mdio_bus->irq[i] = PHY_POLL;
/* The bus registration will look for all the PHYs on the mdio bus.
* Unfortunately, it does not ensure the PHY is powered up before
* accessing the PHY ID registers. A chip reset is the
* quickest way to bring the device back to an operational state..
*/
if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
tg3_bmcr_reset(tp);
i = mdiobus_register(tp->mdio_bus);
if (i) {
dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
mdiobus_free(tp->mdio_bus);
return i;
}
phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
if (!phydev || !phydev->drv) {
dev_warn(&tp->pdev->dev, "No PHY devices\n");
mdiobus_unregister(tp->mdio_bus);
mdiobus_free(tp->mdio_bus);
return -ENODEV;
}
switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
case PHY_ID_BCM57780:
phydev->interface = PHY_INTERFACE_MODE_GMII;
phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
break;
case PHY_ID_BCM50610:
case PHY_ID_BCM50610M:
phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
PHY_BRCM_RX_REFCLK_UNUSED |
PHY_BRCM_DIS_TXCRXC_NOENRGY |
PHY_BRCM_AUTO_PWRDWN_ENABLE;
if (tg3_flag(tp, RGMII_INBAND_DISABLE))
phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
/* fallthru */
case PHY_ID_RTL8211C:
phydev->interface = PHY_INTERFACE_MODE_RGMII;
break;
case PHY_ID_RTL8201E:
case PHY_ID_BCMAC131:
phydev->interface = PHY_INTERFACE_MODE_MII;
phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
tp->phy_flags |= TG3_PHYFLG_IS_FET;
break;
}
tg3_flag_set(tp, MDIOBUS_INITED);
if (tg3_asic_rev(tp) == ASIC_REV_5785)
tg3_mdio_config_5785(tp);
return 0;
}
| static int tg3_mdio_init(struct tg3 *tp)
{
int i;
u32 reg;
struct phy_device *phydev;
if (tg3_flag(tp, 5717_PLUS)) {
u32 is_serdes;
tp->phy_addr = tp->pci_fn + 1;
if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
else
is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
TG3_CPMU_PHY_STRAP_IS_SERDES;
if (is_serdes)
tp->phy_addr += 7;
} else
tp->phy_addr = TG3_PHY_MII_ADDR;
tg3_mdio_start(tp);
if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
return 0;
tp->mdio_bus = mdiobus_alloc();
if (tp->mdio_bus == NULL)
return -ENOMEM;
tp->mdio_bus->name = "tg3 mdio bus";
snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
(tp->pdev->bus->number << 8) | tp->pdev->devfn);
tp->mdio_bus->priv = tp;
tp->mdio_bus->parent = &tp->pdev->dev;
tp->mdio_bus->read = &tg3_mdio_read;
tp->mdio_bus->write = &tg3_mdio_write;
tp->mdio_bus->reset = &tg3_mdio_reset;
tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
tp->mdio_bus->irq = &tp->mdio_irq[0];
for (i = 0; i < PHY_MAX_ADDR; i++)
tp->mdio_bus->irq[i] = PHY_POLL;
/* The bus registration will look for all the PHYs on the mdio bus.
* Unfortunately, it does not ensure the PHY is powered up before
* accessing the PHY ID registers. A chip reset is the
* quickest way to bring the device back to an operational state..
*/
if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
tg3_bmcr_reset(tp);
i = mdiobus_register(tp->mdio_bus);
if (i) {
dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
mdiobus_free(tp->mdio_bus);
return i;
}
phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
if (!phydev || !phydev->drv) {
dev_warn(&tp->pdev->dev, "No PHY devices\n");
mdiobus_unregister(tp->mdio_bus);
mdiobus_free(tp->mdio_bus);
return -ENODEV;
}
switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
case PHY_ID_BCM57780:
phydev->interface = PHY_INTERFACE_MODE_GMII;
phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
break;
case PHY_ID_BCM50610:
case PHY_ID_BCM50610M:
phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
PHY_BRCM_RX_REFCLK_UNUSED |
PHY_BRCM_DIS_TXCRXC_NOENRGY |
PHY_BRCM_AUTO_PWRDWN_ENABLE;
if (tg3_flag(tp, RGMII_INBAND_DISABLE))
phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
/* fallthru */
case PHY_ID_RTL8211C:
phydev->interface = PHY_INTERFACE_MODE_RGMII;
break;
case PHY_ID_RTL8201E:
case PHY_ID_BCMAC131:
phydev->interface = PHY_INTERFACE_MODE_MII;
phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
tp->phy_flags |= TG3_PHYFLG_IS_FET;
break;
}
tg3_flag_set(tp, MDIOBUS_INITED);
if (tg3_asic_rev(tp) == ASIC_REV_5785)
tg3_mdio_config_5785(tp);
return 0;
}
| C | linux | 0 |
null | null | null | https://github.com/chromium/chromium/commit/ee8d6fd30b022ac2c87b7a190c954e7bb3c9b21e | ee8d6fd30b022ac2c87b7a190c954e7bb3c9b21e | Clean up calls like "gfx::Rect(0, 0, size().width(), size().height()".
The caller can use the much shorter "gfx::Rect(size())", since gfx::Rect
has a constructor that just takes a Size.
BUG=none
TEST=none
Review URL: http://codereview.chromium.org/2204001
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@48283 0039d316-1c4b-4281-b951-d872f2087c98 | void AutomationProvider::GetWindowTitle(int handle, string16* text) {
gfx::NativeWindow window = window_tracker_->GetResource(handle);
const gchar* title = gtk_window_get_title(window);
text->assign(UTF8ToUTF16(title));
}
| void AutomationProvider::GetWindowTitle(int handle, string16* text) {
gfx::NativeWindow window = window_tracker_->GetResource(handle);
const gchar* title = gtk_window_get_title(window);
text->assign(UTF8ToUTF16(title));
}
| C | Chrome | 0 |
CVE-2014-5139 | https://www.cvedetails.com/cve/CVE-2014-5139/ | null | https://git.openssl.org/gitweb/?p=openssl.git;a=commit;h=83764a989dcc87fbea337da5f8f86806fe767b7e | 83764a989dcc87fbea337da5f8f86806fe767b7e | null | int SSL_export_keying_material(SSL *s, unsigned char *out, size_t olen,
const char *label, size_t llen, const unsigned char *p, size_t plen,
int use_context)
{
if (s->version < TLS1_VERSION)
return -1;
return s->method->ssl3_enc->export_keying_material(s, out, olen, label,
llen, p, plen,
use_context);
}
| int SSL_export_keying_material(SSL *s, unsigned char *out, size_t olen,
const char *label, size_t llen, const unsigned char *p, size_t plen,
int use_context)
{
if (s->version < TLS1_VERSION)
return -1;
return s->method->ssl3_enc->export_keying_material(s, out, olen, label,
llen, p, plen,
use_context);
}
| C | openssl | 0 |
CVE-2014-3191 | https://www.cvedetails.com/cve/CVE-2014-3191/ | CWE-416 | https://github.com/chromium/chromium/commit/11a4cc4a6d6e665d9a118fada4b7c658d6f70d95 | 11a4cc4a6d6e665d9a118fada4b7c658d6f70d95 | Defer call to updateWidgetPositions() outside of RenderLayerScrollableArea.
updateWidgetPositions() can destroy the render tree, so it should never
be called from inside RenderLayerScrollableArea. Leaving it there allows
for the potential of use-after-free bugs.
BUG=402407
R=vollick@chromium.org
Review URL: https://codereview.chromium.org/490473003
git-svn-id: svn://svn.chromium.org/blink/trunk@180681 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | void FrameView::setFrameRect(const IntRect& newRect)
{
IntRect oldRect = frameRect();
if (newRect == oldRect)
return;
bool autosizerNeedsUpdating = false;
if (newRect.width() != oldRect.width() && m_frame->isMainFrame() && m_frame->settings()->textAutosizingEnabled())
autosizerNeedsUpdating = true;
ScrollView::setFrameRect(newRect);
updateScrollableAreaSet();
if (autosizerNeedsUpdating) {
if (TextAutosizer* textAutosizer = m_frame->document()->textAutosizer())
textAutosizer->updatePageInfoInAllFrames();
}
if (RenderView* renderView = this->renderView()) {
if (renderView->usesCompositing())
renderView->compositor()->frameViewDidChangeSize();
}
viewportConstrainedVisibleContentSizeChanged(newRect.width() != oldRect.width(), newRect.height() != oldRect.height());
if (oldRect.size() != newRect.size()
&& m_frame->isMainFrame()
&& m_frame->settings()->pinchVirtualViewportEnabled())
page()->frameHost().pinchViewport().mainFrameDidChangeSize();
}
| void FrameView::setFrameRect(const IntRect& newRect)
{
IntRect oldRect = frameRect();
if (newRect == oldRect)
return;
bool autosizerNeedsUpdating = false;
if (newRect.width() != oldRect.width() && m_frame->isMainFrame() && m_frame->settings()->textAutosizingEnabled())
autosizerNeedsUpdating = true;
ScrollView::setFrameRect(newRect);
updateScrollableAreaSet();
if (autosizerNeedsUpdating) {
if (TextAutosizer* textAutosizer = m_frame->document()->textAutosizer())
textAutosizer->updatePageInfoInAllFrames();
}
if (RenderView* renderView = this->renderView()) {
if (renderView->usesCompositing())
renderView->compositor()->frameViewDidChangeSize();
}
viewportConstrainedVisibleContentSizeChanged(newRect.width() != oldRect.width(), newRect.height() != oldRect.height());
if (oldRect.size() != newRect.size()
&& m_frame->isMainFrame()
&& m_frame->settings()->pinchVirtualViewportEnabled())
page()->frameHost().pinchViewport().mainFrameDidChangeSize();
}
| C | Chrome | 0 |
CVE-2011-3106 | https://www.cvedetails.com/cve/CVE-2011-3106/ | CWE-119 | https://github.com/chromium/chromium/commit/5385c44d9634d00b1cec2abf0fe7290d4205c7b0 | 5385c44d9634d00b1cec2abf0fe7290d4205c7b0 | Inherits SupportsWeakPtr<T> instead of having WeakPtrFactory<T>
This change refines r137676.
BUG=122654
TEST=browser_test
Review URL: https://chromiumcodereview.appspot.com/10332233
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@139771 0039d316-1c4b-4281-b951-d872f2087c98 | bool ResourceDispatcherHostImpl::IsTransferredNavigation(
const GlobalRequestID& transferred_request_id) const {
return transferred_navigations_.find(transferred_request_id) !=
transferred_navigations_.end();
}
| bool ResourceDispatcherHostImpl::IsTransferredNavigation(
const GlobalRequestID& transferred_request_id) const {
return transferred_navigations_.find(transferred_request_id) !=
transferred_navigations_.end();
}
| C | Chrome | 0 |
CVE-2019-5827 | https://www.cvedetails.com/cve/CVE-2019-5827/ | CWE-190 | https://github.com/chromium/chromium/commit/517ac71c9ee27f856f9becde8abea7d1604af9d4 | 517ac71c9ee27f856f9becde8abea7d1604af9d4 | sqlite: backport bugfixes for dbfuzz2
Bug: 952406
Change-Id: Icbec429742048d6674828726c96d8e265c41b595
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1568152
Reviewed-by: Chris Mumford <cmumford@google.com>
Commit-Queue: Darwin Huang <huangdarwin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#651030} | static void binaryToUnaryIfNull(Parse *pParse, Expr *pY, Expr *pA, int op){
sqlite3 *db = pParse->db;
if( pA && pY && pY->op==TK_NULL && !IN_RENAME_OBJECT ){
pA->op = (u8)op;
sqlite3ExprDelete(db, pA->pRight);
pA->pRight = 0;
}
}
| static void binaryToUnaryIfNull(Parse *pParse, Expr *pY, Expr *pA, int op){
sqlite3 *db = pParse->db;
if( pA && pY && pY->op==TK_NULL && !IN_RENAME_OBJECT ){
pA->op = (u8)op;
sqlite3ExprDelete(db, pA->pRight);
pA->pRight = 0;
}
}
| C | Chrome | 0 |
CVE-2016-3830 | https://www.cvedetails.com/cve/CVE-2016-3830/ | CWE-20 | https://android.googlesource.com/platform/frameworks/av/+/8e438e153f661e9df8db0ac41d587e940352df06 | 8e438e153f661e9df8db0ac41d587e940352df06 | SoftAAC2: fix crash on all-zero adts buffer
Bug: 29153599
Change-Id: I1cb81c054098b86cf24f024f8479909ca7bc85a6
| void SoftAAC2::configureDownmix() const {
char value[PROPERTY_VALUE_MAX];
if (!(property_get("media.aac_51_output_enabled", value, NULL)
&& (!strcmp(value, "1") || !strcasecmp(value, "true")))) {
ALOGI("limiting to stereo output");
aacDecoder_SetParam(mAACDecoder, AAC_PCM_MAX_OUTPUT_CHANNELS, 2);
}
}
| void SoftAAC2::configureDownmix() const {
char value[PROPERTY_VALUE_MAX];
if (!(property_get("media.aac_51_output_enabled", value, NULL)
&& (!strcmp(value, "1") || !strcasecmp(value, "true")))) {
ALOGI("limiting to stereo output");
aacDecoder_SetParam(mAACDecoder, AAC_PCM_MAX_OUTPUT_CHANNELS, 2);
}
}
| C | Android | 0 |
CVE-2014-4943 | https://www.cvedetails.com/cve/CVE-2014-4943/ | CWE-264 | https://github.com/torvalds/linux/commit/3cf521f7dc87c031617fd47e4b7aa2593c2f3daf | 3cf521f7dc87c031617fd47e4b7aa2593c2f3daf | net/l2tp: don't fall back on UDP [get|set]sockopt
The l2tp [get|set]sockopt() code has fallen back to the UDP functions
for socket option levels != SOL_PPPOL2TP since day one, but that has
never actually worked, since the l2tp socket isn't an inet socket.
As David Miller points out:
"If we wanted this to work, it'd have to look up the tunnel and then
use tunnel->sk, but I wonder how useful that would be"
Since this can never have worked so nobody could possibly have depended
on that functionality, just remove the broken code and return -EINVAL.
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Acked-by: James Chapman <jchapman@katalix.com>
Acked-by: David Miller <davem@davemloft.net>
Cc: Phil Turnbull <phil.turnbull@oracle.com>
Cc: Vegard Nossum <vegard.nossum@oracle.com>
Cc: Willy Tarreau <w@1wt.eu>
Cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> | static int pppol2tp_session_ioctl(struct l2tp_session *session,
unsigned int cmd, unsigned long arg)
{
struct ifreq ifr;
int err = 0;
struct sock *sk;
int val = (int) arg;
struct pppol2tp_session *ps = l2tp_session_priv(session);
struct l2tp_tunnel *tunnel = session->tunnel;
struct pppol2tp_ioc_stats stats;
l2tp_dbg(session, PPPOL2TP_MSG_CONTROL,
"%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
session->name, cmd, arg);
sk = ps->sock;
sock_hold(sk);
switch (cmd) {
case SIOCGIFMTU:
err = -ENXIO;
if (!(sk->sk_state & PPPOX_CONNECTED))
break;
err = -EFAULT;
if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
break;
ifr.ifr_mtu = session->mtu;
if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
break;
l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mtu=%d\n",
session->name, session->mtu);
err = 0;
break;
case SIOCSIFMTU:
err = -ENXIO;
if (!(sk->sk_state & PPPOX_CONNECTED))
break;
err = -EFAULT;
if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
break;
session->mtu = ifr.ifr_mtu;
l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mtu=%d\n",
session->name, session->mtu);
err = 0;
break;
case PPPIOCGMRU:
err = -ENXIO;
if (!(sk->sk_state & PPPOX_CONNECTED))
break;
err = -EFAULT;
if (put_user(session->mru, (int __user *) arg))
break;
l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mru=%d\n",
session->name, session->mru);
err = 0;
break;
case PPPIOCSMRU:
err = -ENXIO;
if (!(sk->sk_state & PPPOX_CONNECTED))
break;
err = -EFAULT;
if (get_user(val, (int __user *) arg))
break;
session->mru = val;
l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mru=%d\n",
session->name, session->mru);
err = 0;
break;
case PPPIOCGFLAGS:
err = -EFAULT;
if (put_user(ps->flags, (int __user *) arg))
break;
l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get flags=%d\n",
session->name, ps->flags);
err = 0;
break;
case PPPIOCSFLAGS:
err = -EFAULT;
if (get_user(val, (int __user *) arg))
break;
ps->flags = val;
l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set flags=%d\n",
session->name, ps->flags);
err = 0;
break;
case PPPIOCGL2TPSTATS:
err = -ENXIO;
if (!(sk->sk_state & PPPOX_CONNECTED))
break;
memset(&stats, 0, sizeof(stats));
stats.tunnel_id = tunnel->tunnel_id;
stats.session_id = session->session_id;
pppol2tp_copy_stats(&stats, &session->stats);
if (copy_to_user((void __user *) arg, &stats,
sizeof(stats)))
break;
l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get L2TP stats\n",
session->name);
err = 0;
break;
default:
err = -ENOSYS;
break;
}
sock_put(sk);
return err;
}
| static int pppol2tp_session_ioctl(struct l2tp_session *session,
unsigned int cmd, unsigned long arg)
{
struct ifreq ifr;
int err = 0;
struct sock *sk;
int val = (int) arg;
struct pppol2tp_session *ps = l2tp_session_priv(session);
struct l2tp_tunnel *tunnel = session->tunnel;
struct pppol2tp_ioc_stats stats;
l2tp_dbg(session, PPPOL2TP_MSG_CONTROL,
"%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
session->name, cmd, arg);
sk = ps->sock;
sock_hold(sk);
switch (cmd) {
case SIOCGIFMTU:
err = -ENXIO;
if (!(sk->sk_state & PPPOX_CONNECTED))
break;
err = -EFAULT;
if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
break;
ifr.ifr_mtu = session->mtu;
if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
break;
l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mtu=%d\n",
session->name, session->mtu);
err = 0;
break;
case SIOCSIFMTU:
err = -ENXIO;
if (!(sk->sk_state & PPPOX_CONNECTED))
break;
err = -EFAULT;
if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
break;
session->mtu = ifr.ifr_mtu;
l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mtu=%d\n",
session->name, session->mtu);
err = 0;
break;
case PPPIOCGMRU:
err = -ENXIO;
if (!(sk->sk_state & PPPOX_CONNECTED))
break;
err = -EFAULT;
if (put_user(session->mru, (int __user *) arg))
break;
l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get mru=%d\n",
session->name, session->mru);
err = 0;
break;
case PPPIOCSMRU:
err = -ENXIO;
if (!(sk->sk_state & PPPOX_CONNECTED))
break;
err = -EFAULT;
if (get_user(val, (int __user *) arg))
break;
session->mru = val;
l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set mru=%d\n",
session->name, session->mru);
err = 0;
break;
case PPPIOCGFLAGS:
err = -EFAULT;
if (put_user(ps->flags, (int __user *) arg))
break;
l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get flags=%d\n",
session->name, ps->flags);
err = 0;
break;
case PPPIOCSFLAGS:
err = -EFAULT;
if (get_user(val, (int __user *) arg))
break;
ps->flags = val;
l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set flags=%d\n",
session->name, ps->flags);
err = 0;
break;
case PPPIOCGL2TPSTATS:
err = -ENXIO;
if (!(sk->sk_state & PPPOX_CONNECTED))
break;
memset(&stats, 0, sizeof(stats));
stats.tunnel_id = tunnel->tunnel_id;
stats.session_id = session->session_id;
pppol2tp_copy_stats(&stats, &session->stats);
if (copy_to_user((void __user *) arg, &stats,
sizeof(stats)))
break;
l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: get L2TP stats\n",
session->name);
err = 0;
break;
default:
err = -ENOSYS;
break;
}
sock_put(sk);
return err;
}
| C | linux | 0 |
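The commit above removes the broken fallback entirely and simply rejects option levels it does not own. As a minimal, standalone C sketch of that pattern (invented names, not the kernel's pppol2tp code; the level value is illustrative):
#include <errno.h>

#define DEMO_SOL_PPPOL2TP 273   /* illustrative; stands in for SOL_PPPOL2TP */

/*
 * Hypothetical option handler. Only the PPPoL2TP level is ours; before the
 * fix, other levels were delegated to UDP helpers that assume an inet
 * socket, which a PPPoL2TP socket is not. The safe behaviour is to refuse
 * with -EINVAL (kernel-style negative errno).
 */
int demo_pppol2tp_setsockopt(int level, int optname, const void *optval, int optlen)
{
	if (level != DEMO_SOL_PPPOL2TP)
		return -EINVAL;          /* no fallback onto another protocol's handler */

	(void)optname; (void)optval; (void)optlen;
	/* ... handle the PPPOL2TP_SO_* options here ... */
	return 0;
}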
CVE-2013-0904 | https://www.cvedetails.com/cve/CVE-2013-0904/ | CWE-119 | https://github.com/chromium/chromium/commit/b2b21468c1f7f08b30a7c1755316f6026c50eb2a | b2b21468c1f7f08b30a7c1755316f6026c50eb2a | Separate repaint and layout requirements of StyleDifference (Step 1)
Previously StyleDifference was an enum that proximately bigger values
imply smaller values (e.g. StyleDifferenceLayout implies
StyleDifferenceRepaint). This causes unnecessary repaints in some cases
on layout change.
Convert StyleDifference to a structure containing relatively independent
flags.
This change doesn't directly improve the result, but can make further
repaint optimizations possible.
Step 1 doesn't change any functionality. RenderStyle still generate the
legacy StyleDifference enum when comparing styles and convert the result
to the new StyleDifference. Implicit requirements are not handled during
the conversion.
Converted call sites to use the new StyleDifference according to the
following conversion rules:
- diff == StyleDifferenceEqual (&& !context) => diff.hasNoChange()
- diff == StyleDifferenceRepaint => diff.needsRepaintObjectOnly()
- diff == StyleDifferenceRepaintLayer => diff.needsRepaintLayer()
- diff == StyleDifferenceRepaint || diff == StyleDifferenceRepaintLayer => diff.needsRepaintLayer()
- diff >= StyleDifferenceRepaint => diff.needsRepaint() || diff.needsLayout()
- diff >= StyleDifferenceRepaintLayer => diff.needsRepaintLayer() || diff.needsLayout()
- diff > StyleDifferenceRepaintLayer => diff.needsLayout()
- diff == StyleDifferencePositionedMovementLayoutOnly => diff.needsPositionedMovementLayoutOnly()
- diff == StyleDifferenceLayout => diff.needsFullLayout()
BUG=358460
TEST=All existing layout tests.
R=eseidel@chromium.org, esprehn@chromium.org, jchaffraix@chromium.org
Committed: https://src.chromium.org/viewvc/blink?view=rev&revision=171983
Review URL: https://codereview.chromium.org/236203020
git-svn-id: svn://svn.chromium.org/blink/trunk@172331 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | LayoutPoint RenderFlexibleBox::flowAwareLocationForChild(RenderBox* child) const
{
return isHorizontalFlow() ? child->location() : child->location().transposedPoint();
}
| LayoutPoint RenderFlexibleBox::flowAwareLocationForChild(RenderBox* child) const
{
return isHorizontalFlow() ? child->location() : child->location().transposedPoint();
}
| C | Chrome | 0 |
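The conversion rules listed in the commit message map an ordered enum onto independent requirements. A minimal C sketch of the idea, using hypothetical names rather than Blink's actual StyleDifference type:
#include <stdbool.h>

/* Hypothetical flag set standing in for the new StyleDifference structure;
 * with the old ordered enum, "layout" silently implied "repaint". */
struct demo_style_difference {
	bool needs_repaint_object;              /* repaint this object only */
	bool needs_repaint_layer;               /* repaint the enclosing layer */
	bool needs_positioned_movement_layout;
	bool needs_full_layout;
};

/* Each requirement is now queried on its own, mirroring rules such as
 * "diff == StyleDifferenceLayout => diff.needsFullLayout()". */
bool demo_needs_layout(const struct demo_style_difference *d)
{
	return d->needs_positioned_movement_layout || d->needs_full_layout;
}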
CVE-2015-4602 | https://www.cvedetails.com/cve/CVE-2015-4602/ | null | https://git.php.net/?p=php-src.git;a=commit;h=fb83c76deec58f1fab17c350f04c9f042e5977d1 | fb83c76deec58f1fab17c350f04c9f042e5977d1 | null | static zval **incomplete_class_get_property_ptr_ptr(zval *object, zval *member, const zend_literal *key TSRMLS_DC) /* {{{ */
{
incomplete_class_message(object, E_NOTICE TSRMLS_CC);
return &EG(error_zval_ptr);
}
/* }}} */
| static zval **incomplete_class_get_property_ptr_ptr(zval *object, zval *member, const zend_literal *key TSRMLS_DC) /* {{{ */
{
incomplete_class_message(object, E_NOTICE TSRMLS_CC);
return &EG(error_zval_ptr);
}
/* }}} */
| C | php | 0 |
CVE-2011-2521 | https://www.cvedetails.com/cve/CVE-2011-2521/ | CWE-189 | https://github.com/torvalds/linux/commit/fc66c5210ec2539e800e87d7b3a985323c7be96e | fc66c5210ec2539e800e87d7b3a985323c7be96e | perf, x86: Fix Intel fixed counters base initialization
The following patch solves the problems introduced by Robert's
commit 41bf498 and reported by Arun Sharma. This commit gets rid
of the base + index notation for reading and writing PMU msrs.
The problem is that for fixed counters, the new calculation for
the base did not take into account the fixed counter indexes,
thus all fixed counters were read/written from fixed counter 0.
Although all fixed counters share the same config MSR, they each
have their own counter register.
Without:
$ task -e unhalted_core_cycles -e instructions_retired -e baclears noploop 1 noploop for 1 seconds
242202299 unhalted_core_cycles (0.00% scaling, ena=1000790892, run=1000790892)
2389685946 instructions_retired (0.00% scaling, ena=1000790892, run=1000790892)
49473 baclears (0.00% scaling, ena=1000790892, run=1000790892)
With:
$ task -e unhalted_core_cycles -e instructions_retired -e baclears noploop 1 noploop for 1 seconds
2392703238 unhalted_core_cycles (0.00% scaling, ena=1000840809, run=1000840809)
2389793744 instructions_retired (0.00% scaling, ena=1000840809, run=1000840809)
47863 baclears (0.00% scaling, ena=1000840809, run=1000840809)
Signed-off-by: Stephane Eranian <eranian@google.com>
Cc: peterz@infradead.org
Cc: ming.m.lin@intel.com
Cc: robert.richter@amd.com
Cc: asharma@fb.com
Cc: perfmon2-devel@lists.sf.net
LKML-Reference: <20110319172005.GB4978@quad>
Signed-off-by: Ingo Molnar <mingo@elte.hu> | perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* TODO: We don't support guest os callchain now */
return;
}
perf_callchain_store(entry, regs->ip);
dump_trace(NULL, regs, NULL, &backtrace_ops, entry);
}
| perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* TODO: We don't support guest os callchain now */
return;
}
perf_callchain_store(entry, regs->ip);
dump_trace(NULL, regs, NULL, &backtrace_ops, entry);
}
| C | linux | 0 |
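The regression described above came from computing the fixed-counter register address without the counter index, so every fixed counter read and wrote counter 0. A standalone sketch of the arithmetic, with an illustrative base value rather than the real Intel MSR map:
#include <stdint.h>
#include <stdio.h>

#define DEMO_FIXED_CTR0 0x309u   /* illustrative; stands in for MSR_CORE_PERF_FIXED_CTR0 */

/* Each fixed counter has its own counter register at base + index, even
 * though all of them share a single control MSR. Dropping "+ idx" is the
 * bug the commit fixes: every fixed counter then aliases counter 0. */
uint32_t demo_fixed_ctr_msr(unsigned int idx)
{
	return DEMO_FIXED_CTR0 + idx;
}

int main(void)
{
	for (unsigned int i = 0; i < 3; i++)
		printf("fixed counter %u -> counter MSR 0x%x\n", i, demo_fixed_ctr_msr(i));
	return 0;
}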
CVE-2013-6626 | https://www.cvedetails.com/cve/CVE-2013-6626/ | null | https://github.com/chromium/chromium/commit/90fb08ed0146c9beacfd4dde98a20fc45419fff3 | 90fb08ed0146c9beacfd4dde98a20fc45419fff3 | Cancel JavaScript dialogs when an interstitial appears.
BUG=295695
TEST=See bug for repro steps.
Review URL: https://chromiumcodereview.appspot.com/24360011
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@225026 0039d316-1c4b-4281-b951-d872f2087c98 | void WebContentsImpl::DragSourceEndedAt(int client_x, int client_y,
int screen_x, int screen_y, WebKit::WebDragOperation operation) {
if (browser_plugin_embedder_.get())
browser_plugin_embedder_->DragSourceEndedAt(client_x, client_y,
screen_x, screen_y, operation);
if (GetRenderViewHost())
GetRenderViewHostImpl()->DragSourceEndedAt(client_x, client_y,
screen_x, screen_y, operation);
}
| void WebContentsImpl::DragSourceEndedAt(int client_x, int client_y,
int screen_x, int screen_y, WebKit::WebDragOperation operation) {
if (browser_plugin_embedder_.get())
browser_plugin_embedder_->DragSourceEndedAt(client_x, client_y,
screen_x, screen_y, operation);
if (GetRenderViewHost())
GetRenderViewHostImpl()->DragSourceEndedAt(client_x, client_y,
screen_x, screen_y, operation);
}
| C | Chrome | 0 |
CVE-2016-1666 | https://www.cvedetails.com/cve/CVE-2016-1666/ | null | https://github.com/chromium/chromium/commit/8b10115b2410b4bde18e094ad9fb8c5056134c87 | 8b10115b2410b4bde18e094ad9fb8c5056134c87 | Fix ChromeResourceDispatcherHostDelegateMirrorBrowserTest.MirrorRequestHeader with network service.
The functionality worked, as part of converting DICE, however the test code didn't work since it
depended on accessing the net objects directly. Switch the tests to use the EmbeddedTestServer, to
better match production, which removes the dependency on net/.
Also:
-make GetFilePathWithReplacements replace strings in the mock headers if they're present
-add a global to google_util to ignore ports; that way other tests can be converted without having
to modify each callsite to google_util
Bug: 881976
Change-Id: Ic52023495c1c98c1248025c11cdf37f433fef058
Reviewed-on: https://chromium-review.googlesource.com/c/1328142
Commit-Queue: John Abd-El-Malek <jam@chromium.org>
Reviewed-by: Ramin Halavati <rhalavati@chromium.org>
Reviewed-by: Maks Orlovich <morlovich@chromium.org>
Reviewed-by: Peter Kasting <pkasting@chromium.org>
Cr-Commit-Position: refs/heads/master@{#607652} | ChromeResourceDispatcherHostDelegateBrowserTest() {}
| ChromeResourceDispatcherHostDelegateBrowserTest() {}
| C | Chrome | 0 |
CVE-2018-6198 | https://www.cvedetails.com/cve/CVE-2018-6198/ | CWE-59 | https://github.com/tats/w3m/commit/18dcbadf2771cdb0c18509b14e4e73505b242753 | 18dcbadf2771cdb0c18509b14e4e73505b242753 | Make temporary directory safely when ~/.w3m is unwritable | is_wordchar(wc_uint32 c)
{
return wc_is_ucs_alnum(c);
}
| is_wordchar(wc_uint32 c)
{
return wc_is_ucs_alnum(c);
}
| C | w3m | 0 |
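The commit title only names the goal; one common way to create a private temporary directory safely is mkdtemp(3). A hedged sketch (not w3m's actual code, and the path prefix is invented):
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* mkdtemp() fills in the XXXXXX suffix and creates the directory
	 * atomically with mode 0700, so a hostile user cannot pre-create or
	 * symlink the path, which is the class of problem behind this CVE. */
	char template[] = "/tmp/w3m-demo-XXXXXX";
	char *dir = mkdtemp(template);

	if (dir == NULL) {
		perror("mkdtemp");
		return 1;
	}
	printf("using private temporary directory %s\n", dir);
	return 0;
}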
CVE-2017-13693 | https://www.cvedetails.com/cve/CVE-2017-13693/ | CWE-200 | https://github.com/acpica/acpica/pull/295/commits/987a3b5cf7175916e2a4b6ea5b8e70f830dfe732 | 987a3b5cf7175916e2a4b6ea5b8e70f830dfe732 | acpi: acpica: fix acpi operand cache leak in dswstate.c
I found an ACPI cache leak in ACPI early termination and boot continuing case.
When early termination occurs due to malicious ACPI table, Linux kernel
terminates ACPI function and continues to boot process. While kernel terminates
ACPI function, kmem_cache_destroy() reports Acpi-Operand cache leak.
Boot log of ACPI operand cache leak is as follows:
>[ 0.585957] ACPI: Added _OSI(Module Device)
>[ 0.587218] ACPI: Added _OSI(Processor Device)
>[ 0.588530] ACPI: Added _OSI(3.0 _SCP Extensions)
>[ 0.589790] ACPI: Added _OSI(Processor Aggregator Device)
>[ 0.591534] ACPI Error: Illegal I/O port address/length above 64K: C806E00000004002/0x2 (20170303/hwvalid-155)
>[ 0.594351] ACPI Exception: AE_LIMIT, Unable to initialize fixed events (20170303/evevent-88)
>[ 0.597858] ACPI: Unable to start the ACPI Interpreter
>[ 0.599162] ACPI Error: Could not remove SCI handler (20170303/evmisc-281)
>[ 0.601836] kmem_cache_destroy Acpi-Operand: Slab cache still has objects
>[ 0.603556] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.12.0-rc5 #26
>[ 0.605159] Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006
>[ 0.609177] Call Trace:
>[ 0.610063] ? dump_stack+0x5c/0x81
>[ 0.611118] ? kmem_cache_destroy+0x1aa/0x1c0
>[ 0.612632] ? acpi_sleep_proc_init+0x27/0x27
>[ 0.613906] ? acpi_os_delete_cache+0xa/0x10
>[ 0.617986] ? acpi_ut_delete_caches+0x3f/0x7b
>[ 0.619293] ? acpi_terminate+0xa/0x14
>[ 0.620394] ? acpi_init+0x2af/0x34f
>[ 0.621616] ? __class_create+0x4c/0x80
>[ 0.623412] ? video_setup+0x7f/0x7f
>[ 0.624585] ? acpi_sleep_proc_init+0x27/0x27
>[ 0.625861] ? do_one_initcall+0x4e/0x1a0
>[ 0.627513] ? kernel_init_freeable+0x19e/0x21f
>[ 0.628972] ? rest_init+0x80/0x80
>[ 0.630043] ? kernel_init+0xa/0x100
>[ 0.631084] ? ret_from_fork+0x25/0x30
>[ 0.633343] vgaarb: loaded
>[ 0.635036] EDAC MC: Ver: 3.0.0
>[ 0.638601] PCI: Probing PCI hardware
>[ 0.639833] PCI host bridge to bus 0000:00
>[ 0.641031] pci_bus 0000:00: root bus resource [io 0x0000-0xffff]
> ... Continue to boot and log is omitted ...
I analyzed this memory leak in detail and found acpi_ds_obj_stack_pop_and_
delete() function miscalculated the top of the stack. acpi_ds_obj_stack_push()
function uses walk_state->operand_index for start position of the top, but
acpi_ds_obj_stack_pop_and_delete() function considers index 0 for it.
Therefore, this causes acpi operand memory leak.
This cache leak causes a security threat because an old kernel (<= 4.9) shows
memory locations of kernel functions in stack dump. Some malicious users
could use this information to neutralize kernel ASLR.
I made a patch to fix ACPI operand cache leak.
Signed-off-by: Seunghun Han <kkamagui@gmail.com> | AcpiDsClearImplicitReturn (
ACPI_WALK_STATE *WalkState)
{
ACPI_FUNCTION_NAME (DsClearImplicitReturn);
/*
* Slack must be enabled for this feature
*/
if (!AcpiGbl_EnableInterpreterSlack)
{
return;
}
if (WalkState->ImplicitReturnObj)
{
/*
* Delete any "stale" implicit return. However, in
* complex statements, the implicit return value can be
* bubbled up several levels.
*/
ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH,
"Removing reference on stale implicit return obj %p\n",
WalkState->ImplicitReturnObj));
AcpiUtRemoveReference (WalkState->ImplicitReturnObj);
WalkState->ImplicitReturnObj = NULL;
}
}
| AcpiDsClearImplicitReturn (
ACPI_WALK_STATE *WalkState)
{
ACPI_FUNCTION_NAME (DsClearImplicitReturn);
/*
* Slack must be enabled for this feature
*/
if (!AcpiGbl_EnableInterpreterSlack)
{
return;
}
if (WalkState->ImplicitReturnObj)
{
/*
* Delete any "stale" implicit return. However, in
* complex statements, the implicit return value can be
* bubbled up several levels.
*/
ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH,
"Removing reference on stale implicit return obj %p\n",
WalkState->ImplicitReturnObj));
AcpiUtRemoveReference (WalkState->ImplicitReturnObj);
WalkState->ImplicitReturnObj = NULL;
}
}
| C | acpica | 0 |
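The analysis above blames a pop routine that counted from index 0 while the push routine had started at walk_state->operand_index. A standalone sketch of the corrected pop, using hypothetical structures rather than ACPICA's walk state:
#include <stddef.h>

#define DEMO_MAX_OPERANDS 8

struct demo_walk_state {
	void *operands[DEMO_MAX_OPERANDS];
	unsigned int num_operands;           /* index one past the current top */
};

/* Pop from the real top of the operand stack downwards. The pre-fix code
 * effectively released entries from a fixed position instead of from
 * num_operands, so operands pushed at higher indexes were never freed,
 * which is the Acpi-Operand cache leak reported in the boot log above. */
void demo_pop_and_delete(struct demo_walk_state *ws, unsigned int pop_count)
{
	while (pop_count-- && ws->num_operands > 0) {
		ws->num_operands--;
		ws->operands[ws->num_operands] = NULL;   /* stands in for AcpiUtRemoveReference() */
	}
}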
CVE-2019-3817 | https://www.cvedetails.com/cve/CVE-2019-3817/ | CWE-416 | https://github.com/rpm-software-management/libcomps/commit/e3a5d056633677959ad924a51758876d415e7046 | e3a5d056633677959ad924a51758876d415e7046 | Fix UAF in comps_objmrtree_unite function
The added field is not used at all in many places and it is probably the
left-over of some copy-paste. | void comps_rtree_set(COMPS_RTree * rt, char * key, void * data)
{
__comps_rtree_set(rt, key, strlen(key), data);
}
| void comps_rtree_set(COMPS_RTree * rt, char * key, void * data)
{
__comps_rtree_set(rt, key, strlen(key), data);
}
| C | libcomps | 0 |
CVE-2011-2803 | https://www.cvedetails.com/cve/CVE-2011-2803/ | CWE-119 | https://github.com/chromium/chromium/commit/48f2ec5c24570c9b96bb2798a9ffe956117c5066 | 48f2ec5c24570c9b96bb2798a9ffe956117c5066 | Add OVERRIDE to ui::TreeModelObserver overridden methods.
BUG=None
TEST=None
R=sky@chromium.org
Review URL: http://codereview.chromium.org/7046093
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@88827 0039d316-1c4b-4281-b951-d872f2087c98 | void TreeView::RemoveObserverFromModel() {
if (model_ && observer_added_) {
model_->RemoveObserver(this);
observer_added_ = false;
}
}
| void TreeView::RemoveObserverFromModel() {
if (model_ && observer_added_) {
model_->RemoveObserver(this);
observer_added_ = false;
}
}
| C | Chrome | 0 |
CVE-2015-5366 | https://www.cvedetails.com/cve/CVE-2015-5366/ | CWE-399 | https://github.com/torvalds/linux/commit/beb39db59d14990e401e235faf66a6b9b31240b0 | beb39db59d14990e401e235faf66a6b9b31240b0 | udp: fix behavior of wrong checksums
We have two problems in UDP stack related to bogus checksums :
1) We return -EAGAIN to application even if receive queue is not empty.
This breaks applications using edge trigger epoll()
2) Under UDP flood, we can loop forever without yielding to other
processes, potentially hanging the host, especially on non SMP.
This patch is an attempt to make things better.
We might in the future add extra support for rt applications
wanting to better control time spent doing a recv() in a hostile
environment. For example we could validate checksums before queuing
packets in socket receive queue.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct inet_sock *inet = inet_sk(sk);
struct udp_sock *up = udp_sk(sk);
struct flowi4 fl4_stack;
struct flowi4 *fl4;
int ulen = len;
struct ipcm_cookie ipc;
struct rtable *rt = NULL;
int free = 0;
int connected = 0;
__be32 daddr, faddr, saddr;
__be16 dport;
u8 tos;
int err, is_udplite = IS_UDPLITE(sk);
int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
struct sk_buff *skb;
struct ip_options_data opt_copy;
if (len > 0xFFFF)
return -EMSGSIZE;
/*
* Check the flags.
*/
if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
return -EOPNOTSUPP;
ipc.opt = NULL;
ipc.tx_flags = 0;
ipc.ttl = 0;
ipc.tos = -1;
getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
fl4 = &inet->cork.fl.u.ip4;
if (up->pending) {
/*
* There are pending frames.
* The socket lock must be held while it's corked.
*/
lock_sock(sk);
if (likely(up->pending)) {
if (unlikely(up->pending != AF_INET)) {
release_sock(sk);
return -EINVAL;
}
goto do_append_data;
}
release_sock(sk);
}
ulen += sizeof(struct udphdr);
/*
* Get and verify the address.
*/
if (msg->msg_name) {
DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
if (msg->msg_namelen < sizeof(*usin))
return -EINVAL;
if (usin->sin_family != AF_INET) {
if (usin->sin_family != AF_UNSPEC)
return -EAFNOSUPPORT;
}
daddr = usin->sin_addr.s_addr;
dport = usin->sin_port;
if (dport == 0)
return -EINVAL;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
daddr = inet->inet_daddr;
dport = inet->inet_dport;
/* Open fast path for connected socket.
Route will not be used, if at least one option is set.
*/
connected = 1;
}
ipc.addr = inet->inet_saddr;
ipc.oif = sk->sk_bound_dev_if;
sock_tx_timestamp(sk, &ipc.tx_flags);
if (msg->msg_controllen) {
err = ip_cmsg_send(sock_net(sk), msg, &ipc,
sk->sk_family == AF_INET6);
if (err)
return err;
if (ipc.opt)
free = 1;
connected = 0;
}
if (!ipc.opt) {
struct ip_options_rcu *inet_opt;
rcu_read_lock();
inet_opt = rcu_dereference(inet->inet_opt);
if (inet_opt) {
memcpy(&opt_copy, inet_opt,
sizeof(*inet_opt) + inet_opt->opt.optlen);
ipc.opt = &opt_copy.opt;
}
rcu_read_unlock();
}
saddr = ipc.addr;
ipc.addr = faddr = daddr;
if (ipc.opt && ipc.opt->opt.srr) {
if (!daddr)
return -EINVAL;
faddr = ipc.opt->opt.faddr;
connected = 0;
}
tos = get_rttos(&ipc, inet);
if (sock_flag(sk, SOCK_LOCALROUTE) ||
(msg->msg_flags & MSG_DONTROUTE) ||
(ipc.opt && ipc.opt->opt.is_strictroute)) {
tos |= RTO_ONLINK;
connected = 0;
}
if (ipv4_is_multicast(daddr)) {
if (!ipc.oif)
ipc.oif = inet->mc_index;
if (!saddr)
saddr = inet->mc_addr;
connected = 0;
} else if (!ipc.oif)
ipc.oif = inet->uc_index;
if (connected)
rt = (struct rtable *)sk_dst_check(sk, 0);
if (!rt) {
struct net *net = sock_net(sk);
fl4 = &fl4_stack;
flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
RT_SCOPE_UNIVERSE, sk->sk_protocol,
inet_sk_flowi_flags(sk),
faddr, saddr, dport, inet->inet_sport);
security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
rt = NULL;
if (err == -ENETUNREACH)
IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
goto out;
}
err = -EACCES;
if ((rt->rt_flags & RTCF_BROADCAST) &&
!sock_flag(sk, SOCK_BROADCAST))
goto out;
if (connected)
sk_dst_set(sk, dst_clone(&rt->dst));
}
if (msg->msg_flags&MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
saddr = fl4->saddr;
if (!ipc.addr)
daddr = ipc.addr = fl4->daddr;
/* Lockless fast path for the non-corking case. */
if (!corkreq) {
skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
sizeof(struct udphdr), &ipc, &rt,
msg->msg_flags);
err = PTR_ERR(skb);
if (!IS_ERR_OR_NULL(skb))
err = udp_send_skb(skb, fl4);
goto out;
}
lock_sock(sk);
if (unlikely(up->pending)) {
/* The socket is already corked while preparing it. */
/* ... which is an evident application bug. --ANK */
release_sock(sk);
net_dbg_ratelimited("cork app bug 2\n");
err = -EINVAL;
goto out;
}
/*
* Now cork the socket to pend data.
*/
fl4 = &inet->cork.fl.u.ip4;
fl4->daddr = daddr;
fl4->saddr = saddr;
fl4->fl4_dport = dport;
fl4->fl4_sport = inet->inet_sport;
up->pending = AF_INET;
do_append_data:
up->len += ulen;
err = ip_append_data(sk, fl4, getfrag, msg, ulen,
sizeof(struct udphdr), &ipc, &rt,
corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
if (err)
udp_flush_pending_frames(sk);
else if (!corkreq)
err = udp_push_pending_frames(sk);
else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
up->pending = 0;
release_sock(sk);
out:
ip_rt_put(rt);
if (free)
kfree(ipc.opt);
if (!err)
return len;
/*
* ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
* ENOBUFS might not be good (it's not tunable per se), but otherwise
* we don't have a good statistic (IpOutDiscards but it can be too many
* things). We could add another new stat but at least for now that
* seems like overkill.
*/
if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
}
return err;
do_confirm:
dst_confirm(&rt->dst);
if (!(msg->msg_flags&MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
goto out;
}
| int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct inet_sock *inet = inet_sk(sk);
struct udp_sock *up = udp_sk(sk);
struct flowi4 fl4_stack;
struct flowi4 *fl4;
int ulen = len;
struct ipcm_cookie ipc;
struct rtable *rt = NULL;
int free = 0;
int connected = 0;
__be32 daddr, faddr, saddr;
__be16 dport;
u8 tos;
int err, is_udplite = IS_UDPLITE(sk);
int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
struct sk_buff *skb;
struct ip_options_data opt_copy;
if (len > 0xFFFF)
return -EMSGSIZE;
/*
* Check the flags.
*/
if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
return -EOPNOTSUPP;
ipc.opt = NULL;
ipc.tx_flags = 0;
ipc.ttl = 0;
ipc.tos = -1;
getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
fl4 = &inet->cork.fl.u.ip4;
if (up->pending) {
/*
* There are pending frames.
* The socket lock must be held while it's corked.
*/
lock_sock(sk);
if (likely(up->pending)) {
if (unlikely(up->pending != AF_INET)) {
release_sock(sk);
return -EINVAL;
}
goto do_append_data;
}
release_sock(sk);
}
ulen += sizeof(struct udphdr);
/*
* Get and verify the address.
*/
if (msg->msg_name) {
DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
if (msg->msg_namelen < sizeof(*usin))
return -EINVAL;
if (usin->sin_family != AF_INET) {
if (usin->sin_family != AF_UNSPEC)
return -EAFNOSUPPORT;
}
daddr = usin->sin_addr.s_addr;
dport = usin->sin_port;
if (dport == 0)
return -EINVAL;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
daddr = inet->inet_daddr;
dport = inet->inet_dport;
/* Open fast path for connected socket.
Route will not be used, if at least one option is set.
*/
connected = 1;
}
ipc.addr = inet->inet_saddr;
ipc.oif = sk->sk_bound_dev_if;
sock_tx_timestamp(sk, &ipc.tx_flags);
if (msg->msg_controllen) {
err = ip_cmsg_send(sock_net(sk), msg, &ipc,
sk->sk_family == AF_INET6);
if (err)
return err;
if (ipc.opt)
free = 1;
connected = 0;
}
if (!ipc.opt) {
struct ip_options_rcu *inet_opt;
rcu_read_lock();
inet_opt = rcu_dereference(inet->inet_opt);
if (inet_opt) {
memcpy(&opt_copy, inet_opt,
sizeof(*inet_opt) + inet_opt->opt.optlen);
ipc.opt = &opt_copy.opt;
}
rcu_read_unlock();
}
saddr = ipc.addr;
ipc.addr = faddr = daddr;
if (ipc.opt && ipc.opt->opt.srr) {
if (!daddr)
return -EINVAL;
faddr = ipc.opt->opt.faddr;
connected = 0;
}
tos = get_rttos(&ipc, inet);
if (sock_flag(sk, SOCK_LOCALROUTE) ||
(msg->msg_flags & MSG_DONTROUTE) ||
(ipc.opt && ipc.opt->opt.is_strictroute)) {
tos |= RTO_ONLINK;
connected = 0;
}
if (ipv4_is_multicast(daddr)) {
if (!ipc.oif)
ipc.oif = inet->mc_index;
if (!saddr)
saddr = inet->mc_addr;
connected = 0;
} else if (!ipc.oif)
ipc.oif = inet->uc_index;
if (connected)
rt = (struct rtable *)sk_dst_check(sk, 0);
if (!rt) {
struct net *net = sock_net(sk);
fl4 = &fl4_stack;
flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
RT_SCOPE_UNIVERSE, sk->sk_protocol,
inet_sk_flowi_flags(sk),
faddr, saddr, dport, inet->inet_sport);
security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
rt = NULL;
if (err == -ENETUNREACH)
IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
goto out;
}
err = -EACCES;
if ((rt->rt_flags & RTCF_BROADCAST) &&
!sock_flag(sk, SOCK_BROADCAST))
goto out;
if (connected)
sk_dst_set(sk, dst_clone(&rt->dst));
}
if (msg->msg_flags&MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
saddr = fl4->saddr;
if (!ipc.addr)
daddr = ipc.addr = fl4->daddr;
/* Lockless fast path for the non-corking case. */
if (!corkreq) {
skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
sizeof(struct udphdr), &ipc, &rt,
msg->msg_flags);
err = PTR_ERR(skb);
if (!IS_ERR_OR_NULL(skb))
err = udp_send_skb(skb, fl4);
goto out;
}
lock_sock(sk);
if (unlikely(up->pending)) {
/* The socket is already corked while preparing it. */
/* ... which is an evident application bug. --ANK */
release_sock(sk);
net_dbg_ratelimited("cork app bug 2\n");
err = -EINVAL;
goto out;
}
/*
* Now cork the socket to pend data.
*/
fl4 = &inet->cork.fl.u.ip4;
fl4->daddr = daddr;
fl4->saddr = saddr;
fl4->fl4_dport = dport;
fl4->fl4_sport = inet->inet_sport;
up->pending = AF_INET;
do_append_data:
up->len += ulen;
err = ip_append_data(sk, fl4, getfrag, msg, ulen,
sizeof(struct udphdr), &ipc, &rt,
corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
if (err)
udp_flush_pending_frames(sk);
else if (!corkreq)
err = udp_push_pending_frames(sk);
else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
up->pending = 0;
release_sock(sk);
out:
ip_rt_put(rt);
if (free)
kfree(ipc.opt);
if (!err)
return len;
/*
* ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
* ENOBUFS might not be good (it's not tunable per se), but otherwise
* we don't have a good statistic (IpOutDiscards but it can be too many
* things). We could add another new stat but at least for now that
* seems like overkill.
*/
if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
}
return err;
do_confirm:
dst_confirm(&rt->dst);
if (!(msg->msg_flags&MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
goto out;
}
| C | linux | 0 |
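The commit message lists two symptoms: a spurious "try again" result even though good datagrams are still queued, and unbounded looping under flood. A hedged sketch of the receive-side pattern the fix moves toward (drop the corrupt datagram and keep scanning), with invented helpers rather than the kernel's __skb_recv_datagram():
#include <stdbool.h>
#include <stddef.h>

struct demo_datagram {
	struct demo_datagram *next;
	bool checksum_ok;
};

/* Scan the receive queue: unlink and drop datagrams with bad checksums and
 * return the first valid one. Returning "try again" on the first corrupt
 * packet (the old behaviour) starves edge-triggered epoll() users even
 * though good datagrams may be queued behind it. */
struct demo_datagram *demo_recv_valid(struct demo_datagram **queue)
{
	while (*queue) {
		struct demo_datagram *d = *queue;
		if (d->checksum_ok)
			return d;
		*queue = d->next;    /* drop the corrupt datagram, keep scanning */
	}
	return NULL;             /* queue really is empty; the caller may now report EAGAIN */
}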
CVE-2011-4930 | https://www.cvedetails.com/cve/CVE-2011-4930/ | CWE-134 | https://htcondor-git.cs.wisc.edu/?p=condor.git;a=commitdiff;h=5e5571d1a431eb3c61977b6dd6ec90186ef79867 | 5e5571d1a431eb3c61977b6dd6ec90186ef79867 | null | GahpServer::Startup()
{
char *gahp_path = NULL;
ArgList gahp_args;
int stdin_pipefds[2];
int stdout_pipefds[2];
int stderr_pipefds[2];
int low_port;
int high_port;
Env newenv;
char *tmp_char;
if ( m_gahp_startup_failed ) {
return false;
} else if ( m_gahp_pid != -1 ) {
return true;
}
if ( binary_path && strcmp( binary_path, GAHPCLIENT_DEFAULT_SERVER_PATH ) != 0 ) {
gahp_path = strdup(binary_path);
gahp_args.AppendArgsFromArgList(binary_args);
} else {
gahp_path = param("GAHP");
char *args = param("GAHP_ARGS");
MyString args_error;
if(!gahp_args.AppendArgsV1RawOrV2Quoted(args,&args_error)) {
EXCEPT("Failed to parse arguments: %s",args_error.Value());
}
free(args);
}
if (!gahp_path) return false;
gahp_args.InsertArg( gahp_path, 0);
newenv.SetEnv( "GAHP_TEMP", GridmanagerScratchDir );
if ( get_port_range( FALSE, &low_port, &high_port ) == TRUE ) {
std::string buff;
sprintf( buff, "%d,%d", low_port, high_port );
newenv.SetEnv( "GLOBUS_TCP_PORT_RANGE", buff.c_str() );
}
if ( get_port_range( TRUE, &low_port, &high_port ) == TRUE ) {
std::string buff;
sprintf( buff, "%d,%d", low_port, high_port );
newenv.SetEnv( "GLOBUS_TCP_SOURCE_RANGE", buff.c_str() );
}
tmp_char = param("GLITE_LOCATION");
if ( tmp_char ) {
newenv.SetEnv( "GLITE_LOCATION", tmp_char );
free( tmp_char );
}
tmp_char = param("SOAP_SSL_CA_FILE");
if( tmp_char ) {
newenv.SetEnv( "SOAP_SSL_CA_FILE", tmp_char );
free( tmp_char );
}
tmp_char = param("SOAP_SSL_CA_DIR");
if( tmp_char ) {
newenv.SetEnv( "SOAP_SSL_CA_DIR", tmp_char );
free( tmp_char );
}
if ( param_boolean( "SOAP_SSL_SKIP_HOST_CHECK", false ) ) {
newenv.SetEnv( "SOAP_SSL_SKIP_HOST_CHECK", "True" );
}
if ( m_reaperid == -1 ) {
m_reaperid = daemonCore->Register_Reaper(
"GAHP Server",
(ReaperHandler)&GahpServer::Reaper, // handler
"GahpServer::Reaper",
NULL
);
}
bool is_c_gahp = false;
if (strncmp(my_id, "CONDOR/", 7) == 0) {
is_c_gahp = true;
}
if ( (daemonCore->Create_Pipe(stdin_pipefds, is_c_gahp) == FALSE) ||
(daemonCore->Create_Pipe(stdout_pipefds, true, false, true) == FALSE) ||
(daemonCore->Create_Pipe(stderr_pipefds, true, false, true) == FALSE))
{
dprintf(D_ALWAYS,"GahpServer::Startup - pipe() failed, errno=%d\n",
errno);
free( gahp_path );
return false;
}
int io_redirect[3];
io_redirect[0] = stdin_pipefds[0]; // stdin gets read side of in pipe
io_redirect[1] = stdout_pipefds[1]; // stdout get write side of out pipe
io_redirect[2] = stderr_pipefds[1]; // stderr get write side of err pipe
m_gahp_pid = daemonCore->Create_Process(
gahp_path, // Name of executable
gahp_args, // Args
PRIV_USER_FINAL,// Priv State ---- drop root if we have it
m_reaperid, // id for our registered reaper
FALSE, // do not want a command port
&newenv, // env
NULL, // cwd
NULL, // process family info
NULL, // network sockets to inherit
io_redirect // redirect stdin/out/err
);
if ( m_gahp_pid == FALSE ) {
dprintf(D_ALWAYS,"Failed to start GAHP server (%s)\n",
gahp_path);
free( gahp_path );
m_gahp_pid = -1;
return false;
} else {
dprintf(D_ALWAYS,"GAHP server pid = %d\n",m_gahp_pid);
}
free( gahp_path );
daemonCore->Close_Pipe( io_redirect[0] );
daemonCore->Close_Pipe( io_redirect[1] );
daemonCore->Close_Pipe( io_redirect[2] );
m_gahp_errorfd = stderr_pipefds[0];
m_gahp_readfd = stdout_pipefds[0];
m_gahp_writefd = stdin_pipefds[1];
if ( command_version() == false ) {
dprintf(D_ALWAYS,"Failed to read GAHP server version\n");
m_gahp_startup_failed = true;
return false;
} else {
dprintf(D_FULLDEBUG,"GAHP server version: %s\n",m_gahp_version);
}
if ( command_commands() == false ) {
m_gahp_startup_failed = true;
return false;
}
use_prefix = command_response_prefix( GAHP_PREFIX );
if ( !command_async_mode_on() ) {
setPollInterval(m_pollInterval);
} else {
int result = daemonCore->Register_Pipe(m_gahp_readfd,
"m_gahp_readfd",static_cast<PipeHandlercpp>(&GahpServer::pipe_ready),
"&GahpServer::pipe_ready",this);
if ( result == -1 ) {
setPollInterval(m_pollInterval);
} else {
setPollInterval(m_pollInterval * 12);
}
result = daemonCore->Register_Pipe(m_gahp_errorfd,
"m_gahp_errorfd",static_cast<PipeHandlercpp>(&GahpServer::err_pipe_ready),
"&GahpServer::err_pipe_ready",this);
}
if ( m_commands_supported->contains_anycase("CACHE_PROXY_FROM_FILE")==TRUE &&
m_commands_supported->contains_anycase("UNCACHE_PROXY")==TRUE &&
m_commands_supported->contains_anycase("USE_CACHED_PROXY")==TRUE ) {
can_cache_proxies = true;
}
return true;
}
| GahpServer::Startup()
{
char *gahp_path = NULL;
ArgList gahp_args;
int stdin_pipefds[2];
int stdout_pipefds[2];
int stderr_pipefds[2];
int low_port;
int high_port;
Env newenv;
char *tmp_char;
if ( m_gahp_startup_failed ) {
return false;
} else if ( m_gahp_pid != -1 ) {
return true;
}
if ( binary_path && strcmp( binary_path, GAHPCLIENT_DEFAULT_SERVER_PATH ) != 0 ) {
gahp_path = strdup(binary_path);
gahp_args.AppendArgsFromArgList(binary_args);
} else {
gahp_path = param("GAHP");
char *args = param("GAHP_ARGS");
MyString args_error;
if(!gahp_args.AppendArgsV1RawOrV2Quoted(args,&args_error)) {
EXCEPT("Failed to parse arguments: %s",args_error.Value());
}
free(args);
}
if (!gahp_path) return false;
gahp_args.InsertArg( gahp_path, 0);
newenv.SetEnv( "GAHP_TEMP", GridmanagerScratchDir );
if ( get_port_range( FALSE, &low_port, &high_port ) == TRUE ) {
std::string buff;
sprintf( buff, "%d,%d", low_port, high_port );
newenv.SetEnv( "GLOBUS_TCP_PORT_RANGE", buff.c_str() );
}
if ( get_port_range( TRUE, &low_port, &high_port ) == TRUE ) {
std::string buff;
sprintf( buff, "%d,%d", low_port, high_port );
newenv.SetEnv( "GLOBUS_TCP_SOURCE_RANGE", buff.c_str() );
}
tmp_char = param("GLITE_LOCATION");
if ( tmp_char ) {
newenv.SetEnv( "GLITE_LOCATION", tmp_char );
free( tmp_char );
}
tmp_char = param("SOAP_SSL_CA_FILE");
if( tmp_char ) {
newenv.SetEnv( "SOAP_SSL_CA_FILE", tmp_char );
free( tmp_char );
}
tmp_char = param("SOAP_SSL_CA_DIR");
if( tmp_char ) {
newenv.SetEnv( "SOAP_SSL_CA_DIR", tmp_char );
free( tmp_char );
}
if ( param_boolean( "SOAP_SSL_SKIP_HOST_CHECK", false ) ) {
newenv.SetEnv( "SOAP_SSL_SKIP_HOST_CHECK", "True" );
}
if ( m_reaperid == -1 ) {
m_reaperid = daemonCore->Register_Reaper(
"GAHP Server",
(ReaperHandler)&GahpServer::Reaper, // handler
"GahpServer::Reaper",
NULL
);
}
bool is_c_gahp = false;
if (strncmp(my_id, "CONDOR/", 7) == 0) {
is_c_gahp = true;
}
if ( (daemonCore->Create_Pipe(stdin_pipefds, is_c_gahp) == FALSE) ||
(daemonCore->Create_Pipe(stdout_pipefds, true, false, true) == FALSE) ||
(daemonCore->Create_Pipe(stderr_pipefds, true, false, true) == FALSE))
{
dprintf(D_ALWAYS,"GahpServer::Startup - pipe() failed, errno=%d\n",
errno);
free( gahp_path );
return false;
}
int io_redirect[3];
io_redirect[0] = stdin_pipefds[0]; // stdin gets read side of in pipe
io_redirect[1] = stdout_pipefds[1]; // stdout get write side of out pipe
io_redirect[2] = stderr_pipefds[1]; // stderr get write side of err pipe
m_gahp_pid = daemonCore->Create_Process(
gahp_path, // Name of executable
gahp_args, // Args
PRIV_USER_FINAL,// Priv State ---- drop root if we have it
m_reaperid, // id for our registered reaper
FALSE, // do not want a command port
&newenv, // env
NULL, // cwd
NULL, // process family info
NULL, // network sockets to inherit
io_redirect // redirect stdin/out/err
);
if ( m_gahp_pid == FALSE ) {
dprintf(D_ALWAYS,"Failed to start GAHP server (%s)\n",
gahp_path);
free( gahp_path );
m_gahp_pid = -1;
return false;
} else {
dprintf(D_ALWAYS,"GAHP server pid = %d\n",m_gahp_pid);
}
free( gahp_path );
daemonCore->Close_Pipe( io_redirect[0] );
daemonCore->Close_Pipe( io_redirect[1] );
daemonCore->Close_Pipe( io_redirect[2] );
m_gahp_errorfd = stderr_pipefds[0];
m_gahp_readfd = stdout_pipefds[0];
m_gahp_writefd = stdin_pipefds[1];
if ( command_version() == false ) {
dprintf(D_ALWAYS,"Failed to read GAHP server version\n");
m_gahp_startup_failed = true;
return false;
} else {
dprintf(D_FULLDEBUG,"GAHP server version: %s\n",m_gahp_version);
}
if ( command_commands() == false ) {
m_gahp_startup_failed = true;
return false;
}
use_prefix = command_response_prefix( GAHP_PREFIX );
if ( !command_async_mode_on() ) {
setPollInterval(m_pollInterval);
} else {
int result = daemonCore->Register_Pipe(m_gahp_readfd,
"m_gahp_readfd",static_cast<PipeHandlercpp>(&GahpServer::pipe_ready),
"&GahpServer::pipe_ready",this);
if ( result == -1 ) {
setPollInterval(m_pollInterval);
} else {
setPollInterval(m_pollInterval * 12);
}
result = daemonCore->Register_Pipe(m_gahp_errorfd,
"m_gahp_errorfd",static_cast<PipeHandlercpp>(&GahpServer::err_pipe_ready),
"&GahpServer::err_pipe_ready",this);
}
if ( m_commands_supported->contains_anycase("CACHE_PROXY_FROM_FILE")==TRUE &&
m_commands_supported->contains_anycase("UNCACHE_PROXY")==TRUE &&
m_commands_supported->contains_anycase("USE_CACHED_PROXY")==TRUE ) {
can_cache_proxies = true;
}
return true;
}
| CPP | htcondor | 0 |
CVE-2018-6035 | https://www.cvedetails.com/cve/CVE-2018-6035/ | CWE-200 | https://github.com/chromium/chromium/commit/2649de11c562aa96d336c06136a1a20c01711be0 | 2649de11c562aa96d336c06136a1a20c01711be0 | Hide DevTools frontend from webRequest API
Prevent extensions from observing requests for remote DevTools frontends
and add regression tests.
And update ExtensionTestApi to support initializing the embedded test
server and port from SetUpCommandLine (before SetUpOnMainThread).
BUG=797497,797500
TEST=browser_test --gtest_filter=DevToolsFrontendInWebRequestApiTest.HiddenRequests
Cq-Include-Trybots: master.tryserver.chromium.linux:linux_mojo
Change-Id: Ic8f44b5771f2d5796f8c3de128f0a7ab88a77735
Reviewed-on: https://chromium-review.googlesource.com/844316
Commit-Queue: Rob Wu <rob@robwu.nl>
Reviewed-by: Devlin <rdevlin.cronin@chromium.org>
Reviewed-by: Dmitry Gozman <dgozman@chromium.org>
Cr-Commit-Position: refs/heads/master@{#528187} | bool ExtensionApiTest::RunExtensionTestIncognitoNoFileAccess(
const std::string& extension_name) {
return RunExtensionTestImpl(
extension_name, std::string(), NULL, kFlagEnableIncognito);
}
| bool ExtensionApiTest::RunExtensionTestIncognitoNoFileAccess(
const std::string& extension_name) {
return RunExtensionTestImpl(
extension_name, std::string(), NULL, kFlagEnableIncognito);
}
| C | Chrome | 0 |
CVE-2018-18344 | https://www.cvedetails.com/cve/CVE-2018-18344/ | CWE-20 | https://github.com/chromium/chromium/commit/c71d8045ce0592cf3f4290744ab57b23c1d1b4c6 | c71d8045ce0592cf3f4290744ab57b23c1d1b4c6 | [DevTools] Do not allow Page.setDownloadBehavior for extensions
Bug: 866426
Change-Id: I71b672978e1a8ec779ede49da16b21198567d3a4
Reviewed-on: https://chromium-review.googlesource.com/c/1270007
Commit-Queue: Dmitry Gozman <dgozman@chromium.org>
Reviewed-by: Devlin <rdevlin.cronin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#598004} | void PageHandler::NotifyScreencastVisibility(bool visible) {
if (visible)
capture_retry_count_ = kCaptureRetryLimit;
frontend_->ScreencastVisibilityChanged(visible);
}
| void PageHandler::NotifyScreencastVisibility(bool visible) {
if (visible)
capture_retry_count_ = kCaptureRetryLimit;
frontend_->ScreencastVisibilityChanged(visible);
}
| C | Chrome | 0 |
CVE-2011-2918 | https://www.cvedetails.com/cve/CVE-2011-2918/ | CWE-399 | https://github.com/torvalds/linux/commit/a8b0ca17b80e92faab46ee7179ba9e99ccb61233 | a8b0ca17b80e92faab46ee7179ba9e99ccb61233 | perf: Remove the nmi parameter from the swevent and overflow interface
The nmi parameter indicated if we could do wakeups from the current
context, if not, we would set some state and self-IPI and let the
resulting interrupt do the wakeup.
For the various event classes:
- hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from
the PMI-tail (ARM etc.)
- tracepoint: nmi=0; since tracepoint could be from NMI context.
- software: nmi=[0,1]; some, like the schedule thing cannot
perform wakeups, and hence need 0.
As one can see, there is very little nmi=1 usage, and the down-side of
not using it is that on some platforms some software events can have a
jiffy delay in wakeup (when arch_irq_work_raise isn't implemented).
The up-side however is that we can remove the nmi parameter and save a
bunch of conditionals in fast paths.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Michael Cree <mcree@orcon.net.nz>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jason Wessel <jason.wessel@windriver.com>
Cc: Don Zickus <dzickus@redhat.com>
Link: http://lkml.kernel.org/n/tip-agjev8eu666tvknpb3iaj0fg@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu> | static void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
{
siginfo_t info;
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRALN;
info.si_addr = (void __user *)safe_compute_effective_address(regs, insn);
info.si_trapno = 0;
send_sig_info(SIGBUS, &info, current);
}
| static void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
{
siginfo_t info;
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRALN;
info.si_addr = (void __user *)safe_compute_effective_address(regs, insn);
info.si_trapno = 0;
send_sig_info(SIGBUS, &info, current);
}
| C | linux | 0 |
CVE-2012-1601 | https://www.cvedetails.com/cve/CVE-2012-1601/ | CWE-399 | https://github.com/torvalds/linux/commit/9c895160d25a76c21b65bad141b08e8d4f99afef | 9c895160d25a76c21b65bad141b08e8d4f99afef | KVM: Ensure all vcpus are consistent with in-kernel irqchip settings
(cherry picked from commit 3e515705a1f46beb1c942bb8043c16f8ac7b1e9e)
If some vcpus are created before KVM_CREATE_IRQCHIP, then
irqchip_in_kernel() and vcpu->arch.apic will be inconsistent, leading
to potential NULL pointer dereferences.
Fix by:
- ensuring that no vcpus are installed when KVM_CREATE_IRQCHIP is called
- ensuring that a vcpu has an apic if it is installed after KVM_CREATE_IRQCHIP
This is somewhat long winded because vcpu->arch.apic is created without
kvm->lock held.
Based on earlier patch by Michael Ellerman.
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
kvm_multiple_exception(vcpu, nr, true, error_code, true);
}
| void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
kvm_multiple_exception(vcpu, nr, true, error_code, true);
}
| C | linux | 0 |
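The two invariants named in the commit message translate directly into guards. A sketch in plain C with invented names (the real checks live in the KVM ioctl and vCPU creation paths):
#include <errno.h>
#include <stdbool.h>

struct demo_vm {
	int online_vcpus;
	bool irqchip_in_kernel;
};

/* Invariant 1: the in-kernel irqchip may only be created before any vCPU
 * exists; otherwise earlier vCPUs have no apic and later dereferences of
 * vcpu->arch.apic would hit a NULL pointer. */
int demo_create_irqchip(struct demo_vm *vm)
{
	if (vm->online_vcpus > 0)
		return -EINVAL;
	vm->irqchip_in_kernel = true;
	return 0;
}

/* Invariant 2: a vCPU created after the irqchip must bring an apic along. */
int demo_create_vcpu(struct demo_vm *vm, bool has_apic)
{
	if (vm->irqchip_in_kernel && !has_apic)
		return -EINVAL;
	vm->online_vcpus++;
	return 0;
}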
CVE-2018-20784 | https://www.cvedetails.com/cve/CVE-2018-20784/ | CWE-400 | https://github.com/torvalds/linux/commit/c40f7d74c741a907cfaeb73a7697081881c497d0 | c40f7d74c741a907cfaeb73a7697081881c497d0 | sched/fair: Fix infinite loop in update_blocked_averages() by reverting a9e7f6544b9c
Zhipeng Xie, Xie XiuQi and Sargun Dhillon reported lockups in the
scheduler under high loads, starting at around the v4.18 time frame,
and Zhipeng Xie tracked it down to bugs in the rq->leaf_cfs_rq_list
manipulation.
Do a (manual) revert of:
a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
It turns out that the list_del_leaf_cfs_rq() introduced by this commit
is a surprising property that was not considered in followup commits
such as:
9c2791f936ef ("sched/fair: Fix hierarchical order in rq->leaf_cfs_rq_list")
As Vincent Guittot explains:
"I think that there is a bigger problem with commit a9e7f6544b9c and
cfs_rq throttling:
Let take the example of the following topology TG2 --> TG1 --> root:
1) The 1st time a task is enqueued, we will add TG2 cfs_rq then TG1
cfs_rq to leaf_cfs_rq_list and we are sure to do the whole branch in
one path because it has never been used and can't be throttled so
tmp_alone_branch will point to leaf_cfs_rq_list at the end.
2) Then TG1 is throttled
3) and we add TG3 as a new child of TG1.
4) The 1st enqueue of a task on TG3 will add TG3 cfs_rq just before TG1
cfs_rq and tmp_alone_branch will stay on rq->leaf_cfs_rq_list.
With commit a9e7f6544b9c, we can del a cfs_rq from rq->leaf_cfs_rq_list.
So if the load of TG1 cfs_rq becomes NULL before step 2) above, TG1
cfs_rq is removed from the list.
Then at step 4), TG3 cfs_rq is added at the beginning of rq->leaf_cfs_rq_list
but tmp_alone_branch still points to TG3 cfs_rq because its throttled
parent can't be enqueued when the lock is released.
tmp_alone_branch doesn't point to rq->leaf_cfs_rq_list whereas it should.
So if TG3 cfs_rq is removed or destroyed before tmp_alone_branch
points on another TG cfs_rq, the next TG cfs_rq that will be added,
will be linked outside rq->leaf_cfs_rq_list - which is bad.
In addition, we can break the ordering of the cfs_rq in
rq->leaf_cfs_rq_list but this ordering is used to update and
propagate the update from leaf down to root."
Instead of trying to work through all these cases and trying to reproduce
the very high loads that produced the lockup to begin with, simplify
the code temporarily by reverting a9e7f6544b9c - which change was clearly
not thought through completely.
This (hopefully) gives us a kernel that doesn't lock up so people
can continue to enjoy their holidays without worrying about regressions. ;-)
[ mingo: Wrote changelog, fixed weird spelling in code comment while at it. ]
Analyzed-by: Xie XiuQi <xiexiuqi@huawei.com>
Analyzed-by: Vincent Guittot <vincent.guittot@linaro.org>
Reported-by: Zhipeng Xie <xiezhipeng1@huawei.com>
Reported-by: Sargun Dhillon <sargun@sargun.me>
Reported-by: Xie XiuQi <xiexiuqi@huawei.com>
Tested-by: Zhipeng Xie <xiezhipeng1@huawei.com>
Tested-by: Sargun Dhillon <sargun@sargun.me>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Vincent Guittot <vincent.guittot@linaro.org>
Cc: <stable@vger.kernel.org> # v4.13+
Cc: Bin Li <huawei.libin@huawei.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: a9e7f6544b9c ("sched/fair: Fix O(nr_cgroups) in load balance path")
Link: http://lkml.kernel.org/r/1545879866-27809-1-git-send-email-xiexiuqi@huawei.com
Signed-off-by: Ingo Molnar <mingo@kernel.org> | static inline int throttled_lb_pair(struct task_group *tg,
int src_cpu, int dest_cpu)
{
return 0;
}
| static inline int throttled_lb_pair(struct task_group *tg,
int src_cpu, int dest_cpu)
{
return 0;
}
| C | linux | 0 |
CVE-2011-2840 | https://www.cvedetails.com/cve/CVE-2011-2840/ | CWE-20 | https://github.com/chromium/chromium/commit/2db5a2048dfcacfe5ad4311c2b1e435c4c67febc | 2db5a2048dfcacfe5ad4311c2b1e435c4c67febc | chromeos: fix bug where "aw snap" page replaces first tab if it was a NTP when closing window with > 1 tab.
BUG=chromium-os:12088
TEST=verify bug per bug report.
Review URL: http://codereview.chromium.org/6882058
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@83031 0039d316-1c4b-4281-b951-d872f2087c98 | bool TabCloseableStateWatcher::CanCloseBrowserImpl(
const Browser* browser,
BrowserActionType* action_type) {
*action_type = NONE;
if (waiting_for_browser_)
return true;
if (signing_off_)
return true;
if (browser->type() != Browser::TYPE_NORMAL)
return true;
if (tabstrip_watchers_.size() > 1)
return true;
if (browser->profile()->IsOffTheRecord() && !guest_session_) {
*action_type = OPEN_WINDOW;
waiting_for_browser_ = true;
return true;
}
if (!can_close_tab_)
return false;
*action_type = OPEN_NTP;
return false;
}
| bool TabCloseableStateWatcher::CanCloseBrowserImpl(
const Browser* browser,
BrowserActionType* action_type) {
*action_type = NONE;
if (waiting_for_browser_)
return true;
if (signing_off_)
return true;
if (browser->type() != Browser::TYPE_NORMAL)
return true;
if (tabstrip_watchers_.size() > 1)
return true;
if (browser->profile()->IsOffTheRecord() && !guest_session_) {
*action_type = OPEN_WINDOW;
waiting_for_browser_ = true;
return true;
}
if (!can_close_tab_)
return false;
*action_type = OPEN_NTP;
return false;
}
| C | Chrome | 0 |
CVE-2013-4118 | https://www.cvedetails.com/cve/CVE-2013-4118/ | CWE-476 | https://github.com/FreeRDP/FreeRDP/commit/7d58aac24fe20ffaad7bd9b40c9ddf457c1b06e7 | 7d58aac24fe20ffaad7bd9b40c9ddf457c1b06e7 | security: add a NULL pointer check to fix a server crash. | BOOL security_establish_keys(const BYTE* client_random, rdpRdp* rdp)
{
BYTE pre_master_secret[48];
BYTE master_secret[48];
BYTE session_key_blob[48];
BYTE* server_random;
BYTE salt40[] = { 0xD1, 0x26, 0x9E };
rdpSettings* settings;
settings = rdp->settings;
server_random = settings->ServerRandom;
if (settings->EncryptionMethods == ENCRYPTION_METHOD_FIPS)
{
CryptoSha1 sha1;
BYTE client_encrypt_key_t[CRYPTO_SHA1_DIGEST_LENGTH + 1];
BYTE client_decrypt_key_t[CRYPTO_SHA1_DIGEST_LENGTH + 1];
printf("FIPS Compliant encryption level.\n");
/* disable fastpath input; it doesnt handle FIPS encryption yet */
rdp->settings->FastPathInput = FALSE;
sha1 = crypto_sha1_init();
crypto_sha1_update(sha1, client_random + 16, 16);
crypto_sha1_update(sha1, server_random + 16, 16);
crypto_sha1_final(sha1, client_encrypt_key_t);
client_encrypt_key_t[20] = client_encrypt_key_t[0];
fips_expand_key_bits(client_encrypt_key_t, rdp->fips_encrypt_key);
sha1 = crypto_sha1_init();
crypto_sha1_update(sha1, client_random, 16);
crypto_sha1_update(sha1, server_random, 16);
crypto_sha1_final(sha1, client_decrypt_key_t);
client_decrypt_key_t[20] = client_decrypt_key_t[0];
fips_expand_key_bits(client_decrypt_key_t, rdp->fips_decrypt_key);
sha1 = crypto_sha1_init();
crypto_sha1_update(sha1, client_decrypt_key_t, 20);
crypto_sha1_update(sha1, client_encrypt_key_t, 20);
crypto_sha1_final(sha1, rdp->fips_sign_key);
}
memcpy(pre_master_secret, client_random, 24);
memcpy(pre_master_secret + 24, server_random, 24);
security_A(pre_master_secret, client_random, server_random, master_secret);
security_X(master_secret, client_random, server_random, session_key_blob);
memcpy(rdp->sign_key, session_key_blob, 16);
if (rdp->settings->ServerMode)
{
security_md5_16_32_32(&session_key_blob[16], client_random,
server_random, rdp->encrypt_key);
security_md5_16_32_32(&session_key_blob[32], client_random,
server_random, rdp->decrypt_key);
}
else
{
security_md5_16_32_32(&session_key_blob[16], client_random,
server_random, rdp->decrypt_key);
security_md5_16_32_32(&session_key_blob[32], client_random,
server_random, rdp->encrypt_key);
}
if (settings->EncryptionMethods == 1) /* 40 and 56 bit */
{
memcpy(rdp->sign_key, salt40, 3); /* TODO 56 bit */
memcpy(rdp->decrypt_key, salt40, 3); /* TODO 56 bit */
memcpy(rdp->encrypt_key, salt40, 3); /* TODO 56 bit */
rdp->rc4_key_len = 8;
}
else if (settings->EncryptionMethods == 2) /* 128 bit */
{
rdp->rc4_key_len = 16;
}
memcpy(rdp->decrypt_update_key, rdp->decrypt_key, 16);
memcpy(rdp->encrypt_update_key, rdp->encrypt_key, 16);
rdp->decrypt_use_count = 0;
rdp->decrypt_checksum_use_count = 0;
rdp->encrypt_use_count =0;
rdp->encrypt_checksum_use_count =0;
return TRUE;
}
| BOOL security_establish_keys(const BYTE* client_random, rdpRdp* rdp)
{
BYTE pre_master_secret[48];
BYTE master_secret[48];
BYTE session_key_blob[48];
BYTE* server_random;
BYTE salt40[] = { 0xD1, 0x26, 0x9E };
rdpSettings* settings;
settings = rdp->settings;
server_random = settings->ServerRandom;
if (settings->EncryptionMethods == ENCRYPTION_METHOD_FIPS)
{
CryptoSha1 sha1;
BYTE client_encrypt_key_t[CRYPTO_SHA1_DIGEST_LENGTH + 1];
BYTE client_decrypt_key_t[CRYPTO_SHA1_DIGEST_LENGTH + 1];
printf("FIPS Compliant encryption level.\n");
/* disable fastpath input; it doesnt handle FIPS encryption yet */
rdp->settings->FastPathInput = FALSE;
sha1 = crypto_sha1_init();
crypto_sha1_update(sha1, client_random + 16, 16);
crypto_sha1_update(sha1, server_random + 16, 16);
crypto_sha1_final(sha1, client_encrypt_key_t);
client_encrypt_key_t[20] = client_encrypt_key_t[0];
fips_expand_key_bits(client_encrypt_key_t, rdp->fips_encrypt_key);
sha1 = crypto_sha1_init();
crypto_sha1_update(sha1, client_random, 16);
crypto_sha1_update(sha1, server_random, 16);
crypto_sha1_final(sha1, client_decrypt_key_t);
client_decrypt_key_t[20] = client_decrypt_key_t[0];
fips_expand_key_bits(client_decrypt_key_t, rdp->fips_decrypt_key);
sha1 = crypto_sha1_init();
crypto_sha1_update(sha1, client_decrypt_key_t, 20);
crypto_sha1_update(sha1, client_encrypt_key_t, 20);
crypto_sha1_final(sha1, rdp->fips_sign_key);
}
memcpy(pre_master_secret, client_random, 24);
memcpy(pre_master_secret + 24, server_random, 24);
security_A(pre_master_secret, client_random, server_random, master_secret);
security_X(master_secret, client_random, server_random, session_key_blob);
memcpy(rdp->sign_key, session_key_blob, 16);
if (rdp->settings->ServerMode)
{
security_md5_16_32_32(&session_key_blob[16], client_random,
server_random, rdp->encrypt_key);
security_md5_16_32_32(&session_key_blob[32], client_random,
server_random, rdp->decrypt_key);
}
else
{
security_md5_16_32_32(&session_key_blob[16], client_random,
server_random, rdp->decrypt_key);
security_md5_16_32_32(&session_key_blob[32], client_random,
server_random, rdp->encrypt_key);
}
if (settings->EncryptionMethods == 1) /* 40 and 56 bit */
{
memcpy(rdp->sign_key, salt40, 3); /* TODO 56 bit */
memcpy(rdp->decrypt_key, salt40, 3); /* TODO 56 bit */
memcpy(rdp->encrypt_key, salt40, 3); /* TODO 56 bit */
rdp->rc4_key_len = 8;
}
else if (settings->EncryptionMethods == 2) /* 128 bit */
{
rdp->rc4_key_len = 16;
}
memcpy(rdp->decrypt_update_key, rdp->decrypt_key, 16);
memcpy(rdp->encrypt_update_key, rdp->encrypt_key, 16);
rdp->decrypt_use_count = 0;
rdp->decrypt_checksum_use_count = 0;
rdp->encrypt_use_count =0;
rdp->encrypt_checksum_use_count =0;
return TRUE;
}
| C | FreeRDP | 0 |
CVE-2013-2873 | https://www.cvedetails.com/cve/CVE-2013-2873/ | CWE-416 | https://github.com/chromium/chromium/commit/370bd9b522d2ccd4a3113d6c93d30cdf8ca502ef | 370bd9b522d2ccd4a3113d6c93d30cdf8ca502ef | Protect WebURLLoaderImpl::Context while receiving responses.
A client's didReceiveResponse can cancel a request; by protecting the
Context we avoid a use after free in this case.
Interestingly, we really had very good warning about this problem, see
https://codereview.chromium.org/11900002/ back in January.
R=darin
BUG=241139
Review URL: https://chromiumcodereview.appspot.com/15738007
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@202821 0039d316-1c4b-4281-b951-d872f2087c98 | void set_client(WebURLLoaderClient* client) { client_ = client; }
| void set_client(WebURLLoaderClient* client) { client_ = client; }
| C | Chrome | 0 |
null | null | null | https://github.com/chromium/chromium/commit/5c9d37f8055700c36b4c9006b0d4d81f4f961a06 | 5c9d37f8055700c36b4c9006b0d4d81f4f961a06 | 2010-07-26 Tony Gentilcore <tonyg@chromium.org>
Reviewed by Darin Fisher.
Move DocumentLoadTiming struct to a new file
https://bugs.webkit.org/show_bug.cgi?id=42917
Also makes DocumentLoadTiming Noncopyable.
No new tests because no new functionality.
* GNUmakefile.am:
* WebCore.gypi:
* WebCore.vcproj/WebCore.vcproj:
* WebCore.xcodeproj/project.pbxproj:
* loader/DocumentLoadTiming.h: Added.
(WebCore::DocumentLoadTiming::DocumentLoadTiming):
* loader/DocumentLoader.h:
* loader/FrameLoader.cpp:
* loader/FrameLoaderTypes.h:
* loader/MainResourceLoader.cpp:
* page/Timing.cpp:
git-svn-id: svn://svn.chromium.org/blink/trunk@64051 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | static double getPossiblySkewedTimeInKnownRange(double skewedTime, double lowerBound, double upperBound)
{
#if PLATFORM(CHROMIUM)
if (skewedTime <= lowerBound)
return lowerBound;
if (skewedTime >= upperBound)
return upperBound;
#else
ASSERT_UNUSED(lowerBound, skewedTime >= lowerBound);
ASSERT_UNUSED(upperBound, skewedTime <= upperBound);
#endif
return skewedTime;
}
| static double getPossiblySkewedTimeInKnownRange(double skewedTime, double lowerBound, double upperBound)
{
#if PLATFORM(CHROMIUM)
if (skewedTime <= lowerBound)
return lowerBound;
if (skewedTime >= upperBound)
return upperBound;
#else
ASSERT_UNUSED(lowerBound, skewedTime >= lowerBound);
ASSERT_UNUSED(upperBound, skewedTime <= upperBound);
#endif
return skewedTime;
}
| C | Chrome | 0 |
CVE-2013-4353 | https://www.cvedetails.com/cve/CVE-2013-4353/ | CWE-20 | https://git.openssl.org/gitweb/?p=openssl.git;a=commit;h=197e0ea817ad64820789d86711d55ff50d71f631 | 197e0ea817ad64820789d86711d55ff50d71f631 | null | int ssl3_get_finished(SSL *s, int a, int b)
{
int al,i,ok;
long n;
unsigned char *p;
#ifdef OPENSSL_NO_NEXTPROTONEG
/* the mac has already been generated when we received the
* change cipher spec message and is in s->s3->tmp.peer_finish_md.
*/
#endif
n=s->method->ssl_get_message(s,
a,
b,
SSL3_MT_FINISHED,
64, /* should actually be 36+4 :-) */
&ok);
if (!ok) return((int)n);
/* If this occurs, we have missed a message */
if (!s->s3->change_cipher_spec)
{
al=SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_GET_FINISHED,SSL_R_GOT_A_FIN_BEFORE_A_CCS);
goto f_err;
}
s->s3->change_cipher_spec=0;
p = (unsigned char *)s->init_msg;
i = s->s3->tmp.peer_finish_md_len;
if (i != n)
{
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_FINISHED,SSL_R_BAD_DIGEST_LENGTH);
goto f_err;
}
if (CRYPTO_memcmp(p, s->s3->tmp.peer_finish_md, i) != 0)
{
al=SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_FINISHED,SSL_R_DIGEST_CHECK_FAILED);
goto f_err;
}
/* Copy the finished so we can use it for
renegotiation checks */
if(s->type == SSL_ST_ACCEPT)
{
OPENSSL_assert(i <= EVP_MAX_MD_SIZE);
memcpy(s->s3->previous_client_finished,
s->s3->tmp.peer_finish_md, i);
s->s3->previous_client_finished_len=i;
}
else
{
OPENSSL_assert(i <= EVP_MAX_MD_SIZE);
memcpy(s->s3->previous_server_finished,
s->s3->tmp.peer_finish_md, i);
s->s3->previous_server_finished_len=i;
}
return(1);
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
return(0);
}
| int ssl3_get_finished(SSL *s, int a, int b)
{
int al,i,ok;
long n;
unsigned char *p;
#ifdef OPENSSL_NO_NEXTPROTONEG
/* the mac has already been generated when we received the
* change cipher spec message and is in s->s3->tmp.peer_finish_md.
*/
#endif
n=s->method->ssl_get_message(s,
a,
b,
SSL3_MT_FINISHED,
64, /* should actually be 36+4 :-) */
&ok);
if (!ok) return((int)n);
/* If this occurs, we have missed a message */
if (!s->s3->change_cipher_spec)
{
al=SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_GET_FINISHED,SSL_R_GOT_A_FIN_BEFORE_A_CCS);
goto f_err;
}
s->s3->change_cipher_spec=0;
p = (unsigned char *)s->init_msg;
i = s->s3->tmp.peer_finish_md_len;
if (i != n)
{
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_FINISHED,SSL_R_BAD_DIGEST_LENGTH);
goto f_err;
}
if (CRYPTO_memcmp(p, s->s3->tmp.peer_finish_md, i) != 0)
{
al=SSL_AD_DECRYPT_ERROR;
SSLerr(SSL_F_SSL3_GET_FINISHED,SSL_R_DIGEST_CHECK_FAILED);
goto f_err;
}
/* Copy the finished so we can use it for
renegotiation checks */
if(s->type == SSL_ST_ACCEPT)
{
OPENSSL_assert(i <= EVP_MAX_MD_SIZE);
memcpy(s->s3->previous_client_finished,
s->s3->tmp.peer_finish_md, i);
s->s3->previous_client_finished_len=i;
}
else
{
OPENSSL_assert(i <= EVP_MAX_MD_SIZE);
memcpy(s->s3->previous_server_finished,
s->s3->tmp.peer_finish_md, i);
s->s3->previous_server_finished_len=i;
}
return(1);
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
return(0);
}
| C | openssl | 0 |
null | null | null | https://github.com/chromium/chromium/commit/6a13a6c2fbae0b3269743e6a141fdfe0d9ec9793 | 6a13a6c2fbae0b3269743e6a141fdfe0d9ec9793 | Don't delete the current NavigationEntry when leaving an interstitial page.
BUG=107182
TEST=See bug
Review URL: http://codereview.chromium.org/8976014
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@115189 0039d316-1c4b-4281-b951-d872f2087c98 | ~TestSafeBrowsingBlockingPage() {
if (wait_for_delete_) {
MessageLoopForUI::current()->Quit();
}
}
| ~TestSafeBrowsingBlockingPage() {
if (wait_for_delete_) {
MessageLoopForUI::current()->Quit();
}
}
| C | Chrome | 0 |
CVE-2016-1641 | https://www.cvedetails.com/cve/CVE-2016-1641/ | null | https://github.com/chromium/chromium/commit/75ca8ffd7bd7c58ace1144df05e1307d8d707662 | 75ca8ffd7bd7c58ace1144df05e1307d8d707662 | Don't call WebContents::DownloadImage() callback if the WebContents were deleted
BUG=583718
Review URL: https://codereview.chromium.org/1685343004
Cr-Commit-Position: refs/heads/master@{#375700} | void WebContentsImpl::FriendZone::RemoveCreatedCallbackForTesting(
const CreatedCallback& callback) {
for (size_t i = 0; i < g_created_callbacks.Get().size(); ++i) {
if (g_created_callbacks.Get().at(i).Equals(callback)) {
g_created_callbacks.Get().erase(g_created_callbacks.Get().begin() + i);
return;
}
}
}
| void WebContentsImpl::FriendZone::RemoveCreatedCallbackForTesting(
const CreatedCallback& callback) {
for (size_t i = 0; i < g_created_callbacks.Get().size(); ++i) {
if (g_created_callbacks.Get().at(i).Equals(callback)) {
g_created_callbacks.Get().erase(g_created_callbacks.Get().begin() + i);
return;
}
}
}
| C | Chrome | 0 |
CVE-2011-2861 | https://www.cvedetails.com/cve/CVE-2011-2861/ | CWE-20 | https://github.com/chromium/chromium/commit/8262245d384be025f13e2a5b3a03b7e5c98374ce | 8262245d384be025f13e2a5b3a03b7e5c98374ce | DevTools: move DevToolsAgent/Client into content.
BUG=84078
TEST=
Review URL: http://codereview.chromium.org/7461019
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@93596 0039d316-1c4b-4281-b951-d872f2087c98 | void RenderView::didFailLoad(WebFrame* frame, const WebURLError& error) {
FOR_EACH_OBSERVER(RenderViewObserver, observers_, DidFailLoad(frame, error));
}
| void RenderView::didFailLoad(WebFrame* frame, const WebURLError& error) {
FOR_EACH_OBSERVER(RenderViewObserver, observers_, DidFailLoad(frame, error));
}
| C | Chrome | 0 |
CVE-2016-1696 | https://www.cvedetails.com/cve/CVE-2016-1696/ | CWE-284 | https://github.com/chromium/chromium/commit/c0569cc04741cccf6548c2169fcc1609d958523f | c0569cc04741cccf6548c2169fcc1609d958523f | [Extensions] Expand bindings access checks
BUG=601149
BUG=601073
Review URL: https://codereview.chromium.org/1866103002
Cr-Commit-Position: refs/heads/master@{#387710} | void Dispatcher::OnSetChannel(int channel) {
delegate_->SetChannel(channel);
}
| void Dispatcher::OnSetChannel(int channel) {
delegate_->SetChannel(channel);
}
| C | Chrome | 0 |
CVE-2016-10068 | https://www.cvedetails.com/cve/CVE-2016-10068/ | CWE-20 | https://github.com/ImageMagick/ImageMagick/commit/56d6e20de489113617cbbddaf41e92600a34db22 | 56d6e20de489113617cbbddaf41e92600a34db22 | Prevent fault in MSL interpreter | static void MSLCharacters(void *context,const xmlChar *c,int length)
{
MSLInfo
*msl_info;
register char
*p;
register ssize_t
i;
/*
Receiving some characters from the parser.
*/
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" SAX.characters(%s,%d)",c,length);
msl_info=(MSLInfo *) context;
if (msl_info->content != (char *) NULL)
msl_info->content=(char *) ResizeQuantumMemory(msl_info->content,
strlen(msl_info->content)+length+MaxTextExtent,
sizeof(*msl_info->content));
else
{
msl_info->content=(char *) NULL;
if (~length >= (MaxTextExtent-1))
msl_info->content=(char *) AcquireQuantumMemory(length+MaxTextExtent,
sizeof(*msl_info->content));
if (msl_info->content != (char *) NULL)
*msl_info->content='\0';
}
if (msl_info->content == (char *) NULL)
return;
p=msl_info->content+strlen(msl_info->content);
for (i=0; i < length; i++)
*p++=c[i];
*p='\0';
}
| static void MSLCharacters(void *context,const xmlChar *c,int length)
{
MSLInfo
*msl_info;
register char
*p;
register ssize_t
i;
/*
Receiving some characters from the parser.
*/
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" SAX.characters(%s,%d)",c,length);
msl_info=(MSLInfo *) context;
if (msl_info->content != (char *) NULL)
msl_info->content=(char *) ResizeQuantumMemory(msl_info->content,
strlen(msl_info->content)+length+MaxTextExtent,
sizeof(*msl_info->content));
else
{
msl_info->content=(char *) NULL;
if (~length >= (MaxTextExtent-1))
msl_info->content=(char *) AcquireQuantumMemory(length+MaxTextExtent,
sizeof(*msl_info->content));
if (msl_info->content != (char *) NULL)
*msl_info->content='\0';
}
if (msl_info->content == (char *) NULL)
return;
p=msl_info->content+strlen(msl_info->content);
for (i=0; i < length; i++)
*p++=c[i];
*p='\0';
}
| C | ImageMagick | 0 |
CVE-2011-3107 | https://www.cvedetails.com/cve/CVE-2011-3107/ | null | https://github.com/chromium/chromium/commit/89e4098439f73cb5c16996511cbfdb171a26e173 | 89e4098439f73cb5c16996511cbfdb171a26e173 | [Qt][WK2] There's no way to test the gesture tap on WTR
https://bugs.webkit.org/show_bug.cgi?id=92895
Reviewed by Kenneth Rohde Christiansen.
Source/WebKit2:
Add an instance of QtViewportHandler to QQuickWebViewPrivate, so it's
now available on mobile and desktop modes, as a side effect gesture tap
events can now be created and sent to WebCore.
This is needed to test tap gestures and to get tap gestures working
when you have a WebView (in desktop mode) on notebooks equipped with
touch screens.
* UIProcess/API/qt/qquickwebview.cpp:
(QQuickWebViewPrivate::onComponentComplete):
(QQuickWebViewFlickablePrivate::onComponentComplete): Implementation
moved to QQuickWebViewPrivate::onComponentComplete.
* UIProcess/API/qt/qquickwebview_p_p.h:
(QQuickWebViewPrivate):
(QQuickWebViewFlickablePrivate):
Tools:
WTR doesn't create the QQuickItem from C++, not from QML, so a call
to componentComplete() was added to mimic the QML behaviour.
* WebKitTestRunner/qt/PlatformWebViewQt.cpp:
(WTR::PlatformWebView::PlatformWebView):
git-svn-id: svn://svn.chromium.org/blink/trunk@124625 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | void QQuickWebViewPrivate::handleAuthenticationRequiredRequest(const QString& hostname, const QString& realm, const QString& prefilledUsername, QString& username, QString& password)
{
Q_Q(QQuickWebView);
QtDialogRunner dialogRunner(q);
if (!dialogRunner.initForAuthentication(hostname, realm, prefilledUsername))
return;
dialogRunner.run();
username = dialogRunner.username();
password = dialogRunner.password();
}
| void QQuickWebViewPrivate::handleAuthenticationRequiredRequest(const QString& hostname, const QString& realm, const QString& prefilledUsername, QString& username, QString& password)
{
Q_Q(QQuickWebView);
QtDialogRunner dialogRunner(q);
if (!dialogRunner.initForAuthentication(hostname, realm, prefilledUsername))
return;
dialogRunner.run();
username = dialogRunner.username();
password = dialogRunner.password();
}
| C | Chrome | 0 |
CVE-2018-12896 | https://www.cvedetails.com/cve/CVE-2018-12896/ | CWE-190 | https://github.com/torvalds/linux/commit/78c9c4dfbf8c04883941445a195276bb4bb92c76 | 78c9c4dfbf8c04883941445a195276bb4bb92c76 | posix-timers: Sanitize overrun handling
The posix timer overrun handling is broken because the forwarding functions
can return a huge number of overruns which does not fit in an int. As a
consequence timer_getoverrun(2) and siginfo::si_overrun can turn into
random number generators.
The k_clock::timer_forward() callbacks return a 64 bit value now. Make
k_itimer::ti_overrun[_last] 64bit as well, so the kernel internal
accounting is correct. Remove the temporary (int) casts.
Add a helper function which clamps the overrun value returned to user space
via timer_getoverrun(2) or siginfo::si_overrun limited to a positive value
between 0 and INT_MAX. INT_MAX is an indicator for user space that the
overrun value has been clamped.
Reported-by: Team OWL337 <icytxw@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: John Stultz <john.stultz@linaro.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Michael Kerrisk <mtk.manpages@gmail.com>
Link: https://lkml.kernel.org/r/20180626132705.018623573@linutronix.de | SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
const struct __kernel_timespec __user *, tp)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec64 new_tp;
if (!kc || !kc->clock_set)
return -EINVAL;
if (get_timespec64(&new_tp, tp))
return -EFAULT;
return kc->clock_set(which_clock, &new_tp);
}
| SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
const struct __kernel_timespec __user *, tp)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec64 new_tp;
if (!kc || !kc->clock_set)
return -EINVAL;
if (get_timespec64(&new_tp, tp))
return -EFAULT;
return kc->clock_set(which_clock, &new_tp);
}
| C | linux | 0 |
CVE-2017-7501 | https://www.cvedetails.com/cve/CVE-2017-7501/ | CWE-59 | https://github.com/rpm-software-management/rpm/commit/404ef011c300207cdb1e531670384564aae04bdc | 404ef011c300207cdb1e531670384564aae04bdc | Don't follow symlinks on file creation (CVE-2017-7501)
Open newly created files with O_EXCL to prevent symlink tricks.
When reopening hardlinks for writing the actual content, use append
mode instead. This is compatible with the write-only permissions but
is not destructive in case we got redirected to somebody elses file,
verify the target before actually writing anything.
As these are files with the temporary suffix, errors mean a local
user with sufficient privileges to break the installation of the package
anyway is trying to goof us on purpose, don't bother trying to mend it
(we couldn't fix the hardlink case anyhow) but just bail out.
Based on a patch by Florian Festi. | static int fsmMkfifo(const char *path, mode_t mode)
{
int rc = mkfifo(path, (mode & 07777));
if (_fsm_debug) {
rpmlog(RPMLOG_DEBUG, " %8s (%s, 0%04o) %s\n",
__func__, path, (unsigned)(mode & 07777),
(rc < 0 ? strerror(errno) : ""));
}
if (rc < 0)
rc = RPMERR_MKFIFO_FAILED;
return rc;
}
| static int fsmMkfifo(const char *path, mode_t mode)
{
int rc = mkfifo(path, (mode & 07777));
if (_fsm_debug) {
rpmlog(RPMLOG_DEBUG, " %8s (%s, 0%04o) %s\n",
__func__, path, (unsigned)(mode & 07777),
(rc < 0 ? strerror(errno) : ""));
}
if (rc < 0)
rc = RPMERR_MKFIFO_FAILED;
return rc;
}
| C | rpm | 0 |
CVE-2014-1713 | https://www.cvedetails.com/cve/CVE-2014-1713/ | CWE-399 | https://github.com/chromium/chromium/commit/f85a87ec670ad0fce9d98d90c9a705b72a288154 | f85a87ec670ad0fce9d98d90c9a705b72a288154 | document.location bindings fix
BUG=352374
R=jochen@chromium.org
Review URL: https://codereview.chromium.org/196343011
git-svn-id: svn://svn.chromium.org/blink/trunk@169176 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | static void activityLoggedInIsolatedWorldsAttrAttributeSetter(v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info)
{
ExceptionState exceptionState(ExceptionState::SetterContext, "activityLoggedInIsolatedWorldsAttr", "TestObject", info.Holder(), info.GetIsolate());
TestObject* imp = V8TestObject::toNative(info.Holder());
V8TRYCATCH_EXCEPTION_VOID(int, cppValue, toInt32(jsValue, exceptionState), exceptionState);
imp->setActivityLoggedInIsolatedWorldsAttr(cppValue);
}
| static void activityLoggedInIsolatedWorldsAttrAttributeSetter(v8::Local<v8::Value> jsValue, const v8::PropertyCallbackInfo<void>& info)
{
ExceptionState exceptionState(ExceptionState::SetterContext, "activityLoggedInIsolatedWorldsAttr", "TestObject", info.Holder(), info.GetIsolate());
TestObject* imp = V8TestObject::toNative(info.Holder());
V8TRYCATCH_EXCEPTION_VOID(int, cppValue, toInt32(jsValue, exceptionState), exceptionState);
imp->setActivityLoggedInIsolatedWorldsAttr(cppValue);
}
| C | Chrome | 0 |
CVE-2011-2918 | https://www.cvedetails.com/cve/CVE-2011-2918/ | CWE-399 | https://github.com/torvalds/linux/commit/a8b0ca17b80e92faab46ee7179ba9e99ccb61233 | a8b0ca17b80e92faab46ee7179ba9e99ccb61233 | perf: Remove the nmi parameter from the swevent and overflow interface
The nmi parameter indicated if we could do wakeups from the current
context, if not, we would set some state and self-IPI and let the
resulting interrupt do the wakeup.
For the various event classes:
- hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from
the PMI-tail (ARM etc.)
- tracepoint: nmi=0; since tracepoint could be from NMI context.
- software: nmi=[0,1]; some, like the schedule thing cannot
perform wakeups, and hence need 0.
As one can see, there is very little nmi=1 usage, and the down-side of
not using it is that on some platforms some software events can have a
jiffy delay in wakeup (when arch_irq_work_raise isn't implemented).
The up-side however is that we can remove the nmi parameter and save a
bunch of conditionals in fast paths.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Michael Cree <mcree@orcon.net.nz>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jason Wessel <jason.wessel@windriver.com>
Cc: Don Zickus <dzickus@redhat.com>
Link: http://lkml.kernel.org/n/tip-agjev8eu666tvknpb3iaj0fg@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu> | static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
struct perf_cgroup_info *info;
u64 now;
now = perf_clock();
info = this_cpu_ptr(cgrp->info);
info->time += now - info->timestamp;
info->timestamp = now;
}
| static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
struct perf_cgroup_info *info;
u64 now;
now = perf_clock();
info = this_cpu_ptr(cgrp->info);
info->time += now - info->timestamp;
info->timestamp = now;
}
| C | linux | 0 |
CVE-2012-3552 | https://www.cvedetails.com/cve/CVE-2012-3552/ | CWE-362 | https://github.com/torvalds/linux/commit/f6d8bd051c391c1c0458a30b2a7abcd939329259 | f6d8bd051c391c1c0458a30b2a7abcd939329259 | inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We cant insert an rcu_head in struct ip_options since its included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net> | int ip_options_compile(struct net *net,
struct ip_options * opt, struct sk_buff * skb)
{
int l;
unsigned char * iph;
unsigned char * optptr;
int optlen;
unsigned char * pp_ptr = NULL;
struct rtable *rt = NULL;
if (skb != NULL) {
rt = skb_rtable(skb);
optptr = (unsigned char *)&(ip_hdr(skb)[1]);
} else
optptr = opt->__data;
iph = optptr - sizeof(struct iphdr);
for (l = opt->optlen; l > 0; ) {
switch (*optptr) {
case IPOPT_END:
for (optptr++, l--; l>0; optptr++, l--) {
if (*optptr != IPOPT_END) {
*optptr = IPOPT_END;
opt->is_changed = 1;
}
}
goto eol;
case IPOPT_NOOP:
l--;
optptr++;
continue;
}
optlen = optptr[1];
if (optlen<2 || optlen>l) {
pp_ptr = optptr;
goto error;
}
switch (*optptr) {
case IPOPT_SSRR:
case IPOPT_LSRR:
if (optlen < 3) {
pp_ptr = optptr + 1;
goto error;
}
if (optptr[2] < 4) {
pp_ptr = optptr + 2;
goto error;
}
/* NB: cf RFC-1812 5.2.4.1 */
if (opt->srr) {
pp_ptr = optptr;
goto error;
}
if (!skb) {
if (optptr[2] != 4 || optlen < 7 || ((optlen-3) & 3)) {
pp_ptr = optptr + 1;
goto error;
}
memcpy(&opt->faddr, &optptr[3], 4);
if (optlen > 7)
memmove(&optptr[3], &optptr[7], optlen-7);
}
opt->is_strictroute = (optptr[0] == IPOPT_SSRR);
opt->srr = optptr - iph;
break;
case IPOPT_RR:
if (opt->rr) {
pp_ptr = optptr;
goto error;
}
if (optlen < 3) {
pp_ptr = optptr + 1;
goto error;
}
if (optptr[2] < 4) {
pp_ptr = optptr + 2;
goto error;
}
if (optptr[2] <= optlen) {
if (optptr[2]+3 > optlen) {
pp_ptr = optptr + 2;
goto error;
}
if (rt) {
memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
opt->is_changed = 1;
}
optptr[2] += 4;
opt->rr_needaddr = 1;
}
opt->rr = optptr - iph;
break;
case IPOPT_TIMESTAMP:
if (opt->ts) {
pp_ptr = optptr;
goto error;
}
if (optlen < 4) {
pp_ptr = optptr + 1;
goto error;
}
if (optptr[2] < 5) {
pp_ptr = optptr + 2;
goto error;
}
if (optptr[2] <= optlen) {
__be32 *timeptr = NULL;
if (optptr[2]+3 > optptr[1]) {
pp_ptr = optptr + 2;
goto error;
}
switch (optptr[3]&0xF) {
case IPOPT_TS_TSONLY:
opt->ts = optptr - iph;
if (skb)
timeptr = (__be32*)&optptr[optptr[2]-1];
opt->ts_needtime = 1;
optptr[2] += 4;
break;
case IPOPT_TS_TSANDADDR:
if (optptr[2]+7 > optptr[1]) {
pp_ptr = optptr + 2;
goto error;
}
opt->ts = optptr - iph;
if (rt) {
memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
timeptr = (__be32*)&optptr[optptr[2]+3];
}
opt->ts_needaddr = 1;
opt->ts_needtime = 1;
optptr[2] += 8;
break;
case IPOPT_TS_PRESPEC:
if (optptr[2]+7 > optptr[1]) {
pp_ptr = optptr + 2;
goto error;
}
opt->ts = optptr - iph;
{
__be32 addr;
memcpy(&addr, &optptr[optptr[2]-1], 4);
if (inet_addr_type(net, addr) == RTN_UNICAST)
break;
if (skb)
timeptr = (__be32*)&optptr[optptr[2]+3];
}
opt->ts_needtime = 1;
optptr[2] += 8;
break;
default:
if (!skb && !capable(CAP_NET_RAW)) {
pp_ptr = optptr + 3;
goto error;
}
break;
}
if (timeptr) {
struct timespec tv;
__be32 midtime;
getnstimeofday(&tv);
midtime = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC);
memcpy(timeptr, &midtime, sizeof(__be32));
opt->is_changed = 1;
}
} else {
unsigned overflow = optptr[3]>>4;
if (overflow == 15) {
pp_ptr = optptr + 3;
goto error;
}
opt->ts = optptr - iph;
if (skb) {
optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4);
opt->is_changed = 1;
}
}
break;
case IPOPT_RA:
if (optlen < 4) {
pp_ptr = optptr + 1;
goto error;
}
if (optptr[2] == 0 && optptr[3] == 0)
opt->router_alert = optptr - iph;
break;
case IPOPT_CIPSO:
if ((!skb && !capable(CAP_NET_RAW)) || opt->cipso) {
pp_ptr = optptr;
goto error;
}
opt->cipso = optptr - iph;
if (cipso_v4_validate(skb, &optptr)) {
pp_ptr = optptr;
goto error;
}
break;
case IPOPT_SEC:
case IPOPT_SID:
default:
if (!skb && !capable(CAP_NET_RAW)) {
pp_ptr = optptr;
goto error;
}
break;
}
l -= optlen;
optptr += optlen;
}
eol:
if (!pp_ptr)
return 0;
error:
if (skb) {
icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((pp_ptr-iph)<<24));
}
return -EINVAL;
}
| int ip_options_compile(struct net *net,
struct ip_options * opt, struct sk_buff * skb)
{
int l;
unsigned char * iph;
unsigned char * optptr;
int optlen;
unsigned char * pp_ptr = NULL;
struct rtable *rt = NULL;
if (skb != NULL) {
rt = skb_rtable(skb);
optptr = (unsigned char *)&(ip_hdr(skb)[1]);
} else
optptr = opt->__data;
iph = optptr - sizeof(struct iphdr);
for (l = opt->optlen; l > 0; ) {
switch (*optptr) {
case IPOPT_END:
for (optptr++, l--; l>0; optptr++, l--) {
if (*optptr != IPOPT_END) {
*optptr = IPOPT_END;
opt->is_changed = 1;
}
}
goto eol;
case IPOPT_NOOP:
l--;
optptr++;
continue;
}
optlen = optptr[1];
if (optlen<2 || optlen>l) {
pp_ptr = optptr;
goto error;
}
switch (*optptr) {
case IPOPT_SSRR:
case IPOPT_LSRR:
if (optlen < 3) {
pp_ptr = optptr + 1;
goto error;
}
if (optptr[2] < 4) {
pp_ptr = optptr + 2;
goto error;
}
/* NB: cf RFC-1812 5.2.4.1 */
if (opt->srr) {
pp_ptr = optptr;
goto error;
}
if (!skb) {
if (optptr[2] != 4 || optlen < 7 || ((optlen-3) & 3)) {
pp_ptr = optptr + 1;
goto error;
}
memcpy(&opt->faddr, &optptr[3], 4);
if (optlen > 7)
memmove(&optptr[3], &optptr[7], optlen-7);
}
opt->is_strictroute = (optptr[0] == IPOPT_SSRR);
opt->srr = optptr - iph;
break;
case IPOPT_RR:
if (opt->rr) {
pp_ptr = optptr;
goto error;
}
if (optlen < 3) {
pp_ptr = optptr + 1;
goto error;
}
if (optptr[2] < 4) {
pp_ptr = optptr + 2;
goto error;
}
if (optptr[2] <= optlen) {
if (optptr[2]+3 > optlen) {
pp_ptr = optptr + 2;
goto error;
}
if (rt) {
memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
opt->is_changed = 1;
}
optptr[2] += 4;
opt->rr_needaddr = 1;
}
opt->rr = optptr - iph;
break;
case IPOPT_TIMESTAMP:
if (opt->ts) {
pp_ptr = optptr;
goto error;
}
if (optlen < 4) {
pp_ptr = optptr + 1;
goto error;
}
if (optptr[2] < 5) {
pp_ptr = optptr + 2;
goto error;
}
if (optptr[2] <= optlen) {
__be32 *timeptr = NULL;
if (optptr[2]+3 > optptr[1]) {
pp_ptr = optptr + 2;
goto error;
}
switch (optptr[3]&0xF) {
case IPOPT_TS_TSONLY:
opt->ts = optptr - iph;
if (skb)
timeptr = (__be32*)&optptr[optptr[2]-1];
opt->ts_needtime = 1;
optptr[2] += 4;
break;
case IPOPT_TS_TSANDADDR:
if (optptr[2]+7 > optptr[1]) {
pp_ptr = optptr + 2;
goto error;
}
opt->ts = optptr - iph;
if (rt) {
memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
timeptr = (__be32*)&optptr[optptr[2]+3];
}
opt->ts_needaddr = 1;
opt->ts_needtime = 1;
optptr[2] += 8;
break;
case IPOPT_TS_PRESPEC:
if (optptr[2]+7 > optptr[1]) {
pp_ptr = optptr + 2;
goto error;
}
opt->ts = optptr - iph;
{
__be32 addr;
memcpy(&addr, &optptr[optptr[2]-1], 4);
if (inet_addr_type(net, addr) == RTN_UNICAST)
break;
if (skb)
timeptr = (__be32*)&optptr[optptr[2]+3];
}
opt->ts_needtime = 1;
optptr[2] += 8;
break;
default:
if (!skb && !capable(CAP_NET_RAW)) {
pp_ptr = optptr + 3;
goto error;
}
break;
}
if (timeptr) {
struct timespec tv;
__be32 midtime;
getnstimeofday(&tv);
midtime = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC);
memcpy(timeptr, &midtime, sizeof(__be32));
opt->is_changed = 1;
}
} else {
unsigned overflow = optptr[3]>>4;
if (overflow == 15) {
pp_ptr = optptr + 3;
goto error;
}
opt->ts = optptr - iph;
if (skb) {
optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4);
opt->is_changed = 1;
}
}
break;
case IPOPT_RA:
if (optlen < 4) {
pp_ptr = optptr + 1;
goto error;
}
if (optptr[2] == 0 && optptr[3] == 0)
opt->router_alert = optptr - iph;
break;
case IPOPT_CIPSO:
if ((!skb && !capable(CAP_NET_RAW)) || opt->cipso) {
pp_ptr = optptr;
goto error;
}
opt->cipso = optptr - iph;
if (cipso_v4_validate(skb, &optptr)) {
pp_ptr = optptr;
goto error;
}
break;
case IPOPT_SEC:
case IPOPT_SID:
default:
if (!skb && !capable(CAP_NET_RAW)) {
pp_ptr = optptr;
goto error;
}
break;
}
l -= optlen;
optptr += optlen;
}
eol:
if (!pp_ptr)
return 0;
error:
if (skb) {
icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((pp_ptr-iph)<<24));
}
return -EINVAL;
}
| C | linux | 0 |
CVE-2014-9425 | https://www.cvedetails.com/cve/CVE-2014-9425/ | null | https://git.php.net/?p=php-src.git;a=commit;h=2bcf69d073190e4f032d883f3416dea1b027a39e | 2bcf69d073190e4f032d883f3416dea1b027a39e | null | ZEND_API int zend_ts_hash_index_del(TsHashTable *ht, zend_ulong h)
{
int retval;
begin_write(ht);
retval = zend_hash_index_del(TS_HASH(ht), h);
end_write(ht);
return retval;
}
| ZEND_API int zend_ts_hash_index_del(TsHashTable *ht, zend_ulong h)
{
int retval;
begin_write(ht);
retval = zend_hash_index_del(TS_HASH(ht), h);
end_write(ht);
return retval;
}
| C | php | 0 |
CVE-2017-1000252 | https://www.cvedetails.com/cve/CVE-2017-1000252/ | CWE-20 | https://github.com/torvalds/linux/commit/36ae3c0a36b7456432fedce38ae2f7bd3e01a563 | 36ae3c0a36b7456432fedce38ae2f7bd3e01a563 | KVM: Don't accept obviously wrong gsi values via KVM_IRQFD
We cannot add routes for gsi values >= KVM_MAX_IRQ_ROUTES -- see
kvm_set_irq_routing(). Hence, there is no sense in accepting them
via KVM_IRQFD. Prevent them from entering the system in the first
place.
Signed-off-by: Jan H. Schönherr <jschoenh@amazon.de>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian)
{
mutex_lock(&kvm->irq_lock);
hlist_del_init_rcu(&kian->link);
mutex_unlock(&kvm->irq_lock);
synchronize_srcu(&kvm->irq_srcu);
kvm_arch_post_irq_ack_notifier_list_update(kvm);
}
| void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian)
{
mutex_lock(&kvm->irq_lock);
hlist_del_init_rcu(&kian->link);
mutex_unlock(&kvm->irq_lock);
synchronize_srcu(&kvm->irq_srcu);
kvm_arch_post_irq_ack_notifier_list_update(kvm);
}
| C | linux | 0 |
null | null | null | https://github.com/chromium/chromium/commit/9d02cda7a634fbd6e53d98091f618057f0174387 | 9d02cda7a634fbd6e53d98091f618057f0174387 | Coverity: Fixing pass by value.
CID=101462, 101458, 101437, 101471, 101467
BUG=NONE
TEST=NONE
Review URL: http://codereview.chromium.org/9006023
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@115257 0039d316-1c4b-4281-b951-d872f2087c98 | bool ExtensionPrefs::IsBlacklistBitSet(DictionaryValue* ext) {
return ReadBooleanFromPref(ext, kPrefBlacklist);
}
| bool ExtensionPrefs::IsBlacklistBitSet(DictionaryValue* ext) {
return ReadBooleanFromPref(ext, kPrefBlacklist);
}
| C | Chrome | 0 |
CVE-2014-1874 | https://www.cvedetails.com/cve/CVE-2014-1874/ | CWE-20 | https://github.com/torvalds/linux/commit/2172fa709ab32ca60e86179dc67d0857be8e2c98 | 2172fa709ab32ca60e86179dc67d0857be8e2c98 | SELinux: Fix kernel BUG on empty security contexts.
Setting an empty security context (length=0) on a file will
lead to incorrectly dereferencing the type and other fields
of the security context structure, yielding a kernel BUG.
As a zero-length security context is never valid, just reject
all such security contexts whether coming from userspace
via setxattr or coming from the filesystem upon a getxattr
request by SELinux.
Setting a security context value (empty or otherwise) unknown to
SELinux in the first place is only possible for a root process
(CAP_MAC_ADMIN), and, if running SELinux in enforcing mode, only
if the corresponding SELinux mac_admin permission is also granted
to the domain by policy. In Fedora policies, this is only allowed for
specific domains such as livecd for setting down security contexts
that are not defined in the build host policy.
Reproducer:
su
setenforce 0
touch foo
setfattr -n security.selinux foo
Caveat:
Relabeling or removing foo after doing the above may not be possible
without booting with SELinux disabled. Any subsequent access to foo
after doing the above will also trigger the BUG.
BUG output from Matthew Thode:
[ 473.893141] ------------[ cut here ]------------
[ 473.962110] kernel BUG at security/selinux/ss/services.c:654!
[ 473.995314] invalid opcode: 0000 [#6] SMP
[ 474.027196] Modules linked in:
[ 474.058118] CPU: 0 PID: 8138 Comm: ls Tainted: G D I
3.13.0-grsec #1
[ 474.116637] Hardware name: Supermicro X8ST3/X8ST3, BIOS 2.0
07/29/10
[ 474.149768] task: ffff8805f50cd010 ti: ffff8805f50cd488 task.ti:
ffff8805f50cd488
[ 474.183707] RIP: 0010:[<ffffffff814681c7>] [<ffffffff814681c7>]
context_struct_compute_av+0xce/0x308
[ 474.219954] RSP: 0018:ffff8805c0ac3c38 EFLAGS: 00010246
[ 474.252253] RAX: 0000000000000000 RBX: ffff8805c0ac3d94 RCX:
0000000000000100
[ 474.287018] RDX: ffff8805e8aac000 RSI: 00000000ffffffff RDI:
ffff8805e8aaa000
[ 474.321199] RBP: ffff8805c0ac3cb8 R08: 0000000000000010 R09:
0000000000000006
[ 474.357446] R10: 0000000000000000 R11: ffff8805c567a000 R12:
0000000000000006
[ 474.419191] R13: ffff8805c2b74e88 R14: 00000000000001da R15:
0000000000000000
[ 474.453816] FS: 00007f2e75220800(0000) GS:ffff88061fc00000(0000)
knlGS:0000000000000000
[ 474.489254] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 474.522215] CR2: 00007f2e74716090 CR3: 00000005c085e000 CR4:
00000000000207f0
[ 474.556058] Stack:
[ 474.584325] ffff8805c0ac3c98 ffffffff811b549b ffff8805c0ac3c98
ffff8805f1190a40
[ 474.618913] ffff8805a6202f08 ffff8805c2b74e88 00068800d0464990
ffff8805e8aac860
[ 474.653955] ffff8805c0ac3cb8 000700068113833a ffff880606c75060
ffff8805c0ac3d94
[ 474.690461] Call Trace:
[ 474.723779] [<ffffffff811b549b>] ? lookup_fast+0x1cd/0x22a
[ 474.778049] [<ffffffff81468824>] security_compute_av+0xf4/0x20b
[ 474.811398] [<ffffffff8196f419>] avc_compute_av+0x2a/0x179
[ 474.843813] [<ffffffff8145727b>] avc_has_perm+0x45/0xf4
[ 474.875694] [<ffffffff81457d0e>] inode_has_perm+0x2a/0x31
[ 474.907370] [<ffffffff81457e76>] selinux_inode_getattr+0x3c/0x3e
[ 474.938726] [<ffffffff81455cf6>] security_inode_getattr+0x1b/0x22
[ 474.970036] [<ffffffff811b057d>] vfs_getattr+0x19/0x2d
[ 475.000618] [<ffffffff811b05e5>] vfs_fstatat+0x54/0x91
[ 475.030402] [<ffffffff811b063b>] vfs_lstat+0x19/0x1b
[ 475.061097] [<ffffffff811b077e>] SyS_newlstat+0x15/0x30
[ 475.094595] [<ffffffff8113c5c1>] ? __audit_syscall_entry+0xa1/0xc3
[ 475.148405] [<ffffffff8197791e>] system_call_fastpath+0x16/0x1b
[ 475.179201] Code: 00 48 85 c0 48 89 45 b8 75 02 0f 0b 48 8b 45 a0 48
8b 3d 45 d0 b6 00 8b 40 08 89 c6 ff ce e8 d1 b0 06 00 48 85 c0 49 89 c7
75 02 <0f> 0b 48 8b 45 b8 4c 8b 28 eb 1e 49 8d 7d 08 be 80 01 00 00 e8
[ 475.255884] RIP [<ffffffff814681c7>]
context_struct_compute_av+0xce/0x308
[ 475.296120] RSP <ffff8805c0ac3c38>
[ 475.328734] ---[ end trace f076482e9d754adc ]---
Reported-by: Matthew Thode <mthode@mthode.org>
Signed-off-by: Stephen Smalley <sds@tycho.nsa.gov>
Cc: stable@vger.kernel.org
Signed-off-by: Paul Moore <pmoore@redhat.com> | static void security_dump_masked_av(struct context *scontext,
struct context *tcontext,
u16 tclass,
u32 permissions,
const char *reason)
{
struct common_datum *common_dat;
struct class_datum *tclass_dat;
struct audit_buffer *ab;
char *tclass_name;
char *scontext_name = NULL;
char *tcontext_name = NULL;
char *permission_names[32];
int index;
u32 length;
bool need_comma = false;
if (!permissions)
return;
tclass_name = sym_name(&policydb, SYM_CLASSES, tclass - 1);
tclass_dat = policydb.class_val_to_struct[tclass - 1];
common_dat = tclass_dat->comdatum;
/* init permission_names */
if (common_dat &&
hashtab_map(common_dat->permissions.table,
dump_masked_av_helper, permission_names) < 0)
goto out;
if (hashtab_map(tclass_dat->permissions.table,
dump_masked_av_helper, permission_names) < 0)
goto out;
/* get scontext/tcontext in text form */
if (context_struct_to_string(scontext,
&scontext_name, &length) < 0)
goto out;
if (context_struct_to_string(tcontext,
&tcontext_name, &length) < 0)
goto out;
/* audit a message */
ab = audit_log_start(current->audit_context,
GFP_ATOMIC, AUDIT_SELINUX_ERR);
if (!ab)
goto out;
audit_log_format(ab, "op=security_compute_av reason=%s "
"scontext=%s tcontext=%s tclass=%s perms=",
reason, scontext_name, tcontext_name, tclass_name);
for (index = 0; index < 32; index++) {
u32 mask = (1 << index);
if ((mask & permissions) == 0)
continue;
audit_log_format(ab, "%s%s",
need_comma ? "," : "",
permission_names[index]
? permission_names[index] : "????");
need_comma = true;
}
audit_log_end(ab);
out:
/* release scontext/tcontext */
kfree(tcontext_name);
kfree(scontext_name);
return;
}
| static void security_dump_masked_av(struct context *scontext,
struct context *tcontext,
u16 tclass,
u32 permissions,
const char *reason)
{
struct common_datum *common_dat;
struct class_datum *tclass_dat;
struct audit_buffer *ab;
char *tclass_name;
char *scontext_name = NULL;
char *tcontext_name = NULL;
char *permission_names[32];
int index;
u32 length;
bool need_comma = false;
if (!permissions)
return;
tclass_name = sym_name(&policydb, SYM_CLASSES, tclass - 1);
tclass_dat = policydb.class_val_to_struct[tclass - 1];
common_dat = tclass_dat->comdatum;
/* init permission_names */
if (common_dat &&
hashtab_map(common_dat->permissions.table,
dump_masked_av_helper, permission_names) < 0)
goto out;
if (hashtab_map(tclass_dat->permissions.table,
dump_masked_av_helper, permission_names) < 0)
goto out;
/* get scontext/tcontext in text form */
if (context_struct_to_string(scontext,
&scontext_name, &length) < 0)
goto out;
if (context_struct_to_string(tcontext,
&tcontext_name, &length) < 0)
goto out;
/* audit a message */
ab = audit_log_start(current->audit_context,
GFP_ATOMIC, AUDIT_SELINUX_ERR);
if (!ab)
goto out;
audit_log_format(ab, "op=security_compute_av reason=%s "
"scontext=%s tcontext=%s tclass=%s perms=",
reason, scontext_name, tcontext_name, tclass_name);
for (index = 0; index < 32; index++) {
u32 mask = (1 << index);
if ((mask & permissions) == 0)
continue;
audit_log_format(ab, "%s%s",
need_comma ? "," : "",
permission_names[index]
? permission_names[index] : "????");
need_comma = true;
}
audit_log_end(ab);
out:
/* release scontext/tcontext */
kfree(tcontext_name);
kfree(scontext_name);
return;
}
| C | linux | 0 |
CVE-2018-6035 | https://www.cvedetails.com/cve/CVE-2018-6035/ | CWE-200 | https://github.com/chromium/chromium/commit/2649de11c562aa96d336c06136a1a20c01711be0 | 2649de11c562aa96d336c06136a1a20c01711be0 | Hide DevTools frontend from webRequest API
Prevent extensions from observing requests for remote DevTools frontends
and add regression tests.
And update ExtensionTestApi to support initializing the embedded test
server and port from SetUpCommandLine (before SetUpOnMainThread).
BUG=797497,797500
TEST=browser_test --gtest_filter=DevToolsFrontendInWebRequestApiTest.HiddenRequests
Cq-Include-Trybots: master.tryserver.chromium.linux:linux_mojo
Change-Id: Ic8f44b5771f2d5796f8c3de128f0a7ab88a77735
Reviewed-on: https://chromium-review.googlesource.com/844316
Commit-Queue: Rob Wu <rob@robwu.nl>
Reviewed-by: Devlin <rdevlin.cronin@chromium.org>
Reviewed-by: Dmitry Gozman <dgozman@chromium.org>
Cr-Commit-Position: refs/heads/master@{#528187} | MetricsPrivateDelegate* ChromeExtensionsAPIClient::GetMetricsPrivateDelegate() {
if (!metrics_private_delegate_)
metrics_private_delegate_.reset(new ChromeMetricsPrivateDelegate());
return metrics_private_delegate_.get();
}
| MetricsPrivateDelegate* ChromeExtensionsAPIClient::GetMetricsPrivateDelegate() {
if (!metrics_private_delegate_)
metrics_private_delegate_.reset(new ChromeMetricsPrivateDelegate());
return metrics_private_delegate_.get();
}
| C | Chrome | 0 |
CVE-2016-0798 | https://www.cvedetails.com/cve/CVE-2016-0798/ | CWE-399 | https://git.openssl.org/?p=openssl.git;a=commit;h=259b664f950c2ba66fbf4b0fe5281327904ead21 | 259b664f950c2ba66fbf4b0fe5281327904ead21 | null | static SRP_user_pwd *SRP_user_pwd_new()
{
SRP_user_pwd *ret = OPENSSL_malloc(sizeof(SRP_user_pwd));
if (ret == NULL)
return NULL;
ret->N = NULL;
ret->g = NULL;
ret->s = NULL;
ret->v = NULL;
ret->id = NULL;
ret->info = NULL;
return ret;
}
| static SRP_user_pwd *SRP_user_pwd_new()
{
SRP_user_pwd *ret = OPENSSL_malloc(sizeof(SRP_user_pwd));
if (ret == NULL)
return NULL;
ret->N = NULL;
ret->g = NULL;
ret->s = NULL;
ret->v = NULL;
ret->id = NULL;
ret->info = NULL;
return ret;
}
| C | openssl | 0 |
CVE-2016-4565 | https://www.cvedetails.com/cve/CVE-2016-4565/ | CWE-264 | https://github.com/torvalds/linux/commit/e6bd18f57aad1a2d1ef40e646d03ed0f2515c9e3 | e6bd18f57aad1a2d1ef40e646d03ed0f2515c9e3 | IB/security: Restrict use of the write() interface
The drivers/infiniband stack uses write() as a replacement for
bi-directional ioctl(). This is not safe. There are ways to
trigger write calls that result in the return structure that
is normally written to user space being shunted off to user
specified kernel memory instead.
For the immediate repair, detect and deny suspicious accesses to
the write API.
For long term, update the user space libraries and the kernel API
to something that doesn't present the same security vulnerabilities
(likely a structured ioctl() interface).
The impacted uAPI interfaces are generally only available if
hardware from drivers/infiniband is installed in the system.
Reported-by: Jann Horn <jann@thejh.net>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
[ Expanded check to all known write() entry points ]
Cc: stable@vger.kernel.org
Signed-off-by: Doug Ledford <dledford@redhat.com> | static ssize_t ib_ucm_send_apr(struct ib_ucm_file *file,
const char __user *inbuf,
int in_len, int out_len)
{
return ib_ucm_send_info(file, inbuf, in_len, (void *)ib_send_cm_apr);
}
| static ssize_t ib_ucm_send_apr(struct ib_ucm_file *file,
const char __user *inbuf,
int in_len, int out_len)
{
return ib_ucm_send_info(file, inbuf, in_len, (void *)ib_send_cm_apr);
}
| C | linux | 0 |
null | null | null | https://github.com/chromium/chromium/commit/dc3857aac17be72c96f28d860d875235b3be349a | dc3857aac17be72c96f28d860d875235b3be349a | Unreviewed, rolling out r142736.
http://trac.webkit.org/changeset/142736
https://bugs.webkit.org/show_bug.cgi?id=109716
Broke ABI, nightly builds crash on launch (Requested by ap on
#webkit).
Patch by Sheriff Bot <webkit.review.bot@gmail.com> on 2013-02-13
Source/WebKit2:
* Shared/APIClientTraits.cpp:
(WebKit):
* Shared/APIClientTraits.h:
* UIProcess/API/C/WKPage.h:
* UIProcess/API/gtk/WebKitLoaderClient.cpp:
(attachLoaderClientToView):
* WebProcess/InjectedBundle/API/c/WKBundlePage.h:
* WebProcess/qt/QtBuiltinBundlePage.cpp:
(WebKit::QtBuiltinBundlePage::QtBuiltinBundlePage):
Tools:
* MiniBrowser/mac/WK2BrowserWindowController.m:
(-[WK2BrowserWindowController awakeFromNib]):
* WebKitTestRunner/InjectedBundle/InjectedBundlePage.cpp:
(WTR::InjectedBundlePage::InjectedBundlePage):
* WebKitTestRunner/TestController.cpp:
(WTR::TestController::createWebViewWithOptions):
git-svn-id: svn://svn.chromium.org/blink/trunk@142762 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | static void didCommitLoadForFrame(WKPageRef page, WKFrameRef frame, WKTypeRef userData, const void* clientInfo)
{
if (!WKFrameIsMainFrame(frame))
return;
webkitWebViewLoadChanged(WEBKIT_WEB_VIEW(clientInfo), WEBKIT_LOAD_COMMITTED);
}
| static void didCommitLoadForFrame(WKPageRef page, WKFrameRef frame, WKTypeRef userData, const void* clientInfo)
{
if (!WKFrameIsMainFrame(frame))
return;
webkitWebViewLoadChanged(WEBKIT_WEB_VIEW(clientInfo), WEBKIT_LOAD_COMMITTED);
}
| C | Chrome | 0 |
CVE-2017-16820 | https://www.cvedetails.com/cve/CVE-2017-16820/ | CWE-415 | https://github.com/collectd/collectd/commit/d16c24542b2f96a194d43a73c2e5778822b9cb47 | d16c24542b2f96a194d43a73c2e5778822b9cb47 | snmp plugin: Fix double free of request PDU
snmp_sess_synch_response() always frees request PDU, in both case of request
error and success. If error condition occurs inside of `while (status == 0)`
loop, double free of `req` happens.
Issue: #2291
Signed-off-by: Florian Forster <octo@collectd.org> | static void csnmp_host_open_session(host_definition_t *host) {
struct snmp_session sess;
int error;
if (host->sess_handle != NULL)
csnmp_host_close_session(host);
snmp_sess_init(&sess);
sess.peername = host->address;
switch (host->version) {
case 1:
sess.version = SNMP_VERSION_1;
break;
case 3:
sess.version = SNMP_VERSION_3;
break;
default:
sess.version = SNMP_VERSION_2c;
break;
}
if (host->version == 3) {
sess.securityName = host->username;
sess.securityNameLen = strlen(host->username);
sess.securityLevel = host->security_level;
if (sess.securityLevel == SNMP_SEC_LEVEL_AUTHNOPRIV ||
sess.securityLevel == SNMP_SEC_LEVEL_AUTHPRIV) {
sess.securityAuthProto = host->auth_protocol;
sess.securityAuthProtoLen = host->auth_protocol_len;
sess.securityAuthKeyLen = USM_AUTH_KU_LEN;
error = generate_Ku(sess.securityAuthProto, sess.securityAuthProtoLen,
(u_char *)host->auth_passphrase,
strlen(host->auth_passphrase), sess.securityAuthKey,
&sess.securityAuthKeyLen);
if (error != SNMPERR_SUCCESS) {
ERROR("snmp plugin: host %s: Error generating Ku from auth_passphrase. "
"(Error %d)",
host->name, error);
}
}
if (sess.securityLevel == SNMP_SEC_LEVEL_AUTHPRIV) {
sess.securityPrivProto = host->priv_protocol;
sess.securityPrivProtoLen = host->priv_protocol_len;
sess.securityPrivKeyLen = USM_PRIV_KU_LEN;
error = generate_Ku(sess.securityAuthProto, sess.securityAuthProtoLen,
(u_char *)host->priv_passphrase,
strlen(host->priv_passphrase), sess.securityPrivKey,
&sess.securityPrivKeyLen);
if (error != SNMPERR_SUCCESS) {
ERROR("snmp plugin: host %s: Error generating Ku from priv_passphrase. "
"(Error %d)",
host->name, error);
}
}
if (host->context != NULL) {
sess.contextName = host->context;
sess.contextNameLen = strlen(host->context);
}
} else /* SNMPv1/2 "authenticates" with community string */
{
sess.community = (u_char *)host->community;
sess.community_len = strlen(host->community);
}
/* snmp_sess_open will copy the `struct snmp_session *'. */
host->sess_handle = snmp_sess_open(&sess);
if (host->sess_handle == NULL) {
char *errstr = NULL;
snmp_error(&sess, NULL, NULL, &errstr);
ERROR("snmp plugin: host %s: snmp_sess_open failed: %s", host->name,
(errstr == NULL) ? "Unknown problem" : errstr);
sfree(errstr);
}
} /* void csnmp_host_open_session */
| static void csnmp_host_open_session(host_definition_t *host) {
struct snmp_session sess;
int error;
if (host->sess_handle != NULL)
csnmp_host_close_session(host);
snmp_sess_init(&sess);
sess.peername = host->address;
switch (host->version) {
case 1:
sess.version = SNMP_VERSION_1;
break;
case 3:
sess.version = SNMP_VERSION_3;
break;
default:
sess.version = SNMP_VERSION_2c;
break;
}
if (host->version == 3) {
sess.securityName = host->username;
sess.securityNameLen = strlen(host->username);
sess.securityLevel = host->security_level;
if (sess.securityLevel == SNMP_SEC_LEVEL_AUTHNOPRIV ||
sess.securityLevel == SNMP_SEC_LEVEL_AUTHPRIV) {
sess.securityAuthProto = host->auth_protocol;
sess.securityAuthProtoLen = host->auth_protocol_len;
sess.securityAuthKeyLen = USM_AUTH_KU_LEN;
error = generate_Ku(sess.securityAuthProto, sess.securityAuthProtoLen,
(u_char *)host->auth_passphrase,
strlen(host->auth_passphrase), sess.securityAuthKey,
&sess.securityAuthKeyLen);
if (error != SNMPERR_SUCCESS) {
ERROR("snmp plugin: host %s: Error generating Ku from auth_passphrase. "
"(Error %d)",
host->name, error);
}
}
if (sess.securityLevel == SNMP_SEC_LEVEL_AUTHPRIV) {
sess.securityPrivProto = host->priv_protocol;
sess.securityPrivProtoLen = host->priv_protocol_len;
sess.securityPrivKeyLen = USM_PRIV_KU_LEN;
error = generate_Ku(sess.securityAuthProto, sess.securityAuthProtoLen,
(u_char *)host->priv_passphrase,
strlen(host->priv_passphrase), sess.securityPrivKey,
&sess.securityPrivKeyLen);
if (error != SNMPERR_SUCCESS) {
ERROR("snmp plugin: host %s: Error generating Ku from priv_passphrase. "
"(Error %d)",
host->name, error);
}
}
if (host->context != NULL) {
sess.contextName = host->context;
sess.contextNameLen = strlen(host->context);
}
} else /* SNMPv1/2 "authenticates" with community string */
{
sess.community = (u_char *)host->community;
sess.community_len = strlen(host->community);
}
/* snmp_sess_open will copy the `struct snmp_session *'. */
host->sess_handle = snmp_sess_open(&sess);
if (host->sess_handle == NULL) {
char *errstr = NULL;
snmp_error(&sess, NULL, NULL, &errstr);
ERROR("snmp plugin: host %s: snmp_sess_open failed: %s", host->name,
(errstr == NULL) ? "Unknown problem" : errstr);
sfree(errstr);
}
} /* void csnmp_host_open_session */
| C | collectd | 0 |
CVE-2011-2877 | https://www.cvedetails.com/cve/CVE-2011-2877/ | CWE-20 | https://github.com/chromium/chromium/commit/d31f450c723ba46b53c1762e51188557447d85fd | d31f450c723ba46b53c1762e51188557447d85fd | [WK2] LayerTreeCoordinator should release unused UpdatedAtlases
https://bugs.webkit.org/show_bug.cgi?id=95072
Reviewed by Jocelyn Turcotte.
Release graphic buffers that haven't been used for a while in order to save memory.
This way we can give back memory to the system when no user interaction happens
after a period of time, for example when we are in the background.
* Shared/ShareableBitmap.h:
* WebProcess/WebPage/CoordinatedGraphics/LayerTreeCoordinator.cpp:
(WebKit::LayerTreeCoordinator::LayerTreeCoordinator):
(WebKit::LayerTreeCoordinator::beginContentUpdate):
(WebKit):
(WebKit::LayerTreeCoordinator::scheduleReleaseInactiveAtlases):
(WebKit::LayerTreeCoordinator::releaseInactiveAtlasesTimerFired):
* WebProcess/WebPage/CoordinatedGraphics/LayerTreeCoordinator.h:
(LayerTreeCoordinator):
* WebProcess/WebPage/UpdateAtlas.cpp:
(WebKit::UpdateAtlas::UpdateAtlas):
(WebKit::UpdateAtlas::didSwapBuffers):
Don't call buildLayoutIfNeeded here. It's enough to call it in beginPaintingOnAvailableBuffer
and this way we can track whether this atlas is used with m_areaAllocator.
(WebKit::UpdateAtlas::beginPaintingOnAvailableBuffer):
* WebProcess/WebPage/UpdateAtlas.h:
(WebKit::UpdateAtlas::addTimeInactive):
(WebKit::UpdateAtlas::isInactive):
(WebKit::UpdateAtlas::isInUse):
(UpdateAtlas):
git-svn-id: svn://svn.chromium.org/blink/trunk@128473 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | bool LayerTreeCoordinator::forceRepaintAsync(uint64_t callbackID)
{
ASSERT(!m_forceRepaintAsyncCallbackID);
m_forceRepaintAsyncCallbackID = callbackID;
scheduleLayerFlush();
return true;
}
| bool LayerTreeCoordinator::forceRepaintAsync(uint64_t callbackID)
{
ASSERT(!m_forceRepaintAsyncCallbackID);
m_forceRepaintAsyncCallbackID = callbackID;
scheduleLayerFlush();
return true;
}
| C | Chrome | 0 |
CVE-2019-5827 | https://www.cvedetails.com/cve/CVE-2019-5827/ | CWE-190 | https://github.com/chromium/chromium/commit/517ac71c9ee27f856f9becde8abea7d1604af9d4 | 517ac71c9ee27f856f9becde8abea7d1604af9d4 | sqlite: backport bugfixes for dbfuzz2
Bug: 952406
Change-Id: Icbec429742048d6674828726c96d8e265c41b595
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1568152
Reviewed-by: Chris Mumford <cmumford@google.com>
Commit-Queue: Darwin Huang <huangdarwin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#651030} | static SQLITE_NOINLINE int btreePrevious(BtCursor *pCur){
int rc;
MemPage *pPage;
assert( cursorOwnsBtShared(pCur) );
assert( (pCur->curFlags & (BTCF_AtLast|BTCF_ValidOvfl|BTCF_ValidNKey))==0 );
assert( pCur->info.nSize==0 );
if( pCur->eState!=CURSOR_VALID ){
rc = restoreCursorPosition(pCur);
if( rc!=SQLITE_OK ){
return rc;
}
if( CURSOR_INVALID==pCur->eState ){
return SQLITE_DONE;
}
if( CURSOR_SKIPNEXT==pCur->eState ){
pCur->eState = CURSOR_VALID;
if( pCur->skipNext<0 ) return SQLITE_OK;
}
}
pPage = pCur->pPage;
assert( pPage->isInit );
if( !pPage->leaf ){
int idx = pCur->ix;
rc = moveToChild(pCur, get4byte(findCell(pPage, idx)));
if( rc ) return rc;
rc = moveToRightmost(pCur);
}else{
while( pCur->ix==0 ){
if( pCur->iPage==0 ){
pCur->eState = CURSOR_INVALID;
return SQLITE_DONE;
}
moveToParent(pCur);
}
assert( pCur->info.nSize==0 );
assert( (pCur->curFlags & (BTCF_ValidOvfl))==0 );
pCur->ix--;
pPage = pCur->pPage;
if( pPage->intKey && !pPage->leaf ){
rc = sqlite3BtreePrevious(pCur, 0);
}else{
rc = SQLITE_OK;
}
}
return rc;
}
| static SQLITE_NOINLINE int btreePrevious(BtCursor *pCur){
int rc;
MemPage *pPage;
assert( cursorOwnsBtShared(pCur) );
assert( (pCur->curFlags & (BTCF_AtLast|BTCF_ValidOvfl|BTCF_ValidNKey))==0 );
assert( pCur->info.nSize==0 );
if( pCur->eState!=CURSOR_VALID ){
rc = restoreCursorPosition(pCur);
if( rc!=SQLITE_OK ){
return rc;
}
if( CURSOR_INVALID==pCur->eState ){
return SQLITE_DONE;
}
if( CURSOR_SKIPNEXT==pCur->eState ){
pCur->eState = CURSOR_VALID;
if( pCur->skipNext<0 ) return SQLITE_OK;
}
}
pPage = pCur->pPage;
assert( pPage->isInit );
if( !pPage->leaf ){
int idx = pCur->ix;
rc = moveToChild(pCur, get4byte(findCell(pPage, idx)));
if( rc ) return rc;
rc = moveToRightmost(pCur);
}else{
while( pCur->ix==0 ){
if( pCur->iPage==0 ){
pCur->eState = CURSOR_INVALID;
return SQLITE_DONE;
}
moveToParent(pCur);
}
assert( pCur->info.nSize==0 );
assert( (pCur->curFlags & (BTCF_ValidOvfl))==0 );
pCur->ix--;
pPage = pCur->pPage;
if( pPage->intKey && !pPage->leaf ){
rc = sqlite3BtreePrevious(pCur, 0);
}else{
rc = SQLITE_OK;
}
}
return rc;
}
| C | Chrome | 0 |
CVE-2019-13295 | https://www.cvedetails.com/cve/CVE-2019-13295/ | CWE-125 | https://github.com/ImageMagick/ImageMagick/commit/a7759f410b773a1dd57b0e1fb28112e1cd8b97bc | a7759f410b773a1dd57b0e1fb28112e1cd8b97bc | https://github.com/ImageMagick/ImageMagick/issues/1608 | MagickExport MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
{
#define ClampImageTag "Clamp/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->storage_class == PseudoClass)
{
register ssize_t
i;
register PixelInfo
*magick_restrict q;
q=image->colormap;
for (i=0; i < (ssize_t) image->colors; i++)
{
q->red=(double) ClampPixel(q->red);
q->green=(double) ClampPixel(q->green);
q->blue=(double) ClampPixel(q->blue);
q->alpha=(double) ClampPixel(q->alpha);
q++;
}
return(SyncImage(image,exception));
}
/*
Clamp image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampPixel((MagickRealType) q[i]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ClampImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
| MagickExport MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
{
#define ClampImageTag "Clamp/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->storage_class == PseudoClass)
{
register ssize_t
i;
register PixelInfo
*magick_restrict q;
q=image->colormap;
for (i=0; i < (ssize_t) image->colors; i++)
{
q->red=(double) ClampPixel(q->red);
q->green=(double) ClampPixel(q->green);
q->blue=(double) ClampPixel(q->blue);
q->alpha=(double) ClampPixel(q->alpha);
q++;
}
return(SyncImage(image,exception));
}
/*
Clamp image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampPixel((MagickRealType) q[i]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ClampImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
| C | ImageMagick | 0 |
CVE-2017-18187 | https://www.cvedetails.com/cve/CVE-2017-18187/ | CWE-190 | https://github.com/ARMmbed/mbedtls/commit/83c9f495ffe70c7dd280b41fdfd4881485a3bc28 | 83c9f495ffe70c7dd280b41fdfd4881485a3bc28 | Prevent bounds check bypass through overflow in PSK identity parsing
The check `if( *p + n > end )` in `ssl_parse_client_psk_identity` is
unsafe because `*p + n` might overflow, thus bypassing the check. As
`n` is a user-specified value up to 65K, this is relevant if the
library happens to be located in the last 65K of virtual memory.
This commit replaces the check by a safe version. | static int ssl_parse_client_hello_v2( mbedtls_ssl_context *ssl )
{
int ret, got_common_suite;
unsigned int i, j;
size_t n;
unsigned int ciph_len, sess_len, chal_len;
unsigned char *buf, *p;
const int *ciphersuites;
const mbedtls_ssl_ciphersuite_t *ciphersuite_info;
MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> parse client hello v2" ) );
#if defined(MBEDTLS_SSL_RENEGOTIATION)
if( ssl->renego_status != MBEDTLS_SSL_INITIAL_HANDSHAKE )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "client hello v2 illegal for renegotiation" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
#endif /* MBEDTLS_SSL_RENEGOTIATION */
buf = ssl->in_hdr;
MBEDTLS_SSL_DEBUG_BUF( 4, "record header", buf, 5 );
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v2, message type: %d",
buf[2] ) );
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v2, message len.: %d",
( ( buf[0] & 0x7F ) << 8 ) | buf[1] ) );
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v2, max. version: [%d:%d]",
buf[3], buf[4] ) );
/*
* SSLv2 Client Hello
*
* Record layer:
* 0 . 1 message length
*
* SSL layer:
* 2 . 2 message type
* 3 . 4 protocol version
*/
if( buf[2] != MBEDTLS_SSL_HS_CLIENT_HELLO ||
buf[3] != MBEDTLS_SSL_MAJOR_VERSION_3 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
n = ( ( buf[0] << 8 ) | buf[1] ) & 0x7FFF;
if( n < 17 || n > 512 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
ssl->major_ver = MBEDTLS_SSL_MAJOR_VERSION_3;
ssl->minor_ver = ( buf[4] <= ssl->conf->max_minor_ver )
? buf[4] : ssl->conf->max_minor_ver;
if( ssl->minor_ver < ssl->conf->min_minor_ver )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "client only supports ssl smaller than minimum"
" [%d:%d] < [%d:%d]",
ssl->major_ver, ssl->minor_ver,
ssl->conf->min_major_ver, ssl->conf->min_minor_ver ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_PROTOCOL_VERSION );
return( MBEDTLS_ERR_SSL_BAD_HS_PROTOCOL_VERSION );
}
ssl->handshake->max_major_ver = buf[3];
ssl->handshake->max_minor_ver = buf[4];
if( ( ret = mbedtls_ssl_fetch_input( ssl, 2 + n ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_fetch_input", ret );
return( ret );
}
ssl->handshake->update_checksum( ssl, buf + 2, n );
buf = ssl->in_msg;
n = ssl->in_left - 5;
/*
* 0 . 1 ciphersuitelist length
* 2 . 3 session id length
* 4 . 5 challenge length
* 6 . .. ciphersuitelist
* .. . .. session id
* .. . .. challenge
*/
MBEDTLS_SSL_DEBUG_BUF( 4, "record contents", buf, n );
ciph_len = ( buf[0] << 8 ) | buf[1];
sess_len = ( buf[2] << 8 ) | buf[3];
chal_len = ( buf[4] << 8 ) | buf[5];
MBEDTLS_SSL_DEBUG_MSG( 3, ( "ciph_len: %d, sess_len: %d, chal_len: %d",
ciph_len, sess_len, chal_len ) );
/*
* Make sure each parameter length is valid
*/
if( ciph_len < 3 || ( ciph_len % 3 ) != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
if( sess_len > 32 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
if( chal_len < 8 || chal_len > 32 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
if( n != 6 + ciph_len + sess_len + chal_len )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
MBEDTLS_SSL_DEBUG_BUF( 3, "client hello, ciphersuitelist",
buf + 6, ciph_len );
MBEDTLS_SSL_DEBUG_BUF( 3, "client hello, session id",
buf + 6 + ciph_len, sess_len );
MBEDTLS_SSL_DEBUG_BUF( 3, "client hello, challenge",
buf + 6 + ciph_len + sess_len, chal_len );
p = buf + 6 + ciph_len;
ssl->session_negotiate->id_len = sess_len;
memset( ssl->session_negotiate->id, 0,
sizeof( ssl->session_negotiate->id ) );
memcpy( ssl->session_negotiate->id, p, ssl->session_negotiate->id_len );
p += sess_len;
memset( ssl->handshake->randbytes, 0, 64 );
memcpy( ssl->handshake->randbytes + 32 - chal_len, p, chal_len );
/*
* Check for TLS_EMPTY_RENEGOTIATION_INFO_SCSV
*/
for( i = 0, p = buf + 6; i < ciph_len; i += 3, p += 3 )
{
if( p[0] == 0 && p[1] == 0 && p[2] == MBEDTLS_SSL_EMPTY_RENEGOTIATION_INFO )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "received TLS_EMPTY_RENEGOTIATION_INFO " ) );
#if defined(MBEDTLS_SSL_RENEGOTIATION)
if( ssl->renego_status == MBEDTLS_SSL_RENEGOTIATION_IN_PROGRESS )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "received RENEGOTIATION SCSV "
"during renegotiation" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
#endif /* MBEDTLS_SSL_RENEGOTIATION */
ssl->secure_renegotiation = MBEDTLS_SSL_SECURE_RENEGOTIATION;
break;
}
}
#if defined(MBEDTLS_SSL_FALLBACK_SCSV)
for( i = 0, p = buf + 6; i < ciph_len; i += 3, p += 3 )
{
if( p[0] == 0 &&
p[1] == (unsigned char)( ( MBEDTLS_SSL_FALLBACK_SCSV_VALUE >> 8 ) & 0xff ) &&
p[2] == (unsigned char)( ( MBEDTLS_SSL_FALLBACK_SCSV_VALUE ) & 0xff ) )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "received FALLBACK_SCSV" ) );
if( ssl->minor_ver < ssl->conf->max_minor_ver )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "inapropriate fallback" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_INAPROPRIATE_FALLBACK );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
break;
}
}
#endif /* MBEDTLS_SSL_FALLBACK_SCSV */
got_common_suite = 0;
ciphersuites = ssl->conf->ciphersuite_list[ssl->minor_ver];
ciphersuite_info = NULL;
#if defined(MBEDTLS_SSL_SRV_RESPECT_CLIENT_PREFERENCE)
for( j = 0, p = buf + 6; j < ciph_len; j += 3, p += 3 )
for( i = 0; ciphersuites[i] != 0; i++ )
#else
for( i = 0; ciphersuites[i] != 0; i++ )
for( j = 0, p = buf + 6; j < ciph_len; j += 3, p += 3 )
#endif
{
if( p[0] != 0 ||
p[1] != ( ( ciphersuites[i] >> 8 ) & 0xFF ) ||
p[2] != ( ( ciphersuites[i] ) & 0xFF ) )
continue;
got_common_suite = 1;
if( ( ret = ssl_ciphersuite_match( ssl, ciphersuites[i],
&ciphersuite_info ) ) != 0 )
return( ret );
if( ciphersuite_info != NULL )
goto have_ciphersuite_v2;
}
if( got_common_suite )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "got ciphersuites in common, "
"but none of them usable" ) );
return( MBEDTLS_ERR_SSL_NO_USABLE_CIPHERSUITE );
}
else
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "got no ciphersuites in common" ) );
return( MBEDTLS_ERR_SSL_NO_CIPHER_CHOSEN );
}
have_ciphersuite_v2:
MBEDTLS_SSL_DEBUG_MSG( 2, ( "selected ciphersuite: %s", ciphersuite_info->name ) );
ssl->session_negotiate->ciphersuite = ciphersuites[i];
ssl->transform_negotiate->ciphersuite_info = ciphersuite_info;
/*
* SSLv2 Client Hello relevant renegotiation security checks
*/
if( ssl->secure_renegotiation == MBEDTLS_SSL_LEGACY_RENEGOTIATION &&
ssl->conf->allow_legacy_renegotiation == MBEDTLS_SSL_LEGACY_BREAK_HANDSHAKE )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "legacy renegotiation, breaking off handshake" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
ssl->in_left = 0;
ssl->state++;
MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= parse client hello v2" ) );
return( 0 );
}
| static int ssl_parse_client_hello_v2( mbedtls_ssl_context *ssl )
{
int ret, got_common_suite;
unsigned int i, j;
size_t n;
unsigned int ciph_len, sess_len, chal_len;
unsigned char *buf, *p;
const int *ciphersuites;
const mbedtls_ssl_ciphersuite_t *ciphersuite_info;
MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> parse client hello v2" ) );
#if defined(MBEDTLS_SSL_RENEGOTIATION)
if( ssl->renego_status != MBEDTLS_SSL_INITIAL_HANDSHAKE )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "client hello v2 illegal for renegotiation" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
#endif /* MBEDTLS_SSL_RENEGOTIATION */
buf = ssl->in_hdr;
MBEDTLS_SSL_DEBUG_BUF( 4, "record header", buf, 5 );
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v2, message type: %d",
buf[2] ) );
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v2, message len.: %d",
( ( buf[0] & 0x7F ) << 8 ) | buf[1] ) );
MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v2, max. version: [%d:%d]",
buf[3], buf[4] ) );
/*
* SSLv2 Client Hello
*
* Record layer:
* 0 . 1 message length
*
* SSL layer:
* 2 . 2 message type
* 3 . 4 protocol version
*/
if( buf[2] != MBEDTLS_SSL_HS_CLIENT_HELLO ||
buf[3] != MBEDTLS_SSL_MAJOR_VERSION_3 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
n = ( ( buf[0] << 8 ) | buf[1] ) & 0x7FFF;
if( n < 17 || n > 512 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
ssl->major_ver = MBEDTLS_SSL_MAJOR_VERSION_3;
ssl->minor_ver = ( buf[4] <= ssl->conf->max_minor_ver )
? buf[4] : ssl->conf->max_minor_ver;
if( ssl->minor_ver < ssl->conf->min_minor_ver )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "client only supports ssl smaller than minimum"
" [%d:%d] < [%d:%d]",
ssl->major_ver, ssl->minor_ver,
ssl->conf->min_major_ver, ssl->conf->min_minor_ver ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_PROTOCOL_VERSION );
return( MBEDTLS_ERR_SSL_BAD_HS_PROTOCOL_VERSION );
}
ssl->handshake->max_major_ver = buf[3];
ssl->handshake->max_minor_ver = buf[4];
if( ( ret = mbedtls_ssl_fetch_input( ssl, 2 + n ) ) != 0 )
{
MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_fetch_input", ret );
return( ret );
}
ssl->handshake->update_checksum( ssl, buf + 2, n );
buf = ssl->in_msg;
n = ssl->in_left - 5;
/*
* 0 . 1 ciphersuitelist length
* 2 . 3 session id length
* 4 . 5 challenge length
* 6 . .. ciphersuitelist
* .. . .. session id
* .. . .. challenge
*/
MBEDTLS_SSL_DEBUG_BUF( 4, "record contents", buf, n );
ciph_len = ( buf[0] << 8 ) | buf[1];
sess_len = ( buf[2] << 8 ) | buf[3];
chal_len = ( buf[4] << 8 ) | buf[5];
MBEDTLS_SSL_DEBUG_MSG( 3, ( "ciph_len: %d, sess_len: %d, chal_len: %d",
ciph_len, sess_len, chal_len ) );
/*
* Make sure each parameter length is valid
*/
if( ciph_len < 3 || ( ciph_len % 3 ) != 0 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
if( sess_len > 32 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
if( chal_len < 8 || chal_len > 32 )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
if( n != 6 + ciph_len + sess_len + chal_len )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
MBEDTLS_SSL_DEBUG_BUF( 3, "client hello, ciphersuitelist",
buf + 6, ciph_len );
MBEDTLS_SSL_DEBUG_BUF( 3, "client hello, session id",
buf + 6 + ciph_len, sess_len );
MBEDTLS_SSL_DEBUG_BUF( 3, "client hello, challenge",
buf + 6 + ciph_len + sess_len, chal_len );
p = buf + 6 + ciph_len;
ssl->session_negotiate->id_len = sess_len;
memset( ssl->session_negotiate->id, 0,
sizeof( ssl->session_negotiate->id ) );
memcpy( ssl->session_negotiate->id, p, ssl->session_negotiate->id_len );
p += sess_len;
memset( ssl->handshake->randbytes, 0, 64 );
memcpy( ssl->handshake->randbytes + 32 - chal_len, p, chal_len );
/*
* Check for TLS_EMPTY_RENEGOTIATION_INFO_SCSV
*/
for( i = 0, p = buf + 6; i < ciph_len; i += 3, p += 3 )
{
if( p[0] == 0 && p[1] == 0 && p[2] == MBEDTLS_SSL_EMPTY_RENEGOTIATION_INFO )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "received TLS_EMPTY_RENEGOTIATION_INFO " ) );
#if defined(MBEDTLS_SSL_RENEGOTIATION)
if( ssl->renego_status == MBEDTLS_SSL_RENEGOTIATION_IN_PROGRESS )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "received RENEGOTIATION SCSV "
"during renegotiation" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
#endif /* MBEDTLS_SSL_RENEGOTIATION */
ssl->secure_renegotiation = MBEDTLS_SSL_SECURE_RENEGOTIATION;
break;
}
}
#if defined(MBEDTLS_SSL_FALLBACK_SCSV)
for( i = 0, p = buf + 6; i < ciph_len; i += 3, p += 3 )
{
if( p[0] == 0 &&
p[1] == (unsigned char)( ( MBEDTLS_SSL_FALLBACK_SCSV_VALUE >> 8 ) & 0xff ) &&
p[2] == (unsigned char)( ( MBEDTLS_SSL_FALLBACK_SCSV_VALUE ) & 0xff ) )
{
MBEDTLS_SSL_DEBUG_MSG( 3, ( "received FALLBACK_SCSV" ) );
if( ssl->minor_ver < ssl->conf->max_minor_ver )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "inapropriate fallback" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_INAPROPRIATE_FALLBACK );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
break;
}
}
#endif /* MBEDTLS_SSL_FALLBACK_SCSV */
got_common_suite = 0;
ciphersuites = ssl->conf->ciphersuite_list[ssl->minor_ver];
ciphersuite_info = NULL;
#if defined(MBEDTLS_SSL_SRV_RESPECT_CLIENT_PREFERENCE)
for( j = 0, p = buf + 6; j < ciph_len; j += 3, p += 3 )
for( i = 0; ciphersuites[i] != 0; i++ )
#else
for( i = 0; ciphersuites[i] != 0; i++ )
for( j = 0, p = buf + 6; j < ciph_len; j += 3, p += 3 )
#endif
{
if( p[0] != 0 ||
p[1] != ( ( ciphersuites[i] >> 8 ) & 0xFF ) ||
p[2] != ( ( ciphersuites[i] ) & 0xFF ) )
continue;
got_common_suite = 1;
if( ( ret = ssl_ciphersuite_match( ssl, ciphersuites[i],
&ciphersuite_info ) ) != 0 )
return( ret );
if( ciphersuite_info != NULL )
goto have_ciphersuite_v2;
}
if( got_common_suite )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "got ciphersuites in common, "
"but none of them usable" ) );
return( MBEDTLS_ERR_SSL_NO_USABLE_CIPHERSUITE );
}
else
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "got no ciphersuites in common" ) );
return( MBEDTLS_ERR_SSL_NO_CIPHER_CHOSEN );
}
have_ciphersuite_v2:
MBEDTLS_SSL_DEBUG_MSG( 2, ( "selected ciphersuite: %s", ciphersuite_info->name ) );
ssl->session_negotiate->ciphersuite = ciphersuites[i];
ssl->transform_negotiate->ciphersuite_info = ciphersuite_info;
/*
* SSLv2 Client Hello relevant renegotiation security checks
*/
if( ssl->secure_renegotiation == MBEDTLS_SSL_LEGACY_RENEGOTIATION &&
ssl->conf->allow_legacy_renegotiation == MBEDTLS_SSL_LEGACY_BREAK_HANDSHAKE )
{
MBEDTLS_SSL_DEBUG_MSG( 1, ( "legacy renegotiation, breaking off handshake" ) );
mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL,
MBEDTLS_SSL_ALERT_MSG_HANDSHAKE_FAILURE );
return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO );
}
ssl->in_left = 0;
ssl->state++;
MBEDTLS_SSL_DEBUG_MSG( 2, ( "<= parse client hello v2" ) );
return( 0 );
}
| C | mbedtls | 0 |
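Note on the CVE-2017-18187 record above: the commit message describes replacing the bounds check `if( *p + n > end )`, which can be bypassed when `*p + n` wraps around, with an overflow-safe form, but the patched function (ssl_parse_client_psk_identity) is not the one captured in this row. The fragment below is only a minimal sketch of the general pattern, with hypothetical names, contrasting the unsafe pointer comparison with a comparison against the remaining length.

#include <stddef.h>

/* Illustrative sketch only, not the mbedtls patch.  `p` points into a
 * parse buffer that ends at `end`; `n` is an untrusted length field. */
static int has_room_unsafe(const unsigned char *p, const unsigned char *end,
                           size_t n)
{
    /* UNSAFE: if the buffer sits near the top of the address space,
     * p + n can wrap and the check passes even though n bytes are not
     * actually available. */
    return (p + n <= end);
}

static int has_room_safe(const unsigned char *p, const unsigned char *end,
                         size_t n)
{
    /* SAFE: compute how much space is really left first; n is compared
     * against a value that cannot overflow. */
    return (n <= (size_t)(end - p));
}

Comparing lengths rather than advanced pointers is the usual way to keep such checks immune to wrap-around.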
CVE-2016-10168 | https://www.cvedetails.com/cve/CVE-2016-10168/ | CWE-190 | https://github.com/libgd/libgd/commit/69d2fd2c597ffc0c217de1238b9bf4d4bceba8e6 | 69d2fd2c597ffc0c217de1238b9bf4d4bceba8e6 | Fix #354: Signed Integer Overflow gd_io.c
GD2 stores the number of horizontal and vertical chunks as words (i.e. 2
byte unsigned). These values are multiplied and assigned to an int when
reading the image, which can cause integer overflows. We have to avoid
that, and also make sure that either chunk count is actually greater
than zero. If illegal chunk counts are detected, we bail out from
reading the image. | BGD_DECLARE(void *) gdImageGd2Ptr (gdImagePtr im, int cs, int fmt, int *size)
{
_noLibzError();
return NULL;
}
| BGD_DECLARE(void *) gdImageGd2Ptr (gdImagePtr im, int cs, int fmt, int *size)
{
_noLibzError();
return NULL;
}
| C | libgd | 0 |
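Note on the CVE-2016-10168 record above: the commit message explains that the GD2 reader multiplies two 16-bit chunk counts and assigns the product to an int, and that the fix rejects counts that are zero or whose product would overflow; the function captured in this row is only a stub. The check below is a hedged sketch of that validation pattern with hypothetical names, not gd's actual code.

#include <limits.h>

/* Hypothetical validation of two chunk counts read as 16-bit words from
 * a GD2 header, done before their product is used as a loop or
 * allocation bound. */
static int validate_chunk_counts(int ncx, int ncy, int *nc_out)
{
    if (ncx <= 0 || ncy <= 0)        /* a zero or negative count is corrupt */
        return -1;
    if (ncx > INT_MAX / ncy)         /* product would overflow an int */
        return -1;
    *nc_out = ncx * ncy;
    return 0;
}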
CVE-2017-8065 | https://www.cvedetails.com/cve/CVE-2017-8065/ | CWE-119 | https://github.com/torvalds/linux/commit/3b30460c5b0ed762be75a004e924ec3f8711e032 | 3b30460c5b0ed762be75a004e924ec3f8711e032 | crypto: ccm - move cbcmac input off the stack
Commit f15f05b0a5de ("crypto: ccm - switch to separate cbcmac driver")
refactored the CCM driver to allow separate implementations of the
underlying MAC to be provided by a platform. However, in doing so, it
moved some data from the linear region to the stack, which violates the
SG constraints when the stack is virtually mapped.
So move idata/odata back to the request ctx struct, of which we can
reasonably expect that it has been allocated using kmalloc() et al.
Reported-by: Johannes Berg <johannes@sipsolutions.net>
Fixes: f15f05b0a5de ("crypto: ccm - switch to separate cbcmac driver")
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Tested-by: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> | static int crypto_ccm_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
struct skcipher_request *skreq = &pctx->skreq;
struct scatterlist *dst;
unsigned int cryptlen = req->cryptlen;
u8 *odata = pctx->odata;
u8 *iv = req->iv;
int err;
err = crypto_ccm_init_crypt(req, odata);
if (err)
return err;
err = crypto_ccm_auth(req, sg_next(pctx->src), cryptlen);
if (err)
return err;
dst = pctx->src;
if (req->src != req->dst)
dst = pctx->dst;
skcipher_request_set_tfm(skreq, ctx->ctr);
skcipher_request_set_callback(skreq, pctx->flags,
crypto_ccm_encrypt_done, req);
skcipher_request_set_crypt(skreq, pctx->src, dst, cryptlen + 16, iv);
err = crypto_skcipher_encrypt(skreq);
if (err)
return err;
/* copy authtag to end of dst */
scatterwalk_map_and_copy(odata, sg_next(dst), cryptlen,
crypto_aead_authsize(aead), 1);
return err;
}
| static int crypto_ccm_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
struct skcipher_request *skreq = &pctx->skreq;
struct scatterlist *dst;
unsigned int cryptlen = req->cryptlen;
u8 *odata = pctx->odata;
u8 *iv = req->iv;
int err;
err = crypto_ccm_init_crypt(req, odata);
if (err)
return err;
err = crypto_ccm_auth(req, sg_next(pctx->src), cryptlen);
if (err)
return err;
dst = pctx->src;
if (req->src != req->dst)
dst = pctx->dst;
skcipher_request_set_tfm(skreq, ctx->ctr);
skcipher_request_set_callback(skreq, pctx->flags,
crypto_ccm_encrypt_done, req);
skcipher_request_set_crypt(skreq, pctx->src, dst, cryptlen + 16, iv);
err = crypto_skcipher_encrypt(skreq);
if (err)
return err;
/* copy authtag to end of dst */
scatterwalk_map_and_copy(odata, sg_next(dst), cryptlen,
crypto_aead_authsize(aead), 1);
return err;
}
| C | linux | 0 |
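Note on the CVE-2017-8065 record above: the commit message says the CBC-MAC scratch buffers had to move from the stack into the kmalloc-ed request context because scatterlists need linearly mapped memory, and the function in this row does not show that change. The struct below is a generic, hypothetical illustration of the pattern; it is not the kernel's crypto_ccm_req_priv_ctx.

#include <stdlib.h>

/* Hypothetical per-request context: scratch blocks live in a
 * heap-allocated structure instead of local arrays, so code that needs
 * linearly mapped memory (scatterlists, DMA) never sees a stack address. */
struct mac_request_ctx {
    unsigned char idata[16];    /* input block scratch     */
    unsigned char odata[16];    /* accumulated MAC scratch */
};

static struct mac_request_ctx *mac_request_ctx_alloc(void)
{
    /* kmalloc()/kzalloc() in the kernel; calloc() keeps the sketch
     * self-contained in userspace. */
    return calloc(1, sizeof(struct mac_request_ctx));
}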
CVE-2011-2918 | https://www.cvedetails.com/cve/CVE-2011-2918/ | CWE-399 | https://github.com/torvalds/linux/commit/a8b0ca17b80e92faab46ee7179ba9e99ccb61233 | a8b0ca17b80e92faab46ee7179ba9e99ccb61233 | perf: Remove the nmi parameter from the swevent and overflow interface
The nmi parameter indicated if we could do wakeups from the current
context, if not, we would set some state and self-IPI and let the
resulting interrupt do the wakeup.
For the various event classes:
- hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from
the PMI-tail (ARM etc.)
- tracepoint: nmi=0; since tracepoint could be from NMI context.
- software: nmi=[0,1]; some, like the schedule thing cannot
perform wakeups, and hence need 0.
As one can see, there is very little nmi=1 usage, and the down-side of
not using it is that on some platforms some software events can have a
jiffy delay in wakeup (when arch_irq_work_raise isn't implemented).
The up-side however is that we can remove the nmi parameter and save a
bunch of conditionals in fast paths.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Michael Cree <mcree@orcon.net.nz>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jason Wessel <jason.wessel@windriver.com>
Cc: Don Zickus <dzickus@redhat.com>
Link: http://lkml.kernel.org/n/tip-agjev8eu666tvknpb3iaj0fg@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu> | static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
struct perf_sample_data *data,
struct pt_regs *regs)
{
struct hw_perf_event *hwc = &event->hw;
int throttle = 0;
data->period = event->hw.last_period;
if (!overflow)
overflow = perf_swevent_set_period(event);
if (hwc->interrupts == MAX_INTERRUPTS)
return;
for (; overflow; overflow--) {
if (__perf_event_overflow(event, throttle,
data, regs)) {
/*
* We inhibit the overflow from happening when
* hwc->interrupts == MAX_INTERRUPTS.
*/
break;
}
throttle = 1;
}
}
| static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
int nmi, struct perf_sample_data *data,
struct pt_regs *regs)
{
struct hw_perf_event *hwc = &event->hw;
int throttle = 0;
data->period = event->hw.last_period;
if (!overflow)
overflow = perf_swevent_set_period(event);
if (hwc->interrupts == MAX_INTERRUPTS)
return;
for (; overflow; overflow--) {
if (__perf_event_overflow(event, nmi, throttle,
data, regs)) {
/*
* We inhibit the overflow from happening when
* hwc->interrupts == MAX_INTERRUPTS.
*/
break;
}
throttle = 1;
}
}
| C | linux | 1 |
CVE-2013-6627 | https://www.cvedetails.com/cve/CVE-2013-6627/ | CWE-119 | https://github.com/chromium/chromium/commit/d6805d0d1d21976cf16d0237d9091f7eebea4ea5 | d6805d0d1d21976cf16d0237d9091f7eebea4ea5 | Content Shell: Move shell_layout_tests_android into layout_tests/.
BUG=420994
Review URL: https://codereview.chromium.org/661743002
Cr-Commit-Position: refs/heads/master@{#299892} | bool RunOneTest(const std::string& test_string,
bool* ran_at_least_once,
const scoped_ptr<content::BrowserMainRunner>& main_runner) {
if (test_string.empty())
return true;
if (test_string == "QUIT")
return false;
bool enable_pixel_dumps;
std::string pixel_hash;
base::FilePath cwd;
GURL test_url =
GetURLForLayoutTest(test_string, &cwd, &enable_pixel_dumps, &pixel_hash);
if (!content::WebKitTestController::Get()->PrepareForLayoutTest(
test_url, cwd, enable_pixel_dumps, pixel_hash)) {
return false;
}
*ran_at_least_once = true;
#if defined(OS_ANDROID)
base::RunLoop run_loop;
run_loop.Run();
#else
main_runner->Run();
#endif
if (!content::WebKitTestController::Get()->ResetAfterLayoutTest())
return false;
#if defined(OS_ANDROID)
base::MessageLoop::current()->RunUntilIdle();
#endif
return true;
}
| bool RunOneTest(const std::string& test_string,
bool* ran_at_least_once,
const scoped_ptr<content::BrowserMainRunner>& main_runner) {
if (test_string.empty())
return true;
if (test_string == "QUIT")
return false;
bool enable_pixel_dumps;
std::string pixel_hash;
base::FilePath cwd;
GURL test_url =
GetURLForLayoutTest(test_string, &cwd, &enable_pixel_dumps, &pixel_hash);
if (!content::WebKitTestController::Get()->PrepareForLayoutTest(
test_url, cwd, enable_pixel_dumps, pixel_hash)) {
return false;
}
*ran_at_least_once = true;
#if defined(OS_ANDROID)
base::RunLoop run_loop;
run_loop.Run();
#else
main_runner->Run();
#endif
if (!content::WebKitTestController::Get()->ResetAfterLayoutTest())
return false;
#if defined(OS_ANDROID)
base::MessageLoop::current()->RunUntilIdle();
#endif
return true;
}
| C | Chrome | 0 |
CVE-2015-6768 | https://www.cvedetails.com/cve/CVE-2015-6768/ | CWE-264 | https://github.com/chromium/chromium/commit/4c8b008f055f79e622344627fed7f820375a4f01 | 4c8b008f055f79e622344627fed7f820375a4f01 | Change Document::detach() to RELEASE_ASSERT all subframes are gone.
BUG=556724,577105
Review URL: https://codereview.chromium.org/1667573002
Cr-Commit-Position: refs/heads/master@{#373642} | NodeIntersectionObserverData& Document::ensureIntersectionObserverData()
{
if (!m_intersectionObserverData)
m_intersectionObserverData = new NodeIntersectionObserverData();
return *m_intersectionObserverData;
}
| NodeIntersectionObserverData& Document::ensureIntersectionObserverData()
{
if (!m_intersectionObserverData)
m_intersectionObserverData = new NodeIntersectionObserverData();
return *m_intersectionObserverData;
}
| C | Chrome | 0 |
CVE-2017-16529 | https://www.cvedetails.com/cve/CVE-2017-16529/ | CWE-125 | https://github.com/torvalds/linux/commit/bfc81a8bc18e3c4ba0cbaa7666ff76be2f998991 | bfc81a8bc18e3c4ba0cbaa7666ff76be2f998991 | ALSA: usb-audio: Check out-of-bounds access by corrupted buffer descriptor
When a USB-audio device receives a maliciously adjusted or corrupted
buffer descriptor, the USB-audio driver may access an out-of-bounds
value at its parser. This was detected by syzkaller, something like:
BUG: KASAN: slab-out-of-bounds in usb_audio_probe+0x27b2/0x2ab0
Read of size 1 at addr ffff88006b83a9e8 by task kworker/0:1/24
CPU: 0 PID: 24 Comm: kworker/0:1 Not tainted 4.14.0-rc1-42251-gebb2c2437d80 #224
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
Workqueue: usb_hub_wq hub_event
Call Trace:
__dump_stack lib/dump_stack.c:16
dump_stack+0x292/0x395 lib/dump_stack.c:52
print_address_description+0x78/0x280 mm/kasan/report.c:252
kasan_report_error mm/kasan/report.c:351
kasan_report+0x22f/0x340 mm/kasan/report.c:409
__asan_report_load1_noabort+0x19/0x20 mm/kasan/report.c:427
snd_usb_create_streams sound/usb/card.c:248
usb_audio_probe+0x27b2/0x2ab0 sound/usb/card.c:605
usb_probe_interface+0x35d/0x8e0 drivers/usb/core/driver.c:361
really_probe drivers/base/dd.c:413
driver_probe_device+0x610/0xa00 drivers/base/dd.c:557
__device_attach_driver+0x230/0x290 drivers/base/dd.c:653
bus_for_each_drv+0x161/0x210 drivers/base/bus.c:463
__device_attach+0x26e/0x3d0 drivers/base/dd.c:710
device_initial_probe+0x1f/0x30 drivers/base/dd.c:757
bus_probe_device+0x1eb/0x290 drivers/base/bus.c:523
device_add+0xd0b/0x1660 drivers/base/core.c:1835
usb_set_configuration+0x104e/0x1870 drivers/usb/core/message.c:1932
generic_probe+0x73/0xe0 drivers/usb/core/generic.c:174
usb_probe_device+0xaf/0xe0 drivers/usb/core/driver.c:266
really_probe drivers/base/dd.c:413
driver_probe_device+0x610/0xa00 drivers/base/dd.c:557
__device_attach_driver+0x230/0x290 drivers/base/dd.c:653
bus_for_each_drv+0x161/0x210 drivers/base/bus.c:463
__device_attach+0x26e/0x3d0 drivers/base/dd.c:710
device_initial_probe+0x1f/0x30 drivers/base/dd.c:757
bus_probe_device+0x1eb/0x290 drivers/base/bus.c:523
device_add+0xd0b/0x1660 drivers/base/core.c:1835
usb_new_device+0x7b8/0x1020 drivers/usb/core/hub.c:2457
hub_port_connect drivers/usb/core/hub.c:4903
hub_port_connect_change drivers/usb/core/hub.c:5009
port_event drivers/usb/core/hub.c:5115
hub_event+0x194d/0x3740 drivers/usb/core/hub.c:5195
process_one_work+0xc7f/0x1db0 kernel/workqueue.c:2119
worker_thread+0x221/0x1850 kernel/workqueue.c:2253
kthread+0x3a1/0x470 kernel/kthread.c:231
ret_from_fork+0x2a/0x40 arch/x86/entry/entry_64.S:431
This patch adds checks for out-of-bounds accesses at appropriate
places and bails out when it goes out of the given buffer.
Reported-by: Andrey Konovalov <andreyknvl@google.com>
Tested-by: Andrey Konovalov <andreyknvl@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Takashi Iwai <tiwai@suse.de> | get_alias_quirk(struct usb_device *dev, unsigned int id)
{
const struct usb_device_id *p;
for (p = usb_audio_ids; p->match_flags; p++) {
/* FIXME: this checks only vendor:product pair in the list */
if ((p->match_flags & USB_DEVICE_ID_MATCH_DEVICE) ==
USB_DEVICE_ID_MATCH_DEVICE &&
p->idVendor == USB_ID_VENDOR(id) &&
p->idProduct == USB_ID_PRODUCT(id))
return (const struct snd_usb_audio_quirk *)p->driver_info;
}
return NULL;
}
| get_alias_quirk(struct usb_device *dev, unsigned int id)
{
const struct usb_device_id *p;
for (p = usb_audio_ids; p->match_flags; p++) {
/* FIXME: this checks only vendor:product pair in the list */
if ((p->match_flags & USB_DEVICE_ID_MATCH_DEVICE) ==
USB_DEVICE_ID_MATCH_DEVICE &&
p->idVendor == USB_ID_VENDOR(id) &&
p->idProduct == USB_ID_PRODUCT(id))
return (const struct snd_usb_audio_quirk *)p->driver_info;
}
return NULL;
}
| C | linux | 0 |
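Note on the CVE-2017-16529 record above: the commit message states that the fix adds bounds checks while parsing USB audio descriptors and bails out when a length field points past the supplied buffer, but the function in this row is unrelated to that change. The loop below is only a generic sketch of such a descriptor walk, with hypothetical names.

#include <stddef.h>

/* Hypothetical walk over a device-supplied descriptor blob: every
 * bLength read from the buffer is validated against the bytes that are
 * actually left before anything inside the descriptor is used. */
static int walk_descriptors(const unsigned char *buf, size_t len)
{
    size_t off = 0;
    while (off + 2 <= len) {              /* need bLength + bDescriptorType */
        unsigned char blen = buf[off];
        if (blen < 2 || blen > len - off) /* corrupt length: stop parsing */
            return -1;
        /* ... interpret buf[off .. off + blen) here ... */
        off += blen;
    }
    return 0;
}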
CVE-2016-2429 | https://www.cvedetails.com/cve/CVE-2016-2429/ | CWE-119 | https://android.googlesource.com/platform/external/flac/+/b499389da21d89d32deff500376c5ee4f8f0b04c | b499389da21d89d32deff500376c5ee4f8f0b04c | Avoid free-before-initialize vulnerability in heap
Bug: 27211885
Change-Id: Ib9c93bd9ffdde2a5f8d31a86f06e267dc9c152db
| FLAC_API FLAC__bool FLAC__stream_decoder_set_metadata_respond(FLAC__StreamDecoder *decoder, FLAC__MetadataType type)
{
FLAC__ASSERT(0 != decoder);
FLAC__ASSERT(0 != decoder->private_);
FLAC__ASSERT(0 != decoder->protected_);
FLAC__ASSERT((unsigned)type <= FLAC__MAX_METADATA_TYPE_CODE);
/* double protection */
if((unsigned)type > FLAC__MAX_METADATA_TYPE_CODE)
return false;
if(decoder->protected_->state != FLAC__STREAM_DECODER_UNINITIALIZED)
return false;
decoder->private_->metadata_filter[type] = true;
if(type == FLAC__METADATA_TYPE_APPLICATION)
decoder->private_->metadata_filter_ids_count = 0;
return true;
}
| FLAC_API FLAC__bool FLAC__stream_decoder_set_metadata_respond(FLAC__StreamDecoder *decoder, FLAC__MetadataType type)
{
FLAC__ASSERT(0 != decoder);
FLAC__ASSERT(0 != decoder->private_);
FLAC__ASSERT(0 != decoder->protected_);
FLAC__ASSERT((unsigned)type <= FLAC__MAX_METADATA_TYPE_CODE);
/* double protection */
if((unsigned)type > FLAC__MAX_METADATA_TYPE_CODE)
return false;
if(decoder->protected_->state != FLAC__STREAM_DECODER_UNINITIALIZED)
return false;
decoder->private_->metadata_filter[type] = true;
if(type == FLAC__METADATA_TYPE_APPLICATION)
decoder->private_->metadata_filter_ids_count = 0;
return true;
}
| C | Android | 0 |
CVE-2018-0500 | https://www.cvedetails.com/cve/CVE-2018-0500/ | CWE-119 | https://github.com/curl/curl/commit/ba1dbd78e5f1ed67c1b8d37ac89d90e5e330b628 | ba1dbd78e5f1ed67c1b8d37ac89d90e5e330b628 | smtp: use the upload buffer size for scratch buffer malloc
... not the read buffer size, as that can be set smaller and thus cause
a buffer overflow! CVE-2018-0500
Reported-by: Peter Wu
Bug: https://curl.haxx.se/docs/adv_2018-70a2.html | static CURLcode smtp_perform_starttls(struct connectdata *conn)
{
CURLcode result = CURLE_OK;
/* Send the STARTTLS command */
result = Curl_pp_sendf(&conn->proto.smtpc.pp, "%s", "STARTTLS");
if(!result)
state(conn, SMTP_STARTTLS);
return result;
}
| static CURLcode smtp_perform_starttls(struct connectdata *conn)
{
CURLcode result = CURLE_OK;
/* Send the STARTTLS command */
result = Curl_pp_sendf(&conn->proto.smtpc.pp, "%s", "STARTTLS");
if(!result)
state(conn, SMTP_STARTTLS);
return result;
}
| C | curl | 0 |
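Note on the CVE-2018-0500 record above: the commit message is about sizing a scratch buffer from the buffer whose contents it actually has to hold (the upload buffer) rather than from the separately configurable read buffer; the function in this row does not contain that change. The fragment below is a hedged, generic sketch of the sizing rule with made-up constants, not curl's code.

#include <stdlib.h>

/* Assumed sizes for the sketch only. */
#define UPLOAD_BUFFER_SIZE 65536   /* size of the data being escaped */
#define READ_BUFFER_SIZE   16384   /* unrelated and possibly smaller */

static char *alloc_escape_scratch(void)
{
    /* Escaping (dot-stuffing, CRLF fix-ups) can expand the payload, so
     * the scratch is derived from UPLOAD_BUFFER_SIZE; sizing it from
     * READ_BUFFER_SIZE overflows whenever the upload buffer is larger. */
    return malloc(UPLOAD_BUFFER_SIZE * 2);
}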
CVE-2017-7616 | https://www.cvedetails.com/cve/CVE-2017-7616/ | CWE-388 | https://github.com/torvalds/linux/commit/cf01fb9985e8deb25ccf0ea54d916b8871ae0e62 | cf01fb9985e8deb25ccf0ea54d916b8871ae0e62 | mm/mempolicy.c: fix error handling in set_mempolicy and mbind.
In the case that compat_get_bitmap fails we do not want to copy the
bitmap to the user as it will contain uninitialized stack data and leak
sensitive data.
Signed-off-by: Chris Salls <salls@cs.ucsb.edu>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> | alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
unsigned long addr, int node, bool hugepage)
{
struct mempolicy *pol;
struct page *page;
unsigned int cpuset_mems_cookie;
struct zonelist *zl;
nodemask_t *nmask;
retry_cpuset:
pol = get_vma_policy(vma, addr);
cpuset_mems_cookie = read_mems_allowed_begin();
if (pol->mode == MPOL_INTERLEAVE) {
unsigned nid;
nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
mpol_cond_put(pol);
page = alloc_page_interleave(gfp, order, nid);
goto out;
}
if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
int hpage_node = node;
/*
* For hugepage allocation and non-interleave policy which
* allows the current node (or other explicitly preferred
* node) we only try to allocate from the current/preferred
* node and don't fall back to other nodes, as the cost of
* remote accesses would likely offset THP benefits.
*
* If the policy is interleave, or does not allow the current
* node in its nodemask, we allocate the standard way.
*/
if (pol->mode == MPOL_PREFERRED &&
!(pol->flags & MPOL_F_LOCAL))
hpage_node = pol->v.preferred_node;
nmask = policy_nodemask(gfp, pol);
if (!nmask || node_isset(hpage_node, *nmask)) {
mpol_cond_put(pol);
page = __alloc_pages_node(hpage_node,
gfp | __GFP_THISNODE, order);
goto out;
}
}
nmask = policy_nodemask(gfp, pol);
zl = policy_zonelist(gfp, pol, node);
page = __alloc_pages_nodemask(gfp, order, zl, nmask);
mpol_cond_put(pol);
out:
if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
goto retry_cpuset;
return page;
}
| alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
unsigned long addr, int node, bool hugepage)
{
struct mempolicy *pol;
struct page *page;
unsigned int cpuset_mems_cookie;
struct zonelist *zl;
nodemask_t *nmask;
retry_cpuset:
pol = get_vma_policy(vma, addr);
cpuset_mems_cookie = read_mems_allowed_begin();
if (pol->mode == MPOL_INTERLEAVE) {
unsigned nid;
nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
mpol_cond_put(pol);
page = alloc_page_interleave(gfp, order, nid);
goto out;
}
if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
int hpage_node = node;
/*
* For hugepage allocation and non-interleave policy which
* allows the current node (or other explicitly preferred
* node) we only try to allocate from the current/preferred
* node and don't fall back to other nodes, as the cost of
* remote accesses would likely offset THP benefits.
*
* If the policy is interleave, or does not allow the current
* node in its nodemask, we allocate the standard way.
*/
if (pol->mode == MPOL_PREFERRED &&
!(pol->flags & MPOL_F_LOCAL))
hpage_node = pol->v.preferred_node;
nmask = policy_nodemask(gfp, pol);
if (!nmask || node_isset(hpage_node, *nmask)) {
mpol_cond_put(pol);
page = __alloc_pages_node(hpage_node,
gfp | __GFP_THISNODE, order);
goto out;
}
}
nmask = policy_nodemask(gfp, pol);
zl = policy_zonelist(gfp, pol, node);
page = __alloc_pages_nodemask(gfp, order, zl, nmask);
mpol_cond_put(pol);
out:
if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
goto retry_cpuset;
return page;
}
| C | linux | 0 |
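Note on the CVE-2017-7616 record above: the commit message describes bailing out when compat_get_bitmap() fails instead of continuing and copying an uninitialized stack bitmap back to userspace; the function in this row is a different part of mm/mempolicy.c. The snippet below only sketches that error-handling shape with placeholder helpers, so it is not the actual syscall wrapper.

#include <stddef.h>
#include <string.h>

/* Minimal stand-ins for compat_get_bitmap()/the policy call; they exist
 * only so the sketch compiles and are not kernel functions. */
static long get_bitmap_from_user(unsigned long *dst, const void *src,
                                 size_t bytes)
{
    if (src == NULL)
        return -1;              /* simulate a fault */
    memcpy(dst, src, bytes);
    return 0;
}

static long apply_policy(const unsigned long *bm) { (void)bm; return 0; }

static long compat_set_policy_sketch(const void *user_mask, size_t mask_bytes)
{
    unsigned long bm[8];        /* stack scratch, uninitialized on purpose */
    long err;

    if (mask_bytes > sizeof(bm))
        return -1;

    err = get_bitmap_from_user(bm, user_mask, mask_bytes);
    if (err)
        return err;             /* bail out: never pass bm on (e.g. copy it
                                   back to userspace) while it may still
                                   hold uninitialized stack data */

    return apply_policy(bm);
}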
CVE-2019-5826 | null | null | https://github.com/chromium/chromium/commit/eaf2e8bce3855d362e53034bd83f0e3aff8714e4 | eaf2e8bce3855d362e53034bd83f0e3aff8714e4 | [IndexedDB] Fixed force close during pending connection open
During a force close of the database, the connections to that database
are iterated and force closed. The iteration method was not safe against
modification, and if there was a pending connection waiting to open,
that request would execute once all the other connections were
destroyed and create a new connection.
This change changes the iteration method to account for new connections
that are added during the iteration.
R=cmp@chromium.org
Bug: 941746
Change-Id: If1b3137237dc2920ad369d6ac99c963ed9c57d0c
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1522330
Commit-Queue: Daniel Murphy <dmurph@chromium.org>
Reviewed-by: Chase Phillips <cmp@chromium.org>
Cr-Commit-Position: refs/heads/master@{#640604} | void IndexedDBDatabase::TransactionFinished(
blink::mojom::IDBTransactionMode mode,
bool committed) {
--transaction_count_;
DCHECK_GE(transaction_count_, 0);
if (active_request_ &&
mode == blink::mojom::IDBTransactionMode::VersionChange) {
active_request_->UpgradeTransactionFinished(committed);
}
}
| void IndexedDBDatabase::TransactionFinished(
blink::mojom::IDBTransactionMode mode,
bool committed) {
--transaction_count_;
DCHECK_GE(transaction_count_, 0);
if (active_request_ &&
mode == blink::mojom::IDBTransactionMode::VersionChange) {
active_request_->UpgradeTransactionFinished(committed);
}
}
| C | Chrome | 0 |
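Note on the CVE-2019-5826 record above: the commit message explains that force-closing connections while iterating the connection list was unsafe because a pending open could add a new connection during the teardown; the function in this row is unrelated. The loop below is a small, hypothetical C sketch of a drain-until-empty iteration that tolerates insertions made by the callbacks (the Chromium fix itself is C++ and uses its own containers).

#include <stddef.h>

struct conn {
    struct conn *next;
};

struct conn_list {
    struct conn *head;
};

/* Placeholder close hook; in the scenario described above, closing one
 * connection may trigger work that appends new connections to the list. */
static void force_close(struct conn *c) { (void)c; }

/* Drain until empty: instead of walking a saved iterator, keep taking
 * the current head until the list is empty, so entries added while
 * closing are also processed rather than surviving the force close. */
static void force_close_all(struct conn_list *list)
{
    while (list->head != NULL) {
        struct conn *c = list->head;
        list->head = c->next;   /* unlink first ... */
        c->next = NULL;
        force_close(c);         /* ... then close; the loop condition is
                                   re-evaluated, picking up new entries. */
    }
}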
CVE-2016-10066 | https://www.cvedetails.com/cve/CVE-2016-10066/ | CWE-119 | https://github.com/ImageMagick/ImageMagick/commit/f6e9d0d9955e85bdd7540b251cd50d598dacc5e6 | f6e9d0d9955e85bdd7540b251cd50d598dacc5e6 | null | static Image *ReadMIFFImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
#define BZipMaxExtent(x) ((x)+((x)/100)+600)
#define LZMAMaxExtent(x) ((x)+((x)/3)+128)
#define ZipMaxExtent(x) ((x)+(((x)+7) >> 3)+(((x)+63) >> 6)+11)
#if defined(MAGICKCORE_BZLIB_DELEGATE)
bz_stream
bzip_info;
#endif
char
id[MaxTextExtent],
keyword[MaxTextExtent],
*options;
const unsigned char
*p;
double
version;
GeometryInfo
geometry_info;
Image
*image;
IndexPacket
index;
int
c;
LinkedListInfo
*profiles;
#if defined(MAGICKCORE_LZMA_DELEGATE)
lzma_stream
initialize_lzma = LZMA_STREAM_INIT,
lzma_info;
lzma_allocator
allocator;
#endif
MagickBooleanType
status;
MagickStatusType
flags;
PixelPacket
pixel;
QuantumFormatType
quantum_format;
QuantumInfo
*quantum_info;
QuantumType
quantum_type;
register ssize_t
i;
size_t
length,
packet_size;
ssize_t
count;
unsigned char
*compress_pixels,
*pixels;
size_t
colors;
ssize_t
y;
#if defined(MAGICKCORE_ZLIB_DELEGATE)
z_stream
zip_info;
#endif
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Decode image header; header terminates one character beyond a ':'.
*/
c=ReadBlobByte(image);
if (c == EOF)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
*id='\0';
(void) ResetMagickMemory(keyword,0,sizeof(keyword));
version=0.0;
(void) version;
do
{
/*
Decode image header; header terminates one character beyond a ':'.
*/
length=MaxTextExtent;
options=AcquireString((char *) NULL);
quantum_format=UndefinedQuantumFormat;
profiles=(LinkedListInfo *) NULL;
colors=0;
image->depth=8UL;
image->compression=NoCompression;
while ((isgraph(c) != MagickFalse) && (c != (int) ':'))
{
register char
*p;
if (c == (int) '{')
{
char
*comment;
/*
Read comment-- any text between { }.
*/
length=MaxTextExtent;
comment=AcquireString((char *) NULL);
for (p=comment; comment != (char *) NULL; p++)
{
c=ReadBlobByte(image);
if (c == (int) '\\')
c=ReadBlobByte(image);
else
if ((c == EOF) || (c == (int) '}'))
break;
if ((size_t) (p-comment+1) >= length)
{
*p='\0';
length<<=1;
comment=(char *) ResizeQuantumMemory(comment,length+
MaxTextExtent,sizeof(*comment));
if (comment == (char *) NULL)
break;
p=comment+strlen(comment);
}
*p=(char) c;
}
if (comment == (char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
*p='\0';
(void) SetImageProperty(image,"comment",comment);
comment=DestroyString(comment);
c=ReadBlobByte(image);
}
else
if (isalnum(c) != MagickFalse)
{
/*
Get the keyword.
*/
p=keyword;
do
{
if (c == (int) '=')
break;
if ((size_t) (p-keyword) < (MaxTextExtent-1))
*p++=(char) c;
c=ReadBlobByte(image);
} while (c != EOF);
*p='\0';
p=options;
while ((isspace((int) ((unsigned char) c)) != 0) && (c != EOF))
c=ReadBlobByte(image);
if (c == (int) '=')
{
/*
Get the keyword value.
*/
c=ReadBlobByte(image);
while ((c != (int) '}') && (c != EOF))
{
if ((size_t) (p-options+1) >= length)
{
*p='\0';
length<<=1;
options=(char *) ResizeQuantumMemory(options,length+
MaxTextExtent,sizeof(*options));
if (options == (char *) NULL)
break;
p=options+strlen(options);
}
if (options == (char *) NULL)
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
*p++=(char) c;
c=ReadBlobByte(image);
if (c == '\\')
{
c=ReadBlobByte(image);
if (c == (int) '}')
{
*p++=(char) c;
c=ReadBlobByte(image);
}
}
if (*options != '{')
if (isspace((int) ((unsigned char) c)) != 0)
break;
}
}
*p='\0';
if (*options == '{')
(void) CopyMagickString(options,options+1,strlen(options));
/*
Assign a value to the specified keyword.
*/
switch (*keyword)
{
case 'b':
case 'B':
{
if (LocaleCompare(keyword,"background-color") == 0)
{
(void) QueryColorDatabase(options,&image->background_color,
exception);
break;
}
if (LocaleCompare(keyword,"blue-primary") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.blue_primary.x=geometry_info.rho;
image->chromaticity.blue_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.blue_primary.y=
image->chromaticity.blue_primary.x;
break;
}
if (LocaleCompare(keyword,"border-color") == 0)
{
(void) QueryColorDatabase(options,&image->border_color,
exception);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'c':
case 'C':
{
if (LocaleCompare(keyword,"class") == 0)
{
ssize_t
storage_class;
storage_class=ParseCommandOption(MagickClassOptions,
MagickFalse,options);
if (storage_class < 0)
break;
image->storage_class=(ClassType) storage_class;
break;
}
if (LocaleCompare(keyword,"colors") == 0)
{
colors=StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"colorspace") == 0)
{
ssize_t
colorspace;
colorspace=ParseCommandOption(MagickColorspaceOptions,
MagickFalse,options);
if (colorspace < 0)
break;
image->colorspace=(ColorspaceType) colorspace;
break;
}
if (LocaleCompare(keyword,"compression") == 0)
{
ssize_t
compression;
compression=ParseCommandOption(MagickCompressOptions,
MagickFalse,options);
if (compression < 0)
break;
image->compression=(CompressionType) compression;
break;
}
if (LocaleCompare(keyword,"columns") == 0)
{
image->columns=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'd':
case 'D':
{
if (LocaleCompare(keyword,"delay") == 0)
{
image->delay=StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"depth") == 0)
{
image->depth=StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"dispose") == 0)
{
ssize_t
dispose;
dispose=ParseCommandOption(MagickDisposeOptions,MagickFalse,
options);
if (dispose < 0)
break;
image->dispose=(DisposeType) dispose;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'e':
case 'E':
{
if (LocaleCompare(keyword,"endian") == 0)
{
ssize_t
endian;
endian=ParseCommandOption(MagickEndianOptions,MagickFalse,
options);
if (endian < 0)
break;
image->endian=(EndianType) endian;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'g':
case 'G':
{
if (LocaleCompare(keyword,"gamma") == 0)
{
image->gamma=StringToDouble(options,(char **) NULL);
break;
}
if (LocaleCompare(keyword,"gravity") == 0)
{
ssize_t
gravity;
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,
options);
if (gravity < 0)
break;
image->gravity=(GravityType) gravity;
break;
}
if (LocaleCompare(keyword,"green-primary") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.green_primary.x=geometry_info.rho;
image->chromaticity.green_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.green_primary.y=
image->chromaticity.green_primary.x;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'i':
case 'I':
{
if (LocaleCompare(keyword,"id") == 0)
{
(void) CopyMagickString(id,options,MaxTextExtent);
break;
}
if (LocaleCompare(keyword,"iterations") == 0)
{
image->iterations=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'm':
case 'M':
{
if (LocaleCompare(keyword,"matte") == 0)
{
ssize_t
matte;
matte=ParseCommandOption(MagickBooleanOptions,MagickFalse,
options);
if (matte < 0)
break;
image->matte=(MagickBooleanType) matte;
break;
}
if (LocaleCompare(keyword,"matte-color") == 0)
{
(void) QueryColorDatabase(options,&image->matte_color,
exception);
break;
}
if (LocaleCompare(keyword,"montage") == 0)
{
(void) CloneString(&image->montage,options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'o':
case 'O':
{
if (LocaleCompare(keyword,"opaque") == 0)
{
ssize_t
matte;
matte=ParseCommandOption(MagickBooleanOptions,MagickFalse,
options);
if (matte < 0)
break;
image->matte=(MagickBooleanType) matte;
break;
}
if (LocaleCompare(keyword,"orientation") == 0)
{
ssize_t
orientation;
orientation=ParseCommandOption(MagickOrientationOptions,
MagickFalse,options);
if (orientation < 0)
break;
image->orientation=(OrientationType) orientation;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'p':
case 'P':
{
if (LocaleCompare(keyword,"page") == 0)
{
char
*geometry;
geometry=GetPageGeometry(options);
(void) ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
break;
}
if (LocaleCompare(keyword,"pixel-intensity") == 0)
{
ssize_t
intensity;
intensity=ParseCommandOption(MagickPixelIntensityOptions,
MagickFalse,options);
if (intensity < 0)
break;
image->intensity=(PixelIntensityMethod) intensity;
break;
}
if ((LocaleNCompare(keyword,"profile:",8) == 0) ||
(LocaleNCompare(keyword,"profile-",8) == 0))
{
StringInfo
*profile;
if (profiles == (LinkedListInfo *) NULL)
profiles=NewLinkedList(0);
(void) AppendValueToLinkedList(profiles,
AcquireString(keyword+8));
profile=BlobToStringInfo((const void *) NULL,(size_t)
StringToLong(options));
if (profile == (StringInfo *) NULL)
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
(void) SetImageProfile(image,keyword+8,profile);
profile=DestroyStringInfo(profile);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'q':
case 'Q':
{
if (LocaleCompare(keyword,"quality") == 0)
{
image->quality=StringToUnsignedLong(options);
break;
}
if ((LocaleCompare(keyword,"quantum-format") == 0) ||
(LocaleCompare(keyword,"quantum:format") == 0))
{
ssize_t
format;
format=ParseCommandOption(MagickQuantumFormatOptions,
MagickFalse,options);
if (format < 0)
break;
quantum_format=(QuantumFormatType) format;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'r':
case 'R':
{
if (LocaleCompare(keyword,"red-primary") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.red_primary.x=geometry_info.rho;
image->chromaticity.red_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.red_primary.y=
image->chromaticity.red_primary.x;
break;
}
if (LocaleCompare(keyword,"rendering-intent") == 0)
{
ssize_t
rendering_intent;
rendering_intent=ParseCommandOption(MagickIntentOptions,
MagickFalse,options);
if (rendering_intent < 0)
break;
image->rendering_intent=(RenderingIntent) rendering_intent;
break;
}
if (LocaleCompare(keyword,"resolution") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->x_resolution=geometry_info.rho;
image->y_resolution=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->y_resolution=image->x_resolution;
break;
}
if (LocaleCompare(keyword,"rows") == 0)
{
image->rows=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 's':
case 'S':
{
if (LocaleCompare(keyword,"scene") == 0)
{
image->scene=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 't':
case 'T':
{
if (LocaleCompare(keyword,"ticks-per-second") == 0)
{
image->ticks_per_second=(ssize_t) StringToLong(options);
break;
}
if (LocaleCompare(keyword,"tile-offset") == 0)
{
char
*geometry;
geometry=GetPageGeometry(options);
(void) ParseAbsoluteGeometry(geometry,&image->tile_offset);
geometry=DestroyString(geometry);
break;
}
if (LocaleCompare(keyword,"type") == 0)
{
ssize_t
type;
type=ParseCommandOption(MagickTypeOptions,MagickFalse,
options);
if (type < 0)
break;
image->type=(ImageType) type;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'u':
case 'U':
{
if (LocaleCompare(keyword,"units") == 0)
{
ssize_t
units;
units=ParseCommandOption(MagickResolutionOptions,
MagickFalse,options);
if (units < 0)
break;
image->units=(ResolutionType) units;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'v':
case 'V':
{
if (LocaleCompare(keyword,"version") == 0)
{
version=StringToDouble(options,(char **) NULL);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'w':
case 'W':
{
if (LocaleCompare(keyword,"white-point") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.white_point.x=geometry_info.rho;
image->chromaticity.white_point.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.white_point.y=
image->chromaticity.white_point.x;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
default:
{
(void) SetImageProperty(image,keyword,options);
break;
}
}
}
else
c=ReadBlobByte(image);
while (isspace((int) ((unsigned char) c)) != 0)
c=ReadBlobByte(image);
}
options=DestroyString(options);
(void) ReadBlobByte(image);
/*
Verify that required image information is defined.
*/
if ((LocaleCompare(id,"ImageMagick") != 0) ||
(image->storage_class == UndefinedClass) ||
(image->columns == 0) || (image->rows == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (image->montage != (char *) NULL)
{
register char
*p;
/*
Image directory.
*/
length=MaxTextExtent;
image->directory=AcquireString((char *) NULL);
p=image->directory;
do
{
*p='\0';
if ((strlen(image->directory)+MaxTextExtent) >= length)
{
/*
Allocate more memory for the image directory.
*/
length<<=1;
image->directory=(char *) ResizeQuantumMemory(image->directory,
length+MaxTextExtent,sizeof(*image->directory));
if (image->directory == (char *) NULL)
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
p=image->directory+strlen(image->directory);
}
c=ReadBlobByte(image);
*p++=(char) c;
} while (c != (int) '\0');
}
if (profiles != (LinkedListInfo *) NULL)
{
const char
*name;
const StringInfo
*profile;
/*
Read image profiles.
*/
ResetLinkedListIterator(profiles);
name=(const char *) GetNextValueInLinkedList(profiles);
while (name != (const char *) NULL)
{
profile=GetImageProfile(image,name);
if (profile != (StringInfo *) NULL)
{
register unsigned char
*p;
p=GetStringInfoDatum(profile);
count=ReadBlob(image,GetStringInfoLength(profile),p);
(void) count;
}
name=(const char *) GetNextValueInLinkedList(profiles);
}
profiles=DestroyLinkedList(profiles,RelinquishMagickMemory);
}
image->depth=GetImageQuantumDepth(image,MagickFalse);
if (image->storage_class == PseudoClass)
{
/*
Create image colormap.
*/
status=AcquireImageColormap(image,colors != 0 ? colors : 256);
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (colors != 0)
{
size_t
packet_size;
unsigned char
*colormap;
/*
Read image colormap from file.
*/
packet_size=(size_t) (3UL*image->depth/8UL);
colormap=(unsigned char *) AcquireQuantumMemory(image->colors,
packet_size*sizeof(*colormap));
if (colormap == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,packet_size*image->colors,colormap);
p=colormap;
switch (image->depth)
{
default:
ThrowReaderException(CorruptImageError,
"ImageDepthNotSupported");
case 8:
{
unsigned char
pixel;
for (i=0; i < (ssize_t) image->colors; i++)
{
p=PushCharPixel(p,&pixel);
image->colormap[i].red=ScaleCharToQuantum(pixel);
p=PushCharPixel(p,&pixel);
image->colormap[i].green=ScaleCharToQuantum(pixel);
p=PushCharPixel(p,&pixel);
image->colormap[i].blue=ScaleCharToQuantum(pixel);
}
break;
}
case 16:
{
unsigned short
pixel;
for (i=0; i < (ssize_t) image->colors; i++)
{
p=PushShortPixel(MSBEndian,p,&pixel);
image->colormap[i].red=ScaleShortToQuantum(pixel);
p=PushShortPixel(MSBEndian,p,&pixel);
image->colormap[i].green=ScaleShortToQuantum(pixel);
p=PushShortPixel(MSBEndian,p,&pixel);
image->colormap[i].blue=ScaleShortToQuantum(pixel);
}
break;
}
case 32:
{
unsigned int
pixel;
for (i=0; i < (ssize_t) image->colors; i++)
{
p=PushLongPixel(MSBEndian,p,&pixel);
image->colormap[i].red=ScaleLongToQuantum(pixel);
p=PushLongPixel(MSBEndian,p,&pixel);
image->colormap[i].green=ScaleLongToQuantum(pixel);
p=PushLongPixel(MSBEndian,p,&pixel);
image->colormap[i].blue=ScaleLongToQuantum(pixel);
}
break;
}
}
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
}
}
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
/*
Allocate image pixels.
*/
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (quantum_format != UndefinedQuantumFormat)
{
status=SetQuantumFormat(image,quantum_info,quantum_format);
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
packet_size=(size_t) (quantum_info->depth/8);
if (image->storage_class == DirectClass)
packet_size=(size_t) (3*quantum_info->depth/8);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
packet_size=quantum_info->depth/8;
if (image->matte != MagickFalse)
packet_size+=quantum_info->depth/8;
if (image->colorspace == CMYKColorspace)
packet_size+=quantum_info->depth/8;
if (image->compression == RLECompression)
packet_size++;
length=image->columns;
length=MagickMax(MagickMax(BZipMaxExtent(packet_size*image->columns),
LZMAMaxExtent(packet_size*image->columns)),ZipMaxExtent(packet_size*
image->columns));
compress_pixels=(unsigned char *) AcquireQuantumMemory(length,
sizeof(*compress_pixels));
if (compress_pixels == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
/*
Read image pixels.
*/
quantum_type=RGBQuantum;
if (image->matte != MagickFalse)
quantum_type=RGBAQuantum;
if (image->colorspace == CMYKColorspace)
{
quantum_type=CMYKQuantum;
if (image->matte != MagickFalse)
quantum_type=CMYKAQuantum;
}
if (IsGrayColorspace(image->colorspace) != MagickFalse)
{
quantum_type=GrayQuantum;
if (image->matte != MagickFalse)
quantum_type=GrayAlphaQuantum;
}
if (image->storage_class == PseudoClass)
{
quantum_type=IndexQuantum;
if (image->matte != MagickFalse)
quantum_type=IndexAlphaQuantum;
}
status=MagickTrue;
(void) ResetMagickMemory(&pixel,0,sizeof(pixel));
#if defined(MAGICKCORE_BZLIB_DELEGATE)
(void) ResetMagickMemory(&bzip_info,0,sizeof(bzip_info));
#endif
#if defined(MAGICKCORE_LZMA_DELEGATE)
(void) ResetMagickMemory(&allocator,0,sizeof(allocator));
#endif
#if defined(MAGICKCORE_ZLIB_DELEGATE)
(void) ResetMagickMemory(&zip_info,0,sizeof(zip_info));
#endif
switch (image->compression)
{
#if defined(MAGICKCORE_BZLIB_DELEGATE)
case BZipCompression:
{
int
code;
bzip_info.bzalloc=AcquireBZIPMemory;
bzip_info.bzfree=RelinquishBZIPMemory;
bzip_info.opaque=(void *) NULL;
code=BZ2_bzDecompressInit(&bzip_info,(int) image_info->verbose,
MagickFalse);
if (code != BZ_OK)
status=MagickFalse;
break;
}
#endif
#if defined(MAGICKCORE_LZMA_DELEGATE)
case LZMACompression:
{
int
code;
allocator.alloc=AcquireLZMAMemory;
allocator.free=RelinquishLZMAMemory;
lzma_info=initialize_lzma;
lzma_info.allocator=(&allocator);
(void) ResetMagickMemory(&allocator,0,sizeof(allocator));
allocator.alloc=AcquireLZMAMemory;
allocator.free=RelinquishLZMAMemory;
lzma_info=initialize_lzma;
lzma_info.allocator=(&allocator);
code=lzma_auto_decoder(&lzma_info,-1,0);
if (code != LZMA_OK)
status=MagickFalse;
break;
}
#endif
#if defined(MAGICKCORE_ZLIB_DELEGATE)
case LZWCompression:
case ZipCompression:
{
int
code;
zip_info.zalloc=AcquireZIPMemory;
zip_info.zfree=RelinquishZIPMemory;
zip_info.opaque=(voidpf) NULL;
code=inflateInit(&zip_info);
if (code != Z_OK)
status=MagickFalse;
break;
}
#endif
case RLECompression:
{
pixel.opacity=(Quantum) TransparentOpacity;
index=(IndexPacket) 0;
break;
}
default:
break;
}
pixels=GetQuantumPixels(quantum_info);
index=(IndexPacket) 0;
length=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
break;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetAuthenticIndexQueue(image);
switch (image->compression)
{
#if defined(MAGICKCORE_BZLIB_DELEGATE)
case BZipCompression:
{
bzip_info.next_out=(char *) pixels;
bzip_info.avail_out=(unsigned int) (packet_size*image->columns);
do
{
if (bzip_info.avail_in == 0)
{
bzip_info.next_in=(char *) compress_pixels;
length=(size_t) BZipMaxExtent(packet_size*image->columns);
if (version != 0.0)
length=(size_t) ReadBlobMSBLong(image);
bzip_info.avail_in=(unsigned int) ReadBlob(image,length,
(unsigned char *) bzip_info.next_in);
}
if (BZ2_bzDecompress(&bzip_info) == BZ_STREAM_END)
break;
} while (bzip_info.avail_out != 0);
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
break;
}
#endif
#if defined(MAGICKCORE_LZMA_DELEGATE)
case LZMACompression:
{
lzma_info.next_out=pixels;
lzma_info.avail_out=packet_size*image->columns;
do
{
int
code;
if (lzma_info.avail_in == 0)
{
lzma_info.next_in=compress_pixels;
length=(size_t) ReadBlobMSBLong(image);
lzma_info.avail_in=(unsigned int) ReadBlob(image,length,
(unsigned char *) lzma_info.next_in);
}
code=lzma_code(&lzma_info,LZMA_RUN);
if (code < 0)
{
status=MagickFalse;
break;
}
if (code == LZMA_STREAM_END)
break;
} while (lzma_info.avail_out != 0);
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
break;
}
#endif
#if defined(MAGICKCORE_ZLIB_DELEGATE)
case LZWCompression:
case ZipCompression:
{
zip_info.next_out=pixels;
zip_info.avail_out=(uInt) (packet_size*image->columns);
do
{
if (zip_info.avail_in == 0)
{
zip_info.next_in=compress_pixels;
length=(size_t) ZipMaxExtent(packet_size*image->columns);
if (version != 0.0)
length=(size_t) ReadBlobMSBLong(image);
zip_info.avail_in=(unsigned int) ReadBlob(image,length,
zip_info.next_in);
}
if (inflate(&zip_info,Z_SYNC_FLUSH) == Z_STREAM_END)
break;
} while (zip_info.avail_out != 0);
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
break;
}
#endif
case RLECompression:
{
for (x=0; x < (ssize_t) image->columns; x++)
{
if (length == 0)
{
count=ReadBlob(image,packet_size,pixels);
PushRunlengthPacket(image,pixels,&length,&pixel,&index);
}
length--;
if ((image->storage_class == PseudoClass) ||
(image->colorspace == CMYKColorspace))
SetPixelIndex(indexes+x,index);
SetPixelRed(q,pixel.red);
SetPixelGreen(q,pixel.green);
SetPixelBlue(q,pixel.blue);
SetPixelOpacity(q,pixel.opacity);
q++;
}
break;
}
default:
{
count=ReadBlob(image,packet_size*image->columns,pixels);
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
break;
}
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
SetQuantumImageType(image,quantum_type);
switch (image->compression)
{
#if defined(MAGICKCORE_BZLIB_DELEGATE)
case BZipCompression:
{
int
code;
if (version == 0.0)
{
MagickOffsetType
offset;
offset=SeekBlob(image,-((MagickOffsetType) bzip_info.avail_in),
SEEK_CUR);
if (offset < 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
code=BZ2_bzDecompressEnd(&bzip_info);
if (code != BZ_OK)
status=MagickFalse;
break;
}
#endif
#if defined(MAGICKCORE_LZMA_DELEGATE)
case LZMACompression:
{
int
code;
code=lzma_code(&lzma_info,LZMA_FINISH);
if ((code != LZMA_STREAM_END) && (code != LZMA_OK))
status=MagickFalse;
lzma_end(&lzma_info);
break;
}
#endif
#if defined(MAGICKCORE_ZLIB_DELEGATE)
case LZWCompression:
case ZipCompression:
{
int
code;
if (version == 0.0)
{
MagickOffsetType
offset;
offset=SeekBlob(image,-((MagickOffsetType) zip_info.avail_in),
SEEK_CUR);
if (offset < 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
code=inflateEnd(&zip_info);
if (code != LZMA_OK)
status=MagickFalse;
break;
}
#endif
default:
break;
}
quantum_info=DestroyQuantumInfo(quantum_info);
compress_pixels=(unsigned char *) RelinquishMagickMemory(compress_pixels);
if (((y != (ssize_t) image->rows)) || (status == MagickFalse))
{
image=DestroyImageList(image);
return((Image *) NULL);
}
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
do
{
c=ReadBlobByte(image);
} while ((isgraph(c) == MagickFalse) && (c != EOF));
if (c != EOF)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while (c != EOF);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
| static Image *ReadMIFFImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
#define BZipMaxExtent(x) ((x)+((x)/100)+600)
#define LZMAMaxExtent(x) ((x)+((x)/3)+128)
#define ZipMaxExtent(x) ((x)+(((x)+7) >> 3)+(((x)+63) >> 6)+11)
#if defined(MAGICKCORE_BZLIB_DELEGATE)
bz_stream
bzip_info;
#endif
char
id[MaxTextExtent],
keyword[MaxTextExtent],
*options;
const unsigned char
*p;
double
version;
GeometryInfo
geometry_info;
Image
*image;
IndexPacket
index;
int
c;
LinkedListInfo
*profiles;
#if defined(MAGICKCORE_LZMA_DELEGATE)
lzma_stream
initialize_lzma = LZMA_STREAM_INIT,
lzma_info;
lzma_allocator
allocator;
#endif
MagickBooleanType
status;
MagickStatusType
flags;
PixelPacket
pixel;
QuantumFormatType
quantum_format;
QuantumInfo
*quantum_info;
QuantumType
quantum_type;
register ssize_t
i;
size_t
length,
packet_size;
ssize_t
count;
unsigned char
*compress_pixels,
*pixels;
size_t
colors;
ssize_t
y;
#if defined(MAGICKCORE_ZLIB_DELEGATE)
z_stream
zip_info;
#endif
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Decode image header; header terminates one character beyond a ':'.
*/
c=ReadBlobByte(image);
if (c == EOF)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
*id='\0';
(void) ResetMagickMemory(keyword,0,sizeof(keyword));
version=0.0;
(void) version;
do
{
/*
Decode image header; header terminates one character beyond a ':'.
*/
length=MaxTextExtent;
options=AcquireString((char *) NULL);
quantum_format=UndefinedQuantumFormat;
profiles=(LinkedListInfo *) NULL;
colors=0;
image->depth=8UL;
image->compression=NoCompression;
while ((isgraph(c) != MagickFalse) && (c != (int) ':'))
{
register char
*p;
if (c == (int) '{')
{
char
*comment;
/*
Read comment-- any text between { }.
*/
length=MaxTextExtent;
comment=AcquireString((char *) NULL);
for (p=comment; comment != (char *) NULL; p++)
{
c=ReadBlobByte(image);
if (c == (int) '\\')
c=ReadBlobByte(image);
else
if ((c == EOF) || (c == (int) '}'))
break;
if ((size_t) (p-comment+1) >= length)
{
*p='\0';
length<<=1;
comment=(char *) ResizeQuantumMemory(comment,length+
MaxTextExtent,sizeof(*comment));
if (comment == (char *) NULL)
break;
p=comment+strlen(comment);
}
*p=(char) c;
}
if (comment == (char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
*p='\0';
(void) SetImageProperty(image,"comment",comment);
comment=DestroyString(comment);
c=ReadBlobByte(image);
}
else
if (isalnum(c) != MagickFalse)
{
/*
Get the keyword.
*/
p=keyword;
do
{
if (c == (int) '=')
break;
if ((size_t) (p-keyword) < (MaxTextExtent-1))
*p++=(char) c;
c=ReadBlobByte(image);
} while (c != EOF);
*p='\0';
p=options;
while ((isspace((int) ((unsigned char) c)) != 0) && (c != EOF))
c=ReadBlobByte(image);
if (c == (int) '=')
{
/*
Get the keyword value.
*/
c=ReadBlobByte(image);
while ((c != (int) '}') && (c != EOF))
{
if ((size_t) (p-options+1) >= length)
{
*p='\0';
length<<=1;
options=(char *) ResizeQuantumMemory(options,length+
MaxTextExtent,sizeof(*options));
if (options == (char *) NULL)
break;
p=options+strlen(options);
}
if (options == (char *) NULL)
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
*p++=(char) c;
c=ReadBlobByte(image);
if (c == '\\')
{
c=ReadBlobByte(image);
if (c == (int) '}')
{
*p++=(char) c;
c=ReadBlobByte(image);
}
}
if (*options != '{')
if (isspace((int) ((unsigned char) c)) != 0)
break;
}
}
*p='\0';
if (*options == '{')
(void) CopyMagickString(options,options+1,strlen(options));
/*
Assign a value to the specified keyword.
*/
switch (*keyword)
{
case 'b':
case 'B':
{
if (LocaleCompare(keyword,"background-color") == 0)
{
(void) QueryColorDatabase(options,&image->background_color,
exception);
break;
}
if (LocaleCompare(keyword,"blue-primary") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.blue_primary.x=geometry_info.rho;
image->chromaticity.blue_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.blue_primary.y=
image->chromaticity.blue_primary.x;
break;
}
if (LocaleCompare(keyword,"border-color") == 0)
{
(void) QueryColorDatabase(options,&image->border_color,
exception);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'c':
case 'C':
{
if (LocaleCompare(keyword,"class") == 0)
{
ssize_t
storage_class;
storage_class=ParseCommandOption(MagickClassOptions,
MagickFalse,options);
if (storage_class < 0)
break;
image->storage_class=(ClassType) storage_class;
break;
}
if (LocaleCompare(keyword,"colors") == 0)
{
colors=StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"colorspace") == 0)
{
ssize_t
colorspace;
colorspace=ParseCommandOption(MagickColorspaceOptions,
MagickFalse,options);
if (colorspace < 0)
break;
image->colorspace=(ColorspaceType) colorspace;
break;
}
if (LocaleCompare(keyword,"compression") == 0)
{
ssize_t
compression;
compression=ParseCommandOption(MagickCompressOptions,
MagickFalse,options);
if (compression < 0)
break;
image->compression=(CompressionType) compression;
break;
}
if (LocaleCompare(keyword,"columns") == 0)
{
image->columns=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'd':
case 'D':
{
if (LocaleCompare(keyword,"delay") == 0)
{
image->delay=StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"depth") == 0)
{
image->depth=StringToUnsignedLong(options);
break;
}
if (LocaleCompare(keyword,"dispose") == 0)
{
ssize_t
dispose;
dispose=ParseCommandOption(MagickDisposeOptions,MagickFalse,
options);
if (dispose < 0)
break;
image->dispose=(DisposeType) dispose;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'e':
case 'E':
{
if (LocaleCompare(keyword,"endian") == 0)
{
ssize_t
endian;
endian=ParseCommandOption(MagickEndianOptions,MagickFalse,
options);
if (endian < 0)
break;
image->endian=(EndianType) endian;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'g':
case 'G':
{
if (LocaleCompare(keyword,"gamma") == 0)
{
image->gamma=StringToDouble(options,(char **) NULL);
break;
}
if (LocaleCompare(keyword,"gravity") == 0)
{
ssize_t
gravity;
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,
options);
if (gravity < 0)
break;
image->gravity=(GravityType) gravity;
break;
}
if (LocaleCompare(keyword,"green-primary") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.green_primary.x=geometry_info.rho;
image->chromaticity.green_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.green_primary.y=
image->chromaticity.green_primary.x;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'i':
case 'I':
{
if (LocaleCompare(keyword,"id") == 0)
{
(void) CopyMagickString(id,options,MaxTextExtent);
break;
}
if (LocaleCompare(keyword,"iterations") == 0)
{
image->iterations=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'm':
case 'M':
{
if (LocaleCompare(keyword,"matte") == 0)
{
ssize_t
matte;
matte=ParseCommandOption(MagickBooleanOptions,MagickFalse,
options);
if (matte < 0)
break;
image->matte=(MagickBooleanType) matte;
break;
}
if (LocaleCompare(keyword,"matte-color") == 0)
{
(void) QueryColorDatabase(options,&image->matte_color,
exception);
break;
}
if (LocaleCompare(keyword,"montage") == 0)
{
(void) CloneString(&image->montage,options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'o':
case 'O':
{
if (LocaleCompare(keyword,"opaque") == 0)
{
ssize_t
matte;
matte=ParseCommandOption(MagickBooleanOptions,MagickFalse,
options);
if (matte < 0)
break;
image->matte=(MagickBooleanType) matte;
break;
}
if (LocaleCompare(keyword,"orientation") == 0)
{
ssize_t
orientation;
orientation=ParseCommandOption(MagickOrientationOptions,
MagickFalse,options);
if (orientation < 0)
break;
image->orientation=(OrientationType) orientation;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'p':
case 'P':
{
if (LocaleCompare(keyword,"page") == 0)
{
char
*geometry;
geometry=GetPageGeometry(options);
(void) ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
break;
}
if (LocaleCompare(keyword,"pixel-intensity") == 0)
{
ssize_t
intensity;
intensity=ParseCommandOption(MagickPixelIntensityOptions,
MagickFalse,options);
if (intensity < 0)
break;
image->intensity=(PixelIntensityMethod) intensity;
break;
}
if ((LocaleNCompare(keyword,"profile:",8) == 0) ||
(LocaleNCompare(keyword,"profile-",8) == 0))
{
StringInfo
*profile;
if (profiles == (LinkedListInfo *) NULL)
profiles=NewLinkedList(0);
(void) AppendValueToLinkedList(profiles,
AcquireString(keyword+8));
profile=BlobToStringInfo((const void *) NULL,(size_t)
StringToLong(options));
if (profile == (StringInfo *) NULL)
ThrowReaderException(ResourceLimitError,
"MemoryAllocationFailed");
(void) SetImageProfile(image,keyword+8,profile);
profile=DestroyStringInfo(profile);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'q':
case 'Q':
{
if (LocaleCompare(keyword,"quality") == 0)
{
image->quality=StringToUnsignedLong(options);
break;
}
if ((LocaleCompare(keyword,"quantum-format") == 0) ||
(LocaleCompare(keyword,"quantum:format") == 0))
{
ssize_t
format;
format=ParseCommandOption(MagickQuantumFormatOptions,
MagickFalse,options);
if (format < 0)
break;
quantum_format=(QuantumFormatType) format;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'r':
case 'R':
{
if (LocaleCompare(keyword,"red-primary") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.red_primary.x=geometry_info.rho;
image->chromaticity.red_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.red_primary.y=
image->chromaticity.red_primary.x;
break;
}
if (LocaleCompare(keyword,"rendering-intent") == 0)
{
ssize_t
rendering_intent;
rendering_intent=ParseCommandOption(MagickIntentOptions,
MagickFalse,options);
if (rendering_intent < 0)
break;
image->rendering_intent=(RenderingIntent) rendering_intent;
break;
}
if (LocaleCompare(keyword,"resolution") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->x_resolution=geometry_info.rho;
image->y_resolution=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->y_resolution=image->x_resolution;
break;
}
if (LocaleCompare(keyword,"rows") == 0)
{
image->rows=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 's':
case 'S':
{
if (LocaleCompare(keyword,"scene") == 0)
{
image->scene=StringToUnsignedLong(options);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 't':
case 'T':
{
if (LocaleCompare(keyword,"ticks-per-second") == 0)
{
image->ticks_per_second=(ssize_t) StringToLong(options);
break;
}
if (LocaleCompare(keyword,"tile-offset") == 0)
{
char
*geometry;
geometry=GetPageGeometry(options);
(void) ParseAbsoluteGeometry(geometry,&image->tile_offset);
geometry=DestroyString(geometry);
break;
}
if (LocaleCompare(keyword,"type") == 0)
{
ssize_t
type;
type=ParseCommandOption(MagickTypeOptions,MagickFalse,
options);
if (type < 0)
break;
image->type=(ImageType) type;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'u':
case 'U':
{
if (LocaleCompare(keyword,"units") == 0)
{
ssize_t
units;
units=ParseCommandOption(MagickResolutionOptions,
MagickFalse,options);
if (units < 0)
break;
image->units=(ResolutionType) units;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'v':
case 'V':
{
if (LocaleCompare(keyword,"version") == 0)
{
version=StringToDouble(options,(char **) NULL);
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
case 'w':
case 'W':
{
if (LocaleCompare(keyword,"white-point") == 0)
{
flags=ParseGeometry(options,&geometry_info);
image->chromaticity.white_point.x=geometry_info.rho;
image->chromaticity.white_point.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.white_point.y=
image->chromaticity.white_point.x;
break;
}
(void) SetImageProperty(image,keyword,options);
break;
}
default:
{
(void) SetImageProperty(image,keyword,options);
break;
}
}
}
else
c=ReadBlobByte(image);
while (isspace((int) ((unsigned char) c)) != 0)
c=ReadBlobByte(image);
}
options=DestroyString(options);
(void) ReadBlobByte(image);
/*
Verify that required image information is defined.
*/
if ((LocaleCompare(id,"ImageMagick") != 0) ||
(image->storage_class == UndefinedClass) ||
(image->columns == 0) || (image->rows == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (image->montage != (char *) NULL)
{
register char
*p;
/*
Image directory.
*/
length=MaxTextExtent;
image->directory=AcquireString((char *) NULL);
p=image->directory;
do
{
*p='\0';
if ((strlen(image->directory)+MaxTextExtent) >= length)
{
/*
Allocate more memory for the image directory.
*/
length<<=1;
image->directory=(char *) ResizeQuantumMemory(image->directory,
length+MaxTextExtent,sizeof(*image->directory));
if (image->directory == (char *) NULL)
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
p=image->directory+strlen(image->directory);
}
c=ReadBlobByte(image);
*p++=(char) c;
} while (c != (int) '\0');
}
if (profiles != (LinkedListInfo *) NULL)
{
const char
*name;
const StringInfo
*profile;
/*
Read image profiles.
*/
ResetLinkedListIterator(profiles);
name=(const char *) GetNextValueInLinkedList(profiles);
while (name != (const char *) NULL)
{
profile=GetImageProfile(image,name);
if (profile != (StringInfo *) NULL)
{
register unsigned char
*p;
p=GetStringInfoDatum(profile);
count=ReadBlob(image,GetStringInfoLength(profile),p);
(void) count;
}
name=(const char *) GetNextValueInLinkedList(profiles);
}
profiles=DestroyLinkedList(profiles,RelinquishMagickMemory);
}
image->depth=GetImageQuantumDepth(image,MagickFalse);
if (image->storage_class == PseudoClass)
{
/*
Create image colormap.
*/
status=AcquireImageColormap(image,colors != 0 ? colors : 256);
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (colors != 0)
{
size_t
packet_size;
unsigned char
*colormap;
/*
Read image colormap from file.
*/
packet_size=(size_t) (3UL*image->depth/8UL);
colormap=(unsigned char *) AcquireQuantumMemory(image->colors,
packet_size*sizeof(*colormap));
if (colormap == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,packet_size*image->colors,colormap);
p=colormap;
switch (image->depth)
{
default:
ThrowReaderException(CorruptImageError,
"ImageDepthNotSupported");
case 8:
{
unsigned char
pixel;
for (i=0; i < (ssize_t) image->colors; i++)
{
p=PushCharPixel(p,&pixel);
image->colormap[i].red=ScaleCharToQuantum(pixel);
p=PushCharPixel(p,&pixel);
image->colormap[i].green=ScaleCharToQuantum(pixel);
p=PushCharPixel(p,&pixel);
image->colormap[i].blue=ScaleCharToQuantum(pixel);
}
break;
}
case 16:
{
unsigned short
pixel;
for (i=0; i < (ssize_t) image->colors; i++)
{
p=PushShortPixel(MSBEndian,p,&pixel);
image->colormap[i].red=ScaleShortToQuantum(pixel);
p=PushShortPixel(MSBEndian,p,&pixel);
image->colormap[i].green=ScaleShortToQuantum(pixel);
p=PushShortPixel(MSBEndian,p,&pixel);
image->colormap[i].blue=ScaleShortToQuantum(pixel);
}
break;
}
case 32:
{
unsigned int
pixel;
for (i=0; i < (ssize_t) image->colors; i++)
{
p=PushLongPixel(MSBEndian,p,&pixel);
image->colormap[i].red=ScaleLongToQuantum(pixel);
p=PushLongPixel(MSBEndian,p,&pixel);
image->colormap[i].green=ScaleLongToQuantum(pixel);
p=PushLongPixel(MSBEndian,p,&pixel);
image->colormap[i].blue=ScaleLongToQuantum(pixel);
}
break;
}
}
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
}
}
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
/*
Allocate image pixels.
*/
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if (quantum_format != UndefinedQuantumFormat)
{
status=SetQuantumFormat(image,quantum_info,quantum_format);
if (status == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
packet_size=(size_t) (quantum_info->depth/8);
if (image->storage_class == DirectClass)
packet_size=(size_t) (3*quantum_info->depth/8);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
packet_size=quantum_info->depth/8;
if (image->matte != MagickFalse)
packet_size+=quantum_info->depth/8;
if (image->colorspace == CMYKColorspace)
packet_size+=quantum_info->depth/8;
if (image->compression == RLECompression)
packet_size++;
length=image->columns;
length=MagickMax(MagickMax(BZipMaxExtent(packet_size*image->columns),
LZMAMaxExtent(packet_size*image->columns)),ZipMaxExtent(packet_size*
image->columns));
compress_pixels=(unsigned char *) AcquireQuantumMemory(length,
sizeof(*compress_pixels));
if (compress_pixels == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
/*
Read image pixels.
*/
quantum_type=RGBQuantum;
if (image->matte != MagickFalse)
quantum_type=RGBAQuantum;
if (image->colorspace == CMYKColorspace)
{
quantum_type=CMYKQuantum;
if (image->matte != MagickFalse)
quantum_type=CMYKAQuantum;
}
if (IsGrayColorspace(image->colorspace) != MagickFalse)
{
quantum_type=GrayQuantum;
if (image->matte != MagickFalse)
quantum_type=GrayAlphaQuantum;
}
if (image->storage_class == PseudoClass)
{
quantum_type=IndexQuantum;
if (image->matte != MagickFalse)
quantum_type=IndexAlphaQuantum;
}
status=MagickTrue;
(void) ResetMagickMemory(&pixel,0,sizeof(pixel));
#if defined(MAGICKCORE_BZLIB_DELEGATE)
(void) ResetMagickMemory(&bzip_info,0,sizeof(bzip_info));
#endif
#if defined(MAGICKCORE_LZMA_DELEGATE)
(void) ResetMagickMemory(&allocator,0,sizeof(allocator));
#endif
#if defined(MAGICKCORE_ZLIB_DELEGATE)
(void) ResetMagickMemory(&zip_info,0,sizeof(zip_info));
#endif
switch (image->compression)
{
#if defined(MAGICKCORE_BZLIB_DELEGATE)
case BZipCompression:
{
int
code;
bzip_info.bzalloc=AcquireBZIPMemory;
bzip_info.bzfree=RelinquishBZIPMemory;
bzip_info.opaque=(void *) NULL;
code=BZ2_bzDecompressInit(&bzip_info,(int) image_info->verbose,
MagickFalse);
if (code != BZ_OK)
status=MagickFalse;
break;
}
#endif
#if defined(MAGICKCORE_LZMA_DELEGATE)
case LZMACompression:
{
int
code;
allocator.alloc=AcquireLZMAMemory;
allocator.free=RelinquishLZMAMemory;
lzma_info=initialize_lzma;
lzma_info.allocator=(&allocator);
(void) ResetMagickMemory(&allocator,0,sizeof(allocator));
allocator.alloc=AcquireLZMAMemory;
allocator.free=RelinquishLZMAMemory;
lzma_info=initialize_lzma;
lzma_info.allocator=(&allocator);
code=lzma_auto_decoder(&lzma_info,-1,0);
if (code != LZMA_OK)
status=MagickFalse;
break;
}
#endif
#if defined(MAGICKCORE_ZLIB_DELEGATE)
case LZWCompression:
case ZipCompression:
{
int
code;
zip_info.zalloc=AcquireZIPMemory;
zip_info.zfree=RelinquishZIPMemory;
zip_info.opaque=(voidpf) NULL;
code=inflateInit(&zip_info);
if (code != Z_OK)
status=MagickFalse;
break;
}
#endif
case RLECompression:
{
pixel.opacity=(Quantum) TransparentOpacity;
index=(IndexPacket) 0;
break;
}
default:
break;
}
pixels=GetQuantumPixels(quantum_info);
index=(IndexPacket) 0;
length=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*restrict indexes;
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
break;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetAuthenticIndexQueue(image);
switch (image->compression)
{
#if defined(MAGICKCORE_BZLIB_DELEGATE)
case BZipCompression:
{
bzip_info.next_out=(char *) pixels;
bzip_info.avail_out=(unsigned int) (packet_size*image->columns);
do
{
if (bzip_info.avail_in == 0)
{
bzip_info.next_in=(char *) compress_pixels;
length=(size_t) BZipMaxExtent(packet_size*image->columns);
if (version != 0.0)
length=(size_t) ReadBlobMSBLong(image);
bzip_info.avail_in=(unsigned int) ReadBlob(image,length,
(unsigned char *) bzip_info.next_in);
}
if (BZ2_bzDecompress(&bzip_info) == BZ_STREAM_END)
break;
} while (bzip_info.avail_out != 0);
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
break;
}
#endif
#if defined(MAGICKCORE_LZMA_DELEGATE)
case LZMACompression:
{
lzma_info.next_out=pixels;
lzma_info.avail_out=packet_size*image->columns;
do
{
int
code;
if (lzma_info.avail_in == 0)
{
lzma_info.next_in=compress_pixels;
length=(size_t) ReadBlobMSBLong(image);
lzma_info.avail_in=(unsigned int) ReadBlob(image,length,
(unsigned char *) lzma_info.next_in);
}
code=lzma_code(&lzma_info,LZMA_RUN);
if (code < 0)
{
status=MagickFalse;
break;
}
if (code == LZMA_STREAM_END)
break;
} while (lzma_info.avail_out != 0);
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
break;
}
#endif
#if defined(MAGICKCORE_ZLIB_DELEGATE)
case LZWCompression:
case ZipCompression:
{
zip_info.next_out=pixels;
zip_info.avail_out=(uInt) (packet_size*image->columns);
do
{
if (zip_info.avail_in == 0)
{
zip_info.next_in=compress_pixels;
length=(size_t) ZipMaxExtent(packet_size*image->columns);
if (version != 0.0)
length=(size_t) ReadBlobMSBLong(image);
zip_info.avail_in=(unsigned int) ReadBlob(image,length,
zip_info.next_in);
}
if (inflate(&zip_info,Z_SYNC_FLUSH) == Z_STREAM_END)
break;
} while (zip_info.avail_out != 0);
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
break;
}
#endif
case RLECompression:
{
for (x=0; x < (ssize_t) image->columns; x++)
{
if (length == 0)
{
count=ReadBlob(image,packet_size,pixels);
PushRunlengthPacket(image,pixels,&length,&pixel,&index);
}
length--;
if ((image->storage_class == PseudoClass) ||
(image->colorspace == CMYKColorspace))
SetPixelIndex(indexes+x,index);
SetPixelRed(q,pixel.red);
SetPixelGreen(q,pixel.green);
SetPixelBlue(q,pixel.blue);
SetPixelOpacity(q,pixel.opacity);
q++;
}
break;
}
default:
{
count=ReadBlob(image,packet_size*image->columns,pixels);
(void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,
quantum_type,pixels,exception);
break;
}
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
SetQuantumImageType(image,quantum_type);
switch (image->compression)
{
#if defined(MAGICKCORE_BZLIB_DELEGATE)
case BZipCompression:
{
int
code;
if (version == 0.0)
{
MagickOffsetType
offset;
offset=SeekBlob(image,-((MagickOffsetType) bzip_info.avail_in),
SEEK_CUR);
if (offset < 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
code=BZ2_bzDecompressEnd(&bzip_info);
if (code != BZ_OK)
status=MagickFalse;
break;
}
#endif
#if defined(MAGICKCORE_LZMA_DELEGATE)
case LZMACompression:
{
int
code;
code=lzma_code(&lzma_info,LZMA_FINISH);
if ((code != LZMA_STREAM_END) && (code != LZMA_OK))
status=MagickFalse;
lzma_end(&lzma_info);
break;
}
#endif
#if defined(MAGICKCORE_ZLIB_DELEGATE)
case LZWCompression:
case ZipCompression:
{
int
code;
if (version == 0.0)
{
MagickOffsetType
offset;
offset=SeekBlob(image,-((MagickOffsetType) zip_info.avail_in),
SEEK_CUR);
if (offset < 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
code=inflateEnd(&zip_info);
if (code != LZMA_OK)
status=MagickFalse;
break;
}
#endif
default:
break;
}
quantum_info=DestroyQuantumInfo(quantum_info);
compress_pixels=(unsigned char *) RelinquishMagickMemory(compress_pixels);
if (((y != (ssize_t) image->rows)) || (status == MagickFalse))
{
image=DestroyImageList(image);
return((Image *) NULL);
}
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
do
{
c=ReadBlobByte(image);
} while ((isgraph(c) == MagickFalse) && (c != EOF));
if (c != EOF)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while (c != EOF);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
| C | ImageMagick | 1 |
CVE-2015-6787 | https://www.cvedetails.com/cve/CVE-2015-6787/ | null | https://github.com/chromium/chromium/commit/f911e11e7f6b5c0d6f5ee694a9871de6619889f7 | f911e11e7f6b5c0d6f5ee694a9871de6619889f7 | Reland "[CI] Make paint property nodes non-ref-counted"
This reverts commit 887383b30842d9d9006e11bb6932660a3cb5b1b7.
Reason for revert: Retry in M69.
Original change's description:
> Revert "[CI] Make paint property nodes non-ref-counted"
>
> This reverts commit 70fc0b018c9517558b7aa2be00edf2debb449123.
>
> Reason for revert: Caused bugs found by clusterfuzz
>
> Original change's description:
> > [CI] Make paint property nodes non-ref-counted
> >
> > Now all paint property nodes are owned by ObjectPaintProperties
> > (and LocalFrameView temporarily before removing non-RLS mode).
> > Others just use raw pointers or references.
> >
> > Bug: 833496
> > Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
> > Change-Id: I2d544fe153bb94698623248748df63c8aa2081ae
> > Reviewed-on: https://chromium-review.googlesource.com/1031101
> > Reviewed-by: Tien-Ren Chen <trchen@chromium.org>
> > Commit-Queue: Xianzhu Wang <wangxianzhu@chromium.org>
> > Cr-Commit-Position: refs/heads/master@{#554626}
>
> TBR=wangxianzhu@chromium.org,trchen@chromium.org,chrishtr@chromium.org
>
> Change-Id: I02bb50d6744cb81a797246a0116b677e80a3c69f
> No-Presubmit: true
> No-Tree-Checks: true
> No-Try: true
> Bug: 833496,837932,837943
> Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
> Reviewed-on: https://chromium-review.googlesource.com/1034292
> Reviewed-by: Xianzhu Wang <wangxianzhu@chromium.org>
> Commit-Queue: Xianzhu Wang <wangxianzhu@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#554653}
TBR=wangxianzhu@chromium.org,trchen@chromium.org,chrishtr@chromium.org
# Not skipping CQ checks because original CL landed > 1 day ago.
Bug: 833496, 837932, 837943
Change-Id: I0b4ef70db1f1f211ba97c30d617225355c750992
Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
Reviewed-on: https://chromium-review.googlesource.com/1083491
Commit-Queue: Xianzhu Wang <wangxianzhu@chromium.org>
Reviewed-by: Xianzhu Wang <wangxianzhu@chromium.org>
Cr-Commit-Position: refs/heads/master@{#563930} | bool PaintPropertyTreeBuilder::ObjectIsRepeatingTableSectionInPagedMedia()
const {
if (!IsRepeatingTableSection(object_))
return false;
if (context_.painting_layer->EnclosingPaginationLayer())
return false;
if (!object_.View()->PageLogicalHeight())
return false;
if (!object_.View()->IsHorizontalWritingMode())
return false;
return true;
}
| bool PaintPropertyTreeBuilder::ObjectIsRepeatingTableSectionInPagedMedia()
const {
if (!IsRepeatingTableSection(object_))
return false;
if (context_.painting_layer->EnclosingPaginationLayer())
return false;
if (!object_.View()->PageLogicalHeight())
return false;
if (!object_.View()->IsHorizontalWritingMode())
return false;
return true;
}
| C | Chrome | 0 |
CVE-2016-2430 | https://www.cvedetails.com/cve/CVE-2016-2430/ | CWE-264 | https://android.googlesource.com/platform/system/core/+/ad54cfed4516292654c997910839153264ae00a0 | ad54cfed4516292654c997910839153264ae00a0 | Don't demangle symbol names.
Bug: http://b/27299236
Change-Id: I26ef47f80d4d6048a316ba51e83365ff65d70439
| Backtrace* Backtrace::Create(pid_t pid, pid_t tid, BacktraceMap* map) {
if (pid == BACKTRACE_CURRENT_PROCESS) {
pid = getpid();
if (tid == BACKTRACE_CURRENT_THREAD) {
tid = gettid();
}
} else if (tid == BACKTRACE_CURRENT_THREAD) {
tid = pid;
}
if (pid == getpid()) {
return new UnwindCurrent(pid, tid, map);
} else {
return new UnwindPtrace(pid, tid, map);
}
}
| Backtrace* Backtrace::Create(pid_t pid, pid_t tid, BacktraceMap* map) {
if (pid == BACKTRACE_CURRENT_PROCESS) {
pid = getpid();
if (tid == BACKTRACE_CURRENT_THREAD) {
tid = gettid();
}
} else if (tid == BACKTRACE_CURRENT_THREAD) {
tid = pid;
}
if (pid == getpid()) {
return new UnwindCurrent(pid, tid, map);
} else {
return new UnwindPtrace(pid, tid, map);
}
}
| C | Android | 0 |
CVE-2018-12714 | https://www.cvedetails.com/cve/CVE-2018-12714/ | CWE-787 | https://github.com/torvalds/linux/commit/81f9c4e4177d31ced6f52a89bb70e93bfb77ca03 | 81f9c4e4177d31ced6f52a89bb70e93bfb77ca03 | Merge tag 'trace-v4.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing fixes from Steven Rostedt:
"This contains a few fixes and a clean up.
- a bad merge caused an "endif" to go in the wrong place in
scripts/Makefile.build
- softirq tracing fix for tracing that corrupts lockdep and causes a
false splat
- histogram documentation typo fixes
- fix a bad memory reference when passing in no filter to the filter
code
- simplify code by using the swap macro instead of open coding the
swap"
* tag 'trace-v4.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
tracing: Fix SKIP_STACK_VALIDATION=1 build due to bad merge with -mrecord-mcount
tracing: Fix some errors in histogram documentation
tracing: Use swap macro in update_max_tr
softirq: Reorder trace_softirqs_on to prevent lockdep splat
tracing: Check for no filter when processing event filters | static void disable_trace_buffered_event(void *data)
{
this_cpu_inc(trace_buffered_event_cnt);
}
| static void disable_trace_buffered_event(void *data)
{
this_cpu_inc(trace_buffered_event_cnt);
}
| C | linux | 0 |
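One item in the CVE-2018-12714 commit message above is "simplify code by using the swap macro instead of open coding the swap" (in update_max_tr). As a quick illustration of what that change amounts to, here is a minimal stand-alone C sketch, not the kernel code itself: the swap() definition mirrors the type-generic helper the kernel provides (__typeof__ is a GCC/Clang extension), while the trace_buf struct and the values are invented for the example.

#include <stdio.h>

/* Type-generic swap, same shape as the kernel helper: exchange two
 * lvalues through a temporary of the same type. */
#define swap(a, b) \
        do { __typeof__(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

struct trace_buf {
        unsigned long entries;
};

int main(void)
{
        struct trace_buf cur = { .entries = 128 };
        struct trace_buf max = { .entries = 64 };

        /* Open-coded form this replaces:
         *   struct trace_buf tmp = cur; cur = max; max = tmp; */
        swap(cur, max);

        printf("cur=%lu max=%lu\n", cur.entries, max.entries);
        return 0;
}

The macro reads as a single statement at the call site, which is the whole point of the cleanup mentioned in the commit.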
CVE-2016-10708 | https://www.cvedetails.com/cve/CVE-2016-10708/ | CWE-476 | https://anongit.mindrot.org/openssh.git/commit/?id=28652bca29046f62c7045e933e6b931de1d16737 | 28652bca29046f62c7045e933e6b931de1d16737 | null | choose_enc(struct sshenc *enc, char *client, char *server)
{
char *name = match_list(client, server, NULL);
if (name == NULL)
return SSH_ERR_NO_CIPHER_ALG_MATCH;
if ((enc->cipher = cipher_by_name(name)) == NULL)
return SSH_ERR_INTERNAL_ERROR;
enc->name = name;
enc->enabled = 0;
enc->iv = NULL;
enc->iv_len = cipher_ivlen(enc->cipher);
enc->key = NULL;
enc->key_len = cipher_keylen(enc->cipher);
enc->block_size = cipher_blocksize(enc->cipher);
return 0;
}
| choose_enc(struct sshenc *enc, char *client, char *server)
{
char *name = match_list(client, server, NULL);
if (name == NULL)
return SSH_ERR_NO_CIPHER_ALG_MATCH;
if ((enc->cipher = cipher_by_name(name)) == NULL)
return SSH_ERR_INTERNAL_ERROR;
enc->name = name;
enc->enabled = 0;
enc->iv = NULL;
enc->iv_len = cipher_ivlen(enc->cipher);
enc->key = NULL;
enc->key_len = cipher_keylen(enc->cipher);
enc->block_size = cipher_blocksize(enc->cipher);
return 0;
}
| C | mindrot | 0 |
CVE-2016-3156 | https://www.cvedetails.com/cve/CVE-2016-3156/ | CWE-399 | https://github.com/torvalds/linux/commit/fbd40ea0180a2d328c5adc61414dc8bab9335ce2 | fbd40ea0180a2d328c5adc61414dc8bab9335ce2 | ipv4: Don't do expensive useless work during inetdev destroy.
When an inetdev is destroyed, every address assigned to the interface
is removed. And in this scenario we do two pointless things which can
be very expensive if the number of assigned interfaces is large:
1) Address promotion. We are deleting all addresses, so there is no
point in doing this.
2) A full nf conntrack table purge for every address. We only need to
do this once, as is already caught by the existing
masq_dev_notifier so masq_inet_event() can skip this.
Reported-by: Solar Designer <solar@openwall.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Tested-by: Cyrill Gorcunov <gorcunov@openvz.org> | static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct in_ifaddr *ifa;
struct in_ifaddr *ifa_existing;
__u32 valid_lft = INFINITY_LIFE_TIME;
__u32 prefered_lft = INFINITY_LIFE_TIME;
ASSERT_RTNL();
ifa = rtm_to_ifaddr(net, nlh, &valid_lft, &prefered_lft);
if (IS_ERR(ifa))
return PTR_ERR(ifa);
ifa_existing = find_matching_ifa(ifa);
if (!ifa_existing) {
/* It would be best to check for !NLM_F_CREATE here but
* userspace already relies on not having to provide this.
*/
set_ifa_lifetime(ifa, valid_lft, prefered_lft);
if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
int ret = ip_mc_config(net->ipv4.mc_autojoin_sk,
true, ifa);
if (ret < 0) {
inet_free_ifa(ifa);
return ret;
}
}
return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid);
} else {
inet_free_ifa(ifa);
if (nlh->nlmsg_flags & NLM_F_EXCL ||
!(nlh->nlmsg_flags & NLM_F_REPLACE))
return -EEXIST;
ifa = ifa_existing;
set_ifa_lifetime(ifa, valid_lft, prefered_lft);
cancel_delayed_work(&check_lifetime_work);
queue_delayed_work(system_power_efficient_wq,
&check_lifetime_work, 0);
rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
}
return 0;
}
| static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct in_ifaddr *ifa;
struct in_ifaddr *ifa_existing;
__u32 valid_lft = INFINITY_LIFE_TIME;
__u32 prefered_lft = INFINITY_LIFE_TIME;
ASSERT_RTNL();
ifa = rtm_to_ifaddr(net, nlh, &valid_lft, &prefered_lft);
if (IS_ERR(ifa))
return PTR_ERR(ifa);
ifa_existing = find_matching_ifa(ifa);
if (!ifa_existing) {
/* It would be best to check for !NLM_F_CREATE here but
* userspace already relies on not having to provide this.
*/
set_ifa_lifetime(ifa, valid_lft, prefered_lft);
if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
int ret = ip_mc_config(net->ipv4.mc_autojoin_sk,
true, ifa);
if (ret < 0) {
inet_free_ifa(ifa);
return ret;
}
}
return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid);
} else {
inet_free_ifa(ifa);
if (nlh->nlmsg_flags & NLM_F_EXCL ||
!(nlh->nlmsg_flags & NLM_F_REPLACE))
return -EEXIST;
ifa = ifa_existing;
set_ifa_lifetime(ifa, valid_lft, prefered_lft);
cancel_delayed_work(&check_lifetime_work);
queue_delayed_work(system_power_efficient_wq,
&check_lifetime_work, 0);
rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
}
return 0;
}
| C | linux | 0 |
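The CVE-2016-3156 commit message above describes an optimization pattern rather than a new API: when the whole in_device is being torn down, per-address promotion and per-address conntrack purging are skipped, because that work is useless and is otherwise repeated for every assigned address. The sketch below only illustrates that "skip per-item work during full teardown" idea with invented types and function names; it is not the actual net/ipv4/devinet.c change.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct addr {
        struct addr *next;
        unsigned int value;
};

struct dev {
        struct addr *addrs;
        bool dead;   /* set once when the whole device is being destroyed */
};

/* Stand-ins for the expensive per-address work. */
static void promote_secondaries(struct dev *d) { (void)d; puts("promote"); }
static void purge_conntrack(void)              { puts("purge conntrack"); }

static void del_one_addr(struct dev *d)
{
        struct addr *a = d->addrs;

        if (a == NULL)
                return;
        d->addrs = a->next;
        free(a);

        if (!d->dead) {
                /* Normal path: one address removed while the device lives on. */
                promote_secondaries(d);
                purge_conntrack();
        }
        /* Teardown path: skip both; one purge is done once, for the device. */
}

static void destroy_dev(struct dev *d)
{
        d->dead = true;
        while (d->addrs != NULL)
                del_one_addr(d);
        purge_conntrack();   /* once for the whole device */
}

int main(void)
{
        struct dev d = { .addrs = NULL, .dead = false };
        for (unsigned int i = 0; i < 3; i++) {
                struct addr *a = malloc(sizeof(*a));
                if (a == NULL)
                        break;
                a->value = i;
                a->next = d.addrs;
                d.addrs = a;
        }
        destroy_dev(&d);
        return 0;
}

With many addresses the difference is the same one the commit cites: O(n) expensive calls collapse to a single purge and no promotion work at all.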
CVE-2018-12684 | https://www.cvedetails.com/cve/CVE-2018-12684/ | CWE-125 | https://github.com/civetweb/civetweb/commit/8fd069f6dedb064339f1091069ac96f3f8bdb552 | 8fd069f6dedb064339f1091069ac96f3f8bdb552 | Check length of memcmp | change_slashes_to_backslashes(char *path)
{
int i;
for (i = 0; path[i] != '\0'; i++) {
if (path[i] == '/') {
path[i] = '\\';
}
/* remove double backslash (check i > 0 to preserve UNC paths,
* like \\server\file.txt) */
if ((path[i] == '\\') && (i > 0)) {
while ((path[i + 1] == '\\') || (path[i + 1] == '/')) {
(void)memmove(path + i + 1, path + i + 2, strlen(path + i + 1));
}
}
}
}
| change_slashes_to_backslashes(char *path)
{
int i;
for (i = 0; path[i] != '\0'; i++) {
if (path[i] == '/') {
path[i] = '\\';
}
/* remove double backslash (check i > 0 to preserve UNC paths,
* like \\server\file.txt) */
if ((path[i] == '\\') && (i > 0)) {
while ((path[i + 1] == '\\') || (path[i + 1] == '/')) {
(void)memmove(path + i + 1, path + i + 2, strlen(path + i + 1));
}
}
}
}
| C | civetweb | 0 |
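The civetweb commit above is titled "Check length of memcmp". The fragment below is a generic, hypothetical illustration of why such a check matters: memcmp does not stop at a NUL byte, so comparing a fixed-length prefix against a shorter buffer can read past the end of that buffer unless the length is verified first. It does not reproduce the actual civetweb call site; the function and argument names are made up.

#include <stdio.h>
#include <string.h>

/* Return 1 if 'uri' starts with 'prefix', without ever letting memcmp
 * read beyond the end of 'uri'. */
static int has_prefix(const char *uri, const char *prefix)
{
        size_t uri_len = strlen(uri);
        size_t prefix_len = strlen(prefix);

        if (uri_len < prefix_len)
                return 0;   /* too short: memcmp(uri, prefix, prefix_len) would overread */
        return memcmp(uri, prefix, prefix_len) == 0;
}

int main(void)
{
        printf("%d\n", has_prefix("/index.html", "/index"));  /* 1 */
        printf("%d\n", has_prefix("/i", "/index"));           /* 0, no out-of-bounds read */
        return 0;
}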
null | null | null | https://github.com/chromium/chromium/commit/a44b00c88bc5ea35b5b150217c5fd6e4ce168e58 | a44b00c88bc5ea35b5b150217c5fd6e4ce168e58 | Apply behaviour change fix from upstream for previous XPath change.
BUG=58731
TEST=NONE
Review URL: http://codereview.chromium.org/4027006
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@63572 0039d316-1c4b-4281-b951-d872f2087c98 | xmlXPathNodeSetCreate(xmlNodePtr val) {
xmlNodeSetPtr ret;
ret = (xmlNodeSetPtr) xmlMalloc(sizeof(xmlNodeSet));
if (ret == NULL) {
xmlXPathErrMemory(NULL, "creating nodeset\n");
return(NULL);
}
memset(ret, 0 , (size_t) sizeof(xmlNodeSet));
if (val != NULL) {
ret->nodeTab = (xmlNodePtr *) xmlMalloc(XML_NODESET_DEFAULT *
sizeof(xmlNodePtr));
if (ret->nodeTab == NULL) {
xmlXPathErrMemory(NULL, "creating nodeset\n");
xmlFree(ret);
return(NULL);
}
memset(ret->nodeTab, 0 ,
XML_NODESET_DEFAULT * (size_t) sizeof(xmlNodePtr));
ret->nodeMax = XML_NODESET_DEFAULT;
if (val->type == XML_NAMESPACE_DECL) {
xmlNsPtr ns = (xmlNsPtr) val;
ret->nodeTab[ret->nodeNr++] =
xmlXPathNodeSetDupNs((xmlNodePtr) ns->next, ns);
} else
ret->nodeTab[ret->nodeNr++] = val;
}
return(ret);
}
| xmlXPathNodeSetCreate(xmlNodePtr val) {
xmlNodeSetPtr ret;
ret = (xmlNodeSetPtr) xmlMalloc(sizeof(xmlNodeSet));
if (ret == NULL) {
xmlXPathErrMemory(NULL, "creating nodeset\n");
return(NULL);
}
memset(ret, 0 , (size_t) sizeof(xmlNodeSet));
if (val != NULL) {
ret->nodeTab = (xmlNodePtr *) xmlMalloc(XML_NODESET_DEFAULT *
sizeof(xmlNodePtr));
if (ret->nodeTab == NULL) {
xmlXPathErrMemory(NULL, "creating nodeset\n");
xmlFree(ret);
return(NULL);
}
memset(ret->nodeTab, 0 ,
XML_NODESET_DEFAULT * (size_t) sizeof(xmlNodePtr));
ret->nodeMax = XML_NODESET_DEFAULT;
if (val->type == XML_NAMESPACE_DECL) {
xmlNsPtr ns = (xmlNsPtr) val;
ret->nodeTab[ret->nodeNr++] =
xmlXPathNodeSetDupNs((xmlNodePtr) ns->next, ns);
} else
ret->nodeTab[ret->nodeNr++] = val;
}
return(ret);
}
| C | Chrome | 0 |
CVE-2019-13307 | https://www.cvedetails.com/cve/CVE-2019-13307/ | CWE-119 | https://github.com/ImageMagick/ImageMagick/commit/025e77fcb2f45b21689931ba3bf74eac153afa48 | 025e77fcb2f45b21689931ba3bf74eac153afa48 | https://github.com/ImageMagick/ImageMagick/issues/1615 | MagickExport ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
ExceptionInfo *exception)
{
ChannelPerceptualHash
*perceptual_hash;
char
*colorspaces,
*q;
const char
*artifact;
MagickBooleanType
status;
register char
*p;
register ssize_t
i;
perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory(
MaxPixelChannels+1UL,sizeof(*perceptual_hash));
if (perceptual_hash == (ChannelPerceptualHash *) NULL)
return((ChannelPerceptualHash *) NULL);
artifact=GetImageArtifact(image,"phash:colorspaces");
if (artifact != NULL)
colorspaces=AcquireString(artifact);
else
colorspaces=AcquireString("sRGB,HCLp");
perceptual_hash[0].number_colorspaces=0;
perceptual_hash[0].number_channels=0;
q=colorspaces;
for (i=0; (p=StringToken(",",&q)) != (char *) NULL; i++)
{
ChannelMoments
*moments;
Image
*hash_image;
size_t
j;
ssize_t
channel,
colorspace;
if (i >= MaximumNumberOfPerceptualColorspaces)
break;
colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,p);
if (colorspace < 0)
break;
perceptual_hash[0].colorspace[i]=(ColorspaceType) colorspace;
hash_image=BlurImage(image,0.0,1.0,exception);
if (hash_image == (Image *) NULL)
break;
hash_image->depth=8;
status=TransformImageColorspace(hash_image,(ColorspaceType) colorspace,
exception);
if (status == MagickFalse)
break;
moments=GetImageMoments(hash_image,exception);
perceptual_hash[0].number_colorspaces++;
perceptual_hash[0].number_channels+=GetImageChannels(hash_image);
hash_image=DestroyImage(hash_image);
if (moments == (ChannelMoments *) NULL)
break;
for (channel=0; channel <= MaxPixelChannels; channel++)
for (j=0; j < MaximumNumberOfImageMoments; j++)
perceptual_hash[channel].phash[i][j]=
(-MagickLog10(moments[channel].invariant[j]));
moments=(ChannelMoments *) RelinquishMagickMemory(moments);
}
colorspaces=DestroyString(colorspaces);
return(perceptual_hash);
}
| MagickExport ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
ExceptionInfo *exception)
{
ChannelPerceptualHash
*perceptual_hash;
char
*colorspaces,
*q;
const char
*artifact;
MagickBooleanType
status;
register char
*p;
register ssize_t
i;
perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory(
MaxPixelChannels+1UL,sizeof(*perceptual_hash));
if (perceptual_hash == (ChannelPerceptualHash *) NULL)
return((ChannelPerceptualHash *) NULL);
artifact=GetImageArtifact(image,"phash:colorspaces");
if (artifact != NULL)
colorspaces=AcquireString(artifact);
else
colorspaces=AcquireString("sRGB,HCLp");
perceptual_hash[0].number_colorspaces=0;
perceptual_hash[0].number_channels=0;
q=colorspaces;
for (i=0; (p=StringToken(",",&q)) != (char *) NULL; i++)
{
ChannelMoments
*moments;
Image
*hash_image;
size_t
j;
ssize_t
channel,
colorspace;
if (i >= MaximumNumberOfPerceptualColorspaces)
break;
colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,p);
if (colorspace < 0)
break;
perceptual_hash[0].colorspace[i]=(ColorspaceType) colorspace;
hash_image=BlurImage(image,0.0,1.0,exception);
if (hash_image == (Image *) NULL)
break;
hash_image->depth=8;
status=TransformImageColorspace(hash_image,(ColorspaceType) colorspace,
exception);
if (status == MagickFalse)
break;
moments=GetImageMoments(hash_image,exception);
perceptual_hash[0].number_colorspaces++;
perceptual_hash[0].number_channels+=GetImageChannels(hash_image);
hash_image=DestroyImage(hash_image);
if (moments == (ChannelMoments *) NULL)
break;
for (channel=0; channel <= MaxPixelChannels; channel++)
for (j=0; j < MaximumNumberOfImageMoments; j++)
perceptual_hash[channel].phash[i][j]=
(-MagickLog10(moments[channel].invariant[j]));
moments=(ChannelMoments *) RelinquishMagickMemory(moments);
}
colorspaces=DestroyString(colorspaces);
return(perceptual_hash);
}
| C | ImageMagick6 | 0 |
CVE-2016-1691 | https://www.cvedetails.com/cve/CVE-2016-1691/ | CWE-119 | https://github.com/chromium/chromium/commit/e3aa8a56706c4abe208934d5c294f7b594b8b693 | e3aa8a56706c4abe208934d5c294f7b594b8b693 | Enforce the WebUsbAllowDevicesForUrls policy
This change modifies UsbChooserContext to use the UsbAllowDevicesForUrls
class to consider devices allowed by the WebUsbAllowDevicesForUrls
policy. The WebUsbAllowDevicesForUrls policy overrides the other WebUSB
policies. Unit tests are also added to ensure that the policy is being
enforced correctly.
The design document for this feature is found at:
https://docs.google.com/document/d/1MPvsrWiVD_jAC8ELyk8njFpy6j1thfVU5aWT3TCWE8w
Bug: 854329
Change-Id: I5f82e662ca9dc544da5918eae766b5535a31296b
Reviewed-on: https://chromium-review.googlesource.com/c/1259289
Commit-Queue: Ovidio Henriquez <odejesush@chromium.org>
Reviewed-by: Reilly Grant <reillyg@chromium.org>
Reviewed-by: Julian Pastarmov <pastarmovj@chromium.org>
Cr-Commit-Position: refs/heads/master@{#597926} | content::RenderFrameHost* GetMostVisitedIframe(content::WebContents* tab) {
for (content::RenderFrameHost* frame : tab->GetAllFrames()) {
if (frame->GetFrameName() == "mv-single")
return frame;
}
return nullptr;
}
| content::RenderFrameHost* GetMostVisitedIframe(content::WebContents* tab) {
for (content::RenderFrameHost* frame : tab->GetAllFrames()) {
if (frame->GetFrameName() == "mv-single")
return frame;
}
return nullptr;
}
| C | Chrome | 0 |
CVE-2017-5016 | https://www.cvedetails.com/cve/CVE-2017-5016/ | CWE-1021 | https://github.com/chromium/chromium/commit/a8e17a3031b6ad69c399e5e04dd0084e577097fc | a8e17a3031b6ad69c399e5e04dd0084e577097fc | Form validation: Do not show validation bubble if the page is invisible.
BUG=673163
Review-Url: https://codereview.chromium.org/2572813003
Cr-Commit-Position: refs/heads/master@{#438476} | void HTMLFormControlElement::disabledAttributeChanged() {
setNeedsWillValidateCheck();
pseudoStateChanged(CSSSelector::PseudoDisabled);
pseudoStateChanged(CSSSelector::PseudoEnabled);
if (layoutObject())
LayoutTheme::theme().controlStateChanged(*layoutObject(),
EnabledControlState);
if (isDisabledFormControl() && adjustedFocusedElementInTreeScope() == this) {
document().setNeedsFocusedElementCheck();
}
}
| void HTMLFormControlElement::disabledAttributeChanged() {
setNeedsWillValidateCheck();
pseudoStateChanged(CSSSelector::PseudoDisabled);
pseudoStateChanged(CSSSelector::PseudoEnabled);
if (layoutObject())
LayoutTheme::theme().controlStateChanged(*layoutObject(),
EnabledControlState);
if (isDisabledFormControl() && adjustedFocusedElementInTreeScope() == this) {
document().setNeedsFocusedElementCheck();
}
}
| C | Chrome | 0 |
CVE-2014-3610 | https://www.cvedetails.com/cve/CVE-2014-3610/ | CWE-264 | https://github.com/torvalds/linux/commit/854e8bb1aa06c578c2c9145fa6bfe3680ef63b23 | 854e8bb1aa06c578c2c9145fa6bfe3680ef63b23 | KVM: x86: Check non-canonical addresses upon WRMSR
Upon WRMSR, the CPU should inject #GP if a non-canonical value (address) is
written to certain MSRs. The behavior is "almost" identical for AMD and Intel
(ignoring MSRs that are not implemented in either architecture since they would
anyhow #GP). However, IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
non-canonical address is written on Intel but not on AMD (which ignores the top
32-bits).
Accordingly, this patch injects a #GP on the MSRs which behave identically on
Intel and AMD. To eliminate the differences between the architecutres, the
value which is written to IA32_SYSENTER_ESP and IA32_SYSENTER_EIP is turned to
canonical value before writing instead of injecting a #GP.
Some references from Intel and AMD manuals:
According to Intel SDM description of WRMSR instruction #GP is expected on
WRMSR "If the source register contains a non-canonical address and ECX
specifies one of the following MSRs: IA32_DS_AREA, IA32_FS_BASE, IA32_GS_BASE,
IA32_KERNEL_GS_BASE, IA32_LSTAR, IA32_SYSENTER_EIP, IA32_SYSENTER_ESP."
According to the AMD instruction manual:
LSTAR/CSTAR (SYSCALL): "The WRMSR instruction loads the target RIP into the
LSTAR and CSTAR registers. If an RIP written by WRMSR is not in canonical
form, a general-protection exception (#GP) occurs."
IA32_GS_BASE and IA32_FS_BASE (WRFSBASE/WRGSBASE): "The address written to the
base field must be in canonical form or a #GP fault will occur."
IA32_KERNEL_GS_BASE (SWAPGS): "The address stored in the KernelGSbase MSR must
be in canonical form."
This patch fixes CVE-2014-3610.
Cc: stable@vger.kernel.org
Signed-off-by: Nadav Amit <namit@cs.technion.ac.il>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
}
| static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
}
| C | linux | 0 |
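The KVM commit message for CVE-2014-3610 above relies on the x86 notion of a canonical address: bits 63 down to the highest implemented virtual-address bit must all equal that bit, and a WRMSR of a non-canonical value to certain MSRs must raise #GP. The sketch below illustrates the two behaviours the message describes: reject with #GP for the MSRs that trap identically on Intel and AMD, and silently sign-extend for IA32_SYSENTER_ESP/EIP. It is a user-space illustration, not the kernel's emulation code; the helper names, the return-code enum, and the 48-bit address-width assumption are mine, though the MSR index constants are the architectural values.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define VIRT_ADDR_BITS 48   /* assumed 4-level paging; LA57 would use 57 */

/* Sign-extend a linear address from its top implemented bit
 * (relies on arithmetic right shift of signed values, as is common practice). */
static inline uint64_t make_canonical(uint64_t la)
{
    int shift = 64 - VIRT_ADDR_BITS;
    return (uint64_t)((int64_t)(la << shift) >> shift);
}

/* An address is canonical iff sign-extension leaves it unchanged. */
static inline bool is_noncanonical(uint64_t la)
{
    return make_canonical(la) != la;
}

enum { EMULATE_OK = 0, EMULATE_FAULT_GP = 1 };   /* illustrative return codes */

/* Hypothetical WRMSR-side check mirroring the policy in the commit message. */
static int wrmsr_check(uint32_t msr, uint64_t *data)
{
    switch (msr) {
    case 0xC0000082:            /* MSR_LSTAR */
    case 0xC0000083:            /* MSR_CSTAR */
    case 0xC0000100:            /* MSR_FS_BASE */
    case 0xC0000101:            /* MSR_GS_BASE */
    case 0xC0000102:            /* MSR_KERNEL_GS_BASE */
        if (is_noncanonical(*data))
            return EMULATE_FAULT_GP;     /* same on Intel and AMD: inject #GP */
        break;
    case 0x175:                 /* IA32_SYSENTER_ESP */
    case 0x176:                 /* IA32_SYSENTER_EIP */
        *data = make_canonical(*data);   /* vendors differ: canonicalize instead */
        break;
    default:
        break;
    }
    return EMULATE_OK;
}

int main(void)
{
    uint64_t v = 0x0000800000000000ULL;               /* non-canonical on 48-bit */
    printf("noncanonical: %d\n", is_noncanonical(v));             /* prints 1 */
    printf("wrmsr LSTAR -> %s\n",
           wrmsr_check(0xC0000082, &v) == EMULATE_FAULT_GP ? "#GP" : "ok");
    return 0;
}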
CVE-2018-6127 | https://www.cvedetails.com/cve/CVE-2018-6127/ | null | https://github.com/chromium/chromium/commit/28044cb7ef4488e7278c2b80f0e3a2c3707d03b6 | 28044cb7ef4488e7278c2b80f0e3a2c3707d03b6 | [IndexedDB] Fixing early destruction of connection during forceclose
Patch is as small as possible for merging.
Bug: 842990
Change-Id: I9968ffee1bf3279e61e1ec13e4d541f713caf12f
Reviewed-on: https://chromium-review.googlesource.com/1062935
Commit-Queue: Daniel Murphy <dmurph@chromium.org>
Commit-Queue: Victor Costan <pwnall@chromium.org>
Reviewed-by: Victor Costan <pwnall@chromium.org>
Cr-Commit-Position: refs/heads/master@{#559383} | Status IndexedDBDatabase::PutOperation(
std::unique_ptr<PutOperationParams> params,
IndexedDBTransaction* transaction) {
IDB_TRACE2("IndexedDBDatabase::PutOperation", "txn.id", transaction->id(),
"size", params->value.SizeEstimate());
DCHECK_NE(transaction->mode(), blink::kWebIDBTransactionModeReadOnly);
bool key_was_generated = false;
Status s = Status::OK();
DCHECK(metadata_.object_stores.find(params->object_store_id) !=
metadata_.object_stores.end());
const IndexedDBObjectStoreMetadata& object_store =
metadata_.object_stores[params->object_store_id];
DCHECK(object_store.auto_increment || params->key->IsValid());
std::unique_ptr<IndexedDBKey> key;
if (params->put_mode != blink::kWebIDBPutModeCursorUpdate &&
object_store.auto_increment && !params->key->IsValid()) {
std::unique_ptr<IndexedDBKey> auto_inc_key = GenerateKey(
backing_store_.get(), transaction, id(), params->object_store_id);
key_was_generated = true;
if (!auto_inc_key->IsValid()) {
params->callbacks->OnError(
IndexedDBDatabaseError(blink::kWebIDBDatabaseExceptionConstraintError,
"Maximum key generator value reached."));
return s;
}
key = std::move(auto_inc_key);
} else {
key = std::move(params->key);
}
DCHECK(key->IsValid());
IndexedDBBackingStore::RecordIdentifier record_identifier;
if (params->put_mode == blink::kWebIDBPutModeAddOnly) {
bool found = false;
Status found_status = backing_store_->KeyExistsInObjectStore(
transaction->BackingStoreTransaction(), id(), params->object_store_id,
*key, &record_identifier, &found);
if (!found_status.ok())
return found_status;
if (found) {
params->callbacks->OnError(
IndexedDBDatabaseError(blink::kWebIDBDatabaseExceptionConstraintError,
"Key already exists in the object store."));
return found_status;
}
}
std::vector<std::unique_ptr<IndexWriter>> index_writers;
base::string16 error_message;
bool obeys_constraints = false;
bool backing_store_success = MakeIndexWriters(transaction,
backing_store_.get(),
id(),
object_store,
*key,
key_was_generated,
params->index_keys,
&index_writers,
&error_message,
&obeys_constraints);
if (!backing_store_success) {
params->callbacks->OnError(IndexedDBDatabaseError(
blink::kWebIDBDatabaseExceptionUnknownError,
"Internal error: backing store error updating index keys."));
return s;
}
if (!obeys_constraints) {
params->callbacks->OnError(IndexedDBDatabaseError(
blink::kWebIDBDatabaseExceptionConstraintError, error_message));
return s;
}
s = backing_store_->PutRecord(transaction->BackingStoreTransaction(), id(),
params->object_store_id, *key, &params->value,
&params->handles, &record_identifier);
if (!s.ok())
return s;
{
IDB_TRACE1("IndexedDBDatabase::PutOperation.UpdateIndexes", "txn.id",
transaction->id());
for (const auto& writer : index_writers) {
writer->WriteIndexKeys(record_identifier, backing_store_.get(),
transaction->BackingStoreTransaction(), id(),
params->object_store_id);
}
}
if (object_store.auto_increment &&
params->put_mode != blink::kWebIDBPutModeCursorUpdate &&
key->type() == kWebIDBKeyTypeNumber) {
IDB_TRACE1("IndexedDBDatabase::PutOperation.AutoIncrement", "txn.id",
transaction->id());
s = UpdateKeyGenerator(backing_store_.get(), transaction, id(),
params->object_store_id, *key, !key_was_generated);
if (!s.ok())
return s;
}
{
IDB_TRACE1("IndexedDBDatabase::PutOperation.Callbacks", "txn.id",
transaction->id());
params->callbacks->OnSuccess(*key);
}
FilterObservation(transaction, params->object_store_id,
params->put_mode == blink::kWebIDBPutModeAddOnly
? blink::kWebIDBAdd
: blink::kWebIDBPut,
IndexedDBKeyRange(*key), &params->value);
factory_->NotifyIndexedDBContentChanged(
origin(), metadata_.name,
metadata_.object_stores[params->object_store_id].name);
return s;
}
| Status IndexedDBDatabase::PutOperation(
std::unique_ptr<PutOperationParams> params,
IndexedDBTransaction* transaction) {
IDB_TRACE2("IndexedDBDatabase::PutOperation", "txn.id", transaction->id(),
"size", params->value.SizeEstimate());
DCHECK_NE(transaction->mode(), blink::kWebIDBTransactionModeReadOnly);
bool key_was_generated = false;
Status s = Status::OK();
DCHECK(metadata_.object_stores.find(params->object_store_id) !=
metadata_.object_stores.end());
const IndexedDBObjectStoreMetadata& object_store =
metadata_.object_stores[params->object_store_id];
DCHECK(object_store.auto_increment || params->key->IsValid());
std::unique_ptr<IndexedDBKey> key;
if (params->put_mode != blink::kWebIDBPutModeCursorUpdate &&
object_store.auto_increment && !params->key->IsValid()) {
std::unique_ptr<IndexedDBKey> auto_inc_key = GenerateKey(
backing_store_.get(), transaction, id(), params->object_store_id);
key_was_generated = true;
if (!auto_inc_key->IsValid()) {
params->callbacks->OnError(
IndexedDBDatabaseError(blink::kWebIDBDatabaseExceptionConstraintError,
"Maximum key generator value reached."));
return s;
}
key = std::move(auto_inc_key);
} else {
key = std::move(params->key);
}
DCHECK(key->IsValid());
IndexedDBBackingStore::RecordIdentifier record_identifier;
if (params->put_mode == blink::kWebIDBPutModeAddOnly) {
bool found = false;
Status found_status = backing_store_->KeyExistsInObjectStore(
transaction->BackingStoreTransaction(), id(), params->object_store_id,
*key, &record_identifier, &found);
if (!found_status.ok())
return found_status;
if (found) {
params->callbacks->OnError(
IndexedDBDatabaseError(blink::kWebIDBDatabaseExceptionConstraintError,
"Key already exists in the object store."));
return found_status;
}
}
std::vector<std::unique_ptr<IndexWriter>> index_writers;
base::string16 error_message;
bool obeys_constraints = false;
bool backing_store_success = MakeIndexWriters(transaction,
backing_store_.get(),
id(),
object_store,
*key,
key_was_generated,
params->index_keys,
&index_writers,
&error_message,
&obeys_constraints);
if (!backing_store_success) {
params->callbacks->OnError(IndexedDBDatabaseError(
blink::kWebIDBDatabaseExceptionUnknownError,
"Internal error: backing store error updating index keys."));
return s;
}
if (!obeys_constraints) {
params->callbacks->OnError(IndexedDBDatabaseError(
blink::kWebIDBDatabaseExceptionConstraintError, error_message));
return s;
}
s = backing_store_->PutRecord(transaction->BackingStoreTransaction(), id(),
params->object_store_id, *key, &params->value,
&params->handles, &record_identifier);
if (!s.ok())
return s;
{
IDB_TRACE1("IndexedDBDatabase::PutOperation.UpdateIndexes", "txn.id",
transaction->id());
for (const auto& writer : index_writers) {
writer->WriteIndexKeys(record_identifier, backing_store_.get(),
transaction->BackingStoreTransaction(), id(),
params->object_store_id);
}
}
if (object_store.auto_increment &&
params->put_mode != blink::kWebIDBPutModeCursorUpdate &&
key->type() == kWebIDBKeyTypeNumber) {
IDB_TRACE1("IndexedDBDatabase::PutOperation.AutoIncrement", "txn.id",
transaction->id());
s = UpdateKeyGenerator(backing_store_.get(), transaction, id(),
params->object_store_id, *key, !key_was_generated);
if (!s.ok())
return s;
}
{
IDB_TRACE1("IndexedDBDatabase::PutOperation.Callbacks", "txn.id",
transaction->id());
params->callbacks->OnSuccess(*key);
}
FilterObservation(transaction, params->object_store_id,
params->put_mode == blink::kWebIDBPutModeAddOnly
? blink::kWebIDBAdd
: blink::kWebIDBPut,
IndexedDBKeyRange(*key), &params->value);
factory_->NotifyIndexedDBContentChanged(
origin(), metadata_.name,
metadata_.object_stores[params->object_store_id].name);
return s;
}
| C | Chrome | 0 |
CVE-2018-12714 | https://www.cvedetails.com/cve/CVE-2018-12714/ | CWE-787 | https://github.com/torvalds/linux/commit/81f9c4e4177d31ced6f52a89bb70e93bfb77ca03 | 81f9c4e4177d31ced6f52a89bb70e93bfb77ca03 | Merge tag 'trace-v4.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing fixes from Steven Rostedt:
"This contains a few fixes and a clean up.
- a bad merge caused an "endif" to go in the wrong place in
scripts/Makefile.build
- softirq tracing fix for tracing that corrupts lockdep and causes a
false splat
- histogram documentation typo fixes
- fix a bad memory reference when passing in no filter to the filter
code
- simplify code by using the swap macro instead of open coding the
swap"
* tag 'trace-v4.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
tracing: Fix SKIP_STACK_VALIDATION=1 build due to bad merge with -mrecord-mcount
tracing: Fix some errors in histogram documentation
tracing: Use swap macro in update_max_tr
softirq: Reorder trace_softirqs_on to prevent lockdep splat
tracing: Check for no filter when processing event filters | static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
struct trace_array *tr = inode->i_private;
int ret;
if (tracing_disabled)
return -ENODEV;
if (trace_array_get(tr) < 0)
return -ENODEV;
ret = single_open(file, tracing_trace_options_show, inode->i_private);
if (ret < 0)
trace_array_put(tr);
return ret;
}
| static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
struct trace_array *tr = inode->i_private;
int ret;
if (tracing_disabled)
return -ENODEV;
if (trace_array_get(tr) < 0)
return -ENODEV;
ret = single_open(file, tracing_trace_options_show, inode->i_private);
if (ret < 0)
trace_array_put(tr);
return ret;
}
| C | linux | 0 |
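One bullet in the tracing pull-request message above ("simplify code by using the swap macro instead of open coding the swap", applied to update_max_tr) refers to replacing a hand-written three-assignment exchange with the kernel's typeof-based swap() helper. The stand-alone sketch below shows the pattern; the macro mirrors the usual shape of that helper from memory rather than quoting the kernel header, and the variables are invented stand-ins for the buffer fields being exchanged.

#include <stdio.h>

/* typeof-based swap in the style of the kernel's swap() helper
 * (shape assumed; see the kernel's own header for the real definition). */
#define swap(a, b) \
    do { __typeof__(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

int main(void)
{
    unsigned long max_buf = 10, cur_buf = 20;

    /* Open-coded form the commit message says was removed: */
    unsigned long tmp = max_buf;
    max_buf = cur_buf;
    cur_buf = tmp;

    /* Equivalent single statement with the macro: */
    swap(max_buf, cur_buf);

    printf("%lu %lu\n", max_buf, cur_buf);   /* back to the original 10 20 */
    return 0;
}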
CVE-2015-8838 | https://www.cvedetails.com/cve/CVE-2015-8838/ | CWE-284 | https://git.php.net/?p=php-src.git;a=commit;h=97aa752fee61fccdec361279adbfb17a3c60f3f4 | 97aa752fee61fccdec361279adbfb17a3c60f3f4 | null | MYSQLND ** mysqlnd_stream_array_check_for_readiness(MYSQLND ** conn_array TSRMLS_DC)
{
int cnt = 0;
MYSQLND **p = conn_array, **p_p;
MYSQLND **ret = NULL;
while (*p) {
if (CONN_GET_STATE((*p)->data) <= CONN_READY || CONN_GET_STATE((*p)->data) == CONN_QUIT_SENT) {
cnt++;
}
p++;
}
if (cnt) {
MYSQLND **ret_p = ret = ecalloc(cnt + 1, sizeof(MYSQLND *));
p_p = p = conn_array;
while (*p) {
if (CONN_GET_STATE((*p)->data) <= CONN_READY || CONN_GET_STATE((*p)->data) == CONN_QUIT_SENT) {
*ret_p = *p;
*p = NULL;
ret_p++;
} else {
*p_p = *p;
p_p++;
}
p++;
}
*ret_p = NULL;
}
return ret;
}
| MYSQLND ** mysqlnd_stream_array_check_for_readiness(MYSQLND ** conn_array TSRMLS_DC)
{
int cnt = 0;
MYSQLND **p = conn_array, **p_p;
MYSQLND **ret = NULL;
while (*p) {
if (CONN_GET_STATE((*p)->data) <= CONN_READY || CONN_GET_STATE((*p)->data) == CONN_QUIT_SENT) {
cnt++;
}
p++;
}
if (cnt) {
MYSQLND **ret_p = ret = ecalloc(cnt + 1, sizeof(MYSQLND *));
p_p = p = conn_array;
while (*p) {
if (CONN_GET_STATE((*p)->data) <= CONN_READY || CONN_GET_STATE((*p)->data) == CONN_QUIT_SENT) {
*ret_p = *p;
*p = NULL;
ret_p++;
} else {
*p_p = *p;
p_p++;
}
p++;
}
*ret_p = NULL;
}
return ret;
}
| C | php | 0 |
CVE-2018-16080 | https://www.cvedetails.com/cve/CVE-2018-16080/ | CWE-20 | https://github.com/chromium/chromium/commit/c552cd7b8a0862f6b3c8c6a07f98bda3721101eb | c552cd7b8a0862f6b3c8c6a07f98bda3721101eb | Mac: turn popups into new tabs while in fullscreen.
It's platform convention to show popups as new tabs while in
non-HTML5 fullscreen. (Popups cause tabs to lose HTML5 fullscreen.)
This was implemented for Cocoa in a BrowserWindow override, but
it makes sense to just stick it into Browser and remove a ton
of override code put in just to support this.
BUG=858929, 868416
TEST=as in bugs
Change-Id: I43471f242813ec1159d9c690bab73dab3e610b7d
Reviewed-on: https://chromium-review.googlesource.com/1153455
Reviewed-by: Sidney San Martín <sdy@chromium.org>
Commit-Queue: Avi Drissman <avi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#578755} | bool WindowCanOpenTabs(Browser* browser) {
return browser->CanSupportWindowFeature(Browser::FEATURE_TABSTRIP) ||
browser->tab_strip_model()->empty();
}
| bool WindowCanOpenTabs(Browser* browser) {
return browser->CanSupportWindowFeature(Browser::FEATURE_TABSTRIP) ||
browser->tab_strip_model()->empty();
}
| C | Chrome | 0 |
CVE-2015-1870 | https://www.cvedetails.com/cve/CVE-2015-1870/ | CWE-200 | https://github.com/abrt/abrt/commit/8939398b82006ba1fec4ed491339fc075f43fc7c | 8939398b82006ba1fec4ed491339fc075f43fc7c | make the dump directories owned by root by default
It was discovered that the abrt event scripts create a user-readable
copy of a sosreport file in abrt problem directories, and include
excerpts of /var/log/messages selected by the user-controlled process
name, leading to an information disclosure.
This issue was discovered by Florian Weimer of Red Hat Product Security.
Related: #1212868
Signed-off-by: Jakub Filak <jfilak@redhat.com> | int low_free_space(unsigned setting_MaxCrashReportsSize, const char *dump_location)
{
struct statvfs vfs;
if (statvfs(dump_location, &vfs) != 0)
{
perror_msg("statvfs('%s')", dump_location);
return 0;
}
/* Check that at least MaxCrashReportsSize/4 MBs are free */
/* fs_free_mb_x4 ~= vfs.f_bfree * vfs.f_bsize * 4, expressed in MBytes.
* Need to neither overflow nor round f_bfree down too much. */
unsigned long fs_free_mb_x4 = ((unsigned long long)vfs.f_bfree / (1024/4)) * vfs.f_bsize / 1024;
if (fs_free_mb_x4 < setting_MaxCrashReportsSize)
{
error_msg("Only %luMiB is available on %s",
fs_free_mb_x4 / 4, dump_location);
return 1;
}
return 0;
}
| int low_free_space(unsigned setting_MaxCrashReportsSize, const char *dump_location)
{
struct statvfs vfs;
if (statvfs(dump_location, &vfs) != 0)
{
perror_msg("statvfs('%s')", dump_location);
return 0;
}
/* Check that at least MaxCrashReportsSize/4 MBs are free */
/* fs_free_mb_x4 ~= vfs.f_bfree * vfs.f_bsize * 4, expressed in MBytes.
* Need to neither overflow nor round f_bfree down too much. */
unsigned long fs_free_mb_x4 = ((unsigned long long)vfs.f_bfree / (1024/4)) * vfs.f_bsize / 1024;
if (fs_free_mb_x4 < setting_MaxCrashReportsSize)
{
error_msg("Only %luMiB is available on %s",
fs_free_mb_x4 / 4, dump_location);
return 1;
}
return 0;
}
| C | abrt | 0 |
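The inline comment in low_free_space() above compresses some integer arithmetic: free space is tracked as MiB times four (so comparing it against MaxCrashReportsSize implements the "at least MaxCrashReportsSize/4 MiB free" rule), and f_bfree is divided by 256 before multiplying by the block size so the product stays small without rounding the block count away entirely. A small self-contained check of that formulation, using made-up statvfs numbers, is below.

#include <stdio.h>

int main(void)
{
    unsigned long f_bfree = 1000000UL;   /* free blocks, example value */
    unsigned long f_bsize = 4096UL;      /* block size in bytes, example value */

    /* The function's formulation: divide the block count first. */
    unsigned long fs_free_mb_x4 =
        ((unsigned long long)f_bfree / (1024 / 4)) * f_bsize / 1024;

    /* Reference computed entirely in 64-bit arithmetic. */
    unsigned long long exact_mb_x4 =
        (unsigned long long)f_bfree * f_bsize * 4ULL / (1024ULL * 1024ULL);

    printf("approx: %lu  exact: %llu  (MiB x 4)\n", fs_free_mb_x4, exact_mb_x4);
    /* prints: approx: 15624  exact: 15625  (MiB x 4) */
    return 0;
}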