CVE ID (string, 13-43 chars, nullable) | CVE Page (string, 45-48 chars, nullable) | CWE ID (string, 90 classes) | codeLink (string, 46-139 chars) | commit_id (string, 6-81 chars) | commit_message (string, 3-13.3k chars, nullable) | func_after (string, 14-241k chars) | func_before (string, 14-241k chars) | lang (string, 3 classes) | project (string, 309 classes) | vul (int8, 0 or 1)
---|---|---|---|---|---|---|---|---|---|---|
CVE-2010-1152 | https://www.cvedetails.com/cve/CVE-2010-1152/ | CWE-20 | https://github.com/memcached/memcached/commit/d9cd01ede97f4145af9781d448c62a3318952719 | d9cd01ede97f4145af9781d448c62a3318952719 | Use strncmp when checking for large ascii multigets. | static const char *state_text(enum conn_states state) {
const char* const statenames[] = { "conn_listening",
"conn_new_cmd",
"conn_waiting",
"conn_read",
"conn_parse_cmd",
"conn_write",
"conn_nread",
"conn_swallow",
"conn_closing",
"conn_mwrite" };
return statenames[state];
}
| static const char *state_text(enum conn_states state) {
const char* const statenames[] = { "conn_listening",
"conn_new_cmd",
"conn_waiting",
"conn_read",
"conn_parse_cmd",
"conn_write",
"conn_nread",
"conn_swallow",
"conn_closing",
"conn_mwrite" };
return statenames[state];
}
| C | memcached | 0 |
CVE-2013-1790 | https://www.cvedetails.com/cve/CVE-2013-1790/ | CWE-119 | https://cgit.freedesktop.org/poppler/poppler/commit/?h=poppler-0.22&id=b1026b5978c385328f2a15a2185c599a563edf91 | b1026b5978c385328f2a15a2185c599a563edf91 | null | char *Stream::getLine(char *buf, int size) {
int i;
int c;
if (lookChar() == EOF || size < 0)
return NULL;
for (i = 0; i < size - 1; ++i) {
c = getChar();
if (c == EOF || c == '\n')
break;
if (c == '\r') {
if ((c = lookChar()) == '\n')
getChar();
break;
}
buf[i] = c;
}
buf[i] = '\0';
return buf;
}
| char *Stream::getLine(char *buf, int size) {
int i;
int c;
if (lookChar() == EOF || size < 0)
return NULL;
for (i = 0; i < size - 1; ++i) {
c = getChar();
if (c == EOF || c == '\n')
break;
if (c == '\r') {
if ((c = lookChar()) == '\n')
getChar();
break;
}
buf[i] = c;
}
buf[i] = '\0';
return buf;
}
| CPP | poppler | 0 |
CVE-2017-5019 | https://www.cvedetails.com/cve/CVE-2017-5019/ | CWE-416 | https://github.com/chromium/chromium/commit/f03ea5a5c2ff26e239dfd23e263b15da2d9cee93 | f03ea5a5c2ff26e239dfd23e263b15da2d9cee93 | Convert FrameHostMsg_DidAddMessageToConsole to Mojo.
Note: Since this required changing the test
RenderViewImplTest.DispatchBeforeUnloadCanDetachFrame, I manually
re-introduced https://crbug.com/666714 locally (the bug the test was
added for), and reran the test to confirm that it still covers the bug.
Bug: 786836
Change-Id: I110668fa6f0f261fd2ac36bb91a8d8b31c99f4f1
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1526270
Commit-Queue: Lowell Manners <lowell@chromium.org>
Reviewed-by: Daniel Cheng <dcheng@chromium.org>
Reviewed-by: Camille Lamy <clamy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#653137} | NavigationRateLimiter::NavigationRateLimiter(Frame& frame)
: frame_(frame),
time_first_count_(base::TimeTicks::Now()),
enabled(frame_->GetSettings()->GetShouldProtectAgainstIpcFlooding()) {}
| NavigationRateLimiter::NavigationRateLimiter(Frame& frame)
: frame_(frame),
time_first_count_(base::TimeTicks::Now()),
enabled(frame_->GetSettings()->GetShouldProtectAgainstIpcFlooding()) {}
| C | Chrome | 0 |
CVE-2016-4805 | https://www.cvedetails.com/cve/CVE-2016-4805/ | CWE-416 | https://github.com/torvalds/linux/commit/1f461dcdd296eecedaffffc6bae2bfa90bd7eb89 | 1f461dcdd296eecedaffffc6bae2bfa90bd7eb89 | ppp: take reference on channels netns
Let channels hold a reference on their network namespace.
Some channel types, like ppp_async and ppp_synctty, can have their
userspace controller running in a different namespace. Therefore they
can't rely on them to preclude their netns from being removed from
under them.
==================================================================
BUG: KASAN: use-after-free in ppp_unregister_channel+0x372/0x3a0 at
addr ffff880064e217e0
Read of size 8 by task syz-executor/11581
=============================================================================
BUG net_namespace (Not tainted): kasan: bad access detected
-----------------------------------------------------------------------------
Disabling lock debugging due to kernel taint
INFO: Allocated in copy_net_ns+0x6b/0x1a0 age=92569 cpu=3 pid=6906
[< none >] ___slab_alloc+0x4c7/0x500 kernel/mm/slub.c:2440
[< none >] __slab_alloc+0x4c/0x90 kernel/mm/slub.c:2469
[< inline >] slab_alloc_node kernel/mm/slub.c:2532
[< inline >] slab_alloc kernel/mm/slub.c:2574
[< none >] kmem_cache_alloc+0x23a/0x2b0 kernel/mm/slub.c:2579
[< inline >] kmem_cache_zalloc kernel/include/linux/slab.h:597
[< inline >] net_alloc kernel/net/core/net_namespace.c:325
[< none >] copy_net_ns+0x6b/0x1a0 kernel/net/core/net_namespace.c:360
[< none >] create_new_namespaces+0x2f6/0x610 kernel/kernel/nsproxy.c:95
[< none >] copy_namespaces+0x297/0x320 kernel/kernel/nsproxy.c:150
[< none >] copy_process.part.35+0x1bf4/0x5760 kernel/kernel/fork.c:1451
[< inline >] copy_process kernel/kernel/fork.c:1274
[< none >] _do_fork+0x1bc/0xcb0 kernel/kernel/fork.c:1723
[< inline >] SYSC_clone kernel/kernel/fork.c:1832
[< none >] SyS_clone+0x37/0x50 kernel/kernel/fork.c:1826
[< none >] entry_SYSCALL_64_fastpath+0x16/0x7a kernel/arch/x86/entry/entry_64.S:185
INFO: Freed in net_drop_ns+0x67/0x80 age=575 cpu=2 pid=2631
[< none >] __slab_free+0x1fc/0x320 kernel/mm/slub.c:2650
[< inline >] slab_free kernel/mm/slub.c:2805
[< none >] kmem_cache_free+0x2a0/0x330 kernel/mm/slub.c:2814
[< inline >] net_free kernel/net/core/net_namespace.c:341
[< none >] net_drop_ns+0x67/0x80 kernel/net/core/net_namespace.c:348
[< none >] cleanup_net+0x4e5/0x600 kernel/net/core/net_namespace.c:448
[< none >] process_one_work+0x794/0x1440 kernel/kernel/workqueue.c:2036
[< none >] worker_thread+0xdb/0xfc0 kernel/kernel/workqueue.c:2170
[< none >] kthread+0x23f/0x2d0 kernel/drivers/block/aoe/aoecmd.c:1303
[< none >] ret_from_fork+0x3f/0x70 kernel/arch/x86/entry/entry_64.S:468
INFO: Slab 0xffffea0001938800 objects=3 used=0 fp=0xffff880064e20000
flags=0x5fffc0000004080
INFO: Object 0xffff880064e20000 @offset=0 fp=0xffff880064e24200
CPU: 1 PID: 11581 Comm: syz-executor Tainted: G B 4.4.0+
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
rel-1.8.2-0-g33fbe13 by qemu-project.org 04/01/2014
00000000ffffffff ffff8800662c7790 ffffffff8292049d ffff88003e36a300
ffff880064e20000 ffff880064e20000 ffff8800662c77c0 ffffffff816f2054
ffff88003e36a300 ffffea0001938800 ffff880064e20000 0000000000000000
Call Trace:
[< inline >] __dump_stack kernel/lib/dump_stack.c:15
[<ffffffff8292049d>] dump_stack+0x6f/0xa2 kernel/lib/dump_stack.c:50
[<ffffffff816f2054>] print_trailer+0xf4/0x150 kernel/mm/slub.c:654
[<ffffffff816f875f>] object_err+0x2f/0x40 kernel/mm/slub.c:661
[< inline >] print_address_description kernel/mm/kasan/report.c:138
[<ffffffff816fb0c5>] kasan_report_error+0x215/0x530 kernel/mm/kasan/report.c:236
[< inline >] kasan_report kernel/mm/kasan/report.c:259
[<ffffffff816fb4de>] __asan_report_load8_noabort+0x3e/0x40 kernel/mm/kasan/report.c:280
[< inline >] ? ppp_pernet kernel/include/linux/compiler.h:218
[<ffffffff83ad71b2>] ? ppp_unregister_channel+0x372/0x3a0 kernel/drivers/net/ppp/ppp_generic.c:2392
[< inline >] ppp_pernet kernel/include/linux/compiler.h:218
[<ffffffff83ad71b2>] ppp_unregister_channel+0x372/0x3a0 kernel/drivers/net/ppp/ppp_generic.c:2392
[< inline >] ? ppp_pernet kernel/drivers/net/ppp/ppp_generic.c:293
[<ffffffff83ad6f26>] ? ppp_unregister_channel+0xe6/0x3a0 kernel/drivers/net/ppp/ppp_generic.c:2392
[<ffffffff83ae18f3>] ppp_asynctty_close+0xa3/0x130 kernel/drivers/net/ppp/ppp_async.c:241
[<ffffffff83ae1850>] ? async_lcp_peek+0x5b0/0x5b0 kernel/drivers/net/ppp/ppp_async.c:1000
[<ffffffff82c33239>] tty_ldisc_close.isra.1+0x99/0xe0 kernel/drivers/tty/tty_ldisc.c:478
[<ffffffff82c332c0>] tty_ldisc_kill+0x40/0x170 kernel/drivers/tty/tty_ldisc.c:744
[<ffffffff82c34943>] tty_ldisc_release+0x1b3/0x260 kernel/drivers/tty/tty_ldisc.c:772
[<ffffffff82c1ef21>] tty_release+0xac1/0x13e0 kernel/drivers/tty/tty_io.c:1901
[<ffffffff82c1e460>] ? release_tty+0x320/0x320 kernel/drivers/tty/tty_io.c:1688
[<ffffffff8174de36>] __fput+0x236/0x780 kernel/fs/file_table.c:208
[<ffffffff8174e405>] ____fput+0x15/0x20 kernel/fs/file_table.c:244
[<ffffffff813595ab>] task_work_run+0x16b/0x200 kernel/kernel/task_work.c:115
[< inline >] exit_task_work kernel/include/linux/task_work.h:21
[<ffffffff81307105>] do_exit+0x8b5/0x2c60 kernel/kernel/exit.c:750
[<ffffffff813fdd20>] ? debug_check_no_locks_freed+0x290/0x290 kernel/kernel/locking/lockdep.c:4123
[<ffffffff81306850>] ? mm_update_next_owner+0x6f0/0x6f0 kernel/kernel/exit.c:357
[<ffffffff813215e6>] ? __dequeue_signal+0x136/0x470 kernel/kernel/signal.c:550
[<ffffffff8132067b>] ? recalc_sigpending_tsk+0x13b/0x180 kernel/kernel/signal.c:145
[<ffffffff81309628>] do_group_exit+0x108/0x330 kernel/kernel/exit.c:880
[<ffffffff8132b9d4>] get_signal+0x5e4/0x14f0 kernel/kernel/signal.c:2307
[< inline >] ? kretprobe_table_lock kernel/kernel/kprobes.c:1113
[<ffffffff8151d355>] ? kprobe_flush_task+0xb5/0x450 kernel/kernel/kprobes.c:1158
[<ffffffff8115f7d3>] do_signal+0x83/0x1c90 kernel/arch/x86/kernel/signal.c:712
[<ffffffff8151d2a0>] ? recycle_rp_inst+0x310/0x310 kernel/include/linux/list.h:655
[<ffffffff8115f750>] ? setup_sigcontext+0x780/0x780 kernel/arch/x86/kernel/signal.c:165
[<ffffffff81380864>] ? finish_task_switch+0x424/0x5f0 kernel/kernel/sched/core.c:2692
[< inline >] ? finish_lock_switch kernel/kernel/sched/sched.h:1099
[<ffffffff81380560>] ? finish_task_switch+0x120/0x5f0 kernel/kernel/sched/core.c:2678
[< inline >] ? context_switch kernel/kernel/sched/core.c:2807
[<ffffffff85d794e9>] ? __schedule+0x919/0x1bd0 kernel/kernel/sched/core.c:3283
[<ffffffff81003901>] exit_to_usermode_loop+0xf1/0x1a0 kernel/arch/x86/entry/common.c:247
[< inline >] prepare_exit_to_usermode kernel/arch/x86/entry/common.c:282
[<ffffffff810062ef>] syscall_return_slowpath+0x19f/0x210 kernel/arch/x86/entry/common.c:344
[<ffffffff85d88022>] int_ret_from_sys_call+0x25/0x9f kernel/arch/x86/entry/entry_64.S:281
Memory state around the buggy address:
ffff880064e21680: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff880064e21700: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
>ffff880064e21780: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
^
ffff880064e21800: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff880064e21880: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
==================================================================
Fixes: 273ec51dd7ce ("net: ppp_generic - introduce net-namespace functionality v2")
Reported-by: Baozeng Ding <sploving1@gmail.com>
Signed-off-by: Guillaume Nault <g.nault@alphalink.fr>
Reviewed-by: Cyrill Gorcunov <gorcunov@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net> | ppp_unregister_compressor(struct compressor *cp)
{
struct compressor_entry *ce;
spin_lock(&compressor_list_lock);
ce = find_comp_entry(cp->compress_proto);
if (ce && ce->comp == cp) {
list_del(&ce->list);
kfree(ce);
}
spin_unlock(&compressor_list_lock);
}
| ppp_unregister_compressor(struct compressor *cp)
{
struct compressor_entry *ce;
spin_lock(&compressor_list_lock);
ce = find_comp_entry(cp->compress_proto);
if (ce && ce->comp == cp) {
list_del(&ce->list);
kfree(ce);
}
spin_unlock(&compressor_list_lock);
}
| C | linux | 0 |
null | null | null | https://github.com/chromium/chromium/commit/0d04639df7a3b2425c671ab08f68eb32b3d04cb1 | 0d04639df7a3b2425c671ab08f68eb32b3d04cb1 | Fix crasher with autofill infobar.
The "save credit card" information was deleted by the AutofillManager when the form is submitted but was still pointed to by the TabContents, causing a crasher when navigating.
BUG=50428
TEST=See steps in 56238.
Review URL: http://codereview.chromium.org/3466006
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@60095 0039d316-1c4b-4281-b951-d872f2087c98 | bool AutoFillCCInfoBarDelegate::ShouldExpire(
const NavigationController::LoadCommittedDetails& details) const {
return false;
}
| bool AutoFillCCInfoBarDelegate::ShouldExpire(
const NavigationController::LoadCommittedDetails& details) const {
return false;
}
| C | Chrome | 0 |
CVE-2013-1848 | https://www.cvedetails.com/cve/CVE-2013-1848/ | CWE-20 | https://github.com/torvalds/linux/commit/8d0c2d10dd72c5292eda7a06231056a4c972e4cc | 8d0c2d10dd72c5292eda7a06231056a4c972e4cc | ext3: Fix format string issues
ext3_msg() takes the printk prefix as the second parameter and the
format string as the third parameter. Two callers of ext3_msg omit the
prefix and pass the format string as the second parameter and the first
parameter to the format string as the third parameter. In both cases
this string comes from an arbitrary source, which means the string may
contain format string characters, which will lead to undefined and
potentially harmful behavior.
The issue was introduced in commit 4cf46b67eb("ext3: Unify log messages
in ext3") and is fixed by this patch.
CC: stable@vger.kernel.org
Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Jan Kara <jack@suse.cz> | static void init_once(void *foo)
{
struct ext3_inode_info *ei = (struct ext3_inode_info *) foo;
INIT_LIST_HEAD(&ei->i_orphan);
#ifdef CONFIG_EXT3_FS_XATTR
init_rwsem(&ei->xattr_sem);
#endif
mutex_init(&ei->truncate_mutex);
inode_init_once(&ei->vfs_inode);
}
| static void init_once(void *foo)
{
struct ext3_inode_info *ei = (struct ext3_inode_info *) foo;
INIT_LIST_HEAD(&ei->i_orphan);
#ifdef CONFIG_EXT3_FS_XATTR
init_rwsem(&ei->xattr_sem);
#endif
mutex_init(&ei->truncate_mutex);
inode_init_once(&ei->vfs_inode);
}
| C | linux | 0 |
null | null | null | https://github.com/chromium/chromium/commit/a1ce1b69e269a7e61ea0bf0691b90be0cbe9b4c5 | a1ce1b69e269a7e61ea0bf0691b90be0cbe9b4c5 | 2009-05-04 Kai Brüning <kai@granus.net>
Reviewed by Eric Seidel.
https://bugs.webkit.org/show_bug.cgi?id=24883
24883: Bad success test in parseXMLDocumentFragment in XMLTokenizerLibxml2.cpp
Fixed test whether all the chunk has been processed to correctly count utf8 bytes.
Test: fast/innerHTML/innerHTML-nbsp.xhtml
* dom/XMLTokenizerLibxml2.cpp:
(WebCore::parseXMLDocumentFragment):
git-svn-id: svn://svn.chromium.org/blink/trunk@43195 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | void XMLTokenizer::comment(const xmlChar* s)
{
if (m_parserStopped)
return;
if (m_parserPaused) {
m_pendingCallbacks->appendCommentCallback(s);
return;
}
exitText();
RefPtr<Node> newNode = new Comment(m_doc, toString(s));
m_currentNode->addChild(newNode.get());
if (m_view && !newNode->attached())
newNode->attach();
}
| void XMLTokenizer::comment(const xmlChar* s)
{
if (m_parserStopped)
return;
if (m_parserPaused) {
m_pendingCallbacks->appendCommentCallback(s);
return;
}
exitText();
RefPtr<Node> newNode = new Comment(m_doc, toString(s));
m_currentNode->addChild(newNode.get());
if (m_view && !newNode->attached())
newNode->attach();
}
| C | Chrome | 0 |
CVE-2011-4112 | https://www.cvedetails.com/cve/CVE-2011-4112/ | CWE-264 | https://github.com/torvalds/linux/commit/550fd08c2cebad61c548def135f67aba284c6162 | 550fd08c2cebad61c548def135f67aba284c6162 | net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared
After the last patch, we are left in a state in which only drivers calling
ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real
hardware call ether_setup for their net_devices and don't hold any state in
their skbs). There are a handful of drivers that violate this assumption of
course, and need to be fixed up. This patch identifies those drivers, and marks
them as not being able to support the safe transmission of skbs by clearing the
IFF_TX_SKB_SHARING flag in priv_flags
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Karsten Keil <isdn@linux-pingi.de>
CC: "David S. Miller" <davem@davemloft.net>
CC: Jay Vosburgh <fubar@us.ibm.com>
CC: Andy Gospodarek <andy@greyhouse.net>
CC: Patrick McHardy <kaber@trash.net>
CC: Krzysztof Halasa <khc@pm.waw.pl>
CC: "John W. Linville" <linville@tuxdriver.com>
CC: Greg Kroah-Hartman <gregkh@suse.de>
CC: Marcel Holtmann <marcel@holtmann.org>
CC: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: David S. Miller <davem@davemloft.net> | static int airo_get_rts(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *vwrq,
char *extra)
{
struct airo_info *local = dev->ml_priv;
readConfigRid(local, 1);
vwrq->value = le16_to_cpu(local->config.rtsThres);
vwrq->disabled = (vwrq->value >= AIRO_DEF_MTU);
vwrq->fixed = 1;
return 0;
}
| static int airo_get_rts(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *vwrq,
char *extra)
{
struct airo_info *local = dev->ml_priv;
readConfigRid(local, 1);
vwrq->value = le16_to_cpu(local->config.rtsThres);
vwrq->disabled = (vwrq->value >= AIRO_DEF_MTU);
vwrq->fixed = 1;
return 0;
}
| C | linux | 0 |
CVE-2014-9718 | https://www.cvedetails.com/cve/CVE-2014-9718/ | CWE-399 | https://git.qemu.org/?p=qemu.git;a=commit;h=3251bdcf1c67427d964517053c3d185b46e618e8 | 3251bdcf1c67427d964517053c3d185b46e618e8 | null | static void ide_rw_error(IDEState *s) {
ide_abort_command(s);
ide_set_irq(s->bus);
}
| static void ide_rw_error(IDEState *s) {
ide_abort_command(s);
ide_set_irq(s->bus);
}
| C | qemu | 0 |
CVE-2016-1683 | https://www.cvedetails.com/cve/CVE-2016-1683/ | CWE-119 | https://github.com/chromium/chromium/commit/96dbafe288dbe2f0cc45fa3c39daf6d0c37acbab | 96dbafe288dbe2f0cc45fa3c39daf6d0c37acbab | Roll libxslt to 891681e3e948f31732229f53cb6db7215f740fc7
BUG=583156,583171
Review URL: https://codereview.chromium.org/1853083002
Cr-Commit-Position: refs/heads/master@{#385338} | xsltNumber(xsltTransformContextPtr ctxt, xmlNodePtr node,
xmlNodePtr inst, xsltStylePreCompPtr castedComp)
{
#ifdef XSLT_REFACTORED
xsltStyleItemNumberPtr comp = (xsltStyleItemNumberPtr) castedComp;
#else
xsltStylePreCompPtr comp = castedComp;
#endif
xmlXPathContextPtr xpctxt;
xmlNsPtr *oldXPNamespaces;
int oldXPNsNr;
if (comp == NULL) {
xsltTransformError(ctxt, NULL, inst,
"xsl:number : compilation failed\n");
return;
}
if ((ctxt == NULL) || (node == NULL) || (inst == NULL) || (comp == NULL))
return;
comp->numdata.doc = inst->doc;
comp->numdata.node = inst;
xpctxt = ctxt->xpathCtxt;
oldXPNsNr = xpctxt->nsNr;
oldXPNamespaces = xpctxt->namespaces;
#ifdef XSLT_REFACTORED
if (comp->inScopeNs != NULL) {
xpctxt->namespaces = comp->inScopeNs->list;
xpctxt->nsNr = comp->inScopeNs->xpathNumber;
} else {
xpctxt->namespaces = NULL;
xpctxt->nsNr = 0;
}
#else
xpctxt->namespaces = comp->nsList;
xpctxt->nsNr = comp->nsNr;
#endif
xsltNumberFormat(ctxt, &comp->numdata, node);
xpctxt->nsNr = oldXPNsNr;
xpctxt->namespaces = oldXPNamespaces;
}
| xsltNumber(xsltTransformContextPtr ctxt, xmlNodePtr node,
xmlNodePtr inst, xsltStylePreCompPtr castedComp)
{
#ifdef XSLT_REFACTORED
xsltStyleItemNumberPtr comp = (xsltStyleItemNumberPtr) castedComp;
#else
xsltStylePreCompPtr comp = castedComp;
#endif
if (comp == NULL) {
xsltTransformError(ctxt, NULL, inst,
"xsl:number : compilation failed\n");
return;
}
if ((ctxt == NULL) || (node == NULL) || (inst == NULL) || (comp == NULL))
return;
comp->numdata.doc = inst->doc;
comp->numdata.node = inst;
xsltNumberFormat(ctxt, &comp->numdata, node);
}
| C | Chrome | 1 |
CVE-2014-3160 | https://www.cvedetails.com/cve/CVE-2014-3160/ | CWE-264 | https://github.com/chromium/chromium/commit/ee281f7cac9df44fe241a37f188b28be8845ded0 | ee281f7cac9df44fe241a37f188b28be8845ded0 | Enforce SVG image security rules
SVG images have unique security rules that prevent them from loading
any external resources. This patch enforces these rules in
ResourceFetcher::canRequest for all non-data-uri resources. This locks
down our SVG resource handling and fixes two security bugs.
In the case of SVG images that reference other images, we had a bug
where a cached subresource would be used directly from the cache.
This has been fixed because the canRequest check occurs before we use
cached resources.
In the case of SVG images that use CSS imports, we had a bug where
imports were blindly requested. This has been fixed by stopping all
non-data-uri requests in SVG images.
With this patch we now match Gecko's behavior on both testcases.
BUG=380885, 382296
Review URL: https://codereview.chromium.org/320763002
git-svn-id: svn://svn.chromium.org/blink/trunk@176084 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | static ResourceRequest::TargetType requestTargetType(const ResourceFetcher* fetcher, const ResourceRequest& request, Resource::Type type)
{
switch (type) {
case Resource::MainResource:
if (fetcher->frame()->tree().parent())
return ResourceRequest::TargetIsSubframe;
return ResourceRequest::TargetIsMainFrame;
case Resource::XSLStyleSheet:
ASSERT(RuntimeEnabledFeatures::xsltEnabled());
case Resource::CSSStyleSheet:
return ResourceRequest::TargetIsStyleSheet;
case Resource::Script:
return ResourceRequest::TargetIsScript;
case Resource::Font:
return ResourceRequest::TargetIsFont;
case Resource::Image:
return ResourceRequest::TargetIsImage;
case Resource::Raw:
case Resource::ImportResource:
return ResourceRequest::TargetIsSubresource;
case Resource::LinkPrefetch:
return ResourceRequest::TargetIsPrefetch;
case Resource::LinkSubresource:
return ResourceRequest::TargetIsSubresource;
case Resource::TextTrack:
return ResourceRequest::TargetIsTextTrack;
case Resource::SVGDocument:
return ResourceRequest::TargetIsImage;
case Resource::Media:
return ResourceRequest::TargetIsMedia;
}
ASSERT_NOT_REACHED();
return ResourceRequest::TargetIsSubresource;
}
| static ResourceRequest::TargetType requestTargetType(const ResourceFetcher* fetcher, const ResourceRequest& request, Resource::Type type)
{
switch (type) {
case Resource::MainResource:
if (fetcher->frame()->tree().parent())
return ResourceRequest::TargetIsSubframe;
return ResourceRequest::TargetIsMainFrame;
case Resource::XSLStyleSheet:
ASSERT(RuntimeEnabledFeatures::xsltEnabled());
case Resource::CSSStyleSheet:
return ResourceRequest::TargetIsStyleSheet;
case Resource::Script:
return ResourceRequest::TargetIsScript;
case Resource::Font:
return ResourceRequest::TargetIsFont;
case Resource::Image:
return ResourceRequest::TargetIsImage;
case Resource::Raw:
case Resource::ImportResource:
return ResourceRequest::TargetIsSubresource;
case Resource::LinkPrefetch:
return ResourceRequest::TargetIsPrefetch;
case Resource::LinkSubresource:
return ResourceRequest::TargetIsSubresource;
case Resource::TextTrack:
return ResourceRequest::TargetIsTextTrack;
case Resource::SVGDocument:
return ResourceRequest::TargetIsImage;
case Resource::Media:
return ResourceRequest::TargetIsMedia;
}
ASSERT_NOT_REACHED();
return ResourceRequest::TargetIsSubresource;
}
| C | Chrome | 0 |
CVE-2011-2349 | https://www.cvedetails.com/cve/CVE-2011-2349/ | CWE-399 | https://github.com/chromium/chromium/commit/e755d9faf5c7d75a8ea290892cb1b5cc07c412ec | e755d9faf5c7d75a8ea290892cb1b5cc07c412ec | cros: The next 100 clang plugin errors.
BUG=none
TEST=none
TBR=dpolukhin
Review URL: http://codereview.chromium.org/7022008
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@85418 0039d316-1c4b-4281-b951-d872f2087c98 | void OfflineLoadPage::CommandReceived(const std::string& cmd) {
std::string command(cmd);
if (command.length() > 1 && command[0] == '"') {
command = command.substr(1, command.length() - 2);
}
if (command == "proceed") {
Proceed();
} else if (command == "dontproceed") {
DontProceed();
} else if (command == "open_network_settings") {
Browser* browser = BrowserList::GetLastActive();
DCHECK(browser);
browser->ShowOptionsTab(chrome::kInternetOptionsSubPage);
} else if (command == "open_activate_broadband") {
Browser* browser = BrowserList::GetLastActive();
DCHECK(browser);
browser->OpenMobilePlanTabAndActivate();
} else {
LOG(WARNING) << "Unknown command:" << cmd;
}
}
| void OfflineLoadPage::CommandReceived(const std::string& cmd) {
std::string command(cmd);
if (command.length() > 1 && command[0] == '"') {
command = command.substr(1, command.length() - 2);
}
if (command == "proceed") {
Proceed();
} else if (command == "dontproceed") {
DontProceed();
} else if (command == "open_network_settings") {
Browser* browser = BrowserList::GetLastActive();
DCHECK(browser);
browser->ShowOptionsTab(chrome::kInternetOptionsSubPage);
} else if (command == "open_activate_broadband") {
Browser* browser = BrowserList::GetLastActive();
DCHECK(browser);
browser->OpenMobilePlanTabAndActivate();
} else {
LOG(WARNING) << "Unknown command:" << cmd;
}
}
| C | Chrome | 0 |
CVE-2015-1278 | https://www.cvedetails.com/cve/CVE-2015-1278/ | CWE-254 | https://github.com/chromium/chromium/commit/784f56a9c97a838448dd23f9bdc7c05fe8e639b3 | 784f56a9c97a838448dd23f9bdc7c05fe8e639b3 | Correctly reset FP in RFHI whenever origin changes
Bug: 713364
Change-Id: Id8bb923750e20f3db6fc9358b1d44120513ac95f
CQ_INCLUDE_TRYBOTS=master.tryserver.chromium.linux:linux_site_isolation
Change-Id: Id8bb923750e20f3db6fc9358b1d44120513ac95f
Reviewed-on: https://chromium-review.googlesource.com/482380
Commit-Queue: Ian Clelland <iclelland@chromium.org>
Reviewed-by: Charles Reis <creis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#466778} | void RenderFrameHostImpl::SetAccessibilityCallbackForTesting(
const base::Callback<void(RenderFrameHostImpl*, ui::AXEvent, int)>&
callback) {
accessibility_testing_callback_ = callback;
}
| void RenderFrameHostImpl::SetAccessibilityCallbackForTesting(
const base::Callback<void(RenderFrameHostImpl*, ui::AXEvent, int)>&
callback) {
accessibility_testing_callback_ = callback;
}
| C | Chrome | 0 |
CVE-2017-18120 | https://www.cvedetails.com/cve/CVE-2017-18120/ | CWE-415 | https://github.com/kohler/gifsicle/commit/118a46090c50829dc543179019e6140e1235f909 | 118a46090c50829dc543179019e6140e1235f909 | gif_read: Set last_name = NULL unconditionally.
With a non-malicious GIF, last_name is set to NULL when a name
extension is followed by an image. Reported in #117, via
Debian, via a KAIST fuzzing program. | read_image_data(Gif_Context *gfc, Gif_Reader *grr)
{
/* we need a bit more than GIF_MAX_BLOCK in case a single code is split
across blocks */
uint8_t buffer[GIF_MAX_BLOCK + 5];
int i;
uint32_t accum;
int bit_position;
int bit_length;
Gif_Code code;
Gif_Code old_code;
Gif_Code clear_code;
Gif_Code eoi_code;
Gif_Code next_code;
#define CUR_BUMP_CODE (1 << bits_needed)
#define CUR_CODE_MASK ((1 << bits_needed) - 1)
int min_code_size;
int bits_needed;
gfc->decodepos = 0;
min_code_size = gifgetbyte(grr);
GIF_DEBUG(("\n\nmin_code_size(%d) ", min_code_size));
if (min_code_size >= GIF_MAX_CODE_BITS) {
gif_read_error(gfc, 1, "image corrupted, min_code_size too big");
min_code_size = GIF_MAX_CODE_BITS - 1;
} else if (min_code_size < 2) {
gif_read_error(gfc, 1, "image corrupted, min_code_size too small");
min_code_size = 2;
}
clear_code = 1 << min_code_size;
for (code = 0; code < clear_code; code++) {
gfc->prefix[code] = 49428;
gfc->suffix[code] = (uint8_t)code;
gfc->length[code] = 1;
}
eoi_code = clear_code + 1;
next_code = eoi_code;
bits_needed = min_code_size + 1;
code = clear_code;
bit_length = bit_position = 0;
/* Thus the 'Read in the next data block.' code below will be invoked on the
first time through: exactly right! */
while (1) {
old_code = code;
/* GET A CODE INTO THE 'code' VARIABLE.
*
* 9.Dec.1998 - Rather than maintain a byte pointer and a bit offset into
* the current byte (and the processing associated with that), we maintain
* one number: the offset, in bits, from the beginning of 'buffer'. This
* much cleaner choice was inspired by Patrick J. Naughton
* <naughton@wind.sun.com>'s GIF-reading code, which does the same thing.
* His code distributed as part of XV in xvgif.c. */
if (bit_position + bits_needed > bit_length)
/* Read in the next data block. */
if (!read_image_block(grr, buffer, &bit_position, &bit_length,
bits_needed))
goto zero_length_block;
i = bit_position / 8;
accum = buffer[i] + (buffer[i+1] << 8);
if (bits_needed >= 8)
accum |= (buffer[i+2]) << 16;
code = (Gif_Code)((accum >> (bit_position % 8)) & CUR_CODE_MASK);
bit_position += bits_needed;
GIF_DEBUG(("%d ", code));
/* CHECK FOR SPECIAL OR BAD CODES: clear_code, eoi_code, or a code that is
* too large. */
if (code == clear_code) {
GIF_DEBUG(("clear "));
bits_needed = min_code_size + 1;
next_code = eoi_code;
continue;
} else if (code == eoi_code)
break;
else if (code > next_code && next_code && next_code != clear_code) {
/* code > next_code: a (hopefully recoverable) error.
Bug fix, 5/27: Do this even if old_code == clear_code, and set code
to 0 to prevent errors later. (If we didn't zero code, we'd later set
old_code = code; then we had old_code >= next_code; so the prefixes
array got all screwed up!)
Bug fix, 4/12/2010: It is not an error if next_code == clear_code.
This happens at the end of a large GIF: see the next comment ("If no
meaningful next code should be defined...."). */
if (gfc->errors[1] < 20)
gif_read_error(gfc, 1, "image corrupted, code out of range");
else if (gfc->errors[1] == 20)
gif_read_error(gfc, 1, "(not reporting more errors)");
code = 0;
}
/* PROCESS THE CURRENT CODE and define the next code. If no meaningful
* next code should be defined, then we have set next_code to either
* 'eoi_code' or 'clear_code' -- so we'll store useless prefix/suffix data
* in a useless place. */
/* *First,* set up the prefix and length for the next code
(in case code == next_code). */
gfc->prefix[next_code] = old_code;
gfc->length[next_code] = gfc->length[old_code] + 1;
/* Use one_code to process code. It's nice that it returns the first
pixel in code: that's what we need. */
gfc->suffix[next_code] = one_code(gfc, code);
/* Special processing if code == next_code: we didn't know code's final
suffix when we called one_code, but we do now. */
/* 7.Mar.2014 -- Avoid error if image has zero width/height. */
if (code == next_code && gfc->image + gfc->decodepos <= gfc->maximage)
gfc->image[gfc->decodepos - 1] = gfc->suffix[next_code];
/* Increment next_code except for the 'clear_code' special case (that's
when we're reading at the end of a GIF) */
if (next_code != clear_code) {
next_code++;
if (next_code == CUR_BUMP_CODE) {
if (bits_needed < GIF_MAX_CODE_BITS)
bits_needed++;
else
next_code = clear_code;
}
}
}
/* read blocks until zero-length reached. */
i = gifgetbyte(grr);
GIF_DEBUG(("\nafter_image(%d)\n", i));
while (i > 0) {
gifgetblock(buffer, i, grr);
i = gifgetbyte(grr);
GIF_DEBUG(("\nafter_image(%d)\n", i));
}
/* zero-length block reached. */
zero_length_block: {
long delta = (long) (gfc->maximage - gfc->image) - (long) gfc->decodepos;
char buf[BUFSIZ];
if (delta > 0) {
sprintf(buf, "missing %ld %s of image data", delta,
delta == 1 ? "pixel" : "pixels");
gif_read_error(gfc, 1, buf);
memset(&gfc->image[gfc->decodepos], 0, delta);
} else if (delta < -1) {
/* One pixel of superfluous data is OK; that could be the
code == next_code case. */
sprintf(buf, "%ld superfluous pixels of image data", -delta);
gif_read_error(gfc, 0, buf);
}
}
}
| read_image_data(Gif_Context *gfc, Gif_Reader *grr)
{
/* we need a bit more than GIF_MAX_BLOCK in case a single code is split
across blocks */
uint8_t buffer[GIF_MAX_BLOCK + 5];
int i;
uint32_t accum;
int bit_position;
int bit_length;
Gif_Code code;
Gif_Code old_code;
Gif_Code clear_code;
Gif_Code eoi_code;
Gif_Code next_code;
#define CUR_BUMP_CODE (1 << bits_needed)
#define CUR_CODE_MASK ((1 << bits_needed) - 1)
int min_code_size;
int bits_needed;
gfc->decodepos = 0;
min_code_size = gifgetbyte(grr);
GIF_DEBUG(("\n\nmin_code_size(%d) ", min_code_size));
if (min_code_size >= GIF_MAX_CODE_BITS) {
gif_read_error(gfc, 1, "image corrupted, min_code_size too big");
min_code_size = GIF_MAX_CODE_BITS - 1;
} else if (min_code_size < 2) {
gif_read_error(gfc, 1, "image corrupted, min_code_size too small");
min_code_size = 2;
}
clear_code = 1 << min_code_size;
for (code = 0; code < clear_code; code++) {
gfc->prefix[code] = 49428;
gfc->suffix[code] = (uint8_t)code;
gfc->length[code] = 1;
}
eoi_code = clear_code + 1;
next_code = eoi_code;
bits_needed = min_code_size + 1;
code = clear_code;
bit_length = bit_position = 0;
/* Thus the 'Read in the next data block.' code below will be invoked on the
first time through: exactly right! */
while (1) {
old_code = code;
/* GET A CODE INTO THE 'code' VARIABLE.
*
* 9.Dec.1998 - Rather than maintain a byte pointer and a bit offset into
* the current byte (and the processing associated with that), we maintain
* one number: the offset, in bits, from the beginning of 'buffer'. This
* much cleaner choice was inspired by Patrick J. Naughton
* <naughton@wind.sun.com>'s GIF-reading code, which does the same thing.
* His code distributed as part of XV in xvgif.c. */
if (bit_position + bits_needed > bit_length)
/* Read in the next data block. */
if (!read_image_block(grr, buffer, &bit_position, &bit_length,
bits_needed))
goto zero_length_block;
i = bit_position / 8;
accum = buffer[i] + (buffer[i+1] << 8);
if (bits_needed >= 8)
accum |= (buffer[i+2]) << 16;
code = (Gif_Code)((accum >> (bit_position % 8)) & CUR_CODE_MASK);
bit_position += bits_needed;
GIF_DEBUG(("%d ", code));
/* CHECK FOR SPECIAL OR BAD CODES: clear_code, eoi_code, or a code that is
* too large. */
if (code == clear_code) {
GIF_DEBUG(("clear "));
bits_needed = min_code_size + 1;
next_code = eoi_code;
continue;
} else if (code == eoi_code)
break;
else if (code > next_code && next_code && next_code != clear_code) {
/* code > next_code: a (hopefully recoverable) error.
Bug fix, 5/27: Do this even if old_code == clear_code, and set code
to 0 to prevent errors later. (If we didn't zero code, we'd later set
old_code = code; then we had old_code >= next_code; so the prefixes
array got all screwed up!)
Bug fix, 4/12/2010: It is not an error if next_code == clear_code.
This happens at the end of a large GIF: see the next comment ("If no
meaningful next code should be defined...."). */
if (gfc->errors[1] < 20)
gif_read_error(gfc, 1, "image corrupted, code out of range");
else if (gfc->errors[1] == 20)
gif_read_error(gfc, 1, "(not reporting more errors)");
code = 0;
}
/* PROCESS THE CURRENT CODE and define the next code. If no meaningful
* next code should be defined, then we have set next_code to either
* 'eoi_code' or 'clear_code' -- so we'll store useless prefix/suffix data
* in a useless place. */
/* *First,* set up the prefix and length for the next code
(in case code == next_code). */
gfc->prefix[next_code] = old_code;
gfc->length[next_code] = gfc->length[old_code] + 1;
/* Use one_code to process code. It's nice that it returns the first
pixel in code: that's what we need. */
gfc->suffix[next_code] = one_code(gfc, code);
/* Special processing if code == next_code: we didn't know code's final
suffix when we called one_code, but we do now. */
/* 7.Mar.2014 -- Avoid error if image has zero width/height. */
if (code == next_code && gfc->image + gfc->decodepos <= gfc->maximage)
gfc->image[gfc->decodepos - 1] = gfc->suffix[next_code];
/* Increment next_code except for the 'clear_code' special case (that's
when we're reading at the end of a GIF) */
if (next_code != clear_code) {
next_code++;
if (next_code == CUR_BUMP_CODE) {
if (bits_needed < GIF_MAX_CODE_BITS)
bits_needed++;
else
next_code = clear_code;
}
}
}
/* read blocks until zero-length reached. */
i = gifgetbyte(grr);
GIF_DEBUG(("\nafter_image(%d)\n", i));
while (i > 0) {
gifgetblock(buffer, i, grr);
i = gifgetbyte(grr);
GIF_DEBUG(("\nafter_image(%d)\n", i));
}
/* zero-length block reached. */
zero_length_block: {
long delta = (long) (gfc->maximage - gfc->image) - (long) gfc->decodepos;
char buf[BUFSIZ];
if (delta > 0) {
sprintf(buf, "missing %ld %s of image data", delta,
delta == 1 ? "pixel" : "pixels");
gif_read_error(gfc, 1, buf);
memset(&gfc->image[gfc->decodepos], 0, delta);
} else if (delta < -1) {
/* One pixel of superfluous data is OK; that could be the
code == next_code case. */
sprintf(buf, "%ld superfluous pixels of image data", -delta);
gif_read_error(gfc, 0, buf);
}
}
}
| C | gifsicle | 0 |
CVE-2012-5148 | https://www.cvedetails.com/cve/CVE-2012-5148/ | CWE-20 | https://github.com/chromium/chromium/commit/e89cfcb9090e8c98129ae9160c513f504db74599 | e89cfcb9090e8c98129ae9160c513f504db74599 | Remove TabContents from TabStripModelObserver::TabDetachedAt.
BUG=107201
TEST=no visible change
Review URL: https://chromiumcodereview.appspot.com/11293205
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@167122 0039d316-1c4b-4281-b951-d872f2087c98 | void TabStripModel::NotifyIfActiveOrSelectionChanged(
TabContents* old_contents,
NotifyTypes notify_types,
const TabStripSelectionModel& old_model) {
NotifyIfActiveTabChanged(old_contents, notify_types);
if (!selection_model().Equals(old_model)) {
FOR_EACH_OBSERVER(TabStripModelObserver, observers_,
TabSelectionChanged(this, old_model));
}
}
| void TabStripModel::NotifyIfActiveOrSelectionChanged(
TabContents* old_contents,
NotifyTypes notify_types,
const TabStripSelectionModel& old_model) {
NotifyIfActiveTabChanged(old_contents, notify_types);
if (!selection_model().Equals(old_model)) {
FOR_EACH_OBSERVER(TabStripModelObserver, observers_,
TabSelectionChanged(this, old_model));
}
}
| C | Chrome | 0 |
CVE-2014-9888 | https://www.cvedetails.com/cve/CVE-2014-9888/ | CWE-264 | https://github.com/torvalds/linux/commit/0ea1ec713f04bdfac343c9702b21cd3a7c711826 | 0ea1ec713f04bdfac343c9702b21cd3a7c711826 | ARM: dma-mapping: don't allow DMA mappings to be marked executable
DMA mapping permissions were being derived from pgprot_kernel directly
without using PAGE_KERNEL. This causes them to be marked with executable
permission, which is not what we want. Fix this.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk> | void __init init_dma_coherent_pool_size(unsigned long size)
{
/*
* Catch any attempt to set the pool size too late.
*/
BUG_ON(atomic_pool.vaddr);
/*
* Set architecture specific coherent pool size only if
* it has not been changed by kernel command line parameter.
*/
if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
atomic_pool.size = size;
}
| void __init init_dma_coherent_pool_size(unsigned long size)
{
/*
* Catch any attempt to set the pool size too late.
*/
BUG_ON(atomic_pool.vaddr);
/*
* Set architecture specific coherent pool size only if
* it has not been changed by kernel command line parameter.
*/
if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
atomic_pool.size = size;
}
| C | linux | 0 |
CVE-2019-5837 | https://www.cvedetails.com/cve/CVE-2019-5837/ | CWE-200 | https://github.com/chromium/chromium/commit/04aaacb936a08d70862d6d9d7e8354721ae46be8 | 04aaacb936a08d70862d6d9d7e8354721ae46be8 | Reland "AppCache: Add padding to cross-origin responses."
This is a reland of 85b389caa7d725cdd31f59e9a2b79ff54804b7b7
Initialized CacheRecord::padding_size to 0.
Original change's description:
> AppCache: Add padding to cross-origin responses.
>
> Bug: 918293
> Change-Id: I4f16640f06feac009d6bbbb624951da6d2669f6c
> Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1488059
> Commit-Queue: Staphany Park <staphany@chromium.org>
> Reviewed-by: Victor Costan <pwnall@chromium.org>
> Reviewed-by: Marijn Kruisselbrink <mek@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#644624}
Bug: 918293
Change-Id: Ie1d3f99c7e8a854d33255a4d66243da2ce16441c
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1539906
Reviewed-by: Victor Costan <pwnall@chromium.org>
Commit-Queue: Staphany Park <staphany@chromium.org>
Cr-Commit-Position: refs/heads/master@{#644719} | void Verify_FindMainResponseWithMultipleHits5() {
EXPECT_EQ(kFallbackTestUrl, delegate()->found_url_);
EXPECT_EQ(kManifestUrl2, delegate()->found_manifest_url_);
EXPECT_EQ(2, delegate()->found_cache_id_);
EXPECT_EQ(2, delegate()->found_group_id_);
EXPECT_FALSE(delegate()->found_entry_.has_response_id());
EXPECT_EQ(2 + kFallbackEntryIdOffset,
delegate()->found_fallback_entry_.response_id());
EXPECT_TRUE(delegate()->found_fallback_entry_.IsFallback());
EXPECT_EQ(kEntryUrl2, delegate()->found_namespace_entry_url_);
TestFinished();
}
| void Verify_FindMainResponseWithMultipleHits5() {
EXPECT_EQ(kFallbackTestUrl, delegate()->found_url_);
EXPECT_EQ(kManifestUrl2, delegate()->found_manifest_url_);
EXPECT_EQ(2, delegate()->found_cache_id_);
EXPECT_EQ(2, delegate()->found_group_id_);
EXPECT_FALSE(delegate()->found_entry_.has_response_id());
EXPECT_EQ(2 + kFallbackEntryIdOffset,
delegate()->found_fallback_entry_.response_id());
EXPECT_TRUE(delegate()->found_fallback_entry_.IsFallback());
EXPECT_EQ(kEntryUrl2, delegate()->found_namespace_entry_url_);
TestFinished();
}
| C | Chrome | 0 |
CVE-2017-16527 | https://www.cvedetails.com/cve/CVE-2017-16527/ | CWE-416 | https://github.com/torvalds/linux/commit/124751d5e63c823092060074bd0abaae61aaa9c4 | 124751d5e63c823092060074bd0abaae61aaa9c4 | ALSA: usb-audio: Kill stray URB at exiting
USB-audio driver may leave a stray URB for the mixer interrupt when it
exits by some error during probe. This leads to a use-after-free
error as spotted by syzkaller like:
==================================================================
BUG: KASAN: use-after-free in snd_usb_mixer_interrupt+0x604/0x6f0
Call Trace:
<IRQ>
__dump_stack lib/dump_stack.c:16
dump_stack+0x292/0x395 lib/dump_stack.c:52
print_address_description+0x78/0x280 mm/kasan/report.c:252
kasan_report_error mm/kasan/report.c:351
kasan_report+0x23d/0x350 mm/kasan/report.c:409
__asan_report_load8_noabort+0x19/0x20 mm/kasan/report.c:430
snd_usb_mixer_interrupt+0x604/0x6f0 sound/usb/mixer.c:2490
__usb_hcd_giveback_urb+0x2e0/0x650 drivers/usb/core/hcd.c:1779
....
Allocated by task 1484:
save_stack_trace+0x1b/0x20 arch/x86/kernel/stacktrace.c:59
save_stack+0x43/0xd0 mm/kasan/kasan.c:447
set_track mm/kasan/kasan.c:459
kasan_kmalloc+0xad/0xe0 mm/kasan/kasan.c:551
kmem_cache_alloc_trace+0x11e/0x2d0 mm/slub.c:2772
kmalloc ./include/linux/slab.h:493
kzalloc ./include/linux/slab.h:666
snd_usb_create_mixer+0x145/0x1010 sound/usb/mixer.c:2540
create_standard_mixer_quirk+0x58/0x80 sound/usb/quirks.c:516
snd_usb_create_quirk+0x92/0x100 sound/usb/quirks.c:560
create_composite_quirk+0x1c4/0x3e0 sound/usb/quirks.c:59
snd_usb_create_quirk+0x92/0x100 sound/usb/quirks.c:560
usb_audio_probe+0x1040/0x2c10 sound/usb/card.c:618
....
Freed by task 1484:
save_stack_trace+0x1b/0x20 arch/x86/kernel/stacktrace.c:59
save_stack+0x43/0xd0 mm/kasan/kasan.c:447
set_track mm/kasan/kasan.c:459
kasan_slab_free+0x72/0xc0 mm/kasan/kasan.c:524
slab_free_hook mm/slub.c:1390
slab_free_freelist_hook mm/slub.c:1412
slab_free mm/slub.c:2988
kfree+0xf6/0x2f0 mm/slub.c:3919
snd_usb_mixer_free+0x11a/0x160 sound/usb/mixer.c:2244
snd_usb_mixer_dev_free+0x36/0x50 sound/usb/mixer.c:2250
__snd_device_free+0x1ff/0x380 sound/core/device.c:91
snd_device_free_all+0x8f/0xe0 sound/core/device.c:244
snd_card_do_free sound/core/init.c:461
release_card_device+0x47/0x170 sound/core/init.c:181
device_release+0x13f/0x210 drivers/base/core.c:814
....
Actually such a URB is killed properly at disconnection when the
device gets probed successfully, and what we need is to apply it for
the error-path, too.
In this patch, we apply snd_usb_mixer_disconnect() at releasing.
Also introduce a new flag, disconnected, to struct usb_mixer_interface
for not performing the disconnection procedure twice.
Reported-by: Andrey Konovalov <andreyknvl@google.com>
Tested-by: Andrey Konovalov <andreyknvl@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Takashi Iwai <tiwai@suse.de> | static int mixer_ctl_feature_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct usb_mixer_elem_info *cval = kcontrol->private_data;
if (cval->val_type == USB_MIXER_BOOLEAN ||
cval->val_type == USB_MIXER_INV_BOOLEAN)
uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
else
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = cval->channels;
if (cval->val_type == USB_MIXER_BOOLEAN ||
cval->val_type == USB_MIXER_INV_BOOLEAN) {
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 1;
} else {
if (!cval->initialized) {
get_min_max_with_quirks(cval, 0, kcontrol);
if (cval->initialized && cval->dBmin >= cval->dBmax) {
kcontrol->vd[0].access &=
~(SNDRV_CTL_ELEM_ACCESS_TLV_READ |
SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK);
snd_ctl_notify(cval->head.mixer->chip->card,
SNDRV_CTL_EVENT_MASK_INFO,
&kcontrol->id);
}
}
uinfo->value.integer.min = 0;
uinfo->value.integer.max =
(cval->max - cval->min + cval->res - 1) / cval->res;
}
return 0;
}
| static int mixer_ctl_feature_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
struct usb_mixer_elem_info *cval = kcontrol->private_data;
if (cval->val_type == USB_MIXER_BOOLEAN ||
cval->val_type == USB_MIXER_INV_BOOLEAN)
uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
else
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = cval->channels;
if (cval->val_type == USB_MIXER_BOOLEAN ||
cval->val_type == USB_MIXER_INV_BOOLEAN) {
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 1;
} else {
if (!cval->initialized) {
get_min_max_with_quirks(cval, 0, kcontrol);
if (cval->initialized && cval->dBmin >= cval->dBmax) {
kcontrol->vd[0].access &=
~(SNDRV_CTL_ELEM_ACCESS_TLV_READ |
SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK);
snd_ctl_notify(cval->head.mixer->chip->card,
SNDRV_CTL_EVENT_MASK_INFO,
&kcontrol->id);
}
}
uinfo->value.integer.min = 0;
uinfo->value.integer.max =
(cval->max - cval->min + cval->res - 1) / cval->res;
}
return 0;
}
| C | linux | 0 |
CVE-2016-2428 | https://www.cvedetails.com/cve/CVE-2016-2428/ | CWE-119 | https://android.googlesource.com/platform/external/aac/+/5d4405f601fa11a8955fd7611532c982420e4206 | 5d4405f601fa11a8955fd7611532c982420e4206 | Fix stack corruption happening in aacDecoder_drcExtractAndMap()
In the aacDecoder_drcExtractAndMap() function, self->numThreads
can be used after having exceeded its intended max value,
MAX_DRC_THREADS, causing memory to be cleared after the
threadBs[MAX_DRC_THREADS] array.
The crash is prevented by never using self->numThreads with
a value equal to or greater than MAX_DRC_THREADS.
A proper fix will be required as there seems to be an issue as
to which entry in the threadBs array is meant to be initialized
and used.
Bug 26751339
Change-Id: I655cc40c35d4206ab72e83b2bdb751be2fe52b5a
| void aacDecoder_drcInitChannelData (
CDrcChannelData *pDrcChData )
{
if (pDrcChData != NULL) {
pDrcChData->expiryCount = 0;
pDrcChData->numBands = 1;
pDrcChData->bandTop[0] = (1024 >> 2) - 1;
pDrcChData->drcValue[0] = 0;
pDrcChData->drcInterpolationScheme = 0;
pDrcChData->drcDataType = UNKNOWN_PAYLOAD;
}
}
| void aacDecoder_drcInitChannelData (
CDrcChannelData *pDrcChData )
{
if (pDrcChData != NULL) {
pDrcChData->expiryCount = 0;
pDrcChData->numBands = 1;
pDrcChData->bandTop[0] = (1024 >> 2) - 1;
pDrcChData->drcValue[0] = 0;
pDrcChData->drcInterpolationScheme = 0;
pDrcChData->drcDataType = UNKNOWN_PAYLOAD;
}
}
| C | Android | 0 |
CVE-2017-12146 | https://www.cvedetails.com/cve/CVE-2017-12146/ | CWE-362 | https://github.com/torvalds/linux/commit/6265539776a0810b7ce6398c27866ddb9c6bd154 | 6265539776a0810b7ce6398c27866ddb9c6bd154 | driver core: platform: fix race condition with driver_override
The driver_override implementation is susceptible to race condition when
different threads are reading vs storing a different driver override.
Add locking to avoid race condition.
Fixes: 3d713e0e382e ("driver core: platform: add device binding path 'driver_override'")
Cc: stable@vger.kernel.org
Signed-off-by: Adrian Salido <salidoa@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | int platform_device_add(struct platform_device *pdev)
{
int i, ret;
if (!pdev)
return -EINVAL;
if (!pdev->dev.parent)
pdev->dev.parent = &platform_bus;
pdev->dev.bus = &platform_bus_type;
switch (pdev->id) {
default:
dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
break;
case PLATFORM_DEVID_NONE:
dev_set_name(&pdev->dev, "%s", pdev->name);
break;
case PLATFORM_DEVID_AUTO:
/*
* Automatically allocated device ID. We mark it as such so
* that we remember it must be freed, and we append a suffix
* to avoid namespace collision with explicit IDs.
*/
ret = ida_simple_get(&platform_devid_ida, 0, 0, GFP_KERNEL);
if (ret < 0)
goto err_out;
pdev->id = ret;
pdev->id_auto = true;
dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
break;
}
for (i = 0; i < pdev->num_resources; i++) {
struct resource *p, *r = &pdev->resource[i];
if (r->name == NULL)
r->name = dev_name(&pdev->dev);
p = r->parent;
if (!p) {
if (resource_type(r) == IORESOURCE_MEM)
p = &iomem_resource;
else if (resource_type(r) == IORESOURCE_IO)
p = &ioport_resource;
}
if (p && insert_resource(p, r)) {
dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
ret = -EBUSY;
goto failed;
}
}
pr_debug("Registering platform device '%s'. Parent at %s\n",
dev_name(&pdev->dev), dev_name(pdev->dev.parent));
ret = device_add(&pdev->dev);
if (ret == 0)
return ret;
failed:
if (pdev->id_auto) {
ida_simple_remove(&platform_devid_ida, pdev->id);
pdev->id = PLATFORM_DEVID_AUTO;
}
while (--i >= 0) {
struct resource *r = &pdev->resource[i];
if (r->parent)
release_resource(r);
}
err_out:
return ret;
}
| int platform_device_add(struct platform_device *pdev)
{
int i, ret;
if (!pdev)
return -EINVAL;
if (!pdev->dev.parent)
pdev->dev.parent = &platform_bus;
pdev->dev.bus = &platform_bus_type;
switch (pdev->id) {
default:
dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
break;
case PLATFORM_DEVID_NONE:
dev_set_name(&pdev->dev, "%s", pdev->name);
break;
case PLATFORM_DEVID_AUTO:
/*
* Automatically allocated device ID. We mark it as such so
* that we remember it must be freed, and we append a suffix
* to avoid namespace collision with explicit IDs.
*/
ret = ida_simple_get(&platform_devid_ida, 0, 0, GFP_KERNEL);
if (ret < 0)
goto err_out;
pdev->id = ret;
pdev->id_auto = true;
dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
break;
}
for (i = 0; i < pdev->num_resources; i++) {
struct resource *p, *r = &pdev->resource[i];
if (r->name == NULL)
r->name = dev_name(&pdev->dev);
p = r->parent;
if (!p) {
if (resource_type(r) == IORESOURCE_MEM)
p = &iomem_resource;
else if (resource_type(r) == IORESOURCE_IO)
p = &ioport_resource;
}
if (p && insert_resource(p, r)) {
dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r);
ret = -EBUSY;
goto failed;
}
}
pr_debug("Registering platform device '%s'. Parent at %s\n",
dev_name(&pdev->dev), dev_name(pdev->dev.parent));
ret = device_add(&pdev->dev);
if (ret == 0)
return ret;
failed:
if (pdev->id_auto) {
ida_simple_remove(&platform_devid_ida, pdev->id);
pdev->id = PLATFORM_DEVID_AUTO;
}
while (--i >= 0) {
struct resource *r = &pdev->resource[i];
if (r->parent)
release_resource(r);
}
err_out:
return ret;
}
| C | linux | 0 |
CVE-2018-6096 | https://www.cvedetails.com/cve/CVE-2018-6096/ | null | https://github.com/chromium/chromium/commit/36f801fdbec07d116a6f4f07bb363f10897d6a51 | 36f801fdbec07d116a6f4f07bb363f10897d6a51 | If a page calls |window.focus()|, kick it out of fullscreen.
BUG=776418, 800056
Change-Id: I1880fe600e4814c073f247c43b1c1ac80c8fc017
Reviewed-on: https://chromium-review.googlesource.com/852378
Reviewed-by: Nasko Oskov <nasko@chromium.org>
Reviewed-by: Philip Jägenstedt <foolip@chromium.org>
Commit-Queue: Avi Drissman <avi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#533790} | RenderFrameImpl::CreateWorkerFetchContext() {
blink::WebServiceWorkerNetworkProvider* web_provider =
frame_->GetDocumentLoader()->GetServiceWorkerNetworkProvider();
DCHECK(web_provider);
ServiceWorkerNetworkProvider* provider =
ServiceWorkerNetworkProvider::FromWebServiceWorkerNetworkProvider(
web_provider);
mojom::ServiceWorkerWorkerClientRequest service_worker_client_request;
mojom::ServiceWorkerContainerHostPtrInfo container_host_ptr_info;
ServiceWorkerProviderContext* provider_context = provider->context();
if (provider_context) {
service_worker_client_request =
provider_context->CreateWorkerClientRequest();
if (ServiceWorkerUtils::IsServicificationEnabled())
container_host_ptr_info = provider_context->CloneContainerHostPtrInfo();
}
std::unique_ptr<WorkerFetchContextImpl> worker_fetch_context =
std::make_unique<WorkerFetchContextImpl>(
std::move(service_worker_client_request),
std::move(container_host_ptr_info), GetLoaderFactoryBundle()->Clone(),
GetContentClient()->renderer()->CreateURLLoaderThrottleProvider(
URLLoaderThrottleProviderType::kWorker));
worker_fetch_context->set_parent_frame_id(routing_id_);
worker_fetch_context->set_site_for_cookies(
frame_->GetDocument().SiteForCookies());
worker_fetch_context->set_is_secure_context(
frame_->GetDocument().IsSecureContext());
worker_fetch_context->set_service_worker_provider_id(provider->provider_id());
worker_fetch_context->set_is_controlled_by_service_worker(
provider->IsControlledByServiceWorker());
worker_fetch_context->set_origin_url(
GURL(frame_->GetDocument().Url()).GetOrigin());
{
SCOPED_UMA_HISTOGRAM_TIMER(
"RenderFrameObservers.WillCreateWorkerFetchContext");
for (auto& observer : observers_)
observer.WillCreateWorkerFetchContext(worker_fetch_context.get());
}
return std::move(worker_fetch_context);
}
| RenderFrameImpl::CreateWorkerFetchContext() {
blink::WebServiceWorkerNetworkProvider* web_provider =
frame_->GetDocumentLoader()->GetServiceWorkerNetworkProvider();
DCHECK(web_provider);
ServiceWorkerNetworkProvider* provider =
ServiceWorkerNetworkProvider::FromWebServiceWorkerNetworkProvider(
web_provider);
mojom::ServiceWorkerWorkerClientRequest service_worker_client_request;
mojom::ServiceWorkerContainerHostPtrInfo container_host_ptr_info;
ServiceWorkerProviderContext* provider_context = provider->context();
if (provider_context) {
service_worker_client_request =
provider_context->CreateWorkerClientRequest();
if (ServiceWorkerUtils::IsServicificationEnabled())
container_host_ptr_info = provider_context->CloneContainerHostPtrInfo();
}
std::unique_ptr<WorkerFetchContextImpl> worker_fetch_context =
std::make_unique<WorkerFetchContextImpl>(
std::move(service_worker_client_request),
std::move(container_host_ptr_info), GetLoaderFactoryBundle()->Clone(),
GetContentClient()->renderer()->CreateURLLoaderThrottleProvider(
URLLoaderThrottleProviderType::kWorker));
worker_fetch_context->set_parent_frame_id(routing_id_);
worker_fetch_context->set_site_for_cookies(
frame_->GetDocument().SiteForCookies());
worker_fetch_context->set_is_secure_context(
frame_->GetDocument().IsSecureContext());
worker_fetch_context->set_service_worker_provider_id(provider->provider_id());
worker_fetch_context->set_is_controlled_by_service_worker(
provider->IsControlledByServiceWorker());
worker_fetch_context->set_origin_url(
GURL(frame_->GetDocument().Url()).GetOrigin());
{
SCOPED_UMA_HISTOGRAM_TIMER(
"RenderFrameObservers.WillCreateWorkerFetchContext");
for (auto& observer : observers_)
observer.WillCreateWorkerFetchContext(worker_fetch_context.get());
}
return std::move(worker_fetch_context);
}
| C | Chrome | 0 |
CVE-2018-17471 | https://www.cvedetails.com/cve/CVE-2018-17471/ | CWE-20 | https://github.com/chromium/chromium/commit/d18c519758c2e6043f0e1f00e2b69a55b3d7997f | d18c519758c2e6043f0e1f00e2b69a55b3d7997f | Security drop fullscreen for any nested WebContents level.
This relands 3dcaec6e30feebefc11e with a fix to the test.
BUG=873080
TEST=as in bug
Change-Id: Ie68b197fc6b92447e9633f233354a68fefcf20c7
Reviewed-on: https://chromium-review.googlesource.com/1175925
Reviewed-by: Sidney San Martín <sdy@chromium.org>
Commit-Queue: Avi Drissman <avi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#583335} | ForwardingAudioStreamFactory* WebContentsImpl::GetAudioStreamFactory() {
if (!audio_stream_factory_) {
audio_stream_factory_.emplace(
this,
content::ServiceManagerConnection::GetForProcess()
->GetConnector()
->Clone(),
AudioStreamBrokerFactory::CreateImpl());
}
return &*audio_stream_factory_;
}
| ForwardingAudioStreamFactory* WebContentsImpl::GetAudioStreamFactory() {
if (!audio_stream_factory_) {
audio_stream_factory_.emplace(
this,
content::ServiceManagerConnection::GetForProcess()
->GetConnector()
->Clone(),
AudioStreamBrokerFactory::CreateImpl());
}
return &*audio_stream_factory_;
}
| C | Chrome | 0 |
CVE-2015-2698 | https://www.cvedetails.com/cve/CVE-2015-2698/ | CWE-119 | https://github.com/krb5/krb5/commit/3db8dfec1ef50ddd78d6ba9503185995876a39fd | 3db8dfec1ef50ddd78d6ba9503185995876a39fd | Fix IAKERB context export/import [CVE-2015-2698]
The patches for CVE-2015-2696 contained a regression in the newly
added IAKERB iakerb_gss_export_sec_context() function, which could
cause it to corrupt memory. Fix the regression by properly
dereferencing the context_handle pointer before casting it.
Also, the patches did not implement an IAKERB gss_import_sec_context()
function, under the erroneous belief that an exported IAKERB context
would be tagged as a krb5 context. Implement it now to allow IAKERB
contexts to be successfully exported and imported after establishment.
CVE-2015-2698:
In any MIT krb5 release with the patches for CVE-2015-2696 applied, an
application which calls gss_export_sec_context() may experience memory
corruption if the context was established using the IAKERB mechanism.
Historically, some vulnerabilities of this nature can be translated
into remote code execution, though the necessary exploits must be
tailored to the individual application and are usually quite
complicated.
CVSSv2 Vector: AV:N/AC:H/Au:S/C:C/I:C/A:C/E:POC/RL:OF/RC:C
ticket: 8273 (new)
target_version: 1.14
tags: pullup | krb5_gss_inquire_sec_context_by_oid (OM_uint32 *minor_status,
const gss_ctx_id_t context_handle,
const gss_OID desired_object,
gss_buffer_set_t *data_set)
{
krb5_gss_ctx_id_rec *ctx;
size_t i;
if (minor_status == NULL)
return GSS_S_CALL_INACCESSIBLE_WRITE;
*minor_status = 0;
if (desired_object == GSS_C_NO_OID)
return GSS_S_CALL_INACCESSIBLE_READ;
if (data_set == NULL)
return GSS_S_CALL_INACCESSIBLE_WRITE;
*data_set = GSS_C_NO_BUFFER_SET;
ctx = (krb5_gss_ctx_id_rec *) context_handle;
if (ctx->terminated || !ctx->established)
return GSS_S_NO_CONTEXT;
for (i = 0; i < sizeof(krb5_gss_inquire_sec_context_by_oid_ops)/
sizeof(krb5_gss_inquire_sec_context_by_oid_ops[0]); i++) {
if (g_OID_prefix_equal(desired_object, &krb5_gss_inquire_sec_context_by_oid_ops[i].oid)) {
return (*krb5_gss_inquire_sec_context_by_oid_ops[i].func)(minor_status,
context_handle,
desired_object,
data_set);
}
}
*minor_status = EINVAL;
return GSS_S_UNAVAILABLE;
}
| krb5_gss_inquire_sec_context_by_oid (OM_uint32 *minor_status,
const gss_ctx_id_t context_handle,
const gss_OID desired_object,
gss_buffer_set_t *data_set)
{
krb5_gss_ctx_id_rec *ctx;
size_t i;
if (minor_status == NULL)
return GSS_S_CALL_INACCESSIBLE_WRITE;
*minor_status = 0;
if (desired_object == GSS_C_NO_OID)
return GSS_S_CALL_INACCESSIBLE_READ;
if (data_set == NULL)
return GSS_S_CALL_INACCESSIBLE_WRITE;
*data_set = GSS_C_NO_BUFFER_SET;
ctx = (krb5_gss_ctx_id_rec *) context_handle;
if (ctx->terminated || !ctx->established)
return GSS_S_NO_CONTEXT;
for (i = 0; i < sizeof(krb5_gss_inquire_sec_context_by_oid_ops)/
sizeof(krb5_gss_inquire_sec_context_by_oid_ops[0]); i++) {
if (g_OID_prefix_equal(desired_object, &krb5_gss_inquire_sec_context_by_oid_ops[i].oid)) {
return (*krb5_gss_inquire_sec_context_by_oid_ops[i].func)(minor_status,
context_handle,
desired_object,
data_set);
}
}
*minor_status = EINVAL;
return GSS_S_UNAVAILABLE;
}
| C | krb5 | 0 |
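Aside on the fix above: the memory corruption came from casting `context_handle` (a pointer to a handle) instead of `*context_handle` (the handle itself). The self-contained C sketch below only illustrates that missing-dereference pattern; `ctx_handle_t`, `struct real_ctx` and the function names are invented stand-ins, not the real GSS-API types.

```c
#include <stdio.h>

/* Hypothetical stand-ins for an opaque handle type and its real layout. */
typedef void *ctx_handle_t;           /* what callers pass around           */
struct real_ctx { int established; }; /* what the handle actually points to */

/* Wrong: casts the address of the caller's handle variable, so the field
 * read through 'ctx' comes from the caller's stack, not from the context. */
static int is_established_buggy(ctx_handle_t *context_handle) {
    struct real_ctx *ctx = (struct real_ctx *)context_handle;  /* missing '*' */
    return ctx->established;                                   /* garbage */
}

/* Fixed: dereference the handle pointer first, then cast the handle value. */
static int is_established_fixed(ctx_handle_t *context_handle) {
    struct real_ctx *ctx = (struct real_ctx *)*context_handle;
    return ctx->established;
}

int main(void) {
    struct real_ctx real = { .established = 1 };
    ctx_handle_t handle = &real;

    printf("fixed: %d\n", is_established_fixed(&handle));  /* prints 1 */
    /* is_established_buggy(&handle) would read the caller's stack instead. */
    (void)is_established_buggy;
    return 0;
}
```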
CVE-2016-1671 | https://www.cvedetails.com/cve/CVE-2016-1671/ | CWE-22 | https://github.com/chromium/chromium/commit/9cfe470d793da6e09b966d435c8fa2ba1625d5fe | 9cfe470d793da6e09b966d435c8fa2ba1625d5fe | [base] Make dynamic container to static span conversion explicit
This change disallows implicit conversions from dynamic containers to
static spans. This conversion can cause CHECK failures, and thus should
be done carefully. Requiring explicit construction makes it more obvious
when this happens. To aid usability, appropriate base::make_span<size_t>
overloads are added.
Bug: 877931
Change-Id: Id9f526bc57bfd30a52d14df827b0445ca087381d
Reviewed-on: https://chromium-review.googlesource.com/1189985
Reviewed-by: Ryan Sleevi <rsleevi@chromium.org>
Reviewed-by: Balazs Engedy <engedy@chromium.org>
Reviewed-by: Daniel Cheng <dcheng@chromium.org>
Commit-Queue: Jan Wilken Dörrie <jdoerrie@chromium.org>
Cr-Commit-Position: refs/heads/master@{#586657} | void GenerateSessionHashV1WithSessionSecurity(
base::span<const uint8_t, kChallengeLen> server_challenge,
base::span<const uint8_t, kChallengeLen> client_challenge,
base::span<uint8_t, kNtlmHashLen> session_hash) {
MD5_CTX ctx;
MD5_Init(&ctx);
MD5_Update(&ctx, server_challenge.data(), kChallengeLen);
MD5_Update(&ctx, client_challenge.data(), kChallengeLen);
MD5_Final(session_hash.data(), &ctx);
}
| void GenerateSessionHashV1WithSessionSecurity(
base::span<const uint8_t, kChallengeLen> server_challenge,
base::span<const uint8_t, kChallengeLen> client_challenge,
base::span<uint8_t, kNtlmHashLen> session_hash) {
MD5_CTX ctx;
MD5_Init(&ctx);
MD5_Update(&ctx, server_challenge.data(), kChallengeLen);
MD5_Update(&ctx, client_challenge.data(), kChallengeLen);
MD5_Final(session_hash.data(), &ctx);
}
| C | Chrome | 0 |
CVE-2017-14170 | https://www.cvedetails.com/cve/CVE-2017-14170/ | CWE-834 | https://github.com/FFmpeg/FFmpeg/commit/900f39692ca0337a98a7cf047e4e2611071810c2 | 900f39692ca0337a98a7cf047e4e2611071810c2 | avformat/mxfdec: Fix DoS issues in mxf_read_index_entry_array()
Fixes: 20170829A.mxf
Co-Author: 张洪亮(望初)" <wangchu.zhl@alibaba-inc.com>
Found-by: Xiaohei and Wangchu from Alibaba Security Team
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc> | static int mxf_read_strong_ref_array(AVIOContext *pb, UID **refs, int *count)
{
*count = avio_rb32(pb);
*refs = av_calloc(*count, sizeof(UID));
if (!*refs) {
*count = 0;
return AVERROR(ENOMEM);
}
avio_skip(pb, 4); /* useless size of objects, always 16 according to specs */
avio_read(pb, (uint8_t *)*refs, *count * sizeof(UID));
return 0;
}
| static int mxf_read_strong_ref_array(AVIOContext *pb, UID **refs, int *count)
{
*count = avio_rb32(pb);
*refs = av_calloc(*count, sizeof(UID));
if (!*refs) {
*count = 0;
return AVERROR(ENOMEM);
}
avio_skip(pb, 4); /* useless size of objects, always 16 according to specs */
avio_read(pb, (uint8_t *)*refs, *count * sizeof(UID));
return 0;
}
| C | FFmpeg | 0 |
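The pattern behind the FFmpeg fix above is to treat an element count read from the input as untrusted and to cap it against the bytes actually available before allocating and reading. A minimal sketch of that idea in plain C follows; the record size, buffer handling and function name are assumptions for the example, not FFmpeg APIs.

```c
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#define RECORD_SIZE 16

/* Read a 32-bit big-endian count followed by 'count' fixed-size records from
 * an in-memory buffer, but only after checking that the count is plausible
 * for the bytes that are really available. */
static int read_record_array(const uint8_t *buf, size_t buf_len,
                             uint8_t **records, uint32_t *count)
{
    if (buf_len < 4)
        return -EINVAL;

    uint32_t n = (uint32_t)buf[0] << 24 | (uint32_t)buf[1] << 16 |
                 (uint32_t)buf[2] << 8  | (uint32_t)buf[3];

    /* Untrusted count: it must not claim more records than the remaining
     * bytes can hold, otherwise a huge allocation / short read follows. */
    if (n > (buf_len - 4) / RECORD_SIZE)
        return -EINVAL;

    uint8_t *out = calloc(n ? n : 1, RECORD_SIZE);
    if (!out)
        return -ENOMEM;

    memcpy(out, buf + 4, (size_t)n * RECORD_SIZE);
    *records = out;
    *count = n;
    return 0;
}
```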
CVE-2019-5822 | https://www.cvedetails.com/cve/CVE-2019-5822/ | CWE-284 | https://github.com/chromium/chromium/commit/2f81d000fdb5331121cba7ff81dfaaec25b520a5 | 2f81d000fdb5331121cba7ff81dfaaec25b520a5 | When turning a download into a navigation, navigate the right frame
Code changes from Nate Chapin <japhet@chromium.org>
Bug: 926105
Change-Id: I098599394e6ebe7d2fce5af838014297a337d294
Reviewed-on: https://chromium-review.googlesource.com/c/1454962
Reviewed-by: Camille Lamy <clamy@chromium.org>
Commit-Queue: Jochen Eisinger <jochen@chromium.org>
Cr-Commit-Position: refs/heads/master@{#629547} | FakeSafeBrowsingService() : TestSafeBrowsingService() {}
| FakeSafeBrowsingService() : TestSafeBrowsingService() {}
| C | Chrome | 0 |
CVE-2017-12187 | https://www.cvedetails.com/cve/CVE-2017-12187/ | CWE-20 | https://cgit.freedesktop.org/xorg/xserver/commit/?id=cad5a1050b7184d828aef9c1dd151c3ab649d37e | cad5a1050b7184d828aef9c1dd151c3ab649d37e | null | ProcPseudoramiXGetState(ClientPtr client)
{
REQUEST(xPanoramiXGetStateReq);
WindowPtr pWin;
xPanoramiXGetStateReply rep;
register int rc;
TRACE;
REQUEST_SIZE_MATCH(xPanoramiXGetStateReq);
rc = dixLookupWindow(&pWin, stuff->window, client, DixGetAttrAccess);
if (rc != Success)
return rc;
rep.type = X_Reply;
rep.length = 0;
rep.sequenceNumber = client->sequence;
rep.state = !noPseudoramiXExtension;
rep.window = stuff->window;
if (client->swapped) {
swaps(&rep.sequenceNumber);
swapl(&rep.length);
swapl(&rep.window);
}
WriteToClient(client, sizeof(xPanoramiXGetStateReply),&rep);
return Success;
}
| ProcPseudoramiXGetState(ClientPtr client)
{
REQUEST(xPanoramiXGetStateReq);
WindowPtr pWin;
xPanoramiXGetStateReply rep;
register int rc;
TRACE;
REQUEST_SIZE_MATCH(xPanoramiXGetStateReq);
rc = dixLookupWindow(&pWin, stuff->window, client, DixGetAttrAccess);
if (rc != Success)
return rc;
rep.type = X_Reply;
rep.length = 0;
rep.sequenceNumber = client->sequence;
rep.state = !noPseudoramiXExtension;
rep.window = stuff->window;
if (client->swapped) {
swaps(&rep.sequenceNumber);
swapl(&rep.length);
swapl(&rep.window);
}
WriteToClient(client, sizeof(xPanoramiXGetStateReply),&rep);
return Success;
}
| C | xserver | 0 |
CVE-2014-9903 | https://www.cvedetails.com/cve/CVE-2014-9903/ | CWE-200 | https://github.com/torvalds/linux/commit/4efbc454ba68def5ef285b26ebfcfdb605b52755 | 4efbc454ba68def5ef285b26ebfcfdb605b52755 | sched: Fix information leak in sys_sched_getattr()
We're copying the on-stack structure to userspace, but forgot to give
the right number of bytes to copy. This allows the calling process to
obtain up to PAGE_SIZE bytes from the stack (and possibly adjacent
kernel memory).
This fix copies only as much as we actually have on the stack
(attr->size defaults to the size of the struct) and leaves the rest of
the userspace-provided buffer untouched.
Found using kmemcheck + trinity.
Fixes: d50dde5a10f30 ("sched: Add new scheduler syscalls to support an extended scheduling parameters ABI")
Cc: Dario Faggioli <raistlin@linux.it>
Cc: Juri Lelli <juri.lelli@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Vegard Nossum <vegard.nossum@oracle.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1392585857-10725-1-git-send-email-vegard.nossum@oracle.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de> | void set_numabalancing_state(bool enabled)
{
if (enabled)
sched_feat_set("NUMA");
else
sched_feat_set("NO_NUMA");
}
| void set_numabalancing_state(bool enabled)
{
if (enabled)
sched_feat_set("NUMA");
else
sched_feat_set("NO_NUMA");
}
| C | linux | 0 |
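The sched_getattr leak above comes down to copying more bytes to userspace than the kernel initialized. A hedged userspace analogue of the corrected pattern is sketched below: only the producer-declared `size` bytes are copied, with memcpy standing in for copy_to_user. The struct and names are illustrative only.

```c
#include <stddef.h>
#include <string.h>
#include <errno.h>

/* A structure whose initialized size may be smaller than what the caller's
 * buffer can hold. */
struct xattr_like {
    unsigned int size;       /* how many bytes of this struct are valid */
    int policy;
    long long data[4];
};

/* Copy at most the initialized part of *src into the caller's buffer.
 * memcpy() stands in for copy_to_user(); the rest of the caller's buffer is
 * left untouched instead of being filled from our stack. */
static int export_attr(void *ubuf, size_t ubuf_size,
                       const struct xattr_like *src)
{
    size_t valid = src->size;   /* set by the producer, not by the caller */

    if (valid > sizeof(*src))
        return -EINVAL;
    if (ubuf_size < valid)
        return -E2BIG;

    memcpy(ubuf, src, valid);   /* never ubuf_size or a caller-supplied size */
    return 0;
}
```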
CVE-2012-2669 | https://www.cvedetails.com/cve/CVE-2012-2669/ | CWE-20 | https://github.com/torvalds/linux/commit/bcc2c9c3fff859e0eb019fe6fec26f9b8eba795c | bcc2c9c3fff859e0eb019fe6fec26f9b8eba795c | Tools: hv: verify origin of netlink connector message
The SuSE security team suggested using recvfrom instead of recv to be
certain that the connector message originates from the kernel.
CVE-2012-2669
Signed-off-by: Olaf Hering <olaf@aepfle.de>
Signed-off-by: Marcus Meissner <meissner@suse.de>
Signed-off-by: Sebastian Krahmer <krahmer@suse.de>
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | static void kvp_acquire_lock(int pool)
{
struct flock fl = {F_WRLCK, SEEK_SET, 0, 0, 0};
fl.l_pid = getpid();
if (fcntl(kvp_file_info[pool].fd, F_SETLKW, &fl) == -1) {
syslog(LOG_ERR, "Failed to acquire the lock pool: %d", pool);
exit(-1);
}
}
| static void kvp_acquire_lock(int pool)
{
struct flock fl = {F_WRLCK, SEEK_SET, 0, 0, 0};
fl.l_pid = getpid();
if (fcntl(kvp_file_info[pool].fd, F_SETLKW, &fl) == -1) {
syslog(LOG_ERR, "Failed to acquire the lock pool: %d", pool);
exit(-1);
}
}
| C | linux | 0 |
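The daemon fix described above swaps recv() for recvfrom() so the sender's netlink address can be validated: kernel-originated messages carry nl_pid == 0. A sketch of that receive path (Linux-only, assuming `fd` is an already-bound AF_NETLINK socket; not the actual hv_kvp_daemon code):

```c
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <string.h>

/* Receive one datagram and accept it only if it was sent by the kernel.
 * Returns the payload length, 0 if the message was dropped, -1 on error. */
static ssize_t recv_from_kernel(int fd, void *buf, size_t len)
{
    struct sockaddr_nl addr;
    socklen_t addr_len = sizeof(addr);
    ssize_t n;

    memset(&addr, 0, sizeof(addr));
    n = recvfrom(fd, buf, len, 0, (struct sockaddr *)&addr, &addr_len);
    if (n < 0)
        return -1;

    /* Kernel-originated netlink messages carry nl_pid == 0; anything else
     * came from another userspace socket and is ignored. */
    if (addr_len != sizeof(addr) || addr.nl_family != AF_NETLINK ||
        addr.nl_pid != 0)
        return 0;

    return n;
}
```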
CVE-2018-18710 | https://www.cvedetails.com/cve/CVE-2018-18710/ | CWE-200 | https://github.com/torvalds/linux/commit/e4f3aa2e1e67bb48dfbaaf1cad59013d5a5bc276 | e4f3aa2e1e67bb48dfbaaf1cad59013d5a5bc276 | cdrom: fix improper type cast, which can leat to information leak.
There is another cast from unsigned long to int which causes
a bounds check to fail with specially crafted input. The value is
then used as an index in the slot array in cdrom_slot_status().
This issue is similar to CVE-2018-16658 and CVE-2018-10940.
Signed-off-by: Young_X <YangX92@hotmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk> | static int cdrom_ioctl_volctrl(struct cdrom_device_info *cdi,
void __user *argp)
{
struct cdrom_volctrl volume;
cd_dbg(CD_DO_IOCTL, "entering CDROMVOLCTRL\n");
if (!CDROM_CAN(CDC_PLAY_AUDIO))
return -ENOSYS;
if (copy_from_user(&volume, argp, sizeof(volume)))
return -EFAULT;
return cdi->ops->audio_ioctl(cdi, CDROMVOLCTRL, &volume);
}
| static int cdrom_ioctl_volctrl(struct cdrom_device_info *cdi,
void __user *argp)
{
struct cdrom_volctrl volume;
cd_dbg(CD_DO_IOCTL, "entering CDROMVOLCTRL\n");
if (!CDROM_CAN(CDC_PLAY_AUDIO))
return -ENOSYS;
if (copy_from_user(&volume, argp, sizeof(volume)))
return -EFAULT;
return cdi->ops->audio_ioctl(cdi, CDROMVOLCTRL, &volume);
}
| C | linux | 0 |
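The cdrom bug above is a bounds check applied after an unsigned long has been narrowed to int, so large values wrap and slip past it. A generic, self-contained illustration of the broken ordering versus the safe one (toy slot table, not the driver's code):

```c
#include <limits.h>
#include <stdio.h>

#define NR_SLOTS 8

/* Broken shape: the cast happens before the check, so e.g. 0x100000001UL
 * becomes 1 on LP64 and passes, while 0x80000001UL becomes a negative int
 * that also passes and is later used as an array index. */
static int pick_slot_buggy(unsigned long arg)
{
    int slot = (int)arg;       /* narrowing first ...            */
    if (slot >= NR_SLOTS)      /* ... then checking: too late    */
        return -1;
    return slot;               /* may be negative or bogus       */
}

/* Safe shape: validate in the original unsigned type, then narrow. */
static int pick_slot_fixed(unsigned long arg)
{
    if (arg >= NR_SLOTS)
        return -1;
    return (int)arg;
}

int main(void)
{
    unsigned long evil = (unsigned long)INT_MAX + 2; /* wraps when cast */
    printf("buggy: %d\n", pick_slot_buggy(evil));    /* bogus slot      */
    printf("fixed: %d\n", pick_slot_fixed(evil));    /* -1, rejected    */
    return 0;
}
```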
CVE-2013-4587 | https://www.cvedetails.com/cve/CVE-2013-4587/ | CWE-20 | https://github.com/torvalds/linux/commit/338c7dbadd2671189cec7faf64c84d01071b3f96 | 338c7dbadd2671189cec7faf64c84d01071b3f96 | KVM: Improve create VCPU parameter (CVE-2013-4587)
In multiple functions the vcpu_id is used as an offset into a bitfield. A
malicious user could specify a vcpu_id greater than 255 in order to set or
clear bits in kernel memory. This could be used to elevate privileges in the
kernel. This patch verifies that the vcpu_id provided is less than 255.
The api documentation already specifies that the vcpu_id must be less than
max_vcpus, but this is currently not checked.
Reported-by: Andrew Honig <ahonig@google.com>
Cc: stable@vger.kernel.org
Signed-off-by: Andrew Honig <ahonig@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | void kvm_make_mclock_inprogress_request(struct kvm *kvm)
{
make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
}
| void kvm_make_mclock_inprogress_request(struct kvm *kvm)
{
make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
}
| C | linux | 0 |
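The KVM fix above validates vcpu_id before it is used as a bit offset. The generic sketch below shows the same check-then-index shape for a bitmap; MAX_IDS and the helper are made-up names, not KVM code.

```c
#include <errno.h>
#include <limits.h>

#define MAX_IDS       255U
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned long id_bitmap[(MAX_IDS + BITS_PER_LONG - 1) / BITS_PER_LONG];

/* Mark an id as in use.  Without the range check, id / BITS_PER_LONG indexes
 * past id_bitmap[] and flips bits in whatever memory happens to follow it. */
static int claim_id(unsigned int id)
{
    if (id >= MAX_IDS)
        return -EINVAL;

    id_bitmap[id / BITS_PER_LONG] |= 1UL << (id % BITS_PER_LONG);
    return 0;
}
```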
CVE-2018-16075 | https://www.cvedetails.com/cve/CVE-2018-16075/ | CWE-254 | https://github.com/chromium/chromium/commit/d913f72b4875cf0814fc3f03ad7c00642097c4a4 | d913f72b4875cf0814fc3f03ad7c00642097c4a4 | Remove RequireCSSExtensionForFile runtime enabled flag.
The feature has long since been stable (since M64) and doesn't seem
to be a need for this flag.
BUG=788936
Change-Id: I666390b869289c328acb4a2daa5bf4154e1702c0
Reviewed-on: https://chromium-review.googlesource.com/c/1324143
Reviewed-by: Mike West <mkwst@chromium.org>
Reviewed-by: Camille Lamy <clamy@chromium.org>
Commit-Queue: Dave Tapuska <dtapuska@chromium.org>
Cr-Commit-Position: refs/heads/master@{#607329} | bool CSSStyleSheetResource::CanUseSheet(const CSSParserContext* parser_context,
MIMETypeCheck mime_type_check) const {
if (ErrorOccurred())
return false;
KURL sheet_url = GetResponse().Url();
if (sheet_url.IsLocalFile()) {
if (parser_context) {
parser_context->Count(WebFeature::kLocalCSSFile);
}
String extension;
int last_dot = sheet_url.LastPathComponent().ReverseFind('.');
if (last_dot != -1)
extension = sheet_url.LastPathComponent().Substring(last_dot + 1);
if (!EqualIgnoringASCIICase(
MIMETypeRegistry::GetMIMETypeForExtension(extension), "text/css")) {
if (parser_context) {
parser_context->CountDeprecation(
WebFeature::kLocalCSSFileExtensionRejected);
}
return false;
}
}
if (mime_type_check == MIMETypeCheck::kLax)
return true;
AtomicString content_type = HttpContentType();
return content_type.IsEmpty() ||
DeprecatedEqualIgnoringCase(content_type, "text/css") ||
DeprecatedEqualIgnoringCase(content_type,
"application/x-unknown-content-type");
}
| bool CSSStyleSheetResource::CanUseSheet(const CSSParserContext* parser_context,
MIMETypeCheck mime_type_check) const {
if (ErrorOccurred())
return false;
KURL sheet_url = GetResponse().Url();
if (sheet_url.IsLocalFile()) {
if (parser_context) {
parser_context->Count(WebFeature::kLocalCSSFile);
}
String extension;
int last_dot = sheet_url.LastPathComponent().ReverseFind('.');
if (last_dot != -1)
extension = sheet_url.LastPathComponent().Substring(last_dot + 1);
if (!EqualIgnoringASCIICase(
MIMETypeRegistry::GetMIMETypeForExtension(extension), "text/css")) {
if (parser_context) {
parser_context->CountDeprecation(
WebFeature::kLocalCSSFileExtensionRejected);
}
if (RuntimeEnabledFeatures::RequireCSSExtensionForFileEnabled()) {
return false;
}
}
}
if (mime_type_check == MIMETypeCheck::kLax)
return true;
AtomicString content_type = HttpContentType();
return content_type.IsEmpty() ||
DeprecatedEqualIgnoringCase(content_type, "text/css") ||
DeprecatedEqualIgnoringCase(content_type,
"application/x-unknown-content-type");
}
| C | Chrome | 1 |
CVE-2017-5019 | https://www.cvedetails.com/cve/CVE-2017-5019/ | CWE-416 | https://github.com/chromium/chromium/commit/f03ea5a5c2ff26e239dfd23e263b15da2d9cee93 | f03ea5a5c2ff26e239dfd23e263b15da2d9cee93 | Convert FrameHostMsg_DidAddMessageToConsole to Mojo.
Note: Since this required changing the test
RenderViewImplTest.DispatchBeforeUnloadCanDetachFrame, I manually
re-introduced https://crbug.com/666714 locally (the bug the test was
added for), and reran the test to confirm that it still covers the bug.
Bug: 786836
Change-Id: I110668fa6f0f261fd2ac36bb91a8d8b31c99f4f1
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1526270
Commit-Queue: Lowell Manners <lowell@chromium.org>
Reviewed-by: Daniel Cheng <dcheng@chromium.org>
Reviewed-by: Camille Lamy <clamy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#653137} | void RenderFrameImpl::InitializeBlameContext(RenderFrameImpl* parent_frame) {
DCHECK(!blame_context_);
blame_context_ = std::make_unique<FrameBlameContext>(this, parent_frame);
blame_context_->Initialize();
}
| void RenderFrameImpl::InitializeBlameContext(RenderFrameImpl* parent_frame) {
DCHECK(!blame_context_);
blame_context_ = std::make_unique<FrameBlameContext>(this, parent_frame);
blame_context_->Initialize();
}
| C | Chrome | 0 |
CVE-2011-4324 | https://www.cvedetails.com/cve/CVE-2011-4324/ | null | https://github.com/torvalds/linux/commit/dc0b027dfadfcb8a5504f7d8052754bf8d501ab9 | dc0b027dfadfcb8a5504f7d8052754bf8d501ab9 | NFSv4: Convert the open and close ops to use fmode
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com> | int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
{
set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
set_bit(NFS_OWNER_RECLAIM_NOGRACE, &state->owner->so_flags);
set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
return 1;
}
| int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
{
set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
set_bit(NFS_OWNER_RECLAIM_NOGRACE, &state->owner->so_flags);
set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
return 1;
}
| C | linux | 0 |
CVE-2011-4131 | https://www.cvedetails.com/cve/CVE-2011-4131/ | CWE-189 | https://github.com/torvalds/linux/commit/bf118a342f10dafe44b14451a1392c3254629a1f | bf118a342f10dafe44b14451a1392c3254629a1f | NFSv4: include bitmap in nfsv4 get acl data
The NFSv4 bitmap size is unbounded: a server can return an arbitrary
sized bitmap in an FATTR4_WORD0_ACL request. Replace using the
nfs4_fattr_bitmap_maxsz as a guess to the maximum bitmask returned by a server
with the inclusion of the bitmap (xdr length plus bitmasks) and the acl data
xdr length to the (cached) acl page data.
This is a general solution to commit e5012d1f "NFSv4.1: update
nfs4_fattr_bitmap_maxsz" and fixes hitting a BUG_ON in xdr_shrink_bufhead
when getting ACLs.
Fix a bug in decode_getacl that returned -EINVAL on ACLs > page when getxattr
was called with a NULL buffer, preventing ACL > PAGE_SIZE from being retrieved.
Cc: stable@kernel.org
Signed-off-by: Andy Adamson <andros@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com> | static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp,
struct xdr_stream *xdr, void *dummy)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (!status)
status = decode_release_lockowner(xdr);
return status;
}
| static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp,
struct xdr_stream *xdr, void *dummy)
{
struct compound_hdr hdr;
int status;
status = decode_compound_hdr(xdr, &hdr);
if (!status)
status = decode_release_lockowner(xdr);
return status;
}
| C | linux | 0 |
null | null | null | https://github.com/chromium/chromium/commit/19190765882e272a6a2162c89acdb29110f7e3cf | 19190765882e272a6a2162c89acdb29110f7e3cf | Revert 102184 - [Sync] use base::Time in sync
Make EntryKernel/Entry/BaseNode use base::Time instead of int64s.
Add sync/util/time.h, with utility functions to manage the sync proto
time format.
Store times on disk in proto format instead of the local system.
This requires a database version bump (to 77).
Update SessionChangeProcessor/SessionModelAssociator
to use base::Time, too.
Remove hackish Now() function.
Remove ZeroFields() function, and instead zero-initialize in EntryKernel::EntryKernel() directly.
BUG=
TEST=
Review URL: http://codereview.chromium.org/7981006
TBR=akalin@chromium.org
Review URL: http://codereview.chromium.org/7977034
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@102186 0039d316-1c4b-4281-b951-d872f2087c98 | int64 MakeNodeWithParent(UserShare* share,
ModelType model_type,
const std::string& client_tag,
int64 parent_id) {
WriteTransaction trans(FROM_HERE, share);
ReadNode parent_node(&trans);
EXPECT_TRUE(parent_node.InitByIdLookup(parent_id));
WriteNode node(&trans);
EXPECT_TRUE(node.InitUniqueByCreation(model_type, parent_node, client_tag));
node.SetIsFolder(false);
return node.GetId();
}
| int64 MakeNodeWithParent(UserShare* share,
ModelType model_type,
const std::string& client_tag,
int64 parent_id) {
WriteTransaction trans(FROM_HERE, share);
ReadNode parent_node(&trans);
EXPECT_TRUE(parent_node.InitByIdLookup(parent_id));
WriteNode node(&trans);
EXPECT_TRUE(node.InitUniqueByCreation(model_type, parent_node, client_tag));
node.SetIsFolder(false);
return node.GetId();
}
| C | Chrome | 0 |
CVE-2016-3839 | https://www.cvedetails.com/cve/CVE-2016-3839/ | CWE-284 | https://android.googlesource.com/platform/system/bt/+/472271b153c5dc53c28beac55480a8d8434b2d5c | 472271b153c5dc53c28beac55480a8d8434b2d5c | DO NOT MERGE Fix potential DoS caused by delivering signal to BT process
Bug: 28885210
Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360
Conflicts:
btif/co/bta_hh_co.c
btif/src/btif_core.c
Merge conflict resolution of ag/1161415 (referencing ag/1164670)
- Directly into mnc-mr2-release
| void btif_hl_release_socket(UINT8 app_idx, UINT8 mcl_idx, UINT8 mdl_idx){
btif_hl_soc_cb_t *p_scb = NULL;
btif_hl_mdl_cb_t *p_dcb = BTIF_HL_GET_MDL_CB_PTR(app_idx, mcl_idx, mdl_idx);
BTIF_TRACE_DEBUG("%s", __FUNCTION__);
BTIF_TRACE_DEBUG("app_idx=%d mcl_idx=%d mdl_idx=%d", app_idx, mcl_idx, mdl_idx );
if (p_dcb && p_dcb->p_scb)
{
p_scb = p_dcb->p_scb;
btif_hl_set_socket_state(p_scb, BTIF_HL_SOC_STATE_W4_REL);
p_dcb->p_scb = NULL;
btif_hl_select_close_connected();
}
}
| void btif_hl_release_socket(UINT8 app_idx, UINT8 mcl_idx, UINT8 mdl_idx){
btif_hl_soc_cb_t *p_scb = NULL;
btif_hl_mdl_cb_t *p_dcb = BTIF_HL_GET_MDL_CB_PTR(app_idx, mcl_idx, mdl_idx);
BTIF_TRACE_DEBUG("%s", __FUNCTION__);
BTIF_TRACE_DEBUG("app_idx=%d mcl_idx=%d mdl_idx=%d", app_idx, mcl_idx, mdl_idx );
if (p_dcb && p_dcb->p_scb)
{
p_scb = p_dcb->p_scb;
btif_hl_set_socket_state(p_scb, BTIF_HL_SOC_STATE_W4_REL);
p_dcb->p_scb = NULL;
btif_hl_select_close_connected();
}
}
| C | Android | 0 |
CVE-2016-1665 | https://www.cvedetails.com/cve/CVE-2016-1665/ | CWE-20 | https://github.com/chromium/chromium/commit/282f53ffdc3b1902da86f6a0791af736837efbf8 | 282f53ffdc3b1902da86f6a0791af736837efbf8 | [signin] Add metrics to track the source for refresh token updated events
This CL add a source for update and revoke credentials operations. It then
surfaces the source in the chrome://signin-internals page.
This CL also records the following histograms that track refresh token events:
* Signin.RefreshTokenUpdated.ToValidToken.Source
* Signin.RefreshTokenUpdated.ToInvalidToken.Source
* Signin.RefreshTokenRevoked.Source
These histograms are needed to validate the assumptions of how often tokens
are revoked by the browser and the sources for the token revocations.
Bug: 896182
Change-Id: I2fcab80ee8e5699708e695bc3289fa6d34859a90
Reviewed-on: https://chromium-review.googlesource.com/c/1286464
Reviewed-by: Jochen Eisinger <jochen@chromium.org>
Reviewed-by: David Roger <droger@chromium.org>
Reviewed-by: Ilya Sherman <isherman@chromium.org>
Commit-Queue: Mihai Sardarescu <msarda@chromium.org>
Cr-Commit-Position: refs/heads/master@{#606181} | static DiceResponseHandlerFactory* GetInstance() {
return base::Singleton<DiceResponseHandlerFactory>::get();
}
| static DiceResponseHandlerFactory* GetInstance() {
return base::Singleton<DiceResponseHandlerFactory>::get();
}
| C | Chrome | 0 |
CVE-2018-6144 | https://www.cvedetails.com/cve/CVE-2018-6144/ | CWE-787 | https://github.com/chromium/chromium/commit/9f6510f20ccd794c4a71d5779ae802241e6e3f9b | 9f6510f20ccd794c4a71d5779ae802241e6e3f9b | Add the method to check if offline archive is in internal dir
Bug: 758690
Change-Id: I8bb4283fc40a87fa7a87df2c7e513e2e16903290
Reviewed-on: https://chromium-review.googlesource.com/828049
Reviewed-by: Filip Gorski <fgorski@chromium.org>
Commit-Queue: Jian Li <jianli@chromium.org>
Cr-Commit-Position: refs/heads/master@{#524232} | void OfflinePageModelTaskified::SavePage(
const SavePageParams& save_page_params,
std::unique_ptr<OfflinePageArchiver> archiver,
const SavePageCallback& callback) {
auto task = base::MakeUnique<CreateArchiveTask>(
GetArchiveDirectory(save_page_params.client_id.name_space),
save_page_params, archiver.get(),
base::Bind(&OfflinePageModelTaskified::OnCreateArchiveDone,
weak_ptr_factory_.GetWeakPtr(), callback));
pending_archivers_.push_back(std::move(archiver));
task_queue_.AddTask(std::move(task));
}
| void OfflinePageModelTaskified::SavePage(
const SavePageParams& save_page_params,
std::unique_ptr<OfflinePageArchiver> archiver,
const SavePageCallback& callback) {
auto task = base::MakeUnique<CreateArchiveTask>(
GetArchiveDirectory(save_page_params.client_id.name_space),
save_page_params, archiver.get(),
base::Bind(&OfflinePageModelTaskified::OnCreateArchiveDone,
weak_ptr_factory_.GetWeakPtr(), callback));
pending_archivers_.push_back(std::move(archiver));
task_queue_.AddTask(std::move(task));
}
| C | Chrome | 0 |
CVE-2019-3817 | https://www.cvedetails.com/cve/CVE-2019-3817/ | CWE-416 | https://github.com/rpm-software-management/libcomps/commit/e3a5d056633677959ad924a51758876d415e7046 | e3a5d056633677959ad924a51758876d415e7046 | Fix UAF in comps_objmrtree_unite function
The added field is not used at all in many places and it is probably the
left-over of some copy-paste. | COMPS_MRTreeData * comps_mrtree_data_create_n(COMPS_MRTree * tree, char * key,
size_t keylen, void * data) {
COMPS_MRTreeData * rtd;
if ((rtd = malloc(sizeof(*rtd))) == NULL)
return NULL;
if ((rtd->key = malloc(sizeof(char) * (keylen+1))) == NULL) {
free(rtd);
return NULL;
}
memcpy(rtd->key, key, sizeof(char)*keylen);
rtd->key[keylen] = 0;
rtd->is_leaf = 1;
rtd->data = comps_hslist_create();
comps_hslist_init(rtd->data, NULL, tree->data_cloner,
tree->data_destructor);
if (data)
comps_hslist_append(rtd->data, data, 0);
rtd->subnodes = comps_hslist_create();
comps_hslist_init(rtd->subnodes, NULL,
NULL,
&comps_mrtree_data_destroy_v);
return rtd;
}
| COMPS_MRTreeData * comps_mrtree_data_create_n(COMPS_MRTree * tree, char * key,
size_t keylen, void * data) {
COMPS_MRTreeData * rtd;
if ((rtd = malloc(sizeof(*rtd))) == NULL)
return NULL;
if ((rtd->key = malloc(sizeof(char) * (keylen+1))) == NULL) {
free(rtd);
return NULL;
}
memcpy(rtd->key, key, sizeof(char)*keylen);
rtd->key[keylen] = 0;
rtd->is_leaf = 1;
rtd->data = comps_hslist_create();
comps_hslist_init(rtd->data, NULL, tree->data_cloner,
tree->data_destructor);
if (data)
comps_hslist_append(rtd->data, data, 0);
rtd->subnodes = comps_hslist_create();
comps_hslist_init(rtd->subnodes, NULL,
NULL,
&comps_mrtree_data_destroy_v);
return rtd;
}
| C | libcomps | 0 |
CVE-2016-3913 | https://www.cvedetails.com/cve/CVE-2016-3913/ | CWE-264 | https://android.googlesource.com/platform/frameworks/av/+/0c3b93c8c2027e74af642967eee5c142c8fd185d | 0c3b93c8c2027e74af642967eee5c142c8fd185d | MediaPlayerService: avoid invalid static cast
Bug: 30204103
Change-Id: Ie0dd3568a375f1e9fed8615ad3d85184bcc99028
(cherry picked from commit ee0a0e39acdcf8f97e0d6945c31ff36a06a36e9d)
| void MediaPlayerService::AudioOutput::close()
{
ALOGV("close");
sp<AudioTrack> track;
{
Mutex::Autolock lock(mLock);
track = mTrack;
close_l(); // clears mTrack
}
}
| void MediaPlayerService::AudioOutput::close()
{
ALOGV("close");
sp<AudioTrack> track;
{
Mutex::Autolock lock(mLock);
track = mTrack;
close_l(); // clears mTrack
}
}
| C | Android | 0 |
CVE-2016-7425 | https://www.cvedetails.com/cve/CVE-2016-7425/ | CWE-119 | https://github.com/torvalds/linux/commit/7bc2b55a5c030685b399bb65b6baa9ccc3d1f167 | 7bc2b55a5c030685b399bb65b6baa9ccc3d1f167 | scsi: arcmsr: Buffer overflow in arcmsr_iop_message_xfer()
We need to put an upper bound on "user_len" so the memcpy() doesn't
overflow.
Cc: <stable@vger.kernel.org>
Reported-by: Marco Grassi <marco.gra@gmail.com>
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Reviewed-by: Tomas Henzl <thenzl@redhat.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com> | static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
struct MessageUnit_A __iomem *reg = acb->pmuA;
uint32_t outbound_doorbell;
/* empty doorbell Qbuffer if door bell ringed */
outbound_doorbell = readl(®->outbound_doorbell);
/*clear doorbell interrupt */
writel(outbound_doorbell, ®->outbound_doorbell);
writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell);
}
break;
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = acb->pmuB;
/*clear interrupt and message state*/
writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
/* let IOP know data has been read */
}
break;
case ACB_ADAPTER_TYPE_C: {
struct MessageUnit_C __iomem *reg = acb->pmuC;
uint32_t outbound_doorbell, i;
/* empty doorbell Qbuffer if door bell ringed */
outbound_doorbell = readl(®->outbound_doorbell);
writel(outbound_doorbell, ®->outbound_doorbell_clear);
writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, ®->inbound_doorbell);
for (i = 0; i < 200; i++) {
msleep(20);
outbound_doorbell = readl(®->outbound_doorbell);
if (outbound_doorbell &
ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
writel(outbound_doorbell,
®->outbound_doorbell_clear);
writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK,
®->inbound_doorbell);
} else
break;
}
}
break;
case ACB_ADAPTER_TYPE_D: {
struct MessageUnit_D *reg = acb->pmuD;
uint32_t outbound_doorbell, i;
/* empty doorbell Qbuffer if door bell ringed */
outbound_doorbell = readl(reg->outbound_doorbell);
writel(outbound_doorbell, reg->outbound_doorbell);
writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
reg->inbound_doorbell);
for (i = 0; i < 200; i++) {
msleep(20);
outbound_doorbell = readl(reg->outbound_doorbell);
if (outbound_doorbell &
ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) {
writel(outbound_doorbell,
reg->outbound_doorbell);
writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
reg->inbound_doorbell);
} else
break;
}
}
break;
}
}
| static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
{
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
struct MessageUnit_A __iomem *reg = acb->pmuA;
uint32_t outbound_doorbell;
/* empty doorbell Qbuffer if door bell ringed */
outbound_doorbell = readl(®->outbound_doorbell);
/*clear doorbell interrupt */
writel(outbound_doorbell, ®->outbound_doorbell);
writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, ®->inbound_doorbell);
}
break;
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = acb->pmuB;
/*clear interrupt and message state*/
writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
/* let IOP know data has been read */
}
break;
case ACB_ADAPTER_TYPE_C: {
struct MessageUnit_C __iomem *reg = acb->pmuC;
uint32_t outbound_doorbell, i;
/* empty doorbell Qbuffer if door bell ringed */
outbound_doorbell = readl(®->outbound_doorbell);
writel(outbound_doorbell, ®->outbound_doorbell_clear);
writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, ®->inbound_doorbell);
for (i = 0; i < 200; i++) {
msleep(20);
outbound_doorbell = readl(®->outbound_doorbell);
if (outbound_doorbell &
ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
writel(outbound_doorbell,
®->outbound_doorbell_clear);
writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK,
®->inbound_doorbell);
} else
break;
}
}
break;
case ACB_ADAPTER_TYPE_D: {
struct MessageUnit_D *reg = acb->pmuD;
uint32_t outbound_doorbell, i;
/* empty doorbell Qbuffer if door bell ringed */
outbound_doorbell = readl(reg->outbound_doorbell);
writel(outbound_doorbell, reg->outbound_doorbell);
writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
reg->inbound_doorbell);
for (i = 0; i < 200; i++) {
msleep(20);
outbound_doorbell = readl(reg->outbound_doorbell);
if (outbound_doorbell &
ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) {
writel(outbound_doorbell,
reg->outbound_doorbell);
writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
reg->inbound_doorbell);
} else
break;
}
}
break;
}
}
| C | linux | 0 |
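The arcmsr fix above clamps the user-supplied "user_len" before it becomes a memcpy length. Below is a stripped-down userspace sketch of that clamp; the buffer size and struct layout are invented for the example rather than taken from the driver.

```c
#include <stddef.h>
#include <string.h>
#include <errno.h>

#define MSG_DATA_MAX 1024   /* capacity of the fixed message buffer */

struct msg_buf {
    unsigned int length;                 /* claimed by the caller */
    unsigned char data[MSG_DATA_MAX];
};

/* Copy a caller-controlled payload into the fixed buffer.  The claimed
 * length is validated against the real capacity before memcpy(), so a large
 * value cannot overflow msg->data. */
static int fill_msg(struct msg_buf *msg, const void *payload, size_t user_len)
{
    if (user_len > MSG_DATA_MAX)
        return -EINVAL;

    memcpy(msg->data, payload, user_len);
    msg->length = (unsigned int)user_len;
    return 0;
}
```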
CVE-2017-5009 | https://www.cvedetails.com/cve/CVE-2017-5009/ | CWE-119 | https://github.com/chromium/chromium/commit/1c40f9042ae2d6ee7483d72998aabb5e73b2ff60 | 1c40f9042ae2d6ee7483d72998aabb5e73b2ff60 | DevTools: send proper resource type in Network.RequestWillBeSent
This patch plumbs resource type into the DispatchWillSendRequest
instrumentation. This allows us to report accurate type in
Network.RequestWillBeSent event, instead of "Other", that we report
today.
BUG=765501
R=dgozman
Change-Id: I0134c08b841e8dd247fdc8ff208bfd51e462709c
Reviewed-on: https://chromium-review.googlesource.com/667504
Reviewed-by: Pavel Feldman <pfeldman@chromium.org>
Reviewed-by: Dmitry Gozman <dgozman@chromium.org>
Commit-Queue: Andrey Lushnikov <lushnikov@chromium.org>
Cr-Commit-Position: refs/heads/master@{#507936} | InspectorStyleInvalidatorInvalidateEvent::InvalidationList(
ContainerNode& node,
const Vector<RefPtr<InvalidationSet>>& invalidation_list) {
std::unique_ptr<TracedValue> value =
FillCommonPart(node, kElementHasPendingInvalidationList);
value->BeginArray("invalidationList");
for (const auto& invalidation_set : invalidation_list)
invalidation_set->ToTracedValue(value.get());
value->EndArray();
return value;
}
| InspectorStyleInvalidatorInvalidateEvent::InvalidationList(
ContainerNode& node,
const Vector<RefPtr<InvalidationSet>>& invalidation_list) {
std::unique_ptr<TracedValue> value =
FillCommonPart(node, kElementHasPendingInvalidationList);
value->BeginArray("invalidationList");
for (const auto& invalidation_set : invalidation_list)
invalidation_set->ToTracedValue(value.get());
value->EndArray();
return value;
}
| C | Chrome | 0 |
CVE-2012-3552 | https://www.cvedetails.com/cve/CVE-2012-3552/ | CWE-362 | https://github.com/torvalds/linux/commit/f6d8bd051c391c1c0458a30b2a7abcd939329259 | f6d8bd051c391c1c0458a30b2a7abcd939329259 | inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We can't insert an rcu_head in struct ip_options since it's included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net> | static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
{
if (IPCB(skb)->opt.optlen == 0)
return;
put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen,
ip_hdr(skb) + 1);
}
| static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
{
if (IPCB(skb)->opt.optlen == 0)
return;
put_cmsg(msg, SOL_IP, IP_RECVOPTS, IPCB(skb)->opt.optlen,
ip_hdr(skb) + 1);
}
| C | linux | 0 |
CVE-2010-5313 | https://www.cvedetails.com/cve/CVE-2010-5313/ | CWE-362 | https://github.com/torvalds/linux/commit/fc3a9157d3148ab91039c75423da8ef97be3e105 | fc3a9157d3148ab91039c75423da8ef97be3e105 | KVM: X86: Don't report L2 emulation failures to user-space
This patch prevents emulation failures which result
from emulating an instruction for an L2-Guest from
being reported to user-space.
Without this patch a malicious L2-Guest would be able to
kill the L1 by triggering a race-condition between a vmexit
and the instruction emulator.
With this patch the L2 will most likely only kill itself in
this situation.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> | static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
{
gpa_t gpa;
if (tdp_enabled)
return false;
/*
* if emulation was due to access to shadowed page table
* and it failed try to unshadow page and re-entetr the
* guest to let CPU execute the instruction.
*/
if (kvm_mmu_unprotect_page_virt(vcpu, gva))
return true;
gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, NULL);
if (gpa == UNMAPPED_GVA)
return true; /* let cpu generate fault */
if (!kvm_is_error_hva(gfn_to_hva(vcpu->kvm, gpa >> PAGE_SHIFT)))
return true;
return false;
}
| static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
{
gpa_t gpa;
if (tdp_enabled)
return false;
/*
* if emulation was due to access to shadowed page table
* and it failed try to unshadow page and re-entetr the
* guest to let CPU execute the instruction.
*/
if (kvm_mmu_unprotect_page_virt(vcpu, gva))
return true;
gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, NULL);
if (gpa == UNMAPPED_GVA)
return true; /* let cpu generate fault */
if (!kvm_is_error_hva(gfn_to_hva(vcpu->kvm, gpa >> PAGE_SHIFT)))
return true;
return false;
}
| C | linux | 0 |
null | null | null | https://github.com/chromium/chromium/commit/a0af50481db56aa780942e8595a20c36b2c34f5c | a0af50481db56aa780942e8595a20c36b2c34f5c | Build fix following bug #30696.
Patch by Gavin Barraclough <barraclough@apple.com> on 2009-10-22
Reviewed by NOBODY (build fix).
* WebCoreSupport/FrameLoaderClientGtk.cpp:
(WebKit::FrameLoaderClient::windowObjectCleared):
* webkit/webkitwebframe.cpp:
(webkit_web_frame_get_global_context):
git-svn-id: svn://svn.chromium.org/blink/trunk@49964 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | unsigned int webkit_web_frame_number_of_active_animations(WebKitWebFrame* frame)
{
Frame* coreFrame = core(frame);
if (!coreFrame)
return 0;
AnimationController* controller = coreFrame->animation();
if (!controller)
return 0;
return controller->numberOfActiveAnimations();
}
| unsigned int webkit_web_frame_number_of_active_animations(WebKitWebFrame* frame)
{
Frame* coreFrame = core(frame);
if (!coreFrame)
return 0;
AnimationController* controller = coreFrame->animation();
if (!controller)
return 0;
return controller->numberOfActiveAnimations();
}
| C | Chrome | 0 |
CVE-2010-5331 | https://www.cvedetails.com/cve/CVE-2010-5331/ | CWE-119 | https://github.com/torvalds/linux/commit/0031c41be5c529f8329e327b63cde92ba1284842 | 0031c41be5c529f8329e327b63cde92ba1284842 | drivers/gpu/drm/radeon/radeon_atombios.c: range check issues
This change makes the array larger, "MAX_SUPPORTED_TV_TIMING_V1_2" is 3
and the original size "MAX_SUPPORTED_TV_TIMING" is 2.
Also there were checks that were off by one.
Signed-off-by: Dan Carpenter <error27@gmail.com>
Acked-by: Alex Deucher <alexdeucher@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dave Airlie <airlied@redhat.com> | bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
struct radeon_encoder_int_tmds *tmds)
{
struct drm_device *dev = encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, TMDS_Info);
uint16_t data_offset;
struct _ATOM_TMDS_INFO *tmds_info;
uint8_t frev, crev;
uint16_t maxfreq;
int i;
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
tmds_info =
(struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios +
data_offset);
maxfreq = le16_to_cpu(tmds_info->usMaxFrequency);
for (i = 0; i < 4; i++) {
tmds->tmds_pll[i].freq =
le16_to_cpu(tmds_info->asMiscInfo[i].usFrequency);
tmds->tmds_pll[i].value =
tmds_info->asMiscInfo[i].ucPLL_ChargePump & 0x3f;
tmds->tmds_pll[i].value |=
(tmds_info->asMiscInfo[i].
ucPLL_VCO_Gain & 0x3f) << 6;
tmds->tmds_pll[i].value |=
(tmds_info->asMiscInfo[i].
ucPLL_DutyCycle & 0xf) << 12;
tmds->tmds_pll[i].value |=
(tmds_info->asMiscInfo[i].
ucPLL_VoltageSwing & 0xf) << 16;
DRM_DEBUG("TMDS PLL From ATOMBIOS %u %x\n",
tmds->tmds_pll[i].freq,
tmds->tmds_pll[i].value);
if (maxfreq == tmds->tmds_pll[i].freq) {
tmds->tmds_pll[i].freq = 0xffffffff;
break;
}
}
return true;
}
return false;
}
| bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
struct radeon_encoder_int_tmds *tmds)
{
struct drm_device *dev = encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, TMDS_Info);
uint16_t data_offset;
struct _ATOM_TMDS_INFO *tmds_info;
uint8_t frev, crev;
uint16_t maxfreq;
int i;
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
tmds_info =
(struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios +
data_offset);
maxfreq = le16_to_cpu(tmds_info->usMaxFrequency);
for (i = 0; i < 4; i++) {
tmds->tmds_pll[i].freq =
le16_to_cpu(tmds_info->asMiscInfo[i].usFrequency);
tmds->tmds_pll[i].value =
tmds_info->asMiscInfo[i].ucPLL_ChargePump & 0x3f;
tmds->tmds_pll[i].value |=
(tmds_info->asMiscInfo[i].
ucPLL_VCO_Gain & 0x3f) << 6;
tmds->tmds_pll[i].value |=
(tmds_info->asMiscInfo[i].
ucPLL_DutyCycle & 0xf) << 12;
tmds->tmds_pll[i].value |=
(tmds_info->asMiscInfo[i].
ucPLL_VoltageSwing & 0xf) << 16;
DRM_DEBUG("TMDS PLL From ATOMBIOS %u %x\n",
tmds->tmds_pll[i].freq,
tmds->tmds_pll[i].value);
if (maxfreq == tmds->tmds_pll[i].freq) {
tmds->tmds_pll[i].freq = 0xffffffff;
break;
}
}
return true;
}
return false;
}
| C | linux | 0 |
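The radeon commit above fixes comparisons that allowed an index equal to the array length ("> size" instead of ">= size"). A compact generic example of why that one-off matters when the value indexes a table (not the driver's actual timing tables):

```c
#include <stdio.h>

#define NUM_MODES 3

static const int mode_table[NUM_MODES] = { 60, 50, 48 };

/* Off by one: index == NUM_MODES passes and reads one element past the end. */
static int lookup_buggy(unsigned int index)
{
    if (index > NUM_MODES)
        return -1;
    return mode_table[index];          /* out of bounds when index == 3 */
}

/* Correct: the last valid index is NUM_MODES - 1. */
static int lookup_fixed(unsigned int index)
{
    if (index >= NUM_MODES)
        return -1;
    return mode_table[index];
}

int main(void)
{
    printf("fixed(3) -> %d\n", lookup_fixed(3));   /* -1, rejected */
    printf("fixed(1) -> %d\n", lookup_fixed(1));   /* 50           */
    (void)lookup_buggy;  /* calling it with 3 would read past the array */
    return 0;
}
```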
CVE-2012-1179 | https://www.cvedetails.com/cve/CVE-2012-1179/ | CWE-264 | https://github.com/torvalds/linux/commit/4a1d704194a441bf83c636004a479e01360ec850 | 4a1d704194a441bf83c636004a479e01360ec850 | mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
commit 1a5a9906d4e8d1976b701f889d8f35d54b928f25 upstream.
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem hold in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables to go away from under them, during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
down_read(¤t->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[akpm@linux-foundation.org: checkpatch fixes]
Reported-by: Ulrich Obergfell <uobergfe@redhat.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Jones <davej@redhat.com>
Acked-by: Larry Woodman <lwoodman@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Mark Salter <msalter@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | static void drain_stock(struct memcg_stock_pcp *stock)
{
struct mem_cgroup *old = stock->cached;
if (stock->nr_pages) {
unsigned long bytes = stock->nr_pages * PAGE_SIZE;
res_counter_uncharge(&old->res, bytes);
if (do_swap_account)
res_counter_uncharge(&old->memsw, bytes);
stock->nr_pages = 0;
}
stock->cached = NULL;
}
| static void drain_stock(struct memcg_stock_pcp *stock)
{
struct mem_cgroup *old = stock->cached;
if (stock->nr_pages) {
unsigned long bytes = stock->nr_pages * PAGE_SIZE;
res_counter_uncharge(&old->res, bytes);
if (do_swap_account)
res_counter_uncharge(&old->memsw, bytes);
stock->nr_pages = 0;
}
stock->cached = NULL;
}
| C | linux | 0 |
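The fix explained at length above reads the pmd once into a local variable with a compiler barrier, so every later check operates on the same snapshot even if a concurrent huge-page fault rewrites the entry. The sketch below only mimics the shape of pmd_none_or_trans_huge_or_clear_bad() with openly hypothetical types and predicates; it is not real page-table code.

```c
/* Hypothetical stand-ins: a page-table entry is modelled as a word that a
 * concurrent thread may change from "none" to "huge" at any time. */
typedef unsigned long pmd_t;

#define PMD_NONE  0UL
#define PMD_HUGE  (1UL << 1)
#define barrier() __asm__ __volatile__("" ::: "memory")

static int pmd_none_val(pmd_t v)       { return v == PMD_NONE; }
static int pmd_trans_huge_val(pmd_t v) { return (v & PMD_HUGE) != 0; }
static int pmd_bad_val(pmd_t v)        { (void)v; return 0; /* placeholder */ }

/* Returns 1 if the caller should skip this pmd.  All three predicates are
 * evaluated on the same local snapshot, so the entry cannot look "none" to
 * one check and "huge" to the next. */
static int pmd_none_or_trans_huge_or_bad(pmd_t *pmd)
{
    pmd_t pmdval = *pmd;   /* single read into a local stack copy */

    /* Keep the compiler from re-reading *pmd for the checks below. */
    barrier();

    if (pmd_none_val(pmdval) || pmd_trans_huge_val(pmdval))
        return 1;
    if (pmd_bad_val(pmdval))
        return 1;          /* real code would also log and clear the entry */
    return 0;
}
```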
CVE-2013-1789 | https://www.cvedetails.com/cve/CVE-2013-1789/ | null | https://cgit.freedesktop.org/poppler/poppler/commit/?h=poppler-0.22&id=a9b8ab4657dec65b8b86c225d12c533ad7e984e2 | a9b8ab4657dec65b8b86c225d12c533ad7e984e2 | null | void Splash::blitMask(SplashBitmap *src, int xDest, int yDest,
SplashClipResult clipRes) {
SplashPipe pipe;
Guchar *p;
int w, h, x, y;
w = src->getWidth();
h = src->getHeight();
p = src->getDataPtr();
if (p == NULL) {
error(errInternal, -1, "src->getDataPtr() is NULL in Splash::blitMask");
return;
}
if (vectorAntialias && clipRes != splashClipAllInside) {
pipeInit(&pipe, xDest, yDest, state->fillPattern, NULL,
(Guchar)splashRound(state->fillAlpha * 255), gTrue, gFalse);
drawAAPixelInit();
for (y = 0; y < h; ++y) {
for (x = 0; x < w; ++x) {
pipe.shape = *p++;
pipeInit(&pipe, xDest, yDest, state->fillPattern, NULL,
(Guchar)splashRound(state->fillAlpha * 255), gTrue, gFalse);
p = src->getDataPtr();
} else {
pipeInit(&pipe, xDest, yDest, state->fillPattern, NULL,
(Guchar)splashRound(state->fillAlpha * 255), gTrue, gFalse);
if (clipRes == splashClipAllInside) {
for (y = 0; y < h; ++y) {
pipeSetXY(&pipe, xDest, yDest + y);
(this->*pipe.run)(&pipe);
} else {
pipeIncX(&pipe);
}
++p;
}
}
updateModX(xDest);
updateModX(xDest + w - 1);
updateModY(yDest);
updateModY(yDest + h - 1);
} else {
for (y = 0; y < h; ++y) {
pipeSetXY(&pipe, xDest, yDest + y);
for (x = 0; x < w; ++x) {
if (*p && state->clip->test(xDest + x, yDest + y)) {
pipe.shape = *p;
(this->*pipe.run)(&pipe);
updateModX(xDest + x);
updateModY(yDest + y);
} else {
pipeIncX(&pipe);
}
++p;
}
}
}
}
}
| void Splash::blitMask(SplashBitmap *src, int xDest, int yDest,
SplashClipResult clipRes) {
SplashPipe pipe;
Guchar *p;
int w, h, x, y;
w = src->getWidth();
h = src->getHeight();
if (vectorAntialias && clipRes != splashClipAllInside) {
pipeInit(&pipe, xDest, yDest, state->fillPattern, NULL,
(Guchar)splashRound(state->fillAlpha * 255), gTrue, gFalse);
drawAAPixelInit();
p = src->getDataPtr();
for (y = 0; y < h; ++y) {
for (x = 0; x < w; ++x) {
pipe.shape = *p++;
pipeInit(&pipe, xDest, yDest, state->fillPattern, NULL,
(Guchar)splashRound(state->fillAlpha * 255), gTrue, gFalse);
p = src->getDataPtr();
} else {
pipeInit(&pipe, xDest, yDest, state->fillPattern, NULL,
(Guchar)splashRound(state->fillAlpha * 255), gTrue, gFalse);
p = src->getDataPtr();
if (clipRes == splashClipAllInside) {
for (y = 0; y < h; ++y) {
pipeSetXY(&pipe, xDest, yDest + y);
(this->*pipe.run)(&pipe);
} else {
pipeIncX(&pipe);
}
++p;
}
}
updateModX(xDest);
updateModX(xDest + w - 1);
updateModY(yDest);
updateModY(yDest + h - 1);
} else {
for (y = 0; y < h; ++y) {
pipeSetXY(&pipe, xDest, yDest + y);
for (x = 0; x < w; ++x) {
if (*p && state->clip->test(xDest + x, yDest + y)) {
pipe.shape = *p;
(this->*pipe.run)(&pipe);
updateModX(xDest + x);
updateModY(yDest + y);
} else {
pipeIncX(&pipe);
}
++p;
}
}
}
}
}
| CPP | poppler | 1 |
CVE-2017-5130 | https://www.cvedetails.com/cve/CVE-2017-5130/ | CWE-787 | https://github.com/chromium/chromium/commit/ce1446c00f0fd8f5a3b00727421be2124cb7370f | ce1446c00f0fd8f5a3b00727421be2124cb7370f | Roll libxml to 3939178e4cb797417ff033b1e04ab4b038e224d9
Removes a few patches fixed upstream:
https://git.gnome.org/browse/libxml2/commit/?id=e26630548e7d138d2c560844c43820b6767251e3
https://git.gnome.org/browse/libxml2/commit/?id=94691dc884d1a8ada39f073408b4bb92fe7fe882
Stops using the NOXXE flag which was reverted upstream:
https://git.gnome.org/browse/libxml2/commit/?id=030b1f7a27c22f9237eddca49ec5e620b6258d7d
Changes the patch to uri.c to not add limits.h, which is included
upstream.
Bug: 722079
Change-Id: I4b8449ed33f95de23c54c2cde99970c2df2781ac
Reviewed-on: https://chromium-review.googlesource.com/535233
Reviewed-by: Scott Graham <scottmg@chromium.org>
Commit-Queue: Dominic Cooney <dominicc@chromium.org>
Cr-Commit-Position: refs/heads/master@{#480755} | void XmlWriter::StopWriting() {
xmlTextWriterEndDocument(writer_);
xmlFreeTextWriter(writer_);
writer_ = NULL;
}
| void XmlWriter::StopWriting() {
xmlTextWriterEndDocument(writer_);
xmlFreeTextWriter(writer_);
writer_ = NULL;
}
| C | Chrome | 0 |
CVE-2016-3713 | https://www.cvedetails.com/cve/CVE-2016-3713/ | CWE-284 | https://github.com/torvalds/linux/commit/9842df62004f366b9fed2423e24df10542ee0dc5 | 9842df62004f366b9fed2423e24df10542ee0dc5 | KVM: MTRR: remove MSR 0x2f8
MSR 0x2f8 accessed the 124th Variable Range MTRR ever since MTRR support
was introduced by 9ba075a664df ("KVM: MTRR support").
0x2f8 became harmful when 910a6aae4e2e ("KVM: MTRR: exactly define the
size of variable MTRRs") shrinked the array of VR MTRRs from 256 to 8,
which made access to index 124 out of bounds. The surrounding code only
WARNs in this situation, thus the guest gained a limited read/write
access to struct kvm_arch_vcpu.
0x2f8 is not a valid VR MTRR MSR, because KVM has/advertises only 16 VR
MTRR MSRs, 0x200-0x20f. Every VR MTRR is set up using two MSRs, 0x2f8
was treated as a PHYSBASE and 0x2f9 would be its PHYSMASK, but 0x2f9 was
not implemented in KVM, therefore 0x2f8 could never do anything useful
and getting rid of it is safe.
This fixes CVE-2016-3713.
Fixes: 910a6aae4e2e ("KVM: MTRR: exactly define the size of variable MTRRs")
Cc: stable@vger.kernel.org
Reported-by: David Matlack <dmatlack@google.com>
Signed-off-by: Andy Honig <ahonig@google.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> | static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)
{
switch (msr) {
case MSR_MTRRfix64K_00000:
*seg = 0;
*unit = 0;
break;
case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000:
*seg = 1;
*unit = msr - MSR_MTRRfix16K_80000;
break;
case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
*seg = 2;
*unit = msr - MSR_MTRRfix4K_C0000;
break;
default:
return false;
}
return true;
}
| static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)
{
switch (msr) {
case MSR_MTRRfix64K_00000:
*seg = 0;
*unit = 0;
break;
case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000:
*seg = 1;
*unit = msr - MSR_MTRRfix16K_80000;
break;
case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
*seg = 2;
*unit = msr - MSR_MTRRfix4K_C0000;
break;
default:
return false;
}
return true;
}
| C | linux | 0 |
CVE-2012-3412 | https://www.cvedetails.com/cve/CVE-2012-3412/ | CWE-189 | https://github.com/torvalds/linux/commit/68cb695ccecf949d48949e72f8ce591fdaaa325c | 68cb695ccecf949d48949e72f8ce591fdaaa325c | sfc: Fix maximum number of TSO segments and minimum TX queue size
[ Upstream commit 7e6d06f0de3f74ca929441add094518ae332257c ]
Currently an skb requiring TSO may not fit within a minimum-size TX
queue. The TX queue selected for the skb may stall and trigger the TX
watchdog repeatedly (since the problem skb will be retried after the
TX reset). This issue is designated as CVE-2012-3412.
Set the maximum number of TSO segments for our devices to 100. This
should make no difference to behaviour unless the actual MSS is less
than about 700. Increase the minimum TX queue size accordingly to
allow for 2 worst-case skbs, so that there will definitely be space
to add an skb after we wake a queue.
To avoid invalidating existing configurations, change
efx_ethtool_set_ringparam() to fix up values that are too small rather
than returning -EINVAL.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk> | static void efx_fini_tso(struct efx_tx_queue *tx_queue)
{
unsigned i;
if (tx_queue->buffer) {
for (i = 0; i <= tx_queue->ptr_mask; ++i)
efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
}
while (tx_queue->tso_headers_free != NULL)
efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
tx_queue->efx->pci_dev);
}
| static void efx_fini_tso(struct efx_tx_queue *tx_queue)
{
unsigned i;
if (tx_queue->buffer) {
for (i = 0; i <= tx_queue->ptr_mask; ++i)
efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
}
while (tx_queue->tso_headers_free != NULL)
efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
tx_queue->efx->pci_dev);
}
| C | linux | 0 |
null | null | null | https://github.com/chromium/chromium/commit/8f883f2b12f68fed993671dce7fb5fb91f2229aa | 8f883f2b12f68fed993671dce7fb5fb91f2229aa | Add more non client Windows messages to the list of messages not being sent to the renderer.
Turns out we get WM_NCLBUTTONDOWN/UP messages at times which go to the renderer and are not acked causing the
unresponsive renderer dialog to show up in Desktop Chrome Aura.
BUG=335248
R=jam@chromium.org
TBR=jam
Review URL: https://codereview.chromium.org/141103004
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@245949 0039d316-1c4b-4281-b951-d872f2087c98 | bool RenderWidgetHostViewAura::DeleteRange(const gfx::Range& range) {
NOTIMPLEMENTED();
return false;
}
| bool RenderWidgetHostViewAura::DeleteRange(const gfx::Range& range) {
NOTIMPLEMENTED();
return false;
}
| C | Chrome | 0 |
CVE-2015-1465 | https://www.cvedetails.com/cve/CVE-2015-1465/ | CWE-17 | https://github.com/torvalds/linux/commit/df4d92549f23e1c037e83323aff58a21b3de7fe0 | df4d92549f23e1c037e83323aff58a21b3de7fe0 | ipv4: try to cache dst_entries which would cause a redirect
Not caching dst_entries which cause redirects could be exploited by hosts
on the same subnet, causing a severe DoS attack. This effect aggravated
since commit f88649721268999 ("ipv4: fix dst race in sk_dst_get()").
Lookups causing redirects will be allocated with DST_NOCACHE set which
will force dst_release to free them via RCU. Unfortunately waiting for
RCU grace period just takes too long, we can end up with >1M dst_entries
waiting to be released and the system will run OOM. rcuos threads cannot
catch up under high softirq load.
Attaching the flag to emit a redirect later on to the specific skb allows
us to cache those dst_entries thus reducing the pressure on allocation
and deallocation.
This issue was discovered by Marcelo Leitner.
Cc: Julian Anastasov <ja@ssi.bg>
Signed-off-by: Marcelo Leitner <mleitner@redhat.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: Julian Anastasov <ja@ssi.bg>
Signed-off-by: David S. Miller <davem@davemloft.net> | struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
struct sock *sk)
{
struct rtable *rt = __ip_route_output_key(net, flp4);
if (IS_ERR(rt))
return rt;
if (flp4->flowi4_proto)
rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
flowi4_to_flowi(flp4),
sk, 0);
return rt;
}
| struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
struct sock *sk)
{
struct rtable *rt = __ip_route_output_key(net, flp4);
if (IS_ERR(rt))
return rt;
if (flp4->flowi4_proto)
rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
flowi4_to_flowi(flp4),
sk, 0);
return rt;
}
| C | linux | 0 |
CVE-2018-12247 | https://www.cvedetails.com/cve/CVE-2018-12247/ | CWE-476 | https://github.com/mruby/mruby/commit/55edae0226409de25e59922807cb09acb45731a2 | 55edae0226409de25e59922807cb09acb45731a2 | Allow `Object#clone` to copy frozen status only; fix #4036
Copying all flags from the original object may overwrite the clone's
flags e.g. the embedded flag. | mrb_obj_frozen(mrb_state *mrb, mrb_value self)
{
struct RBasic *b;
switch (mrb_type(self)) {
case MRB_TT_FALSE:
case MRB_TT_TRUE:
case MRB_TT_FIXNUM:
case MRB_TT_SYMBOL:
#ifndef MRB_WITHOUT_FLOAT
case MRB_TT_FLOAT:
#endif
return mrb_true_value();
default:
break;
}
b = mrb_basic_ptr(self);
if (!MRB_FROZEN_P(b)) {
return mrb_false_value();
}
return mrb_true_value();
}
| mrb_obj_frozen(mrb_state *mrb, mrb_value self)
{
struct RBasic *b;
switch (mrb_type(self)) {
case MRB_TT_FALSE:
case MRB_TT_TRUE:
case MRB_TT_FIXNUM:
case MRB_TT_SYMBOL:
#ifndef MRB_WITHOUT_FLOAT
case MRB_TT_FLOAT:
#endif
return mrb_true_value();
default:
break;
}
b = mrb_basic_ptr(self);
if (!MRB_FROZEN_P(b)) {
return mrb_false_value();
}
return mrb_true_value();
}
| C | mruby | 0 |
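
The fix summarized above copies only the frozen status instead of the whole flag word, so flags tied to the clone's own representation (such as an embedded-string flag) survive. That comes down to a one-line bitmask operation; below is a self-contained sketch with made-up flag values, not mruby's real flag layout.

#include <stdint.h>
#include <stdio.h>

#define FLAG_FROZEN   (1u << 0)
#define FLAG_EMBEDDED (1u << 1) /* representation-specific; must not be copied across objects */

struct object { uint32_t flags; };

/* Wrong: overwrites every flag of the clone with the original's flags. */
static void clone_flags_all(struct object *clone, const struct object *orig)
{
    clone->flags = orig->flags;
}

/* Right: transfer only the frozen status, leave the clone's other flags alone. */
static void clone_flags_frozen_only(struct object *clone, const struct object *orig)
{
    clone->flags = (clone->flags & ~FLAG_FROZEN) | (orig->flags & FLAG_FROZEN);
}

int main(void)
{
    struct object orig  = { .flags = FLAG_FROZEN };    /* frozen, not embedded */
    struct object clone = { .flags = FLAG_EMBEDDED };  /* clone uses embedded storage */

    struct object bad = clone;
    clone_flags_all(&bad, &orig);           /* loses FLAG_EMBEDDED -> 0x1 */
    clone_flags_frozen_only(&clone, &orig); /* keeps FLAG_EMBEDDED -> 0x3 */

    printf("copy-all: 0x%x, frozen-only: 0x%x\n", bad.flags, clone.flags);
    return 0;
}
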
CVE-2011-3964 | https://www.cvedetails.com/cve/CVE-2011-3964/ | null | https://github.com/chromium/chromium/commit/0c14577c9905bd8161159ec7eaac810c594508d0 | 0c14577c9905bd8161159ec7eaac810c594508d0 | Change omnibox behavior when stripping javascript schema to navigate after stripping the schema on drag drop.
BUG=109245
TEST=N/A
Review URL: http://codereview.chromium.org/9116016
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@116692 0039d316-1c4b-4281-b951-d872f2087c98 | void OmniboxViewWin::OnMButtonDown(UINT /*keys*/, const CPoint& /*point*/) {
tracking_double_click_ = false;
}
| void OmniboxViewWin::OnMButtonDown(UINT /*keys*/, const CPoint& /*point*/) {
tracking_double_click_ = false;
}
| C | Chrome | 0 |
CVE-2010-4650 | https://www.cvedetails.com/cve/CVE-2010-4650/ | CWE-119 | https://github.com/torvalds/linux/commit/7572777eef78ebdee1ecb7c258c0ef94d35bad16 | 7572777eef78ebdee1ecb7c258c0ef94d35bad16 | fuse: verify ioctl retries
Verify that the total length of the iovec returned in FUSE_IOCTL_RETRY
doesn't overflow iov_length().
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
CC: Tejun Heo <tj@kernel.org>
CC: <stable@kernel.org> [2.6.31+] | static int fuse_readpages(struct file *file, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
struct inode *inode = mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_fill_data data;
int err;
err = -EIO;
if (is_bad_inode(inode))
goto out;
data.file = file;
data.inode = inode;
data.req = fuse_get_req(fc);
err = PTR_ERR(data.req);
if (IS_ERR(data.req))
goto out;
err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
if (!err) {
if (data.req->num_pages)
fuse_send_readpages(data.req, file);
else
fuse_put_request(fc, data.req);
}
out:
return err;
}
| static int fuse_readpages(struct file *file, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
struct inode *inode = mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_fill_data data;
int err;
err = -EIO;
if (is_bad_inode(inode))
goto out;
data.file = file;
data.inode = inode;
data.req = fuse_get_req(fc);
err = PTR_ERR(data.req);
if (IS_ERR(data.req))
goto out;
err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
if (!err) {
if (data.req->num_pages)
fuse_send_readpages(data.req, file);
else
fuse_put_request(fc, data.req);
}
out:
return err;
}
| C | linux | 0 |
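
The commit message above is about verifying that the total length of a caller-supplied iovec cannot overflow when summed. A user-space sketch of that check follows; it uses plain C types rather than the kernel's iov_length() helper, and MAX_TOTAL_LEN is an illustrative cap, not FUSE's actual limit.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct iovec_like { void *base; size_t len; };

#define MAX_TOTAL_LEN (1u << 20) /* illustrative cap */

/* Sum the iovec lengths, failing on wrap-around or on exceeding the cap,
 * instead of trusting whatever total the other side claimed. */
static bool iov_total_len(const struct iovec_like *iov, size_t count, size_t *total)
{
    size_t sum = 0;
    for (size_t i = 0; i < count; i++) {
        if (iov[i].len > MAX_TOTAL_LEN || sum + iov[i].len < sum)
            return false;            /* oversized element or overflow */
        sum += iov[i].len;
        if (sum > MAX_TOTAL_LEN)
            return false;            /* running total exceeds the cap */
    }
    *total = sum;
    return true;
}

int main(void)
{
    struct iovec_like ok[]  = { { NULL, 512 }, { NULL, 1024 } };
    struct iovec_like bad[] = { { NULL, (size_t)-1 }, { NULL, 16 } };
    size_t total;

    printf("ok:  %s\n", iov_total_len(ok, 2, &total)  ? "accepted" : "rejected");
    printf("bad: %s\n", iov_total_len(bad, 2, &total) ? "accepted" : "rejected");
    return 0;
}
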
CVE-2014-0203 | https://www.cvedetails.com/cve/CVE-2014-0203/ | CWE-20 | https://github.com/torvalds/linux/commit/86acdca1b63e6890540fa19495cfc708beff3d8b | 86acdca1b63e6890540fa19495cfc708beff3d8b | fix autofs/afs/etc. magic mountpoint breakage
We end up trying to kfree() nd.last.name on open("/mnt/tmp", O_CREAT)
if /mnt/tmp is an autofs direct mount. The reason is that nd.last_type
is bogus here; we want LAST_BIND for everything of that kind and we
get LAST_NORM left over from finding parent directory.
So make sure that it *is* set properly; set to LAST_BIND before
doing ->follow_link() - for normal symlinks it will be changed
by __vfs_follow_link() and everything else needs it set that way.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk> | static int proc_oom_score(struct task_struct *task, char *buffer)
{
unsigned long points;
struct timespec uptime;
do_posix_clock_monotonic_gettime(&uptime);
read_lock(&tasklist_lock);
points = badness(task->group_leader, uptime.tv_sec);
read_unlock(&tasklist_lock);
return sprintf(buffer, "%lu\n", points);
}
| static int proc_oom_score(struct task_struct *task, char *buffer)
{
unsigned long points;
struct timespec uptime;
do_posix_clock_monotonic_gettime(&uptime);
read_lock(&tasklist_lock);
points = badness(task->group_leader, uptime.tv_sec);
read_unlock(&tasklist_lock);
return sprintf(buffer, "%lu\n", points);
}
| C | linux | 0 |
CVE-2018-17467 | https://www.cvedetails.com/cve/CVE-2018-17467/ | CWE-20 | https://github.com/chromium/chromium/commit/7da6c3419fd172405bcece1ae4ec6ec8316cd345 | 7da6c3419fd172405bcece1ae4ec6ec8316cd345 | Start rendering timer after first navigation
Currently the new content rendering timer in the browser process,
which clears an old page's contents 4 seconds after a navigation if the
new page doesn't draw in that time, is not set on the first navigation
for a top-level frame.
This is problematic because content can exist before the first
navigation, for instance if it was created by a javascript: URL.
This CL removes the code that skips the timer activation on the first
navigation.
Bug: 844881
Change-Id: I19b3ad1ff62c69ded3a5f7b1c0afde191aaf4584
Reviewed-on: https://chromium-review.googlesource.com/1188589
Reviewed-by: Fady Samuel <fsamuel@chromium.org>
Reviewed-by: ccameron <ccameron@chromium.org>
Commit-Queue: Ken Buchanan <kenrb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#586913} | void ReleaseTouchPoint(int index) {
touch_event_.ReleasePoint(index);
}
| void ReleaseTouchPoint(int index) {
touch_event_.ReleasePoint(index);
}
| C | Chrome | 0 |
CVE-2018-11383 | https://www.cvedetails.com/cve/CVE-2018-11383/ | CWE-416 | https://github.com/radare/radare2/commit/9d348bcc2c4bbd3805e7eec97b594be9febbdf9a | 9d348bcc2c4bbd3805e7eec97b594be9febbdf9a | Fix #9943 - Invalid free on RAnal.avr | static void aea_stats_init (AeaStats *stats) {
stats->regs = r_list_newf (free);
stats->regread = r_list_newf (free);
stats->regwrite = r_list_newf (free);
stats->inputregs = r_list_newf (free);
}
| static void aea_stats_init (AeaStats *stats) {
stats->regs = r_list_newf (free);
stats->regread = r_list_newf (free);
stats->regwrite = r_list_newf (free);
stats->inputregs = r_list_newf (free);
}
| C | radare2 | 0 |
CVE-2013-1824 | https://www.cvedetails.com/cve/CVE-2013-1824/ | CWE-200 | https://git.php.net/?p=php-src.git;a=commit;h=188c196d4da60bdde9190d2fc532650d17f7af2d | 188c196d4da60bdde9190d2fc532650d17f7af2d | null | xmlDocPtr soap_xmlParseMemory(const void *buf, size_t buf_size)
{
xmlParserCtxtPtr ctxt = NULL;
xmlDocPtr ret;
/*
xmlInitParser();
*/
ctxt = xmlCreateMemoryParserCtxt(buf, buf_size);
if (ctxt) {
ctxt->options &= ~XML_PARSE_DTDLOAD;
ctxt->sax->ignorableWhitespace = soap_ignorableWhitespace;
ctxt->sax->comment = soap_Comment;
ctxt->sax->warning = NULL;
ctxt->sax->error = NULL;
/*ctxt->sax->fatalError = NULL;*/
#if LIBXML_VERSION >= 20703
ctxt->options |= XML_PARSE_HUGE;
#endif
xmlParseDocument(ctxt);
if (ctxt->wellFormed) {
ret = ctxt->myDoc;
if (ret->URL == NULL && ctxt->directory != NULL) {
ret->URL = xmlCharStrdup(ctxt->directory);
}
} else {
ret = NULL;
xmlFreeDoc(ctxt->myDoc);
ctxt->myDoc = NULL;
}
xmlFreeParserCtxt(ctxt);
} else {
ret = NULL;
}
/*
xmlCleanupParser();
*/
/*
if (ret) {
cleanup_xml_node((xmlNodePtr)ret);
}
*/
return ret;
}
| xmlDocPtr soap_xmlParseMemory(const void *buf, size_t buf_size)
{
xmlParserCtxtPtr ctxt = NULL;
xmlDocPtr ret;
/*
xmlInitParser();
*/
ctxt = xmlCreateMemoryParserCtxt(buf, buf_size);
if (ctxt) {
ctxt->options -= XML_PARSE_DTDLOAD;
ctxt->sax->ignorableWhitespace = soap_ignorableWhitespace;
ctxt->sax->comment = soap_Comment;
ctxt->sax->warning = NULL;
ctxt->sax->error = NULL;
/*ctxt->sax->fatalError = NULL;*/
#if LIBXML_VERSION >= 20703
ctxt->options |= XML_PARSE_HUGE;
#endif
xmlParseDocument(ctxt);
if (ctxt->wellFormed) {
ret = ctxt->myDoc;
if (ret->URL == NULL && ctxt->directory != NULL) {
ret->URL = xmlCharStrdup(ctxt->directory);
}
} else {
ret = NULL;
xmlFreeDoc(ctxt->myDoc);
ctxt->myDoc = NULL;
}
xmlFreeParserCtxt(ctxt);
} else {
ret = NULL;
}
/*
xmlCleanupParser();
*/
/*
if (ret) {
cleanup_xml_node((xmlNodePtr)ret);
}
*/
return ret;
}
| C | php | 1 |
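
The only functional difference between the two versions above is `ctxt->options &= ~XML_PARSE_DTDLOAD` versus `ctxt->options -= XML_PARSE_DTDLOAD`. Subtraction only clears a flag when that bit happens to be set; otherwise it borrows from, and corrupts, neighboring bits. A tiny demonstration with generic flag names:

#include <stdio.h>

#define FLAG_A 0x1u
#define FLAG_B 0x2u
#define FLAG_C 0x4u

int main(void)
{
    /* Case 1: the flag we want to clear is NOT currently set. */
    unsigned sub_opts = FLAG_A | FLAG_C;   /* 0x5 */
    unsigned clr_opts = FLAG_A | FLAG_C;   /* 0x5 */

    sub_opts -= FLAG_B;    /* 0x5 - 0x2 = 0x3: FLAG_C is lost, FLAG_B appears set */
    clr_opts &= ~FLAG_B;   /* 0x5 & ~0x2 = 0x5: unchanged, which is what was meant */

    printf("subtract: 0x%x   clear-bit: 0x%x\n", sub_opts, clr_opts);

    /* Case 2: the flag IS set; both forms agree, which is how the bug hides. */
    sub_opts = clr_opts = FLAG_A | FLAG_B;
    sub_opts -= FLAG_B;
    clr_opts &= ~FLAG_B;
    printf("subtract: 0x%x   clear-bit: 0x%x\n", sub_opts, clr_opts);
    return 0;
}
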
CVE-2016-10050 | https://www.cvedetails.com/cve/CVE-2016-10050/ | CWE-119 | https://github.com/ImageMagick/ImageMagick/commit/73fb0aac5b958521e1511e179ecc0ad49f70ebaf | 73fb0aac5b958521e1511e179ecc0ad49f70ebaf | RLE check for pixel offset less than 0 (heap overflow report from Craig Young). | static Image *ReadRLEImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
#define SkipLinesOp 0x01
#define SetColorOp 0x02
#define SkipPixelsOp 0x03
#define ByteDataOp 0x05
#define RunDataOp 0x06
#define EOFOp 0x07
char
magick[12];
Image
*image;
IndexPacket
index;
int
opcode,
operand,
status;
MagickStatusType
flags;
MagickSizeType
number_pixels;
MemoryInfo
*pixel_info;
register IndexPacket
*indexes;
register ssize_t
x;
register PixelPacket
*q;
register ssize_t
i;
register unsigned char
*p;
size_t
bits_per_pixel,
map_length,
number_colormaps,
number_planes,
number_planes_filled,
one,
pixel_info_length;
ssize_t
count,
offset,
y;
unsigned char
background_color[256],
*colormap,
pixel,
plane,
*pixels;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
/*
Determine if this a RLE file.
*/
count=ReadBlob(image,2,(unsigned char *) magick);
if ((count != 2) || (memcmp(magick,"\122\314",2) != 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
do
{
/*
Read image header.
*/
image->page.x=ReadBlobLSBShort(image);
image->page.y=ReadBlobLSBShort(image);
image->columns=ReadBlobLSBShort(image);
image->rows=ReadBlobLSBShort(image);
flags=(MagickStatusType) ReadBlobByte(image);
image->matte=flags & 0x04 ? MagickTrue : MagickFalse;
number_planes=(size_t) ReadBlobByte(image);
bits_per_pixel=(size_t) ReadBlobByte(image);
number_colormaps=(size_t) ReadBlobByte(image);
map_length=(unsigned char) ReadBlobByte(image);
if (map_length >= 32)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
one=1;
map_length=one << map_length;
if ((number_planes == 0) || (number_planes == 2) ||
((flags & 0x04) && (number_colormaps > 254)) || (bits_per_pixel != 8) ||
(image->columns == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (flags & 0x02)
{
/*
No background color-- initialize to black.
*/
for (i=0; i < (ssize_t) number_planes; i++)
background_color[i]=0;
(void) ReadBlobByte(image);
}
else
{
/*
Initialize background color.
*/
p=background_color;
for (i=0; i < (ssize_t) number_planes; i++)
*p++=(unsigned char) ReadBlobByte(image);
}
if ((number_planes & 0x01) == 0)
(void) ReadBlobByte(image);
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
colormap=(unsigned char *) NULL;
if (number_colormaps != 0)
{
/*
Read image colormaps.
*/
colormap=(unsigned char *) AcquireQuantumMemory(number_colormaps,
3*map_length*sizeof(*colormap));
if (colormap == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
p=colormap;
for (i=0; i < (ssize_t) number_colormaps; i++)
for (x=0; x < (ssize_t) map_length; x++)
*p++=(unsigned char) ScaleShortToQuantum(ReadBlobLSBShort(image));
}
if ((flags & 0x08) != 0)
{
char
*comment;
size_t
length;
/*
Read image comment.
*/
length=ReadBlobLSBShort(image);
if (length != 0)
{
comment=(char *) AcquireQuantumMemory(length,sizeof(*comment));
if (comment == (char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
(void) ReadBlob(image,length-1,(unsigned char *) comment);
comment[length-1]='\0';
(void) SetImageProperty(image,"comment",comment);
comment=DestroyString(comment);
if ((length & 0x01) == 0)
(void) ReadBlobByte(image);
}
}
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
/*
Allocate RLE pixels.
*/
if (image->matte != MagickFalse)
number_planes++;
number_pixels=(MagickSizeType) image->columns*image->rows;
number_planes_filled=(number_planes % 2 == 0) ? number_planes :
number_planes+1;
if ((number_pixels*number_planes_filled) != (size_t) (number_pixels*
number_planes_filled))
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
pixel_info=AcquireVirtualMemory(image->columns,image->rows*
MagickMax(number_planes_filled,4)*sizeof(*pixels));
if (pixel_info == (MemoryInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
pixel_info_length=image->columns*image->rows*
MagickMax(number_planes_filled,4);
pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
if ((flags & 0x01) && !(flags & 0x02))
{
ssize_t
j;
/*
Set background color.
*/
p=pixels;
for (i=0; i < (ssize_t) number_pixels; i++)
{
if (image->matte == MagickFalse)
for (j=0; j < (ssize_t) number_planes; j++)
*p++=background_color[j];
else
{
for (j=0; j < (ssize_t) (number_planes-1); j++)
*p++=background_color[j];
*p++=0; /* initialize matte channel */
}
}
}
/*
Read runlength-encoded image.
*/
plane=0;
x=0;
y=0;
opcode=ReadBlobByte(image);
do
{
switch (opcode & 0x3f)
{
case SkipLinesOp:
{
operand=ReadBlobByte(image);
if (opcode & 0x40)
operand=ReadBlobLSBSignedShort(image);
x=0;
y+=operand;
break;
}
case SetColorOp:
{
operand=ReadBlobByte(image);
plane=(unsigned char) operand;
if (plane == 255)
plane=(unsigned char) (number_planes-1);
x=0;
break;
}
case SkipPixelsOp:
{
operand=ReadBlobByte(image);
if (opcode & 0x40)
operand=ReadBlobLSBSignedShort(image);
x+=operand;
break;
}
case ByteDataOp:
{
operand=ReadBlobByte(image);
if (opcode & 0x40)
operand=ReadBlobLSBSignedShort(image);
offset=((image->rows-y-1)*image->columns*number_planes)+x*
number_planes+plane;
operand++;
if ((offset < 0) ||
(offset+((size_t) operand*number_planes) > pixel_info_length))
{
if (number_colormaps != 0)
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
}
p=pixels+offset;
for (i=0; i < (ssize_t) operand; i++)
{
pixel=(unsigned char) ReadBlobByte(image);
if ((y < (ssize_t) image->rows) &&
((x+i) < (ssize_t) image->columns))
*p=pixel;
p+=number_planes;
}
if (operand & 0x01)
(void) ReadBlobByte(image);
x+=operand;
break;
}
case RunDataOp:
{
operand=ReadBlobByte(image);
if (opcode & 0x40)
operand=ReadBlobLSBSignedShort(image);
pixel=(unsigned char) ReadBlobByte(image);
(void) ReadBlobByte(image);
operand++;
offset=((image->rows-y-1)*image->columns*number_planes)+x*
number_planes+plane;
if ((offset < 0) ||
(offset+((size_t) operand*number_planes) > pixel_info_length))
{
if (number_colormaps != 0)
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
}
p=pixels+offset;
for (i=0; i < (ssize_t) operand; i++)
{
if ((y < (ssize_t) image->rows) &&
((x+i) < (ssize_t) image->columns))
*p=pixel;
p+=number_planes;
}
x+=operand;
break;
}
default:
break;
}
opcode=ReadBlobByte(image);
} while (((opcode & 0x3f) != EOFOp) && (opcode != EOF));
if (number_colormaps != 0)
{
MagickStatusType
mask;
/*
Apply colormap affineation to image.
*/
mask=(MagickStatusType) (map_length-1);
p=pixels;
x=(ssize_t) number_planes;
if (number_colormaps == 1)
for (i=0; i < (ssize_t) number_pixels; i++)
{
if (IsValidColormapIndex(image,*p & mask,&index,exception) ==
MagickFalse)
break;
*p=colormap[(ssize_t) index];
p++;
}
else
if ((number_planes >= 3) && (number_colormaps >= 3))
for (i=0; i < (ssize_t) number_pixels; i++)
for (x=0; x < (ssize_t) number_planes; x++)
{
if (IsValidColormapIndex(image,(size_t) (x*map_length+
(*p & mask)),&index,exception) == MagickFalse)
break;
*p=colormap[(ssize_t) index];
p++;
}
if ((i < (ssize_t) number_pixels) || (x < (ssize_t) number_planes))
{
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
}
}
/*
Initialize image structure.
*/
if (number_planes >= 3)
{
/*
Convert raster image to DirectClass pixel packets.
*/
p=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ScaleCharToQuantum(*p++));
SetPixelGreen(q,ScaleCharToQuantum(*p++));
SetPixelBlue(q,ScaleCharToQuantum(*p++));
if (image->matte != MagickFalse)
SetPixelAlpha(q,ScaleCharToQuantum(*p++));
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
}
else
{
/*
Create colormap.
*/
if (number_colormaps == 0)
map_length=256;
if (AcquireImageColormap(image,map_length) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
p=colormap;
if (number_colormaps == 1)
for (i=0; i < (ssize_t) image->colors; i++)
{
/*
Pseudocolor.
*/
image->colormap[i].red=ScaleCharToQuantum((unsigned char) i);
image->colormap[i].green=ScaleCharToQuantum((unsigned char) i);
image->colormap[i].blue=ScaleCharToQuantum((unsigned char) i);
}
else
if (number_colormaps > 1)
for (i=0; i < (ssize_t) image->colors; i++)
{
image->colormap[i].red=ScaleCharToQuantum(*p);
image->colormap[i].green=ScaleCharToQuantum(*(p+map_length));
image->colormap[i].blue=ScaleCharToQuantum(*(p+map_length*2));
p++;
}
p=pixels;
if (image->matte == MagickFalse)
{
/*
Convert raster image to PseudoClass pixel packets.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetAuthenticIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
SetPixelIndex(indexes+x,*p++);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
y,image->rows);
if (status == MagickFalse)
break;
}
}
(void) SyncImage(image);
}
else
{
/*
Image has a matte channel-- promote to DirectClass.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsValidColormapIndex(image,*p++,&index,exception) ==
MagickFalse)
break;
SetPixelRed(q,image->colormap[(ssize_t) index].red);
if (IsValidColormapIndex(image,*p++,&index,exception) ==
MagickFalse)
break;
SetPixelGreen(q,image->colormap[(ssize_t) index].green);
if (IsValidColormapIndex(image,*p++,&index,exception) ==
MagickFalse)
break;
SetPixelBlue(q,image->colormap[(ssize_t) index].blue);
SetPixelAlpha(q,ScaleCharToQuantum(*p++));
q++;
}
if (x < (ssize_t) image->columns)
break;
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
y,image->rows);
if (status == MagickFalse)
break;
}
}
image->colormap=(PixelPacket *) RelinquishMagickMemory(
image->colormap);
image->storage_class=DirectClass;
image->colors=0;
}
}
if (number_colormaps != 0)
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
pixel_info=RelinquishVirtualMemory(pixel_info);
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
(void) ReadBlobByte(image);
count=ReadBlob(image,2,(unsigned char *) magick);
if ((count != 0) && (memcmp(magick,"\122\314",2) == 0))
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while ((count != 0) && (memcmp(magick,"\122\314",2) == 0));
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
| static Image *ReadRLEImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
#define SkipLinesOp 0x01
#define SetColorOp 0x02
#define SkipPixelsOp 0x03
#define ByteDataOp 0x05
#define RunDataOp 0x06
#define EOFOp 0x07
char
magick[12];
Image
*image;
IndexPacket
index;
int
opcode,
operand,
status;
MagickStatusType
flags;
MagickSizeType
number_pixels;
MemoryInfo
*pixel_info;
register IndexPacket
*indexes;
register ssize_t
x;
register PixelPacket
*q;
register ssize_t
i;
register unsigned char
*p;
size_t
bits_per_pixel,
map_length,
number_colormaps,
number_planes,
number_planes_filled,
one,
offset,
pixel_info_length;
ssize_t
count,
y;
unsigned char
background_color[256],
*colormap,
pixel,
plane,
*pixels;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
/*
Determine if this a RLE file.
*/
count=ReadBlob(image,2,(unsigned char *) magick);
if ((count != 2) || (memcmp(magick,"\122\314",2) != 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
do
{
/*
Read image header.
*/
image->page.x=ReadBlobLSBShort(image);
image->page.y=ReadBlobLSBShort(image);
image->columns=ReadBlobLSBShort(image);
image->rows=ReadBlobLSBShort(image);
flags=(MagickStatusType) ReadBlobByte(image);
image->matte=flags & 0x04 ? MagickTrue : MagickFalse;
number_planes=(size_t) ReadBlobByte(image);
bits_per_pixel=(size_t) ReadBlobByte(image);
number_colormaps=(size_t) ReadBlobByte(image);
map_length=(unsigned char) ReadBlobByte(image);
if (map_length >= 32)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
one=1;
map_length=one << map_length;
if ((number_planes == 0) || (number_planes == 2) ||
((flags & 0x04) && (number_colormaps > 254)) || (bits_per_pixel != 8) ||
(image->columns == 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (flags & 0x02)
{
/*
No background color-- initialize to black.
*/
for (i=0; i < (ssize_t) number_planes; i++)
background_color[i]=0;
(void) ReadBlobByte(image);
}
else
{
/*
Initialize background color.
*/
p=background_color;
for (i=0; i < (ssize_t) number_planes; i++)
*p++=(unsigned char) ReadBlobByte(image);
}
if ((number_planes & 0x01) == 0)
(void) ReadBlobByte(image);
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
colormap=(unsigned char *) NULL;
if (number_colormaps != 0)
{
/*
Read image colormaps.
*/
colormap=(unsigned char *) AcquireQuantumMemory(number_colormaps,
3*map_length*sizeof(*colormap));
if (colormap == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
p=colormap;
for (i=0; i < (ssize_t) number_colormaps; i++)
for (x=0; x < (ssize_t) map_length; x++)
*p++=(unsigned char) ScaleShortToQuantum(ReadBlobLSBShort(image));
}
if ((flags & 0x08) != 0)
{
char
*comment;
size_t
length;
/*
Read image comment.
*/
length=ReadBlobLSBShort(image);
if (length != 0)
{
comment=(char *) AcquireQuantumMemory(length,sizeof(*comment));
if (comment == (char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
(void) ReadBlob(image,length-1,(unsigned char *) comment);
comment[length-1]='\0';
(void) SetImageProperty(image,"comment",comment);
comment=DestroyString(comment);
if ((length & 0x01) == 0)
(void) ReadBlobByte(image);
}
}
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
/*
Allocate RLE pixels.
*/
if (image->matte != MagickFalse)
number_planes++;
number_pixels=(MagickSizeType) image->columns*image->rows;
number_planes_filled=(number_planes % 2 == 0) ? number_planes :
number_planes+1;
if ((number_pixels*number_planes_filled) != (size_t) (number_pixels*
number_planes_filled))
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
pixel_info=AcquireVirtualMemory(image->columns,image->rows*
MagickMax(number_planes_filled,4)*sizeof(*pixels));
if (pixel_info == (MemoryInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
pixel_info_length=image->columns*image->rows*
MagickMax(number_planes_filled,4);
pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
if ((flags & 0x01) && !(flags & 0x02))
{
ssize_t
j;
/*
Set background color.
*/
p=pixels;
for (i=0; i < (ssize_t) number_pixels; i++)
{
if (image->matte == MagickFalse)
for (j=0; j < (ssize_t) number_planes; j++)
*p++=background_color[j];
else
{
for (j=0; j < (ssize_t) (number_planes-1); j++)
*p++=background_color[j];
*p++=0; /* initialize matte channel */
}
}
}
/*
Read runlength-encoded image.
*/
plane=0;
x=0;
y=0;
opcode=ReadBlobByte(image);
do
{
switch (opcode & 0x3f)
{
case SkipLinesOp:
{
operand=ReadBlobByte(image);
if (opcode & 0x40)
operand=ReadBlobLSBSignedShort(image);
x=0;
y+=operand;
break;
}
case SetColorOp:
{
operand=ReadBlobByte(image);
plane=(unsigned char) operand;
if (plane == 255)
plane=(unsigned char) (number_planes-1);
x=0;
break;
}
case SkipPixelsOp:
{
operand=ReadBlobByte(image);
if (opcode & 0x40)
operand=ReadBlobLSBSignedShort(image);
x+=operand;
break;
}
case ByteDataOp:
{
operand=ReadBlobByte(image);
if (opcode & 0x40)
operand=ReadBlobLSBSignedShort(image);
offset=((image->rows-y-1)*image->columns*number_planes)+x*
number_planes+plane;
operand++;
if (offset+((size_t) operand*number_planes) > pixel_info_length)
{
if (number_colormaps != 0)
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
}
p=pixels+offset;
for (i=0; i < (ssize_t) operand; i++)
{
pixel=(unsigned char) ReadBlobByte(image);
if ((y < (ssize_t) image->rows) &&
((x+i) < (ssize_t) image->columns))
*p=pixel;
p+=number_planes;
}
if (operand & 0x01)
(void) ReadBlobByte(image);
x+=operand;
break;
}
case RunDataOp:
{
operand=ReadBlobByte(image);
if (opcode & 0x40)
operand=ReadBlobLSBSignedShort(image);
pixel=(unsigned char) ReadBlobByte(image);
(void) ReadBlobByte(image);
operand++;
offset=((image->rows-y-1)*image->columns*number_planes)+x*
number_planes+plane;
p=pixels+offset;
if (offset+((size_t) operand*number_planes) > pixel_info_length)
{
if (number_colormaps != 0)
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
}
for (i=0; i < (ssize_t) operand; i++)
{
if ((y < (ssize_t) image->rows) &&
((x+i) < (ssize_t) image->columns))
*p=pixel;
p+=number_planes;
}
x+=operand;
break;
}
default:
break;
}
opcode=ReadBlobByte(image);
} while (((opcode & 0x3f) != EOFOp) && (opcode != EOF));
if (number_colormaps != 0)
{
MagickStatusType
mask;
/*
Apply colormap affineation to image.
*/
mask=(MagickStatusType) (map_length-1);
p=pixels;
x=(ssize_t) number_planes;
if (number_colormaps == 1)
for (i=0; i < (ssize_t) number_pixels; i++)
{
if (IsValidColormapIndex(image,*p & mask,&index,exception) ==
MagickFalse)
break;
*p=colormap[(ssize_t) index];
p++;
}
else
if ((number_planes >= 3) && (number_colormaps >= 3))
for (i=0; i < (ssize_t) number_pixels; i++)
for (x=0; x < (ssize_t) number_planes; x++)
{
if (IsValidColormapIndex(image,(size_t) (x*map_length+
(*p & mask)),&index,exception) == MagickFalse)
break;
*p=colormap[(ssize_t) index];
p++;
}
if ((i < (ssize_t) number_pixels) || (x < (ssize_t) number_planes))
{
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
}
}
/*
Initialize image structure.
*/
if (number_planes >= 3)
{
/*
Convert raster image to DirectClass pixel packets.
*/
p=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ScaleCharToQuantum(*p++));
SetPixelGreen(q,ScaleCharToQuantum(*p++));
SetPixelBlue(q,ScaleCharToQuantum(*p++));
if (image->matte != MagickFalse)
SetPixelAlpha(q,ScaleCharToQuantum(*p++));
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
}
else
{
/*
Create colormap.
*/
if (number_colormaps == 0)
map_length=256;
if (AcquireImageColormap(image,map_length) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
p=colormap;
if (number_colormaps == 1)
for (i=0; i < (ssize_t) image->colors; i++)
{
/*
Pseudocolor.
*/
image->colormap[i].red=ScaleCharToQuantum((unsigned char) i);
image->colormap[i].green=ScaleCharToQuantum((unsigned char) i);
image->colormap[i].blue=ScaleCharToQuantum((unsigned char) i);
}
else
if (number_colormaps > 1)
for (i=0; i < (ssize_t) image->colors; i++)
{
image->colormap[i].red=ScaleCharToQuantum(*p);
image->colormap[i].green=ScaleCharToQuantum(*(p+map_length));
image->colormap[i].blue=ScaleCharToQuantum(*(p+map_length*2));
p++;
}
p=pixels;
if (image->matte == MagickFalse)
{
/*
Convert raster image to PseudoClass pixel packets.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
indexes=GetAuthenticIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
SetPixelIndex(indexes+x,*p++);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
y,image->rows);
if (status == MagickFalse)
break;
}
}
(void) SyncImage(image);
}
else
{
/*
Image has a matte channel-- promote to DirectClass.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsValidColormapIndex(image,*p++,&index,exception) ==
MagickFalse)
break;
SetPixelRed(q,image->colormap[(ssize_t) index].red);
if (IsValidColormapIndex(image,*p++,&index,exception) ==
MagickFalse)
break;
SetPixelGreen(q,image->colormap[(ssize_t) index].green);
if (IsValidColormapIndex(image,*p++,&index,exception) ==
MagickFalse)
break;
SetPixelBlue(q,image->colormap[(ssize_t) index].blue);
SetPixelAlpha(q,ScaleCharToQuantum(*p++));
q++;
}
if (x < (ssize_t) image->columns)
break;
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
y,image->rows);
if (status == MagickFalse)
break;
}
}
image->colormap=(PixelPacket *) RelinquishMagickMemory(
image->colormap);
image->storage_class=DirectClass;
image->colors=0;
}
}
if (number_colormaps != 0)
colormap=(unsigned char *) RelinquishMagickMemory(colormap);
pixel_info=RelinquishVirtualMemory(pixel_info);
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
(void) ReadBlobByte(image);
count=ReadBlob(image,2,(unsigned char *) magick);
if ((count != 0) && (memcmp(magick,"\122\314",2) == 0))
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while ((count != 0) && (memcmp(magick,"\122\314",2) == 0));
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
| C | ImageMagick | 1 |
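
The change above makes the run offset signed and rejects negative or oversized values before any pixels are written, because the offset is computed from attacker-controlled coordinates in the file. Below is a compact user-space sketch of that validation; the function name and the toy buffer are invented, and only the offset formula mirrors the code above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Validate a run destination computed from untrusted x/y/plane values.
 * With a signed type, an underflowed expression shows up as a negative
 * offset instead of wrapping to a huge unsigned one. */
static bool run_in_bounds(int64_t rows, int64_t columns, int64_t planes,
                          int64_t x, int64_t y, int64_t plane,
                          int64_t run_len, int64_t buf_len, int64_t *offset)
{
    int64_t off = (rows - y - 1) * columns * planes + x * planes + plane;

    if (planes <= 0 || run_len < 0 || off < 0 || off > buf_len)
        return false;                      /* underflow appears as off < 0 */
    if (run_len > (buf_len - off) / planes)
        return false;                      /* run would write past the buffer */
    *offset = off;
    return true;
}

int main(void)
{
    enum { ROWS = 4, COLS = 4, PLANES = 3 };
    uint8_t pixels[ROWS * COLS * PLANES];
    int64_t off = 0;
    memset(pixels, 0, sizeof(pixels));

    /* A y larger than rows would drive the old unsigned offset far past the
     * buffer; with the signed check it is simply refused. */
    bool bad = run_in_bounds(ROWS, COLS, PLANES, 0, 100, 0, 1,
                             sizeof(pixels), &off);
    bool good = run_in_bounds(ROWS, COLS, PLANES, 1, 1, 0, 2,
                              sizeof(pixels), &off);
    printf("y=100: %s, y=1: %s (offset %lld)\n",
           bad ? "accepted" : "rejected",
           good ? "accepted" : "rejected", (long long)off);
    return 0;
}
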
CVE-2017-8069 | https://www.cvedetails.com/cve/CVE-2017-8069/ | CWE-119 | https://github.com/torvalds/linux/commit/7926aff5c57b577ab0f43364ff0c59d968f6a414 | 7926aff5c57b577ab0f43364ff0c59d968f6a414 | rtl8150: Use heap buffers for all register access
Allocating USB buffers on the stack is not portable, and no longer
works on x86_64 (with VMAP_STACK enabled as per default).
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net> | static void free_skb_pool(rtl8150_t *dev)
{
int i;
for (i = 0; i < RX_SKB_POOL_SIZE; i++)
if (dev->rx_skb_pool[i])
dev_kfree_skb(dev->rx_skb_pool[i]);
}
| static void free_skb_pool(rtl8150_t *dev)
{
int i;
for (i = 0; i < RX_SKB_POOL_SIZE; i++)
if (dev->rx_skb_pool[i])
dev_kfree_skb(dev->rx_skb_pool[i]);
}
| C | linux | 0 |
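
The message above concerns USB control transfers: the buffer handed to the USB core may be DMA-mapped, so it has to come from the heap rather than the (possibly vmalloc'ed) stack. Stripped of the kernel specifics, the pattern is just "heap buffer in, copy out, free". In this sketch, do_control_transfer() is an invented stand-in for usb_control_msg(), not a real API.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Stand-in for a transfer primitive that requires a DMA-able (heap) buffer.
 * In the real driver this is usb_control_msg(); here it just fills bytes. */
static int do_control_transfer(void *buf, size_t size)
{
    memset(buf, 0xAB, size);
    return (int)size;
}

/* Read `size` register bytes into caller storage without ever handing the
 * caller's (possibly stack) memory to the transfer layer. */
static int get_registers(void *data, size_t size)
{
    void *buf = malloc(size);      /* heap buffer: safe to map for DMA */
    if (!buf)
        return -1;

    int ret = do_control_transfer(buf, size);
    if (ret > 0)
        memcpy(data, buf, (size_t)ret);

    free(buf);
    return ret;
}

int main(void)
{
    unsigned char regs[4];          /* the caller may still use the stack freely */
    int n = get_registers(regs, sizeof(regs));
    printf("read %d bytes, first=0x%02x\n", n, regs[0]);
    return 0;
}
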
CVE-2014-9421 | https://www.cvedetails.com/cve/CVE-2014-9421/ | null | https://github.com/krb5/krb5/commit/a197e92349a4aa2141b5dff12e9dd44c2a2166e3 | a197e92349a4aa2141b5dff12e9dd44c2a2166e3 | Fix kadm5/gssrpc XDR double free [CVE-2014-9421]
[MITKRB5-SA-2015-001] In auth_gssapi_unwrap_data(), do not free
partial deserialization results upon failure to deserialize. This
responsibility belongs to the callers, svctcp_getargs() and
svcudp_getargs(); doing it in the unwrap function results in freeing
the results twice.
In xdr_krb5_tl_data() and xdr_krb5_principal(), null out the pointers
we are freeing, as other XDR functions such as xdr_bytes() and
xdr_string().
ticket: 8056 (new)
target_version: 1.13.1
tags: pullup | xdr_krb5_timestamp(XDR *xdrs, krb5_timestamp *objp)
{
/* This assumes that int32 and krb5_timestamp are the same size.
This shouldn't be a problem, since we've got a unit test which
checks for this. */
if (!xdr_int32(xdrs, (int32_t *) objp)) {
return (FALSE);
}
return (TRUE);
}
| xdr_krb5_timestamp(XDR *xdrs, krb5_timestamp *objp)
{
/* This assumes that int32 and krb5_timestamp are the same size.
This shouldn't be a problem, since we've got a unit test which
checks for this. */
if (!xdr_int32(xdrs, (int32_t *) objp)) {
return (FALSE);
}
return (TRUE);
}
| C | krb5 | 0 |
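
Part of the fix described above is nulling pointers as they are freed inside the XDR routines, so that running the cleanup twice (once from the failed deserializer, once from the caller) cannot free the same memory twice. The idiom itself is plain C:

#include <stdlib.h>
#include <stdio.h>

/* Free a pointer and null it in one step so cleanup stays idempotent:
 * a second pass over the same structure then sees NULL, and free(NULL)
 * is a no-op instead of a double free. */
#define FREE_AND_NULL(p) do { free(p); (p) = NULL; } while (0)

struct record {
    char *name;
    unsigned char *blob;
};

static void record_release(struct record *r)
{
    FREE_AND_NULL(r->name);
    FREE_AND_NULL(r->blob);
}

int main(void)
{
    struct record r = { malloc(8), malloc(16) };

    record_release(&r);   /* first cleanup frees both fields */
    record_release(&r);   /* second cleanup is harmless */

    printf("name=%p blob=%p\n", (void *)r.name, (void *)r.blob);
    return 0;
}
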
CVE-2019-16995 | https://www.cvedetails.com/cve/CVE-2019-16995/ | CWE-772 | https://github.com/torvalds/linux/commit/6caabe7f197d3466d238f70915d65301f1716626 | 6caabe7f197d3466d238f70915d65301f1716626 | net: hsr: fix memory leak in hsr_dev_finalize()
If hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER) failed to
add port, it directly returns res and forgets to free the node
that allocated in hsr_create_self_node(), and forgets to delete
the node->mac_list linked in hsr->self_node_db.
BUG: memory leak
unreferenced object 0xffff8881cfa0c780 (size 64):
comm "syz-executor.0", pid 2077, jiffies 4294717969 (age 2415.377s)
hex dump (first 32 bytes):
e0 c7 a0 cf 81 88 ff ff 00 02 00 00 00 00 ad de ................
00 e6 49 cd 81 88 ff ff c0 9b 87 d0 81 88 ff ff ..I.............
backtrace:
[<00000000e2ff5070>] hsr_dev_finalize+0x736/0x960 [hsr]
[<000000003ed2e597>] hsr_newlink+0x2b2/0x3e0 [hsr]
[<000000003fa8c6b6>] __rtnl_newlink+0xf1f/0x1600 net/core/rtnetlink.c:3182
[<000000001247a7ad>] rtnl_newlink+0x66/0x90 net/core/rtnetlink.c:3240
[<00000000e7d1b61d>] rtnetlink_rcv_msg+0x54e/0xb90 net/core/rtnetlink.c:5130
[<000000005556bd3a>] netlink_rcv_skb+0x129/0x340 net/netlink/af_netlink.c:2477
[<00000000741d5ee6>] netlink_unicast_kernel net/netlink/af_netlink.c:1310 [inline]
[<00000000741d5ee6>] netlink_unicast+0x49a/0x650 net/netlink/af_netlink.c:1336
[<000000009d56f9b7>] netlink_sendmsg+0x88b/0xdf0 net/netlink/af_netlink.c:1917
[<0000000046b35c59>] sock_sendmsg_nosec net/socket.c:621 [inline]
[<0000000046b35c59>] sock_sendmsg+0xc3/0x100 net/socket.c:631
[<00000000d208adc9>] __sys_sendto+0x33e/0x560 net/socket.c:1786
[<00000000b582837a>] __do_sys_sendto net/socket.c:1798 [inline]
[<00000000b582837a>] __se_sys_sendto net/socket.c:1794 [inline]
[<00000000b582837a>] __x64_sys_sendto+0xdd/0x1b0 net/socket.c:1794
[<00000000c866801d>] do_syscall_64+0x147/0x600 arch/x86/entry/common.c:290
[<00000000fea382d9>] entry_SYSCALL_64_after_hwframe+0x49/0xbe
[<00000000e01dacb3>] 0xffffffffffffffff
Fixes: c5a759117210 ("net/hsr: Use list_head (and rcu) instead of array for slave devices.")
Reported-by: Hulk Robot <hulkci@huawei.com>
Signed-off-by: Mao Wenan <maowenan@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | void hsr_check_carrier_and_operstate(struct hsr_priv *hsr)
{
struct hsr_port *master;
unsigned char old_operstate;
bool has_carrier;
master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
/* netif_stacked_transfer_operstate() cannot be used here since
* it doesn't set IF_OPER_LOWERLAYERDOWN (?)
*/
old_operstate = master->dev->operstate;
has_carrier = hsr_check_carrier(master);
hsr_set_operstate(master, has_carrier);
hsr_check_announce(master->dev, old_operstate);
}
| void hsr_check_carrier_and_operstate(struct hsr_priv *hsr)
{
struct hsr_port *master;
unsigned char old_operstate;
bool has_carrier;
master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
/* netif_stacked_transfer_operstate() cannot be used here since
* it doesn't set IF_OPER_LOWERLAYERDOWN (?)
*/
old_operstate = master->dev->operstate;
has_carrier = hsr_check_carrier(master);
hsr_set_operstate(master, has_carrier);
hsr_check_announce(master->dev, old_operstate);
}
| C | linux | 0 |
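
The leak reported above comes from an error path that returns before undoing an earlier allocation. The usual C shape for this is a single exit ladder of goto labels, one per acquired resource; below is a generic sketch with placeholder names, not the hsr structures themselves.

#include <stdlib.h>
#include <stdio.h>

struct node { int dummy; };

static struct node *create_self_node(void) { return malloc(sizeof(struct node)); }
static int add_port(int should_fail) { return should_fail ? -1 : 0; }

/* Each step that can fail jumps to a label that unwinds exactly what was
 * acquired before it, so no early return can leak the node the way the
 * original error path did. On success, ownership passes to the caller. */
static int device_finalize(int make_port_fail, struct node **out)
{
    int res;
    struct node *self = create_self_node();

    if (!self)
        return -1;

    res = add_port(make_port_fail);
    if (res)
        goto err_free_node;

    *out = self;
    return 0;

err_free_node:
    free(self);
    return res;
}

int main(void)
{
    struct node *dev_node = NULL;

    printf("success path: %d\n", device_finalize(0, &dev_node));
    free(dev_node);
    dev_node = NULL;

    printf("failure path: %d (node freed on the way out)\n",
           device_finalize(1, &dev_node));
    return 0;
}
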
CVE-2016-1639 | https://www.cvedetails.com/cve/CVE-2016-1639/ | null | https://github.com/chromium/chromium/commit/c66b1fc49870c514b1c1e8b53498153176d7ec2b | c66b1fc49870c514b1c1e8b53498153176d7ec2b | cros: Check initial auth type when showing views login.
Bug: 859611
Change-Id: I0298db9bbf4aed6bd40600aef2e1c5794e8cd058
Reviewed-on: https://chromium-review.googlesource.com/1123056
Reviewed-by: Xiaoyin Hu <xiaoyinh@chromium.org>
Commit-Queue: Jacob Dufault <jdufault@chromium.org>
Cr-Commit-Position: refs/heads/master@{#572224} | void UserSelectionScreen::SetHandler(LoginDisplayWebUIHandler* handler) {
handler_ = handler;
if (handler_) {
for (user_manager::User* user : users_)
handler_->OnUserImageChanged(*user);
}
}
| void UserSelectionScreen::SetHandler(LoginDisplayWebUIHandler* handler) {
handler_ = handler;
if (handler_) {
for (user_manager::User* user : users_)
handler_->OnUserImageChanged(*user);
}
}
| C | Chrome | 0 |
null | null | null | https://github.com/chromium/chromium/commit/690d0a9175790c4bd3abd066932bc08203c164ca | 690d0a9175790c4bd3abd066932bc08203c164ca | Avoid excessive nesting / recursion in browser URL handling.
BUG=31517
TEST=ChildProcessSecurityPolicyTest
Review URL: http://codereview.chromium.org/525038
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@35585 0039d316-1c4b-4281-b951-d872f2087c98 | SecurityState()
: enabled_bindings_(0),
can_read_raw_cookies_(false) { }
| SecurityState()
: enabled_bindings_(0),
can_read_raw_cookies_(false) { }
| C | Chrome | 0 |
CVE-2011-3053 | https://www.cvedetails.com/cve/CVE-2011-3053/ | CWE-399 | https://github.com/chromium/chromium/commit/c442b3eda2f1fdd4d1d4864c34c43cbaf223acae | c442b3eda2f1fdd4d1d4864c34c43cbaf223acae | chromeos: Move audio, power, and UI files into subdirs.
This moves more files from chrome/browser/chromeos/ into
subdirectories.
BUG=chromium-os:22896
TEST=did chrome os builds both with and without aura
TBR=sky
Review URL: http://codereview.chromium.org/9125006
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@116746 0039d316-1c4b-4281-b951-d872f2087c98 | void MigrateBrowserPrefs(PrefService* user_prefs, PrefService* local_state) {
int current_version =
local_state->GetInteger(prefs::kMultipleProfilePrefMigration);
if ((current_version & WINDOWS_PREFS) == 0) {
local_state->RegisterIntegerPref(prefs::kDevToolsSplitLocation, -1);
if (local_state->HasPrefPath(prefs::kDevToolsSplitLocation)) {
user_prefs->SetInteger(prefs::kDevToolsSplitLocation,
local_state->GetInteger(prefs::kDevToolsSplitLocation));
}
local_state->ClearPref(prefs::kDevToolsSplitLocation);
local_state->RegisterDictionaryPref(prefs::kBrowserWindowPlacement);
if (local_state->HasPrefPath(prefs::kBrowserWindowPlacement)) {
const PrefService::Preference* pref =
local_state->FindPreference(prefs::kBrowserWindowPlacement);
DCHECK(pref);
user_prefs->Set(prefs::kBrowserWindowPlacement, *(pref->GetValue()));
}
local_state->ClearPref(prefs::kBrowserWindowPlacement);
local_state->SetInteger(prefs::kMultipleProfilePrefMigration,
current_version | WINDOWS_PREFS);
}
}
| void MigrateBrowserPrefs(PrefService* user_prefs, PrefService* local_state) {
int current_version =
local_state->GetInteger(prefs::kMultipleProfilePrefMigration);
if ((current_version & WINDOWS_PREFS) == 0) {
local_state->RegisterIntegerPref(prefs::kDevToolsSplitLocation, -1);
if (local_state->HasPrefPath(prefs::kDevToolsSplitLocation)) {
user_prefs->SetInteger(prefs::kDevToolsSplitLocation,
local_state->GetInteger(prefs::kDevToolsSplitLocation));
}
local_state->ClearPref(prefs::kDevToolsSplitLocation);
local_state->RegisterDictionaryPref(prefs::kBrowserWindowPlacement);
if (local_state->HasPrefPath(prefs::kBrowserWindowPlacement)) {
const PrefService::Preference* pref =
local_state->FindPreference(prefs::kBrowserWindowPlacement);
DCHECK(pref);
user_prefs->Set(prefs::kBrowserWindowPlacement, *(pref->GetValue()));
}
local_state->ClearPref(prefs::kBrowserWindowPlacement);
local_state->SetInteger(prefs::kMultipleProfilePrefMigration,
current_version | WINDOWS_PREFS);
}
}
| C | Chrome | 0 |
CVE-2016-2414 | https://www.cvedetails.com/cve/CVE-2016-2414/ | CWE-20 | https://android.googlesource.com/platform/frameworks/minikin/+/ca8ac8acdad662230ae37998c6c4091bb39402b6 | ca8ac8acdad662230ae37998c6c4091bb39402b6 | Reject fonts with invalid ranges in cmap
A corrupt or malicious font may have a negative size in its cmap
range, which in turn could lead to memory corruption. This patch
detects the case and rejects the font, and also includes an assertion
in the sparse bit set implementation if we missed any such case.
External issue:
https://code.google.com/p/android/issues/detail?id=192618
Bug: 26413177
Change-Id: Icc0c80e4ef389abba0964495b89aa0fae3e9f4b2
| uint32_t SparseBitSet::calcNumPages(const uint32_t* ranges, size_t nRanges) {
bool haveZeroPage = false;
uint32_t nonzeroPageEnd = 0;
uint32_t nPages = 0;
for (size_t i = 0; i < nRanges; i++) {
uint32_t start = ranges[i * 2];
uint32_t end = ranges[i * 2 + 1];
uint32_t startPage = start >> kLogValuesPerPage;
uint32_t endPage = (end - 1) >> kLogValuesPerPage;
if (startPage >= nonzeroPageEnd) {
if (startPage > nonzeroPageEnd) {
if (!haveZeroPage) {
haveZeroPage = true;
nPages++;
}
}
nPages++;
}
nPages += endPage - startPage;
nonzeroPageEnd = endPage + 1;
}
return nPages;
}
| uint32_t SparseBitSet::calcNumPages(const uint32_t* ranges, size_t nRanges) {
bool haveZeroPage = false;
uint32_t nonzeroPageEnd = 0;
uint32_t nPages = 0;
for (size_t i = 0; i < nRanges; i++) {
uint32_t start = ranges[i * 2];
uint32_t end = ranges[i * 2 + 1];
uint32_t startPage = start >> kLogValuesPerPage;
uint32_t endPage = (end - 1) >> kLogValuesPerPage;
if (startPage >= nonzeroPageEnd) {
if (startPage > nonzeroPageEnd) {
if (!haveZeroPage) {
haveZeroPage = true;
nPages++;
}
}
nPages++;
}
nPages += endPage - startPage;
nonzeroPageEnd = endPage + 1;
}
return nPages;
}
| C | Android | 0 |
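
The record above concerns cmap ranges whose end precedes their start; calcNumPages() then computes (end - 1) on unsigned values and everything downstream inherits the wrap-around. Below is a minimal sketch of the up-front validation the commit message describes; the function name and calling convention are invented, and only the pair-of-uint32 range layout mirrors the code above.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Ranges are (start, end) pairs meaning [start, end). A corrupt font can
 * carry end <= start; feeding that into unsigned arithmetic like end - 1 or
 * end - start wraps around, so such a table is rejected before any counting. */
static bool ranges_are_sane(const uint32_t *ranges, size_t n_ranges)
{
    for (size_t i = 0; i < n_ranges; i++) {
        uint32_t start = ranges[i * 2];
        uint32_t end   = ranges[i * 2 + 1];
        if (end <= start)
            return false;   /* empty or "negative size" range */
    }
    return true;
}

int main(void)
{
    uint32_t good[] = { 0x20, 0x7f, 0xa0, 0x100 };
    uint32_t bad[]  = { 0x20, 0x7f, 0x100, 0xa0 };   /* second range runs backwards */

    printf("good: %s\n", ranges_are_sane(good, 2) ? "accepted" : "rejected");
    printf("bad:  %s\n", ranges_are_sane(bad, 2)  ? "accepted" : "rejected");
    return 0;
}
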
CVE-2013-7448 | https://www.cvedetails.com/cve/CVE-2013-7448/ | CWE-22 | https://github.com/yarolig/didiwiki/commit/5e5c796617e1712905dc5462b94bd5e6c08d15ea | 5e5c796617e1712905dc5462b94bd5e6c08d15ea | page_name_is_good function | wiki_get_pages(int *n_pages, char *expr)
{
WikiPageList **pages;
struct dirent **namelist;
int n, i = 0;
struct stat st;
n = scandir(".", &namelist, 0, (void *)changes_compar);
pages = malloc(sizeof(WikiPageList*)*n);
while(n--)
{
if ((namelist[n]->d_name)[0] == '.'
|| !strcmp(namelist[n]->d_name, "styles.css"))
goto cleanup;
if (expr != NULL)
{ /* Super Simple Search */
char *data = NULL;
if ((data = file_read(namelist[n]->d_name)) != NULL)
if (strstr(data, expr) == NULL)
if (strcmp(namelist[n]->d_name, expr) != 0)
goto cleanup;
}
stat(namelist[n]->d_name, &st);
/* ignore anything but regular readable files */
if (S_ISREG(st.st_mode) && access(namelist[n]->d_name, R_OK) == 0)
{
pages[i] = malloc(sizeof(WikiPageList));
pages[i]->name = strdup (namelist[n]->d_name);
pages[i]->mtime = st.st_mtime;
i++;
}
cleanup:
free(namelist[n]);
}
*n_pages = i;
free(namelist);
if (i==0) return NULL;
return pages;
}
| wiki_get_pages(int *n_pages, char *expr)
{
WikiPageList **pages;
struct dirent **namelist;
int n, i = 0;
struct stat st;
n = scandir(".", &namelist, 0, (void *)changes_compar);
pages = malloc(sizeof(WikiPageList*)*n);
while(n--)
{
if ((namelist[n]->d_name)[0] == '.'
|| !strcmp(namelist[n]->d_name, "styles.css"))
goto cleanup;
if (expr != NULL)
{ /* Super Simple Search */
char *data = NULL;
if ((data = file_read(namelist[n]->d_name)) != NULL)
if (strstr(data, expr) == NULL)
if (strcmp(namelist[n]->d_name, expr) != 0)
goto cleanup;
}
stat(namelist[n]->d_name, &st);
/* ignore anything but regular readable files */
if (S_ISREG(st.st_mode) && access(namelist[n]->d_name, R_OK) == 0)
{
pages[i] = malloc(sizeof(WikiPageList));
pages[i]->name = strdup (namelist[n]->d_name);
pages[i]->mtime = st.st_mtime;
i++;
}
cleanup:
free(namelist[n]);
}
*n_pages = i;
free(namelist);
if (i==0) return NULL;
return pages;
}
| C | didiwiki | 0 |
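
The one-line commit message above ("page_name_is_good function") refers to validating a request-supplied page name before it is used as a relative path. The sketch below is one plausible validator of that kind — it is not didiwiki's actual implementation, just an illustrative set of rules: reject empty names, absolute paths, leading dots, and ".." components.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Accept only simple page names: non-empty, not starting with '/' or '.',
 * and containing no ".." component, so the name cannot escape the wiki
 * directory when it is later used as a relative path. */
static bool page_name_is_good(const char *name)
{
    if (name == NULL || name[0] == '\0')
        return false;
    if (name[0] == '/' || name[0] == '.')
        return false;
    if (strstr(name, "/../") != NULL || strstr(name, "/./") != NULL)
        return false;
    size_t len = strlen(name);
    if (len >= 3 && strcmp(name + len - 3, "/..") == 0)
        return false;
    return true;
}

int main(void)
{
    const char *probes[] = {
        "WikiHome", "notes/todo", "../etc/passwd", "/etc/passwd", ".ssh/id_rsa",
    };
    for (size_t i = 0; i < sizeof(probes) / sizeof(probes[0]); i++)
        printf("%-16s %s\n", probes[i],
               page_name_is_good(probes[i]) ? "ok" : "rejected");
    return 0;
}
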
CVE-2018-16790 | https://www.cvedetails.com/cve/CVE-2018-16790/ | CWE-125 | https://github.com/mongodb/mongo-c-driver/commit/0d9a4d98bfdf4acd2c0138d4aaeb4e2e0934bd84 | 0d9a4d98bfdf4acd2c0138d4aaeb4e2e0934bd84 | Fix for CVE-2018-16790 -- Verify bounds before binary length read.
As reported here: https://jira.mongodb.org/browse/CDRIVER-2819,
a heap overread occurs due to a failure to correctly verify data
bounds.
In the original check, len - o returns the data left including the
sizeof(l) we just read. Instead, the comparison should check
against the data left NOT including the binary int32, i.e. just
subtype (byte*) instead of int32 subtype (byte*).
Added in test for corrupted BSON example. | bson_iter_double (const bson_iter_t *iter) /* IN */
{
BSON_ASSERT (iter);
if (ITER_TYPE (iter) == BSON_TYPE_DOUBLE) {
return bson_iter_double_unsafe (iter);
}
return 0;
}
| bson_iter_double (const bson_iter_t *iter) /* IN */
{
BSON_ASSERT (iter);
if (ITER_TYPE (iter) == BSON_TYPE_DOUBLE) {
return bson_iter_double_unsafe (iter);
}
return 0;
}
| C | mongo-c-driver | 0 |
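
The overread described above comes from checking the remaining input against the wrong quantity before reading a 4-byte length plus a subtype byte. The general safe shape — first make sure the fixed header fits, then make sure the declared payload fits in what is left — looks like this (a generic parser sketch, not libbson's code; a little-endian host is assumed for the length read):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Parse a field laid out as: int32 length | 1-byte subtype | length bytes.
 * Both the fixed header and the declared payload are checked against the
 * bytes actually remaining in the buffer before anything is read. */
static bool parse_binary(const uint8_t *buf, size_t buf_len, size_t off,
                         uint8_t *subtype, const uint8_t **data, uint32_t *data_len)
{
    if (buf_len < off || buf_len - off < sizeof(uint32_t) + 1)
        return false;                         /* header itself would overread */

    uint32_t declared;
    memcpy(&declared, buf + off, sizeof(declared));   /* little-endian host assumed */

    size_t remaining = buf_len - off - sizeof(uint32_t) - 1;
    if (declared > remaining)
        return false;                         /* payload would overread */

    *subtype  = buf[off + sizeof(uint32_t)];
    *data     = buf + off + sizeof(uint32_t) + 1;
    *data_len = declared;
    return true;
}

int main(void)
{
    /* length = 3, subtype = 0x00, payload "abc" */
    uint8_t ok[]  = { 3, 0, 0, 0, 0x00, 'a', 'b', 'c' };
    /* length claims 100 bytes but only 3 follow */
    uint8_t bad[] = { 100, 0, 0, 0, 0x00, 'a', 'b', 'c' };

    uint8_t st; const uint8_t *d; uint32_t n;
    printf("ok:  %s\n", parse_binary(ok, sizeof(ok), 0, &st, &d, &n) ? "parsed" : "rejected");
    printf("bad: %s\n", parse_binary(bad, sizeof(bad), 0, &st, &d, &n) ? "parsed" : "rejected");
    return 0;
}
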
CVE-2018-11469 | https://www.cvedetails.com/cve/CVE-2018-11469/ | CWE-200 | https://git.haproxy.org/?p=haproxy-1.8.git;a=commit;h=17514045e5d934dede62116216c1b016fe23dd06 | 17514045e5d934dede62116216c1b016fe23dd06 | null | void http_init_txn(struct stream *s)
{
struct http_txn *txn = s->txn;
struct proxy *fe = strm_fe(s);
txn->flags = 0;
txn->status = -1;
txn->cookie_first_date = 0;
txn->cookie_last_date = 0;
txn->srv_cookie = NULL;
txn->cli_cookie = NULL;
txn->uri = NULL;
http_txn_reset_req(txn);
http_txn_reset_res(txn);
txn->req.chn = &s->req;
txn->rsp.chn = &s->res;
txn->auth.method = HTTP_AUTH_UNKNOWN;
txn->req.err_pos = txn->rsp.err_pos = -2; /* block buggy requests/responses */
if (fe->options2 & PR_O2_REQBUG_OK)
txn->req.err_pos = -1; /* let buggy requests pass */
if (txn->hdr_idx.v)
hdr_idx_init(&txn->hdr_idx);
vars_init(&s->vars_txn, SCOPE_TXN);
vars_init(&s->vars_reqres, SCOPE_REQ);
}
| void http_init_txn(struct stream *s)
{
struct http_txn *txn = s->txn;
struct proxy *fe = strm_fe(s);
txn->flags = 0;
txn->status = -1;
txn->cookie_first_date = 0;
txn->cookie_last_date = 0;
txn->srv_cookie = NULL;
txn->cli_cookie = NULL;
txn->uri = NULL;
http_txn_reset_req(txn);
http_txn_reset_res(txn);
txn->req.chn = &s->req;
txn->rsp.chn = &s->res;
txn->auth.method = HTTP_AUTH_UNKNOWN;
txn->req.err_pos = txn->rsp.err_pos = -2; /* block buggy requests/responses */
if (fe->options2 & PR_O2_REQBUG_OK)
txn->req.err_pos = -1; /* let buggy requests pass */
if (txn->hdr_idx.v)
hdr_idx_init(&txn->hdr_idx);
vars_init(&s->vars_txn, SCOPE_TXN);
vars_init(&s->vars_reqres, SCOPE_REQ);
}
| C | haproxy | 0 |
CVE-2015-8866 | https://www.cvedetails.com/cve/CVE-2015-8866/ | null | https://git.php.net/?p=php-src.git;a=commit;h=de31324c221c1791b26350ba106cc26bad23ace9 | de31324c221c1791b26350ba106cc26bad23ace9 | null | static PHP_RINIT_FUNCTION(libxml)
{
if (_php_libxml_per_request_initialization) {
/* report errors via handler rather than stderr */
xmlSetGenericErrorFunc(NULL, php_libxml_error_handler);
xmlParserInputBufferCreateFilenameDefault(php_libxml_input_buffer_create_filename);
xmlOutputBufferCreateFilenameDefault(php_libxml_output_buffer_create_filename);
/* Enable the entity loader by default. This ensure that
* other threads/requests that might have disable the loader
* do not affect the current request.
*/
LIBXML(entity_loader_disabled) = 0;
}
return SUCCESS;
}
| static PHP_RINIT_FUNCTION(libxml)
{
if (_php_libxml_per_request_initialization) {
/* report errors via handler rather than stderr */
xmlSetGenericErrorFunc(NULL, php_libxml_error_handler);
xmlParserInputBufferCreateFilenameDefault(php_libxml_input_buffer_create_filename);
xmlOutputBufferCreateFilenameDefault(php_libxml_output_buffer_create_filename);
}
return SUCCESS;
}
| C | php | 1 |
CVE-2016-3062 | https://www.cvedetails.com/cve/CVE-2016-3062/ | CWE-119 | https://github.com/FFmpeg/FFmpeg/commit/689e59b7ffed34eba6159dcc78e87133862e3746 | 689e59b7ffed34eba6159dcc78e87133862e3746 | mov: reset dref_count on realloc to keep values consistent.
This fixes a potential crash.
Signed-off-by: Michael Niedermayer <michaelni@gmx.at> | int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries)
{
AVStream *st;
MOVStreamContext *sc;
int j, pseudo_stream_id;
if (c->fc->nb_streams < 1)
return 0;
st = c->fc->streams[c->fc->nb_streams-1];
sc = st->priv_data;
for (pseudo_stream_id=0; pseudo_stream_id<entries; pseudo_stream_id++) {
enum CodecID id;
int dref_id = 1;
MOVAtom a = { AV_RL32("stsd") };
int64_t start_pos = avio_tell(pb);
int size = avio_rb32(pb); /* size */
uint32_t format = avio_rl32(pb); /* data format */
if (size >= 16) {
avio_rb32(pb); /* reserved */
avio_rb16(pb); /* reserved */
dref_id = avio_rb16(pb);
}else if (size <= 0){
av_log(c->fc, AV_LOG_ERROR, "invalid size %d in stsd\n", size);
return -1;
}
if (st->codec->codec_tag &&
st->codec->codec_tag != format &&
(c->fc->video_codec_id ? ff_codec_get_id(ff_codec_movvideo_tags, format) != c->fc->video_codec_id
: st->codec->codec_tag != MKTAG('j','p','e','g'))
){
/* Multiple fourcc, we skip JPEG. This is not correct, we should
* export it as a separate AVStream but this needs a few changes
* in the MOV demuxer, patch welcome. */
av_log(c->fc, AV_LOG_WARNING, "multiple fourcc not supported\n");
avio_skip(pb, size - (avio_tell(pb) - start_pos));
continue;
}
/* we cannot demux concatenated h264 streams because of different extradata */
if (st->codec->codec_tag && st->codec->codec_tag == AV_RL32("avc1"))
av_log(c->fc, AV_LOG_WARNING, "Concatenated H.264 might not play corrently.\n");
sc->pseudo_stream_id = st->codec->codec_tag ? -1 : pseudo_stream_id;
sc->dref_id= dref_id;
st->codec->codec_tag = format;
id = ff_codec_get_id(ff_codec_movaudio_tags, format);
if (id<=0 && ((format&0xFFFF) == 'm'+('s'<<8) || (format&0xFFFF) == 'T'+('S'<<8)))
id = ff_codec_get_id(ff_codec_wav_tags, av_bswap32(format)&0xFFFF);
if (st->codec->codec_type != AVMEDIA_TYPE_VIDEO && id > 0) {
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
} else if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO && /* do not overwrite codec type */
format && format != MKTAG('m','p','4','s')) { /* skip old asf mpeg4 tag */
id = ff_codec_get_id(ff_codec_movvideo_tags, format);
if (id <= 0)
id = ff_codec_get_id(ff_codec_bmp_tags, format);
if (id > 0)
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
else if (st->codec->codec_type == AVMEDIA_TYPE_DATA ||
(st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE &&
st->codec->codec_id == CODEC_ID_NONE)){
id = ff_codec_get_id(ff_codec_movsubtitle_tags, format);
if (id > 0)
st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
}
}
av_dlog(c->fc, "size=%d 4CC= %c%c%c%c codec_type=%d\n", size,
(format >> 0) & 0xff, (format >> 8) & 0xff, (format >> 16) & 0xff,
(format >> 24) & 0xff, st->codec->codec_type);
if (st->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
unsigned int color_depth, len;
int color_greyscale;
int color_table_id;
st->codec->codec_id = id;
avio_rb16(pb); /* version */
avio_rb16(pb); /* revision level */
avio_rb32(pb); /* vendor */
avio_rb32(pb); /* temporal quality */
avio_rb32(pb); /* spatial quality */
st->codec->width = avio_rb16(pb); /* width */
st->codec->height = avio_rb16(pb); /* height */
avio_rb32(pb); /* horiz resolution */
avio_rb32(pb); /* vert resolution */
avio_rb32(pb); /* data size, always 0 */
avio_rb16(pb); /* frames per samples */
len = avio_r8(pb); /* codec name, pascal string */
if (len > 31)
len = 31;
mov_read_mac_string(c, pb, len, st->codec->codec_name, 32);
if (len < 31)
avio_skip(pb, 31 - len);
/* codec_tag YV12 triggers an UV swap in rawdec.c */
if (!memcmp(st->codec->codec_name, "Planar Y'CbCr 8-bit 4:2:0", 25))
st->codec->codec_tag=MKTAG('I', '4', '2', '0');
st->codec->bits_per_coded_sample = avio_rb16(pb); /* depth */
color_table_id = avio_rb16(pb); /* colortable id */
av_dlog(c->fc, "depth %d, ctab id %d\n",
st->codec->bits_per_coded_sample, color_table_id);
/* figure out the palette situation */
color_depth = st->codec->bits_per_coded_sample & 0x1F;
color_greyscale = st->codec->bits_per_coded_sample & 0x20;
/* if the depth is 2, 4, or 8 bpp, file is palettized */
if ((color_depth == 2) || (color_depth == 4) ||
(color_depth == 8)) {
/* for palette traversal */
unsigned int color_start, color_count, color_end;
unsigned char a, r, g, b;
if (color_greyscale) {
int color_index, color_dec;
/* compute the greyscale palette */
st->codec->bits_per_coded_sample = color_depth;
color_count = 1 << color_depth;
color_index = 255;
color_dec = 256 / (color_count - 1);
for (j = 0; j < color_count; j++) {
if (id == CODEC_ID_CINEPAK){
r = g = b = color_count - 1 - color_index;
}else
r = g = b = color_index;
sc->palette[j] =
(0xFFU << 24) | (r << 16) | (g << 8) | (b);
color_index -= color_dec;
if (color_index < 0)
color_index = 0;
}
} else if (color_table_id) {
const uint8_t *color_table;
/* if flag bit 3 is set, use the default palette */
color_count = 1 << color_depth;
if (color_depth == 2)
color_table = ff_qt_default_palette_4;
else if (color_depth == 4)
color_table = ff_qt_default_palette_16;
else
color_table = ff_qt_default_palette_256;
for (j = 0; j < color_count; j++) {
r = color_table[j * 3 + 0];
g = color_table[j * 3 + 1];
b = color_table[j * 3 + 2];
sc->palette[j] =
(0xFFU << 24) | (r << 16) | (g << 8) | (b);
}
} else {
/* load the palette from the file */
color_start = avio_rb32(pb);
color_count = avio_rb16(pb);
color_end = avio_rb16(pb);
if ((color_start <= 255) &&
(color_end <= 255)) {
for (j = color_start; j <= color_end; j++) {
/* each A, R, G, or B component is 16 bits;
* only use the top 8 bits */
a = avio_r8(pb);
avio_r8(pb);
r = avio_r8(pb);
avio_r8(pb);
g = avio_r8(pb);
avio_r8(pb);
b = avio_r8(pb);
avio_r8(pb);
sc->palette[j] =
(a << 24 ) | (r << 16) | (g << 8) | (b);
}
}
}
sc->has_palette = 1;
}
} else if (st->codec->codec_type==AVMEDIA_TYPE_AUDIO) {
int bits_per_sample, flags;
uint16_t version = avio_rb16(pb);
st->codec->codec_id = id;
avio_rb16(pb); /* revision level */
avio_rb32(pb); /* vendor */
st->codec->channels = avio_rb16(pb); /* channel count */
av_dlog(c->fc, "audio channels %d\n", st->codec->channels);
st->codec->bits_per_coded_sample = avio_rb16(pb); /* sample size */
sc->audio_cid = avio_rb16(pb);
avio_rb16(pb); /* packet size = 0 */
st->codec->sample_rate = ((avio_rb32(pb) >> 16));
av_dlog(c->fc, "version =%d, isom =%d\n",version,c->isom);
if (!c->isom) {
if (version==1) {
sc->samples_per_frame = avio_rb32(pb);
avio_rb32(pb); /* bytes per packet */
sc->bytes_per_frame = avio_rb32(pb);
avio_rb32(pb); /* bytes per sample */
} else if (version==2) {
avio_rb32(pb); /* sizeof struct only */
st->codec->sample_rate = av_int2double(avio_rb64(pb)); /* float 64 */
st->codec->channels = avio_rb32(pb);
avio_rb32(pb); /* always 0x7F000000 */
st->codec->bits_per_coded_sample = avio_rb32(pb); /* bits per channel if sound is uncompressed */
flags = avio_rb32(pb); /* lpcm format specific flag */
sc->bytes_per_frame = avio_rb32(pb); /* bytes per audio packet if constant */
sc->samples_per_frame = avio_rb32(pb); /* lpcm frames per audio packet if constant */
if (format == MKTAG('l','p','c','m'))
st->codec->codec_id = ff_mov_get_lpcm_codec_id(st->codec->bits_per_coded_sample, flags);
}
}
switch (st->codec->codec_id) {
case CODEC_ID_PCM_S8:
case CODEC_ID_PCM_U8:
if (st->codec->bits_per_coded_sample == 16)
st->codec->codec_id = CODEC_ID_PCM_S16BE;
break;
case CODEC_ID_PCM_S16LE:
case CODEC_ID_PCM_S16BE:
if (st->codec->bits_per_coded_sample == 8)
st->codec->codec_id = CODEC_ID_PCM_S8;
else if (st->codec->bits_per_coded_sample == 24)
st->codec->codec_id =
st->codec->codec_id == CODEC_ID_PCM_S16BE ?
CODEC_ID_PCM_S24BE : CODEC_ID_PCM_S24LE;
break;
/* set values for old format before stsd version 1 appeared */
case CODEC_ID_MACE3:
sc->samples_per_frame = 6;
sc->bytes_per_frame = 2*st->codec->channels;
break;
case CODEC_ID_MACE6:
sc->samples_per_frame = 6;
sc->bytes_per_frame = 1*st->codec->channels;
break;
case CODEC_ID_ADPCM_IMA_QT:
sc->samples_per_frame = 64;
sc->bytes_per_frame = 34*st->codec->channels;
break;
case CODEC_ID_GSM:
sc->samples_per_frame = 160;
sc->bytes_per_frame = 33;
break;
default:
break;
}
bits_per_sample = av_get_bits_per_sample(st->codec->codec_id);
if (bits_per_sample) {
st->codec->bits_per_coded_sample = bits_per_sample;
sc->sample_size = (bits_per_sample >> 3) * st->codec->channels;
}
} else if (st->codec->codec_type==AVMEDIA_TYPE_SUBTITLE){
MOVAtom fake_atom = { .size = size - (avio_tell(pb) - start_pos) };
if (format != AV_RL32("mp4s")) // mp4s contains a regular esds atom
mov_read_glbl(c, pb, fake_atom);
st->codec->codec_id= id;
st->codec->width = sc->width;
st->codec->height = sc->height;
} else {
if (st->codec->codec_tag == MKTAG('t','m','c','d')) {
MOVStreamContext *tmcd_ctx = st->priv_data;
int val;
avio_rb32(pb); /* reserved */
val = avio_rb32(pb); /* flags */
tmcd_ctx->tmcd_flags = val;
if (val & 1)
st->codec->flags2 |= CODEC_FLAG2_DROP_FRAME_TIMECODE;
avio_rb32(pb); /* time scale */
avio_rb32(pb); /* frame duration */
st->codec->time_base.den = avio_r8(pb); /* number of frame */
st->codec->time_base.num = 1;
}
/* other codec type, just skip (rtp, mp4s, ...) */
avio_skip(pb, size - (avio_tell(pb) - start_pos));
}
/* this will read extra atoms at the end (wave, alac, damr, avcC, SMI ...) */
a.size = size - (avio_tell(pb) - start_pos);
if (a.size > 8) {
int ret;
if ((ret = mov_read_default(c, pb, a)) < 0)
return ret;
} else if (a.size > 0)
avio_skip(pb, a.size);
}
if (st->codec->codec_type==AVMEDIA_TYPE_AUDIO && st->codec->sample_rate==0 && sc->time_scale>1)
st->codec->sample_rate= sc->time_scale;
/* special codec parameters handling */
switch (st->codec->codec_id) {
#if CONFIG_DV_DEMUXER
case CODEC_ID_DVAUDIO:
c->dv_fctx = avformat_alloc_context();
c->dv_demux = avpriv_dv_init_demux(c->dv_fctx);
if (!c->dv_demux) {
av_log(c->fc, AV_LOG_ERROR, "dv demux context init error\n");
return AVERROR(ENOMEM);
}
sc->dv_audio_container = 1;
st->codec->codec_id = CODEC_ID_PCM_S16LE;
break;
#endif
/* no ifdef since parameters are always those */
case CODEC_ID_QCELP:
if (st->codec->codec_tag != MKTAG('Q','c','l','p'))
st->codec->sample_rate = 8000;
st->codec->channels= 1; /* really needed */
break;
case CODEC_ID_AMR_NB:
st->codec->channels= 1; /* really needed */
/* force sample rate for amr, stsd in 3gp does not store sample rate */
st->codec->sample_rate = 8000;
break;
case CODEC_ID_AMR_WB:
st->codec->channels = 1;
st->codec->sample_rate = 16000;
break;
case CODEC_ID_MP2:
case CODEC_ID_MP3:
st->codec->codec_type = AVMEDIA_TYPE_AUDIO; /* force type after stsd for m1a hdlr */
st->need_parsing = AVSTREAM_PARSE_FULL;
break;
case CODEC_ID_GSM:
case CODEC_ID_ADPCM_MS:
case CODEC_ID_ADPCM_IMA_WAV:
st->codec->block_align = sc->bytes_per_frame;
break;
case CODEC_ID_ALAC:
if (st->codec->extradata_size == 36) {
st->codec->channels = AV_RB8 (st->codec->extradata+21);
st->codec->sample_rate = AV_RB32(st->codec->extradata+32);
}
break;
case CODEC_ID_AC3:
st->need_parsing = AVSTREAM_PARSE_FULL;
break;
case CODEC_ID_MPEG1VIDEO:
st->need_parsing = AVSTREAM_PARSE_FULL;
break;
default:
break;
}
return 0;
}
| int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries)
{
AVStream *st;
MOVStreamContext *sc;
int j, pseudo_stream_id;
if (c->fc->nb_streams < 1)
return 0;
st = c->fc->streams[c->fc->nb_streams-1];
sc = st->priv_data;
for (pseudo_stream_id=0; pseudo_stream_id<entries; pseudo_stream_id++) {
enum CodecID id;
int dref_id = 1;
MOVAtom a = { AV_RL32("stsd") };
int64_t start_pos = avio_tell(pb);
int size = avio_rb32(pb); /* size */
uint32_t format = avio_rl32(pb); /* data format */
if (size >= 16) {
avio_rb32(pb); /* reserved */
avio_rb16(pb); /* reserved */
dref_id = avio_rb16(pb);
}else if (size <= 0){
av_log(c->fc, AV_LOG_ERROR, "invalid size %d in stsd\n", size);
return -1;
}
if (st->codec->codec_tag &&
st->codec->codec_tag != format &&
(c->fc->video_codec_id ? ff_codec_get_id(ff_codec_movvideo_tags, format) != c->fc->video_codec_id
: st->codec->codec_tag != MKTAG('j','p','e','g'))
){
/* Multiple fourcc, we skip JPEG. This is not correct, we should
* export it as a separate AVStream but this needs a few changes
* in the MOV demuxer, patch welcome. */
av_log(c->fc, AV_LOG_WARNING, "multiple fourcc not supported\n");
avio_skip(pb, size - (avio_tell(pb) - start_pos));
continue;
}
/* we cannot demux concatenated h264 streams because of different extradata */
if (st->codec->codec_tag && st->codec->codec_tag == AV_RL32("avc1"))
av_log(c->fc, AV_LOG_WARNING, "Concatenated H.264 might not play corrently.\n");
sc->pseudo_stream_id = st->codec->codec_tag ? -1 : pseudo_stream_id;
sc->dref_id= dref_id;
st->codec->codec_tag = format;
id = ff_codec_get_id(ff_codec_movaudio_tags, format);
if (id<=0 && ((format&0xFFFF) == 'm'+('s'<<8) || (format&0xFFFF) == 'T'+('S'<<8)))
id = ff_codec_get_id(ff_codec_wav_tags, av_bswap32(format)&0xFFFF);
if (st->codec->codec_type != AVMEDIA_TYPE_VIDEO && id > 0) {
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
} else if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO && /* do not overwrite codec type */
format && format != MKTAG('m','p','4','s')) { /* skip old asf mpeg4 tag */
id = ff_codec_get_id(ff_codec_movvideo_tags, format);
if (id <= 0)
id = ff_codec_get_id(ff_codec_bmp_tags, format);
if (id > 0)
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
else if (st->codec->codec_type == AVMEDIA_TYPE_DATA ||
(st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE &&
st->codec->codec_id == CODEC_ID_NONE)){
id = ff_codec_get_id(ff_codec_movsubtitle_tags, format);
if (id > 0)
st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
}
}
av_dlog(c->fc, "size=%d 4CC= %c%c%c%c codec_type=%d\n", size,
(format >> 0) & 0xff, (format >> 8) & 0xff, (format >> 16) & 0xff,
(format >> 24) & 0xff, st->codec->codec_type);
if (st->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
unsigned int color_depth, len;
int color_greyscale;
int color_table_id;
st->codec->codec_id = id;
avio_rb16(pb); /* version */
avio_rb16(pb); /* revision level */
avio_rb32(pb); /* vendor */
avio_rb32(pb); /* temporal quality */
avio_rb32(pb); /* spatial quality */
st->codec->width = avio_rb16(pb); /* width */
st->codec->height = avio_rb16(pb); /* height */
avio_rb32(pb); /* horiz resolution */
avio_rb32(pb); /* vert resolution */
avio_rb32(pb); /* data size, always 0 */
avio_rb16(pb); /* frames per samples */
len = avio_r8(pb); /* codec name, pascal string */
if (len > 31)
len = 31;
mov_read_mac_string(c, pb, len, st->codec->codec_name, 32);
if (len < 31)
avio_skip(pb, 31 - len);
/* codec_tag YV12 triggers an UV swap in rawdec.c */
if (!memcmp(st->codec->codec_name, "Planar Y'CbCr 8-bit 4:2:0", 25))
st->codec->codec_tag=MKTAG('I', '4', '2', '0');
st->codec->bits_per_coded_sample = avio_rb16(pb); /* depth */
color_table_id = avio_rb16(pb); /* colortable id */
av_dlog(c->fc, "depth %d, ctab id %d\n",
st->codec->bits_per_coded_sample, color_table_id);
/* figure out the palette situation */
color_depth = st->codec->bits_per_coded_sample & 0x1F;
color_greyscale = st->codec->bits_per_coded_sample & 0x20;
/* if the depth is 2, 4, or 8 bpp, file is palettized */
if ((color_depth == 2) || (color_depth == 4) ||
(color_depth == 8)) {
/* for palette traversal */
unsigned int color_start, color_count, color_end;
unsigned char a, r, g, b;
if (color_greyscale) {
int color_index, color_dec;
/* compute the greyscale palette */
st->codec->bits_per_coded_sample = color_depth;
color_count = 1 << color_depth;
color_index = 255;
color_dec = 256 / (color_count - 1);
for (j = 0; j < color_count; j++) {
if (id == CODEC_ID_CINEPAK){
r = g = b = color_count - 1 - color_index;
}else
r = g = b = color_index;
sc->palette[j] =
(0xFFU << 24) | (r << 16) | (g << 8) | (b);
color_index -= color_dec;
if (color_index < 0)
color_index = 0;
}
} else if (color_table_id) {
const uint8_t *color_table;
/* if flag bit 3 is set, use the default palette */
color_count = 1 << color_depth;
if (color_depth == 2)
color_table = ff_qt_default_palette_4;
else if (color_depth == 4)
color_table = ff_qt_default_palette_16;
else
color_table = ff_qt_default_palette_256;
for (j = 0; j < color_count; j++) {
r = color_table[j * 3 + 0];
g = color_table[j * 3 + 1];
b = color_table[j * 3 + 2];
sc->palette[j] =
(0xFFU << 24) | (r << 16) | (g << 8) | (b);
}
} else {
/* load the palette from the file */
color_start = avio_rb32(pb);
color_count = avio_rb16(pb);
color_end = avio_rb16(pb);
if ((color_start <= 255) &&
(color_end <= 255)) {
for (j = color_start; j <= color_end; j++) {
/* each A, R, G, or B component is 16 bits;
* only use the top 8 bits */
a = avio_r8(pb);
avio_r8(pb);
r = avio_r8(pb);
avio_r8(pb);
g = avio_r8(pb);
avio_r8(pb);
b = avio_r8(pb);
avio_r8(pb);
sc->palette[j] =
(a << 24 ) | (r << 16) | (g << 8) | (b);
}
}
}
sc->has_palette = 1;
}
} else if (st->codec->codec_type==AVMEDIA_TYPE_AUDIO) {
int bits_per_sample, flags;
uint16_t version = avio_rb16(pb);
st->codec->codec_id = id;
avio_rb16(pb); /* revision level */
avio_rb32(pb); /* vendor */
st->codec->channels = avio_rb16(pb); /* channel count */
av_dlog(c->fc, "audio channels %d\n", st->codec->channels);
st->codec->bits_per_coded_sample = avio_rb16(pb); /* sample size */
sc->audio_cid = avio_rb16(pb);
avio_rb16(pb); /* packet size = 0 */
st->codec->sample_rate = ((avio_rb32(pb) >> 16));
av_dlog(c->fc, "version =%d, isom =%d\n",version,c->isom);
if (!c->isom) {
if (version==1) {
sc->samples_per_frame = avio_rb32(pb);
avio_rb32(pb); /* bytes per packet */
sc->bytes_per_frame = avio_rb32(pb);
avio_rb32(pb); /* bytes per sample */
} else if (version==2) {
avio_rb32(pb); /* sizeof struct only */
st->codec->sample_rate = av_int2double(avio_rb64(pb)); /* float 64 */
st->codec->channels = avio_rb32(pb);
avio_rb32(pb); /* always 0x7F000000 */
st->codec->bits_per_coded_sample = avio_rb32(pb); /* bits per channel if sound is uncompressed */
flags = avio_rb32(pb); /* lpcm format specific flag */
sc->bytes_per_frame = avio_rb32(pb); /* bytes per audio packet if constant */
sc->samples_per_frame = avio_rb32(pb); /* lpcm frames per audio packet if constant */
if (format == MKTAG('l','p','c','m'))
st->codec->codec_id = ff_mov_get_lpcm_codec_id(st->codec->bits_per_coded_sample, flags);
}
}
switch (st->codec->codec_id) {
case CODEC_ID_PCM_S8:
case CODEC_ID_PCM_U8:
if (st->codec->bits_per_coded_sample == 16)
st->codec->codec_id = CODEC_ID_PCM_S16BE;
break;
case CODEC_ID_PCM_S16LE:
case CODEC_ID_PCM_S16BE:
if (st->codec->bits_per_coded_sample == 8)
st->codec->codec_id = CODEC_ID_PCM_S8;
else if (st->codec->bits_per_coded_sample == 24)
st->codec->codec_id =
st->codec->codec_id == CODEC_ID_PCM_S16BE ?
CODEC_ID_PCM_S24BE : CODEC_ID_PCM_S24LE;
break;
/* set values for old format before stsd version 1 appeared */
case CODEC_ID_MACE3:
sc->samples_per_frame = 6;
sc->bytes_per_frame = 2*st->codec->channels;
break;
case CODEC_ID_MACE6:
sc->samples_per_frame = 6;
sc->bytes_per_frame = 1*st->codec->channels;
break;
case CODEC_ID_ADPCM_IMA_QT:
sc->samples_per_frame = 64;
sc->bytes_per_frame = 34*st->codec->channels;
break;
case CODEC_ID_GSM:
sc->samples_per_frame = 160;
sc->bytes_per_frame = 33;
break;
default:
break;
}
bits_per_sample = av_get_bits_per_sample(st->codec->codec_id);
if (bits_per_sample) {
st->codec->bits_per_coded_sample = bits_per_sample;
sc->sample_size = (bits_per_sample >> 3) * st->codec->channels;
}
} else if (st->codec->codec_type==AVMEDIA_TYPE_SUBTITLE){
MOVAtom fake_atom = { .size = size - (avio_tell(pb) - start_pos) };
if (format != AV_RL32("mp4s")) // mp4s contains a regular esds atom
mov_read_glbl(c, pb, fake_atom);
st->codec->codec_id= id;
st->codec->width = sc->width;
st->codec->height = sc->height;
} else {
if (st->codec->codec_tag == MKTAG('t','m','c','d')) {
MOVStreamContext *tmcd_ctx = st->priv_data;
int val;
avio_rb32(pb); /* reserved */
val = avio_rb32(pb); /* flags */
tmcd_ctx->tmcd_flags = val;
if (val & 1)
st->codec->flags2 |= CODEC_FLAG2_DROP_FRAME_TIMECODE;
avio_rb32(pb); /* time scale */
avio_rb32(pb); /* frame duration */
st->codec->time_base.den = avio_r8(pb); /* number of frame */
st->codec->time_base.num = 1;
}
/* other codec type, just skip (rtp, mp4s, ...) */
avio_skip(pb, size - (avio_tell(pb) - start_pos));
}
/* this will read extra atoms at the end (wave, alac, damr, avcC, SMI ...) */
a.size = size - (avio_tell(pb) - start_pos);
if (a.size > 8) {
int ret;
if ((ret = mov_read_default(c, pb, a)) < 0)
return ret;
} else if (a.size > 0)
avio_skip(pb, a.size);
}
if (st->codec->codec_type==AVMEDIA_TYPE_AUDIO && st->codec->sample_rate==0 && sc->time_scale>1)
st->codec->sample_rate= sc->time_scale;
/* special codec parameters handling */
switch (st->codec->codec_id) {
#if CONFIG_DV_DEMUXER
case CODEC_ID_DVAUDIO:
c->dv_fctx = avformat_alloc_context();
c->dv_demux = avpriv_dv_init_demux(c->dv_fctx);
if (!c->dv_demux) {
av_log(c->fc, AV_LOG_ERROR, "dv demux context init error\n");
return AVERROR(ENOMEM);
}
sc->dv_audio_container = 1;
st->codec->codec_id = CODEC_ID_PCM_S16LE;
break;
#endif
/* no ifdef since parameters are always those */
case CODEC_ID_QCELP:
if (st->codec->codec_tag != MKTAG('Q','c','l','p'))
st->codec->sample_rate = 8000;
st->codec->channels= 1; /* really needed */
break;
case CODEC_ID_AMR_NB:
st->codec->channels= 1; /* really needed */
/* force sample rate for amr, stsd in 3gp does not store sample rate */
st->codec->sample_rate = 8000;
break;
case CODEC_ID_AMR_WB:
st->codec->channels = 1;
st->codec->sample_rate = 16000;
break;
case CODEC_ID_MP2:
case CODEC_ID_MP3:
st->codec->codec_type = AVMEDIA_TYPE_AUDIO; /* force type after stsd for m1a hdlr */
st->need_parsing = AVSTREAM_PARSE_FULL;
break;
case CODEC_ID_GSM:
case CODEC_ID_ADPCM_MS:
case CODEC_ID_ADPCM_IMA_WAV:
st->codec->block_align = sc->bytes_per_frame;
break;
case CODEC_ID_ALAC:
if (st->codec->extradata_size == 36) {
st->codec->channels = AV_RB8 (st->codec->extradata+21);
st->codec->sample_rate = AV_RB32(st->codec->extradata+32);
}
break;
case CODEC_ID_AC3:
st->need_parsing = AVSTREAM_PARSE_FULL;
break;
case CODEC_ID_MPEG1VIDEO:
st->need_parsing = AVSTREAM_PARSE_FULL;
break;
default:
break;
}
return 0;
}
| C | FFmpeg | 0 |
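Note on the entry above: the function whose allocation the commit title refers to (the "dref" reader) is not part of this record; only ff_mov_read_stsd_entries is stored, unchanged. As a hedged illustration of the pattern the commit message describes — keeping an element count consistent with its (re)allocated buffer so an error path can never leave the count describing freed memory — here is a minimal standalone C sketch. The struct and function names are invented for this example and are not FFmpeg APIs.

#include <stdlib.h>

struct entry_table {            /* hypothetical container, for illustration  */
    void  *entries;             /* backing buffer                            */
    size_t count;               /* callers index entries[0..count-1]         */
    size_t entry_size;
};

/* Clear the count before dropping the old buffer and raise it again only after
 * the new allocation has succeeded, so a failed allocation cannot leave
 * 'count' pointing at memory that no longer exists. */
static int entry_table_realloc(struct entry_table *t, size_t new_count)
{
    free(t->entries);
    t->entries = NULL;
    t->count   = 0;                          /* keep the pair consistent     */

    t->entries = calloc(new_count, t->entry_size);
    if (!t->entries)
        return -1;                           /* count is still 0 on failure  */

    t->count = new_count;
    return 0;
}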
CVE-2016-3760 | https://www.cvedetails.com/cve/CVE-2016-3760/ | CWE-20 | https://android.googlesource.com/platform/system/bt/+/37c88107679d36c419572732b4af6e18bb2f7dce | 37c88107679d36c419572732b4af6e18bb2f7dce | Add guest mode functionality (2/3)
Add a flag to enable() to start Bluetooth in restricted
mode. In restricted mode, all devices that are paired during
restricted mode are deleted upon leaving restricted mode.
Right now restricted mode is only entered while a guest
user is active.
Bug: 27410683
Change-Id: I8f23d28ef0aa3a8df13d469c73005c8e1b894d19
| static void btif_config_write(void) {
assert(config != NULL);
assert(alarm_timer != NULL);
pthread_mutex_lock(&lock);
config_t *config_paired = config_new_clone(config);
btif_config_remove_unpaired(config_paired);
config_save(config_paired, CONFIG_FILE_PATH);
config_free(config_paired);
pthread_mutex_unlock(&lock);
}
| static void btif_config_write(void) {
assert(config != NULL);
assert(alarm_timer != NULL);
pthread_mutex_lock(&lock);
config_t *config_paired = config_new_clone(config);
btif_config_remove_unpaired(config_paired);
config_save(config_paired, CONFIG_FILE_PATH);
config_free(config_paired);
pthread_mutex_unlock(&lock);
}
| C | Android | 0 |
CVE-2011-4930 | https://www.cvedetails.com/cve/CVE-2011-4930/ | CWE-134 | https://htcondor-git.cs.wisc.edu/?p=condor.git;a=commitdiff;h=5e5571d1a431eb3c61977b6dd6ec90186ef79867 | 5e5571d1a431eb3c61977b6dd6ec90186ef79867 | null | CronTab::sort( ExtArray<int> &list )
{
int ctr, ctr2, value;
for ( ctr = 1; ctr <= list.getlast(); ctr++ ) {
value = list[ctr];
ctr2 = ctr;
while ( ( ctr2 > 0 ) && ( list[ctr2 - 1] > value ) ) {
list[ctr2] = list[ctr2 - 1];
ctr2--;
} // WHILE
list[ctr2] = value;
} // FOR
return;
}
| CronTab::sort( ExtArray<int> &list )
{
int ctr, ctr2, value;
for ( ctr = 1; ctr <= list.getlast(); ctr++ ) {
value = list[ctr];
ctr2 = ctr;
while ( ( ctr2 > 0 ) && ( list[ctr2 - 1] > value ) ) {
list[ctr2] = list[ctr2 - 1];
ctr2--;
} // WHILE
list[ctr2] = value;
} // FOR
return;
}
| CPP | htcondor | 0 |
CVE-2015-6763 | https://www.cvedetails.com/cve/CVE-2015-6763/ | null | https://github.com/chromium/chromium/commit/f1574f25e1402e748bf2bd7e28ce3dd96ceb1ca4 | f1574f25e1402e748bf2bd7e28ce3dd96ceb1ca4 | MacViews: Enable secure text input for password Textfields.
In Cocoa the NSTextInputContext automatically enables secure text input
when activated and it's in the secure text entry mode.
RenderWidgetHostViewMac did the similar thing for ages following the
WebKit example.
views::Textfield needs to do the same thing in a fashion that's
sycnrhonized with RenderWidgetHostViewMac, otherwise the race conditions
are possible when the Textfield gets focus, activates the secure text
input mode and the RWHVM loses focus immediately afterwards and disables
the secure text input instead of leaving it in the enabled state.
BUG=818133,677220
Change-Id: I6db6c4b59e4a1a72cbb7f8c7056f71b04a3df08b
Reviewed-on: https://chromium-review.googlesource.com/943064
Commit-Queue: Michail Pishchagin <mblsha@yandex-team.ru>
Reviewed-by: Pavel Feldman <pfeldman@chromium.org>
Reviewed-by: Avi Drissman <avi@chromium.org>
Reviewed-by: Peter Kasting <pkasting@chromium.org>
Cr-Commit-Position: refs/heads/master@{#542517} | ui::EventDispatchDetails MockInputMethod::DispatchKeyEvent(ui::KeyEvent* key) {
#if defined(OS_MACOSX)
if (key->is_char())
return DispatchKeyEventPostIME(key);
#endif
if (key->is_char() && key->HasNativeEvent()) {
key->SetHandled();
return ui::EventDispatchDetails();
}
ui::EventDispatchDetails dispatch_details;
bool handled = !IsTextInputTypeNone() && HasComposition();
ClearStates();
if (handled) {
DCHECK(!key->is_char());
ui::KeyEvent mock_key(ui::ET_KEY_PRESSED,
ui::VKEY_PROCESSKEY,
key->flags());
dispatch_details = DispatchKeyEventPostIME(&mock_key);
} else {
dispatch_details = DispatchKeyEventPostIME(key);
}
if (key->handled() || dispatch_details.dispatcher_destroyed)
return dispatch_details;
ui::TextInputClient* client = GetTextInputClient();
if (client) {
if (handled) {
if (result_text_.length())
client->InsertText(result_text_);
if (composition_.text.length())
client->SetCompositionText(composition_);
else
client->ClearCompositionText();
} else if (key->type() == ui::ET_KEY_PRESSED) {
base::char16 ch = key->GetCharacter();
if (ch)
client->InsertChar(*key);
}
}
ClearComposition();
return dispatch_details;
}
| ui::EventDispatchDetails MockInputMethod::DispatchKeyEvent(ui::KeyEvent* key) {
#if defined(OS_MACOSX)
if (key->is_char())
return DispatchKeyEventPostIME(key);
#endif
if (key->is_char() && key->HasNativeEvent()) {
key->SetHandled();
return ui::EventDispatchDetails();
}
ui::EventDispatchDetails dispatch_details;
bool handled = !IsTextInputTypeNone() && HasComposition();
ClearStates();
if (handled) {
DCHECK(!key->is_char());
ui::KeyEvent mock_key(ui::ET_KEY_PRESSED,
ui::VKEY_PROCESSKEY,
key->flags());
dispatch_details = DispatchKeyEventPostIME(&mock_key);
} else {
dispatch_details = DispatchKeyEventPostIME(key);
}
if (key->handled() || dispatch_details.dispatcher_destroyed)
return dispatch_details;
ui::TextInputClient* client = GetTextInputClient();
if (client) {
if (handled) {
if (result_text_.length())
client->InsertText(result_text_);
if (composition_.text.length())
client->SetCompositionText(composition_);
else
client->ClearCompositionText();
} else if (key->type() == ui::ET_KEY_PRESSED) {
base::char16 ch = key->GetCharacter();
if (ch)
client->InsertChar(*key);
}
}
ClearComposition();
return dispatch_details;
}
| C | Chrome | 0 |
CVE-2014-3690 | https://www.cvedetails.com/cve/CVE-2014-3690/ | CWE-399 | https://github.com/torvalds/linux/commit/d974baa398f34393db76be45f7d4d04fbdbb4a0a | d974baa398f34393db76be45f7d4d04fbdbb4a0a | x86,kvm,vmx: Preserve CR4 across VM entry
CR4 isn't constant; at least the TSD and PCE bits can vary.
TBH, treating CR0 and CR3 as constant scares me a bit, too, but it looks
like it's correct.
This adds a branch and a read from cr4 to each vm entry. Because it is
extremely likely that consecutive entries into the same vcpu will have
the same host cr4 value, this fixes up the vmcs instead of restoring cr4
after the fact. A subsequent patch will add a kernel-wide cr4 shadow,
reducing the overhead in the common case to just two memory reads and a
branch.
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Cc: stable@vger.kernel.org
Cc: Petr Matousek <pmatouse@redhat.com>
Cc: Gleb Natapov <gleb@kernel.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> | static inline u64 vmx_control_msr(u32 low, u32 high)
{
return low | ((u64)high << 32);
}
| static inline u64 vmx_control_msr(u32 low, u32 high)
{
return low | ((u64)high << 32);
}
| C | linux | 0 |
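Note on the entry above: the vmx_control_msr() helper stored in this row is unchanged; the mechanism the commit message describes lives in the VM-entry path. A hedged sketch of that mechanism — re-read the live register on every entry, but rewrite the cached shadow copy only when the value actually changed, so consecutive entries with the same value cost one read and one untaken branch — is below. The names are invented; the real code writes the VMCS HOST_CR4 field rather than a plain variable.

/* Last CR4 value pushed into the shadow copy (the VMCS host-state field in the
 * real code). Consecutive entries on the same vcpu almost always see the same
 * value, so the common case is just the read and the compare. */
static unsigned long cached_host_cr4;

static void sync_host_cr4(unsigned long live_cr4)
{
    if (live_cr4 != cached_host_cr4) {
        /* a real implementation would write the VMCS field here */
        cached_host_cr4 = live_cr4;
    }
}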
CVE-2015-1793 | https://www.cvedetails.com/cve/CVE-2015-1793/ | CWE-254 | https://git.openssl.org/?p=openssl.git;a=commit;h=9a0db453ba017ebcaccbee933ee6511a9ae4d1c8 | 9a0db453ba017ebcaccbee933ee6511a9ae4d1c8 | null | static int check_name_constraints(X509_STORE_CTX *ctx)
{
X509 *x;
int i, j, rv;
/* Check name constraints for all certificates */
for (i = sk_X509_num(ctx->chain) - 1; i >= 0; i--) {
x = sk_X509_value(ctx->chain, i);
/* Ignore self issued certs unless last in chain */
if (i && (x->ex_flags & EXFLAG_SI))
continue;
/*
* Check against constraints for all certificates higher in chain
* including trust anchor. Trust anchor not strictly speaking needed
* but if it includes constraints it is to be assumed it expects them
* to be obeyed.
*/
for (j = sk_X509_num(ctx->chain) - 1; j > i; j--) {
NAME_CONSTRAINTS *nc = sk_X509_value(ctx->chain, j)->nc;
if (nc) {
rv = NAME_CONSTRAINTS_check(x, nc);
if (rv != X509_V_OK) {
ctx->error = rv;
ctx->error_depth = i;
ctx->current_cert = x;
if (!ctx->verify_cb(0, ctx))
return 0;
}
}
}
}
return 1;
}
| static int check_name_constraints(X509_STORE_CTX *ctx)
{
X509 *x;
int i, j, rv;
/* Check name constraints for all certificates */
for (i = sk_X509_num(ctx->chain) - 1; i >= 0; i--) {
x = sk_X509_value(ctx->chain, i);
/* Ignore self issued certs unless last in chain */
if (i && (x->ex_flags & EXFLAG_SI))
continue;
/*
* Check against constraints for all certificates higher in chain
* including trust anchor. Trust anchor not strictly speaking needed
* but if it includes constraints it is to be assumed it expects them
* to be obeyed.
*/
for (j = sk_X509_num(ctx->chain) - 1; j > i; j--) {
NAME_CONSTRAINTS *nc = sk_X509_value(ctx->chain, j)->nc;
if (nc) {
rv = NAME_CONSTRAINTS_check(x, nc);
if (rv != X509_V_OK) {
ctx->error = rv;
ctx->error_depth = i;
ctx->current_cert = x;
if (!ctx->verify_cb(0, ctx))
return 0;
}
}
}
}
return 1;
}
| C | openssl | 0 |
CVE-2016-1621 | https://www.cvedetails.com/cve/CVE-2016-1621/ | CWE-119 | https://android.googlesource.com/platform/external/libvpx/+/04839626ed859623901ebd3a5fd483982186b59d | 04839626ed859623901ebd3a5fd483982186b59d | libwebm: Pull from upstream
Rolling mkvparser from upstream. Primarily for fixing a bug on parsing
failures with certain Opus WebM files.
Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae
The diff is so huge because there were some style clean ups upstream.
But it was ensured that there were no breaking changes when the style
clean ups was done upstream.
Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039 | long long BlockGroup::GetPrevTimeCode() const
| long long BlockGroup::GetPrevTimeCode() const
{
return m_prev;
}
| C | Android | 1 |
CVE-2012-1179 | https://www.cvedetails.com/cve/CVE-2012-1179/ | CWE-264 | https://github.com/torvalds/linux/commit/4a1d704194a441bf83c636004a479e01360ec850 | 4a1d704194a441bf83c636004a479e01360ec850 | mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode
commit 1a5a9906d4e8d1976b701f889d8f35d54b928f25 upstream.
In some cases it may happen that pmd_none_or_clear_bad() is called with
the mmap_sem held in read mode. In those cases the huge page faults can
allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a
false positive from pmd_bad() that will not like to see a pmd
materializing as trans huge.
It's not khugepaged causing the problem, khugepaged holds the mmap_sem
in write mode (and all those sites must hold the mmap_sem in read mode
to prevent pagetables to go away from under them, during code review it
seems vm86 mode on 32bit kernels requires that too unless it's
restricted to 1 thread per process or UP builds). The race is only with
the huge pagefaults that can convert a pmd_none() into a
pmd_trans_huge().
Effectively all these pmd_none_or_clear_bad() sites running with
mmap_sem in read mode are somewhat speculative with the page faults, and
the result is always undefined when they run simultaneously. This is
probably why it wasn't common to run into this. For example if the
madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page
fault, the hugepage will not be zapped, if the page fault runs first it
will be zapped.
Altering pmd_bad() not to error out if it finds hugepmds won't be enough
to fix this, because zap_pmd_range would then proceed to call
zap_pte_range (which would be incorrect if the pmd become a
pmd_trans_huge()).
The simplest way to fix this is to read the pmd in the local stack
(regardless of what we read, no need of actual CPU barriers, only
compiler barrier needed), and be sure it is not changing under the code
that computes its value. Even if the real pmd is changing under the
value we hold on the stack, we don't care. If we actually end up in
zap_pte_range it means the pmd was not none already and it was not huge,
and it can't become huge from under us (khugepaged locking explained
above).
All we need is to enforce that there is no way anymore that in a code
path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad
can run into a hugepmd. The overhead of a barrier() is just a compiler
tweak and should not be measurable (I only added it for THP builds). I
don't exclude different compiler versions may have prevented the race
too by caching the value of *pmd on the stack (that hasn't been
verified, but it wouldn't be impossible considering
pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines
and there's no external function called in between pmd_trans_huge and
pmd_none_or_clear_bad).
if (pmd_trans_huge(*pmd)) {
if (next-addr != HPAGE_PMD_SIZE) {
VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
split_huge_page_pmd(vma->vm_mm, pmd);
} else if (zap_huge_pmd(tlb, vma, pmd, addr))
continue;
/* fall through */
}
if (pmd_none_or_clear_bad(pmd))
Because this race condition could be exercised without special
privileges this was reported in CVE-2012-1179.
The race was identified and fully explained by Ulrich who debugged it.
I'm quoting his accurate explanation below, for reference.
====== start quote =======
mapcount 0 page_mapcount 1
kernel BUG at mm/huge_memory.c:1384!
At some point prior to the panic, a "bad pmd ..." message similar to the
following is logged on the console:
mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7).
The "bad pmd ..." message is logged by pmd_clear_bad() before it clears
the page's PMD table entry.
143 void pmd_clear_bad(pmd_t *pmd)
144 {
-> 145 pmd_ERROR(*pmd);
146 pmd_clear(pmd);
147 }
After the PMD table entry has been cleared, there is an inconsistency
between the actual number of PMD table entries that are mapping the page
and the page's map count (_mapcount field in struct page). When the page
is subsequently reclaimed, __split_huge_page() detects this inconsistency.
1381 if (mapcount != page_mapcount(page))
1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1383 mapcount, page_mapcount(page));
-> 1384 BUG_ON(mapcount != page_mapcount(page));
The root cause of the problem is a race of two threads in a multithreaded
process. Thread B incurs a page fault on a virtual address that has never
been accessed (PMD entry is zero) while Thread A is executing an madvise()
system call on a virtual address within the same 2 MB (huge page) range.
virtual address space
.---------------------.
| |
| |
.-|---------------------|
| | |
| | |<-- B(fault)
| | |
2 MB | |/////////////////////|-.
huge < |/////////////////////| > A(range)
page | |/////////////////////|-'
| | |
| | |
'-|---------------------|
| |
| |
'---------------------'
- Thread A is executing an madvise(..., MADV_DONTNEED) system call
on the virtual address range "A(range)" shown in the picture.
sys_madvise
// Acquire the semaphore in shared mode.
    down_read(&current->mm->mmap_sem)
...
madvise_vma
switch (behavior)
case MADV_DONTNEED:
madvise_dontneed
zap_page_range
unmap_vmas
unmap_page_range
zap_pud_range
zap_pmd_range
//
// Assume that this huge page has never been accessed.
// I.e. content of the PMD entry is zero (not mapped).
//
if (pmd_trans_huge(*pmd)) {
// We don't get here due to the above assumption.
}
//
// Assume that Thread B incurred a page fault and
.---------> // sneaks in here as shown below.
| //
| if (pmd_none_or_clear_bad(pmd))
| {
| if (unlikely(pmd_bad(*pmd)))
| pmd_clear_bad
| {
| pmd_ERROR
| // Log "bad pmd ..." message here.
| pmd_clear
| // Clear the page's PMD entry.
| // Thread B incremented the map count
| // in page_add_new_anon_rmap(), but
| // now the page is no longer mapped
| // by a PMD entry (-> inconsistency).
| }
| }
|
v
- Thread B is handling a page fault on virtual address "B(fault)" shown
in the picture.
...
do_page_fault
__do_page_fault
// Acquire the semaphore in shared mode.
down_read_trylock(&mm->mmap_sem)
...
handle_mm_fault
if (pmd_none(*pmd) && transparent_hugepage_enabled(vma))
// We get here due to the above assumption (PMD entry is zero).
do_huge_pmd_anonymous_page
alloc_hugepage_vma
// Allocate a new transparent huge page here.
...
__do_huge_pmd_anonymous_page
...
spin_lock(&mm->page_table_lock)
...
page_add_new_anon_rmap
// Here we increment the page's map count (starts at -1).
atomic_set(&page->_mapcount, 0)
set_pmd_at
// Here we set the page's PMD entry which will be cleared
// when Thread A calls pmd_clear_bad().
...
spin_unlock(&mm->page_table_lock)
The mmap_sem does not prevent the race because both threads are acquiring
it in shared mode (down_read). Thread B holds the page_table_lock while
the page's map count and PMD table entry are updated. However, Thread A
does not synchronize on that lock.
====== end quote =======
[akpm@linux-foundation.org: checkpatch fixes]
Reported-by: Ulrich Obergfell <uobergfe@redhat.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Jones <davej@redhat.com>
Acked-by: Larry Woodman <lwoodman@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Mark Salter <msalter@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | void clear_huge_page(struct page *page,
unsigned long addr, unsigned int pages_per_huge_page)
{
int i;
if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
clear_gigantic_page(page, addr, pages_per_huge_page);
return;
}
might_sleep();
for (i = 0; i < pages_per_huge_page; i++) {
cond_resched();
clear_user_highpage(page + i, addr + i * PAGE_SIZE);
}
}
| void clear_huge_page(struct page *page,
unsigned long addr, unsigned int pages_per_huge_page)
{
int i;
if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
clear_gigantic_page(page, addr, pages_per_huge_page);
return;
}
might_sleep();
for (i = 0; i < pages_per_huge_page; i++) {
cond_resched();
clear_user_highpage(page + i, addr + i * PAGE_SIZE);
}
}
| C | linux | 0 |
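Note on the entry above: clear_huge_page() is stored unchanged; the fix the commit message describes is a helper that snapshots the racy page-table entry exactly once. Below is a hedged, generic C sketch of that "read into a local, compiler barrier, then decide from the snapshot" pattern. barrier() is the usual GCC-style compiler-only fence, and the bit test is a stand-in for the kernel's pmd_none()/pmd_trans_huge()/pmd_bad() checks, not real page-table semantics.

#define barrier() __asm__ __volatile__("" ::: "memory")   /* compiler-only fence */

/* Decide from a single snapshot of the entry; never dereference 'entry' twice,
 * so a concurrent huge-page fault cannot change the value between the checks. */
static int entry_none_or_huge_or_bad(const unsigned long *entry)
{
    unsigned long snapshot = *entry;    /* one read of the shared value          */
    barrier();                          /* forbid the compiler from re-reading   */

    if (snapshot == 0)                  /* "none": nothing mapped yet            */
        return 1;
    if (snapshot & 0x1UL)               /* stand-in for the huge/bad entry tests */
        return 1;
    return 0;                           /* safe to walk the next level           */
}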
CVE-2018-17468 | https://www.cvedetails.com/cve/CVE-2018-17468/ | CWE-200 | https://github.com/chromium/chromium/commit/5fe74f831fddb92afa5ddfe46490bb49f083132b | 5fe74f831fddb92afa5ddfe46490bb49f083132b | Do not forward resource timing to parent frame after back-forward navigation
LocalFrame has a |should_send_resource_timing_info_to_parent_| flag so that
timing info is sent to the parent only for the first navigation. The flag is
cleared when the first timing is sent to the parent; however, that never happens
if the iframe's first navigation was a back-forward navigation. For such
iframes, we shouldn't send timings to the parent at all.
Bug: 876822
Change-Id: I128b51a82ef278c439548afc8283ae63abdef5c5
Reviewed-on: https://chromium-review.googlesource.com/1186215
Reviewed-by: Kinuko Yasuda <kinuko@chromium.org>
Commit-Queue: Kunihiko Sakamoto <ksakamoto@chromium.org>
Cr-Commit-Position: refs/heads/master@{#585736} | void WebLocalFrameImpl::PostPausableTask(PausableTaskCallback callback) {
DCHECK(GetFrame());
Document* document = GetFrame()->GetDocument();
DCHECK(document);
PausableTask::Post(document, std::move(callback));
}
| void WebLocalFrameImpl::PostPausableTask(PausableTaskCallback callback) {
DCHECK(GetFrame());
Document* document = GetFrame()->GetDocument();
DCHECK(document);
PausableTask::Post(document, std::move(callback));
}
| C | Chrome | 0 |
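Note on the entry above: the WebLocalFrameImpl function stored here is unrelated to the flag the commit message talks about. As a hedged, language-simplified sketch (plain C, invented names) of the stated rule — a child frame may report timing to its parent only for its first, non-history navigation, and a back/forward navigation disables reporting outright:

#include <stdbool.h>

struct child_frame {
    bool may_report_timing_to_parent;   /* starts true for a brand-new frame */
};

static void on_navigation_commit(struct child_frame *f, bool is_history_navigation)
{
    if (is_history_navigation)
        f->may_report_timing_to_parent = false;   /* back/forward: never report */
}

static void maybe_report_timing(struct child_frame *f)
{
    if (!f->may_report_timing_to_parent)
        return;
    /* ... forward the timing entry to the parent frame here ... */
    f->may_report_timing_to_parent = false;       /* only the first report goes */
}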
null | null | null | https://github.com/chromium/chromium/commit/d1a59e4e845a01d7d7b80ef184b672752a9eae4d | d1a59e4e845a01d7d7b80ef184b672752a9eae4d | Fixing cross-process postMessage replies on more than two iterations.
When two frames are replying to each other using event.source across processes,
after the first two replies, things break down. The root cause is that in
RenderViewImpl::GetFrameByMappedID, the lookup was incorrect. It is now
properly searching for the remote frame id and returning the local one.
BUG=153445
Review URL: https://chromiumcodereview.appspot.com/11040015
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@159924 0039d316-1c4b-4281-b951-d872f2087c98 | void RenderViewImpl::DidInitiatePaint() {
pepper_delegate_.ViewInitiatedPaint();
if (GetGuestToEmbedderChannel())
GetGuestToEmbedderChannel()->IssueSwapBuffers(guest_graphics_resource());
}
| void RenderViewImpl::DidInitiatePaint() {
pepper_delegate_.ViewInitiatedPaint();
if (GetGuestToEmbedderChannel())
GetGuestToEmbedderChannel()->IssueSwapBuffers(guest_graphics_resource());
}
| C | Chrome | 0 |
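Note on the entry above: RenderViewImpl::GetFrameByMappedID itself is not included in this record. The commit message says its lookup was keyed on the wrong side of the frame-id mapping; a hedged, minimal C sketch of the corrected direction — match on the remote id, return the local id — follows, with invented types.

#include <stddef.h>

struct frame_id_pair { int local_id; int remote_id; };

/* Resolve the frame id used by the remote process to the id valid in this
 * process: match on the remote column, hand back the local one. */
static int local_frame_for_remote_id(const struct frame_id_pair *map, size_t n,
                                     int remote_id)
{
    for (size_t i = 0; i < n; i++)
        if (map[i].remote_id == remote_id)
            return map[i].local_id;
    return -1;   /* no mapping known */
}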
CVE-2013-0281 | https://www.cvedetails.com/cve/CVE-2013-0281/ | CWE-399 | https://github.com/ClusterLabs/pacemaker/commit/564f7cc2a51dcd2f28ab12a13394f31be5aa3c93 | 564f7cc2a51dcd2f28ab12a13394f31be5aa3c93 | High: core: Internal tls api improvements for reuse with future LRMD tls backend. | cib_ha_peer_callback(HA_Message * msg, void *private_data)
{
xmlNode *xml = convert_ha_message(NULL, msg, __FUNCTION__);
cib_peer_callback(xml, private_data);
free_xml(xml);
}
| cib_ha_peer_callback(HA_Message * msg, void *private_data)
{
xmlNode *xml = convert_ha_message(NULL, msg, __FUNCTION__);
cib_peer_callback(xml, private_data);
free_xml(xml);
}
| C | pacemaker | 0 |
CVE-2009-3605 | https://www.cvedetails.com/cve/CVE-2009-3605/ | CWE-189 | https://cgit.freedesktop.org/poppler/poppler/commit/?id=7b2d314a61fd0e12f47c62996cb49ec0d1ba747a | 7b2d314a61fd0e12f47c62996cb49ec0d1ba747a | null | inline void JBIG2Bitmap::getPixelPtr(int x, int y, JBIG2BitmapPtr *ptr) {
if (y < 0 || y >= h || x >= w) {
ptr->p = NULL;
} else if (x < 0) {
ptr->p = &data[y * line];
ptr->shift = 7;
ptr->x = x;
} else {
ptr->p = &data[y * line + (x >> 3)];
ptr->shift = 7 - (x & 7);
ptr->x = x;
}
}
| inline void JBIG2Bitmap::getPixelPtr(int x, int y, JBIG2BitmapPtr *ptr) {
if (y < 0 || y >= h || x >= w) {
ptr->p = NULL;
} else if (x < 0) {
ptr->p = &data[y * line];
ptr->shift = 7;
ptr->x = x;
} else {
ptr->p = &data[y * line + (x >> 3)];
ptr->shift = 7 - (x & 7);
ptr->x = x;
}
}
| CPP | poppler | 0 |
CVE-2016-3839 | https://www.cvedetails.com/cve/CVE-2016-3839/ | CWE-284 | https://android.googlesource.com/platform/system/bt/+/472271b153c5dc53c28beac55480a8d8434b2d5c | 472271b153c5dc53c28beac55480a8d8434b2d5c | DO NOT MERGE Fix potential DoS caused by delivering signal to BT process
Bug: 28885210
Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360
Conflicts:
btif/co/bta_hh_co.c
btif/src/btif_core.c
Merge conflict resolution of ag/1161415 (referencing ag/1164670)
- Directly into mnc-mr2-release
| void btif_hl_free_buf(void **p)
{
if (*p != NULL)
{
BTIF_TRACE_DEBUG("%s OK", __FUNCTION__ );
GKI_freebuf(*p);
*p = NULL;
}
else
BTIF_TRACE_ERROR("%s NULL pointer",__FUNCTION__ );
}
| void btif_hl_free_buf(void **p)
{
if (*p != NULL)
{
BTIF_TRACE_DEBUG("%s OK", __FUNCTION__ );
GKI_freebuf(*p);
*p = NULL;
}
else
BTIF_TRACE_ERROR("%s NULL pointer",__FUNCTION__ );
}
| C | Android | 0 |
CVE-2016-2505 | https://www.cvedetails.com/cve/CVE-2016-2505/ | CWE-119 | https://android.googlesource.com/platform/frameworks/av/+/4f236c532039a61f0cf681d2e3c6e022911bbb5c | 4f236c532039a61f0cf681d2e3c6e022911bbb5c | Check section size when verifying CRC
Bug: 28333006
Change-Id: Ief7a2da848face78f0edde21e2f2009316076679
| status_t ATSParser::parsePID(
ABitReader *br, unsigned PID,
unsigned continuity_counter,
unsigned payload_unit_start_indicator,
SyncEvent *event) {
ssize_t sectionIndex = mPSISections.indexOfKey(PID);
if (sectionIndex >= 0) {
sp<PSISection> section = mPSISections.valueAt(sectionIndex);
if (payload_unit_start_indicator) {
if (!section->isEmpty()) {
ALOGW("parsePID encounters payload_unit_start_indicator when section is not empty");
section->clear();
}
unsigned skip = br->getBits(8);
section->setSkipBytes(skip + 1); // skip filler bytes + pointer field itself
br->skipBits(skip * 8);
}
if (br->numBitsLeft() % 8 != 0) {
return ERROR_MALFORMED;
}
status_t err = section->append(br->data(), br->numBitsLeft() / 8);
if (err != OK) {
return err;
}
if (!section->isComplete()) {
return OK;
}
if (!section->isCRCOkay()) {
return BAD_VALUE;
}
ABitReader sectionBits(section->data(), section->size());
if (PID == 0) {
            parseProgramAssociationTable(&sectionBits);
} else {
bool handled = false;
for (size_t i = 0; i < mPrograms.size(); ++i) {
status_t err;
if (!mPrograms.editItemAt(i)->parsePSISection(
                        PID, &sectionBits, &err)) {
continue;
}
if (err != OK) {
return err;
}
handled = true;
break;
}
if (!handled) {
mPSISections.removeItem(PID);
section.clear();
}
}
if (section != NULL) {
section->clear();
}
return OK;
}
bool handled = false;
for (size_t i = 0; i < mPrograms.size(); ++i) {
status_t err;
if (mPrograms.editItemAt(i)->parsePID(
PID, continuity_counter, payload_unit_start_indicator,
br, &err, event)) {
if (err != OK) {
return err;
}
handled = true;
break;
}
}
if (!handled) {
ALOGV("PID 0x%04x not handled.", PID);
}
return OK;
}
| status_t ATSParser::parsePID(
ABitReader *br, unsigned PID,
unsigned continuity_counter,
unsigned payload_unit_start_indicator,
SyncEvent *event) {
ssize_t sectionIndex = mPSISections.indexOfKey(PID);
if (sectionIndex >= 0) {
sp<PSISection> section = mPSISections.valueAt(sectionIndex);
if (payload_unit_start_indicator) {
if (!section->isEmpty()) {
ALOGW("parsePID encounters payload_unit_start_indicator when section is not empty");
section->clear();
}
unsigned skip = br->getBits(8);
section->setSkipBytes(skip + 1); // skip filler bytes + pointer field itself
br->skipBits(skip * 8);
}
if (br->numBitsLeft() % 8 != 0) {
return ERROR_MALFORMED;
}
status_t err = section->append(br->data(), br->numBitsLeft() / 8);
if (err != OK) {
return err;
}
if (!section->isComplete()) {
return OK;
}
if (!section->isCRCOkay()) {
return BAD_VALUE;
}
ABitReader sectionBits(section->data(), section->size());
if (PID == 0) {
            parseProgramAssociationTable(&sectionBits);
} else {
bool handled = false;
for (size_t i = 0; i < mPrograms.size(); ++i) {
status_t err;
if (!mPrograms.editItemAt(i)->parsePSISection(
                        PID, &sectionBits, &err)) {
continue;
}
if (err != OK) {
return err;
}
handled = true;
break;
}
if (!handled) {
mPSISections.removeItem(PID);
section.clear();
}
}
if (section != NULL) {
section->clear();
}
return OK;
}
bool handled = false;
for (size_t i = 0; i < mPrograms.size(); ++i) {
status_t err;
if (mPrograms.editItemAt(i)->parsePID(
PID, continuity_counter, payload_unit_start_indicator,
br, &err, event)) {
if (err != OK) {
return err;
}
handled = true;
break;
}
}
if (!handled) {
ALOGV("PID 0x%04x not handled.", PID);
}
return OK;
}
| C | Android | 0 |
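Note on the entry above: the commit title — "Check section size when verifying CRC" — points at PSISection::isCRCOkay(), which is not part of this record (parsePID is stored unchanged). A hedged sketch of the kind of guard that title implies is below: validate the declared section length against what was actually buffered, the bytes to be skipped, and the room needed for the trailing 32-bit CRC before handing anything to the CRC routine. Plain C, invented names.

#include <stdbool.h>
#include <stddef.h>

/* Returns true only if the declared length can be trusted, i.e. the CRC loop
 * cannot be driven past the buffered data and cannot underflow once the skip
 * bytes and the 4-byte CRC field are subtracted. */
static bool section_size_sane(size_t buffered, size_t declared_len, size_t skip)
{
    if (declared_len < skip)            /* cannot skip more than the section holds */
        return false;
    if (declared_len > buffered)        /* claims more bytes than were received    */
        return false;
    if (declared_len - skip < 4)        /* no room left for the 32-bit CRC         */
        return false;
    return true;
}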
CVE-2017-16527 | https://www.cvedetails.com/cve/CVE-2017-16527/ | CWE-416 | https://github.com/torvalds/linux/commit/124751d5e63c823092060074bd0abaae61aaa9c4 | 124751d5e63c823092060074bd0abaae61aaa9c4 | ALSA: usb-audio: Kill stray URB at exiting
The USB-audio driver may leave a stray URB for the mixer interrupt when it
exits due to an error during probe. This leads to a use-after-free
error, as spotted by syzkaller:
==================================================================
BUG: KASAN: use-after-free in snd_usb_mixer_interrupt+0x604/0x6f0
Call Trace:
<IRQ>
__dump_stack lib/dump_stack.c:16
dump_stack+0x292/0x395 lib/dump_stack.c:52
print_address_description+0x78/0x280 mm/kasan/report.c:252
kasan_report_error mm/kasan/report.c:351
kasan_report+0x23d/0x350 mm/kasan/report.c:409
__asan_report_load8_noabort+0x19/0x20 mm/kasan/report.c:430
snd_usb_mixer_interrupt+0x604/0x6f0 sound/usb/mixer.c:2490
__usb_hcd_giveback_urb+0x2e0/0x650 drivers/usb/core/hcd.c:1779
....
Allocated by task 1484:
save_stack_trace+0x1b/0x20 arch/x86/kernel/stacktrace.c:59
save_stack+0x43/0xd0 mm/kasan/kasan.c:447
set_track mm/kasan/kasan.c:459
kasan_kmalloc+0xad/0xe0 mm/kasan/kasan.c:551
kmem_cache_alloc_trace+0x11e/0x2d0 mm/slub.c:2772
kmalloc ./include/linux/slab.h:493
kzalloc ./include/linux/slab.h:666
snd_usb_create_mixer+0x145/0x1010 sound/usb/mixer.c:2540
create_standard_mixer_quirk+0x58/0x80 sound/usb/quirks.c:516
snd_usb_create_quirk+0x92/0x100 sound/usb/quirks.c:560
create_composite_quirk+0x1c4/0x3e0 sound/usb/quirks.c:59
snd_usb_create_quirk+0x92/0x100 sound/usb/quirks.c:560
usb_audio_probe+0x1040/0x2c10 sound/usb/card.c:618
....
Freed by task 1484:
save_stack_trace+0x1b/0x20 arch/x86/kernel/stacktrace.c:59
save_stack+0x43/0xd0 mm/kasan/kasan.c:447
set_track mm/kasan/kasan.c:459
kasan_slab_free+0x72/0xc0 mm/kasan/kasan.c:524
slab_free_hook mm/slub.c:1390
slab_free_freelist_hook mm/slub.c:1412
slab_free mm/slub.c:2988
kfree+0xf6/0x2f0 mm/slub.c:3919
snd_usb_mixer_free+0x11a/0x160 sound/usb/mixer.c:2244
snd_usb_mixer_dev_free+0x36/0x50 sound/usb/mixer.c:2250
__snd_device_free+0x1ff/0x380 sound/core/device.c:91
snd_device_free_all+0x8f/0xe0 sound/core/device.c:244
snd_card_do_free sound/core/init.c:461
release_card_device+0x47/0x170 sound/core/init.c:181
device_release+0x13f/0x210 drivers/base/core.c:814
....
Actually such a URB is killed properly at disconnection when the
device gets probed successfully, and what we need is to apply it for
the error-path, too.
In this patch, we apply snd_usb_mixer_disconnect() at releasing.
Also introduce a new flag, disconnected, to struct usb_mixer_interface
for not performing the disconnection procedure twice.
Reported-by: Andrey Konovalov <andreyknvl@google.com>
Tested-by: Andrey Konovalov <andreyknvl@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Takashi Iwai <tiwai@suse.de> | static int snd_usb_mixer_activate(struct usb_mixer_interface *mixer)
{
int err;
if (mixer->urb) {
err = usb_submit_urb(mixer->urb, GFP_NOIO);
if (err < 0)
return err;
}
return 0;
}
| static int snd_usb_mixer_activate(struct usb_mixer_interface *mixer)
{
int err;
if (mixer->urb) {
err = usb_submit_urb(mixer->urb, GFP_NOIO);
if (err < 0)
return err;
}
return 0;
}
| C | linux | 0 |
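Note on the entry above: snd_usb_mixer_activate() is stored unchanged; the fix described in the commit message is about teardown. Below is a hedged C sketch of that shape — the disconnect routine becomes reachable from both the probe-error/release path and normal disconnection, so a "disconnected" flag makes the URB-killing work idempotent. Names are invented and a stub stands in for usb_kill_urb().

#include <stdbool.h>
#include <stddef.h>

struct mixer_ctx {
    void *urb;            /* in-flight interrupt URB, if any   */
    bool  disconnected;   /* set once teardown has already run */
};

static void kill_urb_stub(void *urb) { (void)urb; /* stands in for usb_kill_urb() */ }

static void mixer_disconnect(struct mixer_ctx *m)
{
    if (m->disconnected)      /* reachable from both release and disconnect paths */
        return;               /* -> run the real work exactly once                */
    if (m->urb)
        kill_urb_stub(m->urb);
    m->disconnected = true;
}

static void mixer_free(struct mixer_ctx *m)
{
    mixer_disconnect(m);      /* the release/error path now also kills the URB */
    /* ... free remaining resources ... */
}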
CVE-2017-15391 | https://www.cvedetails.com/cve/CVE-2017-15391/ | null | https://github.com/chromium/chromium/commit/f1afce25b3f94d8bddec69b08ffbc29b989ad844 | f1afce25b3f94d8bddec69b08ffbc29b989ad844 | [Extensions] Update navigations across hypothetical extension extents
Update code to treat navigations across hypothetical extension extents
(e.g. for nonexistent extensions) the same as we do for navigations
crossing installed extension extents.
Bug: 598265
Change-Id: Ibdf2f563ce1fd108ead279077901020a24de732b
Reviewed-on: https://chromium-review.googlesource.com/617180
Commit-Queue: Devlin <rdevlin.cronin@chromium.org>
Reviewed-by: Alex Moshchuk <alexmos@chromium.org>
Reviewed-by: Nasko Oskov <nasko@chromium.org>
Cr-Commit-Position: refs/heads/master@{#495779} | void ChromeContentBrowserClientExtensionsPart::RenderProcessWillLaunch(
content::RenderProcessHost* host) {
int id = host->GetID();
Profile* profile = Profile::FromBrowserContext(host->GetBrowserContext());
host->AddFilter(new ChromeExtensionMessageFilter(id, profile));
host->AddFilter(new ExtensionMessageFilter(id, profile));
host->AddFilter(new IOThreadExtensionMessageFilter(id, profile));
host->AddFilter(new ExtensionsGuestViewMessageFilter(id, profile));
if (extensions::ExtensionsClient::Get()
->ExtensionAPIEnabledInExtensionServiceWorkers()) {
host->AddFilter(new ExtensionServiceWorkerMessageFilter(
id, profile, host->GetStoragePartition()->GetServiceWorkerContext()));
}
}
| void ChromeContentBrowserClientExtensionsPart::RenderProcessWillLaunch(
content::RenderProcessHost* host) {
int id = host->GetID();
Profile* profile = Profile::FromBrowserContext(host->GetBrowserContext());
host->AddFilter(new ChromeExtensionMessageFilter(id, profile));
host->AddFilter(new ExtensionMessageFilter(id, profile));
host->AddFilter(new IOThreadExtensionMessageFilter(id, profile));
host->AddFilter(new ExtensionsGuestViewMessageFilter(id, profile));
if (extensions::ExtensionsClient::Get()
->ExtensionAPIEnabledInExtensionServiceWorkers()) {
host->AddFilter(new ExtensionServiceWorkerMessageFilter(
id, profile, host->GetStoragePartition()->GetServiceWorkerContext()));
}
}
| C | Chrome | 0 |
CVE-2019-11815 | https://www.cvedetails.com/cve/CVE-2019-11815/ | CWE-362 | https://github.com/torvalds/linux/commit/cb66ddd156203daefb8d71158036b27b0e2caf63 | cb66ddd156203daefb8d71158036b27b0e2caf63 | net: rds: force to destroy connection if t_sock is NULL in rds_tcp_kill_sock().
When it is to cleanup net namespace, rds_tcp_exit_net() will call
rds_tcp_kill_sock(), if t_sock is NULL, it will not call
rds_conn_destroy(), rds_conn_path_destroy() and rds_tcp_conn_free() to free
connection, and the worker cp_conn_w is not stopped, afterwards the net is freed in
net_drop_ns(); While cp_conn_w rds_connect_worker() will call rds_tcp_conn_path_connect()
and reference 'net' which has already been freed.
In rds_tcp_conn_path_connect(), rds_tcp_set_callbacks() will set t_sock = sock before
sock->ops->connect, but if connect() is failed, it will call
rds_tcp_restore_callbacks() and set t_sock = NULL, if connect is always
failed, rds_connect_worker() will try to reconnect all the time, so
rds_tcp_kill_sock() will never to cancel worker cp_conn_w and free the
connections.
Therefore, the condition !tc->t_sock is not needed if it is going to do
cleanup_net->rds_tcp_exit_net->rds_tcp_kill_sock, because tc->t_sock is always
NULL, and there is on other path to cancel cp_conn_w and free
connection. So this patch is to fix this.
rds_tcp_kill_sock():
...
if (net != c_net || !tc->t_sock)
...
Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
==================================================================
BUG: KASAN: use-after-free in inet_create+0xbcc/0xd28
net/ipv4/af_inet.c:340
Read of size 4 at addr ffff8003496a4684 by task kworker/u8:4/3721
CPU: 3 PID: 3721 Comm: kworker/u8:4 Not tainted 5.1.0 #11
Hardware name: linux,dummy-virt (DT)
Workqueue: krdsd rds_connect_worker
Call trace:
dump_backtrace+0x0/0x3c0 arch/arm64/kernel/time.c:53
show_stack+0x28/0x38 arch/arm64/kernel/traps.c:152
__dump_stack lib/dump_stack.c:77 [inline]
dump_stack+0x120/0x188 lib/dump_stack.c:113
print_address_description+0x68/0x278 mm/kasan/report.c:253
kasan_report_error mm/kasan/report.c:351 [inline]
kasan_report+0x21c/0x348 mm/kasan/report.c:409
__asan_report_load4_noabort+0x30/0x40 mm/kasan/report.c:429
inet_create+0xbcc/0xd28 net/ipv4/af_inet.c:340
__sock_create+0x4f8/0x770 net/socket.c:1276
sock_create_kern+0x50/0x68 net/socket.c:1322
rds_tcp_conn_path_connect+0x2b4/0x690 net/rds/tcp_connect.c:114
rds_connect_worker+0x108/0x1d0 net/rds/threads.c:175
process_one_work+0x6e8/0x1700 kernel/workqueue.c:2153
worker_thread+0x3b0/0xdd0 kernel/workqueue.c:2296
kthread+0x2f0/0x378 kernel/kthread.c:255
ret_from_fork+0x10/0x18 arch/arm64/kernel/entry.S:1117
Allocated by task 687:
save_stack mm/kasan/kasan.c:448 [inline]
set_track mm/kasan/kasan.c:460 [inline]
kasan_kmalloc+0xd4/0x180 mm/kasan/kasan.c:553
kasan_slab_alloc+0x14/0x20 mm/kasan/kasan.c:490
slab_post_alloc_hook mm/slab.h:444 [inline]
slab_alloc_node mm/slub.c:2705 [inline]
slab_alloc mm/slub.c:2713 [inline]
kmem_cache_alloc+0x14c/0x388 mm/slub.c:2718
kmem_cache_zalloc include/linux/slab.h:697 [inline]
net_alloc net/core/net_namespace.c:384 [inline]
copy_net_ns+0xc4/0x2d0 net/core/net_namespace.c:424
create_new_namespaces+0x300/0x658 kernel/nsproxy.c:107
unshare_nsproxy_namespaces+0xa0/0x198 kernel/nsproxy.c:206
ksys_unshare+0x340/0x628 kernel/fork.c:2577
__do_sys_unshare kernel/fork.c:2645 [inline]
__se_sys_unshare kernel/fork.c:2643 [inline]
__arm64_sys_unshare+0x38/0x58 kernel/fork.c:2643
__invoke_syscall arch/arm64/kernel/syscall.c:35 [inline]
invoke_syscall arch/arm64/kernel/syscall.c:47 [inline]
el0_svc_common+0x168/0x390 arch/arm64/kernel/syscall.c:83
el0_svc_handler+0x60/0xd0 arch/arm64/kernel/syscall.c:129
el0_svc+0x8/0xc arch/arm64/kernel/entry.S:960
Freed by task 264:
save_stack mm/kasan/kasan.c:448 [inline]
set_track mm/kasan/kasan.c:460 [inline]
__kasan_slab_free+0x114/0x220 mm/kasan/kasan.c:521
kasan_slab_free+0x10/0x18 mm/kasan/kasan.c:528
slab_free_hook mm/slub.c:1370 [inline]
slab_free_freelist_hook mm/slub.c:1397 [inline]
slab_free mm/slub.c:2952 [inline]
kmem_cache_free+0xb8/0x3a8 mm/slub.c:2968
net_free net/core/net_namespace.c:400 [inline]
net_drop_ns.part.6+0x78/0x90 net/core/net_namespace.c:407
net_drop_ns net/core/net_namespace.c:406 [inline]
cleanup_net+0x53c/0x6d8 net/core/net_namespace.c:569
process_one_work+0x6e8/0x1700 kernel/workqueue.c:2153
worker_thread+0x3b0/0xdd0 kernel/workqueue.c:2296
kthread+0x2f0/0x378 kernel/kthread.c:255
ret_from_fork+0x10/0x18 arch/arm64/kernel/entry.S:1117
The buggy address belongs to the object at ffff8003496a3f80
which belongs to the cache net_namespace of size 7872
The buggy address is located 1796 bytes inside of
7872-byte region [ffff8003496a3f80, ffff8003496a5e40)
The buggy address belongs to the page:
page:ffff7e000d25a800 count:1 mapcount:0 mapping:ffff80036ce4b000
index:0x0 compound_mapcount: 0
flags: 0xffffe0000008100(slab|head)
raw: 0ffffe0000008100 dead000000000100 dead000000000200 ffff80036ce4b000
raw: 0000000000000000 0000000080040004 00000001ffffffff 0000000000000000
page dumped because: kasan: bad access detected
Memory state around the buggy address:
ffff8003496a4580: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff8003496a4600: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
>ffff8003496a4680: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
^
ffff8003496a4700: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff8003496a4780: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
==================================================================
Fixes: 467fa15356ac("RDS-TCP: Support multiple RDS-TCP listen endpoints, one per netns.")
Reported-by: Hulk Robot <hulkci@huawei.com>
Signed-off-by: Mao Wenan <maowenan@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | static __net_init int rds_tcp_init_net(struct net *net)
{
struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
struct ctl_table *tbl;
int err = 0;
memset(rtn, 0, sizeof(*rtn));
/* {snd, rcv}buf_size default to 0, which implies we let the
* stack pick the value, and permit auto-tuning of buffer size.
*/
if (net == &init_net) {
tbl = rds_tcp_sysctl_table;
} else {
tbl = kmemdup(rds_tcp_sysctl_table,
sizeof(rds_tcp_sysctl_table), GFP_KERNEL);
if (!tbl) {
pr_warn("could not set allocate syctl table\n");
return -ENOMEM;
}
rtn->ctl_table = tbl;
}
tbl[RDS_TCP_SNDBUF].data = &rtn->sndbuf_size;
tbl[RDS_TCP_RCVBUF].data = &rtn->rcvbuf_size;
rtn->rds_tcp_sysctl = register_net_sysctl(net, "net/rds/tcp", tbl);
if (!rtn->rds_tcp_sysctl) {
pr_warn("could not register sysctl\n");
err = -ENOMEM;
goto fail;
}
#if IS_ENABLED(CONFIG_IPV6)
rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, true);
#else
rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, false);
#endif
if (!rtn->rds_tcp_listen_sock) {
pr_warn("could not set up IPv6 listen sock\n");
#if IS_ENABLED(CONFIG_IPV6)
/* Try IPv4 as some systems disable IPv6 */
rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, false);
if (!rtn->rds_tcp_listen_sock) {
#endif
unregister_net_sysctl_table(rtn->rds_tcp_sysctl);
rtn->rds_tcp_sysctl = NULL;
err = -EAFNOSUPPORT;
goto fail;
#if IS_ENABLED(CONFIG_IPV6)
}
#endif
}
INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker);
return 0;
fail:
if (net != &init_net)
kfree(tbl);
return err;
}
| static __net_init int rds_tcp_init_net(struct net *net)
{
struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
struct ctl_table *tbl;
int err = 0;
memset(rtn, 0, sizeof(*rtn));
/* {snd, rcv}buf_size default to 0, which implies we let the
* stack pick the value, and permit auto-tuning of buffer size.
*/
if (net == &init_net) {
tbl = rds_tcp_sysctl_table;
} else {
tbl = kmemdup(rds_tcp_sysctl_table,
sizeof(rds_tcp_sysctl_table), GFP_KERNEL);
if (!tbl) {
pr_warn("could not set allocate syctl table\n");
return -ENOMEM;
}
rtn->ctl_table = tbl;
}
tbl[RDS_TCP_SNDBUF].data = &rtn->sndbuf_size;
tbl[RDS_TCP_RCVBUF].data = &rtn->rcvbuf_size;
rtn->rds_tcp_sysctl = register_net_sysctl(net, "net/rds/tcp", tbl);
if (!rtn->rds_tcp_sysctl) {
pr_warn("could not register sysctl\n");
err = -ENOMEM;
goto fail;
}
#if IS_ENABLED(CONFIG_IPV6)
rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, true);
#else
rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, false);
#endif
if (!rtn->rds_tcp_listen_sock) {
pr_warn("could not set up IPv6 listen sock\n");
#if IS_ENABLED(CONFIG_IPV6)
/* Try IPv4 as some systems disable IPv6 */
rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net, false);
if (!rtn->rds_tcp_listen_sock) {
#endif
unregister_net_sysctl_table(rtn->rds_tcp_sysctl);
rtn->rds_tcp_sysctl = NULL;
err = -EAFNOSUPPORT;
goto fail;
#if IS_ENABLED(CONFIG_IPV6)
}
#endif
}
INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker);
return 0;
fail:
if (net != &init_net)
kfree(tbl);
return err;
}
| C | linux | 0 |
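The KASAN report in the commit message of the row above shows a struct net being freed (net_drop_ns via cleanup_net) while deferred connection work still dereferences it (inet_create reached from rds_tcp_conn_path_connect). The usual kernel remedy for this class of bug is to hold a reference on the namespace, via get_net()/put_net(), for as long as asynchronous work may touch it. The sketch below is a minimal userspace analog of that pattern using std::shared_ptr; all names in it are hypothetical and it is not the actual kernel fix.

// Userspace analog of "take a reference on the namespace before queuing work".
// Names are made up; the kernel equivalent would be get_net()/put_net().
#include <cstdio>
#include <memory>
#include <thread>

struct Namespace {                      // stands in for struct net
    int id = 0;
    ~Namespace() { std::printf("namespace %d destroyed\n", id); }
};

// The worker keeps its own shared_ptr, so the namespace cannot be freed
// underneath it even if the creator drops its reference first.
void queue_worker(std::shared_ptr<Namespace> ns) {
    std::thread([ns] {                  // capture by value: reference count bumped
        std::printf("worker using namespace %d\n", ns->id);
    }).join();
}

int main() {
    auto ns = std::make_shared<Namespace>();
    ns->id = 1;
    queue_worker(ns);   // safe: the worker holds a reference for its whole lifetime
    ns.reset();         // analogous to put_net(); the object is freed only after the worker is done
}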
CVE-2016-6888 | https://www.cvedetails.com/cve/CVE-2016-6888/ | CWE-190 | https://git.qemu.org/?p=qemu.git;a=commit;h=47882fa4975bf0b58dd74474329fdd7154e8f04c | 47882fa4975bf0b58dd74474329fdd7154e8f04c | null | bool net_tx_pkt_send_loopback(struct NetTxPkt *pkt, NetClientState *nc)
{
bool res;
pkt->is_loopback = true;
res = net_tx_pkt_send(pkt, nc);
pkt->is_loopback = false;
return res;
}
| bool net_tx_pkt_send_loopback(struct NetTxPkt *pkt, NetClientState *nc)
{
bool res;
pkt->is_loopback = true;
res = net_tx_pkt_send(pkt, nc);
pkt->is_loopback = false;
return res;
}
| C | qemu | 0 |
CVE-2011-3091 | https://www.cvedetails.com/cve/CVE-2011-3091/ | CWE-399 | https://github.com/chromium/chromium/commit/cc7cde43832b547cdab856fe1bedc9514ca38e13 | cc7cde43832b547cdab856fe1bedc9514ca38e13 | Add DCHECK to ensure IndexedDBDispatcher doesn't get re-created.
This could happen if there are IDB objects that survive the call to
didStopWorkerRunLoop.
BUG=121734
TEST=
Review URL: http://codereview.chromium.org/9999035
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@131679 0039d316-1c4b-4281-b951-d872f2087c98 | int32 IndexedDBDispatcher::TransactionId(
const WebIDBTransaction& transaction) {
const RendererWebIDBTransactionImpl* impl =
static_cast<const RendererWebIDBTransactionImpl*>(&transaction);
return impl->id();
}
| int32 IndexedDBDispatcher::TransactionId(
const WebIDBTransaction& transaction) {
const RendererWebIDBTransactionImpl* impl =
static_cast<const RendererWebIDBTransactionImpl*>(&transaction);
return impl->id();
}
| C | Chrome | 0 |
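The commit message in the row above describes adding a DCHECK so a per-thread dispatcher is never lazily re-created after the worker run loop has stopped (which could happen if IDB objects outlive didStopWorkerRunLoop). Below is a hedged sketch of that guard pattern with invented names; it is not Chromium's actual code, and assert() stands in for DCHECK.

// Sketch of a "do not lazily re-create after shutdown" guard. Names are hypothetical.
#include <cassert>
#include <cstdio>

struct Dispatcher { /* ... */ };

thread_local Dispatcher* g_dispatcher = nullptr;
thread_local bool g_worker_stopped = false;

Dispatcher* GetDispatcher() {
    // Equivalent of the added DCHECK: surviving objects must not trigger
    // lazy re-creation once the run loop has been torn down.
    assert(!g_worker_stopped && "dispatcher requested after run loop stopped");
    if (!g_dispatcher)
        g_dispatcher = new Dispatcher();
    return g_dispatcher;
}

void DidStopWorkerRunLoop() {
    delete g_dispatcher;
    g_dispatcher = nullptr;
    g_worker_stopped = true;   // any later GetDispatcher() call now trips the assert
}

int main() {
    GetDispatcher();           // fine
    DidStopWorkerRunLoop();
    std::puts("shutdown complete");
    // GetDispatcher();        // would trip the assert, mirroring the DCHECK
}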
CVE-2019-11884 | https://www.cvedetails.com/cve/CVE-2019-11884/ | CWE-77 | https://github.com/torvalds/linux/commit/a1616a5ac99ede5d605047a9012481ce7ff18b16 | a1616a5ac99ede5d605047a9012481ce7ff18b16 | Bluetooth: hidp: fix buffer overflow
Struct ca is copied from userspace. It is not checked whether the "name"
field is NULL terminated, which allows local users to obtain potentially
sensitive information from kernel stack memory, via a HIDPCONNADD command.
This vulnerability is similar to CVE-2011-1079.
Signed-off-by: Young Xiao <YangX92@hotmail.com>
Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
Cc: stable@vger.kernel.org | static int hidp_sock_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
BT_DBG("sock %p", sock);
if (sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto, kern);
if (!sk)
return -ENOMEM;
sock_init_data(sock, sk);
sock->ops = &hidp_sock_ops;
sock->state = SS_UNCONNECTED;
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = protocol;
sk->sk_state = BT_OPEN;
bt_sock_link(&hidp_sk_list, sk);
return 0;
}
| static int hidp_sock_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
BT_DBG("sock %p", sock);
if (sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto, kern);
if (!sk)
return -ENOMEM;
sock_init_data(sock, sk);
sock->ops = &hidp_sock_ops;
sock->state = SS_UNCONNECTED;
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = protocol;
sk->sk_state = BT_OPEN;
bt_sock_link(&hidp_sk_list, sk);
return 0;
}
| C | linux | 0 |
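The commit message in the row above explains that a fixed-size name field copied from userspace was not guaranteed to be NUL-terminated, so later string handling could read past the buffer and leak stack memory. The sketch below shows the defensive pattern in a userspace analog: the struct and sizes are hypothetical, and memcpy stands in for copy_from_user(); the kernel fix forces termination right after the copy.

// Userspace sketch: always force NUL termination on fixed-size string fields
// received from an untrusted source. The struct below is hypothetical.
#include <cstdio>
#include <cstring>

struct connadd_req {
    char name[16];   // fixed-size, may arrive without a terminating NUL
};

void handle_request(const void* user_buf, std::size_t len) {
    connadd_req ca{};
    std::memcpy(&ca, user_buf, len < sizeof(ca) ? len : sizeof(ca)); // stands in for copy_from_user()
    ca.name[sizeof(ca.name) - 1] = '\0';   // the fix: guarantee termination
    std::printf("name: %s\n", ca.name);    // safe even if the source filled all 16 bytes
}

int main() {
    char evil[16];
    std::memset(evil, 'A', sizeof(evil));  // no NUL anywhere in the payload
    handle_request(evil, sizeof(evil));
}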
CVE-2016-10066 | https://www.cvedetails.com/cve/CVE-2016-10066/ | CWE-119 | https://github.com/ImageMagick/ImageMagick/commit/f6e9d0d9955e85bdd7540b251cd50d598dacc5e6 | f6e9d0d9955e85bdd7540b251cd50d598dacc5e6 | null | static MagickBooleanType WritePCDTile(Image *image,const char *page_geometry,
const char *tile_geometry)
{
GeometryInfo
geometry_info;
Image
*downsample_image,
*tile_image;
MagickBooleanType
status;
MagickStatusType
flags;
RectangleInfo
geometry;
register const PixelPacket
*p,
*q;
register ssize_t
i,
x;
ssize_t
y;
/*
Scale image to tile size.
*/
SetGeometry(image,&geometry);
(void) ParseMetaGeometry(page_geometry,&geometry.x,&geometry.y,
&geometry.width,&geometry.height);
if ((geometry.width % 2) != 0)
geometry.width--;
if ((geometry.height % 2) != 0)
geometry.height--;
tile_image=ResizeImage(image,geometry.width,geometry.height,TriangleFilter,
1.0,&image->exception);
if (tile_image == (Image *) NULL)
return(MagickFalse);
flags=ParseGeometry(page_geometry,&geometry_info);
geometry.width=(size_t) geometry_info.rho;
geometry.height=(size_t) geometry_info.sigma;
if ((flags & SigmaValue) == 0)
geometry.height=geometry.width;
if ((tile_image->columns != geometry.width) ||
(tile_image->rows != geometry.height))
{
Image
*bordered_image;
RectangleInfo
border_info;
/*
Put a border around the image.
*/
border_info.width=(geometry.width-tile_image->columns+1) >> 1;
border_info.height=(geometry.height-tile_image->rows+1) >> 1;
bordered_image=BorderImage(tile_image,&border_info,&image->exception);
if (bordered_image == (Image *) NULL)
return(MagickFalse);
tile_image=DestroyImage(tile_image);
tile_image=bordered_image;
}
(void) TransformImage(&tile_image,(char *) NULL,tile_geometry);
(void) TransformImageColorspace(tile_image,YCCColorspace);
downsample_image=ResizeImage(tile_image,tile_image->columns/2,
tile_image->rows/2,TriangleFilter,1.0,&image->exception);
if (downsample_image == (Image *) NULL)
return(MagickFalse);
/*
Write tile to PCD file.
*/
for (y=0; y < (ssize_t) tile_image->rows; y+=2)
{
p=GetVirtualPixels(tile_image,0,y,tile_image->columns,2,
&tile_image->exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) (tile_image->columns << 1); x++)
{
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(p)));
p++;
}
q=GetVirtualPixels(downsample_image,0,y >> 1,downsample_image->columns,
1,&downsample_image->exception);
if (q == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) downsample_image->columns; x++)
{
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(q)));
q++;
}
q=GetVirtualPixels(downsample_image,0,y >> 1,downsample_image->columns,
1,&downsample_image->exception);
if (q == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) downsample_image->columns; x++)
{
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(q)));
q++;
}
status=SetImageProgress(image,SaveImageTag,y,tile_image->rows);
if (status == MagickFalse)
break;
}
for (i=0; i < 0x800; i++)
(void) WriteBlobByte(image,'\0');
downsample_image=DestroyImage(downsample_image);
tile_image=DestroyImage(tile_image);
return(MagickTrue);
}
| static MagickBooleanType WritePCDTile(Image *image,const char *page_geometry,
const char *tile_geometry)
{
GeometryInfo
geometry_info;
Image
*downsample_image,
*tile_image;
MagickBooleanType
status;
MagickStatusType
flags;
RectangleInfo
geometry;
register const PixelPacket
*p,
*q;
register ssize_t
i,
x;
ssize_t
y;
/*
Scale image to tile size.
*/
SetGeometry(image,&geometry);
(void) ParseMetaGeometry(page_geometry,&geometry.x,&geometry.y,
&geometry.width,&geometry.height);
if ((geometry.width % 2) != 0)
geometry.width--;
if ((geometry.height % 2) != 0)
geometry.height--;
tile_image=ResizeImage(image,geometry.width,geometry.height,TriangleFilter,
1.0,&image->exception);
if (tile_image == (Image *) NULL)
return(MagickFalse);
flags=ParseGeometry(page_geometry,&geometry_info);
geometry.width=(size_t) geometry_info.rho;
geometry.height=(size_t) geometry_info.sigma;
if ((flags & SigmaValue) == 0)
geometry.height=geometry.width;
if ((tile_image->columns != geometry.width) ||
(tile_image->rows != geometry.height))
{
Image
*bordered_image;
RectangleInfo
border_info;
/*
Put a border around the image.
*/
border_info.width=(geometry.width-tile_image->columns+1) >> 1;
border_info.height=(geometry.height-tile_image->rows+1) >> 1;
bordered_image=BorderImage(tile_image,&border_info,&image->exception);
if (bordered_image == (Image *) NULL)
return(MagickFalse);
tile_image=DestroyImage(tile_image);
tile_image=bordered_image;
}
(void) TransformImage(&tile_image,(char *) NULL,tile_geometry);
(void) TransformImageColorspace(tile_image,YCCColorspace);
downsample_image=ResizeImage(tile_image,tile_image->columns/2,
tile_image->rows/2,TriangleFilter,1.0,&image->exception);
if (downsample_image == (Image *) NULL)
return(MagickFalse);
/*
Write tile to PCD file.
*/
for (y=0; y < (ssize_t) tile_image->rows; y+=2)
{
p=GetVirtualPixels(tile_image,0,y,tile_image->columns,2,
&tile_image->exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) (tile_image->columns << 1); x++)
{
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(p)));
p++;
}
q=GetVirtualPixels(downsample_image,0,y >> 1,downsample_image->columns,
1,&downsample_image->exception);
if (q == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) downsample_image->columns; x++)
{
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(q)));
q++;
}
q=GetVirtualPixels(downsample_image,0,y >> 1,downsample_image->columns,
1,&downsample_image->exception);
if (q == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) downsample_image->columns; x++)
{
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(q)));
q++;
}
status=SetImageProgress(image,SaveImageTag,y,tile_image->rows);
if (status == MagickFalse)
break;
}
for (i=0; i < 0x800; i++)
(void) WriteBlobByte(image,'\0');
downsample_image=DestroyImage(downsample_image);
tile_image=DestroyImage(tile_image);
return(MagickTrue);
}
| C | ImageMagick | 0 |
null | null | null | https://github.com/chromium/chromium/commit/91b27188b728e90c651c55a985d23ad0c26eb662 | 91b27188b728e90c651c55a985d23ad0c26eb662 | window.atob() returns wrong value when given a string container only white spaces
window.atob() returns wrong value when given a string container only white
spaces. The reason was that base64DecodeInternal() was calling Vector::grow()
before the loop but failed to call Vector::shrink() in case of early return.
R=
BUG=357332
Review URL: https://codereview.chromium.org/215833002
git-svn-id: svn://svn.chromium.org/blink/trunk@170264 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | bool base64Decode(const UChar* data, unsigned length, Vector<char>& out, CharacterMatchFunctionPtr shouldIgnoreCharacter, Base64DecodePolicy policy)
{
return base64DecodeInternal<UChar>(data, length, out, shouldIgnoreCharacter, policy);
}
| bool base64Decode(const UChar* data, unsigned length, Vector<char>& out, CharacterMatchFunctionPtr shouldIgnoreCharacter, Base64DecodePolicy policy)
{
return base64DecodeInternal<UChar>(data, length, out, shouldIgnoreCharacter, policy);
}
| C | Chrome | 0 |
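The commit message in the row above attributes the bug to base64DecodeInternal() growing the output vector up front but returning early (for whitespace-only input) without shrinking it back, so the caller saw bytes that were never written. The sketch below shows the general "grow up front, shrink to what was actually written" invariant using std::vector as a stand-in for Blink's WTF::Vector; it is an illustration of the pattern, not the Blink implementation.

// Sketch of "grow up front, shrink to the written length" for a decoder.
#include <cctype>
#include <cstdio>
#include <string>
#include <vector>

bool decode_skipping_spaces(const std::string& in, std::vector<char>& out) {
    out.resize(in.size());          // worst-case growth before the loop
    std::size_t written = 0;
    for (char c : in) {
        if (std::isspace(static_cast<unsigned char>(c)))
            continue;               // ignored characters produce no output
        out[written++] = c;         // a real decoder would emit decoded bytes here
    }
    out.resize(written);            // the missing step: shrink on every exit path
    return true;
}

int main() {
    std::vector<char> out;
    decode_skipping_spaces("   ", out);
    std::printf("output length: %zu\n", out.size());   // 0, not 3
}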
CVE-2013-2927 | https://www.cvedetails.com/cve/CVE-2013-2927/ | CWE-399 | https://github.com/chromium/chromium/commit/4d77eed905ce1d00361282e8822a2a3be61d25c0 | 4d77eed905ce1d00361282e8822a2a3be61d25c0 | Fix a crash in HTMLFormElement::prepareForSubmission.
BUG=297478
TEST=automated with ASAN.
Review URL: https://chromiumcodereview.appspot.com/24910003
git-svn-id: svn://svn.chromium.org/blink/trunk@158428 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | bool HTMLFormElement::checkValidity()
{
Vector<RefPtr<FormAssociatedElement> > controls;
return !checkInvalidControlsAndCollectUnhandled(&controls);
}
| bool HTMLFormElement::checkValidity()
{
Vector<RefPtr<FormAssociatedElement> > controls;
return !checkInvalidControlsAndCollectUnhandled(&controls);
}
| C | Chrome | 0 |
CVE-2016-9084 | https://www.cvedetails.com/cve/CVE-2016-9084/ | CWE-190 | https://github.com/torvalds/linux/commit/05692d7005a364add85c6e25a6c4447ce08f913a | 05692d7005a364add85c6e25a6c4447ce08f913a | vfio/pci: Fix integer overflows, bitmask check
The VFIO_DEVICE_SET_IRQS ioctl did not sufficiently sanitize
user-supplied integers, potentially allowing memory corruption. This
patch adds appropriate integer overflow checks, checks the range bounds
for VFIO_IRQ_SET_DATA_NONE, and also verifies that only single element
in the VFIO_IRQ_SET_DATA_TYPE_MASK bitmask is set.
VFIO_IRQ_SET_ACTION_TYPE_MASK is already correctly checked later in
vfio_pci_set_irqs_ioctl().
Furthermore, a kzalloc is changed to a kcalloc because the use of a
kzalloc with an integer multiplication allowed an integer overflow
condition to be reached without this patch. kcalloc checks for overflow
and should prevent a similar occurrence.
Signed-off-by: Vlad Tsyrklevich <vlad@tsyrklevich.net>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com> | static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
{
struct vfio_devices *devs = data;
struct vfio_device *device;
if (devs->cur_index == devs->max_index)
return -ENOSPC;
device = vfio_device_get_from_dev(&pdev->dev);
if (!device)
return -EINVAL;
if (pci_dev_driver(pdev) != &vfio_pci_driver) {
vfio_device_put(device);
return -EBUSY;
}
devs->devices[devs->cur_index++] = device;
return 0;
}
| static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
{
struct vfio_devices *devs = data;
struct vfio_device *device;
if (devs->cur_index == devs->max_index)
return -ENOSPC;
device = vfio_device_get_from_dev(&pdev->dev);
if (!device)
return -EINVAL;
if (pci_dev_driver(pdev) != &vfio_pci_driver) {
vfio_device_put(device);
return -EBUSY;
}
devs->devices[devs->cur_index++] = device;
return 0;
}
| C | linux | 0 |
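The commit message in the row above lists three hardening steps: check user-supplied integers for overflow, require that exactly one bit of the DATA_TYPE mask is set, and allocate with an overflow-checking kcalloc instead of kzalloc(n * size). The sketch below reproduces those checks in a userspace analog; the mask value and function are invented, and calloc()/a power-of-two test stand in for kcalloc()/hweight().

// Userspace sketch of the three checks described above.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

constexpr uint32_t DATA_TYPE_MASK = 0x7;   // hypothetical 3-bit "data type" field

bool set_irqs(uint32_t flags, std::size_t start, std::size_t count, std::size_t elem_size) {
    // 1. exactly one data-type bit may be set
    uint32_t type = flags & DATA_TYPE_MASK;
    if (type == 0 || (type & (type - 1)) != 0)
        return false;
    // 2. the user-supplied range must not wrap around
    if (start + count < start)
        return false;
    // 3. overflow-checked allocation: calloc() fails when count*size overflows,
    //    unlike a hand-written malloc(count * elem_size)
    void* table = std::calloc(count, elem_size);
    if (!table)
        return false;
    std::free(table);
    return true;
}

int main() {
    std::printf("%d\n", set_irqs(0x1, 0, 8, sizeof(int)));         // 1: accepted
    std::printf("%d\n", set_irqs(0x3, 0, 8, sizeof(int)));         // 0: two type bits set
    std::printf("%d\n", set_irqs(0x1, SIZE_MAX, 2, sizeof(int)));  // 0: range wraps
}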
CVE-2017-5077 | https://www.cvedetails.com/cve/CVE-2017-5077/ | CWE-125 | https://github.com/chromium/chromium/commit/fec26ff33bf372476a70326f3669a35f34a9d474 | fec26ff33bf372476a70326f3669a35f34a9d474 | Origins should be represented as url::Origin (not as GURL).
As pointed out in //docs/security/origin-vs-url.md, origins should be
represented as url::Origin (not as GURL). This CL applies this
guideline to predictor-related code and changes the type of the
following fields from GURL to url::Origin:
- OriginRequestSummary::origin
- PreconnectedRequestStats::origin
- PreconnectRequest::origin
The old code did not depend on any non-origin parts of GURL
(like path and/or query). Therefore, this CL has no intended
behavior change.
Bug: 973885
Change-Id: Idd14590b4834cb9d50c74ed747b595fe1a4ba357
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1895167
Commit-Queue: Łukasz Anforowicz <lukasza@chromium.org>
Reviewed-by: Alex Ilin <alexilin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#716311} | void VerifyAndClearExpectations() const {
base::RunLoop().RunUntilIdle();
Mock::VerifyAndClearExpectations(mock_network_context_.get());
Mock::VerifyAndClearExpectations(mock_delegate_.get());
}
| void VerifyAndClearExpectations() const {
base::RunLoop().RunUntilIdle();
Mock::VerifyAndClearExpectations(mock_network_context_.get());
Mock::VerifyAndClearExpectations(mock_delegate_.get());
}
| C | Chrome | 0 |
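The commit message in the row above argues that origins should be carried as a dedicated Origin type rather than a full URL type, so code cannot quietly depend on path or query. The toy sketch below illustrates that design choice with invented types; it is not Chromium's url::Origin or GURL, where url::Origin::Create(GURL) plays the role of the helper shown here.

// Toy illustration: a dedicated Origin type only carries (scheme, host, port),
// so path/query can never leak into origin comparisons. Types are hypothetical.
#include <cstdint>
#include <iostream>
#include <string>
#include <tuple>

struct Origin {
    std::string scheme;
    std::string host;
    uint16_t port = 0;

    bool operator==(const Origin& o) const {
        return std::tie(scheme, host, port) == std::tie(o.scheme, o.host, o.port);
    }
};

Origin OriginOf(const std::string& scheme, const std::string& host, uint16_t port) {
    return Origin{scheme, host, port};
}

int main() {
    Origin a = OriginOf("https", "example.com", 443);   // e.g. from https://example.com/a?x=1
    Origin b = OriginOf("https", "example.com", 443);   // e.g. from https://example.com/b
    std::cout << (a == b) << "\n";   // 1: same origin regardless of path or query
}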
CVE-2011-2517 | https://www.cvedetails.com/cve/CVE-2011-2517/ | CWE-119 | https://github.com/torvalds/linux/commit/208c72f4fe44fe09577e7975ba0e7fa0278f3d03 | 208c72f4fe44fe09577e7975ba0e7fa0278f3d03 | nl80211: fix check for valid SSID size in scan operations
In both trigger_scan and sched_scan operations, we were checking for
the SSID length before assigning the value correctly. Since the
memory was just kzalloc'ed, the check was always failing and SSID with
over 32 characters were allowed to go through.
This was causing a buffer overflow when copying the actual SSID to the
proper place.
This bug has been there since 2.6.29-rc4.
Cc: stable@kernel.org
Signed-off-by: Luciano Coelho <coelho@ti.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com> | static int parse_reg_rule(struct nlattr *tb[],
struct ieee80211_reg_rule *reg_rule)
{
struct ieee80211_freq_range *freq_range = ®_rule->freq_range;
struct ieee80211_power_rule *power_rule = ®_rule->power_rule;
if (!tb[NL80211_ATTR_REG_RULE_FLAGS])
return -EINVAL;
if (!tb[NL80211_ATTR_FREQ_RANGE_START])
return -EINVAL;
if (!tb[NL80211_ATTR_FREQ_RANGE_END])
return -EINVAL;
if (!tb[NL80211_ATTR_FREQ_RANGE_MAX_BW])
return -EINVAL;
if (!tb[NL80211_ATTR_POWER_RULE_MAX_EIRP])
return -EINVAL;
reg_rule->flags = nla_get_u32(tb[NL80211_ATTR_REG_RULE_FLAGS]);
freq_range->start_freq_khz =
nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_START]);
freq_range->end_freq_khz =
nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_END]);
freq_range->max_bandwidth_khz =
nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]);
power_rule->max_eirp =
nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]);
if (tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN])
power_rule->max_antenna_gain =
nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]);
return 0;
}
| static int parse_reg_rule(struct nlattr *tb[],
struct ieee80211_reg_rule *reg_rule)
{
struct ieee80211_freq_range *freq_range = ®_rule->freq_range;
struct ieee80211_power_rule *power_rule = ®_rule->power_rule;
if (!tb[NL80211_ATTR_REG_RULE_FLAGS])
return -EINVAL;
if (!tb[NL80211_ATTR_FREQ_RANGE_START])
return -EINVAL;
if (!tb[NL80211_ATTR_FREQ_RANGE_END])
return -EINVAL;
if (!tb[NL80211_ATTR_FREQ_RANGE_MAX_BW])
return -EINVAL;
if (!tb[NL80211_ATTR_POWER_RULE_MAX_EIRP])
return -EINVAL;
reg_rule->flags = nla_get_u32(tb[NL80211_ATTR_REG_RULE_FLAGS]);
freq_range->start_freq_khz =
nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_START]);
freq_range->end_freq_khz =
nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_END]);
freq_range->max_bandwidth_khz =
nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]);
power_rule->max_eirp =
nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]);
if (tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN])
power_rule->max_antenna_gain =
nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]);
return 0;
}
| C | linux | 0 |
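The commit message in the row above explains that the SSID length was checked against a field in freshly kzalloc'ed memory that had not been filled in yet, so the over-32-byte check always passed and the later copy overflowed the 32-byte buffer. The sketch below shows the corrected ordering in a userspace analog: validate the incoming attribute's own length before copying. The struct and parameter names are invented; in the kernel the source length comes from nla_len().

// Sketch: validate the incoming attribute length *before* the memcpy,
// against the source length itself rather than a not-yet-initialized field.
#include <cstddef>
#include <cstdio>
#include <cstring>

constexpr std::size_t MAX_SSID_LEN = 32;

struct ssid_request {                 // freshly zeroed destination, like the kzalloc'ed one
    unsigned char ssid[MAX_SSID_LEN];
    std::size_t ssid_len = 0;
};

// attr_len plays the role of nla_len(attr) in the kernel code.
bool parse_ssid(const unsigned char* attr_data, std::size_t attr_len, ssid_request& req) {
    if (attr_len > MAX_SSID_LEN)      // check the source length, not req.ssid_len
        return false;                 // (checking req.ssid_len here would always pass: it is 0)
    std::memcpy(req.ssid, attr_data, attr_len);
    req.ssid_len = attr_len;          // only now record the length
    return true;
}

int main() {
    unsigned char huge[64];
    std::memset(huge, 'S', sizeof(huge));
    ssid_request req;
    std::printf("accepted: %d\n", parse_ssid(huge, sizeof(huge), req));   // 0: rejected
}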
CVE-2016-5147 | https://www.cvedetails.com/cve/CVE-2016-5147/ | CWE-79 | https://github.com/chromium/chromium/commit/5472db1c7eca35822219d03be5c817d9a9258c11 | 5472db1c7eca35822219d03be5c817d9a9258c11 | Always call UpdateCompositedScrollOffset, not just for the root layer
Bug: 927560
Change-Id: I1d5522aae4f11dd3f5b8947bb089bac1bf19bdb4
Reviewed-on: https://chromium-review.googlesource.com/c/1452701
Reviewed-by: Chris Harrelson <chrishtr@chromium.org>
Commit-Queue: Mason Freed <masonfreed@chromium.org>
Cr-Commit-Position: refs/heads/master@{#628942} | void PaintLayerScrollableArea::UpdateScrollbarProportions() {
if (Scrollbar* horizontal_scrollbar = HorizontalScrollbar())
horizontal_scrollbar->SetProportion(VisibleWidth(), ContentsSize().Width());
if (Scrollbar* vertical_scrollbar = VerticalScrollbar())
vertical_scrollbar->SetProportion(VisibleHeight(), ContentsSize().Height());
}
| void PaintLayerScrollableArea::UpdateScrollbarProportions() {
if (Scrollbar* horizontal_scrollbar = HorizontalScrollbar())
horizontal_scrollbar->SetProportion(VisibleWidth(), ContentsSize().Width());
if (Scrollbar* vertical_scrollbar = VerticalScrollbar())
vertical_scrollbar->SetProportion(VisibleHeight(), ContentsSize().Height());
}
| C | Chrome | 0 |