Dataset columns (name: type, observed range):
CVE ID: string, length 13 to 43
CVE Page: string, length 45 to 48
CWE ID: string, 90 distinct values
codeLink: string, length 46 to 139
commit_id: string, length 6 to 81
commit_message: string, length 3 to 13.3k
func_after: string, length 14 to 241k
func_before: string, length 14 to 241k
lang: string, 3 distinct values
project: string, 309 distinct values
vul: int8, 0 or 1
CVE-2009-3605
https://www.cvedetails.com/cve/CVE-2009-3605/
CWE-189
https://cgit.freedesktop.org/poppler/poppler/commit/?id=7b2d314a61fd0e12f47c62996cb49ec0d1ba747a
7b2d314a61fd0e12f47c62996cb49ec0d1ba747a
null
JBIG2PatternDict::JBIG2PatternDict(Guint segNumA, Guint sizeA): JBIG2Segment(segNumA) { size = sizeA; bitmaps = (JBIG2Bitmap **)gmallocn(size, sizeof(JBIG2Bitmap *)); }
JBIG2PatternDict::JBIG2PatternDict(Guint segNumA, Guint sizeA): JBIG2Segment(segNumA) { size = sizeA; bitmaps = (JBIG2Bitmap **)gmallocn(size, sizeof(JBIG2Bitmap *)); }
CPP
poppler
0
CVE-2018-7191
https://www.cvedetails.com/cve/CVE-2018-7191/
CWE-476
https://github.com/torvalds/linux/commit/0ad646c81b2182f7fa67ec0c8c825e0ee165696d
0ad646c81b2182f7fa67ec0c8c825e0ee165696d
tun: call dev_get_valid_name() before register_netdevice() register_netdevice() could fail early when we have an invalid dev name, in which case ->ndo_uninit() is not called. For tun device, this is a problem because a timer etc. are already initialized and it expects ->ndo_uninit() to clean them up. We could move these initializations into a ->ndo_init() so that register_netdevice() knows better, however this is still complicated due to the logic in tun_detach(). Therefore, I choose to just call dev_get_valid_name() before register_netdevice(), which is quicker and much easier to audit. And for this specific case, it is already enough. Fixes: 96442e42429e ("tuntap: choose the txq based on rxq") Reported-by: Dmitry Alexeev <avekceeb@gmail.com> Cc: Jason Wang <jasowang@redhat.com> Cc: "Michael S. Tsirkin" <mst@redhat.com> Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
static ssize_t tun_put_user(struct tun_struct *tun, struct tun_file *tfile, struct sk_buff *skb, struct iov_iter *iter) { struct tun_pi pi = { 0, skb->protocol }; struct tun_pcpu_stats *stats; ssize_t total; int vlan_offset = 0; int vlan_hlen = 0; int vnet_hdr_sz = 0; if (skb_vlan_tag_present(skb)) vlan_hlen = VLAN_HLEN; if (tun->flags & IFF_VNET_HDR) vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); total = skb->len + vlan_hlen + vnet_hdr_sz; if (!(tun->flags & IFF_NO_PI)) { if (iov_iter_count(iter) < sizeof(pi)) return -EINVAL; total += sizeof(pi); if (iov_iter_count(iter) < total) { /* Packet will be striped */ pi.flags |= TUN_PKT_STRIP; } if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi)) return -EFAULT; } if (vnet_hdr_sz) { struct virtio_net_hdr gso; if (iov_iter_count(iter) < vnet_hdr_sz) return -EINVAL; if (virtio_net_hdr_from_skb(skb, &gso, tun_is_little_endian(tun), true)) { struct skb_shared_info *sinfo = skb_shinfo(skb); pr_err("unexpected GSO type: " "0x%x, gso_size %d, hdr_len %d\n", sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size), tun16_to_cpu(tun, gso.hdr_len)); print_hex_dump(KERN_ERR, "tun: ", DUMP_PREFIX_NONE, 16, 1, skb->head, min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true); WARN_ON_ONCE(1); return -EINVAL; } if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso)) return -EFAULT; iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); } if (vlan_hlen) { int ret; struct { __be16 h_vlan_proto; __be16 h_vlan_TCI; } veth; veth.h_vlan_proto = skb->vlan_proto; veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); if (ret || !iov_iter_count(iter)) goto done; ret = copy_to_iter(&veth, sizeof(veth), iter); if (ret != sizeof(veth) || !iov_iter_count(iter)) goto done; } skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset); done: /* caller is in process context, */ stats = get_cpu_ptr(tun->pcpu_stats); u64_stats_update_begin(&stats->syncp); stats->tx_packets++; stats->tx_bytes += skb->len + vlan_hlen; u64_stats_update_end(&stats->syncp); put_cpu_ptr(tun->pcpu_stats); return total; }
static ssize_t tun_put_user(struct tun_struct *tun, struct tun_file *tfile, struct sk_buff *skb, struct iov_iter *iter) { struct tun_pi pi = { 0, skb->protocol }; struct tun_pcpu_stats *stats; ssize_t total; int vlan_offset = 0; int vlan_hlen = 0; int vnet_hdr_sz = 0; if (skb_vlan_tag_present(skb)) vlan_hlen = VLAN_HLEN; if (tun->flags & IFF_VNET_HDR) vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); total = skb->len + vlan_hlen + vnet_hdr_sz; if (!(tun->flags & IFF_NO_PI)) { if (iov_iter_count(iter) < sizeof(pi)) return -EINVAL; total += sizeof(pi); if (iov_iter_count(iter) < total) { /* Packet will be striped */ pi.flags |= TUN_PKT_STRIP; } if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi)) return -EFAULT; } if (vnet_hdr_sz) { struct virtio_net_hdr gso; if (iov_iter_count(iter) < vnet_hdr_sz) return -EINVAL; if (virtio_net_hdr_from_skb(skb, &gso, tun_is_little_endian(tun), true)) { struct skb_shared_info *sinfo = skb_shinfo(skb); pr_err("unexpected GSO type: " "0x%x, gso_size %d, hdr_len %d\n", sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size), tun16_to_cpu(tun, gso.hdr_len)); print_hex_dump(KERN_ERR, "tun: ", DUMP_PREFIX_NONE, 16, 1, skb->head, min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true); WARN_ON_ONCE(1); return -EINVAL; } if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso)) return -EFAULT; iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); } if (vlan_hlen) { int ret; struct { __be16 h_vlan_proto; __be16 h_vlan_TCI; } veth; veth.h_vlan_proto = skb->vlan_proto; veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); if (ret || !iov_iter_count(iter)) goto done; ret = copy_to_iter(&veth, sizeof(veth), iter); if (ret != sizeof(veth) || !iov_iter_count(iter)) goto done; } skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset); done: /* caller is in process context, */ stats = get_cpu_ptr(tun->pcpu_stats); u64_stats_update_begin(&stats->syncp); stats->tx_packets++; stats->tx_bytes += skb->len + vlan_hlen; u64_stats_update_end(&stats->syncp); put_cpu_ptr(tun->pcpu_stats); return total; }
C
linux
0
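
The CVE-2018-7191 commit message above (tun_put_user() is unchanged context from the same driver) hinges on ordering: validate the device name before register_netdevice(), so a bad name can no longer make registration fail after tun-specific state (timer, flow cache) is already initialized but before ->ndo_uninit() would clean it up. A minimal C sketch of that ordering follows; the surrounding flow and error labels are assumptions, not copied from the patch.

err = dev_get_valid_name(net, dev, name);   /* reject an invalid name up front */
if (err < 0)
        goto err_free_dev;                  /* nothing tun-specific to unwind yet */

/* ... tun-specific initialization (timers, flow cache) ... */

err = register_netdevice(tun->dev);         /* can no longer fail on the name */
if (err < 0)
        goto err_detach;
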
CVE-2016-8645
https://www.cvedetails.com/cve/CVE-2016-8645/
CWE-284
https://github.com/torvalds/linux/commit/ac6e780070e30e4c35bd395acfe9191e6268bdd3
ac6e780070e30e4c35bd395acfe9191e6268bdd3
tcp: take care of truncations done by sk_filter() With syzkaller help, Marco Grassi found a bug in TCP stack, crashing in tcp_collapse() Root cause is that sk_filter() can truncate the incoming skb, but TCP stack was not really expecting this to happen. It probably was expecting a simple DROP or ACCEPT behavior. We first need to make sure no part of TCP header could be removed. Then we need to adjust TCP_SKB_CB(skb)->end_seq Many thanks to syzkaller team and Marco for giving us a reproducer. Signed-off-by: Eric Dumazet <edumazet@google.com> Reported-by: Marco Grassi <marco.gra@gmail.com> Reported-by: Vladis Dronov <vdronov@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
static void get_openreq6(struct seq_file *seq, const struct request_sock *req, int i) { long ttd = req->rsk_timer.expires - jiffies; const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr; const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr; if (ttd < 0) ttd = 0; seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], inet_rsk(req)->ir_num, dest->s6_addr32[0], dest->s6_addr32[1], dest->s6_addr32[2], dest->s6_addr32[3], ntohs(inet_rsk(req)->ir_rmt_port), TCP_SYN_RECV, 0, 0, /* could print option size, but that is af dependent. */ 1, /* timers active (only the expire timer) */ jiffies_to_clock_t(ttd), req->num_timeout, from_kuid_munged(seq_user_ns(seq), sock_i_uid(req->rsk_listener)), 0, /* non standard timer */ 0, /* open_requests have no inode */ 0, req); }
static void get_openreq6(struct seq_file *seq, const struct request_sock *req, int i) { long ttd = req->rsk_timer.expires - jiffies; const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr; const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr; if (ttd < 0) ttd = 0; seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], inet_rsk(req)->ir_num, dest->s6_addr32[0], dest->s6_addr32[1], dest->s6_addr32[2], dest->s6_addr32[3], ntohs(inet_rsk(req)->ir_rmt_port), TCP_SYN_RECV, 0, 0, /* could print option size, but that is af dependent. */ 1, /* timers active (only the expire timer) */ jiffies_to_clock_t(ttd), req->num_timeout, from_kuid_munged(seq_user_ns(seq), sock_i_uid(req->rsk_listener)), 0, /* non standard timer */ 0, /* open_requests have no inode */ 0, req); }
C
linux
0
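
The CVE-2016-8645 commit message (get_openreq6() above is unchanged context) states two requirements: sk_filter() must never be allowed to trim away any part of the TCP header, and TCP_SKB_CB(skb)->end_seq must be adjusted after a trim. A sketch of the first requirement, assuming it is wrapped in a small helper around sk_filter_trim_cap(); the helper name and body are illustrative, not quoted from the patch.

/* Cap how much a socket filter may trim: never below the full TCP header. */
static int tcp_filter_sketch(struct sock *sk, struct sk_buff *skb)
{
        const struct tcphdr *th = (const struct tcphdr *)skb->data;

        /* th->doff counts 32-bit words, so doff * 4 is the header length */
        return sk_filter_trim_cap(sk, skb, th->doff * 4);
}
/* Callers must then re-read the headers and recompute TCP_SKB_CB(skb)->end_seq,
 * since the skb may now be shorter than when it was first parsed. */
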
CVE-2018-6053
https://www.cvedetails.com/cve/CVE-2018-6053/
CWE-200
https://github.com/chromium/chromium/commit/6c6888565ff1fde9ef21ef17c27ad4c8304643d2
6c6888565ff1fde9ef21ef17c27ad4c8304643d2
TopSites: Clear thumbnails from the cache when their URLs get removed We already cleared the thumbnails from persistent storage, but they remained in the in-memory cache, so they remained accessible (until the next Chrome restart) even after all browsing data was cleared. Bug: 758169 Change-Id: Id916d22358430a82e6d5043ac04fa463a32f824f Reviewed-on: https://chromium-review.googlesource.com/758640 Commit-Queue: Marc Treib <treib@chromium.org> Reviewed-by: Sylvain Defresne <sdefresne@chromium.org> Cr-Commit-Position: refs/heads/master@{#514861}
void TopSitesImpl::OnTopSitesAvailableFromHistory( const MostVisitedURLList* pages) { DCHECK(pages); SetTopSites(*pages, CALL_LOCATION_FROM_OTHER_PLACES); }
void TopSitesImpl::OnTopSitesAvailableFromHistory( const MostVisitedURLList* pages) { DCHECK(pages); SetTopSites(*pages, CALL_LOCATION_FROM_OTHER_PLACES); }
C
Chrome
0
CVE-2016-9933
https://www.cvedetails.com/cve/CVE-2016-9933/
CWE-119
https://github.com/libgd/libgd/commit/77f619d48259383628c3ec4654b1ad578e9eb40e
77f619d48259383628c3ec4654b1ad578e9eb40e
fix #215 gdImageFillToBorder stack-overflow when invalid color is used
BGD_DECLARE(void) gdImageRectangle (gdImagePtr im, int x1, int y1, int x2, int y2, int color) { int thick = im->thick; if (x1 == x2 && y1 == y2 && thick == 1) { gdImageSetPixel(im, x1, y1, color); return; } if (y2 < y1) { int t; t = y1; y1 = y2; y2 = t; t = x1; x1 = x2; x2 = t; } if (thick > 1) { int cx, cy, x1ul, y1ul, x2lr, y2lr; int half = thick >> 1; x1ul = x1 - half; y1ul = y1 - half; x2lr = x2 + half; y2lr = y2 + half; cy = y1ul + thick; while (cy-- > y1ul) { cx = x1ul - 1; while (cx++ < x2lr) { gdImageSetPixel(im, cx, cy, color); } } cy = y2lr - thick; while (cy++ < y2lr) { cx = x1ul - 1; while (cx++ < x2lr) { gdImageSetPixel(im, cx, cy, color); } } cy = y1ul + thick - 1; while (cy++ < y2lr -thick) { cx = x1ul - 1; while (cx++ < x1ul + thick) { gdImageSetPixel(im, cx, cy, color); } } cy = y1ul + thick - 1; while (cy++ < y2lr -thick) { cx = x2lr - thick - 1; while (cx++ < x2lr) { gdImageSetPixel(im, cx, cy, color); } } return; } else { gdImageLine(im, x1, y1, x2, y1, color); gdImageLine(im, x1, y2, x2, y2, color); gdImageLine(im, x1, y1 + 1, x1, y2 - 1, color); gdImageLine(im, x2, y1 + 1, x2, y2 - 1, color); } }
BGD_DECLARE(void) gdImageRectangle (gdImagePtr im, int x1, int y1, int x2, int y2, int color) { int thick = im->thick; if (x1 == x2 && y1 == y2 && thick == 1) { gdImageSetPixel(im, x1, y1, color); return; } if (y2 < y1) { int t; t = y1; y1 = y2; y2 = t; t = x1; x1 = x2; x2 = t; } if (thick > 1) { int cx, cy, x1ul, y1ul, x2lr, y2lr; int half = thick >> 1; x1ul = x1 - half; y1ul = y1 - half; x2lr = x2 + half; y2lr = y2 + half; cy = y1ul + thick; while (cy-- > y1ul) { cx = x1ul - 1; while (cx++ < x2lr) { gdImageSetPixel(im, cx, cy, color); } } cy = y2lr - thick; while (cy++ < y2lr) { cx = x1ul - 1; while (cx++ < x2lr) { gdImageSetPixel(im, cx, cy, color); } } cy = y1ul + thick - 1; while (cy++ < y2lr -thick) { cx = x1ul - 1; while (cx++ < x1ul + thick) { gdImageSetPixel(im, cx, cy, color); } } cy = y1ul + thick - 1; while (cy++ < y2lr -thick) { cx = x2lr - thick - 1; while (cx++ < x2lr) { gdImageSetPixel(im, cx, cy, color); } } return; } else { gdImageLine(im, x1, y1, x2, y1, color); gdImageLine(im, x1, y2, x2, y2, color); gdImageLine(im, x1, y1 + 1, x1, y2 - 1, color); gdImageLine(im, x2, y1 + 1, x2, y2 - 1, color); } }
C
php-src
0
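
The CVE-2016-9933 fix targets gdImageFillToBorder (gdImageRectangle above is an unchanged function from the same file): filling toward a border with an invalid color index keeps the flood fill recursing until the stack overflows. A guard of the kind the commit title describes might sit at the top of that function; the exact form and placement are assumptions, not the verbatim patch.

/* Refuse invalid color indices before recursing; with a negative color the
 * recursion's termination test can never be satisfied. */
if (border < 0 || color < 0) {
        return;
}
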
CVE-2013-3301
https://www.cvedetails.com/cve/CVE-2013-3301/
null
https://github.com/torvalds/linux/commit/6a76f8c0ab19f215af2a3442870eeb5f0e81998d
6a76f8c0ab19f215af2a3442870eeb5f0e81998d
tracing: Fix possible NULL pointer dereferences Currently set_ftrace_pid and set_graph_function files use seq_lseek for their fops. However seq_open() is called only for FMODE_READ in the fops->open() so that if an user tries to seek one of those file when she open it for writing, it sees NULL seq_file and then panic. It can be easily reproduced with following command: $ cd /sys/kernel/debug/tracing $ echo 1234 | sudo tee -a set_ftrace_pid In this example, GNU coreutils' tee opens the file with fopen(, "a") and then the fopen() internally calls lseek(). Link: http://lkml.kernel.org/r/1365663302-2170-1-git-send-email-namhyung@kernel.org Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: Ingo Molnar <mingo@kernel.org> Cc: Namhyung Kim <namhyung.kim@lge.com> Cc: stable@vger.kernel.org Signed-off-by: Namhyung Kim <namhyung@kernel.org> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
static int g_show(struct seq_file *m, void *v) { unsigned long *ptr = v; if (!ptr) return 0; if (ptr == (unsigned long *)1) { seq_printf(m, "#### all functions enabled ####\n"); return 0; } seq_printf(m, "%ps\n", (void *)*ptr); return 0; }
static int g_show(struct seq_file *m, void *v) { unsigned long *ptr = v; if (!ptr) return 0; if (ptr == (unsigned long *)1) { seq_printf(m, "#### all functions enabled ####\n"); return 0; } seq_printf(m, "%ps\n", (void *)*ptr); return 0; }
C
linux
0
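
The CVE-2013-3301 commit message (g_show() above is unchanged context) pins the crash on seq_lseek() being reachable for set_ftrace_pid/set_graph_function files opened write-only, where fops->open() never called seq_open() and file->private_data is therefore NULL. A sketch of the guard that reasoning implies; the function name and the exact behavior for the write-only case are assumptions, not the verbatim patch.

static loff_t ftrace_filter_lseek_sketch(struct file *file, loff_t offset, int whence)
{
        loff_t ret;

        /* The seq_file only exists when the file was opened for reading;
         * otherwise report a fixed position instead of dereferencing NULL. */
        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, whence);
        else
                file->f_pos = ret = 1;

        return ret;
}
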
null
null
null
https://github.com/chromium/chromium/commit/0fb75f1e468fe9054be3b3d3d5b9bf9a66e4199d
0fb75f1e468fe9054be3b3d3d5b9bf9a66e4199d
StackSamplingProfiler: walk a copy of the stack Changes the stack walking strategy to copy the stack while the target thread is suspended, then walk the copy of the stack after the thread has been resumed. This avoids deadlock on locks taken by RtlLookupFunctionEntry when walking the actual stack while the target thread is suspended. BUG=528129 Review URL: https://codereview.chromium.org/1367633002 Cr-Commit-Position: refs/heads/master@{#353004}
PlatformThreadId id() const { return id_; }
PlatformThreadId id() const { return id_; }
C
Chrome
0
CVE-2011-4594
https://www.cvedetails.com/cve/CVE-2011-4594/
null
https://github.com/torvalds/linux/commit/bc909d9ddbf7778371e36a651d6e4194b1cc7d4c
bc909d9ddbf7778371e36a651d6e4194b1cc7d4c
sendmmsg/sendmsg: fix unsafe user pointer access Dereferencing a user pointer directly from kernel-space without going through the copy_from_user family of functions is a bad idea. Two of such usages can be found in the sendmsg code path called from sendmmsg, added by commit c71d8ebe7a4496fb7231151cb70a6baa0cb56f9a upstream. commit 5b47b8038f183b44d2d8ff1c7d11a5c1be706b34 in the 3.0-stable tree. Usages are performed through memcmp() and memcpy() directly. Fix those by using the already copied msg_sys structure instead of the __user *msg structure. Note that msg_sys can be set to NULL by verify_compat_iovec() or verify_iovec(), which requires additional NULL pointer checks. Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> Signed-off-by: David Goulet <dgoulet@ev0ke.net> CC: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp> CC: Anton Blanchard <anton@samba.org> CC: David S. Miller <davem@davemloft.net> CC: stable <stable@kernel.org> Signed-off-by: David S. Miller <davem@davemloft.net>
SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len, unsigned, flags, struct sockaddr __user *, addr, int, addr_len) { struct socket *sock; struct sockaddr_storage address; int err; struct msghdr msg; struct iovec iov; int fput_needed; if (len > INT_MAX) len = INT_MAX; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; iov.iov_base = buff; iov.iov_len = len; msg.msg_name = NULL; msg.msg_iov = &iov; msg.msg_iovlen = 1; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_namelen = 0; if (addr) { err = move_addr_to_kernel(addr, addr_len, (struct sockaddr *)&address); if (err < 0) goto out_put; msg.msg_name = (struct sockaddr *)&address; msg.msg_namelen = addr_len; } if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; msg.msg_flags = flags; err = sock_sendmsg(sock, &msg, len); out_put: fput_light(sock->file, fput_needed); out: return err; }
SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len, unsigned, flags, struct sockaddr __user *, addr, int, addr_len) { struct socket *sock; struct sockaddr_storage address; int err; struct msghdr msg; struct iovec iov; int fput_needed; if (len > INT_MAX) len = INT_MAX; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; iov.iov_base = buff; iov.iov_len = len; msg.msg_name = NULL; msg.msg_iov = &iov; msg.msg_iovlen = 1; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_namelen = 0; if (addr) { err = move_addr_to_kernel(addr, addr_len, (struct sockaddr *)&address); if (err < 0) goto out_put; msg.msg_name = (struct sockaddr *)&address; msg.msg_namelen = addr_len; } if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; msg.msg_flags = flags; err = sock_sendmsg(sock, &msg, len); out_put: fput_light(sock->file, fput_needed); out: return err; }
C
linux
0
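
The CVE-2011-4594 commit message (the sendto syscall above is unchanged context) reduces to one rule: kernel code must not memcmp()/memcpy() through a __user pointer; it has to work on a kernel-side copy (msg_sys in the patch) made via the copy_from_user family, and since that copy's pointers can legitimately be NULL they need checking. A generic illustration of the pattern; user_addr, cached_name, and addr_len are hypothetical names, not taken from the patch.

struct sockaddr_storage kaddr;
int addr_unchanged = 0;

/* Copy the user-supplied address into kernel memory first ... */
if (copy_from_user(&kaddr, user_addr, addr_len))
        return -EFAULT;

/* ... and from here on compare and copy only the kernel copy, never user_addr.
 * Pointers that an earlier verification step may have left NULL are checked first. */
if (cached_name != NULL)
        addr_unchanged = (memcmp(cached_name, &kaddr, addr_len) == 0);
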
CVE-2014-9710
https://www.cvedetails.com/cve/CVE-2014-9710/
CWE-362
https://github.com/torvalds/linux/commit/5f5bc6b1e2d5a6f827bc860ef2dc5b6f365d1339
5f5bc6b1e2d5a6f827bc860ef2dc5b6f365d1339
Btrfs: make xattr replace operations atomic Replacing a xattr consists of doing a lookup for its existing value, delete the current value from the respective leaf, release the search path and then finally insert the new value. This leaves a time window where readers (getxattr, listxattrs) won't see any value for the xattr. Xattrs are used to store ACLs, so this has security implications. This change also fixes 2 other existing issues which were: *) Deleting the old xattr value without verifying first if the new xattr will fit in the existing leaf item (in case multiple xattrs are packed in the same item due to name hash collision); *) Returning -EEXIST when the flag XATTR_CREATE is given and the xattr doesn't exist but we have have an existing item that packs muliple xattrs with the same name hash as the input xattr. In this case we should return ENOSPC. A test case for xfstests follows soon. Thanks to Alexandre Oliva for reporting the non-atomicity of the xattr replace implementation. Reported-by: Alexandre Oliva <oliva@gnu.org> Signed-off-by: Filipe Manana <fdmanana@suse.com> Signed-off-by: Chris Mason <clm@fb.com>
static void root_sub_used(struct btrfs_root *root, u32 size) { spin_lock(&root->accounting_lock); btrfs_set_root_used(&root->root_item, btrfs_root_used(&root->root_item) - size); spin_unlock(&root->accounting_lock); }
static void root_sub_used(struct btrfs_root *root, u32 size) { spin_lock(&root->accounting_lock); btrfs_set_root_used(&root->root_item, btrfs_root_used(&root->root_item) - size); spin_unlock(&root->accounting_lock); }
C
linux
0
CVE-2019-12098
https://www.cvedetails.com/cve/CVE-2019-12098/
CWE-320
https://github.com/heimdal/heimdal/commit/2f7f3d9960aa6ea21358bdf3687cee5149aa35cf
2f7f3d9960aa6ea21358bdf3687cee5149aa35cf
CVE-2019-12098: krb5: always confirm PA-PKINIT-KX for anon PKINIT RFC8062 Section 7 requires verification of the PA-PKINIT-KX key excahnge when anonymous PKINIT is used. Failure to do so can permit an active attacker to become a man-in-the-middle. Introduced by a1ef548600c5bb51cf52a9a9ea12676506ede19f. First tagged release Heimdal 1.4.0. CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N (4.8) Change-Id: I6cc1c0c24985936468af08693839ac6c3edda133 Signed-off-by: Jeffrey Altman <jaltman@auristor.com> Approved-by: Jeffrey Altman <jaltman@auritor.com> (cherry picked from commit 38c797e1ae9b9c8f99ae4aa2e73957679031fd2b)
pa_data_add_pac_request(krb5_context context, krb5_get_init_creds_ctx *ctx, METHOD_DATA *md) { size_t len = 0, length; krb5_error_code ret; PA_PAC_REQUEST req; void *buf; switch (ctx->req_pac) { case KRB5_INIT_CREDS_TRISTATE_UNSET: return 0; /* don't bother */ case KRB5_INIT_CREDS_TRISTATE_TRUE: req.include_pac = 1; break; case KRB5_INIT_CREDS_TRISTATE_FALSE: req.include_pac = 0; } ASN1_MALLOC_ENCODE(PA_PAC_REQUEST, buf, length, &req, &len, ret); if (ret) return ret; if(len != length) krb5_abortx(context, "internal error in ASN.1 encoder"); ret = krb5_padata_add(context, md, KRB5_PADATA_PA_PAC_REQUEST, buf, len); if (ret) free(buf); return 0; }
pa_data_add_pac_request(krb5_context context, krb5_get_init_creds_ctx *ctx, METHOD_DATA *md) { size_t len = 0, length; krb5_error_code ret; PA_PAC_REQUEST req; void *buf; switch (ctx->req_pac) { case KRB5_INIT_CREDS_TRISTATE_UNSET: return 0; /* don't bother */ case KRB5_INIT_CREDS_TRISTATE_TRUE: req.include_pac = 1; break; case KRB5_INIT_CREDS_TRISTATE_FALSE: req.include_pac = 0; } ASN1_MALLOC_ENCODE(PA_PAC_REQUEST, buf, length, &req, &len, ret); if (ret) return ret; if(len != length) krb5_abortx(context, "internal error in ASN.1 encoder"); ret = krb5_padata_add(context, md, KRB5_PADATA_PA_PAC_REQUEST, buf, len); if (ret) free(buf); return 0; }
C
heimdal
0
CVE-2017-10911
https://www.cvedetails.com/cve/CVE-2017-10911/
CWE-200
https://github.com/torvalds/linux/commit/089bc0143f489bd3a4578bdff5f4ca68fb26f341
089bc0143f489bd3a4578bdff5f4ca68fb26f341
xen-blkback: don't leak stack data via response ring Rather than constructing a local structure instance on the stack, fill the fields directly on the shared ring, just like other backends do. Build on the fact that all response structure flavors are actually identical (the old code did make this assumption too). This is XSA-216. Cc: stable@vger.kernel.org Signed-off-by: Jan Beulich <jbeulich@suse.com> Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
static int xen_blkbk_map(struct xen_blkif_ring *ring, struct grant_page *pages[], int num, bool ro) { struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct persistent_gnt *persistent_gnt = NULL; phys_addr_t addr = 0; int i, seg_idx, new_map_idx; int segs_to_map = 0; int ret = 0; int last_map = 0, map_until = 0; int use_persistent_gnts; struct xen_blkif *blkif = ring->blkif; use_persistent_gnts = (blkif->vbd.feature_gnt_persistent); /* * Fill out preq.nr_sects with proper amount of sectors, and setup * assign map[..] with the PFN of the page in our domain with the * corresponding grant reference for each page. */ again: for (i = map_until; i < num; i++) { uint32_t flags; if (use_persistent_gnts) { persistent_gnt = get_persistent_gnt( ring, pages[i]->gref); } if (persistent_gnt) { /* * We are using persistent grants and * the grant is already mapped */ pages[i]->page = persistent_gnt->page; pages[i]->persistent_gnt = persistent_gnt; } else { if (get_free_page(ring, &pages[i]->page)) goto out_of_memory; addr = vaddr(pages[i]->page); pages_to_gnt[segs_to_map] = pages[i]->page; pages[i]->persistent_gnt = NULL; flags = GNTMAP_host_map; if (!use_persistent_gnts && ro) flags |= GNTMAP_readonly; gnttab_set_map_op(&map[segs_to_map++], addr, flags, pages[i]->gref, blkif->domid); } map_until = i + 1; if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST) break; } if (segs_to_map) { ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map); BUG_ON(ret); } /* * Now swizzle the MFN in our domain with the MFN from the other domain * so that when we access vaddr(pending_req,i) it has the contents of * the page from the other domain. */ for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) { if (!pages[seg_idx]->persistent_gnt) { /* This is a newly mapped grant */ BUG_ON(new_map_idx >= segs_to_map); if (unlikely(map[new_map_idx].status != 0)) { pr_debug("invalid buffer -- could not remap it\n"); put_free_pages(ring, &pages[seg_idx]->page, 1); pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE; ret |= 1; goto next; } pages[seg_idx]->handle = map[new_map_idx].handle; } else { continue; } if (use_persistent_gnts && ring->persistent_gnt_c < xen_blkif_max_pgrants) { /* * We are using persistent grants, the grant is * not mapped but we might have room for it. */ persistent_gnt = kmalloc(sizeof(struct persistent_gnt), GFP_KERNEL); if (!persistent_gnt) { /* * If we don't have enough memory to * allocate the persistent_gnt struct * map this grant non-persistenly */ goto next; } persistent_gnt->gnt = map[new_map_idx].ref; persistent_gnt->handle = map[new_map_idx].handle; persistent_gnt->page = pages[seg_idx]->page; if (add_persistent_gnt(ring, persistent_gnt)) { kfree(persistent_gnt); persistent_gnt = NULL; goto next; } pages[seg_idx]->persistent_gnt = persistent_gnt; pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n", persistent_gnt->gnt, ring->persistent_gnt_c, xen_blkif_max_pgrants); goto next; } if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) { blkif->vbd.overflow_max_grants = 1; pr_debug("domain %u, device %#x is using maximum number of persistent grants\n", blkif->domid, blkif->vbd.handle); } /* * We could not map this grant persistently, so use it as * a non-persistent grant. 
*/ next: new_map_idx++; } segs_to_map = 0; last_map = map_until; if (map_until != num) goto again; return ret; out_of_memory: pr_alert("%s: out of memory\n", __func__); put_free_pages(ring, pages_to_gnt, segs_to_map); return -ENOMEM; }
static int xen_blkbk_map(struct xen_blkif_ring *ring, struct grant_page *pages[], int num, bool ro) { struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST]; struct persistent_gnt *persistent_gnt = NULL; phys_addr_t addr = 0; int i, seg_idx, new_map_idx; int segs_to_map = 0; int ret = 0; int last_map = 0, map_until = 0; int use_persistent_gnts; struct xen_blkif *blkif = ring->blkif; use_persistent_gnts = (blkif->vbd.feature_gnt_persistent); /* * Fill out preq.nr_sects with proper amount of sectors, and setup * assign map[..] with the PFN of the page in our domain with the * corresponding grant reference for each page. */ again: for (i = map_until; i < num; i++) { uint32_t flags; if (use_persistent_gnts) { persistent_gnt = get_persistent_gnt( ring, pages[i]->gref); } if (persistent_gnt) { /* * We are using persistent grants and * the grant is already mapped */ pages[i]->page = persistent_gnt->page; pages[i]->persistent_gnt = persistent_gnt; } else { if (get_free_page(ring, &pages[i]->page)) goto out_of_memory; addr = vaddr(pages[i]->page); pages_to_gnt[segs_to_map] = pages[i]->page; pages[i]->persistent_gnt = NULL; flags = GNTMAP_host_map; if (!use_persistent_gnts && ro) flags |= GNTMAP_readonly; gnttab_set_map_op(&map[segs_to_map++], addr, flags, pages[i]->gref, blkif->domid); } map_until = i + 1; if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST) break; } if (segs_to_map) { ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map); BUG_ON(ret); } /* * Now swizzle the MFN in our domain with the MFN from the other domain * so that when we access vaddr(pending_req,i) it has the contents of * the page from the other domain. */ for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) { if (!pages[seg_idx]->persistent_gnt) { /* This is a newly mapped grant */ BUG_ON(new_map_idx >= segs_to_map); if (unlikely(map[new_map_idx].status != 0)) { pr_debug("invalid buffer -- could not remap it\n"); put_free_pages(ring, &pages[seg_idx]->page, 1); pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE; ret |= 1; goto next; } pages[seg_idx]->handle = map[new_map_idx].handle; } else { continue; } if (use_persistent_gnts && ring->persistent_gnt_c < xen_blkif_max_pgrants) { /* * We are using persistent grants, the grant is * not mapped but we might have room for it. */ persistent_gnt = kmalloc(sizeof(struct persistent_gnt), GFP_KERNEL); if (!persistent_gnt) { /* * If we don't have enough memory to * allocate the persistent_gnt struct * map this grant non-persistenly */ goto next; } persistent_gnt->gnt = map[new_map_idx].ref; persistent_gnt->handle = map[new_map_idx].handle; persistent_gnt->page = pages[seg_idx]->page; if (add_persistent_gnt(ring, persistent_gnt)) { kfree(persistent_gnt); persistent_gnt = NULL; goto next; } pages[seg_idx]->persistent_gnt = persistent_gnt; pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n", persistent_gnt->gnt, ring->persistent_gnt_c, xen_blkif_max_pgrants); goto next; } if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) { blkif->vbd.overflow_max_grants = 1; pr_debug("domain %u, device %#x is using maximum number of persistent grants\n", blkif->domid, blkif->vbd.handle); } /* * We could not map this grant persistently, so use it as * a non-persistent grant. 
*/ next: new_map_idx++; } segs_to_map = 0; last_map = map_until; if (map_until != num) goto again; return ret; out_of_memory: pr_alert("%s: out of memory\n", __func__); put_free_pages(ring, pages_to_gnt, segs_to_map); return -ENOMEM; }
C
linux
0
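
The CVE-2017-10911 commit message (the xen_blkbk_map() pair above is unchanged context from the same driver) attributes the leak to building a response in a local struct and copying it wholesale onto the shared ring: padding bytes and never-assigned fields carry stale stack contents to the frontend. A contrast of the two patterns with a deliberately simplified, hypothetical response layout; resp_sketch and ring_slot are illustrative names only.

struct resp_sketch { uint64_t id; uint8_t operation; int16_t status; }; /* padded */

/* Leaky pattern: padding and the never-assigned 'operation' field go out as-is. */
struct resp_sketch r;
r.id = id;
r.status = status;
memcpy(ring_slot, &r, sizeof(r));

/* Pattern the fix describes: assign each field directly on the shared ring slot,
 * so nothing uninitialized is ever copied out. */
ring_slot->id        = id;
ring_slot->operation = op;
ring_slot->status    = status;
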
CVE-2016-1658
https://www.cvedetails.com/cve/CVE-2016-1658/
CWE-284
https://github.com/chromium/chromium/commit/5c437bcc7a51edbef45242c5173cf7871fde2866
5c437bcc7a51edbef45242c5173cf7871fde2866
Make extensions use a correct same-origin check. GURL::GetOrigin does not do the right thing for all types of URLs. BUG=573317 Review URL: https://codereview.chromium.org/1658913002 Cr-Commit-Position: refs/heads/master@{#373381}
void ExtensionViewGuest::DidCommitProvisionalLoadForFrame( content::RenderFrameHost* render_frame_host, const GURL& url, ui::PageTransition transition_type) { if (render_frame_host->GetParent()) return; url_ = url; scoped_ptr<base::DictionaryValue> args(new base::DictionaryValue()); args->SetString(guest_view::kUrl, url_.spec()); DispatchEventToView(make_scoped_ptr( new GuestViewEvent(extensionview::kEventLoadCommit, std::move(args)))); }
void ExtensionViewGuest::DidCommitProvisionalLoadForFrame( content::RenderFrameHost* render_frame_host, const GURL& url, ui::PageTransition transition_type) { if (render_frame_host->GetParent()) return; url_ = url; scoped_ptr<base::DictionaryValue> args(new base::DictionaryValue()); args->SetString(guest_view::kUrl, url_.spec()); DispatchEventToView(make_scoped_ptr( new GuestViewEvent(extensionview::kEventLoadCommit, std::move(args)))); }
C
Chrome
0
CVE-2016-5769
https://www.cvedetails.com/cve/CVE-2016-5769/
CWE-190
https://github.com/php/php-src/commit/6c5211a0cef0cc2854eaa387e0eb036e012904d0?w=1
6c5211a0cef0cc2854eaa387e0eb036e012904d0?w=1
Fix bug #72455: Heap Overflow due to integer overflows
PHP_FUNCTION(mcrypt_generic) { zval *mcryptind; char *data; int data_len; php_mcrypt *pm; unsigned char* data_s; int block_size, data_size; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rs", &mcryptind, &data, &data_len) == FAILURE) { return; } ZEND_FETCH_RESOURCE(pm, php_mcrypt *, &mcryptind, -1, "MCrypt", le_mcrypt); PHP_MCRYPT_INIT_CHECK if (data_len == 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "An empty string was passed"); RETURN_FALSE } /* Check blocksize */ if (mcrypt_enc_is_block_mode(pm->td) == 1) { /* It's a block algorithm */ block_size = mcrypt_enc_get_block_size(pm->td); data_size = (((data_len - 1) / block_size) + 1) * block_size; if (data_size <= 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "Integer overflow in data size"); RETURN_FALSE; } data_s = emalloc(data_size + 1); memset(data_s, 0, data_size); memcpy(data_s, data, data_len); } else { /* It's not a block algorithm */ data_size = data_len; data_s = emalloc(data_size + 1); memset(data_s, 0, data_size); memcpy(data_s, data, data_len); } mcrypt_generic(pm->td, data_s, data_size); data_s[data_size] = '\0'; RETVAL_STRINGL(data_s, data_size, 1); efree(data_s); }
PHP_FUNCTION(mcrypt_generic) { zval *mcryptind; char *data; int data_len; php_mcrypt *pm; unsigned char* data_s; int block_size, data_size; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rs", &mcryptind, &data, &data_len) == FAILURE) { return; } ZEND_FETCH_RESOURCE(pm, php_mcrypt *, &mcryptind, -1, "MCrypt", le_mcrypt); PHP_MCRYPT_INIT_CHECK if (data_len == 0) { php_error_docref(NULL TSRMLS_CC, E_WARNING, "An empty string was passed"); RETURN_FALSE } /* Check blocksize */ if (mcrypt_enc_is_block_mode(pm->td) == 1) { /* It's a block algorithm */ block_size = mcrypt_enc_get_block_size(pm->td); data_size = (((data_len - 1) / block_size) + 1) * block_size; data_s = emalloc(data_size + 1); memset(data_s, 0, data_size); memcpy(data_s, data, data_len); } else { /* It's not a block algorithm */ data_size = data_len; data_s = emalloc(data_size + 1); memset(data_s, 0, data_size); memcpy(data_s, data, data_len); } mcrypt_generic(pm->td, data_s, data_size); data_s[data_size] = '\0'; RETVAL_STRINGL(data_s, data_size, 1); efree(data_s); }
C
php-src
1
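
For CVE-2016-5769 the diff is visible in the pair above: func_after adds the data_size <= 0 guard right after rounding data_len up to a whole number of blocks; func_before lacks it, and the row is flagged vul = 1. A small standalone program showing why that round-up can go non-positive for attacker-sized input; signed overflow is formally undefined in C, but on common compilers it wraps, which is exactly the condition the added guard rejects.

#include <limits.h>
#include <stdio.h>

int main(void)
{
    int data_len   = INT_MAX - 2;   /* near-maximal input length */
    int block_size = 32;            /* a typical cipher block size */

    /* round data_len up to a whole number of blocks, as mcrypt_generic() does */
    int data_size = (((data_len - 1) / block_size) + 1) * block_size;

    /* the multiplication exceeds INT_MAX, so data_size comes out <= 0 and the
     * later emalloc(data_size + 1) would be far too small for a data_len-byte memcpy */
    printf("data_len=%d -> data_size=%d\n", data_len, data_size);
    return 0;
}
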
CVE-2018-1065
https://www.cvedetails.com/cve/CVE-2018-1065/
CWE-476
https://github.com/torvalds/linux/commit/57ebd808a97d7c5b1e1afb937c2db22beba3c1f8
57ebd808a97d7c5b1e1afb937c2db22beba3c1f8
netfilter: add back stackpointer size checks The rationale for removing the check is only correct for rulesets generated by ip(6)tables. In iptables, a jump can only occur to a user-defined chain, i.e. because we size the stack based on number of user-defined chains we cannot exceed stack size. However, the underlying binary format has no such restriction, and the validation step only ensures that the jump target is a valid rule start point. IOW, its possible to build a rule blob that has no user-defined chains but does contain a jump. If this happens, no jump stack gets allocated and crash occurs because no jumpstack was allocated. Fixes: 7814b6ec6d0d6 ("netfilter: xtables: don't save/restore jumpstack offset") Reported-by: syzbot+e783f671527912cd9403@syzkaller.appspotmail.com Signed-off-by: Florian Westphal <fw@strlen.de> Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
static int get_info(struct net *net, void __user *user, const int *len, int compat) { char name[XT_TABLE_MAXNAMELEN]; struct xt_table *t; int ret; if (*len != sizeof(struct ipt_getinfo)) return -EINVAL; if (copy_from_user(name, user, sizeof(name)) != 0) return -EFAULT; name[XT_TABLE_MAXNAMELEN-1] = '\0'; #ifdef CONFIG_COMPAT if (compat) xt_compat_lock(AF_INET); #endif t = xt_request_find_table_lock(net, AF_INET, name); if (!IS_ERR(t)) { struct ipt_getinfo info; const struct xt_table_info *private = t->private; #ifdef CONFIG_COMPAT struct xt_table_info tmp; if (compat) { ret = compat_table_info(private, &tmp); xt_compat_flush_offsets(AF_INET); private = &tmp; } #endif memset(&info, 0, sizeof(info)); info.valid_hooks = t->valid_hooks; memcpy(info.hook_entry, private->hook_entry, sizeof(info.hook_entry)); memcpy(info.underflow, private->underflow, sizeof(info.underflow)); info.num_entries = private->number; info.size = private->size; strcpy(info.name, name); if (copy_to_user(user, &info, *len) != 0) ret = -EFAULT; else ret = 0; xt_table_unlock(t); module_put(t->me); } else ret = PTR_ERR(t); #ifdef CONFIG_COMPAT if (compat) xt_compat_unlock(AF_INET); #endif return ret; }
static int get_info(struct net *net, void __user *user, const int *len, int compat) { char name[XT_TABLE_MAXNAMELEN]; struct xt_table *t; int ret; if (*len != sizeof(struct ipt_getinfo)) return -EINVAL; if (copy_from_user(name, user, sizeof(name)) != 0) return -EFAULT; name[XT_TABLE_MAXNAMELEN-1] = '\0'; #ifdef CONFIG_COMPAT if (compat) xt_compat_lock(AF_INET); #endif t = xt_request_find_table_lock(net, AF_INET, name); if (!IS_ERR(t)) { struct ipt_getinfo info; const struct xt_table_info *private = t->private; #ifdef CONFIG_COMPAT struct xt_table_info tmp; if (compat) { ret = compat_table_info(private, &tmp); xt_compat_flush_offsets(AF_INET); private = &tmp; } #endif memset(&info, 0, sizeof(info)); info.valid_hooks = t->valid_hooks; memcpy(info.hook_entry, private->hook_entry, sizeof(info.hook_entry)); memcpy(info.underflow, private->underflow, sizeof(info.underflow)); info.num_entries = private->number; info.size = private->size; strcpy(info.name, name); if (copy_to_user(user, &info, *len) != 0) ret = -EFAULT; else ret = 0; xt_table_unlock(t); module_put(t->me); } else ret = PTR_ERR(t); #ifdef CONFIG_COMPAT if (compat) xt_compat_unlock(AF_INET); #endif return ret; }
C
linux
0
CVE-2015-5330
https://www.cvedetails.com/cve/CVE-2015-5330/
CWE-200
https://git.samba.org/?p=samba.git;a=commit;h=538d305de91e34a2938f5f219f18bf0e1918763f
538d305de91e34a2938f5f219f18bf0e1918763f
null
struct smb_iconv_handle *get_iconv_testing_handle(TALLOC_CTX *mem_ctx, const char *dos_charset, const char *unix_charset, bool use_builtin_handlers) { return smb_iconv_handle_reinit(mem_ctx, dos_charset, unix_charset, use_builtin_handlers, NULL); }
struct smb_iconv_handle *get_iconv_testing_handle(TALLOC_CTX *mem_ctx, const char *dos_charset, const char *unix_charset, bool use_builtin_handlers) { return smb_iconv_handle_reinit(mem_ctx, dos_charset, unix_charset, use_builtin_handlers, NULL); }
C
samba
0
CVE-2013-2141
https://www.cvedetails.com/cve/CVE-2013-2141/
CWE-399
https://github.com/torvalds/linux/commit/b9e146d8eb3b9ecae5086d373b50fa0c1f3e7f0f
b9e146d8eb3b9ecae5086d373b50fa0c1f3e7f0f
kernel/signal.c: stop info leak via the tkill and the tgkill syscalls This fixes a kernel memory contents leak via the tkill and tgkill syscalls for compat processes. This is visible in the siginfo_t->_sifields._rt.si_sigval.sival_ptr field when handling signals delivered from tkill. The place of the infoleak: int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) { ... put_user_ex(ptr_to_compat(from->si_ptr), &to->si_ptr); ... } Signed-off-by: Emese Revfy <re.emese@gmail.com> Reviewed-by: PaX Team <pageexec@freemail.hu> Signed-off-by: Kees Cook <keescook@chromium.org> Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Oleg Nesterov <oleg@redhat.com> Cc: "Eric W. Biederman" <ebiederm@xmission.com> Cc: Serge Hallyn <serge.hallyn@canonical.com> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss) { return do_sigaltstack(uss, uoss, current_user_stack_pointer()); }
SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss) { return do_sigaltstack(uss, uoss, current_user_stack_pointer()); }
C
linux
0
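
The CVE-2013-2141 commit message (the sigaltstack wrapper above is unchanged context) describes a classic leak: a siginfo built on the kernel stack with only some fields assigned and then copied out to (compat) userspace, so the untouched fields, including the sival_ptr the report quotes, expose stale stack bytes. A sketch of the corresponding fix class, not the verbatim patch.

struct siginfo info = {};      /* was: struct siginfo info;  -- uninitialized */

info.si_signo = sig;
info.si_errno = 0;
info.si_code  = SI_TKILL;
/* Fields never assigned here now read back as zero when copied to userspace,
 * instead of whatever the kernel stack happened to contain. */
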
CVE-2014-7842
https://www.cvedetails.com/cve/CVE-2014-7842/
CWE-362
https://github.com/torvalds/linux/commit/a2b9e6c1a35afcc0973acb72e591c714e78885ff
a2b9e6c1a35afcc0973acb72e591c714e78885ff
KVM: x86: Don't report guest userspace emulation error to userspace Commit fc3a9157d314 ("KVM: X86: Don't report L2 emulation failures to user-space") disabled the reporting of L2 (nested guest) emulation failures to userspace due to race-condition between a vmexit and the instruction emulator. The same rational applies also to userspace applications that are permitted by the guest OS to access MMIO area or perform PIO. This patch extends the current behavior - of injecting a #UD instead of reporting it to userspace - also for guest userspace code. Signed-off-by: Nadav Amit <namit@cs.technion.ac.il> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) { switch (dr) { case 0 ... 3: *val = vcpu->arch.db[dr]; break; case 4: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* fall through */ case 6: if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) *val = vcpu->arch.dr6; else *val = kvm_x86_ops->get_dr6(vcpu); break; case 5: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* fall through */ default: /* 7 */ *val = vcpu->arch.dr7; break; } return 0; }
static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) { switch (dr) { case 0 ... 3: *val = vcpu->arch.db[dr]; break; case 4: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* fall through */ case 6: if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) *val = vcpu->arch.dr6; else *val = kvm_x86_ops->get_dr6(vcpu); break; case 5: if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) return 1; /* fall through */ default: /* 7 */ *val = vcpu->arch.dr7; break; } return 0; }
C
linux
0
CVE-2017-16932
https://www.cvedetails.com/cve/CVE-2017-16932/
CWE-835
https://github.com/GNOME/libxml2/commit/899a5d9f0ed13b8e32449a08a361e0de127dd961
899a5d9f0ed13b8e32449a08a361e0de127dd961
Detect infinite recursion in parameter entities When expanding a parameter entity in a DTD, infinite recursion could lead to an infinite loop or memory exhaustion. Thanks to Wei Lei for the first of many reports. Fixes bug 759579.
xmlParseMarkupDecl(xmlParserCtxtPtr ctxt) { GROW; if (CUR == '<') { if (NXT(1) == '!') { switch (NXT(2)) { case 'E': if (NXT(3) == 'L') xmlParseElementDecl(ctxt); else if (NXT(3) == 'N') xmlParseEntityDecl(ctxt); break; case 'A': xmlParseAttributeListDecl(ctxt); break; case 'N': xmlParseNotationDecl(ctxt); break; case '-': xmlParseComment(ctxt); break; default: /* there is an error but it will be detected later */ break; } } else if (NXT(1) == '?') { xmlParsePI(ctxt); } } /* * detect requirement to exit there and act accordingly * and avoid having instate overriden later on */ if (ctxt->instate == XML_PARSER_EOF) return; /* * Conditional sections are allowed from entities included * by PE References in the internal subset. */ if ((ctxt->external == 0) && (ctxt->inputNr > 1)) { if ((RAW == '<') && (NXT(1) == '!') && (NXT(2) == '[')) { xmlParseConditionalSections(ctxt); } } ctxt->instate = XML_PARSER_DTD; }
xmlParseMarkupDecl(xmlParserCtxtPtr ctxt) { GROW; if (CUR == '<') { if (NXT(1) == '!') { switch (NXT(2)) { case 'E': if (NXT(3) == 'L') xmlParseElementDecl(ctxt); else if (NXT(3) == 'N') xmlParseEntityDecl(ctxt); break; case 'A': xmlParseAttributeListDecl(ctxt); break; case 'N': xmlParseNotationDecl(ctxt); break; case '-': xmlParseComment(ctxt); break; default: /* there is an error but it will be detected later */ break; } } else if (NXT(1) == '?') { xmlParsePI(ctxt); } } /* * detect requirement to exit there and act accordingly * and avoid having instate overriden later on */ if (ctxt->instate == XML_PARSER_EOF) return; /* * Conditional sections are allowed from entities included * by PE References in the internal subset. */ if ((ctxt->external == 0) && (ctxt->inputNr > 1)) { if ((RAW == '<') && (NXT(1) == '!') && (NXT(2) == '[')) { xmlParseConditionalSections(ctxt); } } ctxt->instate = XML_PARSER_DTD; }
C
libxml2
0
CVE-2016-2476
https://www.cvedetails.com/cve/CVE-2016-2476/
CWE-119
https://android.googlesource.com/platform/frameworks/av/+/94d9e646454f6246bf823b6897bd6aea5f08eda3
94d9e646454f6246bf823b6897bd6aea5f08eda3
Fix initialization of AAC presentation struct Otherwise the new size checks trip on this. Bug: 27207275 Change-Id: I1f8f01097e3a88ff041b69279a6121be842f1766
status_t ACodec::setupVideoDecoder( const char *mime, const sp<AMessage> &msg, bool haveNativeWindow) { int32_t width, height; if (!msg->findInt32("width", &width) || !msg->findInt32("height", &height)) { return INVALID_OPERATION; } OMX_VIDEO_CODINGTYPE compressionFormat; status_t err = GetVideoCodingTypeFromMime(mime, &compressionFormat); if (err != OK) { return err; } err = setVideoPortFormatType( kPortIndexInput, compressionFormat, OMX_COLOR_FormatUnused); if (err != OK) { return err; } int32_t tmp; if (msg->findInt32("color-format", &tmp)) { OMX_COLOR_FORMATTYPE colorFormat = static_cast<OMX_COLOR_FORMATTYPE>(tmp); err = setVideoPortFormatType( kPortIndexOutput, OMX_VIDEO_CodingUnused, colorFormat, haveNativeWindow); if (err != OK) { ALOGW("[%s] does not support color format %d", mComponentName.c_str(), colorFormat); err = setSupportedOutputFormat(!haveNativeWindow /* getLegacyFlexibleFormat */); } } else { err = setSupportedOutputFormat(!haveNativeWindow /* getLegacyFlexibleFormat */); } if (err != OK) { return err; } int32_t frameRateInt; float frameRateFloat; if (!msg->findFloat("frame-rate", &frameRateFloat)) { if (!msg->findInt32("frame-rate", &frameRateInt)) { frameRateInt = -1; } frameRateFloat = (float)frameRateInt; } err = setVideoFormatOnPort( kPortIndexInput, width, height, compressionFormat, frameRateFloat); if (err != OK) { return err; } err = setVideoFormatOnPort( kPortIndexOutput, width, height, OMX_VIDEO_CodingUnused); if (err != OK) { return err; } return OK; }
status_t ACodec::setupVideoDecoder( const char *mime, const sp<AMessage> &msg, bool haveNativeWindow) { int32_t width, height; if (!msg->findInt32("width", &width) || !msg->findInt32("height", &height)) { return INVALID_OPERATION; } OMX_VIDEO_CODINGTYPE compressionFormat; status_t err = GetVideoCodingTypeFromMime(mime, &compressionFormat); if (err != OK) { return err; } err = setVideoPortFormatType( kPortIndexInput, compressionFormat, OMX_COLOR_FormatUnused); if (err != OK) { return err; } int32_t tmp; if (msg->findInt32("color-format", &tmp)) { OMX_COLOR_FORMATTYPE colorFormat = static_cast<OMX_COLOR_FORMATTYPE>(tmp); err = setVideoPortFormatType( kPortIndexOutput, OMX_VIDEO_CodingUnused, colorFormat, haveNativeWindow); if (err != OK) { ALOGW("[%s] does not support color format %d", mComponentName.c_str(), colorFormat); err = setSupportedOutputFormat(!haveNativeWindow /* getLegacyFlexibleFormat */); } } else { err = setSupportedOutputFormat(!haveNativeWindow /* getLegacyFlexibleFormat */); } if (err != OK) { return err; } int32_t frameRateInt; float frameRateFloat; if (!msg->findFloat("frame-rate", &frameRateFloat)) { if (!msg->findInt32("frame-rate", &frameRateInt)) { frameRateInt = -1; } frameRateFloat = (float)frameRateInt; } err = setVideoFormatOnPort( kPortIndexInput, width, height, compressionFormat, frameRateFloat); if (err != OK) { return err; } err = setVideoFormatOnPort( kPortIndexOutput, width, height, OMX_VIDEO_CodingUnused); if (err != OK) { return err; } return OK; }
C
Android
0
CVE-2017-15423
https://www.cvedetails.com/cve/CVE-2017-15423/
CWE-310
https://github.com/chromium/chromium/commit/a263d1cf62a9c75be6aaafdec88aacfcef1e8fd2
a263d1cf62a9c75be6aaafdec88aacfcef1e8fd2
Roll src/third_party/boringssl/src 664e99a64..696c13bd6 https://boringssl.googlesource.com/boringssl/+log/664e99a6486c293728097c661332f92bf2d847c6..696c13bd6ab78011adfe7b775519c8b7cc82b604 BUG=778101 Change-Id: I8dda4f3db952597148e3c7937319584698d00e1c Reviewed-on: https://chromium-review.googlesource.com/747941 Reviewed-by: Avi Drissman <avi@chromium.org> Reviewed-by: David Benjamin <davidben@chromium.org> Commit-Queue: Steven Valdez <svaldez@chromium.org> Cr-Commit-Position: refs/heads/master@{#513774}
int BrowserMainLoop::CreateThreads() { TRACE_EVENT0("startup,rail", "BrowserMainLoop::CreateThreads"); { auto task_scheduler_init_params = GetContentClient()->browser()->GetTaskSchedulerInitParams(); if (!task_scheduler_init_params) task_scheduler_init_params = GetDefaultTaskSchedulerInitParams(); DCHECK(task_scheduler_init_params); if (base::CommandLine::ForCurrentProcess()->HasSwitch( switches::kSingleProcess)) { const base::SchedulerWorkerPoolParams& current_foreground_worker_pool_params( task_scheduler_init_params->foreground_worker_pool_params); task_scheduler_init_params->foreground_worker_pool_params = base::SchedulerWorkerPoolParams( std::max(GetMinThreadsInRendererTaskSchedulerForegroundPool(), current_foreground_worker_pool_params.max_threads()), current_foreground_worker_pool_params.suggested_reclaim_time(), current_foreground_worker_pool_params.backward_compatibility()); } base::TaskScheduler::GetInstance()->Start( *task_scheduler_init_params.get()); } base::SequencedWorkerPool::EnableWithRedirectionToTaskSchedulerForProcess(); base::Thread::Options io_message_loop_options; io_message_loop_options.message_loop_type = base::MessageLoop::TYPE_IO; for (size_t thread_id = BrowserThread::UI + 1; thread_id < BrowserThread::ID_COUNT; ++thread_id) { std::unique_ptr<BrowserProcessSubThread>* thread_to_start = nullptr; base::Thread::Options options; base::MessageLoop* message_loop = nullptr; base::TaskTraits non_ui_non_io_task_runner_traits; constexpr base::TaskTraits kUserVisibleTraits = { base::MayBlock(), base::WithBaseSyncPrimitives(), base::TaskPriority::USER_VISIBLE, base::TaskShutdownBehavior::BLOCK_SHUTDOWN}; constexpr base::TaskTraits kUserBlockingTraits = { base::MayBlock(), base::WithBaseSyncPrimitives(), base::TaskPriority::USER_BLOCKING, base::TaskShutdownBehavior::BLOCK_SHUTDOWN}; switch (thread_id) { case BrowserThread::DB: TRACE_EVENT_BEGIN1("startup", "BrowserMainLoop::CreateThreads:start", "Thread", "BrowserThread::DB"); non_ui_non_io_task_runner_traits = kUserVisibleTraits; break; case BrowserThread::FILE_USER_BLOCKING: TRACE_EVENT_BEGIN1("startup", "BrowserMainLoop::CreateThreads:start", "Thread", "BrowserThread::FILE_USER_BLOCKING"); non_ui_non_io_task_runner_traits = kUserBlockingTraits; break; case BrowserThread::FILE: TRACE_EVENT_BEGIN1("startup", "BrowserMainLoop::CreateThreads:start", "Thread", "BrowserThread::FILE"); non_ui_non_io_task_runner_traits = kUserVisibleTraits; break; case BrowserThread::PROCESS_LAUNCHER: TRACE_EVENT_BEGIN1("startup", "BrowserMainLoop::CreateThreads:start", "Thread", "BrowserThread::PROCESS_LAUNCHER"); #if defined(OS_ANDROID) message_loop = android::LauncherThread::GetMessageLoop(); DCHECK(message_loop); thread_to_start = &process_launcher_thread_; #else // defined(OS_ANDROID) non_ui_non_io_task_runner_traits = kUserBlockingTraits; #endif // defined(OS_ANDROID) break; case BrowserThread::CACHE: TRACE_EVENT_BEGIN1("startup", "BrowserMainLoop::CreateThreads:start", "Thread", "BrowserThread::CACHE"); #if defined(OS_WIN) thread_to_start = &cache_thread_; options = io_message_loop_options; options.timer_slack = base::TIMER_SLACK_MAXIMUM; #else // OS_WIN non_ui_non_io_task_runner_traits = kUserBlockingTraits; #endif // OS_WIN break; case BrowserThread::IO: TRACE_EVENT_BEGIN1("startup", "BrowserMainLoop::CreateThreads:start", "Thread", "BrowserThread::IO"); thread_to_start = &io_thread_; options = io_message_loop_options; #if defined(OS_ANDROID) || defined(OS_CHROMEOS) options.priority = base::ThreadPriority::DISPLAY; #endif break; case 
BrowserThread::UI: // Falls through. case BrowserThread::ID_COUNT: // Falls through. NOTREACHED(); break; } BrowserThread::ID id = static_cast<BrowserThread::ID>(thread_id); if (thread_to_start) { (*thread_to_start) .reset(message_loop ? new BrowserProcessSubThread(id, message_loop) : new BrowserProcessSubThread(id)); if (!message_loop && !(*thread_to_start)->StartWithOptions(options)) LOG(FATAL) << "Failed to start the browser thread: id == " << id; } else { scoped_refptr<base::SingleThreadTaskRunner> redirection_task_runner; #if defined(OS_WIN) redirection_task_runner = (thread_id == BrowserThread::FILE) ? base::CreateCOMSTATaskRunnerWithTraits( non_ui_non_io_task_runner_traits, base::SingleThreadTaskRunnerThreadMode::DEDICATED) : base::CreateSingleThreadTaskRunnerWithTraits( non_ui_non_io_task_runner_traits, base::SingleThreadTaskRunnerThreadMode::DEDICATED); #else // defined(OS_WIN) redirection_task_runner = base::CreateSingleThreadTaskRunnerWithTraits( non_ui_non_io_task_runner_traits, base::SingleThreadTaskRunnerThreadMode::DEDICATED); #endif // defined(OS_WIN) DCHECK(redirection_task_runner); BrowserThreadImpl::RedirectThreadIDToTaskRunner( id, std::move(redirection_task_runner)); } TRACE_EVENT_END0("startup", "BrowserMainLoop::CreateThreads:start"); } created_threads_ = true; return result_code_; }
int BrowserMainLoop::CreateThreads() { TRACE_EVENT0("startup,rail", "BrowserMainLoop::CreateThreads"); { auto task_scheduler_init_params = GetContentClient()->browser()->GetTaskSchedulerInitParams(); if (!task_scheduler_init_params) task_scheduler_init_params = GetDefaultTaskSchedulerInitParams(); DCHECK(task_scheduler_init_params); if (base::CommandLine::ForCurrentProcess()->HasSwitch( switches::kSingleProcess)) { const base::SchedulerWorkerPoolParams& current_foreground_worker_pool_params( task_scheduler_init_params->foreground_worker_pool_params); task_scheduler_init_params->foreground_worker_pool_params = base::SchedulerWorkerPoolParams( std::max(GetMinThreadsInRendererTaskSchedulerForegroundPool(), current_foreground_worker_pool_params.max_threads()), current_foreground_worker_pool_params.suggested_reclaim_time(), current_foreground_worker_pool_params.backward_compatibility()); } base::TaskScheduler::GetInstance()->Start( *task_scheduler_init_params.get()); } base::SequencedWorkerPool::EnableWithRedirectionToTaskSchedulerForProcess(); base::Thread::Options io_message_loop_options; io_message_loop_options.message_loop_type = base::MessageLoop::TYPE_IO; for (size_t thread_id = BrowserThread::UI + 1; thread_id < BrowserThread::ID_COUNT; ++thread_id) { std::unique_ptr<BrowserProcessSubThread>* thread_to_start = nullptr; base::Thread::Options options; base::MessageLoop* message_loop = nullptr; base::TaskTraits non_ui_non_io_task_runner_traits; constexpr base::TaskTraits kUserVisibleTraits = { base::MayBlock(), base::WithBaseSyncPrimitives(), base::TaskPriority::USER_VISIBLE, base::TaskShutdownBehavior::BLOCK_SHUTDOWN}; constexpr base::TaskTraits kUserBlockingTraits = { base::MayBlock(), base::WithBaseSyncPrimitives(), base::TaskPriority::USER_BLOCKING, base::TaskShutdownBehavior::BLOCK_SHUTDOWN}; switch (thread_id) { case BrowserThread::DB: TRACE_EVENT_BEGIN1("startup", "BrowserMainLoop::CreateThreads:start", "Thread", "BrowserThread::DB"); non_ui_non_io_task_runner_traits = kUserVisibleTraits; break; case BrowserThread::FILE_USER_BLOCKING: TRACE_EVENT_BEGIN1("startup", "BrowserMainLoop::CreateThreads:start", "Thread", "BrowserThread::FILE_USER_BLOCKING"); non_ui_non_io_task_runner_traits = kUserBlockingTraits; break; case BrowserThread::FILE: TRACE_EVENT_BEGIN1("startup", "BrowserMainLoop::CreateThreads:start", "Thread", "BrowserThread::FILE"); non_ui_non_io_task_runner_traits = kUserVisibleTraits; break; case BrowserThread::PROCESS_LAUNCHER: TRACE_EVENT_BEGIN1("startup", "BrowserMainLoop::CreateThreads:start", "Thread", "BrowserThread::PROCESS_LAUNCHER"); #if defined(OS_ANDROID) message_loop = android::LauncherThread::GetMessageLoop(); DCHECK(message_loop); thread_to_start = &process_launcher_thread_; #else // defined(OS_ANDROID) non_ui_non_io_task_runner_traits = kUserBlockingTraits; #endif // defined(OS_ANDROID) break; case BrowserThread::CACHE: TRACE_EVENT_BEGIN1("startup", "BrowserMainLoop::CreateThreads:start", "Thread", "BrowserThread::CACHE"); #if defined(OS_WIN) thread_to_start = &cache_thread_; options = io_message_loop_options; options.timer_slack = base::TIMER_SLACK_MAXIMUM; #else // OS_WIN non_ui_non_io_task_runner_traits = kUserBlockingTraits; #endif // OS_WIN break; case BrowserThread::IO: TRACE_EVENT_BEGIN1("startup", "BrowserMainLoop::CreateThreads:start", "Thread", "BrowserThread::IO"); thread_to_start = &io_thread_; options = io_message_loop_options; #if defined(OS_ANDROID) || defined(OS_CHROMEOS) options.priority = base::ThreadPriority::DISPLAY; #endif break; case 
BrowserThread::UI: // Falls through. case BrowserThread::ID_COUNT: // Falls through. NOTREACHED(); break; } BrowserThread::ID id = static_cast<BrowserThread::ID>(thread_id); if (thread_to_start) { (*thread_to_start) .reset(message_loop ? new BrowserProcessSubThread(id, message_loop) : new BrowserProcessSubThread(id)); if (!message_loop && !(*thread_to_start)->StartWithOptions(options)) LOG(FATAL) << "Failed to start the browser thread: id == " << id; } else { scoped_refptr<base::SingleThreadTaskRunner> redirection_task_runner; #if defined(OS_WIN) redirection_task_runner = (thread_id == BrowserThread::FILE) ? base::CreateCOMSTATaskRunnerWithTraits( non_ui_non_io_task_runner_traits, base::SingleThreadTaskRunnerThreadMode::DEDICATED) : base::CreateSingleThreadTaskRunnerWithTraits( non_ui_non_io_task_runner_traits, base::SingleThreadTaskRunnerThreadMode::DEDICATED); #else // defined(OS_WIN) redirection_task_runner = base::CreateSingleThreadTaskRunnerWithTraits( non_ui_non_io_task_runner_traits, base::SingleThreadTaskRunnerThreadMode::DEDICATED); #endif // defined(OS_WIN) DCHECK(redirection_task_runner); BrowserThreadImpl::RedirectThreadIDToTaskRunner( id, std::move(redirection_task_runner)); } TRACE_EVENT_END0("startup", "BrowserMainLoop::CreateThreads:start"); } created_threads_ = true; return result_code_; }
C
Chrome
0
CVE-2016-6787
https://www.cvedetails.com/cve/CVE-2016-6787/
CWE-264
https://github.com/torvalds/linux/commit/f63a8daa5812afef4f06c962351687e1ff9ccb2b
f63a8daa5812afef4f06c962351687e1ff9ccb2b
perf: Fix event->ctx locking There have been a few reported issues wrt. the lack of locking around changing event->ctx. This patch tries to address those. It avoids the whole rwsem thing; and while it appears to work, please give it some thought in review. What I did fail at is sensible runtime checks on the use of event->ctx, the RCU use makes it very hard. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Jiri Olsa <jolsa@redhat.com> Cc: Arnaldo Carvalho de Melo <acme@kernel.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Link: http://lkml.kernel.org/r/20150123125834.209535886@infradead.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
static void perf_mmap_open(struct vm_area_struct *vma) { struct perf_event *event = vma->vm_file->private_data; atomic_inc(&event->mmap_count); atomic_inc(&event->rb->mmap_count); }
static void perf_mmap_open(struct vm_area_struct *vma) { struct perf_event *event = vma->vm_file->private_data; atomic_inc(&event->mmap_count); atomic_inc(&event->rb->mmap_count); }
C
linux
0
null
null
null
https://github.com/chromium/chromium/commit/ec14f31eca3a51f665432973552ee575635132b3
ec14f31eca3a51f665432973552ee575635132b3
[EFL] Change the behavior of ewk_view_scale_set. https://bugs.webkit.org/show_bug.cgi?id=70078 Reviewed by Eric Seidel. Remove center point basis zoom alignment from ewk_view_scale_set to call Page::setPageScaleFactor without any adjustment. * ewk/ewk_view.cpp: (ewk_view_scale_set): * ewk/ewk_view.h: git-svn-id: svn://svn.chromium.org/blink/trunk@103288 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void ewk_view_frame_main_icon_received(Evas_Object* ewkView) { DBG("ewkView=%p", ewkView); Evas_Object* frame = ewk_view_frame_main_get(ewkView); evas_object_smart_callback_call(frame, "icon,received", 0); }
void ewk_view_frame_main_icon_received(Evas_Object* ewkView) { DBG("ewkView=%p", ewkView); Evas_Object* frame = ewk_view_frame_main_get(ewkView); evas_object_smart_callback_call(frame, "icon,received", 0); }
C
Chrome
0
CVE-2016-7466
https://www.cvedetails.com/cve/CVE-2016-7466/
CWE-399
https://git.qemu.org/?p=qemu.git;a=commit;h=b53dd4495ced2432a0b652ea895e651d07336f7e
b53dd4495ced2432a0b652ea895e651d07336f7e
null
static TRBType xhci_ring_fetch(XHCIState *xhci, XHCIRing *ring, XHCITRB *trb, dma_addr_t *addr) { PCIDevice *pci_dev = PCI_DEVICE(xhci); while (1) { TRBType type; pci_dma_read(pci_dev, ring->dequeue, trb, TRB_SIZE); trb->addr = ring->dequeue; trb->ccs = ring->ccs; le64_to_cpus(&trb->parameter); le32_to_cpus(&trb->status); le32_to_cpus(&trb->control); trace_usb_xhci_fetch_trb(ring->dequeue, trb_name(trb), trb->parameter, trb->status, trb->control); if ((trb->control & TRB_C) != ring->ccs) { return 0; } type = TRB_TYPE(*trb); if (type != TR_LINK) { if (addr) { *addr = ring->dequeue; } ring->dequeue += TRB_SIZE; return type; } else { ring->dequeue = xhci_mask64(trb->parameter); if (trb->control & TRB_LK_TC) { ring->ccs = !ring->ccs; } } } }
static TRBType xhci_ring_fetch(XHCIState *xhci, XHCIRing *ring, XHCITRB *trb, dma_addr_t *addr) { PCIDevice *pci_dev = PCI_DEVICE(xhci); while (1) { TRBType type; pci_dma_read(pci_dev, ring->dequeue, trb, TRB_SIZE); trb->addr = ring->dequeue; trb->ccs = ring->ccs; le64_to_cpus(&trb->parameter); le32_to_cpus(&trb->status); le32_to_cpus(&trb->control); trace_usb_xhci_fetch_trb(ring->dequeue, trb_name(trb), trb->parameter, trb->status, trb->control); if ((trb->control & TRB_C) != ring->ccs) { return 0; } type = TRB_TYPE(*trb); if (type != TR_LINK) { if (addr) { *addr = ring->dequeue; } ring->dequeue += TRB_SIZE; return type; } else { ring->dequeue = xhci_mask64(trb->parameter); if (trb->control & TRB_LK_TC) { ring->ccs = !ring->ccs; } } } }
C
qemu
0
CVE-2011-1768
https://www.cvedetails.com/cve/CVE-2011-1768/
CWE-362
https://github.com/torvalds/linux/commit/d5aa407f59f5b83d2c50ec88f5bf56d40f1f8978
d5aa407f59f5b83d2c50ec88f5bf56d40f1f8978
tunnels: fix netns vs proto registration ordering Same stuff as in ip_gre patch: receive hook can be called before netns setup is done, oopsing in net_generic(). Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb) { if (INET_ECN_is_ce(iph->tos)) IP6_ECN_set_ce(ipv6_hdr(skb)); }
static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb) { if (INET_ECN_is_ce(iph->tos)) IP6_ECN_set_ce(ipv6_hdr(skb)); }
C
linux
0
CVE-2013-1789
https://www.cvedetails.com/cve/CVE-2013-1789/
null
https://cgit.freedesktop.org/poppler/poppler/commit/?h=poppler-0.22&id=a9b8ab4657dec65b8b86c225d12c533ad7e984e2
a9b8ab4657dec65b8b86c225d12c533ad7e984e2
null
void Splash::setTransfer(Guchar *red, Guchar *green, Guchar *blue, Guchar *gray) { state->setTransfer(red, green, blue, gray); }
void Splash::setTransfer(Guchar *red, Guchar *green, Guchar *blue, Guchar *gray) { state->setTransfer(red, green, blue, gray); }
CPP
poppler
0
CVE-2017-15423
https://www.cvedetails.com/cve/CVE-2017-15423/
CWE-310
https://github.com/chromium/chromium/commit/a263d1cf62a9c75be6aaafdec88aacfcef1e8fd2
a263d1cf62a9c75be6aaafdec88aacfcef1e8fd2
Roll src/third_party/boringssl/src 664e99a64..696c13bd6 https://boringssl.googlesource.com/boringssl/+log/664e99a6486c293728097c661332f92bf2d847c6..696c13bd6ab78011adfe7b775519c8b7cc82b604 BUG=778101 Change-Id: I8dda4f3db952597148e3c7937319584698d00e1c Reviewed-on: https://chromium-review.googlesource.com/747941 Reviewed-by: Avi Drissman <avi@chromium.org> Reviewed-by: David Benjamin <davidben@chromium.org> Commit-Queue: Steven Valdez <svaldez@chromium.org> Cr-Commit-Position: refs/heads/master@{#513774}
static void GotResult(bool hdr_enabled) { display::win::ScreenWin::SetHDREnabled(hdr_enabled); }
static void GotResult(bool hdr_enabled) { display::win::ScreenWin::SetHDREnabled(hdr_enabled); }
C
Chrome
0
null
null
null
https://github.com/chromium/chromium/commit/1da0daecc540238cb473f0d6322da51d3a544244
1da0daecc540238cb473f0d6322da51d3a544244
Change VideoDecoder::ReadCB to take const scoped_refptr<VideoFrame>&. BUG=none TEST=media_unittests, media layout tests. Review URL: https://chromiumcodereview.appspot.com/10559074 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@143192 0039d316-1c4b-4281-b951-d872f2087c98
ACTION(DeleteDataBuffer) { delete[] arg0->memory_pointer; }
ACTION(DeleteDataBuffer) { delete[] arg0->memory_pointer; }
C
Chrome
0
CVE-2011-4594
https://www.cvedetails.com/cve/CVE-2011-4594/
null
https://github.com/torvalds/linux/commit/bc909d9ddbf7778371e36a651d6e4194b1cc7d4c
bc909d9ddbf7778371e36a651d6e4194b1cc7d4c
sendmmsg/sendmsg: fix unsafe user pointer access Dereferencing a user pointer directly from kernel-space without going through the copy_from_user family of functions is a bad idea. Two of such usages can be found in the sendmsg code path called from sendmmsg, added by commit c71d8ebe7a4496fb7231151cb70a6baa0cb56f9a upstream. commit 5b47b8038f183b44d2d8ff1c7d11a5c1be706b34 in the 3.0-stable tree. Usages are performed through memcmp() and memcpy() directly. Fix those by using the already copied msg_sys structure instead of the __user *msg structure. Note that msg_sys can be set to NULL by verify_compat_iovec() or verify_iovec(), which requires additional NULL pointer checks. Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> Signed-off-by: David Goulet <dgoulet@ev0ke.net> CC: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp> CC: Anton Blanchard <anton@samba.org> CC: David S. Miller <davem@davemloft.net> CC: stable <stable@kernel.org> Signed-off-by: David S. Miller <davem@davemloft.net>
static ssize_t sock_sendpage(struct file *file, struct page *page, int offset, size_t size, loff_t *ppos, int more) { struct socket *sock; int flags; sock = file->private_data; flags = !(file->f_flags & O_NONBLOCK) ? 0 : MSG_DONTWAIT; if (more) flags |= MSG_MORE; return kernel_sendpage(sock, page, offset, size, flags); }
static ssize_t sock_sendpage(struct file *file, struct page *page, int offset, size_t size, loff_t *ppos, int more) { struct socket *sock; int flags; sock = file->private_data; flags = !(file->f_flags & O_NONBLOCK) ? 0 : MSG_DONTWAIT; if (more) flags |= MSG_MORE; return kernel_sendpage(sock, page, offset, size, flags); }
C
linux
0
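The commit message in the record above (the sendmmsg/sendmsg unsafe user pointer access fix) states the general rule behind the change: kernel code must not memcmp()/memcpy() through a __user pointer, but should work on a copy brought in with copy_from_user(), with NULL checks on the kernel-side structure. The sketch below illustrates only that pattern; the names (demo_ctl, demo_handle_ctl) are hypothetical and this is not the actual net/socket.c change.

/* Illustrative sketch of the copy-then-inspect pattern described above.
 * Hypothetical names; not the real sendmsg()/sendmmsg() fix. */
#include <linux/uaccess.h>
#include <linux/errno.h>

struct demo_ctl {
	int  cmd;
	char name[16];
};

static int demo_handle_ctl(const struct demo_ctl __user *uctl)
{
	struct demo_ctl kctl;

	if (!uctl)
		return -EINVAL;

	/* Bring the data into kernel memory; never dereference uctl directly. */
	if (copy_from_user(&kctl, uctl, sizeof(kctl)))
		return -EFAULT;

	/* All further checks and copies use only the kernel-side copy. */
	if (kctl.cmd < 0)
		return -EINVAL;

	return 0;
}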
CVE-2013-6626
https://www.cvedetails.com/cve/CVE-2013-6626/
null
https://github.com/chromium/chromium/commit/90fb08ed0146c9beacfd4dde98a20fc45419fff3
90fb08ed0146c9beacfd4dde98a20fc45419fff3
Cancel JavaScript dialogs when an interstitial appears. BUG=295695 TEST=See bug for repro steps. Review URL: https://chromiumcodereview.appspot.com/24360011 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@225026 0039d316-1c4b-4281-b951-d872f2087c98
string16 WindowCaptionFromPageTitle(const string16& page_title) { #if defined(OS_MACOSX) || defined(OS_CHROMEOS) if (page_title.empty()) return l10n_util::GetStringUTF16(IDS_BROWSER_WINDOW_MAC_TAB_UNTITLED); return page_title; #else if (page_title.empty()) return l10n_util::GetStringUTF16(IDS_PRODUCT_NAME); return l10n_util::GetStringFUTF16(IDS_BROWSER_WINDOW_TITLE_FORMAT, page_title); #endif }
string16 WindowCaptionFromPageTitle(const string16& page_title) { #if defined(OS_MACOSX) || defined(OS_CHROMEOS) if (page_title.empty()) return l10n_util::GetStringUTF16(IDS_BROWSER_WINDOW_MAC_TAB_UNTITLED); return page_title; #else if (page_title.empty()) return l10n_util::GetStringUTF16(IDS_PRODUCT_NAME); return l10n_util::GetStringFUTF16(IDS_BROWSER_WINDOW_TITLE_FORMAT, page_title); #endif }
C
Chrome
0
CVE-2018-14017
https://www.cvedetails.com/cve/CVE-2018-14017/
CWE-125
https://github.com/radare/radare2/commit/eb0fb72b3c5307ec8e33effb6bf947e38cfdffe8
eb0fb72b3c5307ec8e33effb6bf947e38cfdffe8
Fix #10498 - Crash in fuzzed java file
R_API ut64 r_bin_java_class_cp_calc_size(RBinJavaCPTypeObj *obj) { ut64 size = 0; size += 1; size += 2; return size; }
R_API ut64 r_bin_java_class_cp_calc_size(RBinJavaCPTypeObj *obj) { ut64 size = 0; size += 1; size += 2; return size; }
C
radare2
0
CVE-2016-5093
https://www.cvedetails.com/cve/CVE-2016-5093/
CWE-125
https://github.com/php/php-src/commit/97eff7eb57fc2320c267a949cffd622c38712484?w=1
97eff7eb57fc2320c267a949cffd622c38712484?w=1
Fix bug #72241: get_icu_value_internal out-of-bounds read
PHP_FUNCTION(locale_compose) { smart_str loc_name_s = {0}; smart_str *loc_name = &loc_name_s; zval* arr = NULL; HashTable* hash_arr = NULL; int result = 0; intl_error_reset( NULL TSRMLS_CC ); if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "a", &arr) == FAILURE) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_compose: unable to parse input params", 0 TSRMLS_CC ); RETURN_FALSE; } hash_arr = HASH_OF( arr ); if( !hash_arr || zend_hash_num_elements( hash_arr ) == 0 ) RETURN_FALSE; /* Check for grandfathered first */ result = append_key_value(loc_name, hash_arr, LOC_GRANDFATHERED_LANG_TAG); if( result == SUCCESS){ RETURN_SMART_STR(loc_name); } if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Not grandfathered */ result = append_key_value(loc_name, hash_arr , LOC_LANG_TAG); if( result == LOC_NOT_FOUND ){ intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_compose: parameter array does not contain 'language' tag.", 0 TSRMLS_CC ); smart_str_free(loc_name); RETURN_FALSE; } if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Extlang */ result = append_multiple_key_values(loc_name, hash_arr , LOC_EXTLANG_TAG TSRMLS_CC); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Script */ result = append_key_value(loc_name, hash_arr , LOC_SCRIPT_TAG); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Region */ result = append_key_value( loc_name, hash_arr , LOC_REGION_TAG); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Variant */ result = append_multiple_key_values( loc_name, hash_arr , LOC_VARIANT_TAG TSRMLS_CC); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Private */ result = append_multiple_key_values( loc_name, hash_arr , LOC_PRIVATE_TAG TSRMLS_CC); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } RETURN_SMART_STR(loc_name); }
PHP_FUNCTION(locale_compose) { smart_str loc_name_s = {0}; smart_str *loc_name = &loc_name_s; zval* arr = NULL; HashTable* hash_arr = NULL; int result = 0; intl_error_reset( NULL TSRMLS_CC ); if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "a", &arr) == FAILURE) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_compose: unable to parse input params", 0 TSRMLS_CC ); RETURN_FALSE; } hash_arr = HASH_OF( arr ); if( !hash_arr || zend_hash_num_elements( hash_arr ) == 0 ) RETURN_FALSE; /* Check for grandfathered first */ result = append_key_value(loc_name, hash_arr, LOC_GRANDFATHERED_LANG_TAG); if( result == SUCCESS){ RETURN_SMART_STR(loc_name); } if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Not grandfathered */ result = append_key_value(loc_name, hash_arr , LOC_LANG_TAG); if( result == LOC_NOT_FOUND ){ intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_compose: parameter array does not contain 'language' tag.", 0 TSRMLS_CC ); smart_str_free(loc_name); RETURN_FALSE; } if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Extlang */ result = append_multiple_key_values(loc_name, hash_arr , LOC_EXTLANG_TAG TSRMLS_CC); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Script */ result = append_key_value(loc_name, hash_arr , LOC_SCRIPT_TAG); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Region */ result = append_key_value( loc_name, hash_arr , LOC_REGION_TAG); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Variant */ result = append_multiple_key_values( loc_name, hash_arr , LOC_VARIANT_TAG TSRMLS_CC); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Private */ result = append_multiple_key_values( loc_name, hash_arr , LOC_PRIVATE_TAG TSRMLS_CC); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } RETURN_SMART_STR(loc_name); }
C
php-src
1
CVE-2015-8816
https://www.cvedetails.com/cve/CVE-2015-8816/
null
https://github.com/torvalds/linux/commit/e50293ef9775c5f1cf3fcc093037dd6a8c5684ea
e50293ef9775c5f1cf3fcc093037dd6a8c5684ea
USB: fix invalid memory access in hub_activate() Commit 8520f38099cc ("USB: change hub initialization sleeps to delayed_work") changed the hub_activate() routine to make part of it run in a workqueue. However, the commit failed to take a reference to the usb_hub structure or to lock the hub interface while doing so. As a result, if a hub is plugged in and quickly unplugged before the work routine can run, the routine will try to access memory that has been deallocated. Or, if the hub is unplugged while the routine is running, the memory may be deallocated while it is in active use. This patch fixes the problem by taking a reference to the usb_hub at the start of hub_activate() and releasing it at the end (when the work is finished), and by locking the hub interface while the work routine is running. It also adds a check at the start of the routine to see if the hub has already been disconnected, in which nothing should be done. Signed-off-by: Alan Stern <stern@rowland.harvard.edu> Reported-by: Alexandru Cornea <alexandru.cornea@intel.com> Tested-by: Alexandru Cornea <alexandru.cornea@intel.com> Fixes: 8520f38099cc ("USB: change hub initialization sleeps to delayed_work") CC: <stable@vger.kernel.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
int hub_port_debounce(struct usb_hub *hub, int port1, bool must_be_connected) { int ret; u16 portchange, portstatus; unsigned connection = 0xffff; int total_time, stable_time = 0; struct usb_port *port_dev = hub->ports[port1 - 1]; for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) { ret = hub_port_status(hub, port1, &portstatus, &portchange); if (ret < 0) return ret; if (!(portchange & USB_PORT_STAT_C_CONNECTION) && (portstatus & USB_PORT_STAT_CONNECTION) == connection) { if (!must_be_connected || (connection == USB_PORT_STAT_CONNECTION)) stable_time += HUB_DEBOUNCE_STEP; if (stable_time >= HUB_DEBOUNCE_STABLE) break; } else { stable_time = 0; connection = portstatus & USB_PORT_STAT_CONNECTION; } if (portchange & USB_PORT_STAT_C_CONNECTION) { usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_CONNECTION); } if (total_time >= HUB_DEBOUNCE_TIMEOUT) break; msleep(HUB_DEBOUNCE_STEP); } dev_dbg(&port_dev->dev, "debounce total %dms stable %dms status 0x%x\n", total_time, stable_time, portstatus); if (stable_time < HUB_DEBOUNCE_STABLE) return -ETIMEDOUT; return portstatus; }
int hub_port_debounce(struct usb_hub *hub, int port1, bool must_be_connected) { int ret; u16 portchange, portstatus; unsigned connection = 0xffff; int total_time, stable_time = 0; struct usb_port *port_dev = hub->ports[port1 - 1]; for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) { ret = hub_port_status(hub, port1, &portstatus, &portchange); if (ret < 0) return ret; if (!(portchange & USB_PORT_STAT_C_CONNECTION) && (portstatus & USB_PORT_STAT_CONNECTION) == connection) { if (!must_be_connected || (connection == USB_PORT_STAT_CONNECTION)) stable_time += HUB_DEBOUNCE_STEP; if (stable_time >= HUB_DEBOUNCE_STABLE) break; } else { stable_time = 0; connection = portstatus & USB_PORT_STAT_CONNECTION; } if (portchange & USB_PORT_STAT_C_CONNECTION) { usb_clear_port_feature(hub->hdev, port1, USB_PORT_FEAT_C_CONNECTION); } if (total_time >= HUB_DEBOUNCE_TIMEOUT) break; msleep(HUB_DEBOUNCE_STEP); } dev_dbg(&port_dev->dev, "debounce total %dms stable %dms status 0x%x\n", total_time, stable_time, portstatus); if (stable_time < HUB_DEBOUNCE_STABLE) return -ETIMEDOUT; return portstatus; }
C
linux
0
null
null
null
https://github.com/chromium/chromium/commit/bfa69d49b17f33635c79f79819b90a8d2089c4b3
bfa69d49b17f33635c79f79819b90a8d2089c4b3
Change notification cmd line enabling to use the new RuntimeEnabledFeatures code. BUG=25318 TEST=none Review URL: http://codereview.chromium.org/339093 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@30660 0039d316-1c4b-4281-b951-d872f2087c98
void BrowserRenderProcessHost::BadMessageTerminateProcess( uint16 msg_type, base::ProcessHandle process) { LOG(ERROR) << "bad message " << msg_type << " terminating renderer."; if (BrowserRenderProcessHost::run_renderer_in_process()) { CHECK(false); } NOTREACHED(); base::KillProcess(process, ResultCodes::KILLED_BAD_MESSAGE, false); }
void BrowserRenderProcessHost::BadMessageTerminateProcess( uint16 msg_type, base::ProcessHandle process) { LOG(ERROR) << "bad message " << msg_type << " terminating renderer."; if (BrowserRenderProcessHost::run_renderer_in_process()) { CHECK(false); } NOTREACHED(); base::KillProcess(process, ResultCodes::KILLED_BAD_MESSAGE, false); }
C
Chrome
0
CVE-2011-1428
https://www.cvedetails.com/cve/CVE-2011-1428/
CWE-20
https://git.savannah.gnu.org/gitweb/?p=weechat.git;a=commit;h=c265cad1c95b84abfd4e8d861f25926ef13b5d91
c265cad1c95b84abfd4e8d861f25926ef13b5d91
null
irc_server_get_prefix_modes (struct t_irc_server *server) { return (server && server->prefix_modes) ? server->prefix_modes : irc_server_prefix_modes_default; }
irc_server_get_prefix_modes (struct t_irc_server *server) { return (server && server->prefix_modes) ? server->prefix_modes : irc_server_prefix_modes_default; }
C
savannah
0
CVE-2016-10208
https://www.cvedetails.com/cve/CVE-2016-10208/
CWE-125
https://github.com/torvalds/linux/commit/3a4b77cd47bb837b8557595ec7425f281f2ca1fe
3a4b77cd47bb837b8557595ec7425f281f2ca1fe
ext4: validate s_first_meta_bg at mount time Ralf Spenneberg reported that he hit a kernel crash when mounting a modified ext4 image. And it turns out that kernel crashed when calculating fs overhead (ext4_calculate_overhead()), this is because the image has very large s_first_meta_bg (debug code shows it's 842150400), and ext4 overruns the memory in count_overhead() when setting bitmap buffer, which is PAGE_SIZE. ext4_calculate_overhead(): buf = get_zeroed_page(GFP_NOFS); <=== PAGE_SIZE buffer blks = count_overhead(sb, i, buf); count_overhead(): for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) { <=== j = 842150400 ext4_set_bit(EXT4_B2C(sbi, s++), buf); <=== buffer overrun count++; } This can be reproduced easily for me by this script: #!/bin/bash rm -f fs.img mkdir -p /mnt/ext4 fallocate -l 16M fs.img mke2fs -t ext4 -O bigalloc,meta_bg,^resize_inode -F fs.img debugfs -w -R "ssv first_meta_bg 842150400" fs.img mount -o loop fs.img /mnt/ext4 Fix it by validating s_first_meta_bg first at mount time, and refusing to mount if its value exceeds the largest possible meta_bg number. Reported-by: Ralf Spenneberg <ralf@os-t.de> Signed-off-by: Eryu Guan <guaneryu@gmail.com> Signed-off-by: Theodore Ts'o <tytso@mit.edu> Reviewed-by: Andreas Dilger <adilger@dilger.ca>
static void print_daily_error_info(unsigned long arg) { struct super_block *sb = (struct super_block *) arg; struct ext4_sb_info *sbi; struct ext4_super_block *es; sbi = EXT4_SB(sb); es = sbi->s_es; if (es->s_error_count) /* fsck newer than v1.41.13 is needed to clean this condition. */ ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u", le32_to_cpu(es->s_error_count)); if (es->s_first_error_time) { printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d", sb->s_id, le32_to_cpu(es->s_first_error_time), (int) sizeof(es->s_first_error_func), es->s_first_error_func, le32_to_cpu(es->s_first_error_line)); if (es->s_first_error_ino) printk(KERN_CONT ": inode %u", le32_to_cpu(es->s_first_error_ino)); if (es->s_first_error_block) printk(KERN_CONT ": block %llu", (unsigned long long) le64_to_cpu(es->s_first_error_block)); printk(KERN_CONT "\n"); } if (es->s_last_error_time) { printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d", sb->s_id, le32_to_cpu(es->s_last_error_time), (int) sizeof(es->s_last_error_func), es->s_last_error_func, le32_to_cpu(es->s_last_error_line)); if (es->s_last_error_ino) printk(KERN_CONT ": inode %u", le32_to_cpu(es->s_last_error_ino)); if (es->s_last_error_block) printk(KERN_CONT ": block %llu", (unsigned long long) le64_to_cpu(es->s_last_error_block)); printk(KERN_CONT "\n"); } mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */ }
static void print_daily_error_info(unsigned long arg) { struct super_block *sb = (struct super_block *) arg; struct ext4_sb_info *sbi; struct ext4_super_block *es; sbi = EXT4_SB(sb); es = sbi->s_es; if (es->s_error_count) /* fsck newer than v1.41.13 is needed to clean this condition. */ ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u", le32_to_cpu(es->s_error_count)); if (es->s_first_error_time) { printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d", sb->s_id, le32_to_cpu(es->s_first_error_time), (int) sizeof(es->s_first_error_func), es->s_first_error_func, le32_to_cpu(es->s_first_error_line)); if (es->s_first_error_ino) printk(KERN_CONT ": inode %u", le32_to_cpu(es->s_first_error_ino)); if (es->s_first_error_block) printk(KERN_CONT ": block %llu", (unsigned long long) le64_to_cpu(es->s_first_error_block)); printk(KERN_CONT "\n"); } if (es->s_last_error_time) { printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d", sb->s_id, le32_to_cpu(es->s_last_error_time), (int) sizeof(es->s_last_error_func), es->s_last_error_func, le32_to_cpu(es->s_last_error_line)); if (es->s_last_error_ino) printk(KERN_CONT ": inode %u", le32_to_cpu(es->s_last_error_ino)); if (es->s_last_error_block) printk(KERN_CONT ": block %llu", (unsigned long long) le64_to_cpu(es->s_last_error_block)); printk(KERN_CONT "\n"); } mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */ }
C
linux
0
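The commit message in the record above explains that the crash came from trusting an attacker-controlled s_first_meta_bg, and that the fix is to validate it against the largest possible meta_bg number at mount time and refuse to mount otherwise. Below is a minimal sketch of that kind of sanity check; the helper and variable names (first_meta_bg_is_sane, db_count) are hypothetical rather than the exact ext4_fill_super() hunk.

/* Sketch of the mount-time bound check described above; hypothetical helper,
 * not the literal fs/ext4/super.c change. */
#include <stdint.h>
#include <stdbool.h>

static bool first_meta_bg_is_sane(uint32_t first_meta_bg, uint32_t db_count)
{
	/* A bogus on-disk value such as 842150400 is rejected here instead of
	 * later driving the bitmap loop in count_overhead() past one page. */
	return first_meta_bg <= db_count;
}

/* Conceptual use inside the mount path:
 *   if (!first_meta_bg_is_sane(le32_to_cpu(es->s_first_meta_bg), db_count))
 *           refuse to mount;
 */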
CVE-2013-0904
https://www.cvedetails.com/cve/CVE-2013-0904/
CWE-119
https://github.com/chromium/chromium/commit/b2b21468c1f7f08b30a7c1755316f6026c50eb2a
b2b21468c1f7f08b30a7c1755316f6026c50eb2a
Separate repaint and layout requirements of StyleDifference (Step 1) Previously StyleDifference was an enum that proximately bigger values imply smaller values (e.g. StyleDifferenceLayout implies StyleDifferenceRepaint). This causes unnecessary repaints in some cases on layout change. Convert StyleDifference to a structure containing relatively independent flags. This change doesn't directly improve the result, but can make further repaint optimizations possible. Step 1 doesn't change any functionality. RenderStyle still generate the legacy StyleDifference enum when comparing styles and convert the result to the new StyleDifference. Implicit requirements are not handled during the conversion. Converted call sites to use the new StyleDifference according to the following conversion rules: - diff == StyleDifferenceEqual (&& !context) => diff.hasNoChange() - diff == StyleDifferenceRepaint => diff.needsRepaintObjectOnly() - diff == StyleDifferenceRepaintLayer => diff.needsRepaintLayer() - diff == StyleDifferenceRepaint || diff == StyleDifferenceRepaintLayer => diff.needsRepaintLayer() - diff >= StyleDifferenceRepaint => diff.needsRepaint() || diff.needsLayout() - diff >= StyleDifferenceRepaintLayer => diff.needsRepaintLayer() || diff.needsLayout() - diff > StyleDifferenceRepaintLayer => diff.needsLayout() - diff == StyleDifferencePositionedMovementLayoutOnly => diff.needsPositionedMovementLayoutOnly() - diff == StyleDifferenceLayout => diff.needsFullLayout() BUG=358460 TEST=All existing layout tests. R=eseidel@chromium.org, esprehn@chromium.org, jchaffraix@chromium.org Committed: https://src.chromium.org/viewvc/blink?view=rev&revision=171983 Review URL: https://codereview.chromium.org/236203020 git-svn-id: svn://svn.chromium.org/blink/trunk@172331 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void RenderBlock::addContinuationWithOutline(RenderInline* flow) { ASSERT(!flow->layer() && !flow->isInlineElementContinuation()); ContinuationOutlineTableMap* table = continuationOutlineTable(); ListHashSet<RenderInline*>* continuations = table->get(this); if (!continuations) { continuations = new ListHashSet<RenderInline*>; table->set(this, adoptPtr(continuations)); } continuations->add(flow); }
void RenderBlock::addContinuationWithOutline(RenderInline* flow) { ASSERT(!flow->layer() && !flow->isInlineElementContinuation()); ContinuationOutlineTableMap* table = continuationOutlineTable(); ListHashSet<RenderInline*>* continuations = table->get(this); if (!continuations) { continuations = new ListHashSet<RenderInline*>; table->set(this, adoptPtr(continuations)); } continuations->add(flow); }
C
Chrome
0
CVE-2013-0837
https://www.cvedetails.com/cve/CVE-2013-0837/
CWE-20
https://github.com/chromium/chromium/commit/d333e22282bd4bdaa2864980cd45c272f206a44c
d333e22282bd4bdaa2864980cd45c272f206a44c
[BlackBerry] GraphicsLayer: rename notifySyncRequired to notifyFlushRequired https://bugs.webkit.org/show_bug.cgi?id=111997 Patch by Alberto Garcia <agarcia@igalia.com> on 2013-03-11 Reviewed by Rob Buis. This changed in r130439 but the old name was introduced again by mistake in r144465. * platform/graphics/blackberry/GraphicsLayerBlackBerry.h: (WebCore::GraphicsLayerBlackBerry::notifyFlushRequired): * platform/graphics/blackberry/LayerWebKitThread.cpp: (WebCore::LayerWebKitThread::setNeedsCommit): git-svn-id: svn://svn.chromium.org/blink/trunk@145363 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void LayerWebKitThread::remove(Vector<RefPtr<LayerWebKitThread> >& vector, LayerWebKitThread* sublayer) { int foundIndex = vector.find(sublayer); if (foundIndex == notFound) return; sublayer->setSuperlayer(0); vector.remove(foundIndex); setNeedsCommit(); }
void LayerWebKitThread::remove(Vector<RefPtr<LayerWebKitThread> >& vector, LayerWebKitThread* sublayer) { int foundIndex = vector.find(sublayer); if (foundIndex == notFound) return; sublayer->setSuperlayer(0); vector.remove(foundIndex); setNeedsCommit(); }
C
Chrome
0
CVE-2013-7421
https://www.cvedetails.com/cve/CVE-2013-7421/
CWE-264
https://github.com/torvalds/linux/commit/5d26a105b5a73e5635eae0629b42fa0a90e07b7b
5d26a105b5a73e5635eae0629b42fa0a90e07b7b
crypto: prefix module autoloading with "crypto-" This prefixes all crypto module loading with "crypto-" so we never run the risk of exposing module auto-loading to userspace via a crypto API, as demonstrated by Mathias Krause: https://lkml.org/lkml/2013/3/4/70 Signed-off-by: Kees Cook <keescook@chromium.org> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
static void camellia_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct camellia_sparc64_ctx *ctx = crypto_tfm_ctx(tfm); camellia_sparc64_crypt(&ctx->encrypt_key[0], (const u32 *) src, (u32 *) dst, ctx->key_len); }
static void camellia_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct camellia_sparc64_ctx *ctx = crypto_tfm_ctx(tfm); camellia_sparc64_crypt(&ctx->encrypt_key[0], (const u32 *) src, (u32 *) dst, ctx->key_len); }
C
linux
0
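The commit message in the record above describes the mitigation rather than a change visible in the quoted function: crypto algorithm lookups load modules under a "crypto-" prefixed alias, so userspace cannot abuse the crypto API to autoload arbitrary modules. The fragment below sketches how a cipher module declares such an alias; the module itself ("demo-cipher") is hypothetical.

/* Sketch only: declaring the "crypto-" prefixed module alias described
 * above for a hypothetical cipher module. */
#include <linux/module.h>
#include <linux/crypto.h>

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("demo cipher (illustrative sketch)");
/* MODULE_ALIAS_CRYPTO() emits a "crypto-demo-cipher" modinfo alias, matching
 * the crypto- prefixed request_module() lookups the crypto API performs
 * after this change. */
MODULE_ALIAS_CRYPTO("demo-cipher");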
CVE-2016-3746
https://www.cvedetails.com/cve/CVE-2016-3746/
null
https://android.googlesource.com/platform/hardware/qcom/media/+/5b82f4f90c3d531313714df4b936f92fb0ff15cf
5b82f4f90c3d531313714df4b936f92fb0ff15cf
DO NOT MERGE mm-video-v4l2: vdec: Avoid processing ETBs/FTBs in invalid states (per the spec) ETB/FTB should not be handled in states other than Executing, Paused and Idle. This avoids accessing invalid buffers. Also add a lock to protect the private-buffers from being deleted while accessing from another thread. Bug: 27890802 Security Vulnerability - Heap Use-After-Free and Possible LPE in MediaServer (libOmxVdec problem #6) CRs-Fixed: 1008882 Change-Id: Iaac2e383cd53cf9cf8042c9ed93ddc76dba3907e
OMX_ERRORTYPE omx_vdec::set_buffer_req(vdec_allocatorproperty *buffer_prop) { OMX_ERRORTYPE eRet = OMX_ErrorNone; unsigned buf_size = 0; struct v4l2_format fmt; struct v4l2_requestbuffers bufreq; int ret; DEBUG_PRINT_LOW("SetBufReq IN: ActCnt(%d) Size(%u)", buffer_prop->actualcount, (unsigned int)buffer_prop->buffer_size); buf_size = (buffer_prop->buffer_size + buffer_prop->alignment - 1)&(~(buffer_prop->alignment - 1)); if (buf_size != buffer_prop->buffer_size) { DEBUG_PRINT_ERROR("Buffer size alignment error: Requested(%u) Required(%d)", (unsigned int)buffer_prop->buffer_size, buf_size); eRet = OMX_ErrorBadParameter; } else { memset(&fmt, 0x0, sizeof(struct v4l2_format)); fmt.fmt.pix_mp.height = drv_ctx.video_resolution.frame_height; fmt.fmt.pix_mp.width = drv_ctx.video_resolution.frame_width; if (buffer_prop->buffer_type == VDEC_BUFFER_TYPE_INPUT) { fmt.type =V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; fmt.fmt.pix_mp.pixelformat = output_capability; fmt.fmt.pix_mp.plane_fmt[0].sizeimage = buf_size; } else if (buffer_prop->buffer_type == VDEC_BUFFER_TYPE_OUTPUT) { fmt.type =V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; fmt.fmt.pix_mp.pixelformat = capture_capability; } else { eRet = OMX_ErrorBadParameter; } ret = ioctl(drv_ctx.video_driver_fd, VIDIOC_S_FMT, &fmt); if (ret) { /*TODO: How to handle this case */ DEBUG_PRINT_ERROR("Setting buffer requirements (format) failed %d", ret); eRet = OMX_ErrorInsufficientResources; } bufreq.memory = V4L2_MEMORY_USERPTR; bufreq.count = buffer_prop->actualcount; if (buffer_prop->buffer_type == VDEC_BUFFER_TYPE_INPUT) { bufreq.type=V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; } else if (buffer_prop->buffer_type == VDEC_BUFFER_TYPE_OUTPUT) { bufreq.type=V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; } else { eRet = OMX_ErrorBadParameter; } if (eRet==OMX_ErrorNone) { ret = ioctl(drv_ctx.video_driver_fd,VIDIOC_REQBUFS, &bufreq); } if (ret) { DEBUG_PRINT_ERROR("Setting buffer requirements (reqbufs) failed %d", ret); /*TODO: How to handle this case */ eRet = OMX_ErrorInsufficientResources; } else if (bufreq.count < buffer_prop->actualcount) { DEBUG_PRINT_ERROR("Driver refused to change the number of buffers" " on v4l2 port %d to %d (prefers %d)", bufreq.type, buffer_prop->actualcount, bufreq.count); eRet = OMX_ErrorInsufficientResources; } else { if (!client_buffers.update_buffer_req()) { DEBUG_PRINT_ERROR("Setting c2D buffer requirements failed"); eRet = OMX_ErrorInsufficientResources; } } } return eRet; }
OMX_ERRORTYPE omx_vdec::set_buffer_req(vdec_allocatorproperty *buffer_prop) { OMX_ERRORTYPE eRet = OMX_ErrorNone; unsigned buf_size = 0; struct v4l2_format fmt; struct v4l2_requestbuffers bufreq; int ret; DEBUG_PRINT_LOW("SetBufReq IN: ActCnt(%d) Size(%u)", buffer_prop->actualcount, (unsigned int)buffer_prop->buffer_size); buf_size = (buffer_prop->buffer_size + buffer_prop->alignment - 1)&(~(buffer_prop->alignment - 1)); if (buf_size != buffer_prop->buffer_size) { DEBUG_PRINT_ERROR("Buffer size alignment error: Requested(%u) Required(%d)", (unsigned int)buffer_prop->buffer_size, buf_size); eRet = OMX_ErrorBadParameter; } else { memset(&fmt, 0x0, sizeof(struct v4l2_format)); fmt.fmt.pix_mp.height = drv_ctx.video_resolution.frame_height; fmt.fmt.pix_mp.width = drv_ctx.video_resolution.frame_width; if (buffer_prop->buffer_type == VDEC_BUFFER_TYPE_INPUT) { fmt.type =V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; fmt.fmt.pix_mp.pixelformat = output_capability; fmt.fmt.pix_mp.plane_fmt[0].sizeimage = buf_size; } else if (buffer_prop->buffer_type == VDEC_BUFFER_TYPE_OUTPUT) { fmt.type =V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; fmt.fmt.pix_mp.pixelformat = capture_capability; } else { eRet = OMX_ErrorBadParameter; } ret = ioctl(drv_ctx.video_driver_fd, VIDIOC_S_FMT, &fmt); if (ret) { /*TODO: How to handle this case */ DEBUG_PRINT_ERROR("Setting buffer requirements (format) failed %d", ret); eRet = OMX_ErrorInsufficientResources; } bufreq.memory = V4L2_MEMORY_USERPTR; bufreq.count = buffer_prop->actualcount; if (buffer_prop->buffer_type == VDEC_BUFFER_TYPE_INPUT) { bufreq.type=V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; } else if (buffer_prop->buffer_type == VDEC_BUFFER_TYPE_OUTPUT) { bufreq.type=V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; } else { eRet = OMX_ErrorBadParameter; } if (eRet==OMX_ErrorNone) { ret = ioctl(drv_ctx.video_driver_fd,VIDIOC_REQBUFS, &bufreq); } if (ret) { DEBUG_PRINT_ERROR("Setting buffer requirements (reqbufs) failed %d", ret); /*TODO: How to handle this case */ eRet = OMX_ErrorInsufficientResources; } else if (bufreq.count < buffer_prop->actualcount) { DEBUG_PRINT_ERROR("Driver refused to change the number of buffers" " on v4l2 port %d to %d (prefers %d)", bufreq.type, buffer_prop->actualcount, bufreq.count); eRet = OMX_ErrorInsufficientResources; } else { if (!client_buffers.update_buffer_req()) { DEBUG_PRINT_ERROR("Setting c2D buffer requirements failed"); eRet = OMX_ErrorInsufficientResources; } } } return eRet; }
C
Android
0
CVE-2017-5019
https://www.cvedetails.com/cve/CVE-2017-5019/
CWE-416
https://github.com/chromium/chromium/commit/f03ea5a5c2ff26e239dfd23e263b15da2d9cee93
f03ea5a5c2ff26e239dfd23e263b15da2d9cee93
Convert FrameHostMsg_DidAddMessageToConsole to Mojo. Note: Since this required changing the test RenderViewImplTest.DispatchBeforeUnloadCanDetachFrame, I manually re-introduced https://crbug.com/666714 locally (the bug the test was added for), and reran the test to confirm that it still covers the bug. Bug: 786836 Change-Id: I110668fa6f0f261fd2ac36bb91a8d8b31c99f4f1 Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1526270 Commit-Queue: Lowell Manners <lowell@chromium.org> Reviewed-by: Daniel Cheng <dcheng@chromium.org> Reviewed-by: Camille Lamy <clamy@chromium.org> Cr-Commit-Position: refs/heads/master@{#653137}
int RenderFrameImpl::UniqueNameFrameAdapter::GetChildCount() const { int child_count = 0; for (blink::WebFrame* frame = GetWebFrame()->FirstChild(); frame; frame = frame->NextSibling()) { ++child_count; } return child_count; }
int RenderFrameImpl::UniqueNameFrameAdapter::GetChildCount() const { int child_count = 0; for (blink::WebFrame* frame = GetWebFrame()->FirstChild(); frame; frame = frame->NextSibling()) { ++child_count; } return child_count; }
C
Chrome
0
CVE-2017-16805
https://www.cvedetails.com/cve/CVE-2017-16805/
CWE-125
https://github.com/radare/radare2/commit/2ca9ab45891b6ae8e32b6c28c81eebca059cbe5d
2ca9ab45891b6ae8e32b6c28c81eebca059cbe5d
Fix #8813 - segfault in dwarf parser
static int r_bin_dwarf_init_debug_abbrev(RBinDwarfDebugAbbrev *da) { if (!da) { return -EINVAL; } da->decls = calloc (sizeof (RBinDwarfAbbrevDecl), DEBUG_ABBREV_CAP); if (!da->decls) { return -ENOMEM; } da->capacity = DEBUG_ABBREV_CAP; da->length = 0; return 0; }
static int r_bin_dwarf_init_debug_abbrev(RBinDwarfDebugAbbrev *da) { if (!da) { return -EINVAL; } da->decls = calloc (sizeof (RBinDwarfAbbrevDecl), DEBUG_ABBREV_CAP); if (!da->decls) { return -ENOMEM; } da->capacity = DEBUG_ABBREV_CAP; da->length = 0; return 0; }
C
radare2
0
null
null
null
https://github.com/chromium/chromium/commit/04cca6c05e4923f1b91e0dddf053e088456d8645
04cca6c05e4923f1b91e0dddf053e088456d8645
https://bugs.webkit.org/show_bug.cgi?id=45164 Reviewed by Dan Bernstein. REGRESSION: <a><img align=top></a> Clickable area too large Make sure to clamp hit testing of quirky inline flow boxes the same way we already clamped painting. Source/WebCore: * rendering/InlineFlowBox.cpp: (WebCore::InlineFlowBox::nodeAtPoint): LayoutTests: * fast/inline/inline-position-top-align-expected.txt: Added. * fast/inline/inline-position-top-align.html: Added. git-svn-id: svn://svn.chromium.org/blink/trunk@81055 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void InlineFlowBox::extractLine() { if (!m_extracted) extractLineBoxFromRenderObject(); for (InlineBox* child = firstChild(); child; child = child->nextOnLine()) child->extractLine(); }
void InlineFlowBox::extractLine() { if (!m_extracted) extractLineBoxFromRenderObject(); for (InlineBox* child = firstChild(); child; child = child->nextOnLine()) child->extractLine(); }
C
Chrome
0
CVE-2017-18187
https://www.cvedetails.com/cve/CVE-2017-18187/
CWE-190
https://github.com/ARMmbed/mbedtls/commit/83c9f495ffe70c7dd280b41fdfd4881485a3bc28
83c9f495ffe70c7dd280b41fdfd4881485a3bc28
Prevent bounds check bypass through overflow in PSK identity parsing The check `if( *p + n > end )` in `ssl_parse_client_psk_identity` is unsafe because `*p + n` might overflow, thus bypassing the check. As `n` is a user-specified value up to 65K, this is relevant if the library happens to be located in the last 65K of virtual memory. This commit replaces the check by a safe version.
static int ssl_parse_servername_ext( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) { int ret; size_t servername_list_size, hostname_len; const unsigned char *p; MBEDTLS_SSL_DEBUG_MSG( 3, ( "parse ServerName extension" ) ); servername_list_size = ( ( buf[0] << 8 ) | ( buf[1] ) ); if( servername_list_size + 2 != len ) { MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) ); mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL, MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR ); return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO ); } p = buf + 2; while( servername_list_size > 0 ) { hostname_len = ( ( p[1] << 8 ) | p[2] ); if( hostname_len + 3 > servername_list_size ) { MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) ); mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL, MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR ); return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO ); } if( p[0] == MBEDTLS_TLS_EXT_SERVERNAME_HOSTNAME ) { ret = ssl->conf->f_sni( ssl->conf->p_sni, ssl, p + 3, hostname_len ); if( ret != 0 ) { MBEDTLS_SSL_DEBUG_RET( 1, "ssl_sni_wrapper", ret ); mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL, MBEDTLS_SSL_ALERT_MSG_UNRECOGNIZED_NAME ); return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO ); } return( 0 ); } servername_list_size -= hostname_len + 3; p += hostname_len + 3; } if( servername_list_size != 0 ) { MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) ); mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL, MBEDTLS_SSL_ALERT_MSG_ILLEGAL_PARAMETER ); return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO ); } return( 0 ); }
static int ssl_parse_servername_ext( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) { int ret; size_t servername_list_size, hostname_len; const unsigned char *p; MBEDTLS_SSL_DEBUG_MSG( 3, ( "parse ServerName extension" ) ); servername_list_size = ( ( buf[0] << 8 ) | ( buf[1] ) ); if( servername_list_size + 2 != len ) { MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) ); mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL, MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR ); return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO ); } p = buf + 2; while( servername_list_size > 0 ) { hostname_len = ( ( p[1] << 8 ) | p[2] ); if( hostname_len + 3 > servername_list_size ) { MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) ); mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL, MBEDTLS_SSL_ALERT_MSG_DECODE_ERROR ); return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO ); } if( p[0] == MBEDTLS_TLS_EXT_SERVERNAME_HOSTNAME ) { ret = ssl->conf->f_sni( ssl->conf->p_sni, ssl, p + 3, hostname_len ); if( ret != 0 ) { MBEDTLS_SSL_DEBUG_RET( 1, "ssl_sni_wrapper", ret ); mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL, MBEDTLS_SSL_ALERT_MSG_UNRECOGNIZED_NAME ); return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO ); } return( 0 ); } servername_list_size -= hostname_len + 3; p += hostname_len + 3; } if( servername_list_size != 0 ) { MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) ); mbedtls_ssl_send_alert_message( ssl, MBEDTLS_SSL_ALERT_LEVEL_FATAL, MBEDTLS_SSL_ALERT_MSG_ILLEGAL_PARAMETER ); return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO ); } return( 0 ); }
C
mbedtls
0
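The commit message in the record above spells out the technique: a check of the form `if( *p + n > end )` can be bypassed when `*p + n` wraps, so the bound must be expressed as a length comparison instead of pointer arithmetic. The helper below is a minimal sketch of that safe form; it assumes p already points into the buffer (p <= end) and is not the literal mbedtls replacement.

/* Minimal sketch of the overflow-safe bounds check described above.
 * Assumes p <= end on entry; not the literal mbedtls code. */
#include <stddef.h>

static int check_remaining(const unsigned char *p,
                           const unsigned char *end, size_t n)
{
	/* Unsafe form:  if (p + n > end)  -- p + n may wrap and pass the test.
	 * Safe form: compare the requested length against the bytes left. */
	if (n > (size_t)(end - p))
		return -1;	/* would read past the end of the message */
	return 0;
}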
CVE-2017-18200
https://www.cvedetails.com/cve/CVE-2017-18200/
CWE-20
https://github.com/torvalds/linux/commit/638164a2718f337ea224b747cf5977ef143166a4
638164a2718f337ea224b747cf5977ef143166a4
f2fs: fix potential panic during fstrim As Ju Hyung Park reported: "When 'fstrim' is called for manual trim, a BUG() can be triggered randomly with this patch. I'm seeing this issue on both x86 Desktop and arm64 Android phone. On x86 Desktop, this was caused during Ubuntu boot-up. I have a cronjob installed which calls 'fstrim -v /' during boot. On arm64 Android, this was caused during GC looping with 1ms gc_min_sleep_time & gc_max_sleep_time." Root cause of this issue is that f2fs_wait_discard_bios can only be used by f2fs_put_super, because during put_super there must be no other referrers, so it can ignore discard entry's reference count when removing the entry, otherwise in other caller we will hit bug_on in __remove_discard_cmd as there may be other issuer added reference count in discard entry. Thread A Thread B - issue_discard_thread - f2fs_ioc_fitrim - f2fs_trim_fs - f2fs_wait_discard_bios - __issue_discard_cmd - __submit_discard_cmd - __wait_discard_cmd - dc->ref++ - __wait_one_discard_bio - __wait_discard_cmd - __remove_discard_cmd - f2fs_bug_on(sbi, dc->ref) Fixes: 969d1b180d987c2be02de890d0fff0f66a0e80de Reported-by: Ju Hyung Park <qkrwngud825@gmail.com> Signed-off-by: Chao Yu <yuchao0@huawei.com> Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct f2fs_sb_info *sbi = F2FS_SB(sb); u64 id = huge_encode_dev(sb->s_bdev->bd_dev); block_t total_count, user_block_count, start_count, ovp_count; u64 avail_node_count; total_count = le64_to_cpu(sbi->raw_super->block_count); user_block_count = sbi->user_block_count; start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr); ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg; buf->f_type = F2FS_SUPER_MAGIC; buf->f_bsize = sbi->blocksize; buf->f_blocks = total_count - start_count; buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count; buf->f_bavail = user_block_count - valid_user_blocks(sbi) - sbi->reserved_blocks; avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM; if (avail_node_count > user_block_count) { buf->f_files = user_block_count; buf->f_ffree = buf->f_bavail; } else { buf->f_files = avail_node_count; buf->f_ffree = min(avail_node_count - valid_node_count(sbi), buf->f_bavail); } buf->f_namelen = F2FS_NAME_LEN; buf->f_fsid.val[0] = (u32)id; buf->f_fsid.val[1] = (u32)(id >> 32); #ifdef CONFIG_QUOTA if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) && sb_has_quota_limits_enabled(sb, PRJQUOTA)) { f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf); } #endif return 0; }
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct f2fs_sb_info *sbi = F2FS_SB(sb); u64 id = huge_encode_dev(sb->s_bdev->bd_dev); block_t total_count, user_block_count, start_count, ovp_count; u64 avail_node_count; total_count = le64_to_cpu(sbi->raw_super->block_count); user_block_count = sbi->user_block_count; start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr); ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg; buf->f_type = F2FS_SUPER_MAGIC; buf->f_bsize = sbi->blocksize; buf->f_blocks = total_count - start_count; buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count; buf->f_bavail = user_block_count - valid_user_blocks(sbi) - sbi->reserved_blocks; avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM; if (avail_node_count > user_block_count) { buf->f_files = user_block_count; buf->f_ffree = buf->f_bavail; } else { buf->f_files = avail_node_count; buf->f_ffree = min(avail_node_count - valid_node_count(sbi), buf->f_bavail); } buf->f_namelen = F2FS_NAME_LEN; buf->f_fsid.val[0] = (u32)id; buf->f_fsid.val[1] = (u32)(id >> 32); #ifdef CONFIG_QUOTA if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) && sb_has_quota_limits_enabled(sb, PRJQUOTA)) { f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf); } #endif return 0; }
C
linux
0
CVE-2014-7815
https://www.cvedetails.com/cve/CVE-2014-7815/
CWE-264
https://git.qemu.org/?p=qemu.git;a=commit;h=e6908bfe8e07f2b452e78e677da1b45b1c0f6829
e6908bfe8e07f2b452e78e677da1b45b1c0f6829
null
static int vnc_refresh_server_surface(VncDisplay *vd) { int width = MIN(pixman_image_get_width(vd->guest.fb), pixman_image_get_width(vd->server)); int height = MIN(pixman_image_get_height(vd->guest.fb), pixman_image_get_height(vd->server)); int cmp_bytes, server_stride, min_stride, guest_stride, y = 0; uint8_t *guest_row0 = NULL, *server_row0; VncState *vs; int has_dirty = 0; pixman_image_t *tmpbuf = NULL; struct timeval tv = { 0, 0 }; if (!vd->non_adaptive) { gettimeofday(&tv, NULL); has_dirty = vnc_update_stats(vd, &tv); } /* * Walk through the guest dirty map. * Check and copy modified bits from guest to server surface. * Update server dirty map. */ server_row0 = (uint8_t *)pixman_image_get_data(vd->server); server_stride = guest_stride = pixman_image_get_stride(vd->server); cmp_bytes = MIN(VNC_DIRTY_PIXELS_PER_BIT * VNC_SERVER_FB_BYTES, server_stride); if (vd->guest.format != VNC_SERVER_FB_FORMAT) { int width = pixman_image_get_width(vd->server); tmpbuf = qemu_pixman_linebuf_create(VNC_SERVER_FB_FORMAT, width); } else { guest_row0 = (uint8_t *)pixman_image_get_data(vd->guest.fb); guest_stride = pixman_image_get_stride(vd->guest.fb); } min_stride = MIN(server_stride, guest_stride); for (;;) { int x; uint8_t *guest_ptr, *server_ptr; unsigned long offset = find_next_bit((unsigned long *) &vd->guest.dirty, height * VNC_DIRTY_BPL(&vd->guest), y * VNC_DIRTY_BPL(&vd->guest)); if (offset == height * VNC_DIRTY_BPL(&vd->guest)) { /* no more dirty bits */ break; } y = offset / VNC_DIRTY_BPL(&vd->guest); x = offset % VNC_DIRTY_BPL(&vd->guest); server_ptr = server_row0 + y * server_stride + x * cmp_bytes; if (vd->guest.format != VNC_SERVER_FB_FORMAT) { qemu_pixman_linebuf_fill(tmpbuf, vd->guest.fb, width, 0, y); guest_ptr = (uint8_t *)pixman_image_get_data(tmpbuf); } else { guest_ptr = guest_row0 + y * guest_stride; } guest_ptr += x * cmp_bytes; for (; x < DIV_ROUND_UP(width, VNC_DIRTY_PIXELS_PER_BIT); x++, guest_ptr += cmp_bytes, server_ptr += cmp_bytes) { int _cmp_bytes = cmp_bytes; if (!test_and_clear_bit(x, vd->guest.dirty[y])) { continue; } if ((x + 1) * cmp_bytes > min_stride) { _cmp_bytes = min_stride - x * cmp_bytes; } if (memcmp(server_ptr, guest_ptr, _cmp_bytes) == 0) { continue; } memcpy(server_ptr, guest_ptr, _cmp_bytes); if (!vd->non_adaptive) { vnc_rect_updated(vd, x * VNC_DIRTY_PIXELS_PER_BIT, y, &tv); } QTAILQ_FOREACH(vs, &vd->clients, next) { set_bit(x, vs->dirty[y]); } has_dirty++; } y++; } qemu_pixman_image_unref(tmpbuf); return has_dirty; }
static int vnc_refresh_server_surface(VncDisplay *vd) { int width = MIN(pixman_image_get_width(vd->guest.fb), pixman_image_get_width(vd->server)); int height = MIN(pixman_image_get_height(vd->guest.fb), pixman_image_get_height(vd->server)); int cmp_bytes, server_stride, min_stride, guest_stride, y = 0; uint8_t *guest_row0 = NULL, *server_row0; VncState *vs; int has_dirty = 0; pixman_image_t *tmpbuf = NULL; struct timeval tv = { 0, 0 }; if (!vd->non_adaptive) { gettimeofday(&tv, NULL); has_dirty = vnc_update_stats(vd, &tv); } /* * Walk through the guest dirty map. * Check and copy modified bits from guest to server surface. * Update server dirty map. */ server_row0 = (uint8_t *)pixman_image_get_data(vd->server); server_stride = guest_stride = pixman_image_get_stride(vd->server); cmp_bytes = MIN(VNC_DIRTY_PIXELS_PER_BIT * VNC_SERVER_FB_BYTES, server_stride); if (vd->guest.format != VNC_SERVER_FB_FORMAT) { int width = pixman_image_get_width(vd->server); tmpbuf = qemu_pixman_linebuf_create(VNC_SERVER_FB_FORMAT, width); } else { guest_row0 = (uint8_t *)pixman_image_get_data(vd->guest.fb); guest_stride = pixman_image_get_stride(vd->guest.fb); } min_stride = MIN(server_stride, guest_stride); for (;;) { int x; uint8_t *guest_ptr, *server_ptr; unsigned long offset = find_next_bit((unsigned long *) &vd->guest.dirty, height * VNC_DIRTY_BPL(&vd->guest), y * VNC_DIRTY_BPL(&vd->guest)); if (offset == height * VNC_DIRTY_BPL(&vd->guest)) { /* no more dirty bits */ break; } y = offset / VNC_DIRTY_BPL(&vd->guest); x = offset % VNC_DIRTY_BPL(&vd->guest); server_ptr = server_row0 + y * server_stride + x * cmp_bytes; if (vd->guest.format != VNC_SERVER_FB_FORMAT) { qemu_pixman_linebuf_fill(tmpbuf, vd->guest.fb, width, 0, y); guest_ptr = (uint8_t *)pixman_image_get_data(tmpbuf); } else { guest_ptr = guest_row0 + y * guest_stride; } guest_ptr += x * cmp_bytes; for (; x < DIV_ROUND_UP(width, VNC_DIRTY_PIXELS_PER_BIT); x++, guest_ptr += cmp_bytes, server_ptr += cmp_bytes) { int _cmp_bytes = cmp_bytes; if (!test_and_clear_bit(x, vd->guest.dirty[y])) { continue; } if ((x + 1) * cmp_bytes > min_stride) { _cmp_bytes = min_stride - x * cmp_bytes; } if (memcmp(server_ptr, guest_ptr, _cmp_bytes) == 0) { continue; } memcpy(server_ptr, guest_ptr, _cmp_bytes); if (!vd->non_adaptive) { vnc_rect_updated(vd, x * VNC_DIRTY_PIXELS_PER_BIT, y, &tv); } QTAILQ_FOREACH(vs, &vd->clients, next) { set_bit(x, vs->dirty[y]); } has_dirty++; } y++; } qemu_pixman_image_unref(tmpbuf); return has_dirty; }
C
qemu
0
CVE-2012-3552
https://www.cvedetails.com/cve/CVE-2012-3552/
CWE-362
https://github.com/torvalds/linux/commit/f6d8bd051c391c1c0458a30b2a7abcd939329259
f6d8bd051c391c1c0458a30b2a7abcd939329259
inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We cant insert an rcu_head in struct ip_options since its included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Cc: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>
static void tcp_v4_clear_md5_list(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); /* Free each key, then the set of key keys, * the crypto element, and then decrement our * hold on the last resort crypto. */ if (tp->md5sig_info->entries4) { int i; for (i = 0; i < tp->md5sig_info->entries4; i++) kfree(tp->md5sig_info->keys4[i].base.key); tp->md5sig_info->entries4 = 0; tcp_free_md5sig_pool(); } if (tp->md5sig_info->keys4) { kfree(tp->md5sig_info->keys4); tp->md5sig_info->keys4 = NULL; tp->md5sig_info->alloced4 = 0; } }
static void tcp_v4_clear_md5_list(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); /* Free each key, then the set of key keys, * the crypto element, and then decrement our * hold on the last resort crypto. */ if (tp->md5sig_info->entries4) { int i; for (i = 0; i < tp->md5sig_info->entries4; i++) kfree(tp->md5sig_info->keys4[i].base.key); tp->md5sig_info->entries4 = 0; tcp_free_md5sig_pool(); } if (tp->md5sig_info->keys4) { kfree(tp->md5sig_info->keys4); tp->md5sig_info->keys4 = NULL; tp->md5sig_info->alloced4 = 0; } }
C
linux
0
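The commit message in the record above describes the fix only in prose: wrap ip_options in an RCU-managed structure (ip_options_rcu with an rcu_head), have readers take what they need under rcu_read_lock(), and have writers publish a replacement and free the old copy after a grace period. The sketch below shows that generic RCU pointer-swap pattern with a hypothetical demo_opts structure; it is not the networking patch itself.

/* Generic RCU read/update sketch for the pattern described above.
 * demo_opts / demo_sock are hypothetical. */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_opts {
	struct rcu_head rcu;
	int optlen;
	unsigned char data[40];
};

struct demo_sock {
	struct demo_opts __rcu *opts;
};

static int demo_read_optlen(struct demo_sock *sk)
{
	struct demo_opts *o;
	int len = 0;

	rcu_read_lock();
	o = rcu_dereference(sk->opts);
	if (o)
		len = o->optlen;	/* copy out while readers are protected */
	rcu_read_unlock();
	return len;
}

/* Caller is assumed to hold the update-side lock (e.g. the socket lock). */
static void demo_set_opts(struct demo_sock *sk, struct demo_opts *new_opts)
{
	struct demo_opts *old;

	old = rcu_dereference_protected(sk->opts, 1);
	rcu_assign_pointer(sk->opts, new_opts);
	if (old)
		kfree_rcu(old, rcu);	/* freed only after a grace period */
}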
CVE-2019-13296
https://www.cvedetails.com/cve/CVE-2019-13296/
CWE-399
https://github.com/ImageMagick/ImageMagick/commit/ce08a3691a8ac29125e29fc41967b3737fa3f425
ce08a3691a8ac29125e29fc41967b3737fa3f425
https://github.com/ImageMagick/ImageMagick/issues/1604
WandPrivate MagickBooleanType CLISimpleOperatorImages(MagickCLI *cli_wand, const char *option,const char *arg1,const char *arg2,ExceptionInfo *exception) { #if !USE_WAND_METHODS size_t n, i; #endif assert(cli_wand != (MagickCLI *) NULL); assert(cli_wand->signature == MagickWandSignature); assert(cli_wand->wand.signature == MagickWandSignature); assert(cli_wand->wand.images != (Image *) NULL); /* images must be present */ if (cli_wand->wand.debug != MagickFalse) (void) CLILogEvent(cli_wand,CommandEvent,GetMagickModule(), "- Simple Operator: %s \"%s\" \"%s\"", option,arg1,arg2); #if !USE_WAND_METHODS /* FUTURE add appropriate tracing */ i=0; n=GetImageListLength(cli_wand->wand.images); cli_wand->wand.images=GetFirstImageInList(cli_wand->wand.images); while (1) { i++; CLISimpleOperatorImage(cli_wand, option, arg1, arg2,exception); if ( cli_wand->wand.images->next == (Image *) NULL ) break; cli_wand->wand.images=cli_wand->wand.images->next; } assert( i == n ); cli_wand->wand.images=GetFirstImageInList(cli_wand->wand.images); #else MagickResetIterator(&cli_wand->wand); while (MagickNextImage(&cli_wand->wand) != MagickFalse) (void) CLISimpleOperatorImage(cli_wand, option, arg1, arg2,exception); MagickResetIterator(&cli_wand->wand); #endif return(MagickTrue); }
WandPrivate MagickBooleanType CLISimpleOperatorImages(MagickCLI *cli_wand, const char *option,const char *arg1,const char *arg2,ExceptionInfo *exception) { #if !USE_WAND_METHODS size_t n, i; #endif assert(cli_wand != (MagickCLI *) NULL); assert(cli_wand->signature == MagickWandSignature); assert(cli_wand->wand.signature == MagickWandSignature); assert(cli_wand->wand.images != (Image *) NULL); /* images must be present */ if (cli_wand->wand.debug != MagickFalse) (void) CLILogEvent(cli_wand,CommandEvent,GetMagickModule(), "- Simple Operator: %s \"%s\" \"%s\"", option,arg1,arg2); #if !USE_WAND_METHODS /* FUTURE add appropriate tracing */ i=0; n=GetImageListLength(cli_wand->wand.images); cli_wand->wand.images=GetFirstImageInList(cli_wand->wand.images); while (1) { i++; CLISimpleOperatorImage(cli_wand, option, arg1, arg2,exception); if ( cli_wand->wand.images->next == (Image *) NULL ) break; cli_wand->wand.images=cli_wand->wand.images->next; } assert( i == n ); cli_wand->wand.images=GetFirstImageInList(cli_wand->wand.images); #else MagickResetIterator(&cli_wand->wand); while (MagickNextImage(&cli_wand->wand) != MagickFalse) (void) CLISimpleOperatorImage(cli_wand, option, arg1, arg2,exception); MagickResetIterator(&cli_wand->wand); #endif return(MagickTrue); }
C
ImageMagick
0
CVE-2017-5061
https://www.cvedetails.com/cve/CVE-2017-5061/
CWE-362
https://github.com/chromium/chromium/commit/5d78b84d39bd34bc9fce9d01c0dcd5a22a330d34
5d78b84d39bd34bc9fce9d01c0dcd5a22a330d34
(Reland) Discard compositor frames from unloaded web content This is a reland of https://codereview.chromium.org/2707243005/ with a small change to fix an uninitialized memory error that fails on MSAN bots. BUG=672847 TBR=danakj@chromium.org, creis@chromium.org CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_site_isolation Review-Url: https://codereview.chromium.org/2731283003 Cr-Commit-Position: refs/heads/master@{#454954}
void VerifyAfterValues(LayerImpl* layer) { EffectTree& tree = layer->layer_tree_impl()->property_trees()->effect_tree; EffectNode* node = tree.Node(layer->effect_tree_index()); switch (static_cast<Properties>(index_)) { case STARTUP: case DONE: break; case BOUNDS: EXPECT_EQ(gfx::Size(20, 20).ToString(), layer->bounds().ToString()); break; case HIDE_LAYER_AND_SUBTREE: EXPECT_EQ(tree.EffectiveOpacity(node), 0.f); break; case DRAWS_CONTENT: EXPECT_TRUE(layer->DrawsContent()); break; } }
void VerifyAfterValues(LayerImpl* layer) { EffectTree& tree = layer->layer_tree_impl()->property_trees()->effect_tree; EffectNode* node = tree.Node(layer->effect_tree_index()); switch (static_cast<Properties>(index_)) { case STARTUP: case DONE: break; case BOUNDS: EXPECT_EQ(gfx::Size(20, 20).ToString(), layer->bounds().ToString()); break; case HIDE_LAYER_AND_SUBTREE: EXPECT_EQ(tree.EffectiveOpacity(node), 0.f); break; case DRAWS_CONTENT: EXPECT_TRUE(layer->DrawsContent()); break; } }
C
Chrome
0
CVE-2019-1010239
https://www.cvedetails.com/cve/CVE-2019-1010239/
CWE-754
https://github.com/DaveGamble/cJSON/commit/be749d7efa7c9021da746e685bd6dec79f9dd99b
be749d7efa7c9021da746e685bd6dec79f9dd99b
Fix crash of cJSON_GetObjectItemCaseSensitive when calling it on arrays
CJSON_PUBLIC(void) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object, const char *string, cJSON *newitem) { replace_item_in_object(object, string, newitem, true); }
CJSON_PUBLIC(void) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object, const char *string, cJSON *newitem) { replace_item_in_object(object, string, newitem, true); }
C
cJSON
0
CVE-2012-2100
https://www.cvedetails.com/cve/CVE-2012-2100/
CWE-189
https://github.com/torvalds/linux/commit/d50f2ab6f050311dbf7b8f5501b25f0bf64a439b
d50f2ab6f050311dbf7b8f5501b25f0bf64a439b
ext4: fix undefined behavior in ext4_fill_flex_info() Commit 503358ae01b70ce6909d19dd01287093f6b6271c ("ext4: avoid divide by zero when trying to mount a corrupted file system") fixes CVE-2009-4307 by performing a sanity check on s_log_groups_per_flex, since it can be set to a bogus value by an attacker. sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex; groups_per_flex = 1 << sbi->s_log_groups_per_flex; if (groups_per_flex < 2) { ... } This patch fixes two potential issues in the previous commit. 1) The sanity check might only work on architectures like PowerPC. On x86, 5 bits are used for the shifting amount. That means, given a large s_log_groups_per_flex value like 36, groups_per_flex = 1 << 36 is essentially 1 << 4 = 16, rather than 0. This will bypass the check, leaving s_log_groups_per_flex and groups_per_flex inconsistent. 2) The sanity check relies on undefined behavior, i.e., oversized shift. A standard-confirming C compiler could rewrite the check in unexpected ways. Consider the following equivalent form, assuming groups_per_flex is unsigned for simplicity. groups_per_flex = 1 << sbi->s_log_groups_per_flex; if (groups_per_flex == 0 || groups_per_flex == 1) { We compile the code snippet using Clang 3.0 and GCC 4.6. Clang will completely optimize away the check groups_per_flex == 0, leaving the patched code as vulnerable as the original. GCC keeps the check, but there is no guarantee that future versions will do the same. Signed-off-by: Xi Wang <xi.wang@gmail.com> Signed-off-by: "Theodore Ts'o" <tytso@mit.edu> Cc: stable@vger.kernel.org
static int ext4_freeze(struct super_block *sb) { int error = 0; journal_t *journal; if (sb->s_flags & MS_RDONLY) return 0; journal = EXT4_SB(sb)->s_journal; /* Now we set up the journal barrier. */ jbd2_journal_lock_updates(journal); /* * Don't clear the needs_recovery flag if we failed to flush * the journal. */ error = jbd2_journal_flush(journal); if (error < 0) goto out; /* Journal blocked and flushed, clear needs_recovery flag. */ EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); error = ext4_commit_super(sb, 1); out: /* we rely on s_frozen to stop further updates */ jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); return error; }
static int ext4_freeze(struct super_block *sb) { int error = 0; journal_t *journal; if (sb->s_flags & MS_RDONLY) return 0; journal = EXT4_SB(sb)->s_journal; /* Now we set up the journal barrier. */ jbd2_journal_lock_updates(journal); /* * Don't clear the needs_recovery flag if we failed to flush * the journal. */ error = jbd2_journal_flush(journal); if (error < 0) goto out; /* Journal blocked and flushed, clear needs_recovery flag. */ EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); error = ext4_commit_super(sb, 1); out: /* we rely on s_frozen to stop further updates */ jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); return error; }
C
linux
0
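The commit message in the CVE-2012-2100 record above walks through why an oversized shift makes the original sanity check unreliable. Below is a minimal, self-contained C sketch of that point; the name log_groups_per_flex and the bounds chosen are illustrative assumptions, not the actual ext4_fill_flex_info() code.

#include <stdio.h>

/* Illustrative only: log_groups_per_flex stands in for the on-disk
 * s_log_groups_per_flex field quoted in the commit message. */
static int flex_log_is_sane(unsigned int log_groups_per_flex)
{
    /* Shifting a 32-bit 1 by 32 or more is undefined behaviour; on x86 the
     * shift count is masked to 5 bits, so 1 << 36 silently becomes
     * 1 << 4 == 16 and a later "groups_per_flex < 2" test no longer rejects
     * the value. Validate the exponent itself before ever shifting. */
    if (log_groups_per_flex < 1 || log_groups_per_flex >= 32)
        return 0;
    return 1;
}

int main(void)
{
    unsigned int bogus = 36;   /* attacker-controlled superblock value */
    unsigned int ok = 4;
    printf("bogus sane=%d, ok sane=%d\n",
           flex_log_is_sane(bogus), flex_log_is_sane(ok));   /* prints 0 and 1 */
    return 0;
}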
CVE-2011-3896
https://www.cvedetails.com/cve/CVE-2011-3896/
CWE-119
https://github.com/chromium/chromium/commit/5925dff83699508b5e2735afb0297dfb310e159d
5925dff83699508b5e2735afb0297dfb310e159d
Implement a bubble that appears at the top of the screen when a tab enters fullscreen mode via webkitRequestFullScreen(), telling the user how to exit fullscreen. This is implemented as an NSView rather than an NSWindow because the floating chrome that appears in presentation mode should overlap the bubble. Content-initiated fullscreen mode makes use of 'presentation mode' on the Mac: the mode in which the UI is hidden, accessible by moving the cursor to the top of the screen. On Snow Leopard, this mode is synonymous with fullscreen mode. On Lion, however, fullscreen mode does not imply presentation mode: in non-presentation fullscreen mode, the chrome is permanently shown. It is possible to switch between presentation mode and fullscreen mode using the presentation mode UI control. When a tab initiates fullscreen mode on Lion, we enter presentation mode if not in presentation mode already. When the user exits fullscreen mode using Chrome UI (i.e. keyboard shortcuts, menu items, buttons, switching tabs, etc.) we return the user to the mode they were in before the tab entered fullscreen. BUG=14471 TEST=Enter fullscreen mode using webkitRequestFullScreen. You should see a bubble pop down from the top of the screen. Need to test the Lion logic somehow, with no Lion trybots. BUG=96883 Original review http://codereview.chromium.org/7890056/ TBR=thakis Review URL: http://codereview.chromium.org/7920024 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@101624 0039d316-1c4b-4281-b951-d872f2087c98
void Browser::JSOutOfMemoryHelper(TabContents* tab) { TabContentsWrapper* tcw = TabContentsWrapper::GetCurrentWrapperForContents( tab); if (tcw) { tcw->infobar_tab_helper()->AddInfoBar(new SimpleAlertInfoBarDelegate( tab, NULL, l10n_util::GetStringUTF16(IDS_JS_OUT_OF_MEMORY_PROMPT), true)); } }
void Browser::JSOutOfMemoryHelper(TabContents* tab) { TabContentsWrapper* tcw = TabContentsWrapper::GetCurrentWrapperForContents( tab); if (tcw) { tcw->infobar_tab_helper()->AddInfoBar(new SimpleAlertInfoBarDelegate( tab, NULL, l10n_util::GetStringUTF16(IDS_JS_OUT_OF_MEMORY_PROMPT), true)); } }
C
Chrome
0
CVE-2013-0886
https://www.cvedetails.com/cve/CVE-2013-0886/
null
https://github.com/chromium/chromium/commit/18d67244984a574ba2dd8779faabc0e3e34f4b76
18d67244984a574ba2dd8779faabc0e3e34f4b76
Implement TextureImageTransportSurface using texture mailbox This has a couple of advantages: - allow tearing down and recreating the UI parent context without losing the renderer contexts - do not require a context to be able to generate textures when creating the GLSurfaceHandle - clearer ownership semantics that potentially allows for more robust and easier lost context handling/thumbnailing/etc., since a texture is at any given time owned by either: UI parent, mailbox, or TextureImageTransportSurface - simplify frontbuffer protection logic; the frontbuffer textures are now owned by RWHV where they are refcounted The TextureImageTransportSurface informs RenderWidgetHostView of the mailbox names for the front- and backbuffer textures by associating them with a surface_handle (1 or 2) in the AcceleratedSurfaceNew message. During SwapBuffers() or PostSubBuffer() cycles, it then uses produceTextureCHROMIUM() and consumeTextureCHROMIUM() to transfer ownership between renderer and browser compositor. RWHV sends back the surface_handle of the buffer being returned with the Swap ACK (or 0 if no buffer is being returned in which case TextureImageTransportSurface will allocate a new texture - note that this could be used to simply keep textures for thumbnailing). BUG=154815,139616 TBR=sky@chromium.org Review URL: https://chromiumcodereview.appspot.com/11194042 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@171569 0039d316-1c4b-4281-b951-d872f2087c98
TransportDIB* RenderProcessHostImpl::MapTransportDIB( TransportDIB::Id dib_id) { #if defined(OS_WIN) HANDLE section; DuplicateHandle(GetHandle(), dib_id.handle, GetCurrentProcess(), &section, STANDARD_RIGHTS_REQUIRED | FILE_MAP_READ | FILE_MAP_WRITE, FALSE, 0); return TransportDIB::Map(section); #elif defined(OS_MACOSX) return widget_helper_->MapTransportDIB(dib_id); #elif defined(OS_ANDROID) return TransportDIB::Map(dib_id); #elif defined(OS_POSIX) return TransportDIB::Map(dib_id.shmkey); #endif // defined(OS_POSIX) }
TransportDIB* RenderProcessHostImpl::MapTransportDIB( TransportDIB::Id dib_id) { #if defined(OS_WIN) HANDLE section; DuplicateHandle(GetHandle(), dib_id.handle, GetCurrentProcess(), &section, STANDARD_RIGHTS_REQUIRED | FILE_MAP_READ | FILE_MAP_WRITE, FALSE, 0); return TransportDIB::Map(section); #elif defined(OS_MACOSX) return widget_helper_->MapTransportDIB(dib_id); #elif defined(OS_ANDROID) return TransportDIB::Map(dib_id); #elif defined(OS_POSIX) return TransportDIB::Map(dib_id.shmkey); #endif // defined(OS_POSIX) }
C
Chrome
0
CVE-2018-16078
https://www.cvedetails.com/cve/CVE-2018-16078/
null
https://github.com/chromium/chromium/commit/b025e82307a8490501bb030266cd955c391abcb7
b025e82307a8490501bb030266cd955c391abcb7
[AF] Don't simplify/dedupe suggestions for (partially) filled sections. Since Autofill does not fill field by field anymore, this simplifying and deduping of suggestions is not useful anymore. Bug: 858820 Cq-Include-Trybots: luci.chromium.try:ios-simulator-full-configs;master.tryserver.chromium.mac:ios-simulator-cronet Change-Id: I36f7cfe425a0bdbf5ba7503a3d96773b405cc19b Reviewed-on: https://chromium-review.googlesource.com/1128255 Reviewed-by: Roger McFarlane <rogerm@chromium.org> Commit-Queue: Sebastien Seguin-Gagnon <sebsg@chromium.org> Cr-Commit-Position: refs/heads/master@{#573315}
void AutofillManager::SetExternalDelegate(AutofillExternalDelegate* delegate) { external_delegate_ = delegate; autocomplete_history_manager_->SetExternalDelegate(delegate); }
void AutofillManager::SetExternalDelegate(AutofillExternalDelegate* delegate) { external_delegate_ = delegate; autocomplete_history_manager_->SetExternalDelegate(delegate); }
C
Chrome
0
CVE-2016-7539
https://www.cvedetails.com/cve/CVE-2016-7539/
CWE-399
https://github.com/ImageMagick/ImageMagick/commit/4e81ce8b07219c69a9aeccb0f7f7b927ca6db74c
4e81ce8b07219c69a9aeccb0f7f7b927ca6db74c
http://www.imagemagick.org/discourse-server/viewtopic.php?f=2&t=28946
static PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return((PixelPacket *) NULL); assert(cache_info->signature == MagickSignature); assert(id < (int) cache_info->number_threads); return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows, cache_info->nexus_info[id],exception)); }
static PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return((PixelPacket *) NULL); assert(cache_info->signature == MagickSignature); assert(id < (int) cache_info->number_threads); return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows, cache_info->nexus_info[id],exception)); }
C
ImageMagick
0
CVE-2015-1352
https://www.cvedetails.com/cve/CVE-2015-1352/
null
https://git.php.net/?p=php-src.git;a=commit;h=124fb22a13fafa3648e4e15b4f207c7096d8155e
124fb22a13fafa3648e4e15b4f207c7096d8155e
null
PHP_FUNCTION(pg_lo_truncate) { zval *pgsql_id = NULL; size_t size; pgLofp *pgsql; int argc = ZEND_NUM_ARGS(); int result; if (zend_parse_parameters(argc, "rl", &pgsql_id, &size) == FAILURE) { return; } ZEND_FETCH_RESOURCE(pgsql, pgLofp *, pgsql_id, -1, "PostgreSQL large object", le_lofp); #if HAVE_PG_LO64 if (PQserverVersion((PGconn *)pgsql->conn) >= 90300) { result = lo_truncate64((PGconn *)pgsql->conn, pgsql->lofd, size); } else { result = lo_truncate((PGconn *)pgsql->conn, pgsql->lofd, size); } #else result = lo_truncate((PGconn *)pgsql->conn, pgsql->lofd, size); #endif if (!result) { RETURN_TRUE; } else { RETURN_FALSE; } }
PHP_FUNCTION(pg_lo_truncate) { zval *pgsql_id = NULL; size_t size; pgLofp *pgsql; int argc = ZEND_NUM_ARGS(); int result; if (zend_parse_parameters(argc, "rl", &pgsql_id, &size) == FAILURE) { return; } ZEND_FETCH_RESOURCE(pgsql, pgLofp *, pgsql_id, -1, "PostgreSQL large object", le_lofp); #if HAVE_PG_LO64 if (PQserverVersion((PGconn *)pgsql->conn) >= 90300) { result = lo_truncate64((PGconn *)pgsql->conn, pgsql->lofd, size); } else { result = lo_truncate((PGconn *)pgsql->conn, pgsql->lofd, size); } #else result = lo_truncate((PGconn *)pgsql->conn, pgsql->lofd, size); #endif if (!result) { RETURN_TRUE; } else { RETURN_FALSE; } }
C
php
0
null
null
null
https://github.com/chromium/chromium/commit/a44b00c88bc5ea35b5b150217c5fd6e4ce168e58
a44b00c88bc5ea35b5b150217c5fd6e4ce168e58
Apply behaviour change fix from upstream for previous XPath change. BUG=58731 TEST=NONE Review URL: http://codereview.chromium.org/4027006 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@63572 0039d316-1c4b-4281-b951-d872f2087c98
xmlXPathCacheObjectCopy(xmlXPathContextPtr ctxt, xmlXPathObjectPtr val) { if (val == NULL) return(NULL); if (XP_HAS_CACHE(ctxt)) { switch (val->type) { case XPATH_NODESET: return(xmlXPathCacheWrapNodeSet(ctxt, xmlXPathNodeSetMerge(NULL, val->nodesetval))); case XPATH_STRING: return(xmlXPathCacheNewString(ctxt, val->stringval)); case XPATH_BOOLEAN: return(xmlXPathCacheNewBoolean(ctxt, val->boolval)); case XPATH_NUMBER: return(xmlXPathCacheNewFloat(ctxt, val->floatval)); default: break; } } return(xmlXPathObjectCopy(val)); }
xmlXPathCacheObjectCopy(xmlXPathContextPtr ctxt, xmlXPathObjectPtr val) { if (val == NULL) return(NULL); if (XP_HAS_CACHE(ctxt)) { switch (val->type) { case XPATH_NODESET: return(xmlXPathCacheWrapNodeSet(ctxt, xmlXPathNodeSetMerge(NULL, val->nodesetval))); case XPATH_STRING: return(xmlXPathCacheNewString(ctxt, val->stringval)); case XPATH_BOOLEAN: return(xmlXPathCacheNewBoolean(ctxt, val->boolval)); case XPATH_NUMBER: return(xmlXPathCacheNewFloat(ctxt, val->floatval)); default: break; } } return(xmlXPathObjectCopy(val)); }
C
Chrome
0
CVE-2017-18218
https://www.cvedetails.com/cve/CVE-2017-18218/
CWE-416
https://github.com/torvalds/linux/commit/27463ad99f738ed93c7c8b3e2e5bc8c4853a2ff2
27463ad99f738ed93c7c8b3e2e5bc8c4853a2ff2
net: hns: Fix a skb used after free bug skb maybe freed in hns_nic_net_xmit_hw() and return NETDEV_TX_OK, which cause hns_nic_net_xmit to use a freed skb. BUG: KASAN: use-after-free in hns_nic_net_xmit_hw+0x62c/0x940... [17659.112635] alloc_debug_processing+0x18c/0x1a0 [17659.117208] __slab_alloc+0x52c/0x560 [17659.120909] kmem_cache_alloc_node+0xac/0x2c0 [17659.125309] __alloc_skb+0x6c/0x260 [17659.128837] tcp_send_ack+0x8c/0x280 [17659.132449] __tcp_ack_snd_check+0x9c/0xf0 [17659.136587] tcp_rcv_established+0x5a4/0xa70 [17659.140899] tcp_v4_do_rcv+0x27c/0x620 [17659.144687] tcp_prequeue_process+0x108/0x170 [17659.149085] tcp_recvmsg+0x940/0x1020 [17659.152787] inet_recvmsg+0x124/0x180 [17659.156488] sock_recvmsg+0x64/0x80 [17659.160012] SyS_recvfrom+0xd8/0x180 [17659.163626] __sys_trace_return+0x0/0x4 [17659.167506] INFO: Freed in kfree_skbmem+0xa0/0xb0 age=23 cpu=1 pid=13 [17659.174000] free_debug_processing+0x1d4/0x2c0 [17659.178486] __slab_free+0x240/0x390 [17659.182100] kmem_cache_free+0x24c/0x270 [17659.186062] kfree_skbmem+0xa0/0xb0 [17659.189587] __kfree_skb+0x28/0x40 [17659.193025] napi_gro_receive+0x168/0x1c0 [17659.197074] hns_nic_rx_up_pro+0x58/0x90 [17659.201038] hns_nic_rx_poll_one+0x518/0xbc0 [17659.205352] hns_nic_common_poll+0x94/0x140 [17659.209576] net_rx_action+0x458/0x5e0 [17659.213363] __do_softirq+0x1b8/0x480 [17659.217062] run_ksoftirqd+0x64/0x80 [17659.220679] smpboot_thread_fn+0x224/0x310 [17659.224821] kthread+0x150/0x170 [17659.228084] ret_from_fork+0x10/0x40 BUG: KASAN: use-after-free in hns_nic_net_xmit+0x8c/0xc0... [17751.080490] __slab_alloc+0x52c/0x560 [17751.084188] kmem_cache_alloc+0x244/0x280 [17751.088238] __build_skb+0x40/0x150 [17751.091764] build_skb+0x28/0x100 [17751.095115] __alloc_rx_skb+0x94/0x150 [17751.098900] __napi_alloc_skb+0x34/0x90 [17751.102776] hns_nic_rx_poll_one+0x180/0xbc0 [17751.107097] hns_nic_common_poll+0x94/0x140 [17751.111333] net_rx_action+0x458/0x5e0 [17751.115123] __do_softirq+0x1b8/0x480 [17751.118823] run_ksoftirqd+0x64/0x80 [17751.122437] smpboot_thread_fn+0x224/0x310 [17751.126575] kthread+0x150/0x170 [17751.129838] ret_from_fork+0x10/0x40 [17751.133454] INFO: Freed in kfree_skbmem+0xa0/0xb0 age=19 cpu=7 pid=43 [17751.139951] free_debug_processing+0x1d4/0x2c0 [17751.144436] __slab_free+0x240/0x390 [17751.148051] kmem_cache_free+0x24c/0x270 [17751.152014] kfree_skbmem+0xa0/0xb0 [17751.155543] __kfree_skb+0x28/0x40 [17751.159022] napi_gro_receive+0x168/0x1c0 [17751.163074] hns_nic_rx_up_pro+0x58/0x90 [17751.167041] hns_nic_rx_poll_one+0x518/0xbc0 [17751.171358] hns_nic_common_poll+0x94/0x140 [17751.175585] net_rx_action+0x458/0x5e0 [17751.179373] __do_softirq+0x1b8/0x480 [17751.183076] run_ksoftirqd+0x64/0x80 [17751.186691] smpboot_thread_fn+0x224/0x310 [17751.190826] kthread+0x150/0x170 [17751.194093] ret_from_fork+0x10/0x40 Fixes: 13ac695e7ea1 ("net:hns: Add support of Hip06 SoC to the Hislicon Network Subsystem") Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com> Signed-off-by: lipeng <lipeng321@huawei.com> Reported-by: Jun He <hjat2005@huawei.com> Signed-off-by: David S. Miller <davem@davemloft.net>
static int hns_desc_unused(struct hnae_ring *ring) { int ntc = ring->next_to_clean; int ntu = ring->next_to_use; return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu; }
static int hns_desc_unused(struct hnae_ring *ring) { int ntc = ring->next_to_clean; int ntu = ring->next_to_use; return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu; }
C
linux
0
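The CVE-2017-18218 commit message above describes a use-after-free: the transmit helper may free the skb and still return success, after which the caller dereferences it. The sketch below shows the general pattern in plain userland C; struct buf, xmit_hw() and xmit() are made-up stand-ins for the driver's skb and transmit path, and saving the length before the call illustrates the kind of fix implied, not the driver's exact patch.

#include <stdio.h>
#include <stdlib.h>

struct buf { size_t len; unsigned char data[64]; };

/* May consume (free) the buffer, like the transmit helper in the report. */
static int xmit_hw(struct buf *b)
{
    free(b);            /* buffer is gone after this call */
    return 0;           /* "success" */
}

static int xmit(struct buf *b)
{
    size_t len = b->len;      /* capture what we need BEFORE the call */
    int ret = xmit_hw(b);
    if (ret == 0)
        printf("sent %zu bytes\n", len);   /* safe: uses the saved value, not b->len */
    return ret;
}

int main(void)
{
    struct buf *b = malloc(sizeof(*b));
    if (!b)
        return 1;
    b->len = 42;
    return xmit(b);
}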
CVE-2017-8284
https://www.cvedetails.com/cve/CVE-2017-8284/
CWE-94
https://github.com/qemu/qemu/commit/30663fd26c0307e414622c7a8607fbc04f92ec14
30663fd26c0307e414622c7a8607fbc04f92ec14
tcg/i386: Check the size of instruction being translated This fixes the bug: 'user-to-root privesc inside VM via bad translation caching' reported by Jann Horn here: https://bugs.chromium.org/p/project-zero/issues/detail?id=1122 Reviewed-by: Richard Henderson <rth@twiddle.net> CC: Peter Maydell <peter.maydell@linaro.org> CC: Paolo Bonzini <pbonzini@redhat.com> Reported-by: Jann Horn <jannh@google.com> Signed-off-by: Pranith Kumar <bobby.prani@gmail.com> Message-Id: <20170323175851.14342-1-bobby.prani@gmail.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
static void gen_movl_seg_T0(DisasContext *s, int seg_reg) { if (s->pe && !s->vm86) { tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32); /* abort translation because the addseg value may change or because ss32 may change. For R_SS, translation must always stop as a special handling must be done to disable hardware interrupts for the next instruction */ if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS)) s->is_jmp = DISAS_TB_JUMP; } else { gen_op_movl_seg_T0_vm(seg_reg); if (seg_reg == R_SS) s->is_jmp = DISAS_TB_JUMP; } }
static void gen_movl_seg_T0(DisasContext *s, int seg_reg) { if (s->pe && !s->vm86) { tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0); gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32); /* abort translation because the addseg value may change or because ss32 may change. For R_SS, translation must always stop as a special handling must be done to disable hardware interrupts for the next instruction */ if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS)) s->is_jmp = DISAS_TB_JUMP; } else { gen_op_movl_seg_T0_vm(seg_reg); if (seg_reg == R_SS) s->is_jmp = DISAS_TB_JUMP; } }
C
qemu
0
CVE-2016-4997
https://www.cvedetails.com/cve/CVE-2016-4997/
CWE-264
https://github.com/torvalds/linux/commit/ce683e5f9d045e5d67d1312a42b359cb2ab2a13c
ce683e5f9d045e5d67d1312a42b359cb2ab2a13c
netfilter: x_tables: check for bogus target offset We're currently asserting that targetoff + targetsize <= nextoff. Extend it to also check that targetoff is >= sizeof(xt_entry). Since this is generic code, add an argument pointing to the start of the match/target, we can then derive the base structure size from the delta. We also need the e->elems pointer in a followup change to validate matches. Signed-off-by: Florian Westphal <fw@strlen.de> Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
void xt_compat_lock(u_int8_t af) { mutex_lock(&xt[af].compat_mutex); }
void xt_compat_lock(u_int8_t af) { mutex_lock(&xt[af].compat_mutex); }
C
linux
0
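The CVE-2016-4997 commit message above explains the extended bounds check: the target offset must lie past the fixed entry header, and targetoff + targetsize must stay within nextoff. A small self-contained sketch of that check follows; the struct and parameter names are simplified assumptions rather than the real x_tables definitions.

#include <stddef.h>
#include <stdio.h>

struct entry_hdr { unsigned int nextoff; /* total entry size */ };

static int target_offset_ok(size_t entry_size, size_t targetoff,
                            size_t targetsize, size_t nextoff)
{
    if (targetoff < entry_size)            /* new check: not inside the header */
        return 0;
    if (targetoff + targetsize > nextoff)  /* existing check: stays in bounds */
        return 0;                          /* (real code also guards the addition) */
    return 1;
}

int main(void)
{
    /* a bogus offset that points back into the entry header is rejected */
    printf("%d\n", target_offset_ok(sizeof(struct entry_hdr), 0, 8, 64));   /* 0 */
    printf("%d\n", target_offset_ok(sizeof(struct entry_hdr), 32, 8, 64));  /* 1 */
    return 0;
}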
null
null
null
https://github.com/chromium/chromium/commit/f2f703241635fa96fa630b83afcc9a330cc21b7e
f2f703241635fa96fa630b83afcc9a330cc21b7e
CrOS Shelf: Get rid of 'split view' mode for shelf background In the new UI, "maximized" and "split view" are treated the same in specs, so there is no more need for a separate "split view" mode. This folds it into the "maximized" mode. Note that the only thing that _seems_ different in shelf_background_animator is ShelfBackgroundAnimator::kMaxAlpha (255) vs kShelfTranslucentMaximizedWindow (254), which should be virtually impossible to distinguish. This CL therefore does not have any visual effect (and doesn't directly fix the linked bug, but is relevant). Bug: 899289 Change-Id: I60947338176ac15ca016b1ba4edf13d16362cb24 Reviewed-on: https://chromium-review.googlesource.com/c/1469741 Commit-Queue: Xiyuan Xia <xiyuan@chromium.org> Reviewed-by: Xiyuan Xia <xiyuan@chromium.org> Auto-Submit: Manu Cornet <manucornet@chromium.org> Cr-Commit-Position: refs/heads/master@{#631752}
void SetState(ShelfLayoutManager* layout_manager, ShelfVisibilityState state) { layout_manager->SetState(state); }
void SetState(ShelfLayoutManager* layout_manager, ShelfVisibilityState state) { layout_manager->SetState(state); }
C
Chrome
0
CVE-2013-1790
https://www.cvedetails.com/cve/CVE-2013-1790/
CWE-119
https://cgit.freedesktop.org/poppler/poppler/commit/?h=poppler-0.22&id=b1026b5978c385328f2a15a2185c599a563edf91
b1026b5978c385328f2a15a2185c599a563edf91
null
void CachedFileStream::moveStart(int delta) { start += delta; bufPtr = bufEnd = buf; bufPos = start; }
void CachedFileStream::moveStart(int delta) { start += delta; bufPtr = bufEnd = buf; bufPos = start; }
CPP
poppler
0
CVE-2015-6763
https://www.cvedetails.com/cve/CVE-2015-6763/
null
https://github.com/chromium/chromium/commit/f1574f25e1402e748bf2bd7e28ce3dd96ceb1ca4
f1574f25e1402e748bf2bd7e28ce3dd96ceb1ca4
MacViews: Enable secure text input for password Textfields. In Cocoa the NSTextInputContext automatically enables secure text input when activated and it's in the secure text entry mode. RenderWidgetHostViewMac did the similar thing for ages following the WebKit example. views::Textfield needs to do the same thing in a fashion that's sycnrhonized with RenderWidgetHostViewMac, otherwise the race conditions are possible when the Textfield gets focus, activates the secure text input mode and the RWHVM loses focus immediately afterwards and disables the secure text input instead of leaving it in the enabled state. BUG=818133,677220 Change-Id: I6db6c4b59e4a1a72cbb7f8c7056f71b04a3df08b Reviewed-on: https://chromium-review.googlesource.com/943064 Commit-Queue: Michail Pishchagin <mblsha@yandex-team.ru> Reviewed-by: Pavel Feldman <pfeldman@chromium.org> Reviewed-by: Avi Drissman <avi@chromium.org> Reviewed-by: Peter Kasting <pkasting@chromium.org> Cr-Commit-Position: refs/heads/master@{#542517}
void Textfield::SetTextInputType(ui::TextInputType type) { GetRenderText()->SetObscured(type == ui::TEXT_INPUT_TYPE_PASSWORD); text_input_type_ = type; OnCaretBoundsChanged(); if (GetInputMethod()) GetInputMethod()->OnTextInputTypeChanged(this); SchedulePaint(); }
void Textfield::SetTextInputType(ui::TextInputType type) { GetRenderText()->SetObscured(type == ui::TEXT_INPUT_TYPE_PASSWORD); text_input_type_ = type; OnCaretBoundsChanged(); if (GetInputMethod()) GetInputMethod()->OnTextInputTypeChanged(this); SchedulePaint(); }
C
Chrome
0
CVE-2014-3191
https://www.cvedetails.com/cve/CVE-2014-3191/
CWE-416
https://github.com/chromium/chromium/commit/11a4cc4a6d6e665d9a118fada4b7c658d6f70d95
11a4cc4a6d6e665d9a118fada4b7c658d6f70d95
Defer call to updateWidgetPositions() outside of RenderLayerScrollableArea. updateWidgetPositions() can destroy the render tree, so it should never be called from inside RenderLayerScrollableArea. Leaving it there allows for the potential of use-after-free bugs. BUG=402407 R=vollick@chromium.org Review URL: https://codereview.chromium.org/490473003 git-svn-id: svn://svn.chromium.org/blink/trunk@180681 bbb929c8-8fbe-4397-9dbb-9b2b20218538
void RenderLayerScrollableArea::updateScrollCornerStyle() { if (!m_scrollCorner && !hasScrollbar()) return; if (!m_scrollCorner && hasOverlayScrollbars()) return; RenderObject* actualRenderer = rendererForScrollbar(box()); RefPtr<RenderStyle> corner = box().hasOverflowClip() ? actualRenderer->getUncachedPseudoStyle(PseudoStyleRequest(SCROLLBAR_CORNER), actualRenderer->style()) : PassRefPtr<RenderStyle>(nullptr); if (corner) { if (!m_scrollCorner) { m_scrollCorner = RenderScrollbarPart::createAnonymous(&box().document()); m_scrollCorner->setParent(&box()); } m_scrollCorner->setStyle(corner.release()); } else if (m_scrollCorner) { m_scrollCorner->destroy(); m_scrollCorner = nullptr; } }
void RenderLayerScrollableArea::updateScrollCornerStyle() { if (!m_scrollCorner && !hasScrollbar()) return; if (!m_scrollCorner && hasOverlayScrollbars()) return; RenderObject* actualRenderer = rendererForScrollbar(box()); RefPtr<RenderStyle> corner = box().hasOverflowClip() ? actualRenderer->getUncachedPseudoStyle(PseudoStyleRequest(SCROLLBAR_CORNER), actualRenderer->style()) : PassRefPtr<RenderStyle>(nullptr); if (corner) { if (!m_scrollCorner) { m_scrollCorner = RenderScrollbarPart::createAnonymous(&box().document()); m_scrollCorner->setParent(&box()); } m_scrollCorner->setStyle(corner.release()); } else if (m_scrollCorner) { m_scrollCorner->destroy(); m_scrollCorner = nullptr; } }
C
Chrome
0
CVE-2016-9084
https://www.cvedetails.com/cve/CVE-2016-9084/
CWE-190
https://github.com/torvalds/linux/commit/05692d7005a364add85c6e25a6c4447ce08f913a
05692d7005a364add85c6e25a6c4447ce08f913a
vfio/pci: Fix integer overflows, bitmask check The VFIO_DEVICE_SET_IRQS ioctl did not sufficiently sanitize user-supplied integers, potentially allowing memory corruption. This patch adds appropriate integer overflow checks, checks the range bounds for VFIO_IRQ_SET_DATA_NONE, and also verifies that only single element in the VFIO_IRQ_SET_DATA_TYPE_MASK bitmask is set. VFIO_IRQ_SET_ACTION_TYPE_MASK is already correctly checked later in vfio_pci_set_irqs_ioctl(). Furthermore, a kzalloc is changed to a kcalloc because the use of a kzalloc with an integer multiplication allowed an integer overflow condition to be reached without this patch. kcalloc checks for overflow and should prevent a similar occurrence. Signed-off-by: Vlad Tsyrklevich <vlad@tsyrklevich.net> Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, int vector, int fd, bool msix) { struct pci_dev *pdev = vdev->pdev; struct eventfd_ctx *trigger; int irq, ret; if (vector < 0 || vector >= vdev->num_ctx) return -EINVAL; irq = pci_irq_vector(pdev, vector); if (vdev->ctx[vector].trigger) { free_irq(irq, vdev->ctx[vector].trigger); irq_bypass_unregister_producer(&vdev->ctx[vector].producer); kfree(vdev->ctx[vector].name); eventfd_ctx_put(vdev->ctx[vector].trigger); vdev->ctx[vector].trigger = NULL; } if (fd < 0) return 0; vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "vfio-msi%s[%d](%s)", msix ? "x" : "", vector, pci_name(pdev)); if (!vdev->ctx[vector].name) return -ENOMEM; trigger = eventfd_ctx_fdget(fd); if (IS_ERR(trigger)) { kfree(vdev->ctx[vector].name); return PTR_ERR(trigger); } /* * The MSIx vector table resides in device memory which may be cleared * via backdoor resets. We don't allow direct access to the vector * table so even if a userspace driver attempts to save/restore around * such a reset it would be unsuccessful. To avoid this, restore the * cached value of the message prior to enabling. */ if (msix) { struct msi_msg msg; get_cached_msi_msg(irq, &msg); pci_write_msi_msg(irq, &msg); } ret = request_irq(irq, vfio_msihandler, 0, vdev->ctx[vector].name, trigger); if (ret) { kfree(vdev->ctx[vector].name); eventfd_ctx_put(trigger); return ret; } vdev->ctx[vector].producer.token = trigger; vdev->ctx[vector].producer.irq = irq; ret = irq_bypass_register_producer(&vdev->ctx[vector].producer); if (unlikely(ret)) dev_info(&pdev->dev, "irq bypass producer (token %p) registration fails: %d\n", vdev->ctx[vector].producer.token, ret); vdev->ctx[vector].trigger = trigger; return 0; }
static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, int vector, int fd, bool msix) { struct pci_dev *pdev = vdev->pdev; struct eventfd_ctx *trigger; int irq, ret; if (vector < 0 || vector >= vdev->num_ctx) return -EINVAL; irq = pci_irq_vector(pdev, vector); if (vdev->ctx[vector].trigger) { free_irq(irq, vdev->ctx[vector].trigger); irq_bypass_unregister_producer(&vdev->ctx[vector].producer); kfree(vdev->ctx[vector].name); eventfd_ctx_put(vdev->ctx[vector].trigger); vdev->ctx[vector].trigger = NULL; } if (fd < 0) return 0; vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "vfio-msi%s[%d](%s)", msix ? "x" : "", vector, pci_name(pdev)); if (!vdev->ctx[vector].name) return -ENOMEM; trigger = eventfd_ctx_fdget(fd); if (IS_ERR(trigger)) { kfree(vdev->ctx[vector].name); return PTR_ERR(trigger); } /* * The MSIx vector table resides in device memory which may be cleared * via backdoor resets. We don't allow direct access to the vector * table so even if a userspace driver attempts to save/restore around * such a reset it would be unsuccessful. To avoid this, restore the * cached value of the message prior to enabling. */ if (msix) { struct msi_msg msg; get_cached_msi_msg(irq, &msg); pci_write_msi_msg(irq, &msg); } ret = request_irq(irq, vfio_msihandler, 0, vdev->ctx[vector].name, trigger); if (ret) { kfree(vdev->ctx[vector].name); eventfd_ctx_put(trigger); return ret; } vdev->ctx[vector].producer.token = trigger; vdev->ctx[vector].producer.irq = irq; ret = irq_bypass_register_producer(&vdev->ctx[vector].producer); if (unlikely(ret)) dev_info(&pdev->dev, "irq bypass producer (token %p) registration fails: %d\n", vdev->ctx[vector].producer.token, ret); vdev->ctx[vector].trigger = trigger; return 0; }
C
linux
0
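The CVE-2016-9084 commit message above notes that a kzalloc combined with an integer multiplication can overflow, while kcalloc checks the multiplication. The userland sketch below illustrates the same point with malloc versus calloc; the huge count is a stand-in for a user-supplied value, and this is not the VFIO code itself.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static void *alloc_array_unsafe(size_t count, size_t elem)
{
    return malloc(count * elem);        /* count * elem may wrap to a tiny value */
}

static void *alloc_array_safe(size_t count, size_t elem)
{
    return calloc(count, elem);         /* calloc fails instead of wrapping */
}

int main(void)
{
    size_t huge = (SIZE_MAX / 8) + 2;   /* stands in for a user-supplied count */
    void *a = alloc_array_unsafe(huge, 8);  /* likely "succeeds" with a small buffer */
    void *b = alloc_array_safe(huge, 8);    /* returns NULL */
    printf("unsafe=%p safe=%p\n", a, b);
    free(a);
    free(b);
    return 0;
}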
CVE-2015-2301
https://www.cvedetails.com/cve/CVE-2015-2301/
null
https://git.php.net/?p=php-src.git;a=commit;h=b2cf3f064b8f5efef89bb084521b61318c71781b
b2cf3f064b8f5efef89bb084521b61318c71781b
null
PHP_METHOD(Phar, isValidPharFilename) { char *fname; const char *ext_str; int fname_len, ext_len, is_executable; zend_bool executable = 1; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|b", &fname, &fname_len, &executable) == FAILURE) { return; } is_executable = executable; RETVAL_BOOL(phar_detect_phar_fname_ext(fname, fname_len, &ext_str, &ext_len, is_executable, 2, 1 TSRMLS_CC) == SUCCESS); }
PHP_METHOD(Phar, isValidPharFilename) { char *fname; const char *ext_str; int fname_len, ext_len, is_executable; zend_bool executable = 1; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|b", &fname, &fname_len, &executable) == FAILURE) { return; } is_executable = executable; RETVAL_BOOL(phar_detect_phar_fname_ext(fname, fname_len, &ext_str, &ext_len, is_executable, 2, 1 TSRMLS_CC) == SUCCESS); }
C
php
0
CVE-2016-7134
https://www.cvedetails.com/cve/CVE-2016-7134/
CWE-119
https://github.com/php/php-src/commit/72dbb7f416160f490c4e9987040989a10ad431c7?w=1
72dbb7f416160f490c4e9987040989a10ad431c7
Fix bug #72674 - check both curl_escape and curl_unescape
static void create_certinfo(struct curl_certinfo *ci, zval *listcode) { int i; if (ci) { zval certhash; for (i=0; i<ci->num_of_certs; i++) { struct curl_slist *slist; array_init(&certhash); for (slist = ci->certinfo[i]; slist; slist = slist->next) { int len; char s[64]; char *tmp; strncpy(s, slist->data, 64); tmp = memchr(s, ':', 64); if(tmp) { *tmp = '\0'; len = strlen(s); add_assoc_string(&certhash, s, &slist->data[len+1]); } else { php_error_docref(NULL, E_WARNING, "Could not extract hash key from certificate info"); } } add_next_index_zval(listcode, &certhash); } } }
static void create_certinfo(struct curl_certinfo *ci, zval *listcode) { int i; if (ci) { zval certhash; for (i=0; i<ci->num_of_certs; i++) { struct curl_slist *slist; array_init(&certhash); for (slist = ci->certinfo[i]; slist; slist = slist->next) { int len; char s[64]; char *tmp; strncpy(s, slist->data, 64); tmp = memchr(s, ':', 64); if(tmp) { *tmp = '\0'; len = strlen(s); add_assoc_string(&certhash, s, &slist->data[len+1]); } else { php_error_docref(NULL, E_WARNING, "Could not extract hash key from certificate info"); } } add_next_index_zval(listcode, &certhash); } } }
C
php-src
0
CVE-2018-16077
https://www.cvedetails.com/cve/CVE-2018-16077/
CWE-285
https://github.com/chromium/chromium/commit/90f878780cce9c4b0475fcea14d91b8f510cce11
90f878780cce9c4b0475fcea14d91b8f510cce11
Prevent sandboxed documents from reusing the default window Bug: 377995 Change-Id: Iff66c6d214dfd0cb7ea9c80f83afeedfff703541 Reviewed-on: https://chromium-review.googlesource.com/983558 Commit-Queue: Andy Paicu <andypaicu@chromium.org> Reviewed-by: Daniel Cheng <dcheng@chromium.org> Cr-Commit-Position: refs/heads/master@{#567663}
void DocumentLoader::LoadFailed(const ResourceError& error) { if (!error.IsCancellation() && frame_->Owner()) frame_->Owner()->RenderFallbackContent(); fetcher_->ClearResourcesFromPreviousFetcher(); WebHistoryCommitType history_commit_type = LoadTypeToCommitType(load_type_); switch (state_) { case kNotStarted: probe::frameClearedScheduledClientNavigation(frame_); FALLTHROUGH; case kProvisional: state_ = kSentDidFinishLoad; GetLocalFrameClient().DispatchDidFailProvisionalLoad(error, history_commit_type); if (frame_) GetFrameLoader().DetachProvisionalDocumentLoader(this); break; case kCommitted: if (frame_->GetDocument()->Parser()) frame_->GetDocument()->Parser()->StopParsing(); state_ = kSentDidFinishLoad; GetLocalFrameClient().DispatchDidFailLoad(error, history_commit_type); GetFrameLoader().DidFinishNavigation(); break; case kSentDidFinishLoad: NOTREACHED(); break; } DCHECK_EQ(kSentDidFinishLoad, state_); }
void DocumentLoader::LoadFailed(const ResourceError& error) { if (!error.IsCancellation() && frame_->Owner()) frame_->Owner()->RenderFallbackContent(); fetcher_->ClearResourcesFromPreviousFetcher(); WebHistoryCommitType history_commit_type = LoadTypeToCommitType(load_type_); switch (state_) { case kNotStarted: probe::frameClearedScheduledClientNavigation(frame_); FALLTHROUGH; case kProvisional: state_ = kSentDidFinishLoad; GetLocalFrameClient().DispatchDidFailProvisionalLoad(error, history_commit_type); if (frame_) GetFrameLoader().DetachProvisionalDocumentLoader(this); break; case kCommitted: if (frame_->GetDocument()->Parser()) frame_->GetDocument()->Parser()->StopParsing(); state_ = kSentDidFinishLoad; GetLocalFrameClient().DispatchDidFailLoad(error, history_commit_type); GetFrameLoader().DidFinishNavigation(); break; case kSentDidFinishLoad: NOTREACHED(); break; } DCHECK_EQ(kSentDidFinishLoad, state_); }
C
Chrome
0
CVE-2015-3193
https://www.cvedetails.com/cve/CVE-2015-3193/
CWE-200
https://git.openssl.org/?p=openssl.git;a=commit;h=d73cc256c8e256c32ed959456101b73ba9842f72
d73cc256c8e256c32ed959456101b73ba9842f72
null
int test_mod_exp(BIO *bp, BN_CTX *ctx) { BIGNUM *a, *b, *c, *d, *e; int i; a = BN_new(); b = BN_new(); c = BN_new(); d = BN_new(); e = BN_new(); BN_one(a); BN_one(b); BN_zero(c); if (BN_mod_exp(d, a, b, c, ctx)) { fprintf(stderr, "BN_mod_exp with zero modulus succeeded!\n"); return 0; } BN_bntest_rand(c, 30, 0, 1); /* must be odd for montgomery */ for (i = 0; i < num2; i++) { BN_bntest_rand(a, 20 + i * 5, 0, 0); BN_bntest_rand(b, 2 + i, 0, 0); if (!BN_mod_exp(d, a, b, c, ctx)) return (0); if (bp != NULL) { if (!results) { BN_print(bp, a); BIO_puts(bp, " ^ "); BN_print(bp, b); BIO_puts(bp, " % "); BN_print(bp, c); BIO_puts(bp, " - "); } BN_print(bp, d); BIO_puts(bp, "\n"); } BN_exp(e, a, b, ctx); BN_sub(e, e, d); BN_div(a, b, e, c, ctx); if (!BN_is_zero(b)) { fprintf(stderr, "Modulo exponentiation test failed!\n"); return 0; } } /* Regression test for carry propagation bug in sqr8x_reduction */ BN_hex2bn(&a, "050505050505"); BN_hex2bn(&b, "02"); BN_hex2bn(&c, "4141414141414141414141274141414141414141414141414141414141414141" "4141414141414141414141414141414141414141414141414141414141414141" "4141414141414141414141800000000000000000000000000000000000000000" "0000000000000000000000000000000000000000000000000000000000000000" "0000000000000000000000000000000000000000000000000000000000000000" "0000000000000000000000000000000000000000000000000000000001"); BN_mod_exp(d, a, b, c, ctx); BN_mul(e, a, a, ctx); if (BN_cmp(d, e)) { fprintf(stderr, "BN_mod_exp and BN_mul produce different results!\n"); return 0; } BN_free(a); BN_free(b); BN_free(c); BN_zero(c); if (BN_mod_exp_mont_consttime(d, a, b, c, ctx, NULL)) { fprintf(stderr, "BN_mod_exp_mont_consttime with zero modulus " "succeeded\n"); return 0; } BN_set_word(c, 16); if (BN_mod_exp_mont_consttime(d, a, b, c, ctx, NULL)) { fprintf(stderr, "BN_mod_exp_mont_consttime with even modulus " "succeeded\n"); return 0; } BN_bntest_rand(c, 30, 0, 1); /* must be odd for montgomery */ for (i = 0; i < num2; i++) { BN_bntest_rand(a, 20 + i * 5, 0, 0); BN_bntest_rand(b, 2 + i, 0, 0); if (!BN_mod_exp_mont_consttime(d, a, b, c, ctx, NULL)) return (00); if (bp != NULL) { if (!results) { BN_print(bp, a); BIO_puts(bp, " ^ "); BN_print(bp, b); BIO_puts(bp, " % "); BN_print(bp, c); BIO_puts(bp, " - "); } BN_print(bp, d); BIO_puts(bp, "\n"); } BN_exp(e, a, b, ctx); BN_sub(e, e, d); BN_div(a, b, e, c, ctx); if (!BN_is_zero(b)) { fprintf(stderr, "Modulo exponentiation test failed!\n"); return 0; } } BN_free(a); BN_free(b); BN_free(c); BN_free(d); BN_free(e); return (1); }
int test_mod_exp(BIO *bp, BN_CTX *ctx) { BIGNUM *a, *b, *c, *d, *e; int i; a = BN_new(); b = BN_new(); c = BN_new(); d = BN_new(); e = BN_new(); BN_one(a); BN_one(b); BN_zero(c); if (BN_mod_exp(d, a, b, c, ctx)) { fprintf(stderr, "BN_mod_exp with zero modulus succeeded!\n"); return 0; } BN_bntest_rand(c, 30, 0, 1); /* must be odd for montgomery */ for (i = 0; i < num2; i++) { BN_bntest_rand(a, 20 + i * 5, 0, 0); BN_bntest_rand(b, 2 + i, 0, 0); if (!BN_mod_exp(d, a, b, c, ctx)) return (0); if (bp != NULL) { if (!results) { BN_print(bp, a); BIO_puts(bp, " ^ "); BN_print(bp, b); BIO_puts(bp, " % "); BN_print(bp, c); BIO_puts(bp, " - "); } BN_print(bp, d); BIO_puts(bp, "\n"); } BN_exp(e, a, b, ctx); BN_sub(e, e, d); BN_div(a, b, e, c, ctx); if (!BN_is_zero(b)) { fprintf(stderr, "Modulo exponentiation test failed!\n"); return 0; } } BN_free(a); BN_free(b); BN_free(c); BN_zero(c); if (BN_mod_exp_mont_consttime(d, a, b, c, ctx, NULL)) { fprintf(stderr, "BN_mod_exp_mont_consttime with zero modulus " "succeeded\n"); return 0; } BN_set_word(c, 16); if (BN_mod_exp_mont_consttime(d, a, b, c, ctx, NULL)) { fprintf(stderr, "BN_mod_exp_mont_consttime with even modulus " "succeeded\n"); return 0; } BN_bntest_rand(c, 30, 0, 1); /* must be odd for montgomery */ for (i = 0; i < num2; i++) { BN_bntest_rand(a, 20 + i * 5, 0, 0); BN_bntest_rand(b, 2 + i, 0, 0); if (!BN_mod_exp_mont_consttime(d, a, b, c, ctx, NULL)) return (00); if (bp != NULL) { if (!results) { BN_print(bp, a); BIO_puts(bp, " ^ "); BN_print(bp, b); BIO_puts(bp, " % "); BN_print(bp, c); BIO_puts(bp, " - "); } BN_print(bp, d); BIO_puts(bp, "\n"); } BN_exp(e, a, b, ctx); BN_sub(e, e, d); BN_div(a, b, e, c, ctx); if (!BN_is_zero(b)) { fprintf(stderr, "Modulo exponentiation test failed!\n"); return 0; } } BN_free(a); BN_free(b); BN_free(c); BN_free(d); BN_free(e); return (1); }
C
openssl
1
CVE-2012-5148
https://www.cvedetails.com/cve/CVE-2012-5148/
CWE-20
https://github.com/chromium/chromium/commit/e89cfcb9090e8c98129ae9160c513f504db74599
e89cfcb9090e8c98129ae9160c513f504db74599
Remove TabContents from TabStripModelObserver::TabDetachedAt. BUG=107201 TEST=no visible change Review URL: https://chromiumcodereview.appspot.com/11293205 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@167122 0039d316-1c4b-4281-b951-d872f2087c98
bool TabStripModel::OpenerMatches(const WebContentsData* data, const WebContents* opener, bool use_group) { return data->opener == opener || (use_group && data->group == opener); }
bool TabStripModel::OpenerMatches(const WebContentsData* data, const WebContents* opener, bool use_group) { return data->opener == opener || (use_group && data->group == opener); }
C
Chrome
0
CVE-2009-4411
https://www.cvedetails.com/cve/CVE-2009-4411/
CWE-264
https://git.savannah.gnu.org/cgit/acl.git/commit/?id=63451a0
63451a06b7484d220750ed8574d3ee84e156daf5
null
int main(int argc, char *argv[]) { int opt; char *line; progname = basename(argv[0]); #if POSIXLY_CORRECT cmd_line_options = POSIXLY_CMD_LINE_OPTIONS; #else if (getenv(POSIXLY_CORRECT_STR)) posixly_correct = 1; if (!posixly_correct) cmd_line_options = CMD_LINE_OPTIONS; else cmd_line_options = POSIXLY_CMD_LINE_OPTIONS; #endif setlocale(LC_CTYPE, ""); setlocale(LC_MESSAGES, ""); bindtextdomain(PACKAGE, LOCALEDIR); textdomain(PACKAGE); /* Align `#effective:' comments to column 40 for tty's */ if (!posixly_correct && isatty(fileno(stdout))) print_options |= TEXT_SMART_INDENT; while ((opt = getopt_long(argc, argv, cmd_line_options, long_options, NULL)) != -1) { switch (opt) { case 'a': /* acl only */ if (posixly_correct) goto synopsis; opt_print_acl = 1; break; case 'd': /* default acl only */ opt_print_default_acl = 1; break; case 'c': /* no comments */ if (posixly_correct) goto synopsis; opt_comments = 0; break; case 'e': /* all #effective comments */ if (posixly_correct) goto synopsis; print_options |= TEXT_ALL_EFFECTIVE; break; case 'E': /* no #effective comments */ if (posixly_correct) goto synopsis; print_options &= ~(TEXT_SOME_EFFECTIVE | TEXT_ALL_EFFECTIVE); break; case 'R': /* recursive */ if (posixly_correct) goto synopsis; walk_flags |= WALK_TREE_RECURSIVE; break; case 'L': /* follow all symlinks */ if (posixly_correct) goto synopsis; walk_flags |= WALK_TREE_LOGICAL; walk_flags &= ~WALK_TREE_PHYSICAL; break; case 'P': /* skip all symlinks */ if (posixly_correct) goto synopsis; walk_flags |= WALK_TREE_PHYSICAL; walk_flags &= ~WALK_TREE_LOGICAL; break; case 's': /* skip files with only base entries */ if (posixly_correct) goto synopsis; opt_skip_base = 1; break; case 'p': if (posixly_correct) goto synopsis; opt_strip_leading_slash = 0; break; case 't': if (posixly_correct) goto synopsis; opt_tabular = 1; break; case 'n': /* numeric */ opt_numeric = 1; print_options |= TEXT_NUMERIC_IDS; break; case 'v': /* print version */ printf("%s " VERSION "\n", progname); return 0; case 'h': /* help */ help(); return 0; case ':': /* option missing */ case '?': /* unknown option */ default: goto synopsis; } } if (!(opt_print_acl || opt_print_default_acl)) { opt_print_acl = 1; if (!posixly_correct) opt_print_default_acl = 1; } if ((optind == argc) && !posixly_correct) goto synopsis; do { if (optind == argc || strcmp(argv[optind], "-") == 0) { while ((line = next_line(stdin)) != NULL) { if (*line == '\0') continue; had_errors += walk_tree(line, walk_flags, 0, do_print, NULL); } if (!feof(stdin)) { fprintf(stderr, _("%s: Standard input: %s\n"), progname, strerror(errno)); had_errors++; } } else had_errors += walk_tree(argv[optind], walk_flags, 0, do_print, NULL); optind++; } while (optind < argc); return had_errors ? 1 : 0; synopsis: fprintf(stderr, _("Usage: %s [-%s] file ...\n"), progname, cmd_line_options); fprintf(stderr, _("Try `%s --help' for more information.\n"), progname); return 2; }
int main(int argc, char *argv[]) { int opt; char *line; progname = basename(argv[0]); #if POSIXLY_CORRECT cmd_line_options = POSIXLY_CMD_LINE_OPTIONS; #else if (getenv(POSIXLY_CORRECT_STR)) posixly_correct = 1; if (!posixly_correct) cmd_line_options = CMD_LINE_OPTIONS; else cmd_line_options = POSIXLY_CMD_LINE_OPTIONS; #endif setlocale(LC_CTYPE, ""); setlocale(LC_MESSAGES, ""); bindtextdomain(PACKAGE, LOCALEDIR); textdomain(PACKAGE); /* Align `#effective:' comments to column 40 for tty's */ if (!posixly_correct && isatty(fileno(stdout))) print_options |= TEXT_SMART_INDENT; while ((opt = getopt_long(argc, argv, cmd_line_options, long_options, NULL)) != -1) { switch (opt) { case 'a': /* acl only */ if (posixly_correct) goto synopsis; opt_print_acl = 1; break; case 'd': /* default acl only */ opt_print_default_acl = 1; break; case 'c': /* no comments */ if (posixly_correct) goto synopsis; opt_comments = 0; break; case 'e': /* all #effective comments */ if (posixly_correct) goto synopsis; print_options |= TEXT_ALL_EFFECTIVE; break; case 'E': /* no #effective comments */ if (posixly_correct) goto synopsis; print_options &= ~(TEXT_SOME_EFFECTIVE | TEXT_ALL_EFFECTIVE); break; case 'R': /* recursive */ if (posixly_correct) goto synopsis; walk_flags |= WALK_TREE_RECURSIVE; break; case 'L': /* follow all symlinks */ if (posixly_correct) goto synopsis; walk_flags |= WALK_TREE_LOGICAL; walk_flags &= ~WALK_TREE_PHYSICAL; break; case 'P': /* skip all symlinks */ if (posixly_correct) goto synopsis; walk_flags |= WALK_TREE_PHYSICAL; walk_flags &= ~WALK_TREE_LOGICAL; break; case 's': /* skip files with only base entries */ if (posixly_correct) goto synopsis; opt_skip_base = 1; break; case 'p': if (posixly_correct) goto synopsis; opt_strip_leading_slash = 0; break; case 't': if (posixly_correct) goto synopsis; opt_tabular = 1; break; case 'n': /* numeric */ opt_numeric = 1; print_options |= TEXT_NUMERIC_IDS; break; case 'v': /* print version */ printf("%s " VERSION "\n", progname); return 0; case 'h': /* help */ help(); return 0; case ':': /* option missing */ case '?': /* unknown option */ default: goto synopsis; } } if (!(opt_print_acl || opt_print_default_acl)) { opt_print_acl = 1; if (!posixly_correct) opt_print_default_acl = 1; } if ((optind == argc) && !posixly_correct) goto synopsis; do { if (optind == argc || strcmp(argv[optind], "-") == 0) { while ((line = next_line(stdin)) != NULL) { if (*line == '\0') continue; had_errors += walk_tree(line, walk_flags, 0, do_print, NULL); } if (!feof(stdin)) { fprintf(stderr, _("%s: Standard input: %s\n"), progname, strerror(errno)); had_errors++; } } else had_errors += walk_tree(argv[optind], walk_flags, 0, do_print, NULL); optind++; } while (optind < argc); return had_errors ? 1 : 0; synopsis: fprintf(stderr, _("Usage: %s [-%s] file ...\n"), progname, cmd_line_options); fprintf(stderr, _("Try `%s --help' for more information.\n"), progname); return 2; }
C
savannah
0
CVE-2011-1019
https://www.cvedetails.com/cve/CVE-2011-1019/
CWE-264
https://github.com/torvalds/linux/commit/8909c9ad8ff03611c9c96c9a92656213e4bb495b
8909c9ad8ff03611c9c96c9a92656213e4bb495b
net: don't allow CAP_NET_ADMIN to load non-netdev kernel modules Since a8f80e8ff94ecba629542d9b4b5f5a8ee3eb565c any process with CAP_NET_ADMIN may load any module from /lib/modules/. This doesn't mean that CAP_NET_ADMIN is a superset of CAP_SYS_MODULE as modules are limited to /lib/modules/**. However, CAP_NET_ADMIN capability shouldn't allow anybody load any module not related to networking. This patch restricts an ability of autoloading modules to netdev modules with explicit aliases. This fixes CVE-2011-1019. Arnd Bergmann suggested to leave untouched the old pre-v2.6.32 behavior of loading netdev modules by name (without any prefix) for processes with CAP_SYS_MODULE to maintain the compatibility with network scripts that use autoloading netdev modules by aliases like "eth0", "wlan0". Currently there are only three users of the feature in the upstream kernel: ipip, ip_gre and sit. root@albatros:~# capsh --drop=$(seq -s, 0 11),$(seq -s, 13 34) -- root@albatros:~# grep Cap /proc/$$/status CapInh: 0000000000000000 CapPrm: fffffff800001000 CapEff: fffffff800001000 CapBnd: fffffff800001000 root@albatros:~# modprobe xfs FATAL: Error inserting xfs (/lib/modules/2.6.38-rc6-00001-g2bf4ca3/kernel/fs/xfs/xfs.ko): Operation not permitted root@albatros:~# lsmod | grep xfs root@albatros:~# ifconfig xfs xfs: error fetching interface information: Device not found root@albatros:~# lsmod | grep xfs root@albatros:~# lsmod | grep sit root@albatros:~# ifconfig sit sit: error fetching interface information: Device not found root@albatros:~# lsmod | grep sit root@albatros:~# ifconfig sit0 sit0 Link encap:IPv6-in-IPv4 NOARP MTU:1480 Metric:1 root@albatros:~# lsmod | grep sit sit 10457 0 tunnel4 2957 1 sit For CAP_SYS_MODULE module loading is still relaxed: root@albatros:~# grep Cap /proc/$$/status CapInh: 0000000000000000 CapPrm: ffffffffffffffff CapEff: ffffffffffffffff CapBnd: ffffffffffffffff root@albatros:~# ifconfig xfs xfs: error fetching interface information: Device not found root@albatros:~# lsmod | grep xfs xfs 745319 0 Reference: https://lkml.org/lkml/2011/2/24/203 Signed-off-by: Vasiliy Kulikov <segoon@openwall.com> Signed-off-by: Michael Tokarev <mjt@tls.msk.ru> Acked-by: David S. Miller <davem@davemloft.net> Acked-by: Kees Cook <kees.cook@canonical.com> Signed-off-by: James Morris <jmorris@namei.org>
static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index) { if (unlikely(queue_index >= dev->real_num_tx_queues)) { if (net_ratelimit()) { pr_warning("%s selects TX queue %d, but " "real number of TX queues is %d\n", dev->name, queue_index, dev->real_num_tx_queues); } return 0; } return queue_index; }
static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index) { if (unlikely(queue_index >= dev->real_num_tx_queues)) { if (net_ratelimit()) { pr_warning("%s selects TX queue %d, but " "real number of TX queues is %d\n", dev->name, queue_index, dev->real_num_tx_queues); } return 0; } return queue_index; }
C
linux
0
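The CVE-2011-1019 commit message above describes restricting CAP_NET_ADMIN-triggered module autoloading to explicitly network-aliased modules (a "netdev-" style alias), while CAP_SYS_MODULE keeps the old by-name behaviour. The sketch below only illustrates that naming policy in userland C; the enum, function and buffer handling are illustrative assumptions, not the kernel's request_module() path.

#include <stdio.h>

enum caps { HAS_NET_ADMIN_ONLY, HAS_SYS_MODULE_TOO };

static void build_module_request(enum caps c, const char *ifname,
                                 char *out, size_t outlen)
{
    if (c == HAS_SYS_MODULE_TOO)
        snprintf(out, outlen, "%s", ifname);           /* legacy: load by raw name */
    else
        snprintf(out, outlen, "netdev-%s", ifname);    /* restricted: alias only */
}

int main(void)
{
    char req[64];
    build_module_request(HAS_NET_ADMIN_ONLY, "sit0", req, sizeof(req));
    printf("%s\n", req);     /* netdev-sit0: cannot be abused to load e.g. "xfs" */
    build_module_request(HAS_SYS_MODULE_TOO, "sit0", req, sizeof(req));
    printf("%s\n", req);     /* sit0 */
    return 0;
}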
CVE-2019-5822
https://www.cvedetails.com/cve/CVE-2019-5822/
CWE-284
https://github.com/chromium/chromium/commit/2f81d000fdb5331121cba7ff81dfaaec25b520a5
2f81d000fdb5331121cba7ff81dfaaec25b520a5
When turning a download into a navigation, navigate the right frame Code changes from Nate Chapin <japhet@chromium.org> Bug: 926105 Change-Id: I098599394e6ebe7d2fce5af838014297a337d294 Reviewed-on: https://chromium-review.googlesource.com/c/1454962 Reviewed-by: Camille Lamy <clamy@chromium.org> Commit-Queue: Jochen Eisinger <jochen@chromium.org> Cr-Commit-Position: refs/heads/master@{#629547}
void ResourceDispatcherHostImpl::AckUpdateLoadInfo() { DCHECK(waiting_on_load_state_ack_); waiting_on_load_state_ack_ = false; MaybeStartUpdateLoadInfoTimer(); }
void ResourceDispatcherHostImpl::AckUpdateLoadInfo() { DCHECK(waiting_on_load_state_ack_); waiting_on_load_state_ack_ = false; MaybeStartUpdateLoadInfoTimer(); }
C
Chrome
0
CVE-2013-3237
https://www.cvedetails.com/cve/CVE-2013-3237/
CWE-200
https://github.com/torvalds/linux/commit/d5e0d0f607a7a029c6563a0470d88255c89a8d11
d5e0d0f607a7a029c6563a0470d88255c89a8d11
VSOCK: Fix missing msg_namelen update in vsock_stream_recvmsg() The code misses to update the msg_namelen member to 0 and therefore makes net/socket.c leak the local, uninitialized sockaddr_storage variable to userland -- 128 bytes of kernel stack memory. Cc: Andy King <acking@vmware.com> Cc: Dmitry Torokhov <dtor@vmware.com> Cc: George Zhang <georgezhang@vmware.com> Signed-off-by: Mathias Krause <minipli@googlemail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
static bool __vsock_in_bound_table(struct vsock_sock *vsk) { return !list_empty(&vsk->bound_table); }
static bool __vsock_in_bound_table(struct vsock_sock *vsk) { return !list_empty(&vsk->bound_table); }
C
linux
0
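The CVE-2013-3237 commit message above describes an information leak: a recvmsg handler that never writes a peer address must still set msg_namelen to 0, or the generic socket code copies uninitialized stack bytes to userspace. The sketch below reproduces that pattern in plain C with a made-up struct msg; it is an analogy for the sockaddr_storage case, not the vsock code.

#include <stdio.h>

struct msg { char name[128]; size_t namelen; };

/* Handler that never fills in a peer address. */
static void recv_noname_buggy(struct msg *m)
{
    (void)m;                 /* bug: m->namelen left untouched */
}

static void recv_noname_fixed(struct msg *m)
{
    m->namelen = 0;          /* fix: explicitly report "no address written" */
}

static void caller(void (*recv_fn)(struct msg *), const char *label)
{
    struct msg m;                  /* name[] stays uninitialized, like the stack
                                      sockaddr_storage in the report */
    m.namelen = sizeof(m.name);    /* simplified caller default: whole buffer */
    recv_fn(&m);
    printf("%s: would copy %zu bytes back to the user\n", label, m.namelen);
}

int main(void)
{
    caller(recv_noname_buggy, "buggy");   /* 128 uninitialized bytes */
    caller(recv_noname_fixed, "fixed");   /* 0 bytes */
    return 0;
}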
CVE-2016-5126
https://www.cvedetails.com/cve/CVE-2016-5126/
CWE-119
https://git.qemu.org/?p=qemu.git;a=commit;h=a6b3167fa0e825aebb5a7cd8b437b6d41584a196
a6b3167fa0e825aebb5a7cd8b437b6d41584a196
null
static int coroutine_fn iscsi_co_flush(BlockDriverState *bs) { IscsiLun *iscsilun = bs->opaque; struct IscsiTask iTask; iscsi_co_init_iscsitask(iscsilun, &iTask); retry: if (iscsi_synchronizecache10_task(iscsilun->iscsi, iscsilun->lun, 0, 0, 0, 0, iscsi_co_generic_cb, &iTask) == NULL) { return -ENOMEM; } while (!iTask.complete) { iscsi_set_events(iscsilun); qemu_coroutine_yield(); } if (iTask.task != NULL) { scsi_free_scsi_task(iTask.task); iTask.task = NULL; } if (iTask.do_retry) { iTask.complete = 0; goto retry; } if (iTask.status != SCSI_STATUS_GOOD) { return iTask.err_code; } return 0; }
static int coroutine_fn iscsi_co_flush(BlockDriverState *bs) { IscsiLun *iscsilun = bs->opaque; struct IscsiTask iTask; iscsi_co_init_iscsitask(iscsilun, &iTask); retry: if (iscsi_synchronizecache10_task(iscsilun->iscsi, iscsilun->lun, 0, 0, 0, 0, iscsi_co_generic_cb, &iTask) == NULL) { return -ENOMEM; } while (!iTask.complete) { iscsi_set_events(iscsilun); qemu_coroutine_yield(); } if (iTask.task != NULL) { scsi_free_scsi_task(iTask.task); iTask.task = NULL; } if (iTask.do_retry) { iTask.complete = 0; goto retry; } if (iTask.status != SCSI_STATUS_GOOD) { return iTask.err_code; } return 0; }
C
qemu
0
CVE-2016-7133
https://www.cvedetails.com/cve/CVE-2016-7133/
CWE-190
https://github.com/php/php-src/commit/c2a13ced4272f2e65d2773e2ea6ca11c1ce4a911?w=1
c2a13ced4272f2e65d2773e2ea6ca11c1ce4a911
Fix bug #72742 - memory allocator fails to realloc small block to large one
ZEND_API void shutdown_memory_manager(int silent, int full_shutdown) { zend_mm_shutdown(AG(mm_heap), full_shutdown, silent); }
ZEND_API void shutdown_memory_manager(int silent, int full_shutdown) { zend_mm_shutdown(AG(mm_heap), full_shutdown, silent); }
C
php-src
0
CVE-2016-10741
https://www.cvedetails.com/cve/CVE-2016-10741/
CWE-362
https://github.com/torvalds/linux/commit/04197b341f23b908193308b8d63d17ff23232598
04197b341f23b908193308b8d63d17ff23232598
xfs: don't BUG() on mixed direct and mapped I/O We've had reports of generic/095 causing XFS to BUG() in __xfs_get_blocks() due to the existence of delalloc blocks on a direct I/O read. generic/095 issues a mix of various types of I/O, including direct and memory mapped I/O to a single file. This is clearly not supported behavior and is known to lead to such problems. E.g., the lack of exclusion between the direct I/O and write fault paths means that a write fault can allocate delalloc blocks in a region of a file that was previously a hole after the direct read has attempted to flush/inval the file range, but before it actually reads the block mapping. In turn, the direct read discovers a delalloc extent and cannot proceed. While the appropriate solution here is to not mix direct and memory mapped I/O to the same regions of the same file, the current BUG_ON() behavior is probably overkill as it can crash the entire system. Instead, localize the failure to the I/O in question by returning an error for a direct I/O that cannot be handled safely due to delalloc blocks. Be careful to allow the case of a direct write to post-eof delalloc blocks. This can occur due to speculative preallocation and is safe as post-eof blocks are not accompanied by dirty pages in pagecache (conversely, preallocation within eof must have been zeroed, and thus dirtied, before the inode size could have been increased beyond said blocks). Finally, provide an additional warning if a direct I/O write occurs while the file is memory mapped. This may not catch all problematic scenarios, but provides a hint that some known-to-be-problematic I/O methods are in use. Signed-off-by: Brian Foster <bfoster@redhat.com> Reviewed-by: Dave Chinner <dchinner@redhat.com> Signed-off-by: Dave Chinner <david@fromorbit.com>
xfs_find_bdev_for_inode( struct inode *inode) { struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; if (XFS_IS_REALTIME_INODE(ip)) return mp->m_rtdev_targp->bt_bdev; else return mp->m_ddev_targp->bt_bdev; }
xfs_find_bdev_for_inode( struct inode *inode) { struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; if (XFS_IS_REALTIME_INODE(ip)) return mp->m_rtdev_targp->bt_bdev; else return mp->m_ddev_targp->bt_bdev; }
C
linux
0
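The xfs commit message above describes what goes wrong when direct I/O and memory-mapped writes touch the same file range. Below is a rough userspace sketch of that access pattern; the path, sizes, and the sequential ordering are all assumptions for illustration, and it only demonstrates the kind of mixed usage being warned against, not a reliable reproducer of the race.

/* Illustrative only: mixing a mmap write fault with a direct read on the
 * same file, the combination the commit message says is unsupported.
 * The path is hypothetical; run on an XFS mount to match the report. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    const char *path = "/mnt/xfs/testfile";   /* hypothetical XFS-backed file */
    const size_t len = 1 << 20;               /* 1 MiB */

    int dfd = open(path, O_RDWR | O_CREAT | O_DIRECT, 0644);
    int mfd = open(path, O_RDWR);
    if (dfd < 0 || mfd < 0) { perror("open"); return 1; }
    if (ftruncate(mfd, (off_t)len) < 0) { perror("ftruncate"); return 1; }

    /* O_DIRECT buffers must be suitably aligned. */
    void *buf;
    if (posix_memalign(&buf, 4096, len)) { perror("posix_memalign"); return 1; }

    /* A write fault through the mapping can allocate delalloc blocks... */
    char *map = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, mfd, 0);
    if (map == MAP_FAILED) { perror("mmap"); return 1; }
    memset(map, 'x', len);

    /* ...while a direct read of the same range may then find delalloc
     * extents; after the fix this returns an error instead of BUG()ing. */
    ssize_t n = pread(dfd, buf, len, 0);
    printf("direct read returned %zd\n", n);

    munmap(map, len);
    free(buf);
    close(dfd);
    close(mfd);
    return 0;
}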
null
null
null
https://github.com/chromium/chromium/commit/9ad7483d8e7c20e9f1a5a08d00150fb51899f14c
9ad7483d8e7c20e9f1a5a08d00150fb51899f14c
Shutdown Timebomb - In canary, get a callstack if it takes longer than 10 minutes. In Dev, get callstack if it takes longer than 20 minutes. In Beta (50 minutes) and Stable (100 minutes) it is same as before. BUG=519321 R=asvitkine@chromium.org Review URL: https://codereview.chromium.org/1409333005 Cr-Commit-Position: refs/heads/master@{#355586}
StartupTimeBomb::~StartupTimeBomb() { DCHECK(this == g_startup_timebomb_); DCHECK_EQ(thread_id_, base::PlatformThread::CurrentId()); if (startup_watchdog_) Disarm(); g_startup_timebomb_ = nullptr; }
StartupTimeBomb::~StartupTimeBomb() { DCHECK(this == g_startup_timebomb_); DCHECK_EQ(thread_id_, base::PlatformThread::CurrentId()); if (startup_watchdog_) Disarm(); g_startup_timebomb_ = nullptr; }
C
Chrome
0
CVE-2012-5148
https://www.cvedetails.com/cve/CVE-2012-5148/
CWE-20
https://github.com/chromium/chromium/commit/e89cfcb9090e8c98129ae9160c513f504db74599
e89cfcb9090e8c98129ae9160c513f504db74599
Remove TabContents from TabStripModelObserver::TabDetachedAt. BUG=107201 TEST=no visible change Review URL: https://chromiumcodereview.appspot.com/11293205 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@167122 0039d316-1c4b-4281-b951-d872f2087c98
void TabStripGtk::FinishAnimation(TabStripGtk::TabAnimation* animation, bool layout) { active_animation_.reset(NULL); for (int i = 0, count = GetTabCount(); i < count; ++i) GetTabAt(i)->set_animating_mini_change(false); if (layout) Layout(); }
void TabStripGtk::FinishAnimation(TabStripGtk::TabAnimation* animation, bool layout) { active_animation_.reset(NULL); for (int i = 0, count = GetTabCount(); i < count; ++i) GetTabAt(i)->set_animating_mini_change(false); if (layout) Layout(); }
C
Chrome
0
null
null
null
https://github.com/chromium/chromium/commit/1a113d35a19c0ed6500fb5c0acdc35730617fb3f
1a113d35a19c0ed6500fb5c0acdc35730617fb3f
Gracefully deal with clearing content settings for unregistered extensions. BUG=128652 Review URL: https://chromiumcodereview.appspot.com/10907093 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@155341 0039d316-1c4b-4281-b951-d872f2087c98
void ContentSettingsStore::AddObserver(Observer* observer) { DCHECK(OnCorrectThread()); observers_.AddObserver(observer); }
void ContentSettingsStore::AddObserver(Observer* observer) { DCHECK(OnCorrectThread()); observers_.AddObserver(observer); }
C
Chrome
0
CVE-2017-12897
https://www.cvedetails.com/cve/CVE-2017-12897/
CWE-125
https://github.com/the-tcpdump-group/tcpdump/commit/1dcd10aceabbc03bf571ea32b892c522cbe923de
1dcd10aceabbc03bf571ea32b892c522cbe923de
CVE-2017-12897/ISO CLNS: Use ND_TTEST() for the bounds checks in isoclns_print(). This fixes a buffer over-read discovered by Kamil Frankowicz. Don't pass the remaining caplen - that's too hard to get right, and we were getting it wrong in at least one case; just use ND_TTEST(). Add a test using the capture file supplied by the reporter(s).
frf15_print(netdissect_options *ndo, const u_char *p, u_int length) { uint16_t sequence_num, flags; if (length < 2) goto trunc; ND_TCHECK2(*p, 2); flags = p[0]&MFR_BEC_MASK; sequence_num = (p[0]&0x1e)<<7 | p[1]; ND_PRINT((ndo, "FRF.15, seq 0x%03x, Flags [%s],%s Fragmentation, length %u", sequence_num, bittok2str(frf_flag_values,"none",flags), p[0]&FR_FRF15_FRAGTYPE ? "Interface" : "End-to-End", length)); /* TODO: * depending on all permutations of the B, E and C bit * dig as deep as we can - e.g. on the first (B) fragment * there is enough payload to print the IP header * on non (B) fragments it depends if the fragmentation * model is end-to-end or interface based wether we want to print * another Q.922 header */ return; trunc: ND_PRINT((ndo, "[|frf.15]")); }
frf15_print(netdissect_options *ndo, const u_char *p, u_int length) { uint16_t sequence_num, flags; if (length < 2) goto trunc; ND_TCHECK2(*p, 2); flags = p[0]&MFR_BEC_MASK; sequence_num = (p[0]&0x1e)<<7 | p[1]; ND_PRINT((ndo, "FRF.15, seq 0x%03x, Flags [%s],%s Fragmentation, length %u", sequence_num, bittok2str(frf_flag_values,"none",flags), p[0]&FR_FRF15_FRAGTYPE ? "Interface" : "End-to-End", length)); /* TODO: * depending on all permutations of the B, E and C bit * dig as deep as we can - e.g. on the first (B) fragment * there is enough payload to print the IP header * on non (B) fragments it depends if the fragmentation * model is end-to-end or interface based wether we want to print * another Q.922 header */ return; trunc: ND_PRINT((ndo, "[|frf.15]")); }
C
tcpdump
0
CVE-2015-5283
https://www.cvedetails.com/cve/CVE-2015-5283/
CWE-119
https://github.com/torvalds/linux/commit/8e2d61e0aed2b7c4ecb35844fe07e0b2b762dee4
8e2d61e0aed2b7c4ecb35844fe07e0b2b762dee4
sctp: fix race on protocol/netns initialization Consider sctp module is unloaded and is being requested because an user is creating a sctp socket. During initialization, sctp will add the new protocol type and then initialize pernet subsys: status = sctp_v4_protosw_init(); if (status) goto err_protosw_init; status = sctp_v6_protosw_init(); if (status) goto err_v6_protosw_init; status = register_pernet_subsys(&sctp_net_ops); The problem is that after those calls to sctp_v{4,6}_protosw_init(), it is possible for userspace to create SCTP sockets like if the module is already fully loaded. If that happens, one of the possible effects is that we will have readers for net->sctp.local_addr_list list earlier than expected and sctp_net_init() does not take precautions while dealing with that list, leading to a potential panic but not limited to that, as sctp_sock_init() will copy a bunch of blank/partially initialized values from net->sctp. The race happens like this: CPU 0 | CPU 1 socket() | __sock_create | socket() inet_create | __sock_create list_for_each_entry_rcu( | answer, &inetsw[sock->type], | list) { | inet_create /* no hits */ | if (unlikely(err)) { | ... | request_module() | /* socket creation is blocked | * the module is fully loaded | */ | sctp_init | sctp_v4_protosw_init | inet_register_protosw | list_add_rcu(&p->list, | last_perm); | | list_for_each_entry_rcu( | answer, &inetsw[sock->type], sctp_v6_protosw_init | list) { | /* hit, so assumes protocol | * is already loaded | */ | /* socket creation continues | * before netns is initialized | */ register_pernet_subsys | Simply inverting the initialization order between register_pernet_subsys() and sctp_v4_protosw_init() is not possible because register_pernet_subsys() will create a control sctp socket, so the protocol must be already visible by then. Deferring the socket creation to a work-queue is not good specially because we loose the ability to handle its errors. So, as suggested by Vlad, the fix is to split netns initialization in two moments: defaults and control socket, so that the defaults are already loaded by when we register the protocol, while control socket initialization is kept at the same moment it is today. Fixes: 4db67e808640 ("sctp: Make the address lists per network namespace") Signed-off-by: Vlad Yasevich <vyasevich@gmail.com> Signed-off-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
static struct sock *sctp_v4_create_accept_sk(struct sock *sk, struct sctp_association *asoc) { struct sock *newsk = sk_alloc(sock_net(sk), PF_INET, GFP_KERNEL, sk->sk_prot, 0); struct inet_sock *newinet; if (!newsk) goto out; sock_init_data(NULL, newsk); sctp_copy_sock(newsk, sk, asoc); sock_reset_flag(newsk, SOCK_ZAPPED); newinet = inet_sk(newsk); newinet->inet_daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr; sk_refcnt_debug_inc(newsk); if (newsk->sk_prot->init(newsk)) { sk_common_release(newsk); newsk = NULL; } out: return newsk; }
static struct sock *sctp_v4_create_accept_sk(struct sock *sk, struct sctp_association *asoc) { struct sock *newsk = sk_alloc(sock_net(sk), PF_INET, GFP_KERNEL, sk->sk_prot, 0); struct inet_sock *newinet; if (!newsk) goto out; sock_init_data(NULL, newsk); sctp_copy_sock(newsk, sk, asoc); sock_reset_flag(newsk, SOCK_ZAPPED); newinet = inet_sk(newsk); newinet->inet_daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr; sk_refcnt_debug_inc(newsk); if (newsk->sk_prot->init(newsk)) { sk_common_release(newsk); newsk = NULL; } out: return newsk; }
C
linux
0
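The sctp commit above fixes a race between module auto-load and per-netns initialization that is triggered simply by creating SCTP sockets. A minimal sketch of the triggering call pattern from userspace follows; it assumes a kernel where the sctp module can be loaded on demand, and the thread count and socket style are arbitrary choices, not part of the original report.

/* Racing socket() calls of the kind the commit message describes: several
 * threads ask for an SCTP socket at once while the module may still be
 * initializing.  This shows the call pattern only, not a guaranteed trigger. */
#include <netinet/in.h>
#include <pthread.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

static void *worker(void *arg)
{
    (void)arg;
    int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
    if (fd >= 0)
        close(fd);
    return NULL;
}

int main(void)
{
    pthread_t tids[8];

    for (int i = 0; i < 8; i++)
        pthread_create(&tids[i], NULL, worker, NULL);
    for (int i = 0; i < 8; i++)
        pthread_join(tids[i], NULL);

    puts("done");
    return 0;
}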
null
null
null
https://github.com/chromium/chromium/commit/c4363d1ca65494cb7b271625e1ff6541a9f593c9
c4363d1ca65494cb7b271625e1ff6541a9f593c9
ozone: evdev: Add a couple more trace events Add trace event inside each read notification for evdev. BUG=none TEST=chrome://tracing in link_freon Review URL: https://codereview.chromium.org/1110693003 Cr-Commit-Position: refs/heads/master@{#327110}
void TouchEventConverterEvdev::ProcessAbs(const input_event& input) { switch (input.code) { case ABS_MT_TOUCH_MAJOR: events_[current_slot_].radius_x = input.value / 2.0f; break; case ABS_MT_TOUCH_MINOR: events_[current_slot_].radius_y = input.value / 2.0f; break; case ABS_MT_POSITION_X: events_[current_slot_].x = input.value; break; case ABS_MT_POSITION_Y: events_[current_slot_].y = input.value; break; case ABS_MT_TRACKING_ID: UpdateTrackingId(current_slot_, input.value); break; case ABS_MT_PRESSURE: events_[current_slot_].pressure = ScalePressure(input.value); break; case ABS_MT_SLOT: if (input.value >= 0 && static_cast<size_t>(input.value) < events_.size()) { current_slot_ = input.value; } else { LOG(ERROR) << "invalid touch event index: " << input.value; return; } break; default: DVLOG(5) << "unhandled code for EV_ABS: " << input.code; return; } events_[current_slot_].altered = true; }
void TouchEventConverterEvdev::ProcessAbs(const input_event& input) { switch (input.code) { case ABS_MT_TOUCH_MAJOR: events_[current_slot_].radius_x = input.value / 2.0f; break; case ABS_MT_TOUCH_MINOR: events_[current_slot_].radius_y = input.value / 2.0f; break; case ABS_MT_POSITION_X: events_[current_slot_].x = input.value; break; case ABS_MT_POSITION_Y: events_[current_slot_].y = input.value; break; case ABS_MT_TRACKING_ID: UpdateTrackingId(current_slot_, input.value); break; case ABS_MT_PRESSURE: events_[current_slot_].pressure = ScalePressure(input.value); break; case ABS_MT_SLOT: if (input.value >= 0 && static_cast<size_t>(input.value) < events_.size()) { current_slot_ = input.value; } else { LOG(ERROR) << "invalid touch event index: " << input.value; return; } break; default: DVLOG(5) << "unhandled code for EV_ABS: " << input.code; return; } events_[current_slot_].altered = true; }
C
Chrome
0
CVE-2016-6516
https://www.cvedetails.com/cve/CVE-2016-6516/
CWE-119
https://github.com/torvalds/linux/commit/10eec60ce79187686e052092e5383c99b4420a20
10eec60ce79187686e052092e5383c99b4420a20
vfs: ioctl: prevent double-fetch in dedupe ioctl This prevents a double-fetch from user space that can lead to to an undersized allocation and heap overflow. Fixes: 54dbc1517237 ("vfs: hoist the btrfs deduplication ioctl to the vfs") Signed-off-by: Scott Bauer <sbauer@plzdonthack.me> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
int generic_block_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len, get_block_t *get_block) { int ret; inode_lock(inode); ret = __generic_block_fiemap(inode, fieinfo, start, len, get_block); inode_unlock(inode); return ret; }
int generic_block_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len, get_block_t *get_block) { int ret; inode_lock(inode); ret = __generic_block_fiemap(inode, fieinfo, start, len, get_block); inode_unlock(inode); return ret; }
C
linux
0
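The dedupe ioctl commit above fixes a classic double-fetch: a user-controlled count is read once to size an allocation and read again to drive the copy. The standalone sketch below uses invented struct and function names (it is not the real VFS dedupe interface) to contrast the racy shape with the single-fetch shape.

/* Standalone illustration of a double-fetch.  All names are made up for
 * the demo; the point is the bug shape, not the actual ioctl. */
#include <stdio.h>
#include <stdlib.h>

struct demo_args {
    unsigned short count;   /* number of trailing records, caller controlled */
    unsigned long  recs[];  /* flexible array of records */
};

/* Racy shape: the count is fetched twice from the shared source buffer. */
static struct demo_args *import_racy(const volatile struct demo_args *src)
{
    size_t n = src->count;                          /* fetch #1 sizes the buffer */
    struct demo_args *dst = malloc(sizeof(*dst) + n * sizeof(unsigned long));
    if (!dst)
        return NULL;
    dst->count = src->count;                        /* fetch #2 drives the copy */
    /* A writer that raises src->count between the two fetches makes this
     * loop run past the allocation sized from fetch #1. */
    for (size_t i = 0; i < dst->count; i++)
        dst->recs[i] = src->recs[i];
    return dst;
}

/* Fixed shape: fetch the count once and use that single value throughout. */
static struct demo_args *import_safe(const volatile struct demo_args *src)
{
    size_t n = src->count;                          /* single fetch */
    struct demo_args *dst = malloc(sizeof(*dst) + n * sizeof(unsigned long));
    if (!dst)
        return NULL;
    dst->count = (unsigned short)n;
    for (size_t i = 0; i < n; i++)
        dst->recs[i] = src->recs[i];
    return dst;
}

int main(void)
{
    struct demo_args *src = malloc(sizeof(*src) + 4 * sizeof(unsigned long));
    if (!src)
        return 1;
    src->count = 4;
    for (size_t i = 0; i < 4; i++)
        src->recs[i] = i;

    struct demo_args *a = import_safe(src);
    struct demo_args *b = import_racy(src);   /* harmless here: no concurrent writer */
    printf("copied %u and %u records\n",
           a ? (unsigned)a->count : 0u, b ? (unsigned)b->count : 0u);

    free(a);
    free(b);
    free(src);
    return 0;
}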
CVE-2015-6783
https://www.cvedetails.com/cve/CVE-2015-6783/
CWE-20
https://github.com/chromium/chromium/commit/d9e316238aee59acf665d80b544cf4e1edfd3349
d9e316238aee59acf665d80b544cf4e1edfd3349
crazy linker: Alter search for zip EOCD start When loading directly from APK, begin searching backwards for the zip EOCD record signature at size of EOCD record bytes before the end of the file. BUG=537205 R=rmcilroy@chromium.org Review URL: https://codereview.chromium.org/1390553002 . Cr-Commit-Position: refs/heads/master@{#352577}
inline uint32_t ReadUInt16(uint8_t* mem_bytes, int offset) { return static_cast<uint32_t>(mem_bytes[offset]) | (static_cast<uint32_t>(mem_bytes[offset + 1]) << 8); }
inline uint32_t ReadUInt16(uint8_t* mem_bytes, int offset) { return static_cast<uint32_t>(mem_bytes[offset]) | (static_cast<uint32_t>(mem_bytes[offset + 1]) << 8); }
C
Chrome
0
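The crazy linker commit above changes where the backwards scan for the zip end-of-central-directory (EOCD) record begins. A generic helper using that search strategy is sketched below; it relies only on the public zip format (22-byte fixed EOCD, "PK\x05\x06" signature, trailing comment of up to 64 KiB) and is not the Chromium code.

/* Scan backwards for the zip EOCD signature, starting EOCD_SIZE bytes
 * before the end of the buffer as the commit message describes. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define EOCD_SIZE 22u           /* fixed part of the EOCD record */
#define EOCD_MAX_COMMENT 0xFFFFu

/* Returns the offset of the EOCD record, or -1 if none is found. */
static long find_zip_eocd(const uint8_t *data, size_t len)
{
    if (len < EOCD_SIZE)
        return -1;

    size_t start = len - EOCD_SIZE;
    /* The EOCD may sit further back if the archive carries a comment. */
    size_t lowest = start > EOCD_MAX_COMMENT ? start - EOCD_MAX_COMMENT : 0;

    for (size_t off = start; ; off--) {
        if (data[off] == 0x50 && data[off + 1] == 0x4b &&
            data[off + 2] == 0x05 && data[off + 3] == 0x06)
            return (long)off;
        if (off == lowest)
            break;
    }
    return -1;
}

int main(void)
{
    /* An empty zip archive is just a 22-byte EOCD record. */
    uint8_t empty_zip[EOCD_SIZE] = { 0x50, 0x4b, 0x05, 0x06 };
    printf("EOCD offset: %ld\n", find_zip_eocd(empty_zip, sizeof(empty_zip)));
    return 0;
}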
CVE-2014-9710
https://www.cvedetails.com/cve/CVE-2014-9710/
CWE-362
https://github.com/torvalds/linux/commit/5f5bc6b1e2d5a6f827bc860ef2dc5b6f365d1339
5f5bc6b1e2d5a6f827bc860ef2dc5b6f365d1339
Btrfs: make xattr replace operations atomic Replacing a xattr consists of doing a lookup for its existing value, delete the current value from the respective leaf, release the search path and then finally insert the new value. This leaves a time window where readers (getxattr, listxattrs) won't see any value for the xattr. Xattrs are used to store ACLs, so this has security implications. This change also fixes 2 other existing issues which were: *) Deleting the old xattr value without verifying first if the new xattr will fit in the existing leaf item (in case multiple xattrs are packed in the same item due to name hash collision); *) Returning -EEXIST when the flag XATTR_CREATE is given and the xattr doesn't exist but we have have an existing item that packs muliple xattrs with the same name hash as the input xattr. In this case we should return ENOSPC. A test case for xfstests follows soon. Thanks to Alexandre Oliva for reporting the non-atomicity of the xattr replace implementation. Reported-by: Alexandre Oliva <oliva@gnu.org> Signed-off-by: Filipe Manana <fdmanana@suse.com> Signed-off-by: Chris Mason <clm@fb.com>
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key, int level, int *slot) { if (level == 0) return generic_bin_search(eb, offsetof(struct btrfs_leaf, items), sizeof(struct btrfs_item), key, btrfs_header_nritems(eb), slot); else return generic_bin_search(eb, offsetof(struct btrfs_node, ptrs), sizeof(struct btrfs_key_ptr), key, btrfs_header_nritems(eb), slot); }
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key, int level, int *slot) { if (level == 0) return generic_bin_search(eb, offsetof(struct btrfs_leaf, items), sizeof(struct btrfs_item), key, btrfs_header_nritems(eb), slot); else return generic_bin_search(eb, offsetof(struct btrfs_node, ptrs), sizeof(struct btrfs_key_ptr), key, btrfs_header_nritems(eb), slot); }
C
linux
0
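The btrfs commit above is about making xattr replacement atomic so concurrent readers never observe the value missing. From userspace the operations involved are plain setxattr()/getxattr() calls; the sketch below uses a made-up path and attribute name and simply exercises the create/replace flags the commit message talks about.

/* Create, then replace, a user xattr; path and attribute name are invented. */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/xattr.h>

int main(void)
{
    const char *path = "testfile";            /* hypothetical file on btrfs */
    const char *value1 = "acl-v1";
    const char *value2 = "acl-v2";

    /* XATTR_CREATE fails with EEXIST if the attribute already exists. */
    if (setxattr(path, "user.demo", value1, strlen(value1), XATTR_CREATE) < 0)
        perror("setxattr(XATTR_CREATE)");

    /* With the patch, readers racing this replace still see some value. */
    if (setxattr(path, "user.demo", value2, strlen(value2), XATTR_REPLACE) < 0)
        perror("setxattr(XATTR_REPLACE)");

    char buf[64];
    ssize_t n = getxattr(path, "user.demo", buf, sizeof(buf) - 1);
    if (n >= 0) {
        buf[n] = '\0';
        printf("user.demo = %s\n", buf);
    }
    return 0;
}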
null
null
null
https://github.com/chromium/chromium/commit/ee8d6fd30b022ac2c87b7a190c954e7bb3c9b21e
ee8d6fd30b022ac2c87b7a190c954e7bb3c9b21e
Clean up calls like "gfx::Rect(0, 0, size().width(), size().height()". The caller can use the much shorter "gfx::Rect(size())", since gfx::Rect has a constructor that just takes a Size. BUG=none TEST=none Review URL: http://codereview.chromium.org/2204001 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@48283 0039d316-1c4b-4281-b951-d872f2087c98
void WebPluginDelegateProxy::OnSetWindow(gfx::PluginWindowHandle window) { uses_shared_bitmaps_ = !window; window_ = window; if (plugin_) plugin_->SetWindow(window); }
void WebPluginDelegateProxy::OnSetWindow(gfx::PluginWindowHandle window) { uses_shared_bitmaps_ = !window; window_ = window; if (plugin_) plugin_->SetWindow(window); }
C
Chrome
0
CVE-2017-5112
https://www.cvedetails.com/cve/CVE-2017-5112/
CWE-119
https://github.com/chromium/chromium/commit/f6ac1dba5e36f338a490752a2cbef3339096d9fe
f6ac1dba5e36f338a490752a2cbef3339096d9fe
Reset ES3 pixel pack parameters and PIXEL_PACK_BUFFER binding in DrawingBuffer before ReadPixels() and recover them later. BUG=740603 TEST=new conformance test R=kbr@chromium.org,piman@chromium.org Change-Id: I3ea54c6cc34f34e249f7c8b9f792d93c5e1958f4 Reviewed-on: https://chromium-review.googlesource.com/570840 Reviewed-by: Antoine Labour <piman@chromium.org> Reviewed-by: Kenneth Russell <kbr@chromium.org> Commit-Queue: Zhenyao Mo <zmo@chromium.org> Cr-Commit-Position: refs/heads/master@{#486518}
void WebGLRenderingContextBase::shaderSource(WebGLShader* shader, const String& string) { if (isContextLost() || !ValidateWebGLObject("shaderSource", shader)) return; String string_without_comments = StripComments(string).Result(); if (!ValidateShaderSource(string_without_comments)) return; shader->SetSource(string); WTF::StringUTF8Adaptor adaptor(string_without_comments); const GLchar* shader_data = adaptor.Data(); const GLint shader_length = adaptor.length(); ContextGL()->ShaderSource(ObjectOrZero(shader), 1, &shader_data, &shader_length); }
void WebGLRenderingContextBase::shaderSource(WebGLShader* shader, const String& string) { if (isContextLost() || !ValidateWebGLObject("shaderSource", shader)) return; String string_without_comments = StripComments(string).Result(); if (!ValidateShaderSource(string_without_comments)) return; shader->SetSource(string); WTF::StringUTF8Adaptor adaptor(string_without_comments); const GLchar* shader_data = adaptor.Data(); const GLint shader_length = adaptor.length(); ContextGL()->ShaderSource(ObjectOrZero(shader), 1, &shader_data, &shader_length); }
C
Chrome
0
CVE-2011-2918
https://www.cvedetails.com/cve/CVE-2011-2918/
CWE-399
https://github.com/torvalds/linux/commit/a8b0ca17b80e92faab46ee7179ba9e99ccb61233
a8b0ca17b80e92faab46ee7179ba9e99ccb61233
perf: Remove the nmi parameter from the swevent and overflow interface The nmi parameter indicated if we could do wakeups from the current context, if not, we would set some state and self-IPI and let the resulting interrupt do the wakeup. For the various event classes: - hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from the PMI-tail (ARM etc.) - tracepoint: nmi=0; since tracepoint could be from NMI context. - software: nmi=[0,1]; some, like the schedule thing cannot perform wakeups, and hence need 0. As one can see, there is very little nmi=1 usage, and the down-side of not using it is that on some platforms some software events can have a jiffy delay in wakeup (when arch_irq_work_raise isn't implemented). The up-side however is that we can remove the nmi parameter and save a bunch of conditionals in fast paths. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Michael Cree <mcree@orcon.net.nz> Cc: Will Deacon <will.deacon@arm.com> Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com> Cc: Anton Blanchard <anton@samba.org> Cc: Eric B Munson <emunson@mgebm.net> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Paul Mundt <lethal@linux-sh.org> Cc: David S. Miller <davem@davemloft.net> Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: Jason Wessel <jason.wessel@windriver.com> Cc: Don Zickus <dzickus@redhat.com> Link: http://lkml.kernel.org/n/tip-agjev8eu666tvknpb3iaj0fg@git.kernel.org Signed-off-by: Ingo Molnar <mingo@elte.hu>
static void perf_event_mmap_ctx(struct perf_event_context *ctx, struct perf_mmap_event *mmap_event, int executable) { struct perf_event *event; list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { if (perf_event_mmap_match(event, mmap_event, executable)) perf_event_mmap_output(event, mmap_event); } }
static void perf_event_mmap_ctx(struct perf_event_context *ctx, struct perf_mmap_event *mmap_event, int executable) { struct perf_event *event; list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { if (perf_event_mmap_match(event, mmap_event, executable)) perf_event_mmap_output(event, mmap_event); } }
C
linux
0
CVE-2011-4930
https://www.cvedetails.com/cve/CVE-2011-4930/
CWE-134
https://htcondor-git.cs.wisc.edu/?p=condor.git;a=commitdiff;h=5e5571d1a431eb3c61977b6dd6ec90186ef79867
5e5571d1a431eb3c61977b6dd6ec90186ef79867
null
int GahpClient::ec2_associate_address(const char * service_url, const char * publickeyfile, const char * privatekeyfile, const char * instance_id, const char * elastic_ip, StringList & returnStatus, char* & error_code ) { static const char* command = "EC2_VM_ASSOCIATE_ADDRESS"; int rc=0; if (server->m_commands_supported->contains_anycase(command)==FALSE) { return GAHPCLIENT_COMMAND_NOT_SUPPORTED; } if ( (service_url == NULL) || (publickeyfile == NULL) || (privatekeyfile == NULL) || (instance_id == NULL) || (elastic_ip == NULL) ) { return GAHPCLIENT_COMMAND_NOT_SUPPORTED; } std::string reqline; char* esc1 = strdup( escapeGahpString(service_url) ); char* esc2 = strdup( escapeGahpString(publickeyfile) ); char* esc3 = strdup( escapeGahpString(privatekeyfile) ); char* esc4 = strdup( escapeGahpString(instance_id) ); char* esc5 = strdup( escapeGahpString(elastic_ip) ); int x = sprintf(reqline, "%s %s %s %s %s", esc1, esc2, esc3, esc4, esc5 ); free( esc1 ); free( esc2 ); free( esc3 ); free( esc4 ); free( esc5 ); ASSERT( x > 0 ); const char *buf = reqline.c_str(); if ( !is_pending(command,buf) ) { if ( m_mode == results_only ) { return GAHPCLIENT_COMMAND_NOT_SUBMITTED; } now_pending(command, buf, deleg_proxy); } Gahp_Args* result = get_pending_result(command, buf); if ( result ) { int return_code = atoi(result->argv[1]); if (return_code == 1) { if (result->argc == 2) { error_string = ""; } else if (result->argc == 4) { error_code = strdup(result->argv[2]); error_string = result->argv[3]; } else { EXCEPT("Bad %s Result",command); } } else { // return_code == 0 if ( ( (result->argc-2) % 2) != 0 ) { EXCEPT("Bad %s Result",command); } else { for (int i=2; i<result->argc; i++) { returnStatus.append( strdup(result->argv[i]) ); } returnStatus.rewind(); } } delete result; } return rc; }
int GahpClient::ec2_associate_address(const char * service_url, const char * publickeyfile, const char * privatekeyfile, const char * instance_id, const char * elastic_ip, StringList & returnStatus, char* & error_code ) { static const char* command = "EC2_VM_ASSOCIATE_ADDRESS"; int rc=0; if (server->m_commands_supported->contains_anycase(command)==FALSE) { return GAHPCLIENT_COMMAND_NOT_SUPPORTED; } if ( (service_url == NULL) || (publickeyfile == NULL) || (privatekeyfile == NULL) || (instance_id == NULL) || (elastic_ip == NULL) ) { return GAHPCLIENT_COMMAND_NOT_SUPPORTED; } std::string reqline; char* esc1 = strdup( escapeGahpString(service_url) ); char* esc2 = strdup( escapeGahpString(publickeyfile) ); char* esc3 = strdup( escapeGahpString(privatekeyfile) ); char* esc4 = strdup( escapeGahpString(instance_id) ); char* esc5 = strdup( escapeGahpString(elastic_ip) ); int x = sprintf(reqline, "%s %s %s %s %s", esc1, esc2, esc3, esc4, esc5 ); free( esc1 ); free( esc2 ); free( esc3 ); free( esc4 ); free( esc5 ); ASSERT( x > 0 ); const char *buf = reqline.c_str(); if ( !is_pending(command,buf) ) { if ( m_mode == results_only ) { return GAHPCLIENT_COMMAND_NOT_SUBMITTED; } now_pending(command, buf, deleg_proxy); } Gahp_Args* result = get_pending_result(command, buf); if ( result ) { int return_code = atoi(result->argv[1]); if (return_code == 1) { if (result->argc == 2) { error_string = ""; } else if (result->argc == 4) { error_code = strdup(result->argv[2]); error_string = result->argv[3]; } else { EXCEPT("Bad %s Result",command); } } else { // return_code == 0 if ( ( (result->argc-2) % 2) != 0 ) { EXCEPT("Bad %s Result",command); } else { for (int i=2; i<result->argc; i++) { returnStatus.append( strdup(result->argv[i]) ); } returnStatus.rewind(); } } delete result; } return rc; }
CPP
htcondor
0
CVE-2015-8877
https://www.cvedetails.com/cve/CVE-2015-8877/
CWE-399
https://github.com/libgd/libgd/commit/4751b606fa38edc456d627140898a7ec679fcc24
4751b606fa38edc456d627140898a7ec679fcc24
gdImageScaleTwoPass memory leak fix Fixing memory leak in gdImageScaleTwoPass, as reported by @cmb69 and confirmed by @vapier. This bug actually bit me in production and I'm very thankful that it was reported with an easy fix. Fixes #173.
static double KernelBessel_Q1(const double x) { double p, q; register long i; static const double Pone[] = { 0.3511751914303552822533318e+3, 0.7210391804904475039280863e+3, 0.4259873011654442389886993e+3, 0.831898957673850827325226e+2, 0.45681716295512267064405e+1, 0.3532840052740123642735e-1 }, Qone[] = { 0.74917374171809127714519505e+4, 0.154141773392650970499848051e+5, 0.91522317015169922705904727e+4, 0.18111867005523513506724158e+4, 0.1038187585462133728776636e+3, 0.1e+1 }; p = Pone[5]; q = Qone[5]; for (i=4; i >= 0; i--) { p = p*(8.0/x)*(8.0/x)+Pone[i]; q = q*(8.0/x)*(8.0/x)+Qone[i]; } return (double)(p/q); }
static double KernelBessel_Q1(const double x) { double p, q; register long i; static const double Pone[] = { 0.3511751914303552822533318e+3, 0.7210391804904475039280863e+3, 0.4259873011654442389886993e+3, 0.831898957673850827325226e+2, 0.45681716295512267064405e+1, 0.3532840052740123642735e-1 }, Qone[] = { 0.74917374171809127714519505e+4, 0.154141773392650970499848051e+5, 0.91522317015169922705904727e+4, 0.18111867005523513506724158e+4, 0.1038187585462133728776636e+3, 0.1e+1 }; p = Pone[5]; q = Qone[5]; for (i=4; i >= 0; i--) { p = p*(8.0/x)*(8.0/x)+Pone[i]; q = q*(8.0/x)*(8.0/x)+Qone[i]; } return (double)(p/q); }
C
libgd
0
CVE-2013-4483
https://www.cvedetails.com/cve/CVE-2013-4483/
CWE-189
https://github.com/torvalds/linux/commit/6062a8dc0517bce23e3c2f7d2fea5e22411269a3
6062a8dc0517bce23e3c2f7d2fea5e22411269a3
ipc,sem: fine grained locking for semtimedop Introduce finer grained locking for semtimedop, to handle the common case of a program wanting to manipulate one semaphore from an array with multiple semaphores. If the call is a semop manipulating just one semaphore in an array with multiple semaphores, only take the lock for that semaphore itself. If the call needs to manipulate multiple semaphores, or another caller is in a transaction that manipulates multiple semaphores, the sem_array lock is taken, as well as all the locks for the individual semaphores. On a 24 CPU system, performance numbers with the semop-multi test with N threads and N semaphores, look like this: vanilla Davidlohr's Davidlohr's + Davidlohr's + threads patches rwlock patches v3 patches 10 610652 726325 1783589 2142206 20 341570 365699 1520453 1977878 30 288102 307037 1498167 2037995 40 290714 305955 1612665 2256484 50 288620 312890 1733453 2650292 60 289987 306043 1649360 2388008 70 291298 306347 1723167 2717486 80 290948 305662 1729545 2763582 90 290996 306680 1736021 2757524 100 292243 306700 1773700 3059159 [davidlohr.bueso@hp.com: do not call sem_lock when bogus sma] [davidlohr.bueso@hp.com: make refcounter atomic] Signed-off-by: Rik van Riel <riel@redhat.com> Suggested-by: Linus Torvalds <torvalds@linux-foundation.org> Acked-by: Davidlohr Bueso <davidlohr.bueso@hp.com> Cc: Chegu Vinod <chegu_vinod@hp.com> Cc: Jason Low <jason.low2@hp.com> Reviewed-by: Michel Lespinasse <walken@google.com> Cc: Peter Hurley <peter@hurleysoftware.com> Cc: Stanislav Kinsbursky <skinsbursky@parallels.com> Tested-by: Emmanuel Benisty <benisty.e@gmail.com> Tested-by: Sedat Dilek <sedat.dilek@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp) { int lid = ipcid_to_idx(ipcp->id); idr_remove(&ids->ipcs_idr, lid); ids->in_use--; ipcp->deleted = 1; return; }
void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp) { int lid = ipcid_to_idx(ipcp->id); idr_remove(&ids->ipcs_idr, lid); ids->in_use--; ipcp->deleted = 1; return; }
C
linux
0
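The semtimedop commit above optimizes the common case of manipulating one semaphore inside a larger array. The userspace sketch below issues exactly that kind of single-semaphore semop(); the key, array size, and semaphore index are arbitrary values chosen for the example.

/* One-semaphore operation on an array of several SysV semaphores, the
 * fast path the patch gives its own fine-grained lock. */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/types.h>

int main(void)
{
    /* Array of 8 semaphores, private to this process. */
    int semid = semget(IPC_PRIVATE, 8, IPC_CREAT | 0600);
    if (semid < 0) { perror("semget"); return 1; }

    /* Increment semaphore #3 by one; with the patch only that
     * semaphore's lock is taken, not the whole array's. */
    struct sembuf op = { .sem_num = 3, .sem_op = 1, .sem_flg = 0 };
    if (semop(semid, &op, 1) < 0) { perror("semop"); return 1; }

    printf("sem 3 value: %d\n", semctl(semid, 3, GETVAL));
    semctl(semid, 0, IPC_RMID);
    return 0;
}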
CVE-2016-3839
https://www.cvedetails.com/cve/CVE-2016-3839/
CWE-284
https://android.googlesource.com/platform/system/bt/+/472271b153c5dc53c28beac55480a8d8434b2d5c
472271b153c5dc53c28beac55480a8d8434b2d5c
DO NOT MERGE Fix potential DoS caused by delivering signal to BT process Bug: 28885210 Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360 Conflicts: btif/co/bta_hh_co.c btif/src/btif_core.c Merge conflict resolution of ag/1161415 (referencing ag/1164670) - Directly into mnc-mr2-release
bt_status_t btif_dut_mode_send(uint16_t opcode, uint8_t *buf, uint8_t len) { /* TODO: Check that opcode is a vendor command group */ BTIF_TRACE_DEBUG("%s", __FUNCTION__); if (!btif_is_dut_mode()) { BTIF_TRACE_ERROR("Bluedroid HAL needs to be init with test_mode set to 1."); return BT_STATUS_FAIL; } BTM_VendorSpecificCommand(opcode, len, buf, btif_dut_mode_cback); return BT_STATUS_SUCCESS; }
bt_status_t btif_dut_mode_send(uint16_t opcode, uint8_t *buf, uint8_t len) { /* TODO: Check that opcode is a vendor command group */ BTIF_TRACE_DEBUG("%s", __FUNCTION__); if (!btif_is_dut_mode()) { BTIF_TRACE_ERROR("Bluedroid HAL needs to be init with test_mode set to 1."); return BT_STATUS_FAIL; } BTM_VendorSpecificCommand(opcode, len, buf, btif_dut_mode_cback); return BT_STATUS_SUCCESS; }
C
Android
0
CVE-2014-1713
https://www.cvedetails.com/cve/CVE-2014-1713/
CWE-399
https://github.com/chromium/chromium/commit/f85a87ec670ad0fce9d98d90c9a705b72a288154
f85a87ec670ad0fce9d98d90c9a705b72a288154
document.location bindings fix BUG=352374 R=jochen@chromium.org Review URL: https://codereview.chromium.org/196343011 git-svn-id: svn://svn.chromium.org/blink/trunk@169176 bbb929c8-8fbe-4397-9dbb-9b2b20218538
static void octetAttrAttributeGetter(const v8::PropertyCallbackInfo<v8::Value>& info) { TestObject* imp = V8TestObject::toNative(info.Holder()); v8SetReturnValueUnsigned(info, imp->octetAttr()); }
static void octetAttrAttributeGetter(const v8::PropertyCallbackInfo<v8::Value>& info) { TestObject* imp = V8TestObject::toNative(info.Holder()); v8SetReturnValueUnsigned(info, imp->octetAttr()); }
C
Chrome
0
CVE-2014-3175
https://www.cvedetails.com/cve/CVE-2014-3175/
null
https://github.com/chromium/chromium/commit/4843d98517bd37e5940cd04627c6cfd2ac774d11
4843d98517bd37e5940cd04627c6cfd2ac774d11
Remove clock resolution page load histograms. These were temporary metrics intended to understand whether high/low resolution clocks adversely impact page load metrics. After collecting a few months of data it was determined that clock resolution doesn't adversely impact our metrics, and it that these histograms were no longer needed. BUG=394757 Review-Url: https://codereview.chromium.org/2155143003 Cr-Commit-Position: refs/heads/master@{#406143}
void CorePageLoadMetricsObserver::OnFirstContentfulPaint( const page_load_metrics::PageLoadTiming& timing, const page_load_metrics::PageLoadExtraInfo& info) { if (WasStartedInForegroundOptionalEventInForeground( timing.first_contentful_paint, info)) { PAGE_LOAD_HISTOGRAM(internal::kHistogramFirstContentfulPaintImmediate, timing.first_contentful_paint.value()); PAGE_LOAD_HISTOGRAM( internal::kHistogramParseStartToFirstContentfulPaintImmediate, timing.first_contentful_paint.value() - timing.parse_start.value()); switch (GetPageLoadType(transition_)) { case LOAD_TYPE_RELOAD: PAGE_LOAD_HISTOGRAM( internal::kHistogramLoadTypeFirstContentfulPaintReload, timing.first_contentful_paint.value()); if (initiated_by_user_gesture_) { PAGE_LOAD_HISTOGRAM( internal::kHistogramLoadTypeFirstContentfulPaintReloadByGesture, timing.first_contentful_paint.value()); } break; case LOAD_TYPE_FORWARD_BACK: PAGE_LOAD_HISTOGRAM( internal::kHistogramLoadTypeFirstContentfulPaintForwardBack, timing.first_contentful_paint.value()); break; case LOAD_TYPE_NEW_NAVIGATION: PAGE_LOAD_HISTOGRAM( internal::kHistogramLoadTypeFirstContentfulPaintNewNavigation, timing.first_contentful_paint.value()); break; case LOAD_TYPE_NONE: NOTREACHED(); break; } } else { PAGE_LOAD_HISTOGRAM( internal::kBackgroundHistogramFirstContentfulPaintImmediate, timing.first_contentful_paint.value()); PAGE_LOAD_HISTOGRAM( internal::kBackgroundHistogramParseStartToFirstContentfulPaintImmediate, timing.first_contentful_paint.value() - timing.parse_start.value()); } }
void CorePageLoadMetricsObserver::OnFirstContentfulPaint( const page_load_metrics::PageLoadTiming& timing, const page_load_metrics::PageLoadExtraInfo& info) { if (WasStartedInForegroundOptionalEventInForeground( timing.first_contentful_paint, info)) { PAGE_LOAD_HISTOGRAM(internal::kHistogramFirstContentfulPaintImmediate, timing.first_contentful_paint.value()); PAGE_LOAD_HISTOGRAM( internal::kHistogramParseStartToFirstContentfulPaintImmediate, timing.first_contentful_paint.value() - timing.parse_start.value()); switch (GetPageLoadType(transition_)) { case LOAD_TYPE_RELOAD: PAGE_LOAD_HISTOGRAM( internal::kHistogramLoadTypeFirstContentfulPaintReload, timing.first_contentful_paint.value()); if (initiated_by_user_gesture_) { PAGE_LOAD_HISTOGRAM( internal::kHistogramLoadTypeFirstContentfulPaintReloadByGesture, timing.first_contentful_paint.value()); } break; case LOAD_TYPE_FORWARD_BACK: PAGE_LOAD_HISTOGRAM( internal::kHistogramLoadTypeFirstContentfulPaintForwardBack, timing.first_contentful_paint.value()); break; case LOAD_TYPE_NEW_NAVIGATION: PAGE_LOAD_HISTOGRAM( internal::kHistogramLoadTypeFirstContentfulPaintNewNavigation, timing.first_contentful_paint.value()); break; case LOAD_TYPE_NONE: NOTREACHED(); break; } } else { PAGE_LOAD_HISTOGRAM( internal::kBackgroundHistogramFirstContentfulPaintImmediate, timing.first_contentful_paint.value()); PAGE_LOAD_HISTOGRAM( internal::kBackgroundHistogramParseStartToFirstContentfulPaintImmediate, timing.first_contentful_paint.value() - timing.parse_start.value()); } }
C
Chrome
0
CVE-2016-10066
https://www.cvedetails.com/cve/CVE-2016-10066/
CWE-119
https://github.com/ImageMagick/ImageMagick/commit/f6e9d0d9955e85bdd7540b251cd50d598dacc5e6
f6e9d0d9955e85bdd7540b251cd50d598dacc5e6
null
ModuleExport void UnregisterTIMImage(void) { (void) UnregisterMagickInfo("TIM"); }
ModuleExport void UnregisterTIMImage(void) { (void) UnregisterMagickInfo("TIM"); }
C
ImageMagick
0
CVE-2016-6254
https://www.cvedetails.com/cve/CVE-2016-6254/
CWE-119
https://github.com/collectd/collectd/commit/b589096f907052b3a4da2b9ccc9b0e2e888dfc18
b589096f907052b3a4da2b9ccc9b0e2e888dfc18
network plugin: Fix heap overflow in parse_packet(). Emilien Gaspar has identified a heap overflow in parse_packet(), the function used by the network plugin to parse incoming network packets. This is a vulnerability in collectd, though the scope is not clear at this point. At the very least specially crafted network packets can be used to crash the daemon. We can't rule out a potential remote code execution though. Fixes: CVE-2016-6254
static int network_stats_read (void) /* {{{ */ { derive_t copy_octets_rx; derive_t copy_octets_tx; derive_t copy_packets_rx; derive_t copy_packets_tx; derive_t copy_values_dispatched; derive_t copy_values_not_dispatched; derive_t copy_values_sent; derive_t copy_values_not_sent; derive_t copy_receive_list_length; value_list_t vl = VALUE_LIST_INIT; value_t values[2]; copy_octets_rx = stats_octets_rx; copy_octets_tx = stats_octets_tx; copy_packets_rx = stats_packets_rx; copy_packets_tx = stats_packets_tx; copy_values_dispatched = stats_values_dispatched; copy_values_not_dispatched = stats_values_not_dispatched; copy_values_sent = stats_values_sent; copy_values_not_sent = stats_values_not_sent; copy_receive_list_length = receive_list_length; /* Initialize `vl' */ vl.values = values; vl.values_len = 2; vl.time = 0; sstrncpy (vl.host, hostname_g, sizeof (vl.host)); sstrncpy (vl.plugin, "network", sizeof (vl.plugin)); /* Octets received / sent */ vl.values[0].derive = (derive_t) copy_octets_rx; vl.values[1].derive = (derive_t) copy_octets_tx; sstrncpy (vl.type, "if_octets", sizeof (vl.type)); plugin_dispatch_values (&vl); /* Packets received / send */ vl.values[0].derive = (derive_t) copy_packets_rx; vl.values[1].derive = (derive_t) copy_packets_tx; sstrncpy (vl.type, "if_packets", sizeof (vl.type)); plugin_dispatch_values (&vl); /* Values (not) dispatched and (not) send */ sstrncpy (vl.type, "total_values", sizeof (vl.type)); vl.values_len = 1; vl.values[0].derive = (derive_t) copy_values_dispatched; sstrncpy (vl.type_instance, "dispatch-accepted", sizeof (vl.type_instance)); plugin_dispatch_values (&vl); vl.values[0].derive = (derive_t) copy_values_not_dispatched; sstrncpy (vl.type_instance, "dispatch-rejected", sizeof (vl.type_instance)); plugin_dispatch_values (&vl); vl.values[0].derive = (derive_t) copy_values_sent; sstrncpy (vl.type_instance, "send-accepted", sizeof (vl.type_instance)); plugin_dispatch_values (&vl); vl.values[0].derive = (derive_t) copy_values_not_sent; sstrncpy (vl.type_instance, "send-rejected", sizeof (vl.type_instance)); plugin_dispatch_values (&vl); /* Receive queue length */ vl.values[0].gauge = (gauge_t) copy_receive_list_length; sstrncpy (vl.type, "queue_length", sizeof (vl.type)); vl.type_instance[0] = 0; plugin_dispatch_values (&vl); return (0); } /* }}} int network_stats_read */
static int network_stats_read (void) /* {{{ */ { derive_t copy_octets_rx; derive_t copy_octets_tx; derive_t copy_packets_rx; derive_t copy_packets_tx; derive_t copy_values_dispatched; derive_t copy_values_not_dispatched; derive_t copy_values_sent; derive_t copy_values_not_sent; derive_t copy_receive_list_length; value_list_t vl = VALUE_LIST_INIT; value_t values[2]; copy_octets_rx = stats_octets_rx; copy_octets_tx = stats_octets_tx; copy_packets_rx = stats_packets_rx; copy_packets_tx = stats_packets_tx; copy_values_dispatched = stats_values_dispatched; copy_values_not_dispatched = stats_values_not_dispatched; copy_values_sent = stats_values_sent; copy_values_not_sent = stats_values_not_sent; copy_receive_list_length = receive_list_length; /* Initialize `vl' */ vl.values = values; vl.values_len = 2; vl.time = 0; sstrncpy (vl.host, hostname_g, sizeof (vl.host)); sstrncpy (vl.plugin, "network", sizeof (vl.plugin)); /* Octets received / sent */ vl.values[0].derive = (derive_t) copy_octets_rx; vl.values[1].derive = (derive_t) copy_octets_tx; sstrncpy (vl.type, "if_octets", sizeof (vl.type)); plugin_dispatch_values (&vl); /* Packets received / send */ vl.values[0].derive = (derive_t) copy_packets_rx; vl.values[1].derive = (derive_t) copy_packets_tx; sstrncpy (vl.type, "if_packets", sizeof (vl.type)); plugin_dispatch_values (&vl); /* Values (not) dispatched and (not) send */ sstrncpy (vl.type, "total_values", sizeof (vl.type)); vl.values_len = 1; vl.values[0].derive = (derive_t) copy_values_dispatched; sstrncpy (vl.type_instance, "dispatch-accepted", sizeof (vl.type_instance)); plugin_dispatch_values (&vl); vl.values[0].derive = (derive_t) copy_values_not_dispatched; sstrncpy (vl.type_instance, "dispatch-rejected", sizeof (vl.type_instance)); plugin_dispatch_values (&vl); vl.values[0].derive = (derive_t) copy_values_sent; sstrncpy (vl.type_instance, "send-accepted", sizeof (vl.type_instance)); plugin_dispatch_values (&vl); vl.values[0].derive = (derive_t) copy_values_not_sent; sstrncpy (vl.type_instance, "send-rejected", sizeof (vl.type_instance)); plugin_dispatch_values (&vl); /* Receive queue length */ vl.values[0].gauge = (gauge_t) copy_receive_list_length; sstrncpy (vl.type, "queue_length", sizeof (vl.type)); vl.type_instance[0] = 0; plugin_dispatch_values (&vl); return (0); } /* }}} int network_stats_read */
C
collectd
0
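The collectd commit above hardens parse_packet() against part lengths that overrun the receive buffer. The sketch below shows the general validation pattern for length-prefixed parts; the 2-byte type / 2-byte length framing is assumed here for illustration, and the function is schematic rather than the plugin's actual parser.

/* Walk length-prefixed parts of a received packet, rejecting any part
 * whose claimed length is smaller than its header or larger than the
 * bytes still remaining. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int parse_parts(const uint8_t *buf, size_t buffer_size)
{
    while (buffer_size >= 4) {                 /* 2-byte type + 2-byte length */
        uint16_t type, length;

        memcpy(&type, buf, 2);
        memcpy(&length, buf + 2, 2);
        type = ntohs(type);
        length = ntohs(length);

        if (length < 4 || length > buffer_size) {
            fprintf(stderr, "invalid part length %u (remaining %zu)\n",
                    (unsigned)length, buffer_size);
            return -1;
        }

        /* ...handle the payload at buf + 4, length - 4 bytes... */

        buf += length;
        buffer_size -= length;
    }
    return 0;
}

int main(void)
{
    /* One well-formed part: type 0x0000, total length 8, 4 payload bytes. */
    uint8_t pkt[8] = { 0x00, 0x00, 0x00, 0x08, 'h', 'o', 's', 't' };
    return parse_parts(pkt, sizeof(pkt)) == 0 ? 0 : 1;
}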
CVE-2018-11376
https://www.cvedetails.com/cve/CVE-2018-11376/
CWE-125
https://github.com/radare/radare2/commit/1f37c04f2a762500222dda2459e6a04646feeedf
1f37c04f2a762500222dda2459e6a04646feeedf
Fix #9904 - crash in r2_hoobr_r_read_le32 (over 9000 entrypoints) and read_le oobread (#9923)
static ut64 ht_find_intu64(SdbHash* ht, int key, bool* found) { ut64 *mvalue = (ut64 *)ht_find (ht, sdb_fmt ("%d", key), found); return *mvalue; }
static ut64 ht_find_intu64(SdbHash* ht, int key, bool* found) { ut64 *mvalue = (ut64 *)ht_find (ht, sdb_fmt ("%d", key), found); return *mvalue; }
C
radare2
0
CVE-2013-0885
https://www.cvedetails.com/cve/CVE-2013-0885/
CWE-264
https://github.com/chromium/chromium/commit/f335421145bb7f82c60fb9d61babcd6ce2e4b21e
f335421145bb7f82c60fb9d61babcd6ce2e4b21e
Tighten restrictions on hosted apps calling extension APIs Only allow component apps to make any API calls, and for them only allow the namespaces they explicitly have permission for (plus chrome.test - I need to see if I can rework some WebStore tests to remove even this). BUG=172369 Review URL: https://chromiumcodereview.appspot.com/12095095 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@180426 0039d316-1c4b-4281-b951-d872f2087c98
bool Extension::LoadVersion(string16* error) { std::string version_str; if (!manifest_->GetString(keys::kVersion, &version_str)) { *error = ASCIIToUTF16(errors::kInvalidVersion); return false; } version_.reset(new Version(version_str)); if (!version_->IsValid() || version_->components().size() > 4) { *error = ASCIIToUTF16(errors::kInvalidVersion); return false; } return true; }
bool Extension::LoadVersion(string16* error) { std::string version_str; if (!manifest_->GetString(keys::kVersion, &version_str)) { *error = ASCIIToUTF16(errors::kInvalidVersion); return false; } version_.reset(new Version(version_str)); if (!version_->IsValid() || version_->components().size() > 4) { *error = ASCIIToUTF16(errors::kInvalidVersion); return false; } return true; }
C
Chrome
0
CVE-2016-9557
https://www.cvedetails.com/cve/CVE-2016-9557/
CWE-190
https://github.com/mdadams/jasper/commit/d42b2388f7f8e0332c846675133acea151fc557a
d42b2388f7f8e0332c846675133acea151fc557a
The generation of the configuration file jas_config.h has been completely reworked in order to avoid pollution of the global namespace. Some problematic types like uchar, ulong, and friends have been replaced with names with a jas_ prefix. An option max_samples has been added to the BMP and JPEG decoders to restrict the maximum size of image that they can decode. This change was made as a (possibly temporary) fix to address security concerns. A max_samples command-line option has also been added to imginfo. Whether an image component (for jas_image_t) is stored in memory or on disk is now based on the component size (rather than the image size). Some debug log message were added. Some new integer overflow checks were added. Some new safe integer add/multiply functions were added. More pre-C99 cruft was removed. JasPer has numerous "hacks" to handle pre-C99 compilers. JasPer now assumes C99 support. So, this pre-C99 cruft is unnecessary and can be removed. The regression jasper-doublefree-mem_close.jpg has been re-enabled. Theoretically, it should work more predictably now.
static int file_write(jas_stream_obj_t *obj, char *buf, int cnt) { jas_stream_fileobj_t *fileobj; JAS_DBGLOG(100, ("file_write(%p, %p, %d)\n", obj, buf, cnt)); fileobj = JAS_CAST(jas_stream_fileobj_t *, obj); return write(fileobj->fd, buf, cnt); }
static int file_write(jas_stream_obj_t *obj, char *buf, int cnt) { jas_stream_fileobj_t *fileobj; JAS_DBGLOG(100, ("file_write(%p, %p, %d)\n", obj, buf, cnt)); fileobj = JAS_CAST(jas_stream_fileobj_t *, obj); return write(fileobj->fd, buf, cnt); }
C
jasper
0
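The JasPer changelog above mentions new safe integer add/multiply functions among the hardening changes. One common way to write such a check is sketched below, using the GCC/Clang overflow builtins with a manual fallback; this is a generic helper and not necessarily the library's own implementation.

/* Overflow-checked size_t multiply: returns false instead of wrapping. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool safe_size_mul(size_t a, size_t b, size_t *result)
{
#if defined(__GNUC__) || defined(__clang__)
    return !__builtin_mul_overflow(a, b, result);
#else
    if (b != 0 && a > (size_t)-1 / b)
        return false;
    *result = a * b;
    return true;
#endif
}

int main(void)
{
    size_t n;
    printf("4096 * 4096 ok: %d\n", (int)safe_size_mul(4096, 4096, &n));
    printf("huge * 3 ok: %d\n", (int)safe_size_mul((size_t)-1 / 2, 3, &n));
    return 0;
}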
CVE-2017-10663
https://www.cvedetails.com/cve/CVE-2017-10663/
CWE-129
https://github.com/torvalds/linux/commit/15d3042a937c13f5d9244241c7a9c8416ff6e82a
15d3042a937c13f5d9244241c7a9c8416ff6e82a
f2fs: sanity check checkpoint segno and blkoff Make sure segno and blkoff read from raw image are valid. Cc: stable@vger.kernel.org Signed-off-by: Jin Qian <jinqian@google.com> [Jaegeuk Kim: adjust minor coding style] Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
static int f2fs_freeze(struct super_block *sb) { if (f2fs_readonly(sb)) return 0; /* IO error happened before */ if (unlikely(f2fs_cp_error(F2FS_SB(sb)))) return -EIO; /* must be clean, since sync_filesystem() was already called */ if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY)) return -EINVAL; return 0; }
static int f2fs_freeze(struct super_block *sb) { if (f2fs_readonly(sb)) return 0; /* IO error happened before */ if (unlikely(f2fs_cp_error(F2FS_SB(sb)))) return -EIO; /* must be clean, since sync_filesystem() was already called */ if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY)) return -EINVAL; return 0; }
C
linux
0