instruction (stringclasses, 1 value) | input (stringlengths, 222-112k) | output (stringlengths, 21-113k) | __index_level_0__ (int64, 15-30k)
---|---|---|---|
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_mdb_htable *mdb;
struct nlattr *nest, *nest2;
int i, err = 0;
int idx = 0, s_idx = cb->args[1];
if (br->multicast_disabled)
return 0;
mdb = rcu_dereference(br->mdb);
if (!mdb)
return 0;
nest = nla_nest_start(skb, MDBA_MDB);
if (nest == NULL)
return -EMSGSIZE;
for (i = 0; i < mdb->max; i++) {
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p, **pp;
struct net_bridge_port *port;
hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) {
if (idx < s_idx)
goto skip;
nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
if (nest2 == NULL) {
err = -EMSGSIZE;
goto out;
}
for (pp = &mp->ports;
(p = rcu_dereference(*pp)) != NULL;
pp = &p->next) {
port = p->port;
if (port) {
struct br_mdb_entry e;
e.ifindex = port->dev->ifindex;
e.state = p->state;
if (p->addr.proto == htons(ETH_P_IP))
e.addr.u.ip4 = p->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
if (p->addr.proto == htons(ETH_P_IPV6))
e.addr.u.ip6 = p->addr.u.ip6;
#endif
e.addr.proto = p->addr.proto;
if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(e), &e)) {
nla_nest_cancel(skb, nest2);
err = -EMSGSIZE;
goto out;
}
}
}
nla_nest_end(skb, nest2);
skip:
idx++;
}
}
out:
cb->args[1] = idx;
nla_nest_end(skb, nest);
return err;
}
Commit Message: bridge: fix mdb info leaks
The bridging code discloses heap and stack bytes via the RTM_GETMDB
netlink interface and via the notify messages sent to group RTNLGRP_MDB
after a successful add/del.
Fix both cases by initializing all unset members/padding bytes with
memset(0).
Cc: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: Mathias Krause <minipli@googlemail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
CWE ID: CWE-399 | static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_mdb_htable *mdb;
struct nlattr *nest, *nest2;
int i, err = 0;
int idx = 0, s_idx = cb->args[1];
if (br->multicast_disabled)
return 0;
mdb = rcu_dereference(br->mdb);
if (!mdb)
return 0;
nest = nla_nest_start(skb, MDBA_MDB);
if (nest == NULL)
return -EMSGSIZE;
for (i = 0; i < mdb->max; i++) {
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p, **pp;
struct net_bridge_port *port;
hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) {
if (idx < s_idx)
goto skip;
nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
if (nest2 == NULL) {
err = -EMSGSIZE;
goto out;
}
for (pp = &mp->ports;
(p = rcu_dereference(*pp)) != NULL;
pp = &p->next) {
port = p->port;
if (port) {
struct br_mdb_entry e;
memset(&e, 0, sizeof(e));
e.ifindex = port->dev->ifindex;
e.state = p->state;
if (p->addr.proto == htons(ETH_P_IP))
e.addr.u.ip4 = p->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
if (p->addr.proto == htons(ETH_P_IPV6))
e.addr.u.ip6 = p->addr.u.ip6;
#endif
e.addr.proto = p->addr.proto;
if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(e), &e)) {
nla_nest_cancel(skb, nest2);
err = -EMSGSIZE;
goto out;
}
}
}
nla_nest_end(skb, nest2);
skip:
idx++;
}
}
out:
cb->args[1] = idx;
nla_nest_end(skb, nest);
return err;
}
| 15,467 |
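The commit message above points at a generic C pitfall: a partially assigned on-stack struct copied byte-for-byte into a netlink message carries whatever was previously in its padding and unset members. The userspace sketch below is illustrative only (the `struct entry` layout and field names are made up, not the kernel's `struct br_mdb_entry`); it shows why the one-line `memset(&e, 0, sizeof(e))` in the fixed function closes the leak.

```c
/*
 * Minimal userspace sketch (not the kernel code itself): a struct with
 * padding and unset fields copied verbatim into an output buffer leaks
 * whatever stack bytes happened to be there, unless it is cleared first.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct entry {
    uint32_t ifindex;
    uint8_t  state;      /* 3 padding bytes usually follow on common ABIs */
    uint32_t addr;
};

static void fill_unsafe(struct entry *e)
{
    /* Only some members are assigned; padding keeps stale stack contents. */
    e->ifindex = 1;
    e->state   = 2;
    e->addr    = 0x7f000001;
}

static void fill_safe(struct entry *e)
{
    memset(e, 0, sizeof(*e));   /* the actual fix pattern: clear everything */
    e->ifindex = 1;
    e->state   = 2;
    e->addr    = 0x7f000001;
}

static void dump(const char *tag, const struct entry *e)
{
    const unsigned char *b = (const unsigned char *)e;
    printf("%s:", tag);
    for (size_t i = 0; i < sizeof(*e); i++)
        printf(" %02x", b[i]);
    printf("\n");
}

int main(void)
{
    struct entry e;
    fill_unsafe(&e);
    dump("unsafe (padding may hold stale bytes)", &e);
    fill_safe(&e);
    dump("safe   (all bytes initialized)       ", &e);
    return 0;
}
```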
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
{
struct super_block *sb = inode->i_sb;
ext4_lblk_t first_block, stop_block;
struct address_space *mapping = inode->i_mapping;
loff_t first_block_offset, last_block_offset;
handle_t *handle;
unsigned int credits;
int ret = 0;
if (!S_ISREG(inode->i_mode))
return -EOPNOTSUPP;
trace_ext4_punch_hole(inode, offset, length, 0);
/*
* Write out all dirty pages to avoid race conditions
* Then release them.
*/
if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
ret = filemap_write_and_wait_range(mapping, offset,
offset + length - 1);
if (ret)
return ret;
}
mutex_lock(&inode->i_mutex);
/* No need to punch hole beyond i_size */
if (offset >= inode->i_size)
goto out_mutex;
/*
* If the hole extends beyond i_size, set the hole
* to end after the page that contains i_size
*/
if (offset + length > inode->i_size) {
length = inode->i_size +
PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
offset;
}
if (offset & (sb->s_blocksize - 1) ||
(offset + length) & (sb->s_blocksize - 1)) {
/*
* Attach jinode to inode for jbd2 if we do any zeroing of
* partial block
*/
ret = ext4_inode_attach_jinode(inode);
if (ret < 0)
goto out_mutex;
}
first_block_offset = round_up(offset, sb->s_blocksize);
last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
/* Now release the pages and zero block aligned part of pages*/
if (last_block_offset > first_block_offset)
truncate_pagecache_range(inode, first_block_offset,
last_block_offset);
/* Wait all existing dio workers, newcomers will block on i_mutex */
ext4_inode_block_unlocked_dio(inode);
inode_dio_wait(inode);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
credits = ext4_writepage_trans_blocks(inode);
else
credits = ext4_blocks_for_truncate(inode);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
ext4_std_error(sb, ret);
goto out_dio;
}
ret = ext4_zero_partial_blocks(handle, inode, offset,
length);
if (ret)
goto out_stop;
first_block = (offset + sb->s_blocksize - 1) >>
EXT4_BLOCK_SIZE_BITS(sb);
stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
/* If there are no blocks to remove, return now */
if (first_block >= stop_block)
goto out_stop;
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode);
ret = ext4_es_remove_extent(inode, first_block,
stop_block - first_block);
if (ret) {
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
ret = ext4_ext_remove_space(inode, first_block,
stop_block - 1);
else
ret = ext4_ind_remove_space(handle, inode, first_block,
stop_block);
up_write(&EXT4_I(inode)->i_data_sem);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
/* Now release the pages again to reduce race window */
if (last_block_offset > first_block_offset)
truncate_pagecache_range(inode, first_block_offset,
last_block_offset);
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
out_stop:
ext4_journal_stop(handle);
out_dio:
ext4_inode_resume_unlocked_dio(inode);
out_mutex:
mutex_unlock(&inode->i_mutex);
return ret;
}
Commit Message: ext4: fix races between page faults and hole punching
Currently, page faults and hole punching are completely unsynchronized.
This can result in page fault faulting in a page into a range that we
are punching after truncate_pagecache_range() has been called and thus
we can end up with a page mapped to disk blocks that will be shortly
freed. Filesystem corruption will shortly follow. Note that the same
race is avoided for truncate by checking page fault offset against
i_size but there isn't similar mechanism available for punching holes.
Fix the problem by creating new rw semaphore i_mmap_sem in inode and
grab it for writing over truncate, hole punching, and other functions
removing blocks from extent tree and for read over page faults. We
cannot easily use i_data_sem for this since that ranks below transaction
start and we need something ranking above it so that it can be held over
the whole truncate / hole punching operation. Also remove various
workarounds we had in the code to reduce race window when page fault
could have created pages with stale mapping information.
Signed-off-by: Jan Kara <jack@suse.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
CWE ID: CWE-362 | int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
{
struct super_block *sb = inode->i_sb;
ext4_lblk_t first_block, stop_block;
struct address_space *mapping = inode->i_mapping;
loff_t first_block_offset, last_block_offset;
handle_t *handle;
unsigned int credits;
int ret = 0;
if (!S_ISREG(inode->i_mode))
return -EOPNOTSUPP;
trace_ext4_punch_hole(inode, offset, length, 0);
/*
* Write out all dirty pages to avoid race conditions
* Then release them.
*/
if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
ret = filemap_write_and_wait_range(mapping, offset,
offset + length - 1);
if (ret)
return ret;
}
mutex_lock(&inode->i_mutex);
/* No need to punch hole beyond i_size */
if (offset >= inode->i_size)
goto out_mutex;
/*
* If the hole extends beyond i_size, set the hole
* to end after the page that contains i_size
*/
if (offset + length > inode->i_size) {
length = inode->i_size +
PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
offset;
}
if (offset & (sb->s_blocksize - 1) ||
(offset + length) & (sb->s_blocksize - 1)) {
/*
* Attach jinode to inode for jbd2 if we do any zeroing of
* partial block
*/
ret = ext4_inode_attach_jinode(inode);
if (ret < 0)
goto out_mutex;
}
/* Wait all existing dio workers, newcomers will block on i_mutex */
ext4_inode_block_unlocked_dio(inode);
inode_dio_wait(inode);
/*
* Prevent page faults from reinstantiating pages we have released from
* page cache.
*/
down_write(&EXT4_I(inode)->i_mmap_sem);
first_block_offset = round_up(offset, sb->s_blocksize);
last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
/* Now release the pages and zero block aligned part of pages*/
if (last_block_offset > first_block_offset)
truncate_pagecache_range(inode, first_block_offset,
last_block_offset);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
credits = ext4_writepage_trans_blocks(inode);
else
credits = ext4_blocks_for_truncate(inode);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
ext4_std_error(sb, ret);
goto out_dio;
}
ret = ext4_zero_partial_blocks(handle, inode, offset,
length);
if (ret)
goto out_stop;
first_block = (offset + sb->s_blocksize - 1) >>
EXT4_BLOCK_SIZE_BITS(sb);
stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
/* If there are no blocks to remove, return now */
if (first_block >= stop_block)
goto out_stop;
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode);
ret = ext4_es_remove_extent(inode, first_block,
stop_block - first_block);
if (ret) {
up_write(&EXT4_I(inode)->i_data_sem);
goto out_stop;
}
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
ret = ext4_ext_remove_space(inode, first_block,
stop_block - 1);
else
ret = ext4_ind_remove_space(handle, inode, first_block,
stop_block);
up_write(&EXT4_I(inode)->i_data_sem);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
out_stop:
ext4_journal_stop(handle);
out_dio:
up_write(&EXT4_I(inode)->i_mmap_sem);
ext4_inode_resume_unlocked_dio(inode);
out_mutex:
mutex_unlock(&inode->i_mutex);
return ret;
}
| 27,409 |
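The fix relies on a reader/writer lock ordering rather than on any single line of the function above. As a rough userspace analogue (assumed names, with `pthread_rwlock_t` standing in for the kernel rw_semaphore `i_mmap_sem`), hole punching takes the lock for writing while fault handling takes it for reading, so a fault can never repopulate the page cache while blocks are being freed:

```c
/*
 * Userspace analogue of the i_mmap_sem idea from the commit message.
 * The function names and lock type are illustrative only.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

static void punch_hole(long first_block, long stop_block)
{
    pthread_rwlock_wrlock(&mmap_sem);   /* like down_write(&i_mmap_sem) */
    printf("truncating page cache and freeing blocks %ld..%ld\n",
           first_block, stop_block);
    pthread_rwlock_unlock(&mmap_sem);   /* like up_write() */
}

static void page_fault(long block)
{
    pthread_rwlock_rdlock(&mmap_sem);   /* like down_read(&i_mmap_sem) */
    printf("mapping block %ld into the page cache\n", block);
    pthread_rwlock_unlock(&mmap_sem);
}

int main(void)
{
    page_fault(10);      /* cannot interleave with punch_hole's critical section */
    punch_hole(0, 100);
    return 0;
}
```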
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void DevToolsSession::ReceivedBadMessage() {
MojoConnectionDestroyed();
if (process_) {
bad_message::ReceivedBadMessage(
process_, bad_message::RFH_INCONSISTENT_DEVTOOLS_MESSAGE);
}
}
Commit Message: DevTools: speculative fix for crash in NetworkHandler::Disable
This keeps BrowserContext* and StoragePartition* instead of
RenderProcessHost* in an attemp to resolve UAF of RenderProcessHost
upon closure of DevTools front-end.
Bug: 801117, 783067, 780694
Change-Id: I6c2cca60cc0c29f0949d189cf918769059f80c1b
Reviewed-on: https://chromium-review.googlesource.com/876657
Commit-Queue: Andrey Kosyakov <caseq@chromium.org>
Reviewed-by: Dmitry Gozman <dgozman@chromium.org>
Cr-Commit-Position: refs/heads/master@{#531157}
CWE ID: CWE-20 | void DevToolsSession::ReceivedBadMessage() {
MojoConnectionDestroyed();
RenderProcessHost* process = RenderProcessHost::FromID(process_host_id_);
if (process) {
bad_message::ReceivedBadMessage(
process, bad_message::RFH_INCONSISTENT_DEVTOOLS_MESSAGE);
}
}
| 13,564 |
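The fixed function swaps a cached `RenderProcessHost*` for a process-host ID that is resolved at the point of use. A minimal sketch of that pattern, written in plain C rather than the original Chromium C++ and using a made-up registry in place of `RenderProcessHost::FromID()`, looks like this:

```c
/*
 * Sketch of the fix's pattern: keep a stable identifier instead of a raw
 * pointer that may dangle, and resolve it at the point of use.
 * The registry below is a hypothetical stand-in, not a Chromium API.
 */
#include <stddef.h>
#include <stdio.h>

struct host { int id; const char *name; };

static struct host *host_table[8];           /* hypothetical registry */

static struct host *host_from_id(int id)
{
    if (id < 0 || id >= 8)
        return NULL;
    return host_table[id];                   /* NULL once the host is gone */
}

static void report_bad_message(int host_id)
{
    struct host *h = host_from_id(host_id);  /* look up, don't cache a pointer */
    if (h)
        printf("reporting bad message from %s\n", h->name);
    else
        printf("host %d already destroyed, nothing to do\n", host_id);
}

int main(void)
{
    struct host renderer = { 3, "renderer-3" };
    host_table[3] = &renderer;
    report_bad_message(3);
    host_table[3] = NULL;                    /* host torn down */
    report_bad_message(3);                   /* safe: lookup fails cleanly */
    return 0;
}
```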
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: PageGroupLoadDeferrer::PageGroupLoadDeferrer(Page* page, bool deferSelf)
{
const HashSet<Page*>& pages = page->group().pages();
HashSet<Page*>::const_iterator end = pages.end();
for (HashSet<Page*>::const_iterator it = pages.begin(); it != end; ++it) {
Page* otherPage = *it;
if ((deferSelf || otherPage != page)) {
if (!otherPage->defersLoading()) {
m_deferredFrames.append(otherPage->mainFrame());
for (Frame* frame = otherPage->mainFrame(); frame; frame = frame->tree()->traverseNext())
frame->document()->suspendScheduledTasks(ActiveDOMObject::WillDeferLoading);
}
}
}
size_t count = m_deferredFrames.size();
for (size_t i = 0; i < count; ++i)
if (Page* page = m_deferredFrames[i]->page())
page->setDefersLoading(true);
}
Commit Message: Don't wait to notify client of spoof attempt if a modal dialog is created.
BUG=281256
TEST=See bug for repro steps.
Review URL: https://chromiumcodereview.appspot.com/23620020
git-svn-id: svn://svn.chromium.org/blink/trunk@157196 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: | PageGroupLoadDeferrer::PageGroupLoadDeferrer(Page* page, bool deferSelf)
{
const HashSet<Page*>& pages = page->group().pages();
HashSet<Page*>::const_iterator end = pages.end();
for (HashSet<Page*>::const_iterator it = pages.begin(); it != end; ++it) {
Page* otherPage = *it;
if ((deferSelf || otherPage != page)) {
if (!otherPage->defersLoading()) {
m_deferredFrames.append(otherPage->mainFrame());
// Ensure that we notify the client if the initial empty document is accessed before showing anything
// modal, to prevent spoofs while the modal window or sheet is visible.
otherPage->mainFrame()->loader()->notifyIfInitialDocumentAccessed();
for (Frame* frame = otherPage->mainFrame(); frame; frame = frame->tree()->traverseNext())
frame->document()->suspendScheduledTasks(ActiveDOMObject::WillDeferLoading);
}
}
}
size_t count = m_deferredFrames.size();
for (size_t i = 0; i < count; ++i)
if (Page* page = m_deferredFrames[i]->page())
page->setDefersLoading(true);
}
| 22,951 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void MemoryInstrumentation::GetVmRegionsForHeapProfiler(
RequestGlobalDumpCallback callback) {
const auto& coordinator = GetCoordinatorBindingForCurrentThread();
coordinator->GetVmRegionsForHeapProfiler(callback);
}
Commit Message: memory-infra: split up memory-infra coordinator service into two
This allows for heap profiler to use its own service with correct
capabilities and all other instances to use the existing coordinator
service.
Bug: 792028
Change-Id: I84e4ec71f5f1d00991c0516b1424ce7334bcd3cd
Reviewed-on: https://chromium-review.googlesource.com/836896
Commit-Queue: Lalit Maganti <lalitm@chromium.org>
Reviewed-by: Daniel Cheng <dcheng@chromium.org>
Reviewed-by: oysteine <oysteine@chromium.org>
Reviewed-by: Albert J. Wong <ajwong@chromium.org>
Reviewed-by: Hector Dearman <hjd@chromium.org>
Cr-Commit-Position: refs/heads/master@{#529059}
CWE ID: CWE-269 | void MemoryInstrumentation::GetVmRegionsForHeapProfiler(
| 6,000 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: PassRefPtr<DrawingBuffer> DrawingBuffer::Create(
std::unique_ptr<WebGraphicsContext3DProvider> context_provider,
Client* client,
const IntSize& size,
bool premultiplied_alpha,
bool want_alpha_channel,
bool want_depth_buffer,
bool want_stencil_buffer,
bool want_antialiasing,
PreserveDrawingBuffer preserve,
WebGLVersion web_gl_version,
ChromiumImageUsage chromium_image_usage,
const CanvasColorParams& color_params) {
DCHECK(context_provider);
if (g_should_fail_drawing_buffer_creation_for_testing) {
g_should_fail_drawing_buffer_creation_for_testing = false;
return nullptr;
}
std::unique_ptr<Extensions3DUtil> extensions_util =
Extensions3DUtil::Create(context_provider->ContextGL());
if (!extensions_util->IsValid()) {
return nullptr;
}
DCHECK(extensions_util->SupportsExtension("GL_OES_packed_depth_stencil"));
extensions_util->EnsureExtensionEnabled("GL_OES_packed_depth_stencil");
bool multisample_supported =
want_antialiasing &&
(extensions_util->SupportsExtension(
"GL_CHROMIUM_framebuffer_multisample") ||
extensions_util->SupportsExtension(
"GL_EXT_multisampled_render_to_texture")) &&
extensions_util->SupportsExtension("GL_OES_rgb8_rgba8");
if (multisample_supported) {
extensions_util->EnsureExtensionEnabled("GL_OES_rgb8_rgba8");
if (extensions_util->SupportsExtension(
"GL_CHROMIUM_framebuffer_multisample"))
extensions_util->EnsureExtensionEnabled(
"GL_CHROMIUM_framebuffer_multisample");
else
extensions_util->EnsureExtensionEnabled(
"GL_EXT_multisampled_render_to_texture");
}
bool discard_framebuffer_supported =
extensions_util->SupportsExtension("GL_EXT_discard_framebuffer");
if (discard_framebuffer_supported)
extensions_util->EnsureExtensionEnabled("GL_EXT_discard_framebuffer");
RefPtr<DrawingBuffer> drawing_buffer = AdoptRef(new DrawingBuffer(
std::move(context_provider), std::move(extensions_util), client,
discard_framebuffer_supported, want_alpha_channel, premultiplied_alpha,
preserve, web_gl_version, want_depth_buffer, want_stencil_buffer,
chromium_image_usage, color_params));
if (!drawing_buffer->Initialize(size, multisample_supported)) {
drawing_buffer->BeginDestruction();
return PassRefPtr<DrawingBuffer>();
}
return drawing_buffer;
}
Commit Message: Reset ES3 pixel pack parameters and PIXEL_PACK_BUFFER binding in DrawingBuffer before ReadPixels() and recover them later.
BUG=740603
TEST=new conformance test
R=kbr@chromium.org,piman@chromium.org
Change-Id: I3ea54c6cc34f34e249f7c8b9f792d93c5e1958f4
Reviewed-on: https://chromium-review.googlesource.com/570840
Reviewed-by: Antoine Labour <piman@chromium.org>
Reviewed-by: Kenneth Russell <kbr@chromium.org>
Commit-Queue: Zhenyao Mo <zmo@chromium.org>
Cr-Commit-Position: refs/heads/master@{#486518}
CWE ID: CWE-119 | PassRefPtr<DrawingBuffer> DrawingBuffer::Create(
std::unique_ptr<WebGraphicsContext3DProvider> context_provider,
Client* client,
const IntSize& size,
bool premultiplied_alpha,
bool want_alpha_channel,
bool want_depth_buffer,
bool want_stencil_buffer,
bool want_antialiasing,
PreserveDrawingBuffer preserve,
WebGLVersion webgl_version,
ChromiumImageUsage chromium_image_usage,
const CanvasColorParams& color_params) {
DCHECK(context_provider);
if (g_should_fail_drawing_buffer_creation_for_testing) {
g_should_fail_drawing_buffer_creation_for_testing = false;
return nullptr;
}
std::unique_ptr<Extensions3DUtil> extensions_util =
Extensions3DUtil::Create(context_provider->ContextGL());
if (!extensions_util->IsValid()) {
return nullptr;
}
DCHECK(extensions_util->SupportsExtension("GL_OES_packed_depth_stencil"));
extensions_util->EnsureExtensionEnabled("GL_OES_packed_depth_stencil");
bool multisample_supported =
want_antialiasing &&
(extensions_util->SupportsExtension(
"GL_CHROMIUM_framebuffer_multisample") ||
extensions_util->SupportsExtension(
"GL_EXT_multisampled_render_to_texture")) &&
extensions_util->SupportsExtension("GL_OES_rgb8_rgba8");
if (multisample_supported) {
extensions_util->EnsureExtensionEnabled("GL_OES_rgb8_rgba8");
if (extensions_util->SupportsExtension(
"GL_CHROMIUM_framebuffer_multisample"))
extensions_util->EnsureExtensionEnabled(
"GL_CHROMIUM_framebuffer_multisample");
else
extensions_util->EnsureExtensionEnabled(
"GL_EXT_multisampled_render_to_texture");
}
bool discard_framebuffer_supported =
extensions_util->SupportsExtension("GL_EXT_discard_framebuffer");
if (discard_framebuffer_supported)
extensions_util->EnsureExtensionEnabled("GL_EXT_discard_framebuffer");
RefPtr<DrawingBuffer> drawing_buffer = AdoptRef(new DrawingBuffer(
std::move(context_provider), std::move(extensions_util), client,
discard_framebuffer_supported, want_alpha_channel, premultiplied_alpha,
preserve, webgl_version, want_depth_buffer, want_stencil_buffer,
chromium_image_usage, color_params));
if (!drawing_buffer->Initialize(size, multisample_supported)) {
drawing_buffer->BeginDestruction();
return PassRefPtr<DrawingBuffer>();
}
return drawing_buffer;
}
| 10,991 |
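The function shown in this row only renames a parameter; the substance of the commit message is resetting the ES3 pixel pack parameters and the `PIXEL_PACK_BUFFER` binding around `ReadPixels()`. The sketch below shows that save/reset/restore idea against the plain OpenGL ES 3.0 C API, assuming a current context (context creation omitted); it is not the DrawingBuffer code itself.

```c
/* Assumes a current OpenGL ES 3.0 context; illustrative helper only. */
#include <GLES3/gl3.h>

void read_pixels_with_clean_pack_state(int x, int y, int w, int h, void *out)
{
    GLint alignment, row_length, skip_pixels, skip_rows, pack_buffer;

    /* Save whatever pack state the caller had configured. */
    glGetIntegerv(GL_PACK_ALIGNMENT, &alignment);
    glGetIntegerv(GL_PACK_ROW_LENGTH, &row_length);
    glGetIntegerv(GL_PACK_SKIP_PIXELS, &skip_pixels);
    glGetIntegerv(GL_PACK_SKIP_ROWS, &skip_rows);
    glGetIntegerv(GL_PIXEL_PACK_BUFFER_BINDING, &pack_buffer);

    /* Reset to defaults so the read goes straight to `out`, tightly packed. */
    glPixelStorei(GL_PACK_ALIGNMENT, 4);
    glPixelStorei(GL_PACK_ROW_LENGTH, 0);
    glPixelStorei(GL_PACK_SKIP_PIXELS, 0);
    glPixelStorei(GL_PACK_SKIP_ROWS, 0);
    glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

    glReadPixels(x, y, w, h, GL_RGBA, GL_UNSIGNED_BYTE, out);

    /* Restore the caller's state afterwards. */
    glPixelStorei(GL_PACK_ALIGNMENT, alignment);
    glPixelStorei(GL_PACK_ROW_LENGTH, row_length);
    glPixelStorei(GL_PACK_SKIP_PIXELS, skip_pixels);
    glPixelStorei(GL_PACK_SKIP_ROWS, skip_rows);
    glBindBuffer(GL_PIXEL_PACK_BUFFER, (GLuint)pack_buffer);
}
```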
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
FieldOrderContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
int h, plane, line_step, line_size, line;
uint8_t *data;
if (!frame->interlaced_frame ||
frame->top_field_first == s->dst_tff)
return ff_filter_frame(outlink, frame);
av_dlog(ctx,
"picture will move %s one line\n",
s->dst_tff ? "up" : "down");
h = frame->height;
for (plane = 0; plane < 4 && frame->data[plane]; plane++) {
line_step = frame->linesize[plane];
line_size = s->line_size[plane];
data = frame->data[plane];
if (s->dst_tff) {
/** Move every line up one line, working from
* the top to the bottom of the frame.
* The original top line is lost.
* The new last line is created as a copy of the
* penultimate line from that field. */
for (line = 0; line < h; line++) {
if (1 + line < frame->height) {
memcpy(data, data + line_step, line_size);
} else {
memcpy(data, data - line_step - line_step, line_size);
}
data += line_step;
}
} else {
/** Move every line down one line, working from
* the bottom to the top of the frame.
* The original bottom line is lost.
* The new first line is created as a copy of the
* second line from that field. */
data += (h - 1) * line_step;
for (line = h - 1; line >= 0 ; line--) {
if (line > 0) {
memcpy(data, data - line_step, line_size);
} else {
memcpy(data, data + line_step + line_step, line_size);
}
data -= line_step;
}
}
}
frame->top_field_first = s->dst_tff;
return ff_filter_frame(outlink, frame);
}
Commit Message: avfilter: fix plane validity checks
Fixes out of array accesses
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
CWE ID: CWE-119 | static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
FieldOrderContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
int h, plane, line_step, line_size, line;
uint8_t *data;
if (!frame->interlaced_frame ||
frame->top_field_first == s->dst_tff)
return ff_filter_frame(outlink, frame);
av_dlog(ctx,
"picture will move %s one line\n",
s->dst_tff ? "up" : "down");
h = frame->height;
for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) {
line_step = frame->linesize[plane];
line_size = s->line_size[plane];
data = frame->data[plane];
if (s->dst_tff) {
/** Move every line up one line, working from
* the top to the bottom of the frame.
* The original top line is lost.
* The new last line is created as a copy of the
* penultimate line from that field. */
for (line = 0; line < h; line++) {
if (1 + line < frame->height) {
memcpy(data, data + line_step, line_size);
} else {
memcpy(data, data - line_step - line_step, line_size);
}
data += line_step;
}
} else {
/** Move every line down one line, working from
* the bottom to the top of the frame.
* The original bottom line is lost.
* The new first line is created as a copy of the
* second line from that field. */
data += (h - 1) * line_step;
for (line = h - 1; line >= 0 ; line--) {
if (line > 0) {
memcpy(data, data - line_step, line_size);
} else {
memcpy(data, data + line_step + line_step, line_size);
}
data -= line_step;
}
}
}
frame->top_field_first = s->dst_tff;
return ff_filter_frame(outlink, frame);
}
| 1,553 |
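The whole fix is the extra `frame->linesize[plane]` term in the loop condition: a plane is walked only while both its data pointer and its stride are non-zero. A self-contained sketch of that guard, with a simplified stand-in for `AVFrame`, is below.

```c
/*
 * Minimal sketch of the plane-validity guard behind the fix. The frame
 * struct is a made-up stand-in for AVFrame, not the FFmpeg type.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_frame {
    uint8_t *data[4];
    int      linesize[4];
};

static void touch_planes(struct fake_frame *f)
{
    /* Stop at the first plane with a NULL pointer or a zero stride. */
    for (int plane = 0; plane < 4 && f->data[plane] && f->linesize[plane]; plane++)
        printf("processing plane %d with stride %d\n", plane, f->linesize[plane]);
}

int main(void)
{
    uint8_t y[16], u[8];
    struct fake_frame f = { { y, u, NULL, NULL }, { 16, 0, 0, 0 } };
    touch_planes(&f);   /* stops at plane 1: data present but linesize == 0 */
    return 0;
}
```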
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: match_at(regex_t* reg, const UChar* str, const UChar* end,
#ifdef USE_MATCH_RANGE_MUST_BE_INSIDE_OF_SPECIFIED_RANGE
const UChar* right_range,
#endif
const UChar* sstart, UChar* sprev, OnigMatchArg* msa)
{
static UChar FinishCode[] = { OP_FINISH };
int i, n, num_mem, best_len, pop_level;
LengthType tlen, tlen2;
MemNumType mem;
RelAddrType addr;
UChar *s, *q, *sbegin;
int is_alloca;
char *alloc_base;
OnigStackType *stk_base, *stk, *stk_end;
OnigStackType *stkp; /* used as any purpose. */
OnigStackIndex si;
OnigStackIndex *repeat_stk;
OnigStackIndex *mem_start_stk, *mem_end_stk;
#ifdef USE_COMBINATION_EXPLOSION_CHECK
int scv;
unsigned char* state_check_buff = msa->state_check_buff;
int num_comb_exp_check = reg->num_comb_exp_check;
#endif
UChar *p = reg->p;
OnigOptionType option = reg->options;
OnigEncoding encode = reg->enc;
OnigCaseFoldType case_fold_flag = reg->case_fold_flag;
pop_level = reg->stack_pop_level;
num_mem = reg->num_mem;
STACK_INIT(INIT_MATCH_STACK_SIZE);
UPDATE_FOR_STACK_REALLOC;
for (i = 1; i <= num_mem; i++) {
mem_start_stk[i] = mem_end_stk[i] = INVALID_STACK_INDEX;
}
#ifdef ONIG_DEBUG_MATCH
fprintf(stderr, "match_at: str: %d, end: %d, start: %d, sprev: %d\n",
(int )str, (int )end, (int )sstart, (int )sprev);
fprintf(stderr, "size: %d, start offset: %d\n",
(int )(end - str), (int )(sstart - str));
#endif
STACK_PUSH_ENSURED(STK_ALT, FinishCode); /* bottom stack */
best_len = ONIG_MISMATCH;
s = (UChar* )sstart;
while (1) {
#ifdef ONIG_DEBUG_MATCH
{
UChar *q, *bp, buf[50];
int len;
fprintf(stderr, "%4d> \"", (int )(s - str));
bp = buf;
for (i = 0, q = s; i < 7 && q < end; i++) {
len = enclen(encode, q);
while (len-- > 0) *bp++ = *q++;
}
if (q < end) { xmemcpy(bp, "...\"", 4); bp += 4; }
else { xmemcpy(bp, "\"", 1); bp += 1; }
*bp = 0;
fputs((char* )buf, stderr);
for (i = 0; i < 20 - (bp - buf); i++) fputc(' ', stderr);
onig_print_compiled_byte_code(stderr, p, NULL, encode);
fprintf(stderr, "\n");
}
#endif
sbegin = s;
switch (*p++) {
case OP_END: MOP_IN(OP_END);
n = s - sstart;
if (n > best_len) {
OnigRegion* region;
#ifdef USE_FIND_LONGEST_SEARCH_ALL_OF_RANGE
if (IS_FIND_LONGEST(option)) {
if (n > msa->best_len) {
msa->best_len = n;
msa->best_s = (UChar* )sstart;
}
else
goto end_best_len;
}
#endif
best_len = n;
region = msa->region;
if (region) {
#ifdef USE_POSIX_API_REGION_OPTION
if (IS_POSIX_REGION(msa->options)) {
posix_regmatch_t* rmt = (posix_regmatch_t* )region;
rmt[0].rm_so = sstart - str;
rmt[0].rm_eo = s - str;
for (i = 1; i <= num_mem; i++) {
if (mem_end_stk[i] != INVALID_STACK_INDEX) {
if (BIT_STATUS_AT(reg->bt_mem_start, i))
rmt[i].rm_so = STACK_AT(mem_start_stk[i])->u.mem.pstr - str;
else
rmt[i].rm_so = (UChar* )((void* )(mem_start_stk[i])) - str;
rmt[i].rm_eo = (BIT_STATUS_AT(reg->bt_mem_end, i)
? STACK_AT(mem_end_stk[i])->u.mem.pstr
: (UChar* )((void* )mem_end_stk[i])) - str;
}
else {
rmt[i].rm_so = rmt[i].rm_eo = ONIG_REGION_NOTPOS;
}
}
}
else {
#endif /* USE_POSIX_API_REGION_OPTION */
region->beg[0] = sstart - str;
region->end[0] = s - str;
for (i = 1; i <= num_mem; i++) {
if (mem_end_stk[i] != INVALID_STACK_INDEX) {
if (BIT_STATUS_AT(reg->bt_mem_start, i))
region->beg[i] = STACK_AT(mem_start_stk[i])->u.mem.pstr - str;
else
region->beg[i] = (UChar* )((void* )mem_start_stk[i]) - str;
region->end[i] = (BIT_STATUS_AT(reg->bt_mem_end, i)
? STACK_AT(mem_end_stk[i])->u.mem.pstr
: (UChar* )((void* )mem_end_stk[i])) - str;
}
else {
region->beg[i] = region->end[i] = ONIG_REGION_NOTPOS;
}
}
#ifdef USE_CAPTURE_HISTORY
if (reg->capture_history != 0) {
int r;
OnigCaptureTreeNode* node;
if (IS_NULL(region->history_root)) {
region->history_root = node = history_node_new();
CHECK_NULL_RETURN_MEMERR(node);
}
else {
node = region->history_root;
history_tree_clear(node);
}
node->group = 0;
node->beg = sstart - str;
node->end = s - str;
stkp = stk_base;
r = make_capture_history_tree(region->history_root, &stkp,
stk, (UChar* )str, reg);
if (r < 0) {
best_len = r; /* error code */
goto finish;
}
}
#endif /* USE_CAPTURE_HISTORY */
#ifdef USE_POSIX_API_REGION_OPTION
} /* else IS_POSIX_REGION() */
#endif
} /* if (region) */
} /* n > best_len */
#ifdef USE_FIND_LONGEST_SEARCH_ALL_OF_RANGE
end_best_len:
#endif
MOP_OUT;
if (IS_FIND_CONDITION(option)) {
if (IS_FIND_NOT_EMPTY(option) && s == sstart) {
best_len = ONIG_MISMATCH;
goto fail; /* for retry */
}
if (IS_FIND_LONGEST(option) && DATA_ENSURE_CHECK1) {
goto fail; /* for retry */
}
}
/* default behavior: return first-matching result. */
goto finish;
break;
case OP_EXACT1: MOP_IN(OP_EXACT1);
#if 0
DATA_ENSURE(1);
if (*p != *s) goto fail;
p++; s++;
#endif
if (*p != *s++) goto fail;
DATA_ENSURE(0);
p++;
MOP_OUT;
break;
case OP_EXACT1_IC: MOP_IN(OP_EXACT1_IC);
{
int len;
UChar *q, lowbuf[ONIGENC_MBC_CASE_FOLD_MAXLEN];
DATA_ENSURE(1);
len = ONIGENC_MBC_CASE_FOLD(encode,
/* DISABLE_CASE_FOLD_MULTI_CHAR(case_fold_flag), */
case_fold_flag,
&s, end, lowbuf);
DATA_ENSURE(0);
q = lowbuf;
while (len-- > 0) {
if (*p != *q) {
goto fail;
}
p++; q++;
}
}
MOP_OUT;
break;
case OP_EXACT2: MOP_IN(OP_EXACT2);
DATA_ENSURE(2);
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
sprev = s;
p++; s++;
MOP_OUT;
continue;
break;
case OP_EXACT3: MOP_IN(OP_EXACT3);
DATA_ENSURE(3);
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
sprev = s;
p++; s++;
MOP_OUT;
continue;
break;
case OP_EXACT4: MOP_IN(OP_EXACT4);
DATA_ENSURE(4);
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
sprev = s;
p++; s++;
MOP_OUT;
continue;
break;
case OP_EXACT5: MOP_IN(OP_EXACT5);
DATA_ENSURE(5);
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
sprev = s;
p++; s++;
MOP_OUT;
continue;
break;
case OP_EXACTN: MOP_IN(OP_EXACTN);
GET_LENGTH_INC(tlen, p);
DATA_ENSURE(tlen);
while (tlen-- > 0) {
if (*p++ != *s++) goto fail;
}
sprev = s - 1;
MOP_OUT;
continue;
break;
case OP_EXACTN_IC: MOP_IN(OP_EXACTN_IC);
{
int len;
UChar *q, *endp, lowbuf[ONIGENC_MBC_CASE_FOLD_MAXLEN];
GET_LENGTH_INC(tlen, p);
endp = p + tlen;
while (p < endp) {
sprev = s;
DATA_ENSURE(1);
len = ONIGENC_MBC_CASE_FOLD(encode,
/* DISABLE_CASE_FOLD_MULTI_CHAR(case_fold_flag), */
case_fold_flag,
&s, end, lowbuf);
DATA_ENSURE(0);
q = lowbuf;
while (len-- > 0) {
if (*p != *q) goto fail;
p++; q++;
}
}
}
MOP_OUT;
continue;
break;
case OP_EXACTMB2N1: MOP_IN(OP_EXACTMB2N1);
DATA_ENSURE(2);
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
MOP_OUT;
break;
case OP_EXACTMB2N2: MOP_IN(OP_EXACTMB2N2);
DATA_ENSURE(4);
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
sprev = s;
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
MOP_OUT;
continue;
break;
case OP_EXACTMB2N3: MOP_IN(OP_EXACTMB2N3);
DATA_ENSURE(6);
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
sprev = s;
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
MOP_OUT;
continue;
break;
case OP_EXACTMB2N: MOP_IN(OP_EXACTMB2N);
GET_LENGTH_INC(tlen, p);
DATA_ENSURE(tlen * 2);
while (tlen-- > 0) {
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
}
sprev = s - 2;
MOP_OUT;
continue;
break;
case OP_EXACTMB3N: MOP_IN(OP_EXACTMB3N);
GET_LENGTH_INC(tlen, p);
DATA_ENSURE(tlen * 3);
while (tlen-- > 0) {
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
}
sprev = s - 3;
MOP_OUT;
continue;
break;
case OP_EXACTMBN: MOP_IN(OP_EXACTMBN);
GET_LENGTH_INC(tlen, p); /* mb-len */
GET_LENGTH_INC(tlen2, p); /* string len */
tlen2 *= tlen;
DATA_ENSURE(tlen2);
while (tlen2-- > 0) {
if (*p != *s) goto fail;
p++; s++;
}
sprev = s - tlen;
MOP_OUT;
continue;
break;
case OP_CCLASS: MOP_IN(OP_CCLASS);
DATA_ENSURE(1);
if (BITSET_AT(((BitSetRef )p), *s) == 0) goto fail;
p += SIZE_BITSET;
s += enclen(encode, s); /* OP_CCLASS can match mb-code. \D, \S */
MOP_OUT;
break;
case OP_CCLASS_MB: MOP_IN(OP_CCLASS_MB);
if (! ONIGENC_IS_MBC_HEAD(encode, s)) goto fail;
cclass_mb:
GET_LENGTH_INC(tlen, p);
{
OnigCodePoint code;
UChar *ss;
int mb_len;
DATA_ENSURE(1);
mb_len = enclen(encode, s);
DATA_ENSURE(mb_len);
ss = s;
s += mb_len;
code = ONIGENC_MBC_TO_CODE(encode, ss, s);
#ifdef PLATFORM_UNALIGNED_WORD_ACCESS
if (! onig_is_in_code_range(p, code)) goto fail;
#else
q = p;
ALIGNMENT_RIGHT(q);
if (! onig_is_in_code_range(q, code)) goto fail;
#endif
}
p += tlen;
MOP_OUT;
break;
case OP_CCLASS_MIX: MOP_IN(OP_CCLASS_MIX);
DATA_ENSURE(1);
if (ONIGENC_IS_MBC_HEAD(encode, s)) {
p += SIZE_BITSET;
goto cclass_mb;
}
else {
if (BITSET_AT(((BitSetRef )p), *s) == 0)
goto fail;
p += SIZE_BITSET;
GET_LENGTH_INC(tlen, p);
p += tlen;
s++;
}
MOP_OUT;
break;
case OP_CCLASS_NOT: MOP_IN(OP_CCLASS_NOT);
DATA_ENSURE(1);
if (BITSET_AT(((BitSetRef )p), *s) != 0) goto fail;
p += SIZE_BITSET;
s += enclen(encode, s);
MOP_OUT;
break;
case OP_CCLASS_MB_NOT: MOP_IN(OP_CCLASS_MB_NOT);
DATA_ENSURE(1);
if (! ONIGENC_IS_MBC_HEAD(encode, s)) {
s++;
GET_LENGTH_INC(tlen, p);
p += tlen;
goto cc_mb_not_success;
}
cclass_mb_not:
GET_LENGTH_INC(tlen, p);
{
OnigCodePoint code;
UChar *ss;
int mb_len = enclen(encode, s);
if (! DATA_ENSURE_CHECK(mb_len)) {
DATA_ENSURE(1);
s = (UChar* )end;
p += tlen;
goto cc_mb_not_success;
}
ss = s;
s += mb_len;
code = ONIGENC_MBC_TO_CODE(encode, ss, s);
#ifdef PLATFORM_UNALIGNED_WORD_ACCESS
if (onig_is_in_code_range(p, code)) goto fail;
#else
q = p;
ALIGNMENT_RIGHT(q);
if (onig_is_in_code_range(q, code)) goto fail;
#endif
}
p += tlen;
cc_mb_not_success:
MOP_OUT;
break;
case OP_CCLASS_MIX_NOT: MOP_IN(OP_CCLASS_MIX_NOT);
DATA_ENSURE(1);
if (ONIGENC_IS_MBC_HEAD(encode, s)) {
p += SIZE_BITSET;
goto cclass_mb_not;
}
else {
if (BITSET_AT(((BitSetRef )p), *s) != 0)
goto fail;
p += SIZE_BITSET;
GET_LENGTH_INC(tlen, p);
p += tlen;
s++;
}
MOP_OUT;
break;
case OP_CCLASS_NODE: MOP_IN(OP_CCLASS_NODE);
{
OnigCodePoint code;
void *node;
int mb_len;
UChar *ss;
DATA_ENSURE(1);
GET_POINTER_INC(node, p);
mb_len = enclen(encode, s);
ss = s;
s += mb_len;
DATA_ENSURE(0);
code = ONIGENC_MBC_TO_CODE(encode, ss, s);
if (onig_is_code_in_cc_len(mb_len, code, node) == 0) goto fail;
}
MOP_OUT;
break;
case OP_ANYCHAR: MOP_IN(OP_ANYCHAR);
DATA_ENSURE(1);
n = enclen(encode, s);
DATA_ENSURE(n);
if (ONIGENC_IS_MBC_NEWLINE(encode, s, end)) goto fail;
s += n;
MOP_OUT;
break;
case OP_ANYCHAR_ML: MOP_IN(OP_ANYCHAR_ML);
DATA_ENSURE(1);
n = enclen(encode, s);
DATA_ENSURE(n);
s += n;
MOP_OUT;
break;
case OP_ANYCHAR_STAR: MOP_IN(OP_ANYCHAR_STAR);
while (DATA_ENSURE_CHECK1) {
STACK_PUSH_ALT(p, s, sprev);
n = enclen(encode, s);
DATA_ENSURE(n);
if (ONIGENC_IS_MBC_NEWLINE(encode, s, end)) goto fail;
sprev = s;
s += n;
}
MOP_OUT;
break;
case OP_ANYCHAR_ML_STAR: MOP_IN(OP_ANYCHAR_ML_STAR);
while (DATA_ENSURE_CHECK1) {
STACK_PUSH_ALT(p, s, sprev);
n = enclen(encode, s);
if (n > 1) {
DATA_ENSURE(n);
sprev = s;
s += n;
}
else {
sprev = s;
s++;
}
}
MOP_OUT;
break;
case OP_ANYCHAR_STAR_PEEK_NEXT: MOP_IN(OP_ANYCHAR_STAR_PEEK_NEXT);
while (DATA_ENSURE_CHECK1) {
if (*p == *s) {
STACK_PUSH_ALT(p + 1, s, sprev);
}
n = enclen(encode, s);
DATA_ENSURE(n);
if (ONIGENC_IS_MBC_NEWLINE(encode, s, end)) goto fail;
sprev = s;
s += n;
}
p++;
MOP_OUT;
break;
case OP_ANYCHAR_ML_STAR_PEEK_NEXT:MOP_IN(OP_ANYCHAR_ML_STAR_PEEK_NEXT);
while (DATA_ENSURE_CHECK1) {
if (*p == *s) {
STACK_PUSH_ALT(p + 1, s, sprev);
}
n = enclen(encode, s);
if (n > 1) {
DATA_ENSURE(n);
sprev = s;
s += n;
}
else {
sprev = s;
s++;
}
}
p++;
MOP_OUT;
break;
#ifdef USE_COMBINATION_EXPLOSION_CHECK
case OP_STATE_CHECK_ANYCHAR_STAR: MOP_IN(OP_STATE_CHECK_ANYCHAR_STAR);
GET_STATE_CHECK_NUM_INC(mem, p);
while (DATA_ENSURE_CHECK1) {
STATE_CHECK_VAL(scv, mem);
if (scv) goto fail;
STACK_PUSH_ALT_WITH_STATE_CHECK(p, s, sprev, mem);
n = enclen(encode, s);
DATA_ENSURE(n);
if (ONIGENC_IS_MBC_NEWLINE(encode, s, end)) goto fail;
sprev = s;
s += n;
}
MOP_OUT;
break;
case OP_STATE_CHECK_ANYCHAR_ML_STAR:
MOP_IN(OP_STATE_CHECK_ANYCHAR_ML_STAR);
GET_STATE_CHECK_NUM_INC(mem, p);
while (DATA_ENSURE_CHECK1) {
STATE_CHECK_VAL(scv, mem);
if (scv) goto fail;
STACK_PUSH_ALT_WITH_STATE_CHECK(p, s, sprev, mem);
n = enclen(encode, s);
if (n > 1) {
DATA_ENSURE(n);
sprev = s;
s += n;
}
else {
sprev = s;
s++;
}
}
MOP_OUT;
break;
#endif /* USE_COMBINATION_EXPLOSION_CHECK */
case OP_WORD: MOP_IN(OP_WORD);
DATA_ENSURE(1);
if (! ONIGENC_IS_MBC_WORD(encode, s, end))
goto fail;
s += enclen(encode, s);
MOP_OUT;
break;
case OP_NOT_WORD: MOP_IN(OP_NOT_WORD);
DATA_ENSURE(1);
if (ONIGENC_IS_MBC_WORD(encode, s, end))
goto fail;
s += enclen(encode, s);
MOP_OUT;
break;
case OP_WORD_BOUND: MOP_IN(OP_WORD_BOUND);
if (ON_STR_BEGIN(s)) {
DATA_ENSURE(1);
if (! ONIGENC_IS_MBC_WORD(encode, s, end))
goto fail;
}
else if (ON_STR_END(s)) {
if (! ONIGENC_IS_MBC_WORD(encode, sprev, end))
goto fail;
}
else {
if (ONIGENC_IS_MBC_WORD(encode, s, end)
== ONIGENC_IS_MBC_WORD(encode, sprev, end))
goto fail;
}
MOP_OUT;
continue;
break;
case OP_NOT_WORD_BOUND: MOP_IN(OP_NOT_WORD_BOUND);
if (ON_STR_BEGIN(s)) {
if (DATA_ENSURE_CHECK1 && ONIGENC_IS_MBC_WORD(encode, s, end))
goto fail;
}
else if (ON_STR_END(s)) {
if (ONIGENC_IS_MBC_WORD(encode, sprev, end))
goto fail;
}
else {
if (ONIGENC_IS_MBC_WORD(encode, s, end)
!= ONIGENC_IS_MBC_WORD(encode, sprev, end))
goto fail;
}
MOP_OUT;
continue;
break;
#ifdef USE_WORD_BEGIN_END
case OP_WORD_BEGIN: MOP_IN(OP_WORD_BEGIN);
if (DATA_ENSURE_CHECK1 && ONIGENC_IS_MBC_WORD(encode, s, end)) {
if (ON_STR_BEGIN(s) || !ONIGENC_IS_MBC_WORD(encode, sprev, end)) {
MOP_OUT;
continue;
}
}
goto fail;
break;
case OP_WORD_END: MOP_IN(OP_WORD_END);
if (!ON_STR_BEGIN(s) && ONIGENC_IS_MBC_WORD(encode, sprev, end)) {
if (ON_STR_END(s) || !ONIGENC_IS_MBC_WORD(encode, s, end)) {
MOP_OUT;
continue;
}
}
goto fail;
break;
#endif
case OP_BEGIN_BUF: MOP_IN(OP_BEGIN_BUF);
if (! ON_STR_BEGIN(s)) goto fail;
MOP_OUT;
continue;
break;
case OP_END_BUF: MOP_IN(OP_END_BUF);
if (! ON_STR_END(s)) goto fail;
MOP_OUT;
continue;
break;
case OP_BEGIN_LINE: MOP_IN(OP_BEGIN_LINE);
if (ON_STR_BEGIN(s)) {
if (IS_NOTBOL(msa->options)) goto fail;
MOP_OUT;
continue;
}
else if (ONIGENC_IS_MBC_NEWLINE(encode, sprev, end) && !ON_STR_END(s)) {
MOP_OUT;
continue;
}
goto fail;
break;
case OP_END_LINE: MOP_IN(OP_END_LINE);
if (ON_STR_END(s)) {
#ifndef USE_NEWLINE_AT_END_OF_STRING_HAS_EMPTY_LINE
if (IS_EMPTY_STR || !ONIGENC_IS_MBC_NEWLINE(encode, sprev, end)) {
#endif
if (IS_NOTEOL(msa->options)) goto fail;
MOP_OUT;
continue;
#ifndef USE_NEWLINE_AT_END_OF_STRING_HAS_EMPTY_LINE
}
#endif
}
else if (ONIGENC_IS_MBC_NEWLINE(encode, s, end)) {
MOP_OUT;
continue;
}
#ifdef USE_CRNL_AS_LINE_TERMINATOR
else if (ONIGENC_IS_MBC_CRNL(encode, s, end)) {
MOP_OUT;
continue;
}
#endif
goto fail;
break;
case OP_SEMI_END_BUF: MOP_IN(OP_SEMI_END_BUF);
if (ON_STR_END(s)) {
#ifndef USE_NEWLINE_AT_END_OF_STRING_HAS_EMPTY_LINE
if (IS_EMPTY_STR || !ONIGENC_IS_MBC_NEWLINE(encode, sprev, end)) {
#endif
if (IS_NOTEOL(msa->options)) goto fail;
MOP_OUT;
continue;
#ifndef USE_NEWLINE_AT_END_OF_STRING_HAS_EMPTY_LINE
}
#endif
}
else if (ONIGENC_IS_MBC_NEWLINE(encode, s, end) &&
ON_STR_END(s + enclen(encode, s))) {
MOP_OUT;
continue;
}
#ifdef USE_CRNL_AS_LINE_TERMINATOR
else if (ONIGENC_IS_MBC_CRNL(encode, s, end)) {
UChar* ss = s + enclen(encode, s);
ss += enclen(encode, ss);
if (ON_STR_END(ss)) {
MOP_OUT;
continue;
}
}
#endif
goto fail;
break;
case OP_BEGIN_POSITION: MOP_IN(OP_BEGIN_POSITION);
if (s != msa->start)
goto fail;
MOP_OUT;
continue;
break;
case OP_MEMORY_START_PUSH: MOP_IN(OP_MEMORY_START_PUSH);
GET_MEMNUM_INC(mem, p);
STACK_PUSH_MEM_START(mem, s);
MOP_OUT;
continue;
break;
case OP_MEMORY_START: MOP_IN(OP_MEMORY_START);
GET_MEMNUM_INC(mem, p);
mem_start_stk[mem] = (OnigStackIndex )((void* )s);
MOP_OUT;
continue;
break;
case OP_MEMORY_END_PUSH: MOP_IN(OP_MEMORY_END_PUSH);
GET_MEMNUM_INC(mem, p);
STACK_PUSH_MEM_END(mem, s);
MOP_OUT;
continue;
break;
case OP_MEMORY_END: MOP_IN(OP_MEMORY_END);
GET_MEMNUM_INC(mem, p);
mem_end_stk[mem] = (OnigStackIndex )((void* )s);
MOP_OUT;
continue;
break;
#ifdef USE_SUBEXP_CALL
case OP_MEMORY_END_PUSH_REC: MOP_IN(OP_MEMORY_END_PUSH_REC);
GET_MEMNUM_INC(mem, p);
STACK_GET_MEM_START(mem, stkp); /* should be before push mem-end. */
STACK_PUSH_MEM_END(mem, s);
mem_start_stk[mem] = GET_STACK_INDEX(stkp);
MOP_OUT;
continue;
break;
case OP_MEMORY_END_REC: MOP_IN(OP_MEMORY_END_REC);
GET_MEMNUM_INC(mem, p);
mem_end_stk[mem] = (OnigStackIndex )((void* )s);
STACK_GET_MEM_START(mem, stkp);
if (BIT_STATUS_AT(reg->bt_mem_start, mem))
mem_start_stk[mem] = GET_STACK_INDEX(stkp);
else
mem_start_stk[mem] = (OnigStackIndex )((void* )stkp->u.mem.pstr);
STACK_PUSH_MEM_END_MARK(mem);
MOP_OUT;
continue;
break;
#endif
case OP_BACKREF1: MOP_IN(OP_BACKREF1);
mem = 1;
goto backref;
break;
case OP_BACKREF2: MOP_IN(OP_BACKREF2);
mem = 2;
goto backref;
break;
case OP_BACKREFN: MOP_IN(OP_BACKREFN);
GET_MEMNUM_INC(mem, p);
backref:
{
int len;
UChar *pstart, *pend;
/* if you want to remove following line,
you should check in parse and compile time. */
if (mem > num_mem) goto fail;
if (mem_end_stk[mem] == INVALID_STACK_INDEX) goto fail;
if (mem_start_stk[mem] == INVALID_STACK_INDEX) goto fail;
if (BIT_STATUS_AT(reg->bt_mem_start, mem))
pstart = STACK_AT(mem_start_stk[mem])->u.mem.pstr;
else
pstart = (UChar* )((void* )mem_start_stk[mem]);
pend = (BIT_STATUS_AT(reg->bt_mem_end, mem)
? STACK_AT(mem_end_stk[mem])->u.mem.pstr
: (UChar* )((void* )mem_end_stk[mem]));
n = pend - pstart;
DATA_ENSURE(n);
sprev = s;
STRING_CMP(pstart, s, n);
while (sprev + (len = enclen(encode, sprev)) < s)
sprev += len;
MOP_OUT;
continue;
}
break;
case OP_BACKREFN_IC: MOP_IN(OP_BACKREFN_IC);
GET_MEMNUM_INC(mem, p);
{
int len;
UChar *pstart, *pend;
/* if you want to remove following line,
you should check in parse and compile time. */
if (mem > num_mem) goto fail;
if (mem_end_stk[mem] == INVALID_STACK_INDEX) goto fail;
if (mem_start_stk[mem] == INVALID_STACK_INDEX) goto fail;
if (BIT_STATUS_AT(reg->bt_mem_start, mem))
pstart = STACK_AT(mem_start_stk[mem])->u.mem.pstr;
else
pstart = (UChar* )((void* )mem_start_stk[mem]);
pend = (BIT_STATUS_AT(reg->bt_mem_end, mem)
? STACK_AT(mem_end_stk[mem])->u.mem.pstr
: (UChar* )((void* )mem_end_stk[mem]));
n = pend - pstart;
DATA_ENSURE(n);
sprev = s;
STRING_CMP_IC(case_fold_flag, pstart, &s, n);
while (sprev + (len = enclen(encode, sprev)) < s)
sprev += len;
MOP_OUT;
continue;
}
break;
case OP_BACKREF_MULTI: MOP_IN(OP_BACKREF_MULTI);
{
int len, is_fail;
UChar *pstart, *pend, *swork;
GET_LENGTH_INC(tlen, p);
for (i = 0; i < tlen; i++) {
GET_MEMNUM_INC(mem, p);
if (mem_end_stk[mem] == INVALID_STACK_INDEX) continue;
if (mem_start_stk[mem] == INVALID_STACK_INDEX) continue;
if (BIT_STATUS_AT(reg->bt_mem_start, mem))
pstart = STACK_AT(mem_start_stk[mem])->u.mem.pstr;
else
pstart = (UChar* )((void* )mem_start_stk[mem]);
pend = (BIT_STATUS_AT(reg->bt_mem_end, mem)
? STACK_AT(mem_end_stk[mem])->u.mem.pstr
: (UChar* )((void* )mem_end_stk[mem]));
n = pend - pstart;
DATA_ENSURE(n);
sprev = s;
swork = s;
STRING_CMP_VALUE(pstart, swork, n, is_fail);
if (is_fail) continue;
s = swork;
while (sprev + (len = enclen(encode, sprev)) < s)
sprev += len;
p += (SIZE_MEMNUM * (tlen - i - 1));
break; /* success */
}
if (i == tlen) goto fail;
MOP_OUT;
continue;
}
break;
case OP_BACKREF_MULTI_IC: MOP_IN(OP_BACKREF_MULTI_IC);
{
int len, is_fail;
UChar *pstart, *pend, *swork;
GET_LENGTH_INC(tlen, p);
for (i = 0; i < tlen; i++) {
GET_MEMNUM_INC(mem, p);
if (mem_end_stk[mem] == INVALID_STACK_INDEX) continue;
if (mem_start_stk[mem] == INVALID_STACK_INDEX) continue;
if (BIT_STATUS_AT(reg->bt_mem_start, mem))
pstart = STACK_AT(mem_start_stk[mem])->u.mem.pstr;
else
pstart = (UChar* )((void* )mem_start_stk[mem]);
pend = (BIT_STATUS_AT(reg->bt_mem_end, mem)
? STACK_AT(mem_end_stk[mem])->u.mem.pstr
: (UChar* )((void* )mem_end_stk[mem]));
n = pend - pstart;
DATA_ENSURE(n);
sprev = s;
swork = s;
STRING_CMP_VALUE_IC(case_fold_flag, pstart, &swork, n, is_fail);
if (is_fail) continue;
s = swork;
while (sprev + (len = enclen(encode, sprev)) < s)
sprev += len;
p += (SIZE_MEMNUM * (tlen - i - 1));
break; /* success */
}
if (i == tlen) goto fail;
MOP_OUT;
continue;
}
break;
#ifdef USE_BACKREF_WITH_LEVEL
case OP_BACKREF_WITH_LEVEL:
{
int len;
OnigOptionType ic;
LengthType level;
GET_OPTION_INC(ic, p);
GET_LENGTH_INC(level, p);
GET_LENGTH_INC(tlen, p);
sprev = s;
if (backref_match_at_nested_level(reg, stk, stk_base, ic
, case_fold_flag, (int )level, (int )tlen, p, &s, end)) {
while (sprev + (len = enclen(encode, sprev)) < s)
sprev += len;
p += (SIZE_MEMNUM * tlen);
}
else
goto fail;
MOP_OUT;
continue;
}
break;
#endif
#if 0 /* no need: IS_DYNAMIC_OPTION() == 0 */
case OP_SET_OPTION_PUSH: MOP_IN(OP_SET_OPTION_PUSH);
GET_OPTION_INC(option, p);
STACK_PUSH_ALT(p, s, sprev);
p += SIZE_OP_SET_OPTION + SIZE_OP_FAIL;
MOP_OUT;
continue;
break;
case OP_SET_OPTION: MOP_IN(OP_SET_OPTION);
GET_OPTION_INC(option, p);
MOP_OUT;
continue;
break;
#endif
case OP_NULL_CHECK_START: MOP_IN(OP_NULL_CHECK_START);
GET_MEMNUM_INC(mem, p); /* mem: null check id */
STACK_PUSH_NULL_CHECK_START(mem, s);
MOP_OUT;
continue;
break;
case OP_NULL_CHECK_END: MOP_IN(OP_NULL_CHECK_END);
{
int isnull;
GET_MEMNUM_INC(mem, p); /* mem: null check id */
STACK_NULL_CHECK(isnull, mem, s);
if (isnull) {
#ifdef ONIG_DEBUG_MATCH
fprintf(stderr, "NULL_CHECK_END: skip id:%d, s:%d\n",
(int )mem, (int )s);
#endif
null_check_found:
/* empty loop founded, skip next instruction */
switch (*p++) {
case OP_JUMP:
case OP_PUSH:
p += SIZE_RELADDR;
break;
case OP_REPEAT_INC:
case OP_REPEAT_INC_NG:
case OP_REPEAT_INC_SG:
case OP_REPEAT_INC_NG_SG:
p += SIZE_MEMNUM;
break;
default:
goto unexpected_bytecode_error;
break;
}
}
}
MOP_OUT;
continue;
break;
#ifdef USE_MONOMANIAC_CHECK_CAPTURES_IN_ENDLESS_REPEAT
case OP_NULL_CHECK_END_MEMST: MOP_IN(OP_NULL_CHECK_END_MEMST);
{
int isnull;
GET_MEMNUM_INC(mem, p); /* mem: null check id */
STACK_NULL_CHECK_MEMST(isnull, mem, s, reg);
if (isnull) {
#ifdef ONIG_DEBUG_MATCH
fprintf(stderr, "NULL_CHECK_END_MEMST: skip id:%d, s:%d\n",
(int )mem, (int )s);
#endif
if (isnull == -1) goto fail;
goto null_check_found;
}
}
MOP_OUT;
continue;
break;
#endif
#ifdef USE_SUBEXP_CALL
case OP_NULL_CHECK_END_MEMST_PUSH:
MOP_IN(OP_NULL_CHECK_END_MEMST_PUSH);
{
int isnull;
GET_MEMNUM_INC(mem, p); /* mem: null check id */
#ifdef USE_MONOMANIAC_CHECK_CAPTURES_IN_ENDLESS_REPEAT
STACK_NULL_CHECK_MEMST_REC(isnull, mem, s, reg);
#else
STACK_NULL_CHECK_REC(isnull, mem, s);
#endif
if (isnull) {
#ifdef ONIG_DEBUG_MATCH
fprintf(stderr, "NULL_CHECK_END_MEMST_PUSH: skip id:%d, s:%d\n",
(int )mem, (int )s);
#endif
if (isnull == -1) goto fail;
goto null_check_found;
}
else {
STACK_PUSH_NULL_CHECK_END(mem);
}
}
MOP_OUT;
continue;
break;
#endif
case OP_JUMP: MOP_IN(OP_JUMP);
GET_RELADDR_INC(addr, p);
p += addr;
MOP_OUT;
CHECK_INTERRUPT_IN_MATCH_AT;
continue;
break;
case OP_PUSH: MOP_IN(OP_PUSH);
GET_RELADDR_INC(addr, p);
STACK_PUSH_ALT(p + addr, s, sprev);
MOP_OUT;
continue;
break;
#ifdef USE_COMBINATION_EXPLOSION_CHECK
case OP_STATE_CHECK_PUSH: MOP_IN(OP_STATE_CHECK_PUSH);
GET_STATE_CHECK_NUM_INC(mem, p);
STATE_CHECK_VAL(scv, mem);
if (scv) goto fail;
GET_RELADDR_INC(addr, p);
STACK_PUSH_ALT_WITH_STATE_CHECK(p + addr, s, sprev, mem);
MOP_OUT;
continue;
break;
case OP_STATE_CHECK_PUSH_OR_JUMP: MOP_IN(OP_STATE_CHECK_PUSH_OR_JUMP);
GET_STATE_CHECK_NUM_INC(mem, p);
GET_RELADDR_INC(addr, p);
STATE_CHECK_VAL(scv, mem);
if (scv) {
p += addr;
}
else {
STACK_PUSH_ALT_WITH_STATE_CHECK(p + addr, s, sprev, mem);
}
MOP_OUT;
continue;
break;
case OP_STATE_CHECK: MOP_IN(OP_STATE_CHECK);
GET_STATE_CHECK_NUM_INC(mem, p);
STATE_CHECK_VAL(scv, mem);
if (scv) goto fail;
STACK_PUSH_STATE_CHECK(s, mem);
MOP_OUT;
continue;
break;
#endif /* USE_COMBINATION_EXPLOSION_CHECK */
case OP_POP: MOP_IN(OP_POP);
STACK_POP_ONE;
MOP_OUT;
continue;
break;
case OP_PUSH_OR_JUMP_EXACT1: MOP_IN(OP_PUSH_OR_JUMP_EXACT1);
GET_RELADDR_INC(addr, p);
if (*p == *s && DATA_ENSURE_CHECK1) {
p++;
STACK_PUSH_ALT(p + addr, s, sprev);
MOP_OUT;
continue;
}
p += (addr + 1);
MOP_OUT;
continue;
break;
case OP_PUSH_IF_PEEK_NEXT: MOP_IN(OP_PUSH_IF_PEEK_NEXT);
GET_RELADDR_INC(addr, p);
if (*p == *s) {
p++;
STACK_PUSH_ALT(p + addr, s, sprev);
MOP_OUT;
continue;
}
p++;
MOP_OUT;
continue;
break;
case OP_REPEAT: MOP_IN(OP_REPEAT);
{
GET_MEMNUM_INC(mem, p); /* mem: OP_REPEAT ID */
GET_RELADDR_INC(addr, p);
STACK_ENSURE(1);
repeat_stk[mem] = GET_STACK_INDEX(stk);
STACK_PUSH_REPEAT(mem, p);
if (reg->repeat_range[mem].lower == 0) {
STACK_PUSH_ALT(p + addr, s, sprev);
}
}
MOP_OUT;
continue;
break;
case OP_REPEAT_NG: MOP_IN(OP_REPEAT_NG);
{
GET_MEMNUM_INC(mem, p); /* mem: OP_REPEAT ID */
GET_RELADDR_INC(addr, p);
STACK_ENSURE(1);
repeat_stk[mem] = GET_STACK_INDEX(stk);
STACK_PUSH_REPEAT(mem, p);
if (reg->repeat_range[mem].lower == 0) {
STACK_PUSH_ALT(p, s, sprev);
p += addr;
}
}
MOP_OUT;
continue;
break;
case OP_REPEAT_INC: MOP_IN(OP_REPEAT_INC);
GET_MEMNUM_INC(mem, p); /* mem: OP_REPEAT ID */
si = repeat_stk[mem];
stkp = STACK_AT(si);
repeat_inc:
stkp->u.repeat.count++;
if (stkp->u.repeat.count >= reg->repeat_range[mem].upper) {
/* end of repeat. Nothing to do. */
}
else if (stkp->u.repeat.count >= reg->repeat_range[mem].lower) {
STACK_PUSH_ALT(p, s, sprev);
p = STACK_AT(si)->u.repeat.pcode; /* Don't use stkp after PUSH. */
}
else {
p = stkp->u.repeat.pcode;
}
STACK_PUSH_REPEAT_INC(si);
MOP_OUT;
CHECK_INTERRUPT_IN_MATCH_AT;
continue;
break;
case OP_REPEAT_INC_SG: MOP_IN(OP_REPEAT_INC_SG);
GET_MEMNUM_INC(mem, p); /* mem: OP_REPEAT ID */
STACK_GET_REPEAT(mem, stkp);
si = GET_STACK_INDEX(stkp);
goto repeat_inc;
break;
case OP_REPEAT_INC_NG: MOP_IN(OP_REPEAT_INC_NG);
GET_MEMNUM_INC(mem, p); /* mem: OP_REPEAT ID */
si = repeat_stk[mem];
stkp = STACK_AT(si);
repeat_inc_ng:
stkp->u.repeat.count++;
if (stkp->u.repeat.count < reg->repeat_range[mem].upper) {
if (stkp->u.repeat.count >= reg->repeat_range[mem].lower) {
UChar* pcode = stkp->u.repeat.pcode;
STACK_PUSH_REPEAT_INC(si);
STACK_PUSH_ALT(pcode, s, sprev);
}
else {
p = stkp->u.repeat.pcode;
STACK_PUSH_REPEAT_INC(si);
}
}
else if (stkp->u.repeat.count == reg->repeat_range[mem].upper) {
STACK_PUSH_REPEAT_INC(si);
}
MOP_OUT;
CHECK_INTERRUPT_IN_MATCH_AT;
continue;
break;
case OP_REPEAT_INC_NG_SG: MOP_IN(OP_REPEAT_INC_NG_SG);
GET_MEMNUM_INC(mem, p); /* mem: OP_REPEAT ID */
STACK_GET_REPEAT(mem, stkp);
si = GET_STACK_INDEX(stkp);
goto repeat_inc_ng;
break;
case OP_PUSH_POS: MOP_IN(OP_PUSH_POS);
STACK_PUSH_POS(s, sprev);
MOP_OUT;
continue;
break;
case OP_POP_POS: MOP_IN(OP_POP_POS);
{
STACK_POS_END(stkp);
s = stkp->u.state.pstr;
sprev = stkp->u.state.pstr_prev;
}
MOP_OUT;
continue;
break;
case OP_PUSH_POS_NOT: MOP_IN(OP_PUSH_POS_NOT);
GET_RELADDR_INC(addr, p);
STACK_PUSH_POS_NOT(p + addr, s, sprev);
MOP_OUT;
continue;
break;
case OP_FAIL_POS: MOP_IN(OP_FAIL_POS);
STACK_POP_TIL_POS_NOT;
goto fail;
break;
case OP_PUSH_STOP_BT: MOP_IN(OP_PUSH_STOP_BT);
STACK_PUSH_STOP_BT;
MOP_OUT;
continue;
break;
case OP_POP_STOP_BT: MOP_IN(OP_POP_STOP_BT);
STACK_STOP_BT_END;
MOP_OUT;
continue;
break;
case OP_LOOK_BEHIND: MOP_IN(OP_LOOK_BEHIND);
GET_LENGTH_INC(tlen, p);
s = (UChar* )ONIGENC_STEP_BACK(encode, str, s, (int )tlen);
if (IS_NULL(s)) goto fail;
sprev = (UChar* )onigenc_get_prev_char_head(encode, str, s);
MOP_OUT;
continue;
break;
case OP_PUSH_LOOK_BEHIND_NOT: MOP_IN(OP_PUSH_LOOK_BEHIND_NOT);
GET_RELADDR_INC(addr, p);
GET_LENGTH_INC(tlen, p);
q = (UChar* )ONIGENC_STEP_BACK(encode, str, s, (int )tlen);
if (IS_NULL(q)) {
/* too short case -> success. ex. /(?<!XXX)a/.match("a")
If you want to change to fail, replace following line. */
p += addr;
/* goto fail; */
}
else {
STACK_PUSH_LOOK_BEHIND_NOT(p + addr, s, sprev);
s = q;
sprev = (UChar* )onigenc_get_prev_char_head(encode, str, s);
}
MOP_OUT;
continue;
break;
case OP_FAIL_LOOK_BEHIND_NOT: MOP_IN(OP_FAIL_LOOK_BEHIND_NOT);
STACK_POP_TIL_LOOK_BEHIND_NOT;
goto fail;
break;
#ifdef USE_SUBEXP_CALL
case OP_CALL: MOP_IN(OP_CALL);
GET_ABSADDR_INC(addr, p);
STACK_PUSH_CALL_FRAME(p);
p = reg->p + addr;
MOP_OUT;
continue;
break;
case OP_RETURN: MOP_IN(OP_RETURN);
STACK_RETURN(p);
STACK_PUSH_RETURN;
MOP_OUT;
continue;
break;
#endif
case OP_FINISH:
goto finish;
break;
fail:
MOP_OUT;
/* fall */
case OP_FAIL: MOP_IN(OP_FAIL);
STACK_POP;
p = stk->u.state.pcode;
s = stk->u.state.pstr;
sprev = stk->u.state.pstr_prev;
#ifdef USE_COMBINATION_EXPLOSION_CHECK
if (stk->u.state.state_check != 0) {
stk->type = STK_STATE_CHECK_MARK;
stk++;
}
#endif
MOP_OUT;
continue;
break;
default:
goto bytecode_error;
} /* end of switch */
sprev = sbegin;
} /* end of while(1) */
finish:
STACK_SAVE;
return best_len;
#ifdef ONIG_DEBUG
stack_error:
STACK_SAVE;
return ONIGERR_STACK_BUG;
#endif
bytecode_error:
STACK_SAVE;
return ONIGERR_UNDEFINED_BYTECODE;
unexpected_bytecode_error:
STACK_SAVE;
return ONIGERR_UNEXPECTED_BYTECODE;
}
Commit Message: fix #57 : DATA_ENSURE() check must be before data access
CWE ID: CWE-125 | match_at(regex_t* reg, const UChar* str, const UChar* end,
#ifdef USE_MATCH_RANGE_MUST_BE_INSIDE_OF_SPECIFIED_RANGE
const UChar* right_range,
#endif
const UChar* sstart, UChar* sprev, OnigMatchArg* msa)
{
static UChar FinishCode[] = { OP_FINISH };
int i, n, num_mem, best_len, pop_level;
LengthType tlen, tlen2;
MemNumType mem;
RelAddrType addr;
UChar *s, *q, *sbegin;
int is_alloca;
char *alloc_base;
OnigStackType *stk_base, *stk, *stk_end;
OnigStackType *stkp; /* used as any purpose. */
OnigStackIndex si;
OnigStackIndex *repeat_stk;
OnigStackIndex *mem_start_stk, *mem_end_stk;
#ifdef USE_COMBINATION_EXPLOSION_CHECK
int scv;
unsigned char* state_check_buff = msa->state_check_buff;
int num_comb_exp_check = reg->num_comb_exp_check;
#endif
UChar *p = reg->p;
OnigOptionType option = reg->options;
OnigEncoding encode = reg->enc;
OnigCaseFoldType case_fold_flag = reg->case_fold_flag;
pop_level = reg->stack_pop_level;
num_mem = reg->num_mem;
STACK_INIT(INIT_MATCH_STACK_SIZE);
UPDATE_FOR_STACK_REALLOC;
for (i = 1; i <= num_mem; i++) {
mem_start_stk[i] = mem_end_stk[i] = INVALID_STACK_INDEX;
}
#ifdef ONIG_DEBUG_MATCH
fprintf(stderr, "match_at: str: %d, end: %d, start: %d, sprev: %d\n",
(int )str, (int )end, (int )sstart, (int )sprev);
fprintf(stderr, "size: %d, start offset: %d\n",
(int )(end - str), (int )(sstart - str));
#endif
STACK_PUSH_ENSURED(STK_ALT, FinishCode); /* bottom stack */
best_len = ONIG_MISMATCH;
s = (UChar* )sstart;
while (1) {
#ifdef ONIG_DEBUG_MATCH
{
UChar *q, *bp, buf[50];
int len;
fprintf(stderr, "%4d> \"", (int )(s - str));
bp = buf;
for (i = 0, q = s; i < 7 && q < end; i++) {
len = enclen(encode, q);
while (len-- > 0) *bp++ = *q++;
}
if (q < end) { xmemcpy(bp, "...\"", 4); bp += 4; }
else { xmemcpy(bp, "\"", 1); bp += 1; }
*bp = 0;
fputs((char* )buf, stderr);
for (i = 0; i < 20 - (bp - buf); i++) fputc(' ', stderr);
onig_print_compiled_byte_code(stderr, p, NULL, encode);
fprintf(stderr, "\n");
}
#endif
sbegin = s;
switch (*p++) {
case OP_END: MOP_IN(OP_END);
n = s - sstart;
if (n > best_len) {
OnigRegion* region;
#ifdef USE_FIND_LONGEST_SEARCH_ALL_OF_RANGE
if (IS_FIND_LONGEST(option)) {
if (n > msa->best_len) {
msa->best_len = n;
msa->best_s = (UChar* )sstart;
}
else
goto end_best_len;
}
#endif
best_len = n;
region = msa->region;
if (region) {
#ifdef USE_POSIX_API_REGION_OPTION
if (IS_POSIX_REGION(msa->options)) {
posix_regmatch_t* rmt = (posix_regmatch_t* )region;
rmt[0].rm_so = sstart - str;
rmt[0].rm_eo = s - str;
for (i = 1; i <= num_mem; i++) {
if (mem_end_stk[i] != INVALID_STACK_INDEX) {
if (BIT_STATUS_AT(reg->bt_mem_start, i))
rmt[i].rm_so = STACK_AT(mem_start_stk[i])->u.mem.pstr - str;
else
rmt[i].rm_so = (UChar* )((void* )(mem_start_stk[i])) - str;
rmt[i].rm_eo = (BIT_STATUS_AT(reg->bt_mem_end, i)
? STACK_AT(mem_end_stk[i])->u.mem.pstr
: (UChar* )((void* )mem_end_stk[i])) - str;
}
else {
rmt[i].rm_so = rmt[i].rm_eo = ONIG_REGION_NOTPOS;
}
}
}
else {
#endif /* USE_POSIX_API_REGION_OPTION */
region->beg[0] = sstart - str;
region->end[0] = s - str;
for (i = 1; i <= num_mem; i++) {
if (mem_end_stk[i] != INVALID_STACK_INDEX) {
if (BIT_STATUS_AT(reg->bt_mem_start, i))
region->beg[i] = STACK_AT(mem_start_stk[i])->u.mem.pstr - str;
else
region->beg[i] = (UChar* )((void* )mem_start_stk[i]) - str;
region->end[i] = (BIT_STATUS_AT(reg->bt_mem_end, i)
? STACK_AT(mem_end_stk[i])->u.mem.pstr
: (UChar* )((void* )mem_end_stk[i])) - str;
}
else {
region->beg[i] = region->end[i] = ONIG_REGION_NOTPOS;
}
}
#ifdef USE_CAPTURE_HISTORY
if (reg->capture_history != 0) {
int r;
OnigCaptureTreeNode* node;
if (IS_NULL(region->history_root)) {
region->history_root = node = history_node_new();
CHECK_NULL_RETURN_MEMERR(node);
}
else {
node = region->history_root;
history_tree_clear(node);
}
node->group = 0;
node->beg = sstart - str;
node->end = s - str;
stkp = stk_base;
r = make_capture_history_tree(region->history_root, &stkp,
stk, (UChar* )str, reg);
if (r < 0) {
best_len = r; /* error code */
goto finish;
}
}
#endif /* USE_CAPTURE_HISTORY */
#ifdef USE_POSIX_API_REGION_OPTION
} /* else IS_POSIX_REGION() */
#endif
} /* if (region) */
} /* n > best_len */
#ifdef USE_FIND_LONGEST_SEARCH_ALL_OF_RANGE
end_best_len:
#endif
MOP_OUT;
if (IS_FIND_CONDITION(option)) {
if (IS_FIND_NOT_EMPTY(option) && s == sstart) {
best_len = ONIG_MISMATCH;
goto fail; /* for retry */
}
if (IS_FIND_LONGEST(option) && DATA_ENSURE_CHECK1) {
goto fail; /* for retry */
}
}
/* default behavior: return first-matching result. */
goto finish;
break;
case OP_EXACT1: MOP_IN(OP_EXACT1);
DATA_ENSURE(1);
if (*p != *s) goto fail;
p++; s++;
MOP_OUT;
break;
case OP_EXACT1_IC: MOP_IN(OP_EXACT1_IC);
{
int len;
UChar *q, lowbuf[ONIGENC_MBC_CASE_FOLD_MAXLEN];
DATA_ENSURE(1);
len = ONIGENC_MBC_CASE_FOLD(encode,
/* DISABLE_CASE_FOLD_MULTI_CHAR(case_fold_flag), */
case_fold_flag,
&s, end, lowbuf);
DATA_ENSURE(0);
q = lowbuf;
while (len-- > 0) {
if (*p != *q) {
goto fail;
}
p++; q++;
}
}
MOP_OUT;
break;
case OP_EXACT2: MOP_IN(OP_EXACT2);
DATA_ENSURE(2);
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
sprev = s;
p++; s++;
MOP_OUT;
continue;
break;
case OP_EXACT3: MOP_IN(OP_EXACT3);
DATA_ENSURE(3);
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
sprev = s;
p++; s++;
MOP_OUT;
continue;
break;
case OP_EXACT4: MOP_IN(OP_EXACT4);
DATA_ENSURE(4);
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
sprev = s;
p++; s++;
MOP_OUT;
continue;
break;
case OP_EXACT5: MOP_IN(OP_EXACT5);
DATA_ENSURE(5);
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
sprev = s;
p++; s++;
MOP_OUT;
continue;
break;
case OP_EXACTN: MOP_IN(OP_EXACTN);
GET_LENGTH_INC(tlen, p);
DATA_ENSURE(tlen);
while (tlen-- > 0) {
if (*p++ != *s++) goto fail;
}
sprev = s - 1;
MOP_OUT;
continue;
break;
case OP_EXACTN_IC: MOP_IN(OP_EXACTN_IC);
{
int len;
UChar *q, *endp, lowbuf[ONIGENC_MBC_CASE_FOLD_MAXLEN];
GET_LENGTH_INC(tlen, p);
endp = p + tlen;
while (p < endp) {
sprev = s;
DATA_ENSURE(1);
len = ONIGENC_MBC_CASE_FOLD(encode,
/* DISABLE_CASE_FOLD_MULTI_CHAR(case_fold_flag), */
case_fold_flag,
&s, end, lowbuf);
DATA_ENSURE(0);
q = lowbuf;
while (len-- > 0) {
if (*p != *q) goto fail;
p++; q++;
}
}
}
MOP_OUT;
continue;
break;
case OP_EXACTMB2N1: MOP_IN(OP_EXACTMB2N1);
DATA_ENSURE(2);
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
MOP_OUT;
break;
case OP_EXACTMB2N2: MOP_IN(OP_EXACTMB2N2);
DATA_ENSURE(4);
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
sprev = s;
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
MOP_OUT;
continue;
break;
case OP_EXACTMB2N3: MOP_IN(OP_EXACTMB2N3);
DATA_ENSURE(6);
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
sprev = s;
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
MOP_OUT;
continue;
break;
case OP_EXACTMB2N: MOP_IN(OP_EXACTMB2N);
GET_LENGTH_INC(tlen, p);
DATA_ENSURE(tlen * 2);
while (tlen-- > 0) {
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
}
sprev = s - 2;
MOP_OUT;
continue;
break;
case OP_EXACTMB3N: MOP_IN(OP_EXACTMB3N);
GET_LENGTH_INC(tlen, p);
DATA_ENSURE(tlen * 3);
while (tlen-- > 0) {
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
if (*p != *s) goto fail;
p++; s++;
}
sprev = s - 3;
MOP_OUT;
continue;
break;
case OP_EXACTMBN: MOP_IN(OP_EXACTMBN);
GET_LENGTH_INC(tlen, p); /* mb-len */
GET_LENGTH_INC(tlen2, p); /* string len */
tlen2 *= tlen;
DATA_ENSURE(tlen2);
while (tlen2-- > 0) {
if (*p != *s) goto fail;
p++; s++;
}
sprev = s - tlen;
MOP_OUT;
continue;
break;
case OP_CCLASS: MOP_IN(OP_CCLASS);
DATA_ENSURE(1);
if (BITSET_AT(((BitSetRef )p), *s) == 0) goto fail;
p += SIZE_BITSET;
s += enclen(encode, s); /* OP_CCLASS can match mb-code. \D, \S */
MOP_OUT;
break;
case OP_CCLASS_MB: MOP_IN(OP_CCLASS_MB);
if (! ONIGENC_IS_MBC_HEAD(encode, s)) goto fail;
cclass_mb:
GET_LENGTH_INC(tlen, p);
{
OnigCodePoint code;
UChar *ss;
int mb_len;
DATA_ENSURE(1);
mb_len = enclen(encode, s);
DATA_ENSURE(mb_len);
ss = s;
s += mb_len;
code = ONIGENC_MBC_TO_CODE(encode, ss, s);
#ifdef PLATFORM_UNALIGNED_WORD_ACCESS
if (! onig_is_in_code_range(p, code)) goto fail;
#else
q = p;
ALIGNMENT_RIGHT(q);
if (! onig_is_in_code_range(q, code)) goto fail;
#endif
}
p += tlen;
MOP_OUT;
break;
case OP_CCLASS_MIX: MOP_IN(OP_CCLASS_MIX);
DATA_ENSURE(1);
if (ONIGENC_IS_MBC_HEAD(encode, s)) {
p += SIZE_BITSET;
goto cclass_mb;
}
else {
if (BITSET_AT(((BitSetRef )p), *s) == 0)
goto fail;
p += SIZE_BITSET;
GET_LENGTH_INC(tlen, p);
p += tlen;
s++;
}
MOP_OUT;
break;
case OP_CCLASS_NOT: MOP_IN(OP_CCLASS_NOT);
DATA_ENSURE(1);
if (BITSET_AT(((BitSetRef )p), *s) != 0) goto fail;
p += SIZE_BITSET;
s += enclen(encode, s);
MOP_OUT;
break;
case OP_CCLASS_MB_NOT: MOP_IN(OP_CCLASS_MB_NOT);
DATA_ENSURE(1);
if (! ONIGENC_IS_MBC_HEAD(encode, s)) {
s++;
GET_LENGTH_INC(tlen, p);
p += tlen;
goto cc_mb_not_success;
}
cclass_mb_not:
GET_LENGTH_INC(tlen, p);
{
OnigCodePoint code;
UChar *ss;
int mb_len = enclen(encode, s);
if (! DATA_ENSURE_CHECK(mb_len)) {
DATA_ENSURE(1);
s = (UChar* )end;
p += tlen;
goto cc_mb_not_success;
}
ss = s;
s += mb_len;
code = ONIGENC_MBC_TO_CODE(encode, ss, s);
#ifdef PLATFORM_UNALIGNED_WORD_ACCESS
if (onig_is_in_code_range(p, code)) goto fail;
#else
q = p;
ALIGNMENT_RIGHT(q);
if (onig_is_in_code_range(q, code)) goto fail;
#endif
}
p += tlen;
cc_mb_not_success:
MOP_OUT;
break;
case OP_CCLASS_MIX_NOT: MOP_IN(OP_CCLASS_MIX_NOT);
DATA_ENSURE(1);
if (ONIGENC_IS_MBC_HEAD(encode, s)) {
p += SIZE_BITSET;
goto cclass_mb_not;
}
else {
if (BITSET_AT(((BitSetRef )p), *s) != 0)
goto fail;
p += SIZE_BITSET;
GET_LENGTH_INC(tlen, p);
p += tlen;
s++;
}
MOP_OUT;
break;
case OP_CCLASS_NODE: MOP_IN(OP_CCLASS_NODE);
{
OnigCodePoint code;
void *node;
int mb_len;
UChar *ss;
DATA_ENSURE(1);
GET_POINTER_INC(node, p);
mb_len = enclen(encode, s);
ss = s;
s += mb_len;
DATA_ENSURE(0);
code = ONIGENC_MBC_TO_CODE(encode, ss, s);
if (onig_is_code_in_cc_len(mb_len, code, node) == 0) goto fail;
}
MOP_OUT;
break;
case OP_ANYCHAR: MOP_IN(OP_ANYCHAR);
DATA_ENSURE(1);
n = enclen(encode, s);
DATA_ENSURE(n);
if (ONIGENC_IS_MBC_NEWLINE(encode, s, end)) goto fail;
s += n;
MOP_OUT;
break;
case OP_ANYCHAR_ML: MOP_IN(OP_ANYCHAR_ML);
DATA_ENSURE(1);
n = enclen(encode, s);
DATA_ENSURE(n);
s += n;
MOP_OUT;
break;
case OP_ANYCHAR_STAR: MOP_IN(OP_ANYCHAR_STAR);
while (DATA_ENSURE_CHECK1) {
STACK_PUSH_ALT(p, s, sprev);
n = enclen(encode, s);
DATA_ENSURE(n);
if (ONIGENC_IS_MBC_NEWLINE(encode, s, end)) goto fail;
sprev = s;
s += n;
}
MOP_OUT;
break;
case OP_ANYCHAR_ML_STAR: MOP_IN(OP_ANYCHAR_ML_STAR);
while (DATA_ENSURE_CHECK1) {
STACK_PUSH_ALT(p, s, sprev);
n = enclen(encode, s);
if (n > 1) {
DATA_ENSURE(n);
sprev = s;
s += n;
}
else {
sprev = s;
s++;
}
}
MOP_OUT;
break;
case OP_ANYCHAR_STAR_PEEK_NEXT: MOP_IN(OP_ANYCHAR_STAR_PEEK_NEXT);
while (DATA_ENSURE_CHECK1) {
if (*p == *s) {
STACK_PUSH_ALT(p + 1, s, sprev);
}
n = enclen(encode, s);
DATA_ENSURE(n);
if (ONIGENC_IS_MBC_NEWLINE(encode, s, end)) goto fail;
sprev = s;
s += n;
}
p++;
MOP_OUT;
break;
case OP_ANYCHAR_ML_STAR_PEEK_NEXT:MOP_IN(OP_ANYCHAR_ML_STAR_PEEK_NEXT);
while (DATA_ENSURE_CHECK1) {
if (*p == *s) {
STACK_PUSH_ALT(p + 1, s, sprev);
}
n = enclen(encode, s);
if (n > 1) {
DATA_ENSURE(n);
sprev = s;
s += n;
}
else {
sprev = s;
s++;
}
}
p++;
MOP_OUT;
break;
#ifdef USE_COMBINATION_EXPLOSION_CHECK
case OP_STATE_CHECK_ANYCHAR_STAR: MOP_IN(OP_STATE_CHECK_ANYCHAR_STAR);
GET_STATE_CHECK_NUM_INC(mem, p);
while (DATA_ENSURE_CHECK1) {
STATE_CHECK_VAL(scv, mem);
if (scv) goto fail;
STACK_PUSH_ALT_WITH_STATE_CHECK(p, s, sprev, mem);
n = enclen(encode, s);
DATA_ENSURE(n);
if (ONIGENC_IS_MBC_NEWLINE(encode, s, end)) goto fail;
sprev = s;
s += n;
}
MOP_OUT;
break;
case OP_STATE_CHECK_ANYCHAR_ML_STAR:
MOP_IN(OP_STATE_CHECK_ANYCHAR_ML_STAR);
GET_STATE_CHECK_NUM_INC(mem, p);
while (DATA_ENSURE_CHECK1) {
STATE_CHECK_VAL(scv, mem);
if (scv) goto fail;
STACK_PUSH_ALT_WITH_STATE_CHECK(p, s, sprev, mem);
n = enclen(encode, s);
if (n > 1) {
DATA_ENSURE(n);
sprev = s;
s += n;
}
else {
sprev = s;
s++;
}
}
MOP_OUT;
break;
#endif /* USE_COMBINATION_EXPLOSION_CHECK */
case OP_WORD: MOP_IN(OP_WORD);
DATA_ENSURE(1);
if (! ONIGENC_IS_MBC_WORD(encode, s, end))
goto fail;
s += enclen(encode, s);
MOP_OUT;
break;
case OP_NOT_WORD: MOP_IN(OP_NOT_WORD);
DATA_ENSURE(1);
if (ONIGENC_IS_MBC_WORD(encode, s, end))
goto fail;
s += enclen(encode, s);
MOP_OUT;
break;
case OP_WORD_BOUND: MOP_IN(OP_WORD_BOUND);
if (ON_STR_BEGIN(s)) {
DATA_ENSURE(1);
if (! ONIGENC_IS_MBC_WORD(encode, s, end))
goto fail;
}
else if (ON_STR_END(s)) {
if (! ONIGENC_IS_MBC_WORD(encode, sprev, end))
goto fail;
}
else {
if (ONIGENC_IS_MBC_WORD(encode, s, end)
== ONIGENC_IS_MBC_WORD(encode, sprev, end))
goto fail;
}
MOP_OUT;
continue;
break;
case OP_NOT_WORD_BOUND: MOP_IN(OP_NOT_WORD_BOUND);
if (ON_STR_BEGIN(s)) {
if (DATA_ENSURE_CHECK1 && ONIGENC_IS_MBC_WORD(encode, s, end))
goto fail;
}
else if (ON_STR_END(s)) {
if (ONIGENC_IS_MBC_WORD(encode, sprev, end))
goto fail;
}
else {
if (ONIGENC_IS_MBC_WORD(encode, s, end)
!= ONIGENC_IS_MBC_WORD(encode, sprev, end))
goto fail;
}
MOP_OUT;
continue;
break;
#ifdef USE_WORD_BEGIN_END
case OP_WORD_BEGIN: MOP_IN(OP_WORD_BEGIN);
if (DATA_ENSURE_CHECK1 && ONIGENC_IS_MBC_WORD(encode, s, end)) {
if (ON_STR_BEGIN(s) || !ONIGENC_IS_MBC_WORD(encode, sprev, end)) {
MOP_OUT;
continue;
}
}
goto fail;
break;
case OP_WORD_END: MOP_IN(OP_WORD_END);
if (!ON_STR_BEGIN(s) && ONIGENC_IS_MBC_WORD(encode, sprev, end)) {
if (ON_STR_END(s) || !ONIGENC_IS_MBC_WORD(encode, s, end)) {
MOP_OUT;
continue;
}
}
goto fail;
break;
#endif
case OP_BEGIN_BUF: MOP_IN(OP_BEGIN_BUF);
if (! ON_STR_BEGIN(s)) goto fail;
MOP_OUT;
continue;
break;
case OP_END_BUF: MOP_IN(OP_END_BUF);
if (! ON_STR_END(s)) goto fail;
MOP_OUT;
continue;
break;
case OP_BEGIN_LINE: MOP_IN(OP_BEGIN_LINE);
if (ON_STR_BEGIN(s)) {
if (IS_NOTBOL(msa->options)) goto fail;
MOP_OUT;
continue;
}
else if (ONIGENC_IS_MBC_NEWLINE(encode, sprev, end) && !ON_STR_END(s)) {
MOP_OUT;
continue;
}
goto fail;
break;
case OP_END_LINE: MOP_IN(OP_END_LINE);
if (ON_STR_END(s)) {
#ifndef USE_NEWLINE_AT_END_OF_STRING_HAS_EMPTY_LINE
if (IS_EMPTY_STR || !ONIGENC_IS_MBC_NEWLINE(encode, sprev, end)) {
#endif
if (IS_NOTEOL(msa->options)) goto fail;
MOP_OUT;
continue;
#ifndef USE_NEWLINE_AT_END_OF_STRING_HAS_EMPTY_LINE
}
#endif
}
else if (ONIGENC_IS_MBC_NEWLINE(encode, s, end)) {
MOP_OUT;
continue;
}
#ifdef USE_CRNL_AS_LINE_TERMINATOR
else if (ONIGENC_IS_MBC_CRNL(encode, s, end)) {
MOP_OUT;
continue;
}
#endif
goto fail;
break;
case OP_SEMI_END_BUF: MOP_IN(OP_SEMI_END_BUF);
if (ON_STR_END(s)) {
#ifndef USE_NEWLINE_AT_END_OF_STRING_HAS_EMPTY_LINE
if (IS_EMPTY_STR || !ONIGENC_IS_MBC_NEWLINE(encode, sprev, end)) {
#endif
if (IS_NOTEOL(msa->options)) goto fail;
MOP_OUT;
continue;
#ifndef USE_NEWLINE_AT_END_OF_STRING_HAS_EMPTY_LINE
}
#endif
}
else if (ONIGENC_IS_MBC_NEWLINE(encode, s, end) &&
ON_STR_END(s + enclen(encode, s))) {
MOP_OUT;
continue;
}
#ifdef USE_CRNL_AS_LINE_TERMINATOR
else if (ONIGENC_IS_MBC_CRNL(encode, s, end)) {
UChar* ss = s + enclen(encode, s);
ss += enclen(encode, ss);
if (ON_STR_END(ss)) {
MOP_OUT;
continue;
}
}
#endif
goto fail;
break;
case OP_BEGIN_POSITION: MOP_IN(OP_BEGIN_POSITION);
if (s != msa->start)
goto fail;
MOP_OUT;
continue;
break;
case OP_MEMORY_START_PUSH: MOP_IN(OP_MEMORY_START_PUSH);
GET_MEMNUM_INC(mem, p);
STACK_PUSH_MEM_START(mem, s);
MOP_OUT;
continue;
break;
case OP_MEMORY_START: MOP_IN(OP_MEMORY_START);
GET_MEMNUM_INC(mem, p);
mem_start_stk[mem] = (OnigStackIndex )((void* )s);
MOP_OUT;
continue;
break;
case OP_MEMORY_END_PUSH: MOP_IN(OP_MEMORY_END_PUSH);
GET_MEMNUM_INC(mem, p);
STACK_PUSH_MEM_END(mem, s);
MOP_OUT;
continue;
break;
case OP_MEMORY_END: MOP_IN(OP_MEMORY_END);
GET_MEMNUM_INC(mem, p);
mem_end_stk[mem] = (OnigStackIndex )((void* )s);
MOP_OUT;
continue;
break;
#ifdef USE_SUBEXP_CALL
case OP_MEMORY_END_PUSH_REC: MOP_IN(OP_MEMORY_END_PUSH_REC);
GET_MEMNUM_INC(mem, p);
STACK_GET_MEM_START(mem, stkp); /* should be before push mem-end. */
STACK_PUSH_MEM_END(mem, s);
mem_start_stk[mem] = GET_STACK_INDEX(stkp);
MOP_OUT;
continue;
break;
case OP_MEMORY_END_REC: MOP_IN(OP_MEMORY_END_REC);
GET_MEMNUM_INC(mem, p);
mem_end_stk[mem] = (OnigStackIndex )((void* )s);
STACK_GET_MEM_START(mem, stkp);
if (BIT_STATUS_AT(reg->bt_mem_start, mem))
mem_start_stk[mem] = GET_STACK_INDEX(stkp);
else
mem_start_stk[mem] = (OnigStackIndex )((void* )stkp->u.mem.pstr);
STACK_PUSH_MEM_END_MARK(mem);
MOP_OUT;
continue;
break;
#endif
case OP_BACKREF1: MOP_IN(OP_BACKREF1);
mem = 1;
goto backref;
break;
case OP_BACKREF2: MOP_IN(OP_BACKREF2);
mem = 2;
goto backref;
break;
case OP_BACKREFN: MOP_IN(OP_BACKREFN);
GET_MEMNUM_INC(mem, p);
backref:
{
int len;
UChar *pstart, *pend;
/* if you want to remove following line,
you should check in parse and compile time. */
if (mem > num_mem) goto fail;
if (mem_end_stk[mem] == INVALID_STACK_INDEX) goto fail;
if (mem_start_stk[mem] == INVALID_STACK_INDEX) goto fail;
if (BIT_STATUS_AT(reg->bt_mem_start, mem))
pstart = STACK_AT(mem_start_stk[mem])->u.mem.pstr;
else
pstart = (UChar* )((void* )mem_start_stk[mem]);
pend = (BIT_STATUS_AT(reg->bt_mem_end, mem)
? STACK_AT(mem_end_stk[mem])->u.mem.pstr
: (UChar* )((void* )mem_end_stk[mem]));
n = pend - pstart;
DATA_ENSURE(n);
sprev = s;
STRING_CMP(pstart, s, n);
while (sprev + (len = enclen(encode, sprev)) < s)
sprev += len;
MOP_OUT;
continue;
}
break;
case OP_BACKREFN_IC: MOP_IN(OP_BACKREFN_IC);
GET_MEMNUM_INC(mem, p);
{
int len;
UChar *pstart, *pend;
/* if you want to remove following line,
you should check in parse and compile time. */
if (mem > num_mem) goto fail;
if (mem_end_stk[mem] == INVALID_STACK_INDEX) goto fail;
if (mem_start_stk[mem] == INVALID_STACK_INDEX) goto fail;
if (BIT_STATUS_AT(reg->bt_mem_start, mem))
pstart = STACK_AT(mem_start_stk[mem])->u.mem.pstr;
else
pstart = (UChar* )((void* )mem_start_stk[mem]);
pend = (BIT_STATUS_AT(reg->bt_mem_end, mem)
? STACK_AT(mem_end_stk[mem])->u.mem.pstr
: (UChar* )((void* )mem_end_stk[mem]));
n = pend - pstart;
DATA_ENSURE(n);
sprev = s;
STRING_CMP_IC(case_fold_flag, pstart, &s, n);
while (sprev + (len = enclen(encode, sprev)) < s)
sprev += len;
MOP_OUT;
continue;
}
break;
case OP_BACKREF_MULTI: MOP_IN(OP_BACKREF_MULTI);
{
int len, is_fail;
UChar *pstart, *pend, *swork;
GET_LENGTH_INC(tlen, p);
for (i = 0; i < tlen; i++) {
GET_MEMNUM_INC(mem, p);
if (mem_end_stk[mem] == INVALID_STACK_INDEX) continue;
if (mem_start_stk[mem] == INVALID_STACK_INDEX) continue;
if (BIT_STATUS_AT(reg->bt_mem_start, mem))
pstart = STACK_AT(mem_start_stk[mem])->u.mem.pstr;
else
pstart = (UChar* )((void* )mem_start_stk[mem]);
pend = (BIT_STATUS_AT(reg->bt_mem_end, mem)
? STACK_AT(mem_end_stk[mem])->u.mem.pstr
: (UChar* )((void* )mem_end_stk[mem]));
n = pend - pstart;
DATA_ENSURE(n);
sprev = s;
swork = s;
STRING_CMP_VALUE(pstart, swork, n, is_fail);
if (is_fail) continue;
s = swork;
while (sprev + (len = enclen(encode, sprev)) < s)
sprev += len;
p += (SIZE_MEMNUM * (tlen - i - 1));
break; /* success */
}
if (i == tlen) goto fail;
MOP_OUT;
continue;
}
break;
case OP_BACKREF_MULTI_IC: MOP_IN(OP_BACKREF_MULTI_IC);
{
int len, is_fail;
UChar *pstart, *pend, *swork;
GET_LENGTH_INC(tlen, p);
for (i = 0; i < tlen; i++) {
GET_MEMNUM_INC(mem, p);
if (mem_end_stk[mem] == INVALID_STACK_INDEX) continue;
if (mem_start_stk[mem] == INVALID_STACK_INDEX) continue;
if (BIT_STATUS_AT(reg->bt_mem_start, mem))
pstart = STACK_AT(mem_start_stk[mem])->u.mem.pstr;
else
pstart = (UChar* )((void* )mem_start_stk[mem]);
pend = (BIT_STATUS_AT(reg->bt_mem_end, mem)
? STACK_AT(mem_end_stk[mem])->u.mem.pstr
: (UChar* )((void* )mem_end_stk[mem]));
n = pend - pstart;
DATA_ENSURE(n);
sprev = s;
swork = s;
STRING_CMP_VALUE_IC(case_fold_flag, pstart, &swork, n, is_fail);
if (is_fail) continue;
s = swork;
while (sprev + (len = enclen(encode, sprev)) < s)
sprev += len;
p += (SIZE_MEMNUM * (tlen - i - 1));
break; /* success */
}
if (i == tlen) goto fail;
MOP_OUT;
continue;
}
break;
#ifdef USE_BACKREF_WITH_LEVEL
case OP_BACKREF_WITH_LEVEL:
{
int len;
OnigOptionType ic;
LengthType level;
GET_OPTION_INC(ic, p);
GET_LENGTH_INC(level, p);
GET_LENGTH_INC(tlen, p);
sprev = s;
if (backref_match_at_nested_level(reg, stk, stk_base, ic
, case_fold_flag, (int )level, (int )tlen, p, &s, end)) {
while (sprev + (len = enclen(encode, sprev)) < s)
sprev += len;
p += (SIZE_MEMNUM * tlen);
}
else
goto fail;
MOP_OUT;
continue;
}
break;
#endif
#if 0 /* no need: IS_DYNAMIC_OPTION() == 0 */
case OP_SET_OPTION_PUSH: MOP_IN(OP_SET_OPTION_PUSH);
GET_OPTION_INC(option, p);
STACK_PUSH_ALT(p, s, sprev);
p += SIZE_OP_SET_OPTION + SIZE_OP_FAIL;
MOP_OUT;
continue;
break;
case OP_SET_OPTION: MOP_IN(OP_SET_OPTION);
GET_OPTION_INC(option, p);
MOP_OUT;
continue;
break;
#endif
case OP_NULL_CHECK_START: MOP_IN(OP_NULL_CHECK_START);
GET_MEMNUM_INC(mem, p); /* mem: null check id */
STACK_PUSH_NULL_CHECK_START(mem, s);
MOP_OUT;
continue;
break;
case OP_NULL_CHECK_END: MOP_IN(OP_NULL_CHECK_END);
{
int isnull;
GET_MEMNUM_INC(mem, p); /* mem: null check id */
STACK_NULL_CHECK(isnull, mem, s);
if (isnull) {
#ifdef ONIG_DEBUG_MATCH
fprintf(stderr, "NULL_CHECK_END: skip id:%d, s:%d\n",
(int )mem, (int )s);
#endif
null_check_found:
/* empty loop founded, skip next instruction */
switch (*p++) {
case OP_JUMP:
case OP_PUSH:
p += SIZE_RELADDR;
break;
case OP_REPEAT_INC:
case OP_REPEAT_INC_NG:
case OP_REPEAT_INC_SG:
case OP_REPEAT_INC_NG_SG:
p += SIZE_MEMNUM;
break;
default:
goto unexpected_bytecode_error;
break;
}
}
}
MOP_OUT;
continue;
break;
#ifdef USE_MONOMANIAC_CHECK_CAPTURES_IN_ENDLESS_REPEAT
case OP_NULL_CHECK_END_MEMST: MOP_IN(OP_NULL_CHECK_END_MEMST);
{
int isnull;
GET_MEMNUM_INC(mem, p); /* mem: null check id */
STACK_NULL_CHECK_MEMST(isnull, mem, s, reg);
if (isnull) {
#ifdef ONIG_DEBUG_MATCH
fprintf(stderr, "NULL_CHECK_END_MEMST: skip id:%d, s:%d\n",
(int )mem, (int )s);
#endif
if (isnull == -1) goto fail;
goto null_check_found;
}
}
MOP_OUT;
continue;
break;
#endif
#ifdef USE_SUBEXP_CALL
case OP_NULL_CHECK_END_MEMST_PUSH:
MOP_IN(OP_NULL_CHECK_END_MEMST_PUSH);
{
int isnull;
GET_MEMNUM_INC(mem, p); /* mem: null check id */
#ifdef USE_MONOMANIAC_CHECK_CAPTURES_IN_ENDLESS_REPEAT
STACK_NULL_CHECK_MEMST_REC(isnull, mem, s, reg);
#else
STACK_NULL_CHECK_REC(isnull, mem, s);
#endif
if (isnull) {
#ifdef ONIG_DEBUG_MATCH
fprintf(stderr, "NULL_CHECK_END_MEMST_PUSH: skip id:%d, s:%d\n",
(int )mem, (int )s);
#endif
if (isnull == -1) goto fail;
goto null_check_found;
}
else {
STACK_PUSH_NULL_CHECK_END(mem);
}
}
MOP_OUT;
continue;
break;
#endif
case OP_JUMP: MOP_IN(OP_JUMP);
GET_RELADDR_INC(addr, p);
p += addr;
MOP_OUT;
CHECK_INTERRUPT_IN_MATCH_AT;
continue;
break;
case OP_PUSH: MOP_IN(OP_PUSH);
GET_RELADDR_INC(addr, p);
STACK_PUSH_ALT(p + addr, s, sprev);
MOP_OUT;
continue;
break;
#ifdef USE_COMBINATION_EXPLOSION_CHECK
case OP_STATE_CHECK_PUSH: MOP_IN(OP_STATE_CHECK_PUSH);
GET_STATE_CHECK_NUM_INC(mem, p);
STATE_CHECK_VAL(scv, mem);
if (scv) goto fail;
GET_RELADDR_INC(addr, p);
STACK_PUSH_ALT_WITH_STATE_CHECK(p + addr, s, sprev, mem);
MOP_OUT;
continue;
break;
case OP_STATE_CHECK_PUSH_OR_JUMP: MOP_IN(OP_STATE_CHECK_PUSH_OR_JUMP);
GET_STATE_CHECK_NUM_INC(mem, p);
GET_RELADDR_INC(addr, p);
STATE_CHECK_VAL(scv, mem);
if (scv) {
p += addr;
}
else {
STACK_PUSH_ALT_WITH_STATE_CHECK(p + addr, s, sprev, mem);
}
MOP_OUT;
continue;
break;
case OP_STATE_CHECK: MOP_IN(OP_STATE_CHECK);
GET_STATE_CHECK_NUM_INC(mem, p);
STATE_CHECK_VAL(scv, mem);
if (scv) goto fail;
STACK_PUSH_STATE_CHECK(s, mem);
MOP_OUT;
continue;
break;
#endif /* USE_COMBINATION_EXPLOSION_CHECK */
case OP_POP: MOP_IN(OP_POP);
STACK_POP_ONE;
MOP_OUT;
continue;
break;
case OP_PUSH_OR_JUMP_EXACT1: MOP_IN(OP_PUSH_OR_JUMP_EXACT1);
GET_RELADDR_INC(addr, p);
if (*p == *s && DATA_ENSURE_CHECK1) {
p++;
STACK_PUSH_ALT(p + addr, s, sprev);
MOP_OUT;
continue;
}
p += (addr + 1);
MOP_OUT;
continue;
break;
case OP_PUSH_IF_PEEK_NEXT: MOP_IN(OP_PUSH_IF_PEEK_NEXT);
GET_RELADDR_INC(addr, p);
if (*p == *s) {
p++;
STACK_PUSH_ALT(p + addr, s, sprev);
MOP_OUT;
continue;
}
p++;
MOP_OUT;
continue;
break;
case OP_REPEAT: MOP_IN(OP_REPEAT);
{
GET_MEMNUM_INC(mem, p); /* mem: OP_REPEAT ID */
GET_RELADDR_INC(addr, p);
STACK_ENSURE(1);
repeat_stk[mem] = GET_STACK_INDEX(stk);
STACK_PUSH_REPEAT(mem, p);
if (reg->repeat_range[mem].lower == 0) {
STACK_PUSH_ALT(p + addr, s, sprev);
}
}
MOP_OUT;
continue;
break;
case OP_REPEAT_NG: MOP_IN(OP_REPEAT_NG);
{
GET_MEMNUM_INC(mem, p); /* mem: OP_REPEAT ID */
GET_RELADDR_INC(addr, p);
STACK_ENSURE(1);
repeat_stk[mem] = GET_STACK_INDEX(stk);
STACK_PUSH_REPEAT(mem, p);
if (reg->repeat_range[mem].lower == 0) {
STACK_PUSH_ALT(p, s, sprev);
p += addr;
}
}
MOP_OUT;
continue;
break;
case OP_REPEAT_INC: MOP_IN(OP_REPEAT_INC);
GET_MEMNUM_INC(mem, p); /* mem: OP_REPEAT ID */
si = repeat_stk[mem];
stkp = STACK_AT(si);
repeat_inc:
stkp->u.repeat.count++;
if (stkp->u.repeat.count >= reg->repeat_range[mem].upper) {
/* end of repeat. Nothing to do. */
}
else if (stkp->u.repeat.count >= reg->repeat_range[mem].lower) {
STACK_PUSH_ALT(p, s, sprev);
p = STACK_AT(si)->u.repeat.pcode; /* Don't use stkp after PUSH. */
}
else {
p = stkp->u.repeat.pcode;
}
STACK_PUSH_REPEAT_INC(si);
MOP_OUT;
CHECK_INTERRUPT_IN_MATCH_AT;
continue;
break;
case OP_REPEAT_INC_SG: MOP_IN(OP_REPEAT_INC_SG);
GET_MEMNUM_INC(mem, p); /* mem: OP_REPEAT ID */
STACK_GET_REPEAT(mem, stkp);
si = GET_STACK_INDEX(stkp);
goto repeat_inc;
break;
case OP_REPEAT_INC_NG: MOP_IN(OP_REPEAT_INC_NG);
GET_MEMNUM_INC(mem, p); /* mem: OP_REPEAT ID */
si = repeat_stk[mem];
stkp = STACK_AT(si);
repeat_inc_ng:
stkp->u.repeat.count++;
if (stkp->u.repeat.count < reg->repeat_range[mem].upper) {
if (stkp->u.repeat.count >= reg->repeat_range[mem].lower) {
UChar* pcode = stkp->u.repeat.pcode;
STACK_PUSH_REPEAT_INC(si);
STACK_PUSH_ALT(pcode, s, sprev);
}
else {
p = stkp->u.repeat.pcode;
STACK_PUSH_REPEAT_INC(si);
}
}
else if (stkp->u.repeat.count == reg->repeat_range[mem].upper) {
STACK_PUSH_REPEAT_INC(si);
}
MOP_OUT;
CHECK_INTERRUPT_IN_MATCH_AT;
continue;
break;
case OP_REPEAT_INC_NG_SG: MOP_IN(OP_REPEAT_INC_NG_SG);
GET_MEMNUM_INC(mem, p); /* mem: OP_REPEAT ID */
STACK_GET_REPEAT(mem, stkp);
si = GET_STACK_INDEX(stkp);
goto repeat_inc_ng;
break;
case OP_PUSH_POS: MOP_IN(OP_PUSH_POS);
STACK_PUSH_POS(s, sprev);
MOP_OUT;
continue;
break;
case OP_POP_POS: MOP_IN(OP_POP_POS);
{
STACK_POS_END(stkp);
s = stkp->u.state.pstr;
sprev = stkp->u.state.pstr_prev;
}
MOP_OUT;
continue;
break;
case OP_PUSH_POS_NOT: MOP_IN(OP_PUSH_POS_NOT);
GET_RELADDR_INC(addr, p);
STACK_PUSH_POS_NOT(p + addr, s, sprev);
MOP_OUT;
continue;
break;
case OP_FAIL_POS: MOP_IN(OP_FAIL_POS);
STACK_POP_TIL_POS_NOT;
goto fail;
break;
case OP_PUSH_STOP_BT: MOP_IN(OP_PUSH_STOP_BT);
STACK_PUSH_STOP_BT;
MOP_OUT;
continue;
break;
case OP_POP_STOP_BT: MOP_IN(OP_POP_STOP_BT);
STACK_STOP_BT_END;
MOP_OUT;
continue;
break;
case OP_LOOK_BEHIND: MOP_IN(OP_LOOK_BEHIND);
GET_LENGTH_INC(tlen, p);
s = (UChar* )ONIGENC_STEP_BACK(encode, str, s, (int )tlen);
if (IS_NULL(s)) goto fail;
sprev = (UChar* )onigenc_get_prev_char_head(encode, str, s);
MOP_OUT;
continue;
break;
case OP_PUSH_LOOK_BEHIND_NOT: MOP_IN(OP_PUSH_LOOK_BEHIND_NOT);
GET_RELADDR_INC(addr, p);
GET_LENGTH_INC(tlen, p);
q = (UChar* )ONIGENC_STEP_BACK(encode, str, s, (int )tlen);
if (IS_NULL(q)) {
/* too short case -> success. ex. /(?<!XXX)a/.match("a")
If you want to change to fail, replace following line. */
p += addr;
/* goto fail; */
}
else {
STACK_PUSH_LOOK_BEHIND_NOT(p + addr, s, sprev);
s = q;
sprev = (UChar* )onigenc_get_prev_char_head(encode, str, s);
}
MOP_OUT;
continue;
break;
case OP_FAIL_LOOK_BEHIND_NOT: MOP_IN(OP_FAIL_LOOK_BEHIND_NOT);
STACK_POP_TIL_LOOK_BEHIND_NOT;
goto fail;
break;
#ifdef USE_SUBEXP_CALL
case OP_CALL: MOP_IN(OP_CALL);
GET_ABSADDR_INC(addr, p);
STACK_PUSH_CALL_FRAME(p);
p = reg->p + addr;
MOP_OUT;
continue;
break;
case OP_RETURN: MOP_IN(OP_RETURN);
STACK_RETURN(p);
STACK_PUSH_RETURN;
MOP_OUT;
continue;
break;
#endif
case OP_FINISH:
goto finish;
break;
fail:
MOP_OUT;
/* fall */
case OP_FAIL: MOP_IN(OP_FAIL);
STACK_POP;
p = stk->u.state.pcode;
s = stk->u.state.pstr;
sprev = stk->u.state.pstr_prev;
#ifdef USE_COMBINATION_EXPLOSION_CHECK
if (stk->u.state.state_check != 0) {
stk->type = STK_STATE_CHECK_MARK;
stk++;
}
#endif
MOP_OUT;
continue;
break;
default:
goto bytecode_error;
} /* end of switch */
sprev = sbegin;
} /* end of while(1) */
finish:
STACK_SAVE;
return best_len;
#ifdef ONIG_DEBUG
stack_error:
STACK_SAVE;
return ONIGERR_STACK_BUG;
#endif
bytecode_error:
STACK_SAVE;
return ONIGERR_UNDEFINED_BYTECODE;
unexpected_bytecode_error:
STACK_SAVE;
return ONIGERR_UNEXPECTED_BYTECODE;
}
| 25,442 |
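The fix above moves each DATA_ENSURE() in front of the read it guards. The same pattern in a minimal standalone sketch (hypothetical helper names, not oniguruma source): prove that enough input remains, and only then dereference.

#include <stddef.h>
#include <stdio.h>

/* hypothetical stand-in for DATA_ENSURE(): true if at least n bytes remain */
static int have_bytes(const unsigned char *s, const unsigned char *end, size_t n)
{
    return (size_t)(end - s) >= n;
}

/* read a two-byte token only after proving both bytes are in range */
static int read_token(const unsigned char *s, const unsigned char *end,
                      unsigned char out[2])
{
    if (!have_bytes(s, end, 2))   /* bounds check comes first ...        */
        return -1;
    out[0] = s[0];                /* ... data access only after it holds */
    out[1] = s[1];
    return 0;
}

int main(void)
{
    unsigned char buf[1] = { 0x41 };   /* deliberately one byte short */
    unsigned char tok[2];
    if (read_token(buf, buf + sizeof buf, tok) != 0)
        puts("rejected: input truncated");
    return 0;
}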
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void TestFlashMessageLoop::DestroyMessageLoopResourceTask(int32_t unused) {
if (message_loop_) {
delete message_loop_;
message_loop_ = NULL;
} else {
PP_NOTREACHED();
}
}
Commit Message: Fix PPB_Flash_MessageLoop.
This CL suspends script callbacks and resource loads while running nested message loop using PPB_Flash_MessageLoop.
BUG=569496
Review URL: https://codereview.chromium.org/1559113002
Cr-Commit-Position: refs/heads/master@{#374529}
CWE ID: CWE-264 | void TestFlashMessageLoop::DestroyMessageLoopResourceTask(int32_t unused) {
if (message_loop_) {
delete message_loop_;
message_loop_ = nullptr;
} else {
PP_NOTREACHED();
}
}
| 3,872 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int br_parse_ip_options(struct sk_buff *skb)
{
struct ip_options *opt;
struct iphdr *iph;
struct net_device *dev = skb->dev;
u32 len;
iph = ip_hdr(skb);
opt = &(IPCB(skb)->opt);
/* Basic sanity checks */
if (iph->ihl < 5 || iph->version != 4)
goto inhdr_error;
if (!pskb_may_pull(skb, iph->ihl*4))
goto inhdr_error;
iph = ip_hdr(skb);
if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
goto inhdr_error;
len = ntohs(iph->tot_len);
if (skb->len < len) {
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
goto drop;
} else if (len < (iph->ihl*4))
goto inhdr_error;
if (pskb_trim_rcsum(skb, len)) {
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
goto drop;
}
/* Zero out the CB buffer if no options present */
if (iph->ihl == 5) {
memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
return 0;
}
opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
if (ip_options_compile(dev_net(dev), opt, skb))
goto inhdr_error;
/* Check correct handling of SRR option */
if (unlikely(opt->srr)) {
struct in_device *in_dev = __in_dev_get_rcu(dev);
if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev))
goto drop;
if (ip_options_rcv_srr(skb))
goto drop;
}
return 0;
inhdr_error:
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
drop:
return -1;
}
Commit Message: bridge: reset IPCB in br_parse_ip_options
Commit 462fb2af9788a82 (bridge : Sanitize skb before it enters the IP
stack), missed one IPCB init before calling ip_options_compile()
Thanks to Scot Doyle for his tests and bug reports.
Reported-by: Scot Doyle <lkml@scotdoyle.com>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Hiroaki SHIMODA <shimoda.hiroaki@gmail.com>
Acked-by: Bandan Das <bandan.das@stratus.com>
Acked-by: Stephen Hemminger <shemminger@vyatta.com>
Cc: Jan Lübbe <jluebbe@debian.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
CWE ID: CWE-399 | static int br_parse_ip_options(struct sk_buff *skb)
{
struct ip_options *opt;
struct iphdr *iph;
struct net_device *dev = skb->dev;
u32 len;
iph = ip_hdr(skb);
opt = &(IPCB(skb)->opt);
/* Basic sanity checks */
if (iph->ihl < 5 || iph->version != 4)
goto inhdr_error;
if (!pskb_may_pull(skb, iph->ihl*4))
goto inhdr_error;
iph = ip_hdr(skb);
if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
goto inhdr_error;
len = ntohs(iph->tot_len);
if (skb->len < len) {
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
goto drop;
} else if (len < (iph->ihl*4))
goto inhdr_error;
if (pskb_trim_rcsum(skb, len)) {
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
goto drop;
}
memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
if (iph->ihl == 5)
return 0;
opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
if (ip_options_compile(dev_net(dev), opt, skb))
goto inhdr_error;
/* Check correct handling of SRR option */
if (unlikely(opt->srr)) {
struct in_device *in_dev = __in_dev_get_rcu(dev);
if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev))
goto drop;
if (ip_options_rcv_srr(skb))
goto drop;
}
return 0;
inhdr_error:
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
drop:
return -1;
}
| 17,232 |
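The substantive change above is the unconditional memset of the per-skb control block before option parsing, instead of zeroing it only on the no-options path. A minimal standalone sketch of that idea (hypothetical types, not kernel code): clear reused per-item scratch state before each new item is parsed, so stale fields cannot leak into the next pass.

#include <stdio.h>
#include <string.h>

/* hypothetical per-packet scratch area, analogous to struct inet_skb_parm */
struct pkt_cb {
    int srr;        /* source-route flag    */
    int optlen;     /* parsed option length */
};

static struct pkt_cb scratch;   /* reused for every packet, like skb->cb[] */

static void parse_packet(int optlen)
{
    /* clear stale state from the previous packet before any parsing */
    memset(&scratch, 0, sizeof(scratch));
    if (optlen > 0) {
        scratch.optlen = optlen;
        scratch.srr = 1;        /* pretend the options contained SRR */
    }
    printf("optlen=%d srr=%d\n", scratch.optlen, scratch.srr);
}

int main(void)
{
    parse_packet(8);   /* packet with options */
    parse_packet(0);   /* without the memset, srr=1 would leak into this one */
    return 0;
}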
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: png_set_IHDR(png_structp png_ptr, png_infop info_ptr,
png_uint_32 width, png_uint_32 height, int bit_depth,
int color_type, int interlace_type, int compression_type,
int filter_type)
{
png_debug1(1, "in %s storage function", "IHDR");
if (png_ptr == NULL || info_ptr == NULL)
return;
info_ptr->width = width;
info_ptr->height = height;
info_ptr->bit_depth = (png_byte)bit_depth;
info_ptr->color_type = (png_byte)color_type;
info_ptr->compression_type = (png_byte)compression_type;
info_ptr->filter_type = (png_byte)filter_type;
info_ptr->interlace_type = (png_byte)interlace_type;
png_check_IHDR (png_ptr, info_ptr->width, info_ptr->height,
info_ptr->bit_depth, info_ptr->color_type, info_ptr->interlace_type,
info_ptr->compression_type, info_ptr->filter_type);
if (info_ptr->color_type == PNG_COLOR_TYPE_PALETTE)
info_ptr->channels = 1;
else if (info_ptr->color_type & PNG_COLOR_MASK_COLOR)
info_ptr->channels = 3;
else
info_ptr->channels = 1;
if (info_ptr->color_type & PNG_COLOR_MASK_ALPHA)
info_ptr->channels++;
info_ptr->pixel_depth = (png_byte)(info_ptr->channels * info_ptr->bit_depth);
/* Check for potential overflow */
if (width > (PNG_UINT_32_MAX
>> 3) /* 8-byte RGBA pixels */
- 64 /* bigrowbuf hack */
- 1 /* filter byte */
- 7*8 /* rounding of width to multiple of 8 pixels */
- 8) /* extra max_pixel_depth pad */
info_ptr->rowbytes = (png_size_t)0;
else
info_ptr->rowbytes = PNG_ROWBYTES(info_ptr->pixel_depth, width);
}
Commit Message: third_party/libpng: update to 1.2.54
TBR=darin@chromium.org
BUG=560291
Review URL: https://codereview.chromium.org/1467263003
Cr-Commit-Position: refs/heads/master@{#362298}
CWE ID: CWE-119 | png_set_IHDR(png_structp png_ptr, png_infop info_ptr,
png_uint_32 width, png_uint_32 height, int bit_depth,
int color_type, int interlace_type, int compression_type,
int filter_type)
{
png_debug1(1, "in %s storage function", "IHDR");
if (png_ptr == NULL || info_ptr == NULL)
return;
info_ptr->width = width;
info_ptr->height = height;
info_ptr->bit_depth = (png_byte)bit_depth;
info_ptr->color_type = (png_byte)color_type;
info_ptr->compression_type = (png_byte)compression_type;
info_ptr->filter_type = (png_byte)filter_type;
info_ptr->interlace_type = (png_byte)interlace_type;
png_check_IHDR (png_ptr, info_ptr->width, info_ptr->height,
info_ptr->bit_depth, info_ptr->color_type, info_ptr->interlace_type,
info_ptr->compression_type, info_ptr->filter_type);
if (info_ptr->color_type == PNG_COLOR_TYPE_PALETTE)
info_ptr->channels = 1;
else if (info_ptr->color_type & PNG_COLOR_MASK_COLOR)
info_ptr->channels = 3;
else
info_ptr->channels = 1;
if (info_ptr->color_type & PNG_COLOR_MASK_ALPHA)
info_ptr->channels++;
info_ptr->pixel_depth = (png_byte)(info_ptr->channels * info_ptr->bit_depth);
/* Check for potential overflow */
if (width > (PNG_UINT_32_MAX
>> 3) /* 8-byte RGBA pixels */
- 64 /* bigrowbuf hack */
- 1 /* filter byte */
- 7*8 /* rounding of width to multiple of 8 pixels */
- 8) /* extra max_pixel_depth pad */
{
info_ptr->rowbytes = (png_size_t)0;
png_error(png_ptr, "Image width is too large for this architecture");
}
else
info_ptr->rowbytes = PNG_ROWBYTES(info_ptr->pixel_depth, width);
}
| 18,113 |
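In the fixed version an oversized width now fails loudly through png_error() rather than silently leaving rowbytes at 0 for later code to misuse. A standalone sketch of the same guard (hypothetical limit and names, not libpng source):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* hypothetical cap: widest image whose 8-byte-per-pixel row still fits */
#define MAX_WIDTH (UINT32_MAX / 8u - 64u - 1u - 7u * 8u - 8u)

static size_t row_bytes(uint32_t width, unsigned pixel_depth_bits)
{
    if (width > MAX_WIDTH) {
        /* fail loudly instead of returning 0 and hoping callers notice */
        fprintf(stderr, "image width %lu too large\n", (unsigned long)width);
        exit(EXIT_FAILURE);
    }
    return (size_t)(((uint64_t)width * pixel_depth_bits + 7u) / 8u);
}

int main(void)
{
    printf("rowbytes = %zu\n", row_bytes(1024u, 32u));      /* fine   */
    printf("rowbytes = %zu\n", row_bytes(UINT32_MAX, 32u)); /* aborts */
    return 0;
}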
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount)
{
const cdf_section_header_t *shp;
cdf_section_header_t sh;
const uint8_t *p, *q, *e;
int16_t s16;
int32_t s32;
uint32_t u32;
int64_t s64;
uint64_t u64;
cdf_timestamp_t tp;
size_t i, o, o4, nelements, j;
cdf_property_info_t *inp;
if (offs > UINT32_MAX / 4) {
errno = EFTYPE;
goto out;
}
shp = CAST(const cdf_section_header_t *, (const void *)
((const char *)sst->sst_tab + offs));
if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1)
goto out;
sh.sh_len = CDF_TOLE4(shp->sh_len);
#define CDF_SHLEN_LIMIT (UINT32_MAX / 8)
if (sh.sh_len > CDF_SHLEN_LIMIT) {
errno = EFTYPE;
goto out;
}
sh.sh_properties = CDF_TOLE4(shp->sh_properties);
#define CDF_PROP_LIMIT (UINT32_MAX / (4 * sizeof(*inp)))
if (sh.sh_properties > CDF_PROP_LIMIT)
goto out;
DPRINTF(("section len: %u properties %u\n", sh.sh_len,
sh.sh_properties));
if (*maxcount) {
if (*maxcount > CDF_PROP_LIMIT)
goto out;
*maxcount += sh.sh_properties;
inp = CAST(cdf_property_info_t *,
realloc(*info, *maxcount * sizeof(*inp)));
} else {
*maxcount = sh.sh_properties;
inp = CAST(cdf_property_info_t *,
malloc(*maxcount * sizeof(*inp)));
}
if (inp == NULL)
goto out;
*info = inp;
inp += *count;
*count += sh.sh_properties;
p = CAST(const uint8_t *, (const void *)
((const char *)(const void *)sst->sst_tab +
offs + sizeof(sh)));
e = CAST(const uint8_t *, (const void *)
(((const char *)(const void *)shp) + sh.sh_len));
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
size_t tail = (i << 1) + 1;
if (cdf_check_stream_offset(sst, h, p, tail * sizeof(uint32_t),
__LINE__) == -1)
goto out;
size_t ofs = CDF_GETUINT32(p, tail);
q = (const uint8_t *)(const void *)
((const char *)(const void *)p + ofs
- 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
if (nelements == 0) {
DPRINTF(("CDF_VECTOR with nelements == 0\n"));
goto out;
}
o = 2;
} else {
nelements = 1;
o = 1;
}
o4 = o * sizeof(uint32_t);
if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED))
goto unknown;
switch (inp[i].pi_type & CDF_TYPEMASK) {
case CDF_NULL:
case CDF_EMPTY:
break;
case CDF_SIGNED16:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s16, &q[o4], sizeof(s16));
inp[i].pi_s16 = CDF_TOLE2(s16);
break;
case CDF_SIGNED32:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s32, &q[o4], sizeof(s32));
inp[i].pi_s32 = CDF_TOLE4((uint32_t)s32);
break;
case CDF_BOOL:
case CDF_UNSIGNED32:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u32, &q[o4], sizeof(u32));
inp[i].pi_u32 = CDF_TOLE4(u32);
break;
case CDF_SIGNED64:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s64, &q[o4], sizeof(s64));
inp[i].pi_s64 = CDF_TOLE8((uint64_t)s64);
break;
case CDF_UNSIGNED64:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u64, &q[o4], sizeof(u64));
inp[i].pi_u64 = CDF_TOLE8((uint64_t)u64);
break;
case CDF_FLOAT:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u32, &q[o4], sizeof(u32));
u32 = CDF_TOLE4(u32);
memcpy(&inp[i].pi_f, &u32, sizeof(inp[i].pi_f));
break;
case CDF_DOUBLE:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u64, &q[o4], sizeof(u64));
u64 = CDF_TOLE8((uint64_t)u64);
memcpy(&inp[i].pi_d, &u64, sizeof(inp[i].pi_d));
break;
case CDF_LENGTH32_STRING:
case CDF_LENGTH32_WSTRING:
if (nelements > 1) {
size_t nelem = inp - *info;
if (*maxcount > CDF_PROP_LIMIT
|| nelements > CDF_PROP_LIMIT)
goto out;
*maxcount += nelements;
inp = CAST(cdf_property_info_t *,
realloc(*info, *maxcount * sizeof(*inp)));
if (inp == NULL)
goto out;
*info = inp;
inp = *info + nelem;
}
DPRINTF(("nelements = %" SIZE_T_FORMAT "u\n",
nelements));
for (j = 0; j < nelements && i < sh.sh_properties;
j++, i++)
{
uint32_t l = CDF_GETUINT32(q, o);
inp[i].pi_str.s_len = l;
inp[i].pi_str.s_buf = (const char *)
(const void *)(&q[o4 + sizeof(l)]);
DPRINTF(("l = %d, r = %" SIZE_T_FORMAT
"u, s = %s\n", l,
CDF_ROUND(l, sizeof(l)),
inp[i].pi_str.s_buf));
if (l & 1)
l++;
o += l >> 1;
if (q + o >= e)
goto out;
o4 = o * sizeof(uint32_t);
}
i--;
break;
case CDF_FILETIME:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&tp, &q[o4], sizeof(tp));
inp[i].pi_tp = CDF_TOLE8((uint64_t)tp);
break;
case CDF_CLIPBOARD:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
break;
default:
unknown:
DPRINTF(("Don't know how to deal with %x\n",
inp[i].pi_type));
break;
}
}
return 0;
out:
free(*info);
return -1;
}
Commit Message: Prevent wrap around (Remi Collet at redhat)
CWE ID: CWE-189 | cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount)
{
const cdf_section_header_t *shp;
cdf_section_header_t sh;
const uint8_t *p, *q, *e;
int16_t s16;
int32_t s32;
uint32_t u32;
int64_t s64;
uint64_t u64;
cdf_timestamp_t tp;
size_t i, o, o4, nelements, j;
cdf_property_info_t *inp;
if (offs > UINT32_MAX / 4) {
errno = EFTYPE;
goto out;
}
shp = CAST(const cdf_section_header_t *, (const void *)
((const char *)sst->sst_tab + offs));
if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1)
goto out;
sh.sh_len = CDF_TOLE4(shp->sh_len);
#define CDF_SHLEN_LIMIT (UINT32_MAX / 8)
if (sh.sh_len > CDF_SHLEN_LIMIT) {
errno = EFTYPE;
goto out;
}
sh.sh_properties = CDF_TOLE4(shp->sh_properties);
#define CDF_PROP_LIMIT (UINT32_MAX / (4 * sizeof(*inp)))
if (sh.sh_properties > CDF_PROP_LIMIT)
goto out;
DPRINTF(("section len: %u properties %u\n", sh.sh_len,
sh.sh_properties));
if (*maxcount) {
if (*maxcount > CDF_PROP_LIMIT)
goto out;
*maxcount += sh.sh_properties;
inp = CAST(cdf_property_info_t *,
realloc(*info, *maxcount * sizeof(*inp)));
} else {
*maxcount = sh.sh_properties;
inp = CAST(cdf_property_info_t *,
malloc(*maxcount * sizeof(*inp)));
}
if (inp == NULL)
goto out;
*info = inp;
inp += *count;
*count += sh.sh_properties;
p = CAST(const uint8_t *, (const void *)
((const char *)(const void *)sst->sst_tab +
offs + sizeof(sh)));
e = CAST(const uint8_t *, (const void *)
(((const char *)(const void *)shp) + sh.sh_len));
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
size_t tail = (i << 1) + 1;
if (cdf_check_stream_offset(sst, h, p, tail * sizeof(uint32_t),
__LINE__) == -1)
goto out;
size_t ofs = CDF_GETUINT32(p, tail);
q = (const uint8_t *)(const void *)
((const char *)(const void *)p + ofs
- 2 * sizeof(uint32_t));
if (q < p) {
DPRINTF(("Wrapped around %p < %p\n", q, p));
goto out;
}
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
if (nelements == 0) {
DPRINTF(("CDF_VECTOR with nelements == 0\n"));
goto out;
}
o = 2;
} else {
nelements = 1;
o = 1;
}
o4 = o * sizeof(uint32_t);
if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED))
goto unknown;
switch (inp[i].pi_type & CDF_TYPEMASK) {
case CDF_NULL:
case CDF_EMPTY:
break;
case CDF_SIGNED16:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s16, &q[o4], sizeof(s16));
inp[i].pi_s16 = CDF_TOLE2(s16);
break;
case CDF_SIGNED32:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s32, &q[o4], sizeof(s32));
inp[i].pi_s32 = CDF_TOLE4((uint32_t)s32);
break;
case CDF_BOOL:
case CDF_UNSIGNED32:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u32, &q[o4], sizeof(u32));
inp[i].pi_u32 = CDF_TOLE4(u32);
break;
case CDF_SIGNED64:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&s64, &q[o4], sizeof(s64));
inp[i].pi_s64 = CDF_TOLE8((uint64_t)s64);
break;
case CDF_UNSIGNED64:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u64, &q[o4], sizeof(u64));
inp[i].pi_u64 = CDF_TOLE8((uint64_t)u64);
break;
case CDF_FLOAT:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u32, &q[o4], sizeof(u32));
u32 = CDF_TOLE4(u32);
memcpy(&inp[i].pi_f, &u32, sizeof(inp[i].pi_f));
break;
case CDF_DOUBLE:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&u64, &q[o4], sizeof(u64));
u64 = CDF_TOLE8((uint64_t)u64);
memcpy(&inp[i].pi_d, &u64, sizeof(inp[i].pi_d));
break;
case CDF_LENGTH32_STRING:
case CDF_LENGTH32_WSTRING:
if (nelements > 1) {
size_t nelem = inp - *info;
if (*maxcount > CDF_PROP_LIMIT
|| nelements > CDF_PROP_LIMIT)
goto out;
*maxcount += nelements;
inp = CAST(cdf_property_info_t *,
realloc(*info, *maxcount * sizeof(*inp)));
if (inp == NULL)
goto out;
*info = inp;
inp = *info + nelem;
}
DPRINTF(("nelements = %" SIZE_T_FORMAT "u\n",
nelements));
for (j = 0; j < nelements && i < sh.sh_properties;
j++, i++)
{
uint32_t l = CDF_GETUINT32(q, o);
inp[i].pi_str.s_len = l;
inp[i].pi_str.s_buf = (const char *)
(const void *)(&q[o4 + sizeof(l)]);
DPRINTF(("l = %d, r = %" SIZE_T_FORMAT
"u, s = %s\n", l,
CDF_ROUND(l, sizeof(l)),
inp[i].pi_str.s_buf));
if (l & 1)
l++;
o += l >> 1;
if (q + o >= e)
goto out;
o4 = o * sizeof(uint32_t);
}
i--;
break;
case CDF_FILETIME:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
(void)memcpy(&tp, &q[o4], sizeof(tp));
inp[i].pi_tp = CDF_TOLE8((uint64_t)tp);
break;
case CDF_CLIPBOARD:
if (inp[i].pi_type & CDF_VECTOR)
goto unknown;
break;
default:
unknown:
DPRINTF(("Don't know how to deal with %x\n",
inp[i].pi_type));
break;
}
}
return 0;
out:
free(*info);
return -1;
}
| 10,365 |
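The added q < p comparison above rejects an attacker-controlled offset that wraps the derived pointer backwards. A standalone sketch of the same idea (hypothetical record layout, not the file/CDF source): validate the untrusted offset against the bytes actually held before doing any pointer arithmetic with it.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* return a pointer to the field at 'ofs' bytes into 'buf', or NULL if the
 * untrusted offset (relative to an assumed 8-byte header) cannot be
 * honoured inside the 'len' bytes we actually hold */
static const uint8_t *field_at(const uint8_t *buf, size_t len, uint32_t ofs)
{
    if (ofs < 8u)                 /* would point before the data area     */
        return NULL;
    if ((size_t)ofs - 8u >= len)  /* would point past what we were given  */
        return NULL;
    return buf + (ofs - 8u);      /* pointer math only after validation   */
}

int main(void)
{
    uint8_t blob[16] = {0};
    printf("%s\n", field_at(blob, sizeof blob, 12u)  ? "ok" : "rejected");
    printf("%s\n", field_at(blob, sizeof blob, 4u)   ? "ok" : "rejected");  /* would wrap   */
    printf("%s\n", field_at(blob, sizeof blob, 400u) ? "ok" : "rejected");  /* past the end */
    return 0;
}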
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static enum entity_charset determine_charset(char *charset_hint TSRMLS_DC)
{
int i;
enum entity_charset charset = cs_utf_8;
int len = 0;
const zend_encoding *zenc;
/* Default is now UTF-8 */
if (charset_hint == NULL)
return cs_utf_8;
if ((len = strlen(charset_hint)) != 0) {
goto det_charset;
}
zenc = zend_multibyte_get_internal_encoding(TSRMLS_C);
if (zenc != NULL) {
charset_hint = (char *)zend_multibyte_get_encoding_name(zenc);
if (charset_hint != NULL && (len=strlen(charset_hint)) != 0) {
if ((len == 4) /* sizeof (none|auto|pass) */ &&
(!memcmp("pass", charset_hint, 4) ||
!memcmp("auto", charset_hint, 4) ||
!memcmp("auto", charset_hint, 4))) {
charset_hint = NULL;
len = 0;
} else {
goto det_charset;
}
}
}
charset_hint = SG(default_charset);
if (charset_hint != NULL && (len=strlen(charset_hint)) != 0) {
goto det_charset;
}
/* try to detect the charset for the locale */
#if HAVE_NL_LANGINFO && HAVE_LOCALE_H && defined(CODESET)
charset_hint = nl_langinfo(CODESET);
if (charset_hint != NULL && (len=strlen(charset_hint)) != 0) {
goto det_charset;
}
#endif
#if HAVE_LOCALE_H
/* try to figure out the charset from the locale */
{
char *localename;
char *dot, *at;
/* lang[_territory][.codeset][@modifier] */
localename = setlocale(LC_CTYPE, NULL);
dot = strchr(localename, '.');
if (dot) {
dot++;
/* locale specifies a codeset */
at = strchr(dot, '@');
if (at)
len = at - dot;
else
len = strlen(dot);
charset_hint = dot;
} else {
/* no explicit name; see if the name itself
* is the charset */
charset_hint = localename;
len = strlen(charset_hint);
}
}
#endif
det_charset:
if (charset_hint) {
int found = 0;
/* now walk the charset map and look for the codeset */
for (i = 0; charset_map[i].codeset; i++) {
if (len == strlen(charset_map[i].codeset) && strncasecmp(charset_hint, charset_map[i].codeset, len) == 0) {
charset = charset_map[i].charset;
found = 1;
break;
}
}
if (!found) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "charset `%s' not supported, assuming utf-8",
charset_hint);
}
}
return charset;
}
Commit Message: Fix bug #72135 - don't create strings with lengths outside int range
CWE ID: CWE-190 | static enum entity_charset determine_charset(char *charset_hint TSRMLS_DC)
{
int i;
enum entity_charset charset = cs_utf_8;
int len = 0;
const zend_encoding *zenc;
/* Default is now UTF-8 */
if (charset_hint == NULL)
return cs_utf_8;
if ((len = strlen(charset_hint)) != 0) {
goto det_charset;
}
zenc = zend_multibyte_get_internal_encoding(TSRMLS_C);
if (zenc != NULL) {
charset_hint = (char *)zend_multibyte_get_encoding_name(zenc);
if (charset_hint != NULL && (len=strlen(charset_hint)) != 0) {
if ((len == 4) /* sizeof (none|auto|pass) */ &&
(!memcmp("pass", charset_hint, 4) ||
!memcmp("auto", charset_hint, 4) ||
!memcmp("auto", charset_hint, 4))) {
charset_hint = NULL;
len = 0;
} else {
goto det_charset;
}
}
}
charset_hint = SG(default_charset);
if (charset_hint != NULL && (len=strlen(charset_hint)) != 0) {
goto det_charset;
}
/* try to detect the charset for the locale */
#if HAVE_NL_LANGINFO && HAVE_LOCALE_H && defined(CODESET)
charset_hint = nl_langinfo(CODESET);
if (charset_hint != NULL && (len=strlen(charset_hint)) != 0) {
goto det_charset;
}
#endif
#if HAVE_LOCALE_H
/* try to figure out the charset from the locale */
{
char *localename;
char *dot, *at;
/* lang[_territory][.codeset][@modifier] */
localename = setlocale(LC_CTYPE, NULL);
dot = strchr(localename, '.');
if (dot) {
dot++;
/* locale specifies a codeset */
at = strchr(dot, '@');
if (at)
len = at - dot;
else
len = strlen(dot);
charset_hint = dot;
} else {
/* no explicit name; see if the name itself
* is the charset */
charset_hint = localename;
len = strlen(charset_hint);
}
}
#endif
det_charset:
if (charset_hint) {
int found = 0;
/* now walk the charset map and look for the codeset */
for (i = 0; charset_map[i].codeset; i++) {
if (len == strlen(charset_map[i].codeset) && strncasecmp(charset_hint, charset_map[i].codeset, len) == 0) {
charset = charset_map[i].charset;
found = 1;
break;
}
}
if (!found) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "charset `%s' not supported, assuming utf-8",
charset_hint);
}
}
return charset;
}
| 8,616 |
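The helper shown here is identical in the vulnerable and fixed columns; the length-capping that bug #72135 describes presumably lands elsewhere in the same file. As a generic illustration of the CWE-190 pattern named in the commit (hypothetical function, not PHP source), a computed length can be checked against the int range before it reaches consumers that store lengths as int:

#include <limits.h>
#include <stdio.h>

/* grow an output estimate and refuse to proceed once it can no longer be
 * represented as a non-negative int (the length type used by the consumer) */
static int checked_total(size_t items, size_t expansion_per_item)
{
    size_t total;
    if (expansion_per_item != 0 && items > (size_t)INT_MAX / expansion_per_item)
        return -1;                      /* would overflow the int length */
    total = items * expansion_per_item;
    if (total > (size_t)INT_MAX)
        return -1;
    return (int)total;
}

int main(void)
{
    printf("%d\n", checked_total(1000, 8));             /* 8000          */
    printf("%d\n", checked_total((size_t)1 << 31, 8));  /* -1: rejected  */
    return 0;
}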
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: const char* SegmentInfo::GetWritingAppAsUTF8() const
{
return m_pWritingAppAsUTF8;
}
Commit Message: libwebm: Pull from upstream
Rolling mkvparser from upstream. Primarily for fixing a bug on parsing
failures with certain Opus WebM files.
Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae
The diff is so huge because there were some style clean ups upstream.
But it was ensured that there were no breaking changes when the style
clean ups was done upstream.
Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039
CWE ID: CWE-119 | const char* SegmentInfo::GetWritingAppAsUTF8() const
| 27,215 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: wb_prep(netdissect_options *ndo,
const struct pkt_prep *prep, u_int len)
{
int n;
const struct pgstate *ps;
const u_char *ep = ndo->ndo_snapend;
ND_PRINT((ndo, " wb-prep:"));
if (len < sizeof(*prep)) {
return (-1);
}
n = EXTRACT_32BITS(&prep->pp_n);
ps = (const struct pgstate *)(prep + 1);
while (--n >= 0 && ND_TTEST(*ps)) {
const struct id_off *io, *ie;
char c = '<';
ND_PRINT((ndo, " %u/%s:%u",
EXTRACT_32BITS(&ps->slot),
ipaddr_string(ndo, &ps->page.p_sid),
EXTRACT_32BITS(&ps->page.p_uid)));
io = (const struct id_off *)(ps + 1);
for (ie = io + ps->nid; io < ie && ND_TTEST(*io); ++io) {
ND_PRINT((ndo, "%c%s:%u", c, ipaddr_string(ndo, &io->id),
EXTRACT_32BITS(&io->off)));
c = ',';
}
ND_PRINT((ndo, ">"));
ps = (const struct pgstate *)io;
}
return ((const u_char *)ps <= ep? 0 : -1);
}
Commit Message: CVE-2017-13014/White Board: Do more bounds checks.
This fixes a buffer over-read discovered by Yannick Formaggio.
Add a test using the capture file supplied by the reporter(s).
While we're at it, print a truncation error if the packets are
truncated, rather than just, in effect, ignoring the result of the
routines that print particular packet types.
CWE ID: CWE-125 | wb_prep(netdissect_options *ndo,
const struct pkt_prep *prep, u_int len)
{
int n;
const struct pgstate *ps;
const u_char *ep = ndo->ndo_snapend;
ND_PRINT((ndo, " wb-prep:"));
if (len < sizeof(*prep) || !ND_TTEST(*prep))
return (-1);
n = EXTRACT_32BITS(&prep->pp_n);
ps = (const struct pgstate *)(prep + 1);
while (--n >= 0 && ND_TTEST(*ps)) {
const struct id_off *io, *ie;
char c = '<';
ND_PRINT((ndo, " %u/%s:%u",
EXTRACT_32BITS(&ps->slot),
ipaddr_string(ndo, &ps->page.p_sid),
EXTRACT_32BITS(&ps->page.p_uid)));
io = (const struct id_off *)(ps + 1);
for (ie = io + ps->nid; io < ie && ND_TTEST(*io); ++io) {
ND_PRINT((ndo, "%c%s:%u", c, ipaddr_string(ndo, &io->id),
EXTRACT_32BITS(&io->off)));
c = ',';
}
ND_PRINT((ndo, ">"));
ps = (const struct pgstate *)io;
}
return ((const u_char *)ps <= ep? 0 : -1);
}
| 7,900 |
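The fix adds an ND_TTEST() on the header itself: the length the protocol claims and the bytes actually captured are two different limits, and both must hold before the struct is read. A standalone sketch (hypothetical names, not tcpdump source):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct prep_hdr { uint32_t count; uint32_t flags; };

/* 'wire_len' is what the protocol says the message occupies;
 * 'cap_len' is how many of those bytes were actually captured */
static int parse_prep(const uint8_t *pkt, size_t wire_len, size_t cap_len,
                      struct prep_hdr *out)
{
    if (wire_len < sizeof(*out))   /* message too short on the wire       */
        return -1;
    if (cap_len < sizeof(*out))    /* header not fully inside the capture */
        return -1;
    memcpy(out, pkt, sizeof(*out));
    return 0;
}

int main(void)
{
    uint8_t capture[4] = {0};      /* snaplen cut the packet short */
    struct prep_hdr h;
    /* wire length claims 64 bytes, but only 4 were captured */
    if (parse_prep(capture, 64, sizeof capture, &h) != 0)
        puts("truncated capture rejected");
    return 0;
}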
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: SPL_METHOD(SplFileObject, hasChildren)
{
if (zend_parse_parameters_none() == FAILURE) {
return;
}
RETURN_FALSE;
} /* }}} */
/* {{{ proto bool SplFileObject::getChildren()
Commit Message: Fix bug #72262 - do not overflow int
CWE ID: CWE-190 | SPL_METHOD(SplFileObject, hasChildren)
{
if (zend_parse_parameters_none() == FAILURE) {
return;
}
RETURN_FALSE;
} /* }}} */
/* {{{ proto bool SplFileObject::getChildren()
| 8,666 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: WebContents* DevToolsWindow::OpenURLFromTab(
WebContents* source,
const content::OpenURLParams& params) {
DCHECK(source == main_web_contents_);
if (!params.url.SchemeIs(content::kChromeDevToolsScheme)) {
WebContents* inspected_web_contents = GetInspectedWebContents();
return inspected_web_contents ?
inspected_web_contents->OpenURL(params) : NULL;
}
bindings_->Reload();
return main_web_contents_;
}
Commit Message: [DevTools] Use no-referrer for DevTools links
Bug: 732751
Change-Id: I77753120e2424203dedcc7bc0847fb67f87fe2b2
Reviewed-on: https://chromium-review.googlesource.com/615021
Reviewed-by: Andrey Kosyakov <caseq@chromium.org>
Commit-Queue: Dmitry Gozman <dgozman@chromium.org>
Cr-Commit-Position: refs/heads/master@{#494413}
CWE ID: CWE-668 | WebContents* DevToolsWindow::OpenURLFromTab(
WebContents* source,
const content::OpenURLParams& params) {
DCHECK(source == main_web_contents_);
if (!params.url.SchemeIs(content::kChromeDevToolsScheme)) {
WebContents* inspected_web_contents = GetInspectedWebContents();
if (!inspected_web_contents)
return nullptr;
content::OpenURLParams modified = params;
modified.referrer = content::Referrer();
return inspected_web_contents->OpenURL(modified);
}
bindings_->Reload();
return main_web_contents_;
}
| 14,063 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: v8::Local<v8::Value> ModuleSystem::LoadModule(const std::string& module_name) {
v8::EscapableHandleScope handle_scope(GetIsolate());
v8::Local<v8::Context> v8_context = context()->v8_context();
v8::Context::Scope context_scope(v8_context);
v8::Local<v8::Value> source(GetSource(module_name));
if (source.IsEmpty() || source->IsUndefined()) {
Fatal(context_, "No source for require(" + module_name + ")");
return v8::Undefined(GetIsolate());
}
v8::Local<v8::String> wrapped_source(
WrapSource(v8::Local<v8::String>::Cast(source)));
v8::Local<v8::String> v8_module_name;
if (!ToV8String(GetIsolate(), module_name.c_str(), &v8_module_name)) {
NOTREACHED() << "module_name is too long";
return v8::Undefined(GetIsolate());
}
v8::Local<v8::Value> func_as_value =
RunString(wrapped_source, v8_module_name);
if (func_as_value.IsEmpty() || func_as_value->IsUndefined()) {
Fatal(context_, "Bad source for require(" + module_name + ")");
return v8::Undefined(GetIsolate());
}
v8::Local<v8::Function> func = v8::Local<v8::Function>::Cast(func_as_value);
v8::Local<v8::Object> define_object = v8::Object::New(GetIsolate());
gin::ModuleRegistry::InstallGlobals(GetIsolate(), define_object);
v8::Local<v8::Value> exports = v8::Object::New(GetIsolate());
v8::Local<v8::Object> natives(NewInstance());
CHECK(!natives.IsEmpty()); // this can fail if v8 has issues
v8::Local<v8::Value> args[] = {
GetPropertyUnsafe(v8_context, define_object, "define"),
GetPropertyUnsafe(v8_context, natives, "require",
v8::NewStringType::kInternalized),
GetPropertyUnsafe(v8_context, natives, "requireNative",
v8::NewStringType::kInternalized),
GetPropertyUnsafe(v8_context, natives, "requireAsync",
v8::NewStringType::kInternalized),
exports,
console::AsV8Object(GetIsolate()),
GetPropertyUnsafe(v8_context, natives, "privates",
v8::NewStringType::kInternalized),
context_->safe_builtins()->GetArray(),
context_->safe_builtins()->GetFunction(),
context_->safe_builtins()->GetJSON(),
context_->safe_builtins()->GetObjekt(),
context_->safe_builtins()->GetRegExp(),
context_->safe_builtins()->GetString(),
context_->safe_builtins()->GetError(),
};
{
v8::TryCatch try_catch(GetIsolate());
try_catch.SetCaptureMessage(true);
context_->CallFunction(func, arraysize(args), args);
if (try_catch.HasCaught()) {
HandleException(try_catch);
return v8::Undefined(GetIsolate());
}
}
return handle_scope.Escape(exports);
}
Commit Message: [Extensions] Don't allow built-in extensions code to be overridden
BUG=546677
Review URL: https://codereview.chromium.org/1417513003
Cr-Commit-Position: refs/heads/master@{#356654}
CWE ID: CWE-264 | v8::Local<v8::Value> ModuleSystem::LoadModule(const std::string& module_name) {
v8::EscapableHandleScope handle_scope(GetIsolate());
v8::Local<v8::Context> v8_context = context()->v8_context();
v8::Context::Scope context_scope(v8_context);
v8::Local<v8::Value> source(GetSource(module_name));
if (source.IsEmpty() || source->IsUndefined()) {
Fatal(context_, "No source for require(" + module_name + ")");
return v8::Undefined(GetIsolate());
}
v8::Local<v8::String> wrapped_source(
WrapSource(v8::Local<v8::String>::Cast(source)));
v8::Local<v8::String> v8_module_name;
if (!ToV8String(GetIsolate(), module_name.c_str(), &v8_module_name)) {
NOTREACHED() << "module_name is too long";
return v8::Undefined(GetIsolate());
}
v8::Local<v8::Value> func_as_value =
RunString(wrapped_source, v8_module_name);
if (func_as_value.IsEmpty() || func_as_value->IsUndefined()) {
Fatal(context_, "Bad source for require(" + module_name + ")");
return v8::Undefined(GetIsolate());
}
v8::Local<v8::Function> func = v8::Local<v8::Function>::Cast(func_as_value);
v8::Local<v8::Object> define_object = v8::Object::New(GetIsolate());
gin::ModuleRegistry::InstallGlobals(GetIsolate(), define_object);
v8::Local<v8::Object> exports = v8::Object::New(GetIsolate());
v8::Local<v8::FunctionTemplate> tmpl = v8::FunctionTemplate::New(
GetIsolate(),
&SetExportsProperty);
v8::Local<v8::String> v8_key;
if (!v8_helpers::ToV8String(GetIsolate(), "$set", &v8_key)) {
NOTREACHED();
return v8::Undefined(GetIsolate());
}
v8::Local<v8::Function> function;
if (!tmpl->GetFunction(v8_context).ToLocal(&function)) {
NOTREACHED();
return v8::Undefined(GetIsolate());
}
exports->ForceSet(v8_key, function, v8::ReadOnly);
v8::Local<v8::Object> natives(NewInstance());
CHECK(!natives.IsEmpty()); // this can fail if v8 has issues
v8::Local<v8::Value> args[] = {
GetPropertyUnsafe(v8_context, define_object, "define"),
GetPropertyUnsafe(v8_context, natives, "require",
v8::NewStringType::kInternalized),
GetPropertyUnsafe(v8_context, natives, "requireNative",
v8::NewStringType::kInternalized),
GetPropertyUnsafe(v8_context, natives, "requireAsync",
v8::NewStringType::kInternalized),
exports,
console::AsV8Object(GetIsolate()),
GetPropertyUnsafe(v8_context, natives, "privates",
v8::NewStringType::kInternalized),
context_->safe_builtins()->GetArray(),
context_->safe_builtins()->GetFunction(),
context_->safe_builtins()->GetJSON(),
context_->safe_builtins()->GetObjekt(),
context_->safe_builtins()->GetRegExp(),
context_->safe_builtins()->GetString(),
context_->safe_builtins()->GetError(),
};
{
v8::TryCatch try_catch(GetIsolate());
try_catch.SetCaptureMessage(true);
context_->CallFunction(func, arraysize(args), args);
if (try_catch.HasCaught()) {
HandleException(try_catch);
return v8::Undefined(GetIsolate());
}
}
return handle_scope.Escape(exports);
}
| 26,045 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: bool WebPageSerializer::serialize(WebFrame* frame,
bool recursive,
WebPageSerializerClient* client,
const WebVector<WebURL>& links,
const WebVector<WebString>& localPaths,
const WebString& localDirectoryName)
{
ASSERT(frame);
ASSERT(client);
ASSERT(links.size() == localPaths.size());
LinkLocalPathMap m_localLinks;
for (size_t i = 0; i < links.size(); i++) {
KURL url = links[i];
ASSERT(!m_localLinks.contains(url.string()));
m_localLinks.set(url.string(), localPaths[i]);
}
Vector<SerializedResource> resources;
PageSerializer serializer(&resources, &m_localLinks, localDirectoryName);
serializer.serialize(toWebViewImpl(frame->view())->page());
for (Vector<SerializedResource>::const_iterator iter = resources.begin(); iter != resources.end(); ++iter) {
client->didSerializeDataForFrame(iter->url, WebCString(iter->data->data(), iter->data->size()), WebPageSerializerClient::CurrentFrameIsFinished);
}
client->didSerializeDataForFrame(KURL(), WebCString("", 0), WebPageSerializerClient::AllFramesAreFinished);
return true;
}
Commit Message: Revert 162155 "This review merges the two existing page serializ..."
Change r162155 broke the world even though it was landed using the CQ.
> This review merges the two existing page serializers, WebPageSerializerImpl and
> PageSerializer, into one, PageSerializer. In addition to this it moves all
> the old tests from WebPageNewSerializerTest and WebPageSerializerTest to the
> PageSerializerTest structure and splits out one test for MHTML into a new
> MHTMLTest file.
>
> Saving as 'Webpage, Complete', 'Webpage, HTML Only' and as MHTML when the
> 'Save Page as MHTML' flag is enabled now uses the same code, and should thus
> have the same feature set. Meaning that both modes now should be a bit better.
>
> Detailed list of changes:
>
> - PageSerializerTest: Prepare for more DTD test
> - PageSerializerTest: Remove now unneccesary input image test
> - PageSerializerTest: Remove unused WebPageSerializer/Impl code
> - PageSerializerTest: Move data URI morph test
> - PageSerializerTest: Move data URI test
> - PageSerializerTest: Move namespace test
> - PageSerializerTest: Move SVG Image test
> - MHTMLTest: Move MHTML specific test to own test file
> - PageSerializerTest: Delete duplicate XML header test
> - PageSerializerTest: Move blank frame test
> - PageSerializerTest: Move CSS test
> - PageSerializerTest: Add frameset/frame test
> - PageSerializerTest: Move old iframe test
> - PageSerializerTest: Move old elements test
> - Use PageSerizer for saving web pages
> - PageSerializerTest: Test for rewriting links
> - PageSerializer: Add rewrite link accumulator
> - PageSerializer: Serialize images in iframes/frames src
> - PageSerializer: XHTML fix for meta tags
> - PageSerializer: Add presentation CSS
> - PageSerializer: Rename out parameter
>
> BUG=
> R=abarth@chromium.org
>
> Review URL: https://codereview.chromium.org/68613003
TBR=tiger@opera.com
Review URL: https://codereview.chromium.org/73673003
git-svn-id: svn://svn.chromium.org/blink/trunk@162156 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-119 | bool WebPageSerializer::serialize(WebFrame* frame,
bool recursive,
WebPageSerializerClient* client,
const WebVector<WebURL>& links,
const WebVector<WebString>& localPaths,
const WebString& localDirectoryName)
{
WebPageSerializerImpl serializerImpl(
frame, recursive, client, links, localPaths, localDirectoryName);
return serializerImpl.serialize();
}
bool WebPageSerializer::retrieveAllResources(WebView* view,
const WebVector<WebCString>& supportedSchemes,
WebVector<WebURL>* resourceURLs,
WebVector<WebURL>* frameURLs) {
WebFrameImpl* mainFrame = toWebFrameImpl(view->mainFrame());
if (!mainFrame)
return false;
Vector<Frame*> framesToVisit;
Vector<Frame*> visitedFrames;
Vector<KURL> frameKURLs;
Vector<KURL> resourceKURLs;
// Let's retrieve the resources from every frame in this page.
framesToVisit.append(mainFrame->frame());
while (!framesToVisit.isEmpty()) {
Frame* frame = framesToVisit[0];
framesToVisit.remove(0);
retrieveResourcesForFrame(frame, supportedSchemes,
&visitedFrames, &framesToVisit,
&frameKURLs, &resourceKURLs);
}
// Converts the results to WebURLs.
WebVector<WebURL> resultResourceURLs(resourceKURLs.size());
for (size_t i = 0; i < resourceKURLs.size(); ++i) {
resultResourceURLs[i] = resourceKURLs[i];
// A frame's src can point to the same URL as another resource, keep the
// resource URL only in such cases.
size_t index = frameKURLs.find(resourceKURLs[i]);
if (index != kNotFound)
frameKURLs.remove(index);
}
*resourceURLs = resultResourceURLs;
WebVector<WebURL> resultFrameURLs(frameKURLs.size());
for (size_t i = 0; i < frameKURLs.size(); ++i)
resultFrameURLs[i] = frameKURLs[i];
*frameURLs = resultFrameURLs;
return true;
}
| 17,458 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: IDNSpoofChecker::IDNSpoofChecker() {
UErrorCode status = U_ZERO_ERROR;
checker_ = uspoof_open(&status);
if (U_FAILURE(status)) {
checker_ = nullptr;
return;
}
uspoof_setRestrictionLevel(checker_, USPOOF_HIGHLY_RESTRICTIVE);
SetAllowedUnicodeSet(&status);
int32_t checks = uspoof_getChecks(checker_, &status) | USPOOF_AUX_INFO;
uspoof_setChecks(checker_, checks, &status);
deviation_characters_ = icu::UnicodeSet(
UNICODE_STRING_SIMPLE("[\\u00df\\u03c2\\u200c\\u200d]"), status);
deviation_characters_.freeze();
non_ascii_latin_letters_ =
icu::UnicodeSet(UNICODE_STRING_SIMPLE("[[:Latin:] - [a-zA-Z]]"), status);
non_ascii_latin_letters_.freeze();
kana_letters_exceptions_ = icu::UnicodeSet(
UNICODE_STRING_SIMPLE("[\\u3078-\\u307a\\u30d8-\\u30da\\u30fb-\\u30fe]"),
status);
kana_letters_exceptions_.freeze();
combining_diacritics_exceptions_ =
icu::UnicodeSet(UNICODE_STRING_SIMPLE("[\\u0300-\\u0339]"), status);
combining_diacritics_exceptions_.freeze();
cyrillic_letters_latin_alike_ = icu::UnicodeSet(
icu::UnicodeString::fromUTF8("[асԁеһіјӏорԛѕԝхуъЬҽпгѵѡ]"), status);
cyrillic_letters_latin_alike_.freeze();
cyrillic_letters_ =
icu::UnicodeSet(UNICODE_STRING_SIMPLE("[[:Cyrl:]]"), status);
cyrillic_letters_.freeze();
DCHECK(U_SUCCESS(status));
lgc_letters_n_ascii_ = icu::UnicodeSet(
UNICODE_STRING_SIMPLE("[[:Latin:][:Greek:][:Cyrillic:][0-9\\u002e_"
"\\u002d][\\u0300-\\u0339]]"),
status);
lgc_letters_n_ascii_.freeze();
UParseError parse_error;
diacritic_remover_.reset(icu::Transliterator::createFromRules(
UNICODE_STRING_SIMPLE("DropAcc"),
icu::UnicodeString::fromUTF8("::NFD; ::[:Nonspacing Mark:] Remove; ::NFC;"
" ł > l; ø > o; đ > d;"),
UTRANS_FORWARD, parse_error, status));
extra_confusable_mapper_.reset(icu::Transliterator::createFromRules(
UNICODE_STRING_SIMPLE("ExtraConf"),
icu::UnicodeString::fromUTF8("[þϼҏ] > p; [ħнћңҥӈӊԋԧԩ] > h;"
"[ĸκкқҝҟҡӄԟ] > k; [ŋп] > n; [ŧтҭԏ] > t;"
"[ƅьҍв] > b; [ωшщฟ] > w; [мӎ] > m;"
"[єҽҿၔ] > e; ґ > r; [ғӻ] > f; [ҫင] > c;"
"ұ > y; [χҳӽӿ] > x;"
"ԃ > d; [ԍဌ] > g; [ടร] > s; ၂ > j;"
"[зӡ] > 3"),
UTRANS_FORWARD, parse_error, status));
DCHECK(U_SUCCESS(status))
<< "Spoofchecker initalization failed due to an error: "
<< u_errorName(status);
}
Commit Message: Add a few more confusability mapping entries
U+0153(œ) => ce
U+00E6(æ), U+04D5 (ӕ) => ae
U+0499(ҙ) => 3
U+0525(ԥ) => n
Bug: 835554, 826019, 836885
Test: components_unittests --gtest_filter=*IDN*
Change-Id: Ic89211f70359d3d67cc25c1805b426b72cdb16ae
Reviewed-on: https://chromium-review.googlesource.com/1055894
Commit-Queue: Jungshik Shin <jshin@chromium.org>
Reviewed-by: Peter Kasting <pkasting@chromium.org>
Cr-Commit-Position: refs/heads/master@{#558928}
CWE ID: | IDNSpoofChecker::IDNSpoofChecker() {
UErrorCode status = U_ZERO_ERROR;
checker_ = uspoof_open(&status);
if (U_FAILURE(status)) {
checker_ = nullptr;
return;
}
uspoof_setRestrictionLevel(checker_, USPOOF_HIGHLY_RESTRICTIVE);
SetAllowedUnicodeSet(&status);
int32_t checks = uspoof_getChecks(checker_, &status) | USPOOF_AUX_INFO;
uspoof_setChecks(checker_, checks, &status);
deviation_characters_ = icu::UnicodeSet(
UNICODE_STRING_SIMPLE("[\\u00df\\u03c2\\u200c\\u200d]"), status);
deviation_characters_.freeze();
non_ascii_latin_letters_ =
icu::UnicodeSet(UNICODE_STRING_SIMPLE("[[:Latin:] - [a-zA-Z]]"), status);
non_ascii_latin_letters_.freeze();
kana_letters_exceptions_ = icu::UnicodeSet(
UNICODE_STRING_SIMPLE("[\\u3078-\\u307a\\u30d8-\\u30da\\u30fb-\\u30fe]"),
status);
kana_letters_exceptions_.freeze();
combining_diacritics_exceptions_ =
icu::UnicodeSet(UNICODE_STRING_SIMPLE("[\\u0300-\\u0339]"), status);
combining_diacritics_exceptions_.freeze();
cyrillic_letters_latin_alike_ = icu::UnicodeSet(
icu::UnicodeString::fromUTF8("[асԁеһіјӏорԛѕԝхуъЬҽпгѵѡ]"), status);
cyrillic_letters_latin_alike_.freeze();
cyrillic_letters_ =
icu::UnicodeSet(UNICODE_STRING_SIMPLE("[[:Cyrl:]]"), status);
cyrillic_letters_.freeze();
DCHECK(U_SUCCESS(status));
lgc_letters_n_ascii_ = icu::UnicodeSet(
UNICODE_STRING_SIMPLE("[[:Latin:][:Greek:][:Cyrillic:][0-9\\u002e_"
"\\u002d][\\u0300-\\u0339]]"),
status);
lgc_letters_n_ascii_.freeze();
UParseError parse_error;
diacritic_remover_.reset(icu::Transliterator::createFromRules(
UNICODE_STRING_SIMPLE("DropAcc"),
icu::UnicodeString::fromUTF8("::NFD; ::[:Nonspacing Mark:] Remove; ::NFC;"
" ł > l; ø > o; đ > d;"),
UTRANS_FORWARD, parse_error, status));
// - {U+00E6 (æ), U+04D5 (ӕ)} => "ae"
// - {U+014B (ŋ), U+043F (п), U+0525 (ԥ)} => n
// - U+0153 (œ) => "ce"
// TODO: see https://crbug.com/843352 for further work on
// U+0525 and U+0153.
// - {U+0437 (з), U+0499 (ҙ), U+04E1 (ӡ)} => 3
extra_confusable_mapper_.reset(icu::Transliterator::createFromRules(
UNICODE_STRING_SIMPLE("ExtraConf"),
icu::UnicodeString::fromUTF8(
"[æӕ] > ae; [þϼҏ] > p; [ħнћңҥӈӊԋԧԩ] > h;"
"[ĸκкқҝҟҡӄԟ] > k; [ŋпԥ] > n; œ > ce;"
"[ŧтҭԏ] > t; [ƅьҍв] > b; [ωшщฟ] > w;"
"[мӎ] > m; [єҽҿၔ] > e; ґ > r; [ғӻ] > f;"
"[ҫင] > c; ұ > y; [χҳӽӿ] > x;"
"ԃ > d; [ԍဌ] > g; [ടร] > s; ၂ > j;"
"[зҙӡ] > 3"),
UTRANS_FORWARD, parse_error, status));
DCHECK(U_SUCCESS(status))
<< "Spoofchecker initalization failed due to an error: "
<< u_errorName(status);
}
| 14,421 |
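The fix above works by folding visually confusable characters to their ASCII lookalikes (via ICU transliterator rules) before a hostname is judged, so that œ, æ/ӕ, ҙ and ԥ can no longer be used to imitate Latin labels. A minimal, table-driven sketch of that folding step follows; it is plain C with invented names (fold_table, fold_confusables), not the ICU machinery Chromium actually uses.

    #include <stdio.h>
    #include <string.h>

    /* Toy confusable-folding table (UTF-8 lookalike -> ASCII), mirroring the
     * mappings listed in the commit message; Chromium itself uses ICU rules. */
    static const struct { const char *utf8; const char *ascii; } fold_table[] = {
        { "\xC5\x93", "ce" },   /* U+0153 */
        { "\xC3\xA6", "ae" },   /* U+00E6 */
        { "\xD3\x95", "ae" },   /* U+04D5 */
        { "\xD2\x99", "3"  },   /* U+0499 */
        { "\xD4\xA5", "n"  },   /* U+0525 */
    };

    /* Collapse lookalike characters so spoofed and genuine labels compare equal. */
    static void fold_confusables(const char *in, char *out, size_t outsz) {
        size_t o = 0;
        while (*in && o + 4 < outsz) {
            int matched = 0;
            for (size_t i = 0; i < sizeof(fold_table) / sizeof(fold_table[0]); i++) {
                size_t n = strlen(fold_table[i].utf8);
                if (strncmp(in, fold_table[i].utf8, n) == 0) {
                    size_t r = strlen(fold_table[i].ascii);
                    memcpy(out + o, fold_table[i].ascii, r);
                    o += r;
                    in += n;
                    matched = 1;
                    break;
                }
            }
            if (!matched)
                out[o++] = *in++;
        }
        out[o] = '\0';
    }

    int main(void) {
        char skeleton[64];
        fold_confusables("\xC3\xA6on.test", skeleton, sizeof(skeleton));
        printf("%s\n", skeleton);   /* prints "aeon.test" */
        return 0;
    }

A label whose folded skeleton matches a well-known domain while its raw form does not is a spoofing candidate, which is exactly what the extra mapping entries tighten.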
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static char *create_output_name(unsigned char *fname, unsigned char *dir,
int lower, int isunix, int utf8)
{
unsigned char *p, *name, c, *fe, sep, slash;
unsigned int x;
sep = (isunix) ? '/' : '\\'; /* the path-seperator */
slash = (isunix) ? '\\' : '/'; /* the other slash */
/* length of filename */
x = strlen((char *) fname);
/* UTF8 worst case scenario: tolower() expands all chars from 1 to 3 bytes */
if (utf8) x *= 3;
/* length of output directory */
if (dir) x += strlen((char *) dir);
if (!(name = (unsigned char *) malloc(x + 2))) {
fprintf(stderr, "out of memory!\n");
return NULL;
}
/* start with blank name */
*name = '\0';
/* add output directory if needed */
if (dir) {
strcpy((char *) name, (char *) dir);
strcat((char *) name, "/");
}
/* remove leading slashes */
while (*fname == sep) fname++;
/* copy from fi->filename to new name, converting MS-DOS slashes to UNIX
* slashes as we go. Also lowercases characters if needed.
*/
p = &name[strlen((char *)name)];
fe = &fname[strlen((char *)fname)];
if (utf8) {
/* UTF8 translates two-byte unicode characters into 1, 2 or 3 bytes.
* %000000000xxxxxxx -> %0xxxxxxx
* %00000xxxxxyyyyyy -> %110xxxxx %10yyyyyy
* %xxxxyyyyyyzzzzzz -> %1110xxxx %10yyyyyy %10zzzzzz
*
* Therefore, the inverse is as follows:
* First char:
* 0x00 - 0x7F = one byte char
* 0x80 - 0xBF = invalid
* 0xC0 - 0xDF = 2 byte char (next char only 0x80-0xBF is valid)
* 0xE0 - 0xEF = 3 byte char (next 2 chars only 0x80-0xBF is valid)
* 0xF0 - 0xFF = invalid
*/
do {
if (fname >= fe) {
free(name);
return NULL;
}
/* get next UTF8 char */
if ((c = *fname++) < 0x80) x = c;
else {
if ((c >= 0xC0) && (c < 0xE0)) {
x = (c & 0x1F) << 6;
x |= *fname++ & 0x3F;
}
else if ((c >= 0xE0) && (c < 0xF0)) {
x = (c & 0xF) << 12;
x |= (*fname++ & 0x3F) << 6;
x |= *fname++ & 0x3F;
}
else x = '?';
}
/* whatever is the path seperator -> '/'
* whatever is the other slash -> '\\'
* otherwise, if lower is set, the lowercase version */
if (x == sep) x = '/';
else if (x == slash) x = '\\';
else if (lower) x = (unsigned int) tolower((int) x);
/* integer back to UTF8 */
if (x < 0x80) {
*p++ = (unsigned char) x;
}
else if (x < 0x800) {
*p++ = 0xC0 | (x >> 6);
*p++ = 0x80 | (x & 0x3F);
}
else {
*p++ = 0xE0 | (x >> 12);
*p++ = 0x80 | ((x >> 6) & 0x3F);
*p++ = 0x80 | (x & 0x3F);
}
} while (x);
}
else {
/* regular non-utf8 version */
do {
c = *fname++;
if (c == sep) c = '/';
else if (c == slash) c = '\\';
else if (lower) c = (unsigned char) tolower((int) c);
} while ((*p++ = c));
}
return (char *) name;
}
Commit Message: add anti "../" and leading slash protection to chmextract
CWE ID: CWE-22 | static char *create_output_name(unsigned char *fname, unsigned char *dir,
char *create_output_name(char *fname) {
char *out, *p;
if ((out = malloc(strlen(fname) + 1))) {
/* remove leading slashes */
while (*fname == '/' || *fname == '\\') fname++;
/* if that removes all characters, just call it "x" */
strcpy(out, (*fname) ? fname : "x");
/* change "../" to "xx/" */
for (p = out; *p; p++) {
if (p[0] == '.' && p[1] == '.' && (p[2] == '/' || p[2] == '\\')) {
p[0] = p[1] = 'x';
}
}
}
return out;
}
| 19,219 |
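The hardened create_output_name() above addresses CWE-22 by stripping leading slashes and rewriting ".." components instead of trusting names stored in the CHM archive. The standalone harness below reproduces that fixed helper so the demo compiles on its own and drives it with a few hostile inputs; the main() wrapper and sample paths are illustrative additions, not part of chmextract.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* The fixed helper from above, reproduced so this demo compiles alone. */
    static char *create_output_name(char *fname) {
        char *out, *p;
        if ((out = malloc(strlen(fname) + 1))) {
            while (*fname == '/' || *fname == '\\') fname++;   /* drop leading slashes */
            strcpy(out, (*fname) ? fname : "x");               /* empty name becomes "x" */
            for (p = out; *p; p++) {                           /* ".." before a separator -> "xx" */
                if (p[0] == '.' && p[1] == '.' && (p[2] == '/' || p[2] == '\\'))
                    p[0] = p[1] = 'x';
            }
        }
        return out;
    }

    int main(void) {
        const char *samples[] = { "../../etc/passwd", "/abs/path.txt", "docs/..\\secret", "///" };
        for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            char tmp[256];
            strcpy(tmp, samples[i]);            /* copy: the helper takes a non-const char * */
            char *safe = create_output_name(tmp);
            printf("%-18s -> %s\n", samples[i], safe ? safe : "(alloc failed)");
            free(safe);
        }
        return 0;
    }

Neutralizing the traversal components (rather than rejecting the entry outright) keeps extraction going while ensuring the resulting relative path cannot lexically escape the output directory.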
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void VarianceTest<VarianceFunctionType>::OneQuarterTest() {
memset(src_, 255, block_size_);
const int half = block_size_ / 2;
memset(ref_, 255, half);
memset(ref_ + half, 0, half);
unsigned int sse;
unsigned int var;
REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
const unsigned int expected = block_size_ * 255 * 255 / 4;
EXPECT_EQ(expected, var);
}
Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478
DO NOT MERGE - libvpx: Pull from upstream
Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06
BUG=23452792
Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec
CWE ID: CWE-119 | void VarianceTest<VarianceFunctionType>::OneQuarterTest() {
const int half = block_size_ / 2;
if (!use_high_bit_depth_) {
memset(src_, 255, block_size_);
memset(ref_, 255, half);
memset(ref_ + half, 0, half);
#if CONFIG_VP9_HIGHBITDEPTH
} else {
vpx_memset16(CONVERT_TO_SHORTPTR(src_), 255 << (bit_depth_ - 8),
block_size_);
vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 255 << (bit_depth_ - 8), half);
vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, 0, half);
#endif // CONFIG_VP9_HIGHBITDEPTH
}
unsigned int sse;
unsigned int var;
ASM_REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
const unsigned int expected = block_size_ * 255 * 255 / 4;
EXPECT_EQ(expected, var);
}
| 15,008 |
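The repaired test above distinguishes ordinary 8-bit planes from high-bit-depth planes, which libvpx stores as 16-bit samples reached through CONVERT_TO_SHORTPTR(), and fills the latter with vpx_memset16() using a value scaled by the bit depth. The short program below illustrates why a byte-wise memset(255) is wrong for such buffers; fill_samples() is an invented name, not libvpx API.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Fill a sample buffer with "white": byte-wise for 8-bit data,
     * element-wise (and scaled) when samples are stored as uint16_t. */
    static void fill_samples(void *buf, size_t n, int bit_depth) {
        if (bit_depth == 8) {
            memset(buf, 255, n);                              /* one byte per sample */
        } else {
            uint16_t *p = (uint16_t *)buf;
            uint16_t v = (uint16_t)(255 << (bit_depth - 8));  /* e.g. 10-bit white = 1020 */
            for (size_t i = 0; i < n; i++)
                p[i] = v;                                     /* memset(255) here would give 0xFFFF */
        }
    }

    int main(void) {
        uint16_t hbd[4];
        fill_samples(hbd, 4, 10);
        printf("10-bit sample: %u (not %u)\n", (unsigned)hbd[0], 0xFFFFu);
        return 0;
    }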
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static u_char *php_parserr(u_char *cp, querybuf *answer, int type_to_fetch, int store, int raw, zval **subarray)
{
u_short type, class, dlen;
u_long ttl;
long n, i;
u_short s;
u_char *tp, *p;
char name[MAXHOSTNAMELEN];
int have_v6_break = 0, in_v6_break = 0;
*subarray = NULL;
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, sizeof(name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
GETSHORT(type, cp);
GETSHORT(class, cp);
GETLONG(ttl, cp);
GETSHORT(dlen, cp);
if (type_to_fetch != T_ANY && type != type_to_fetch) {
cp += dlen;
return cp;
}
if (!store) {
cp += dlen;
return cp;
}
ALLOC_INIT_ZVAL(*subarray);
array_init(*subarray);
add_assoc_string(*subarray, "host", name, 1);
add_assoc_string(*subarray, "class", "IN", 1);
add_assoc_long(*subarray, "ttl", ttl);
if (raw) {
add_assoc_long(*subarray, "type", type);
add_assoc_stringl(*subarray, "data", (char*) cp, (uint) dlen, 1);
cp += dlen;
return cp;
}
switch (type) {
case DNS_T_A:
add_assoc_string(*subarray, "type", "A", 1);
snprintf(name, sizeof(name), "%d.%d.%d.%d", cp[0], cp[1], cp[2], cp[3]);
add_assoc_string(*subarray, "ip", name, 1);
cp += dlen;
break;
case DNS_T_MX:
add_assoc_string(*subarray, "type", "MX", 1);
GETSHORT(n, cp);
add_assoc_long(*subarray, "pri", n);
/* no break; */
case DNS_T_CNAME:
if (type == DNS_T_CNAME) {
add_assoc_string(*subarray, "type", "CNAME", 1);
}
/* no break; */
case DNS_T_NS:
if (type == DNS_T_NS) {
add_assoc_string(*subarray, "type", "NS", 1);
}
/* no break; */
case DNS_T_PTR:
if (type == DNS_T_PTR) {
add_assoc_string(*subarray, "type", "PTR", 1);
}
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "target", name, 1);
break;
case DNS_T_HINFO:
/* See RFC 1010 for values */
add_assoc_string(*subarray, "type", "HINFO", 1);
n = *cp & 0xFF;
cp++;
add_assoc_stringl(*subarray, "cpu", (char*)cp, n, 1);
cp += n;
n = *cp & 0xFF;
cp++;
add_assoc_stringl(*subarray, "os", (char*)cp, n, 1);
cp += n;
break;
case DNS_T_TXT:
{
int ll = 0;
zval *entries = NULL;
add_assoc_string(*subarray, "type", "TXT", 1);
tp = emalloc(dlen + 1);
MAKE_STD_ZVAL(entries);
array_init(entries);
while (ll < dlen) {
n = cp[ll];
if ((ll + n) >= dlen) {
n = dlen - (ll + 1);
}
memcpy(tp + ll , cp + ll + 1, n);
add_next_index_stringl(entries, cp + ll + 1, n, 1);
ll = ll + n + 1;
}
tp[dlen] = '\0';
cp += dlen;
add_assoc_stringl(*subarray, "txt", tp, (dlen>0)?dlen - 1:0, 0);
add_assoc_zval(*subarray, "entries", entries);
}
break;
case DNS_T_SOA:
add_assoc_string(*subarray, "type", "SOA", 1);
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) -2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "mname", name, 1);
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) -2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "rname", name, 1);
GETLONG(n, cp);
add_assoc_long(*subarray, "serial", n);
GETLONG(n, cp);
add_assoc_long(*subarray, "refresh", n);
GETLONG(n, cp);
add_assoc_long(*subarray, "retry", n);
GETLONG(n, cp);
add_assoc_long(*subarray, "expire", n);
GETLONG(n, cp);
add_assoc_long(*subarray, "minimum-ttl", n);
break;
case DNS_T_AAAA:
tp = (u_char*)name;
for(i=0; i < 8; i++) {
GETSHORT(s, cp);
if (s != 0) {
if (tp > (u_char *)name) {
in_v6_break = 0;
tp[0] = ':';
tp++;
}
tp += sprintf((char*)tp,"%x",s);
} else {
if (!have_v6_break) {
have_v6_break = 1;
in_v6_break = 1;
tp[0] = ':';
tp++;
} else if (!in_v6_break) {
tp[0] = ':';
tp++;
tp[0] = '0';
tp++;
}
}
}
if (have_v6_break && in_v6_break) {
tp[0] = ':';
tp++;
}
tp[0] = '\0';
add_assoc_string(*subarray, "type", "AAAA", 1);
add_assoc_string(*subarray, "ipv6", name, 1);
break;
case DNS_T_A6:
p = cp;
add_assoc_string(*subarray, "type", "A6", 1);
n = ((int)cp[0]) & 0xFF;
cp++;
add_assoc_long(*subarray, "masklen", n);
tp = (u_char*)name;
if (n > 15) {
have_v6_break = 1;
in_v6_break = 1;
tp[0] = ':';
tp++;
}
if (n % 16 > 8) {
/* Partial short */
if (cp[0] != 0) {
if (tp > (u_char *)name) {
in_v6_break = 0;
tp[0] = ':';
tp++;
}
sprintf((char*)tp, "%x", cp[0] & 0xFF);
} else {
if (!have_v6_break) {
have_v6_break = 1;
in_v6_break = 1;
tp[0] = ':';
tp++;
} else if (!in_v6_break) {
tp[0] = ':';
tp++;
tp[0] = '0';
tp++;
}
}
cp++;
}
for (i = (n + 8) / 16; i < 8; i++) {
GETSHORT(s, cp);
if (s != 0) {
if (tp > (u_char *)name) {
in_v6_break = 0;
tp[0] = ':';
tp++;
}
tp += sprintf((char*)tp,"%x",s);
} else {
if (!have_v6_break) {
have_v6_break = 1;
in_v6_break = 1;
tp[0] = ':';
tp++;
} else if (!in_v6_break) {
tp[0] = ':';
tp++;
tp[0] = '0';
tp++;
}
}
}
if (have_v6_break && in_v6_break) {
tp[0] = ':';
tp++;
}
tp[0] = '\0';
add_assoc_string(*subarray, "ipv6", name, 1);
if (cp < p + dlen) {
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "chain", name, 1);
}
break;
case DNS_T_SRV:
add_assoc_string(*subarray, "type", "SRV", 1);
GETSHORT(n, cp);
add_assoc_long(*subarray, "pri", n);
GETSHORT(n, cp);
add_assoc_long(*subarray, "weight", n);
GETSHORT(n, cp);
add_assoc_long(*subarray, "port", n);
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "target", name, 1);
break;
case DNS_T_NAPTR:
add_assoc_string(*subarray, "type", "NAPTR", 1);
GETSHORT(n, cp);
add_assoc_long(*subarray, "order", n);
GETSHORT(n, cp);
add_assoc_long(*subarray, "pref", n);
n = (cp[0] & 0xFF);
add_assoc_stringl(*subarray, "flags", (char*)++cp, n, 1);
cp += n;
n = (cp[0] & 0xFF);
add_assoc_stringl(*subarray, "services", (char*)++cp, n, 1);
cp += n;
n = (cp[0] & 0xFF);
add_assoc_stringl(*subarray, "regex", (char*)++cp, n, 1);
cp += n;
n = dn_expand(answer->qb2, answer->qb2+65536, cp, name, (sizeof name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "replacement", name, 1);
break;
default:
zval_ptr_dtor(subarray);
*subarray = NULL;
cp += dlen;
break;
}
return cp;
}
Commit Message: Fixed Sec Bug #67717 segfault in dns_get_record CVE-2014-3597
Incomplete fix for CVE-2014-4049
Check possible buffer overflow
- pass real buffer end to dn_expand calls
- check buffer len before each read
CWE ID: CWE-119 | static u_char *php_parserr(u_char *cp, querybuf *answer, int type_to_fetch, int store, int raw, zval **subarray)
static u_char *php_parserr(u_char *cp, u_char *end, querybuf *answer, int type_to_fetch, int store, int raw, zval **subarray)
{
u_short type, class, dlen;
u_long ttl;
long n, i;
u_short s;
u_char *tp, *p;
char name[MAXHOSTNAMELEN];
int have_v6_break = 0, in_v6_break = 0;
*subarray = NULL;
n = dn_expand(answer->qb2, end, cp, name, sizeof(name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
CHECKCP(10);
GETSHORT(type, cp);
GETSHORT(class, cp);
GETLONG(ttl, cp);
GETSHORT(dlen, cp);
CHECKCP(dlen);
if (type_to_fetch != T_ANY && type != type_to_fetch) {
cp += dlen;
return cp;
}
if (!store) {
cp += dlen;
return cp;
}
ALLOC_INIT_ZVAL(*subarray);
array_init(*subarray);
add_assoc_string(*subarray, "host", name, 1);
add_assoc_string(*subarray, "class", "IN", 1);
add_assoc_long(*subarray, "ttl", ttl);
if (raw) {
add_assoc_long(*subarray, "type", type);
add_assoc_stringl(*subarray, "data", (char*) cp, (uint) dlen, 1);
cp += dlen;
return cp;
}
switch (type) {
case DNS_T_A:
CHECKCP(4);
add_assoc_string(*subarray, "type", "A", 1);
snprintf(name, sizeof(name), "%d.%d.%d.%d", cp[0], cp[1], cp[2], cp[3]);
add_assoc_string(*subarray, "ip", name, 1);
cp += dlen;
break;
case DNS_T_MX:
CHECKCP(2);
add_assoc_string(*subarray, "type", "MX", 1);
GETSHORT(n, cp);
add_assoc_long(*subarray, "pri", n);
/* no break; */
case DNS_T_CNAME:
if (type == DNS_T_CNAME) {
add_assoc_string(*subarray, "type", "CNAME", 1);
}
/* no break; */
case DNS_T_NS:
if (type == DNS_T_NS) {
add_assoc_string(*subarray, "type", "NS", 1);
}
/* no break; */
case DNS_T_PTR:
if (type == DNS_T_PTR) {
add_assoc_string(*subarray, "type", "PTR", 1);
}
n = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "target", name, 1);
break;
case DNS_T_HINFO:
/* See RFC 1010 for values */
add_assoc_string(*subarray, "type", "HINFO", 1);
CHECKCP(1);
n = *cp & 0xFF;
cp++;
CHECKCP(n);
add_assoc_stringl(*subarray, "cpu", (char*)cp, n, 1);
cp += n;
CHECKCP(1);
n = *cp & 0xFF;
cp++;
CHECKCP(n);
add_assoc_stringl(*subarray, "os", (char*)cp, n, 1);
cp += n;
break;
case DNS_T_TXT:
{
int l1 = 0, l2 = 0;
zval *entries = NULL;
add_assoc_string(*subarray, "type", "TXT", 1);
tp = emalloc(dlen + 1);
MAKE_STD_ZVAL(entries);
array_init(entries);
while (l1 < dlen) {
n = cp[l1];
if ((l1 + n) >= dlen) {
n = dlen - (l1 + 1);
}
if (n) {
memcpy(tp + l2 , cp + l1 + 1, n);
add_next_index_stringl(entries, cp + l1 + 1, n, 1);
}
l1 = l1 + n + 1;
l2 = l2 + n;
}
tp[l2] = '\0';
cp += dlen;
add_assoc_stringl(*subarray, "txt", tp, l2, 0);
add_assoc_zval(*subarray, "entries", entries);
}
break;
case DNS_T_SOA:
add_assoc_string(*subarray, "type", "SOA", 1);
n = dn_expand(answer->qb2, end, cp, name, (sizeof name) -2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "mname", name, 1);
n = dn_expand(answer->qb2, end, cp, name, (sizeof name) -2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "rname", name, 1);
CHECKCP(5*4);
GETLONG(n, cp);
add_assoc_long(*subarray, "serial", n);
GETLONG(n, cp);
add_assoc_long(*subarray, "refresh", n);
GETLONG(n, cp);
add_assoc_long(*subarray, "retry", n);
GETLONG(n, cp);
add_assoc_long(*subarray, "expire", n);
GETLONG(n, cp);
add_assoc_long(*subarray, "minimum-ttl", n);
break;
case DNS_T_AAAA:
tp = (u_char*)name;
CHECKCP(8*2);
for(i=0; i < 8; i++) {
GETSHORT(s, cp);
if (s != 0) {
if (tp > (u_char *)name) {
in_v6_break = 0;
tp[0] = ':';
tp++;
}
tp += sprintf((char*)tp,"%x",s);
} else {
if (!have_v6_break) {
have_v6_break = 1;
in_v6_break = 1;
tp[0] = ':';
tp++;
} else if (!in_v6_break) {
tp[0] = ':';
tp++;
tp[0] = '0';
tp++;
}
}
}
if (have_v6_break && in_v6_break) {
tp[0] = ':';
tp++;
}
tp[0] = '\0';
add_assoc_string(*subarray, "type", "AAAA", 1);
add_assoc_string(*subarray, "ipv6", name, 1);
break;
case DNS_T_A6:
p = cp;
add_assoc_string(*subarray, "type", "A6", 1);
CHECKCP(1);
n = ((int)cp[0]) & 0xFF;
cp++;
add_assoc_long(*subarray, "masklen", n);
tp = (u_char*)name;
if (n > 15) {
have_v6_break = 1;
in_v6_break = 1;
tp[0] = ':';
tp++;
}
if (n % 16 > 8) {
/* Partial short */
if (cp[0] != 0) {
if (tp > (u_char *)name) {
in_v6_break = 0;
tp[0] = ':';
tp++;
}
sprintf((char*)tp, "%x", cp[0] & 0xFF);
} else {
if (!have_v6_break) {
have_v6_break = 1;
in_v6_break = 1;
tp[0] = ':';
tp++;
} else if (!in_v6_break) {
tp[0] = ':';
tp++;
tp[0] = '0';
tp++;
}
}
cp++;
}
for (i = (n + 8) / 16; i < 8; i++) {
CHECKCP(2);
GETSHORT(s, cp);
if (s != 0) {
if (tp > (u_char *)name) {
in_v6_break = 0;
tp[0] = ':';
tp++;
}
tp += sprintf((char*)tp,"%x",s);
} else {
if (!have_v6_break) {
have_v6_break = 1;
in_v6_break = 1;
tp[0] = ':';
tp++;
} else if (!in_v6_break) {
tp[0] = ':';
tp++;
tp[0] = '0';
tp++;
}
}
}
if (have_v6_break && in_v6_break) {
tp[0] = ':';
tp++;
}
tp[0] = '\0';
add_assoc_string(*subarray, "ipv6", name, 1);
if (cp < p + dlen) {
n = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "chain", name, 1);
}
break;
case DNS_T_SRV:
CHECKCP(3*2);
add_assoc_string(*subarray, "type", "SRV", 1);
GETSHORT(n, cp);
add_assoc_long(*subarray, "pri", n);
GETSHORT(n, cp);
add_assoc_long(*subarray, "weight", n);
GETSHORT(n, cp);
add_assoc_long(*subarray, "port", n);
n = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "target", name, 1);
break;
case DNS_T_NAPTR:
CHECKCP(2*2);
add_assoc_string(*subarray, "type", "NAPTR", 1);
GETSHORT(n, cp);
add_assoc_long(*subarray, "order", n);
GETSHORT(n, cp);
add_assoc_long(*subarray, "pref", n);
CHECKCP(1);
n = (cp[0] & 0xFF);
cp++;
CHECKCP(n);
add_assoc_stringl(*subarray, "flags", (char*)cp, n, 1);
cp += n;
CHECKCP(1);
n = (cp[0] & 0xFF);
cp++;
CHECKCP(n);
add_assoc_stringl(*subarray, "services", (char*)cp, n, 1);
cp += n;
CHECKCP(1);
n = (cp[0] & 0xFF);
cp++;
CHECKCP(n);
add_assoc_stringl(*subarray, "regex", (char*)cp, n, 1);
cp += n;
n = dn_expand(answer->qb2, end, cp, name, (sizeof name) - 2);
if (n < 0) {
return NULL;
}
cp += n;
add_assoc_string(*subarray, "replacement", name, 1);
break;
default:
zval_ptr_dtor(subarray);
*subarray = NULL;
cp += dlen;
break;
}
return cp;
}
| 4,393 |
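The patched php_parserr() above carries an explicit end pointer and guards every fixed-size read and length-prefixed copy with CHECKCP() before touching the buffer. The macro's definition is not included in the snippet; a definition along the lines below would match how it is used (abort the record parser when fewer than n bytes remain). The surrounding toy record reader only demonstrates the cursor-plus-end pattern and is not PHP source.

    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    /* Likely shape of the guard used above: give up unless n bytes remain
     * between the read cursor `cp` and the end of the answer buffer `end`. */
    #define CHECKCP(n) do { if ((size_t)(end - cp) < (size_t)(n)) return NULL; } while (0)

    /* Toy record: u16 type, u16 length, then `length` payload bytes. */
    static const uint8_t *read_record(const uint8_t *cp, const uint8_t *end,
                                      unsigned *type, unsigned *len) {
        CHECKCP(4);                          /* header must fit */
        *type = (unsigned)(cp[0] << 8 | cp[1]); cp += 2;
        *len  = (unsigned)(cp[0] << 8 | cp[1]); cp += 2;
        CHECKCP(*len);                       /* payload must fit before it is copied */
        return cp + *len;
    }

    int main(void) {
        uint8_t pkt[] = { 0x00, 0x01, 0x00, 0xFF };   /* claims 255 payload bytes, has none */
        unsigned t, l;
        const uint8_t *next = read_record(pkt, pkt + sizeof(pkt), &t, &l);
        printf(next ? "ok\n" : "truncated record rejected\n");
        return 0;
    }

The key design point is that the buffer boundary travels with the parser as a pointer, so no length field taken from the packet is ever trusted on its own.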
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static void php_wddx_push_element(void *user_data, const XML_Char *name, const XML_Char **atts)
{
st_entry ent;
wddx_stack *stack = (wddx_stack *)user_data;
if (!strcmp(name, EL_PACKET)) {
int i;
if (atts) for (i=0; atts[i]; i++) {
if (!strcmp(atts[i], EL_VERSION)) {
/* nothing for now */
}
}
} else if (!strcmp(name, EL_STRING)) {
ent.type = ST_STRING;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
INIT_PZVAL(ent.data);
Z_TYPE_P(ent.data) = IS_STRING;
Z_STRVAL_P(ent.data) = STR_EMPTY_ALLOC();
Z_STRLEN_P(ent.data) = 0;
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_BINARY)) {
ent.type = ST_BINARY;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
INIT_PZVAL(ent.data);
Z_TYPE_P(ent.data) = IS_STRING;
Z_STRVAL_P(ent.data) = STR_EMPTY_ALLOC();
Z_STRLEN_P(ent.data) = 0;
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_CHAR)) {
int i;
if (atts) for (i = 0; atts[i]; i++) {
if (!strcmp(atts[i], EL_CHAR_CODE) && atts[i+1] && atts[i+1][0]) {
char tmp_buf[2];
snprintf(tmp_buf, sizeof(tmp_buf), "%c", (char)strtol(atts[i+1], NULL, 16));
php_wddx_process_data(user_data, tmp_buf, strlen(tmp_buf));
break;
}
}
} else if (!strcmp(name, EL_NUMBER)) {
ent.type = ST_NUMBER;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
INIT_PZVAL(ent.data);
Z_TYPE_P(ent.data) = IS_LONG;
Z_LVAL_P(ent.data) = 0;
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_BOOLEAN)) {
int i;
if (atts) for (i = 0; atts[i]; i++) {
if (!strcmp(atts[i], EL_VALUE) && atts[i+1] && atts[i+1][0]) {
ent.type = ST_BOOLEAN;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
INIT_PZVAL(ent.data);
Z_TYPE_P(ent.data) = IS_BOOL;
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
php_wddx_process_data(user_data, atts[i+1], strlen(atts[i+1]));
break;
}
} else {
ent.type = ST_BOOLEAN;
SET_STACK_VARNAME;
ZVAL_FALSE(&ent.data);
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
}
} else if (!strcmp(name, EL_NULL)) {
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
}
} else if (!strcmp(name, EL_NULL)) {
ent.type = ST_NULL;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
INIT_PZVAL(ent.data);
ZVAL_NULL(ent.data);
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_ARRAY)) {
ent.type = ST_ARRAY;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
array_init(ent.data);
INIT_PZVAL(ent.data);
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_STRUCT)) {
ent.type = ST_STRUCT;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
array_init(ent.data);
INIT_PZVAL(ent.data);
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_VAR)) {
int i;
if (atts) for (i = 0; atts[i]; i++) {
if (!strcmp(atts[i], EL_NAME) && atts[i+1] && atts[i+1][0]) {
if (stack->varname) efree(stack->varname);
stack->varname = estrdup(atts[i+1]);
break;
}
}
} else if (!strcmp(name, EL_RECORDSET)) {
int i;
ent.type = ST_RECORDSET;
SET_STACK_VARNAME;
MAKE_STD_ZVAL(ent.data);
array_init(ent.data);
if (atts) for (i = 0; atts[i]; i++) {
if (!strcmp(atts[i], "fieldNames") && atts[i+1] && atts[i+1][0]) {
zval *tmp;
char *key;
char *p1, *p2, *endp;
i++;
endp = (char *)atts[i] + strlen(atts[i]);
p1 = (char *)atts[i];
while ((p2 = php_memnstr(p1, ",", sizeof(",")-1, endp)) != NULL) {
key = estrndup(p1, p2 - p1);
MAKE_STD_ZVAL(tmp);
array_init(tmp);
add_assoc_zval_ex(ent.data, key, p2 - p1 + 1, tmp);
p1 = p2 + sizeof(",")-1;
efree(key);
}
if (p1 <= endp) {
MAKE_STD_ZVAL(tmp);
array_init(tmp);
add_assoc_zval_ex(ent.data, p1, endp - p1 + 1, tmp);
}
break;
}
}
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_FIELD)) {
int i;
st_entry ent;
ent.type = ST_FIELD;
ent.varname = NULL;
ent.data = NULL;
if (atts) for (i = 0; atts[i]; i++) {
if (!strcmp(atts[i], EL_NAME) && atts[i+1] && atts[i+1][0]) {
st_entry *recordset;
zval **field;
if (wddx_stack_top(stack, (void**)&recordset) == SUCCESS &&
recordset->type == ST_RECORDSET &&
zend_hash_find(Z_ARRVAL_P(recordset->data), (char*)atts[i+1], strlen(atts[i+1])+1, (void**)&field) == SUCCESS) {
ent.data = *field;
}
break;
}
}
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_DATETIME)) {
ent.type = ST_DATETIME;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
INIT_PZVAL(ent.data);
Z_TYPE_P(ent.data) = IS_LONG;
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
}
Commit Message:
CWE ID: CWE-502 | static void php_wddx_push_element(void *user_data, const XML_Char *name, const XML_Char **atts)
{
st_entry ent;
wddx_stack *stack = (wddx_stack *)user_data;
if (!strcmp(name, EL_PACKET)) {
int i;
if (atts) for (i=0; atts[i]; i++) {
if (!strcmp(atts[i], EL_VERSION)) {
/* nothing for now */
}
}
} else if (!strcmp(name, EL_STRING)) {
ent.type = ST_STRING;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
INIT_PZVAL(ent.data);
Z_TYPE_P(ent.data) = IS_STRING;
Z_STRVAL_P(ent.data) = STR_EMPTY_ALLOC();
Z_STRLEN_P(ent.data) = 0;
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_BINARY)) {
ent.type = ST_BINARY;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
INIT_PZVAL(ent.data);
Z_TYPE_P(ent.data) = IS_STRING;
Z_STRVAL_P(ent.data) = STR_EMPTY_ALLOC();
Z_STRLEN_P(ent.data) = 0;
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_CHAR)) {
int i;
if (atts) for (i = 0; atts[i]; i++) {
if (!strcmp(atts[i], EL_CHAR_CODE) && atts[i+1] && atts[i+1][0]) {
char tmp_buf[2];
snprintf(tmp_buf, sizeof(tmp_buf), "%c", (char)strtol(atts[i+1], NULL, 16));
php_wddx_process_data(user_data, tmp_buf, strlen(tmp_buf));
break;
}
}
} else if (!strcmp(name, EL_NUMBER)) {
ent.type = ST_NUMBER;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
INIT_PZVAL(ent.data);
Z_TYPE_P(ent.data) = IS_LONG;
Z_LVAL_P(ent.data) = 0;
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_BOOLEAN)) {
int i;
ALLOC_ZVAL(ent.data);
INIT_PZVAL(ent.data);
Z_TYPE_P(ent.data) = IS_BOOL;
ent.type = ST_BOOLEAN;
SET_STACK_VARNAME;
if (atts) for (i = 0; atts[i]; i++) {
if (!strcmp(atts[i], EL_VALUE) && atts[i+1] && atts[i+1][0]) {
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
php_wddx_process_data(user_data, atts[i+1], strlen(atts[i+1]));
break;
}
} else {
ZVAL_FALSE(ent.data);
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
}
} else if (!strcmp(name, EL_NULL)) {
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
}
} else if (!strcmp(name, EL_NULL)) {
ent.type = ST_NULL;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
INIT_PZVAL(ent.data);
ZVAL_NULL(ent.data);
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_ARRAY)) {
ent.type = ST_ARRAY;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
array_init(ent.data);
INIT_PZVAL(ent.data);
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_STRUCT)) {
ent.type = ST_STRUCT;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
array_init(ent.data);
INIT_PZVAL(ent.data);
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_VAR)) {
int i;
if (atts) for (i = 0; atts[i]; i++) {
if (!strcmp(atts[i], EL_NAME) && atts[i+1] && atts[i+1][0]) {
if (stack->varname) efree(stack->varname);
stack->varname = estrdup(atts[i+1]);
break;
}
}
} else if (!strcmp(name, EL_RECORDSET)) {
int i;
ent.type = ST_RECORDSET;
SET_STACK_VARNAME;
MAKE_STD_ZVAL(ent.data);
array_init(ent.data);
if (atts) for (i = 0; atts[i]; i++) {
if (!strcmp(atts[i], "fieldNames") && atts[i+1] && atts[i+1][0]) {
zval *tmp;
char *key;
char *p1, *p2, *endp;
i++;
endp = (char *)atts[i] + strlen(atts[i]);
p1 = (char *)atts[i];
while ((p2 = php_memnstr(p1, ",", sizeof(",")-1, endp)) != NULL) {
key = estrndup(p1, p2 - p1);
MAKE_STD_ZVAL(tmp);
array_init(tmp);
add_assoc_zval_ex(ent.data, key, p2 - p1 + 1, tmp);
p1 = p2 + sizeof(",")-1;
efree(key);
}
if (p1 <= endp) {
MAKE_STD_ZVAL(tmp);
array_init(tmp);
add_assoc_zval_ex(ent.data, p1, endp - p1 + 1, tmp);
}
break;
}
}
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_FIELD)) {
int i;
st_entry ent;
ent.type = ST_FIELD;
ent.varname = NULL;
ent.data = NULL;
if (atts) for (i = 0; atts[i]; i++) {
if (!strcmp(atts[i], EL_NAME) && atts[i+1] && atts[i+1][0]) {
st_entry *recordset;
zval **field;
if (wddx_stack_top(stack, (void**)&recordset) == SUCCESS &&
recordset->type == ST_RECORDSET &&
zend_hash_find(Z_ARRVAL_P(recordset->data), (char*)atts[i+1], strlen(atts[i+1])+1, (void**)&field) == SUCCESS) {
ent.data = *field;
}
break;
}
}
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
} else if (!strcmp(name, EL_DATETIME)) {
ent.type = ST_DATETIME;
SET_STACK_VARNAME;
ALLOC_ZVAL(ent.data);
INIT_PZVAL(ent.data);
Z_TYPE_P(ent.data) = IS_LONG;
wddx_stack_push((wddx_stack *)stack, &ent, sizeof(st_entry));
}
| 2,181 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int verify_vc_kbmode(int fd) {
int curr_mode;
/*
* Make sure we only adjust consoles in K_XLATE or K_UNICODE mode.
* Otherwise we would (likely) interfere with X11's processing of the
* key events.
*
* http://lists.freedesktop.org/archives/systemd-devel/2013-February/008573.html
*/
if (ioctl(fd, KDGKBMODE, &curr_mode) < 0)
return -errno;
return IN_SET(curr_mode, K_XLATE, K_UNICODE) ? 0 : -EBUSY;
}
Commit Message: Merge pull request #12378 from rbalint/vt-kbd-reset-check
VT kbd reset check
CWE ID: CWE-255 | static int verify_vc_kbmode(int fd) {
| 29,571 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: X11SurfaceFactory::GetAllowedGLImplementations() {
std::vector<gl::GLImplementation> impls;
impls.push_back(gl::kGLImplementationEGLGLES2);
impls.push_back(gl::kGLImplementationDesktopGL);
impls.push_back(gl::kGLImplementationOSMesaGL);
return impls;
}
Commit Message: Add ThreadChecker for Ozone X11 GPU.
Ensure Ozone X11 tests the same thread constraints we have in Ozone GBM.
BUG=none
Review-Url: https://codereview.chromium.org/2366643002
Cr-Commit-Position: refs/heads/master@{#421817}
CWE ID: CWE-284 | X11SurfaceFactory::GetAllowedGLImplementations() {
DCHECK(thread_checker_.CalledOnValidThread());
std::vector<gl::GLImplementation> impls;
impls.push_back(gl::kGLImplementationEGLGLES2);
impls.push_back(gl::kGLImplementationDesktopGL);
impls.push_back(gl::kGLImplementationOSMesaGL);
return impls;
}
| 14,880 |
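The fix above adds DCHECK(thread_checker_.CalledOnValidThread()) so the Ozone X11 surface factory enforces the same single-thread constraint as the GBM backend. Chromium's ThreadChecker is C++, but the underlying idea — remember the creating thread and assert on every later call — is easy to show with POSIX threads; everything below (thread_checker, assert_on_owner_thread) is an invented analogue, built with cc -pthread.

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    /* Minimal analogue of a ThreadChecker: record the owning thread at
     * construction and assert that all later calls happen on that thread. */
    typedef struct { pthread_t owner; } thread_checker;

    static void thread_checker_init(thread_checker *tc) { tc->owner = pthread_self(); }
    static void assert_on_owner_thread(const thread_checker *tc) {
        assert(pthread_equal(tc->owner, pthread_self()) && "called on wrong thread");
    }

    static thread_checker g_checker;

    static void *worker(void *arg) {
        (void)arg;
        /* Uncommenting the next line would trip the assertion: */
        /* assert_on_owner_thread(&g_checker); */
        return NULL;
    }

    int main(void) {
        pthread_t t;
        thread_checker_init(&g_checker);
        assert_on_owner_thread(&g_checker);   /* fine: same thread that initialized it */
        pthread_create(&t, NULL, worker, NULL);
        pthread_join(t, NULL);
        puts("owner-thread check passed");
        return 0;
    }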
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void cipso_v4_sock_delattr(struct sock *sk)
{
int hdr_delta;
struct ip_options *opt;
struct inet_sock *sk_inet;
sk_inet = inet_sk(sk);
opt = sk_inet->opt;
if (opt == NULL || opt->cipso == 0)
return;
hdr_delta = cipso_v4_delopt(&sk_inet->opt);
if (sk_inet->is_icsk && hdr_delta > 0) {
struct inet_connection_sock *sk_conn = inet_csk(sk);
sk_conn->icsk_ext_hdr_len -= hdr_delta;
sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie);
}
}
Commit Message: inet: add RCU protection to inet->opt
We lack proper synchronization to manipulate inet->opt ip_options
Problem is ip_make_skb() calls ip_setup_cork() and
ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options),
without any protection against another thread manipulating inet->opt.
Another thread can change inet->opt pointer and free old one under us.
Use RCU to protect inet->opt (changed to inet->inet_opt).
Instead of handling atomic refcounts, just copy ip_options when
necessary, to avoid cache line dirtying.
We cant insert an rcu_head in struct ip_options since its included in
skb->cb[], so this patch is large because I had to introduce a new
ip_options_rcu structure.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
CWE ID: CWE-362 | void cipso_v4_sock_delattr(struct sock *sk)
{
int hdr_delta;
struct ip_options_rcu *opt;
struct inet_sock *sk_inet;
sk_inet = inet_sk(sk);
opt = rcu_dereference_protected(sk_inet->inet_opt, 1);
if (opt == NULL || opt->opt.cipso == 0)
return;
hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
if (sk_inet->is_icsk && hdr_delta > 0) {
struct inet_connection_sock *sk_conn = inet_csk(sk);
sk_conn->icsk_ext_hdr_len -= hdr_delta;
sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie);
}
}
| 11,305 |
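The kernel patch above moves the socket's IP options behind an RCU-managed pointer (inet->inet_opt): lockless readers dereference it inside rcu_read_lock()/rcu_read_unlock(), while paths that already hold the socket lock — like cipso_v4_sock_delattr() — use rcu_dereference_protected(). The fragment below sketches that publish/read/replace pattern with the standard kernel primitives; it is an illustrative kernel-style fragment with invented struct names, not a buildable userspace program.

    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    /* An options blob hung off a socket-like object via an RCU pointer. */
    struct opts_rcu {
        struct rcu_head rcu;
        int optlen;
        unsigned char data[40];
    };

    struct fake_sock {
        spinlock_t lock;                       /* stands in for the socket lock */
        struct opts_rcu __rcu *opts;
    };

    /* Reader: no lock taken, only an RCU read-side critical section. */
    static int opts_len(struct fake_sock *sk)
    {
        struct opts_rcu *o;
        int len = 0;

        rcu_read_lock();
        o = rcu_dereference(sk->opts);
        if (o)
            len = o->optlen;
        rcu_read_unlock();
        return len;
    }

    /* Updater: holds the lock, so rcu_dereference_protected() is legitimate;
     * publish the new blob, then free the old one after a grace period. */
    static void opts_replace(struct fake_sock *sk, struct opts_rcu *new_opts)
    {
        struct opts_rcu *old;

        spin_lock(&sk->lock);
        old = rcu_dereference_protected(sk->opts, lockdep_is_held(&sk->lock));
        rcu_assign_pointer(sk->opts, new_opts);
        spin_unlock(&sk->lock);

        if (old)
            kfree_rcu(old, rcu);
    }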
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: cJSON *cJSON_DetachItemFromObject( cJSON *object, const char *string )
{
int i = 0;
cJSON *c = object->child;
while ( c && cJSON_strcasecmp( c->string, string ) ) {
++i;
c = c->next;
}
if ( c )
return cJSON_DetachItemFromArray( object, i );
return 0;
}
Commit Message: Fix a buffer overflow / heap corruption issue that could occur if a
malformed JSON string was passed on the control channel. This issue,
present in the cJSON library, was already fixed upstream, so was
addressed here in iperf3 by importing a newer version of cJSON (plus
local ESnet modifications).
Discovered and reported by Dave McDaniel, Cisco Talos.
Based on a patch by @dopheide-esnet, with input from @DaveGamble.
Cross-references: TALOS-CAN-0164, ESNET-SECADV-2016-0001,
CVE-2016-4303
(cherry picked from commit ed94082be27d971a5e1b08b666e2c217cf470a40)
Signed-off-by: Bruce A. Mah <bmah@es.net>
CWE ID: CWE-119 | cJSON *cJSON_DetachItemFromObject( cJSON *object, const char *string )
| 15,885 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void V8RecursionScope::didLeaveScriptContext()
{
Microtask::performCheckpoint();
V8PerIsolateData::from(m_isolate)->runEndOfScopeTasks();
}
Commit Message: Correctly keep track of isolates for microtask execution
BUG=487155
R=haraken@chromium.org
Review URL: https://codereview.chromium.org/1161823002
git-svn-id: svn://svn.chromium.org/blink/trunk@195985 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: CWE-254 | void V8RecursionScope::didLeaveScriptContext()
{
Microtask::performCheckpoint(m_isolate);
V8PerIsolateData::from(m_isolate)->runEndOfScopeTasks();
}
| 25,166 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
struct xt_table_info *newinfo,
unsigned int *size,
const unsigned char *base,
const unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
const char *name)
{
struct xt_entry_match *ematch;
struct xt_entry_target *t;
struct xt_target *target;
unsigned int entry_offset;
unsigned int j;
int ret, off, h;
duprintf("check_compat_entry_size_and_hooks %p\n", e);
if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
(unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
duprintf("Bad offset %p, limit = %p\n", e, limit);
return -EINVAL;
}
if (e->next_offset < sizeof(struct compat_ip6t_entry) +
sizeof(struct compat_xt_entry_target)) {
duprintf("checking: element %p size %u\n",
e, e->next_offset);
return -EINVAL;
}
/* For purposes of check_entry casting the compat entry is fine */
ret = check_entry((struct ip6t_entry *)e);
if (ret)
return ret;
off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
entry_offset = (void *)e - (void *)base;
j = 0;
xt_ematch_foreach(ematch, e) {
ret = compat_find_calc_match(ematch, name, &e->ipv6, &off);
if (ret != 0)
goto release_matches;
++j;
}
t = compat_ip6t_get_target(e);
target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
t->u.user.revision);
if (IS_ERR(target)) {
duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
t->u.user.name);
ret = PTR_ERR(target);
goto release_matches;
}
t->u.kernel.target = target;
off += xt_compat_target_offset(target);
*size += off;
ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
if (ret)
goto out;
/* Check hooks & underflows */
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
if ((unsigned char *)e - base == hook_entries[h])
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h])
newinfo->underflow[h] = underflows[h];
}
/* Clear counters and comefrom */
memset(&e->counters, 0, sizeof(e->counters));
e->comefrom = 0;
return 0;
out:
module_put(t->u.kernel.target->me);
release_matches:
xt_ematch_foreach(ematch, e) {
if (j-- == 0)
break;
module_put(ematch->u.kernel.match->me);
}
return ret;
}
Commit Message: netfilter: x_tables: make sure e->next_offset covers remaining blob size
Otherwise this function may read data beyond the ruleset blob.
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
CWE ID: CWE-119 | check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
struct xt_table_info *newinfo,
unsigned int *size,
const unsigned char *base,
const unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
const char *name)
{
struct xt_entry_match *ematch;
struct xt_entry_target *t;
struct xt_target *target;
unsigned int entry_offset;
unsigned int j;
int ret, off, h;
duprintf("check_compat_entry_size_and_hooks %p\n", e);
if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
(unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
(unsigned char *)e + e->next_offset > limit) {
duprintf("Bad offset %p, limit = %p\n", e, limit);
return -EINVAL;
}
if (e->next_offset < sizeof(struct compat_ip6t_entry) +
sizeof(struct compat_xt_entry_target)) {
duprintf("checking: element %p size %u\n",
e, e->next_offset);
return -EINVAL;
}
/* For purposes of check_entry casting the compat entry is fine */
ret = check_entry((struct ip6t_entry *)e);
if (ret)
return ret;
off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
entry_offset = (void *)e - (void *)base;
j = 0;
xt_ematch_foreach(ematch, e) {
ret = compat_find_calc_match(ematch, name, &e->ipv6, &off);
if (ret != 0)
goto release_matches;
++j;
}
t = compat_ip6t_get_target(e);
target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
t->u.user.revision);
if (IS_ERR(target)) {
duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
t->u.user.name);
ret = PTR_ERR(target);
goto release_matches;
}
t->u.kernel.target = target;
off += xt_compat_target_offset(target);
*size += off;
ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
if (ret)
goto out;
/* Check hooks & underflows */
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
if ((unsigned char *)e - base == hook_entries[h])
newinfo->hook_entry[h] = hook_entries[h];
if ((unsigned char *)e - base == underflows[h])
newinfo->underflow[h] = underflows[h];
}
/* Clear counters and comefrom */
memset(&e->counters, 0, sizeof(e->counters));
e->comefrom = 0;
return 0;
out:
module_put(t->u.kernel.target->me);
release_matches:
xt_ematch_foreach(ematch, e) {
if (j-- == 0)
break;
module_put(ematch->u.kernel.match->me);
}
return ret;
}
| 1,903 |
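The one-line addition above — rejecting entries whose self-declared e->next_offset would step past limit — is what keeps the table walk inside the user-supplied blob. The standalone sketch below applies the same validation to a generic variable-length record format; the struct rec layout and walk() function are made up for the demo.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Made-up variable-length record: the header claims where the next one starts. */
    struct rec {
        uint16_t next_offset;   /* distance from the start of this record */
        uint16_t payload_len;
    };

    /* Walk records in an untrusted blob, rejecting any next_offset that is too
     * small to cover the header or that would step outside [base, limit). */
    static int walk(const unsigned char *base, size_t blob_len) {
        const unsigned char *limit = base + blob_len;
        const unsigned char *p = base;
        int count = 0;

        while (p < limit) {
            struct rec r;
            if ((size_t)(limit - p) < sizeof(r))
                return -1;                              /* truncated header */
            memcpy(&r, p, sizeof(r));
            if (r.next_offset < sizeof(r) ||            /* must at least cover the header */
                r.next_offset > (size_t)(limit - p))    /* must stay inside the blob */
                return -1;
            count++;
            p += r.next_offset;
        }
        return count;
    }

    int main(void) {
        /* First record claims its successor lies far beyond the 8-byte blob. */
        unsigned char blob[8] = { 0x40, 0x00, 0x00, 0x00,
                                  0x04, 0x00, 0x00, 0x00 };
        printf("walk: %d\n", walk(blob, sizeof(blob)));  /* prints -1: rejected */
        return 0;
    }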
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: get_uncompressed_data(struct archive_read *a, const void **buff, size_t size,
size_t minimum)
{
struct _7zip *zip = (struct _7zip *)a->format->data;
ssize_t bytes_avail;
if (zip->codec == _7Z_COPY && zip->codec2 == (unsigned long)-1) {
/* Copy mode. */
/*
* Note: '1' here is a performance optimization.
* Recall that the decompression layer returns a count of
* available bytes; asking for more than that forces the
* decompressor to combine reads by copying data.
*/
*buff = __archive_read_ahead(a, 1, &bytes_avail);
if (bytes_avail <= 0) {
archive_set_error(&a->archive,
ARCHIVE_ERRNO_FILE_FORMAT,
"Truncated 7-Zip file data");
return (ARCHIVE_FATAL);
}
if ((size_t)bytes_avail >
zip->uncompressed_buffer_bytes_remaining)
bytes_avail = (ssize_t)
zip->uncompressed_buffer_bytes_remaining;
if ((size_t)bytes_avail > size)
bytes_avail = (ssize_t)size;
zip->pack_stream_bytes_unconsumed = bytes_avail;
} else if (zip->uncompressed_buffer_pointer == NULL) {
/* Decompression has failed. */
archive_set_error(&(a->archive),
ARCHIVE_ERRNO_MISC, "Damaged 7-Zip archive");
return (ARCHIVE_FATAL);
} else {
/* Packed mode. */
if (minimum > zip->uncompressed_buffer_bytes_remaining) {
/*
* If remaining uncompressed data size is less than
* the minimum size, fill the buffer up to the
* minimum size.
*/
if (extract_pack_stream(a, minimum) < 0)
return (ARCHIVE_FATAL);
}
if (size > zip->uncompressed_buffer_bytes_remaining)
bytes_avail = (ssize_t)
zip->uncompressed_buffer_bytes_remaining;
else
bytes_avail = (ssize_t)size;
*buff = zip->uncompressed_buffer_pointer;
zip->uncompressed_buffer_pointer += bytes_avail;
}
zip->uncompressed_buffer_bytes_remaining -= bytes_avail;
return (bytes_avail);
}
Commit Message: 7zip: fix crash when parsing certain archives
Fuzzing with CRCs disabled revealed that a call to get_uncompressed_data()
would sometimes fail to return at least 'minimum' bytes. This can cause
the crc32() invocation in header_bytes to read off into invalid memory.
A specially crafted archive can use this to cause a crash.
An ASAN trace is below, but ASAN is not required - an uninstrumented
binary will also crash.
==7719==ERROR: AddressSanitizer: SEGV on unknown address 0x631000040000 (pc 0x7fbdb3b3ec1d bp 0x7ffe77a51310 sp 0x7ffe77a51150 T0)
==7719==The signal is caused by a READ memory access.
#0 0x7fbdb3b3ec1c in crc32_z (/lib/x86_64-linux-gnu/libz.so.1+0x2c1c)
#1 0x84f5eb in header_bytes (/tmp/libarchive/bsdtar+0x84f5eb)
#2 0x856156 in read_Header (/tmp/libarchive/bsdtar+0x856156)
#3 0x84e134 in slurp_central_directory (/tmp/libarchive/bsdtar+0x84e134)
#4 0x849690 in archive_read_format_7zip_read_header (/tmp/libarchive/bsdtar+0x849690)
#5 0x5713b7 in _archive_read_next_header2 (/tmp/libarchive/bsdtar+0x5713b7)
#6 0x570e63 in _archive_read_next_header (/tmp/libarchive/bsdtar+0x570e63)
#7 0x6f08bd in archive_read_next_header (/tmp/libarchive/bsdtar+0x6f08bd)
#8 0x52373f in read_archive (/tmp/libarchive/bsdtar+0x52373f)
#9 0x5257be in tar_mode_x (/tmp/libarchive/bsdtar+0x5257be)
#10 0x51daeb in main (/tmp/libarchive/bsdtar+0x51daeb)
#11 0x7fbdb27cab96 in __libc_start_main /build/glibc-OTsEL5/glibc-2.27/csu/../csu/libc-start.c:310
#12 0x41dd09 in _start (/tmp/libarchive/bsdtar+0x41dd09)
This was primarily done with afl and FairFuzz. Some early corpus entries
may have been generated by qsym.
CWE ID: CWE-125 | get_uncompressed_data(struct archive_read *a, const void **buff, size_t size,
size_t minimum)
{
struct _7zip *zip = (struct _7zip *)a->format->data;
ssize_t bytes_avail;
if (zip->codec == _7Z_COPY && zip->codec2 == (unsigned long)-1) {
/* Copy mode. */
*buff = __archive_read_ahead(a, minimum, &bytes_avail);
if (bytes_avail <= 0) {
archive_set_error(&a->archive,
ARCHIVE_ERRNO_FILE_FORMAT,
"Truncated 7-Zip file data");
return (ARCHIVE_FATAL);
}
if ((size_t)bytes_avail >
zip->uncompressed_buffer_bytes_remaining)
bytes_avail = (ssize_t)
zip->uncompressed_buffer_bytes_remaining;
if ((size_t)bytes_avail > size)
bytes_avail = (ssize_t)size;
zip->pack_stream_bytes_unconsumed = bytes_avail;
} else if (zip->uncompressed_buffer_pointer == NULL) {
/* Decompression has failed. */
archive_set_error(&(a->archive),
ARCHIVE_ERRNO_MISC, "Damaged 7-Zip archive");
return (ARCHIVE_FATAL);
} else {
/* Packed mode. */
if (minimum > zip->uncompressed_buffer_bytes_remaining) {
/*
* If remaining uncompressed data size is less than
* the minimum size, fill the buffer up to the
* minimum size.
*/
if (extract_pack_stream(a, minimum) < 0)
return (ARCHIVE_FATAL);
}
if (size > zip->uncompressed_buffer_bytes_remaining)
bytes_avail = (ssize_t)
zip->uncompressed_buffer_bytes_remaining;
else
bytes_avail = (ssize_t)size;
*buff = zip->uncompressed_buffer_pointer;
zip->uncompressed_buffer_pointer += bytes_avail;
}
zip->uncompressed_buffer_bytes_remaining -= bytes_avail;
return (bytes_avail);
}
| 651 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: xps_select_font_encoding(xps_font_t *font, int idx)
{
byte *cmapdata, *entry;
int pid, eid;
if (idx < 0 || idx >= font->cmapsubcount)
return;
cmapdata = font->data + font->cmaptable;
entry = cmapdata + 4 + idx * 8;
pid = u16(entry + 0);
eid = u16(entry + 2);
font->cmapsubtable = font->cmaptable + u32(entry + 4);
font->usepua = (pid == 3 && eid == 0);
}
Commit Message:
CWE ID: CWE-125 | xps_select_font_encoding(xps_font_t *font, int idx)
{
byte *cmapdata, *entry;
int pid, eid;
if (idx < 0 || idx >= font->cmapsubcount)
return 0;
cmapdata = font->data + font->cmaptable;
entry = cmapdata + 4 + idx * 8;
pid = u16(entry + 0);
eid = u16(entry + 2);
font->cmapsubtable = font->cmaptable + u32(entry + 4);
if (font->cmapsubtable >= font->length) {
font->cmapsubtable = 0;
return 0;
}
font->usepua = (pid == 3 && eid == 0);
return 1;
}
| 6,018 |
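The corrected xps_select_font_encoding() treats the cmap subtable offset read from the font file as untrusted: it now checks the computed offset against font->length and reports failure instead of leaving a pointer past the end of the data. Below is a compact standalone version of that "offset read from the file must stay inside the file" check; the field layout is simplified and select_subtable()/u32be() are invented names.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t u32be(const unsigned char *p) {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }

    /* Read a table offset from an untrusted file image and refuse to use it
     * unless it lands inside the mapped data (the pattern of the fix above). */
    static int select_subtable(const unsigned char *file, size_t file_len,
                               size_t entry_off, size_t *subtable_off) {
        if (entry_off + 4 > file_len)
            return 0;                                   /* entry itself out of range */
        size_t off = u32be(file + entry_off);
        if (off >= file_len)
            return 0;                                   /* offset points past the file */
        *subtable_off = off;
        return 1;
    }

    int main(void) {
        unsigned char font[16] = { 0 };
        font[4] = 0x7F; font[5] = 0xFF; font[6] = 0xFF; font[7] = 0xFF;  /* bogus offset */
        size_t off;
        printf("entry 0: %s\n", select_subtable(font, sizeof(font), 0, &off) ? "ok" : "rejected");
        printf("entry 4: %s\n", select_subtable(font, sizeof(font), 4, &off) ? "ok" : "rejected");
        return 0;
    }

Returning a success flag, as the fixed function now does, also forces callers to handle the rejected case rather than silently using a stale or out-of-range subtable pointer.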
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void ScrollHitTestDisplayItem::Record(
GraphicsContext& context,
const DisplayItemClient& client,
DisplayItem::Type type,
scoped_refptr<const TransformPaintPropertyNode> scroll_offset_node) {
PaintController& paint_controller = context.GetPaintController();
DCHECK_NE(paint_controller.CurrentPaintChunkProperties().Transform(),
scroll_offset_node.get());
if (paint_controller.DisplayItemConstructionIsDisabled())
return;
paint_controller.CreateAndAppend<ScrollHitTestDisplayItem>(
client, type, std::move(scroll_offset_node));
}
Commit Message: Reland "[CI] Make paint property nodes non-ref-counted"
This reverts commit 887383b30842d9d9006e11bb6932660a3cb5b1b7.
Reason for revert: Retry in M69.
Original change's description:
> Revert "[CI] Make paint property nodes non-ref-counted"
>
> This reverts commit 70fc0b018c9517558b7aa2be00edf2debb449123.
>
> Reason for revert: Caused bugs found by clusterfuzz
>
> Original change's description:
> > [CI] Make paint property nodes non-ref-counted
> >
> > Now all paint property nodes are owned by ObjectPaintProperties
> > (and LocalFrameView temporarily before removing non-RLS mode).
> > Others just use raw pointers or references.
> >
> > Bug: 833496
> > Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
> > Change-Id: I2d544fe153bb94698623248748df63c8aa2081ae
> > Reviewed-on: https://chromium-review.googlesource.com/1031101
> > Reviewed-by: Tien-Ren Chen <trchen@chromium.org>
> > Commit-Queue: Xianzhu Wang <wangxianzhu@chromium.org>
> > Cr-Commit-Position: refs/heads/master@{#554626}
>
> TBR=wangxianzhu@chromium.org,trchen@chromium.org,chrishtr@chromium.org
>
> Change-Id: I02bb50d6744cb81a797246a0116b677e80a3c69f
> No-Presubmit: true
> No-Tree-Checks: true
> No-Try: true
> Bug: 833496,837932,837943
> Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
> Reviewed-on: https://chromium-review.googlesource.com/1034292
> Reviewed-by: Xianzhu Wang <wangxianzhu@chromium.org>
> Commit-Queue: Xianzhu Wang <wangxianzhu@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#554653}
TBR=wangxianzhu@chromium.org,trchen@chromium.org,chrishtr@chromium.org
# Not skipping CQ checks because original CL landed > 1 day ago.
Bug: 833496, 837932, 837943
Change-Id: I0b4ef70db1f1f211ba97c30d617225355c750992
Cq-Include-Trybots: master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_layout_tests_slimming_paint_v2
Reviewed-on: https://chromium-review.googlesource.com/1083491
Commit-Queue: Xianzhu Wang <wangxianzhu@chromium.org>
Reviewed-by: Xianzhu Wang <wangxianzhu@chromium.org>
Cr-Commit-Position: refs/heads/master@{#563930}
CWE ID: | void ScrollHitTestDisplayItem::Record(
GraphicsContext& context,
const DisplayItemClient& client,
DisplayItem::Type type,
const TransformPaintPropertyNode& scroll_offset_node) {
PaintController& paint_controller = context.GetPaintController();
DCHECK_NE(paint_controller.CurrentPaintChunkProperties().Transform(),
&scroll_offset_node);
if (paint_controller.DisplayItemConstructionIsDisabled())
return;
paint_controller.CreateAndAppend<ScrollHitTestDisplayItem>(
client, type, scroll_offset_node);
}
| 14,631 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static av_cold int vqa_decode_init(AVCodecContext *avctx)
{
VqaContext *s = avctx->priv_data;
int i, j, codebook_index;
s->avctx = avctx;
avctx->pix_fmt = PIX_FMT_PAL8;
/* make sure the extradata made it */
if (s->avctx->extradata_size != VQA_HEADER_SIZE) {
av_log(s->avctx, AV_LOG_ERROR, " VQA video: expected extradata size of %d\n", VQA_HEADER_SIZE);
return -1;
}
/* load up the VQA parameters from the header */
s->vqa_version = s->avctx->extradata[0];
s->width = AV_RL16(&s->avctx->extradata[6]);
s->height = AV_RL16(&s->avctx->extradata[8]);
if(av_image_check_size(s->width, s->height, 0, avctx)){
s->width= s->height= 0;
return -1;
}
s->vector_width = s->avctx->extradata[10];
s->vector_height = s->avctx->extradata[11];
s->partial_count = s->partial_countdown = s->avctx->extradata[13];
/* the vector dimensions have to meet very stringent requirements */
if ((s->vector_width != 4) ||
((s->vector_height != 2) && (s->vector_height != 4))) {
/* return without further initialization */
return -1;
}
/* allocate codebooks */
s->codebook_size = MAX_CODEBOOK_SIZE;
s->codebook = av_malloc(s->codebook_size);
/* allocate decode buffer */
s->decode_buffer_size = (s->width / s->vector_width) *
(s->height / s->vector_height) * 2;
s->decode_buffer = av_malloc(s->decode_buffer_size);
if (!s->decode_buffer)
goto fail;
/* initialize the solid-color vectors */
if (s->vector_height == 4) {
codebook_index = 0xFF00 * 16;
for (i = 0; i < 256; i++)
for (j = 0; j < 16; j++)
s->codebook[codebook_index++] = i;
} else {
codebook_index = 0xF00 * 8;
for (i = 0; i < 256; i++)
for (j = 0; j < 8; j++)
s->codebook[codebook_index++] = i;
}
s->next_codebook_buffer_index = 0;
s->frame.data[0] = NULL;
return 0;
fail:
av_freep(&s->codebook);
av_freep(&s->next_codebook_buffer);
av_freep(&s->decode_buffer);
return AVERROR(ENOMEM);
}
Commit Message:
CWE ID: CWE-119 | static av_cold int vqa_decode_init(AVCodecContext *avctx)
{
VqaContext *s = avctx->priv_data;
int i, j, codebook_index;
s->avctx = avctx;
avctx->pix_fmt = PIX_FMT_PAL8;
/* make sure the extradata made it */
if (s->avctx->extradata_size != VQA_HEADER_SIZE) {
av_log(s->avctx, AV_LOG_ERROR, " VQA video: expected extradata size of %d\n", VQA_HEADER_SIZE);
return -1;
}
/* load up the VQA parameters from the header */
s->vqa_version = s->avctx->extradata[0];
s->width = AV_RL16(&s->avctx->extradata[6]);
s->height = AV_RL16(&s->avctx->extradata[8]);
if(av_image_check_size(s->width, s->height, 0, avctx)){
s->width= s->height= 0;
return -1;
}
s->vector_width = s->avctx->extradata[10];
s->vector_height = s->avctx->extradata[11];
s->partial_count = s->partial_countdown = s->avctx->extradata[13];
/* the vector dimensions have to meet very stringent requirements */
if ((s->vector_width != 4) ||
((s->vector_height != 2) && (s->vector_height != 4))) {
/* return without further initialization */
return -1;
}
if (s->width & (s->vector_width - 1) ||
s->height & (s->vector_height - 1)) {
av_log(avctx, AV_LOG_ERROR, "Image size not multiple of block size\n");
return AVERROR_INVALIDDATA;
}
/* allocate codebooks */
s->codebook_size = MAX_CODEBOOK_SIZE;
s->codebook = av_malloc(s->codebook_size);
/* allocate decode buffer */
s->decode_buffer_size = (s->width / s->vector_width) *
(s->height / s->vector_height) * 2;
s->decode_buffer = av_malloc(s->decode_buffer_size);
if (!s->decode_buffer)
goto fail;
/* initialize the solid-color vectors */
if (s->vector_height == 4) {
codebook_index = 0xFF00 * 16;
for (i = 0; i < 256; i++)
for (j = 0; j < 16; j++)
s->codebook[codebook_index++] = i;
} else {
codebook_index = 0xF00 * 8;
for (i = 0; i < 256; i++)
for (j = 0; j < 8; j++)
s->codebook[codebook_index++] = i;
}
s->next_codebook_buffer_index = 0;
s->frame.data[0] = NULL;
return 0;
fail:
av_freep(&s->codebook);
av_freep(&s->next_codebook_buffer);
av_freep(&s->decode_buffer);
return AVERROR(ENOMEM);
}
| 24,269 |
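The added guard rejects frame dimensions that the block size does not divide evenly, so the per-block decode loops can never run past the buffer sized from those dimensions. A stand-alone sketch of the same validation, with illustrative names rather than the FFmpeg code:

#include <cstdint>

// Returns true only if the frame can be tiled exactly by the block size,
// so loops that advance one block at a time stay inside the frame buffer.
static bool dimensions_are_block_aligned(uint32_t width, uint32_t height,
                                         uint32_t block_w, uint32_t block_h)
{
    if (block_w == 0 || block_h == 0)
        return false;
    return (width % block_w) == 0 && (height % block_h) == 0;
}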
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: DidFinishNavigation(NavigationHandle* handle) {
if (handle->GetFrameTreeNodeId() != frame_tree_node_id_)
return;
if (!handle->HasCommitted())
return;
if (handle->GetRenderFrameHost()->GetSiteInstance() != parent_site_instance_)
return;
if (!handle->GetURL().IsAboutBlank())
return;
if (!handle->GetRenderFrameHost()->PrepareForInnerWebContentsAttach()) {
filter_->ResumeAttachOrDestroy(element_instance_id_,
MSG_ROUTING_NONE /* no plugin frame */);
}
base::PostTaskWithTraits(
FROM_HERE, {BrowserThread::UI},
base::BindOnce(&ExtensionsGuestViewMessageFilter::ResumeAttachOrDestroy,
filter_, element_instance_id_,
handle->GetRenderFrameHost()->GetRoutingID()));
}
Commit Message: [GuestView] - Introduce MimeHandlerViewAttachHelper
This CL is for the most part a mechanical change which extracts almost
all the frame-based MimeHandlerView code out of
ExtensionsGuestViewMessageFilter. This change both removes the current
clutter from EGVMF as well as fixes a race introduced when the
frame-based logic was added to EGVMF. The reason for the race was that
EGVMF is destroyed on the IO thread but all accesses to it (for
frame-based MHV) are from the UI thread.
TBR=avi@chromium.org,lazyboy@chromium.org
Bug: 659750, 896679, 911161, 918861
Change-Id: I6474b870e4d56daa68be03637bb633665d9f9dda
Reviewed-on: https://chromium-review.googlesource.com/c/1401451
Commit-Queue: Ehsan Karamad <ekaramad@chromium.org>
Reviewed-by: James MacLean <wjmaclean@chromium.org>
Reviewed-by: Ehsan Karamad <ekaramad@chromium.org>
Cr-Commit-Position: refs/heads/master@{#621155}
CWE ID: CWE-362 | DidFinishNavigation(NavigationHandle* handle) {
| 15,129 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: bool PPB_ImageData_Impl::Init(PP_ImageDataFormat format,
int width, int height,
bool init_to_zero) {
if (!IsImageDataFormatSupported(format))
return false; // Only support this one format for now.
if (width <= 0 || height <= 0)
return false;
if (static_cast<int64>(width) * static_cast<int64>(height) * 4 >=
std::numeric_limits<int32>::max())
return false; // Prevent overflow of signed 32-bit ints.
format_ = format;
width_ = width;
height_ = height;
return backend_->Init(this, format, width, height, init_to_zero);
}
Commit Message: Security fix: integer overflow on checking image size
Test is left in another CL (codereview.chromium.org/11274036) to avoid conflict there. Hope it's fine.
BUG=160926
Review URL: https://chromiumcodereview.appspot.com/11410081
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@167882 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-190 | bool PPB_ImageData_Impl::Init(PP_ImageDataFormat format,
int width, int height,
bool init_to_zero) {
if (!IsImageDataFormatSupported(format))
return false; // Only support this one format for now.
if (width <= 0 || height <= 0)
return false;
if (static_cast<int64>(width) * static_cast<int64>(height) >=
std::numeric_limits<int32>::max() / 4)
return false; // Prevent overflow of signed 32-bit ints.
format_ = format;
width_ = width;
height_ = height;
return backend_->Init(this, format, width, height, init_to_zero);
}
| 3,255 |
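The one-line change turns width * height * 4 >= INT32_MAX into width * height >= INT32_MAX / 4, so the comparison itself can no longer overflow the wide type. The general pattern (divide the limit instead of multiplying the untrusted operands) sketched in isolation, assuming the dimensions were already checked as positive:

#include <cstdint>
#include <limits>

// Checks that width * height * bytes_per_pixel fits in a signed 32-bit int
// without the check itself overflowing: divide the limit rather than
// multiplying the untrusted operands.
static bool image_size_fits_int32(int32_t width, int32_t height,
                                  int32_t bytes_per_pixel)
{
    if (width <= 0 || height <= 0 || bytes_per_pixel <= 0)
        return false;
    const int64_t limit =
        std::numeric_limits<int32_t>::max() / bytes_per_pixel;
    return static_cast<int64_t>(width) * height < limit;
}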
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: DevToolsSession::DevToolsSession(DevToolsAgentHostImpl* agent_host,
DevToolsAgentHostClient* client)
: binding_(this),
agent_host_(agent_host),
client_(client),
process_(nullptr),
host_(nullptr),
dispatcher_(new protocol::UberDispatcher(this)),
weak_factory_(this) {
dispatcher_->setFallThroughForNotFound(true);
}
Commit Message: DevTools: speculative fix for crash in NetworkHandler::Disable
This keeps BrowserContext* and StoragePartition* instead of
RenderProcessHost* in an attemp to resolve UAF of RenderProcessHost
upon closure of DevTools front-end.
Bug: 801117, 783067, 780694
Change-Id: I6c2cca60cc0c29f0949d189cf918769059f80c1b
Reviewed-on: https://chromium-review.googlesource.com/876657
Commit-Queue: Andrey Kosyakov <caseq@chromium.org>
Reviewed-by: Dmitry Gozman <dgozman@chromium.org>
Cr-Commit-Position: refs/heads/master@{#531157}
CWE ID: CWE-20 | DevToolsSession::DevToolsSession(DevToolsAgentHostImpl* agent_host,
DevToolsAgentHostClient* client)
: binding_(this),
agent_host_(agent_host),
client_(client),
process_host_id_(ChildProcessHost::kInvalidUniqueID),
host_(nullptr),
dispatcher_(new protocol::UberDispatcher(this)),
weak_factory_(this) {
dispatcher_->setFallThroughForNotFound(true);
}
| 14,713 |
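The fix stops caching a raw RenderProcessHost pointer and keeps an ID that is resolved to a live object only at the moment of use; a stale ID then fails the lookup instead of dereferencing freed memory. A small sketch of that pattern against an invented registry, not Chromium's real API:

#include <map>
#include <string>

struct Host { std::string name; };

// Hypothetical registry of live hosts; an entry is erased when its Host is
// destroyed, so stale IDs simply fail to resolve.
static std::map<int, Host*>& registry()
{
    static std::map<int, Host*> r;
    return r;
}

class Session {
public:
    explicit Session(int host_id) : host_id_(host_id) {}

    void Use() {
        auto it = registry().find(host_id_);
        if (it == registry().end())
            return;               // host already gone: no use-after-free possible
        Host* host = it->second;  // safe to use for the duration of this call
        (void)host;
    }

private:
    int host_id_;  // store an ID, never a raw pointer that can dangle
};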
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
{
struct mnt_namespace *new_ns;
struct ucounts *ucounts;
int ret;
ucounts = inc_mnt_namespaces(user_ns);
if (!ucounts)
return ERR_PTR(-ENOSPC);
new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
if (!new_ns) {
dec_mnt_namespaces(ucounts);
return ERR_PTR(-ENOMEM);
}
ret = ns_alloc_inum(&new_ns->ns);
if (ret) {
kfree(new_ns);
dec_mnt_namespaces(ucounts);
return ERR_PTR(ret);
}
new_ns->ns.ops = &mntns_operations;
new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
atomic_set(&new_ns->count, 1);
new_ns->root = NULL;
INIT_LIST_HEAD(&new_ns->list);
init_waitqueue_head(&new_ns->poll);
new_ns->event = 0;
new_ns->user_ns = get_user_ns(user_ns);
new_ns->ucounts = ucounts;
return new_ns;
}
Commit Message: mnt: Add a per mount namespace limit on the number of mounts
CAI Qian <caiqian@redhat.com> pointed out that the semantics
of shared subtrees make it possible to create an exponentially
increasing number of mounts in a mount namespace.
mkdir /tmp/1 /tmp/2
mount --make-rshared /
for i in $(seq 1 20) ; do mount --bind /tmp/1 /tmp/2 ; done
Will create create 2^20 or 1048576 mounts, which is a practical problem
as some people have managed to hit this by accident.
As such CVE-2016-6213 was assigned.
Ian Kent <raven@themaw.net> described the situation for autofs users
as follows:
> The number of mounts for direct mount maps is usually not very large because of
> the way they are implemented, large direct mount maps can have performance
> problems. There can be anywhere from a few (likely case a few hundred) to less
> than 10000, plus mounts that have been triggered and not yet expired.
>
> Indirect mounts have one autofs mount at the root plus the number of mounts that
> have been triggered and not yet expired.
>
> The number of autofs indirect map entries can range from a few to the common
> case of several thousand and in rare cases up to between 30000 and 50000. I've
> not heard of people with maps larger than 50000 entries.
>
> The larger the number of map entries the greater the possibility for a large
> number of active mounts so it's not hard to expect cases of a 1000 or somewhat
> more active mounts.
So I am setting the default number of mounts allowed per mount
namespace at 100,000. This is more than enough for any use case I
know of, but small enough to quickly stop an exponential increase
in mounts. Which should be perfect to catch misconfigurations and
malfunctioning programs.
For anyone who needs a higher limit this can be changed by writing
to the new /proc/sys/fs/mount-max sysctl.
Tested-by: CAI Qian <caiqian@redhat.com>
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
CWE ID: CWE-400 | static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
{
struct mnt_namespace *new_ns;
struct ucounts *ucounts;
int ret;
ucounts = inc_mnt_namespaces(user_ns);
if (!ucounts)
return ERR_PTR(-ENOSPC);
new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
if (!new_ns) {
dec_mnt_namespaces(ucounts);
return ERR_PTR(-ENOMEM);
}
ret = ns_alloc_inum(&new_ns->ns);
if (ret) {
kfree(new_ns);
dec_mnt_namespaces(ucounts);
return ERR_PTR(ret);
}
new_ns->ns.ops = &mntns_operations;
new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
atomic_set(&new_ns->count, 1);
new_ns->root = NULL;
INIT_LIST_HEAD(&new_ns->list);
init_waitqueue_head(&new_ns->poll);
new_ns->event = 0;
new_ns->user_ns = get_user_ns(user_ns);
new_ns->ucounts = ucounts;
new_ns->mounts = 0;
new_ns->pending_mounts = 0;
return new_ns;
}
| 3,080 |
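The initialisation of mounts and pending_mounts wires the new namespace into the per-namespace accounting behind /proc/sys/fs/mount-max, so a runaway bind-mount loop fails once the ceiling is reached. The accounting idea on its own, as a userspace sketch with made-up names:

#include <cstdint>

struct MountNamespace {
    uint64_t mounts = 0;          // mounts already attached to this namespace
    uint64_t pending_mounts = 0;  // mounts reserved but not yet attached
};

// Reserve room for `count` new mounts, failing once the combined total would
// exceed the per-namespace ceiling (the kernel reports this as ENOSPC).
static bool reserve_mounts(MountNamespace& ns, uint64_t count,
                           uint64_t max_mounts)
{
    if (ns.mounts + ns.pending_mounts + count > max_mounts)
        return false;
    ns.pending_mounts += count;
    return true;
}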
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void close_all_sockets(atransport* t) {
asocket* s;
/* this is a little gross, but since s->close() *will* modify
** the list out from under you, your options are limited.
*/
adb_mutex_lock(&socket_list_lock);
restart:
for (s = local_socket_list.next; s != &local_socket_list; s = s->next) {
if (s->transport == t || (s->peer && s->peer->transport == t)) {
local_socket_close_locked(s);
goto restart;
}
}
adb_mutex_unlock(&socket_list_lock);
}
Commit Message: adb: switch the socket list mutex to a recursive_mutex.
sockets.cpp was branching on whether a socket close function was
local_socket_close in order to avoid a potential deadlock if the socket
list lock was held while closing a peer socket.
Bug: http://b/28347842
Change-Id: I5e56f17fa54275284787f0f1dc150d1960256ab3
(cherry picked from commit 9b587dec6d0a57c8fe1083c1c543fbeb163d65fa)
CWE ID: CWE-264 | void close_all_sockets(atransport* t) {
asocket* s;
/* this is a little gross, but since s->close() *will* modify
** the list out from under you, your options are limited.
*/
std::lock_guard<std::recursive_mutex> lock(local_socket_list_lock);
restart:
for (s = local_socket_list.next; s != &local_socket_list; s = s->next) {
if (s->transport == t || (s->peer && s->peer->transport == t)) {
local_socket_close(s);
goto restart;
}
}
}
| 28,289 |
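Switching to std::recursive_mutex lets the per-socket close path re-enter the list lock that close_all already holds on the same thread, removing the special-casing the old code needed to dodge a self-deadlock. A compact illustration of why the recursive lock matters (illustrative only, not the adb code):

#include <list>
#include <mutex>

static std::recursive_mutex g_list_lock;
static std::list<int> g_sockets;

static void close_socket(int s)
{
    // Re-acquiring the same recursive_mutex on the same thread is allowed,
    // so this can be called both from outside and from close_all().
    std::lock_guard<std::recursive_mutex> lock(g_list_lock);
    g_sockets.remove(s);
}

static void close_all()
{
    std::lock_guard<std::recursive_mutex> lock(g_list_lock);
    while (!g_sockets.empty())
        close_socket(g_sockets.front());  // nested lock: fine with recursive_mutex
}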
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: bool SniffMimeType(const char* content,
size_t content_size,
const GURL& url,
const std::string& type_hint,
std::string* result) {
DCHECK_LT(content_size, 1000000U); // sanity check
DCHECK(content);
DCHECK(result);
bool have_enough_content = true;
result->assign(type_hint);
if (IsOfficeType(type_hint))
return SniffForInvalidOfficeDocs(content, content_size, url, result);
const bool hint_is_unknown_mime_type = IsUnknownMimeType(type_hint);
if (hint_is_unknown_mime_type && !url.SchemeIsFile() &&
SniffForHTML(content, content_size, &have_enough_content, result)) {
return true;
}
const bool hint_is_text_plain = (type_hint == "text/plain");
if (hint_is_unknown_mime_type || hint_is_text_plain) {
if (!SniffBinary(content, content_size, &have_enough_content, result)) {
if (hint_is_text_plain) {
return have_enough_content;
}
}
}
if (type_hint == "text/xml" || type_hint == "application/xml") {
if (SniffXML(content, content_size, &have_enough_content, result))
return true;
return have_enough_content;
}
if (SniffCRX(content, content_size, url, type_hint,
&have_enough_content, result))
return true;
if (SniffForOfficeDocs(content, content_size, url,
&have_enough_content, result))
return true; // We've matched a magic number. No more content needed.
if (type_hint == "application/octet-stream")
return have_enough_content;
if (SniffForMagicNumbers(content, content_size,
&have_enough_content, result))
return true; // We've matched a magic number. No more content needed.
return have_enough_content;
}
Commit Message: Revert "Don't sniff HTML from documents delivered via the file protocol"
This reverts commit 3519e867dc606437f804561f889d7ed95b95876a.
Reason for revert: crbug.com/786150. Application compatibility for Android WebView applications means we need to allow sniffing on that platform.
Original change's description:
> Don't sniff HTML from documents delivered via the file protocol
>
> To reduce attack surface, Chrome should not MIME-sniff to text/html for
> any document delivered via the file protocol. This change only impacts
> the file protocol (documents served via HTTP/HTTPS/etc are unaffected).
>
> Bug: 777737
> Cq-Include-Trybots: master.tryserver.chromium.android:android_cronet_tester;master.tryserver.chromium.mac:ios-simulator-cronet
> Change-Id: I7086454356b8d2d092be9e1bca0f5ff6dd3b62c0
> Reviewed-on: https://chromium-review.googlesource.com/751402
> Reviewed-by: Ben Wells <benwells@chromium.org>
> Reviewed-by: Sylvain Defresne <sdefresne@chromium.org>
> Reviewed-by: Achuith Bhandarkar <achuith@chromium.org>
> Reviewed-by: Asanka Herath <asanka@chromium.org>
> Reviewed-by: Matt Menke <mmenke@chromium.org>
> Commit-Queue: Eric Lawrence <elawrence@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#514372}
TBR=achuith@chromium.org,benwells@chromium.org,mmenke@chromium.org,sdefresne@chromium.org,asanka@chromium.org,elawrence@chromium.org
# Not skipping CQ checks because original CL landed > 1 day ago.
Bug: 777737
Change-Id: I864ae060ce3277d41ea257ae75e0b80c51f3ea98
Cq-Include-Trybots: master.tryserver.chromium.android:android_cronet_tester;master.tryserver.chromium.mac:ios-simulator-cronet
Reviewed-on: https://chromium-review.googlesource.com/790790
Reviewed-by: Eric Lawrence <elawrence@chromium.org>
Reviewed-by: Matt Menke <mmenke@chromium.org>
Commit-Queue: Eric Lawrence <elawrence@chromium.org>
Cr-Commit-Position: refs/heads/master@{#519347}
CWE ID: | bool SniffMimeType(const char* content,
size_t content_size,
const GURL& url,
const std::string& type_hint,
std::string* result) {
DCHECK_LT(content_size, 1000000U); // sanity check
DCHECK(content);
DCHECK(result);
bool have_enough_content = true;
result->assign(type_hint);
if (IsOfficeType(type_hint))
return SniffForInvalidOfficeDocs(content, content_size, url, result);
const bool hint_is_unknown_mime_type = IsUnknownMimeType(type_hint);
// First check for HTML
if (hint_is_unknown_mime_type) {
// We're only willing to sniff HTML if the server has not supplied a mime
// type, or if the type it did supply indicates that it doesn't know what
// the type should be.
if (SniffForHTML(content, content_size, &have_enough_content, result))
return true; // We succeeded in sniffing HTML. No more content needed.
}
const bool hint_is_text_plain = (type_hint == "text/plain");
if (hint_is_unknown_mime_type || hint_is_text_plain) {
if (!SniffBinary(content, content_size, &have_enough_content, result)) {
if (hint_is_text_plain) {
return have_enough_content;
}
}
}
if (type_hint == "text/xml" || type_hint == "application/xml") {
if (SniffXML(content, content_size, &have_enough_content, result))
return true;
return have_enough_content;
}
if (SniffCRX(content, content_size, url, type_hint,
&have_enough_content, result))
return true;
if (SniffForOfficeDocs(content, content_size, url,
&have_enough_content, result))
return true; // We've matched a magic number. No more content needed.
if (type_hint == "application/octet-stream")
return have_enough_content;
if (SniffForMagicNumbers(content, content_size,
&have_enough_content, result))
return true; // We've matched a magic number. No more content needed.
return have_enough_content;
}
| 2,318 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: flac_read_loop (SF_PRIVATE *psf, unsigned len)
{ FLAC_PRIVATE* pflac = (FLAC_PRIVATE*) psf->codec_data ;
pflac->pos = 0 ;
pflac->len = len ;
pflac->remain = len ;
/* First copy data that has already been decoded and buffered. */
if (pflac->frame != NULL && pflac->bufferpos < pflac->frame->header.blocksize)
flac_buffer_copy (psf) ;
/* Decode some more. */
while (pflac->pos < pflac->len)
{ if (FLAC__stream_decoder_process_single (pflac->fsd) == 0)
break ;
if (FLAC__stream_decoder_get_state (pflac->fsd) >= FLAC__STREAM_DECODER_END_OF_STREAM)
break ;
} ;
pflac->ptr = NULL ;
return pflac->pos ;
} /* flac_read_loop */
Commit Message: src/flac.c: Improve error handling
Especially when dealing with corrupt or malicious files.
CWE ID: CWE-119 | flac_read_loop (SF_PRIVATE *psf, unsigned len)
{ FLAC_PRIVATE* pflac = (FLAC_PRIVATE*) psf->codec_data ;
FLAC__StreamDecoderState state ;
pflac->pos = 0 ;
pflac->len = len ;
pflac->remain = len ;
state = FLAC__stream_decoder_get_state (pflac->fsd) ;
if (state > FLAC__STREAM_DECODER_END_OF_STREAM)
{ psf_log_printf (psf, "FLAC__stream_decoder_get_state returned %s\n", FLAC__StreamDecoderStateString [state]) ;
/* Current frame is busted, so NULL the pointer. */
pflac->frame = NULL ;
} ;
/* First copy data that has already been decoded and buffered. */
if (pflac->frame != NULL && pflac->bufferpos < pflac->frame->header.blocksize)
flac_buffer_copy (psf) ;
/* Decode some more. */
while (pflac->pos < pflac->len)
{ if (FLAC__stream_decoder_process_single (pflac->fsd) == 0)
break ;
state = FLAC__stream_decoder_get_state (pflac->fsd) ;
if (state >= FLAC__STREAM_DECODER_END_OF_STREAM)
{ psf_log_printf (psf, "FLAC__stream_decoder_get_state returned %s\n", FLAC__StreamDecoderStateString [state]) ;
/* Current frame is busted, so NULL the pointer. */
pflac->frame = NULL ;
break ;
} ;
} ;
pflac->ptr = NULL ;
return pflac->pos ;
} /* flac_read_loop */
| 11,820 |
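The hardened loop asks the decoder for its state after every step and drops the cached frame pointer as soon as an error or end-of-stream is reported, instead of reading on through a pointer the failed decode may have invalidated. The shape of that loop, abstracted away from libFLAC (the decode step itself is left as a stub):

enum State { STATE_OK = 0, STATE_END_OF_STREAM = 1, STATE_ERROR = 2 };
struct Frame { int samples = 0; };
struct Decoder {
    State state = STATE_OK;
    const Frame* cached_frame = nullptr;
    // Stand-in for the real per-frame decode; a real decoder would set
    // `state` and `cached_frame` as a side effect.
    bool step() { return false; }
};

static int read_samples(Decoder& d, int wanted)
{
    int produced = 0;
    while (produced < wanted) {
        if (!d.step())
            break;
        if (d.state >= STATE_END_OF_STREAM || d.cached_frame == nullptr) {
            d.cached_frame = nullptr;   // the frame is no longer trustworthy
            break;                      // bail out rather than read through it
        }
        produced += d.cached_frame->samples;
    }
    return produced;
}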
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: TabStyle::TabColors GM2TabStyle::CalculateColors() const {
const ui::ThemeProvider* theme_provider = tab_->GetThemeProvider();
constexpr float kMinimumActiveContrastRatio = 6.05f;
constexpr float kMinimumInactiveContrastRatio = 4.61f;
constexpr float kMinimumHoveredContrastRatio = 5.02f;
constexpr float kMinimumPressedContrastRatio = 4.41f;
float expected_opacity = 0.0f;
if (tab_->IsActive()) {
expected_opacity = 1.0f;
} else if (tab_->IsSelected()) {
expected_opacity = kSelectedTabOpacity;
} else if (tab_->mouse_hovered()) {
expected_opacity = GetHoverOpacity();
}
const SkColor bg_color = color_utils::AlphaBlend(
tab_->controller()->GetTabBackgroundColor(TAB_ACTIVE),
tab_->controller()->GetTabBackgroundColor(TAB_INACTIVE),
expected_opacity);
SkColor title_color = tab_->controller()->GetTabForegroundColor(
expected_opacity > 0.5f ? TAB_ACTIVE : TAB_INACTIVE, bg_color);
title_color = color_utils::GetColorWithMinimumContrast(title_color, bg_color);
const SkColor base_hovered_color = theme_provider->GetColor(
ThemeProperties::COLOR_TAB_CLOSE_BUTTON_BACKGROUND_HOVER);
const SkColor base_pressed_color = theme_provider->GetColor(
ThemeProperties::COLOR_TAB_CLOSE_BUTTON_BACKGROUND_PRESSED);
const auto get_color_for_contrast_ratio = [](SkColor fg_color,
SkColor bg_color,
float contrast_ratio) {
const SkAlpha blend_alpha = color_utils::GetBlendValueWithMinimumContrast(
bg_color, fg_color, bg_color, contrast_ratio);
return color_utils::AlphaBlend(fg_color, bg_color, blend_alpha);
};
const SkColor generated_icon_color = get_color_for_contrast_ratio(
title_color, bg_color,
tab_->IsActive() ? kMinimumActiveContrastRatio
: kMinimumInactiveContrastRatio);
const SkColor generated_hovered_color = get_color_for_contrast_ratio(
base_hovered_color, bg_color, kMinimumHoveredContrastRatio);
const SkColor generated_pressed_color = get_color_for_contrast_ratio(
base_pressed_color, bg_color, kMinimumPressedContrastRatio);
const SkColor generated_hovered_icon_color =
color_utils::GetColorWithMinimumContrast(title_color,
generated_hovered_color);
const SkColor generated_pressed_icon_color =
color_utils::GetColorWithMinimumContrast(title_color,
generated_pressed_color);
return {bg_color,
title_color,
generated_icon_color,
generated_hovered_icon_color,
generated_pressed_icon_color,
generated_hovered_color,
generated_pressed_color};
}
Commit Message: Paint tab groups with the group color.
* The background of TabGroupHeader now uses the group color.
* The backgrounds of tabs in the group are tinted with the group color.
This treatment, along with the colors chosen, is intended to be
a placeholder.
Bug: 905491
Change-Id: Ic808548f8eba23064606e7fb8c9bba281d0d117f
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1610504
Commit-Queue: Bret Sepulveda <bsep@chromium.org>
Reviewed-by: Taylor Bergquist <tbergquist@chromium.org>
Cr-Commit-Position: refs/heads/master@{#660498}
CWE ID: CWE-20 | TabStyle::TabColors GM2TabStyle::CalculateColors() const {
const ui::ThemeProvider* theme_provider = tab_->GetThemeProvider();
constexpr float kMinimumActiveContrastRatio = 6.05f;
constexpr float kMinimumInactiveContrastRatio = 4.61f;
constexpr float kMinimumHoveredContrastRatio = 5.02f;
constexpr float kMinimumPressedContrastRatio = 4.41f;
float expected_opacity = 0.0f;
if (tab_->IsActive()) {
expected_opacity = 1.0f;
} else if (tab_->IsSelected()) {
expected_opacity = kSelectedTabOpacity;
} else if (tab_->mouse_hovered()) {
expected_opacity = GetHoverOpacity();
}
const SkColor bg_color = color_utils::AlphaBlend(
GetTabBackgroundColor(TAB_ACTIVE), GetTabBackgroundColor(TAB_INACTIVE),
expected_opacity);
SkColor title_color = tab_->controller()->GetTabForegroundColor(
expected_opacity > 0.5f ? TAB_ACTIVE : TAB_INACTIVE, bg_color);
title_color = color_utils::GetColorWithMinimumContrast(title_color, bg_color);
const SkColor base_hovered_color = theme_provider->GetColor(
ThemeProperties::COLOR_TAB_CLOSE_BUTTON_BACKGROUND_HOVER);
const SkColor base_pressed_color = theme_provider->GetColor(
ThemeProperties::COLOR_TAB_CLOSE_BUTTON_BACKGROUND_PRESSED);
const auto get_color_for_contrast_ratio = [](SkColor fg_color,
SkColor bg_color,
float contrast_ratio) {
const SkAlpha blend_alpha = color_utils::GetBlendValueWithMinimumContrast(
bg_color, fg_color, bg_color, contrast_ratio);
return color_utils::AlphaBlend(fg_color, bg_color, blend_alpha);
};
const SkColor generated_icon_color = get_color_for_contrast_ratio(
title_color, bg_color,
tab_->IsActive() ? kMinimumActiveContrastRatio
: kMinimumInactiveContrastRatio);
const SkColor generated_hovered_color = get_color_for_contrast_ratio(
base_hovered_color, bg_color, kMinimumHoveredContrastRatio);
const SkColor generated_pressed_color = get_color_for_contrast_ratio(
base_pressed_color, bg_color, kMinimumPressedContrastRatio);
const SkColor generated_hovered_icon_color =
color_utils::GetColorWithMinimumContrast(title_color,
generated_hovered_color);
const SkColor generated_pressed_icon_color =
color_utils::GetColorWithMinimumContrast(title_color,
generated_pressed_color);
return {bg_color,
title_color,
generated_icon_color,
generated_hovered_icon_color,
generated_pressed_icon_color,
generated_hovered_color,
generated_pressed_color};
}
| 3,121 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: TestWCDelegateForDialogsAndFullscreen()
: is_fullscreen_(false), message_loop_runner_(new MessageLoopRunner) {}
Commit Message: If a page calls |window.focus()|, kick it out of fullscreen.
BUG=776418, 800056
Change-Id: I1880fe600e4814c073f247c43b1c1ac80c8fc017
Reviewed-on: https://chromium-review.googlesource.com/852378
Reviewed-by: Nasko Oskov <nasko@chromium.org>
Reviewed-by: Philip Jägenstedt <foolip@chromium.org>
Commit-Queue: Avi Drissman <avi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#533790}
CWE ID: | TestWCDelegateForDialogsAndFullscreen()
| 7,484 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: bool SoftVPX::outputBuffers(bool flushDecoder, bool display, bool eos, bool *portWillReset) {
List<BufferInfo *> &outQueue = getPortQueue(1);
BufferInfo *outInfo = NULL;
OMX_BUFFERHEADERTYPE *outHeader = NULL;
vpx_codec_iter_t iter = NULL;
if (flushDecoder && mFrameParallelMode) {
if (vpx_codec_decode((vpx_codec_ctx_t *)mCtx, NULL, 0, NULL, 0)) {
ALOGE("Failed to flush on2 decoder.");
return false;
}
}
if (!display) {
if (!flushDecoder) {
ALOGE("Invalid operation.");
return false;
}
while ((mImg = vpx_codec_get_frame((vpx_codec_ctx_t *)mCtx, &iter))) {
}
return true;
}
while (!outQueue.empty()) {
if (mImg == NULL) {
mImg = vpx_codec_get_frame((vpx_codec_ctx_t *)mCtx, &iter);
if (mImg == NULL) {
break;
}
}
uint32_t width = mImg->d_w;
uint32_t height = mImg->d_h;
outInfo = *outQueue.begin();
outHeader = outInfo->mHeader;
CHECK_EQ(mImg->fmt, VPX_IMG_FMT_I420);
handlePortSettingsChange(portWillReset, width, height);
if (*portWillReset) {
return true;
}
outHeader->nOffset = 0;
outHeader->nFlags = 0;
outHeader->nFilledLen = (outputBufferWidth() * outputBufferHeight() * 3) / 2;
outHeader->nTimeStamp = *(OMX_TICKS *)mImg->user_priv;
if (outHeader->nAllocLen >= outHeader->nFilledLen) {
uint8_t *dst = outHeader->pBuffer;
const uint8_t *srcY = (const uint8_t *)mImg->planes[VPX_PLANE_Y];
const uint8_t *srcU = (const uint8_t *)mImg->planes[VPX_PLANE_U];
const uint8_t *srcV = (const uint8_t *)mImg->planes[VPX_PLANE_V];
size_t srcYStride = mImg->stride[VPX_PLANE_Y];
size_t srcUStride = mImg->stride[VPX_PLANE_U];
size_t srcVStride = mImg->stride[VPX_PLANE_V];
copyYV12FrameToOutputBuffer(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride);
} else {
ALOGE("b/27597103, buffer too small");
android_errorWriteLog(0x534e4554, "27597103");
outHeader->nFilledLen = 0;
}
mImg = NULL;
outInfo->mOwnedByUs = false;
outQueue.erase(outQueue.begin());
outInfo = NULL;
notifyFillBufferDone(outHeader);
outHeader = NULL;
}
if (!eos) {
return true;
}
if (!outQueue.empty()) {
outInfo = *outQueue.begin();
outQueue.erase(outQueue.begin());
outHeader = outInfo->mHeader;
outHeader->nTimeStamp = 0;
outHeader->nFilledLen = 0;
outHeader->nFlags = OMX_BUFFERFLAG_EOS;
outInfo->mOwnedByUs = false;
notifyFillBufferDone(outHeader);
mEOSStatus = OUTPUT_FRAMES_FLUSHED;
}
return true;
}
Commit Message: SoftVPX: fix nFilledLen overflow
Bug: 29421675
Change-Id: I25d4cf54a5df22c2130c37e95c7c7f75063111f3
CWE ID: CWE-119 | bool SoftVPX::outputBuffers(bool flushDecoder, bool display, bool eos, bool *portWillReset) {
List<BufferInfo *> &outQueue = getPortQueue(1);
BufferInfo *outInfo = NULL;
OMX_BUFFERHEADERTYPE *outHeader = NULL;
vpx_codec_iter_t iter = NULL;
if (flushDecoder && mFrameParallelMode) {
if (vpx_codec_decode((vpx_codec_ctx_t *)mCtx, NULL, 0, NULL, 0)) {
ALOGE("Failed to flush on2 decoder.");
return false;
}
}
if (!display) {
if (!flushDecoder) {
ALOGE("Invalid operation.");
return false;
}
while ((mImg = vpx_codec_get_frame((vpx_codec_ctx_t *)mCtx, &iter))) {
}
return true;
}
while (!outQueue.empty()) {
if (mImg == NULL) {
mImg = vpx_codec_get_frame((vpx_codec_ctx_t *)mCtx, &iter);
if (mImg == NULL) {
break;
}
}
uint32_t width = mImg->d_w;
uint32_t height = mImg->d_h;
outInfo = *outQueue.begin();
outHeader = outInfo->mHeader;
CHECK_EQ(mImg->fmt, VPX_IMG_FMT_I420);
handlePortSettingsChange(portWillReset, width, height);
if (*portWillReset) {
return true;
}
outHeader->nOffset = 0;
outHeader->nFlags = 0;
outHeader->nFilledLen = (outputBufferWidth() * outputBufferHeight() * 3) / 2;
outHeader->nTimeStamp = *(OMX_TICKS *)mImg->user_priv;
if (outputBufferSafe(outHeader)) {
uint8_t *dst = outHeader->pBuffer;
const uint8_t *srcY = (const uint8_t *)mImg->planes[VPX_PLANE_Y];
const uint8_t *srcU = (const uint8_t *)mImg->planes[VPX_PLANE_U];
const uint8_t *srcV = (const uint8_t *)mImg->planes[VPX_PLANE_V];
size_t srcYStride = mImg->stride[VPX_PLANE_Y];
size_t srcUStride = mImg->stride[VPX_PLANE_U];
size_t srcVStride = mImg->stride[VPX_PLANE_V];
copyYV12FrameToOutputBuffer(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride);
} else {
outHeader->nFilledLen = 0;
}
mImg = NULL;
outInfo->mOwnedByUs = false;
outQueue.erase(outQueue.begin());
outInfo = NULL;
notifyFillBufferDone(outHeader);
outHeader = NULL;
}
if (!eos) {
return true;
}
if (!outQueue.empty()) {
outInfo = *outQueue.begin();
outQueue.erase(outQueue.begin());
outHeader = outInfo->mHeader;
outHeader->nTimeStamp = 0;
outHeader->nFilledLen = 0;
outHeader->nFlags = OMX_BUFFERFLAG_EOS;
outInfo->mOwnedByUs = false;
notifyFillBufferDone(outHeader);
mEOSStatus = OUTPUT_FRAMES_FLUSHED;
}
return true;
}
| 10,152 |
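Every output copy now goes through a capacity check, so a frame larger than the client-supplied OMX buffer produces an empty buffer instead of a heap overflow. Reduced to its core, the check compares a byte count computed in a wide type against the allocation (a sketch, not the actual outputBufferSafe implementation):

#include <cstdint>
#include <cstring>

// Copy a decoded YUV420 frame only if it fits in the caller's buffer.
// All size arithmetic is done in 64 bits, with an explicit guard before
// the multiply, so the check itself cannot wrap.
static bool copy_frame_checked(uint8_t* dst, size_t dst_capacity,
                               const uint8_t* src,
                               uint32_t width, uint32_t height)
{
    const uint64_t pixels = (uint64_t)width * height;   // fits in 64 bits
    if (pixels > UINT64_MAX / 3)
        return false;
    const uint64_t needed = pixels * 3 / 2;              // Y plane + quarter-size U and V
    if (needed > dst_capacity)
        return false;           // report "buffer too small" instead of writing past dst
    std::memcpy(dst, src, (size_t)needed);
    return true;
}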
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void impeg2d_dec_pic_data_thread(dec_state_t *ps_dec)
{
WORD32 i4_continue_decode;
WORD32 i4_cur_row, temp;
UWORD32 u4_bits_read;
WORD32 i4_dequeue_job;
IMPEG2D_ERROR_CODES_T e_error;
i4_cur_row = ps_dec->u2_mb_y + 1;
i4_continue_decode = 1;
i4_dequeue_job = 1;
do
{
if(i4_cur_row > ps_dec->u2_num_vert_mb)
{
i4_continue_decode = 0;
break;
}
{
if((ps_dec->i4_num_cores> 1) && (i4_dequeue_job))
{
job_t s_job;
IV_API_CALL_STATUS_T e_ret;
UWORD8 *pu1_buf;
e_ret = impeg2_jobq_dequeue(ps_dec->pv_jobq, &s_job, sizeof(s_job), 1, 1);
if(e_ret != IV_SUCCESS)
break;
if(CMD_PROCESS == s_job.i4_cmd)
{
pu1_buf = ps_dec->pu1_inp_bits_buf + s_job.i4_bistream_ofst;
impeg2d_bit_stream_init(&(ps_dec->s_bit_stream), pu1_buf,
(ps_dec->u4_num_inp_bytes - s_job.i4_bistream_ofst) + 8);
i4_cur_row = s_job.i2_start_mb_y;
ps_dec->i4_start_mb_y = s_job.i2_start_mb_y;
ps_dec->i4_end_mb_y = s_job.i2_end_mb_y;
ps_dec->u2_mb_x = 0;
ps_dec->u2_mb_y = ps_dec->i4_start_mb_y;
ps_dec->u2_num_mbs_left = (ps_dec->i4_end_mb_y - ps_dec->i4_start_mb_y) * ps_dec->u2_num_horiz_mb;
}
else
{
WORD32 start_row;
WORD32 num_rows;
start_row = s_job.i2_start_mb_y << 4;
num_rows = MIN((s_job.i2_end_mb_y << 4), ps_dec->u2_vertical_size);
num_rows -= start_row;
impeg2d_format_convert(ps_dec, ps_dec->ps_disp_pic,
ps_dec->ps_disp_frm_buf,
start_row, num_rows);
break;
}
}
e_error = impeg2d_dec_slice(ps_dec);
if ((IMPEG2D_ERROR_CODES_T)IVD_ERROR_NONE != e_error)
{
impeg2d_next_start_code(ps_dec);
}
}
/* Detecting next slice start code */
while(1)
{
u4_bits_read = impeg2d_bit_stream_nxt(&ps_dec->s_bit_stream,START_CODE_LEN);
temp = u4_bits_read & 0xFF;
i4_continue_decode = (((u4_bits_read >> 8) == 0x01) && (temp) && (temp <= 0xAF));
if(i4_continue_decode)
{
/* If the slice is from the same row, then continue decoding without dequeue */
if((temp - 1) == i4_cur_row)
{
i4_dequeue_job = 0;
break;
}
if(temp < ps_dec->i4_end_mb_y)
{
i4_cur_row = ps_dec->u2_mb_y;
}
else
{
i4_dequeue_job = 1;
}
break;
}
else
break;
}
}while(i4_continue_decode);
if(ps_dec->i4_num_cores > 1)
{
while(1)
{
job_t s_job;
IV_API_CALL_STATUS_T e_ret;
e_ret = impeg2_jobq_dequeue(ps_dec->pv_jobq, &s_job, sizeof(s_job), 1, 1);
if(e_ret != IV_SUCCESS)
break;
if(CMD_FMTCONV == s_job.i4_cmd)
{
WORD32 start_row;
WORD32 num_rows;
start_row = s_job.i2_start_mb_y << 4;
num_rows = MIN((s_job.i2_end_mb_y << 4), ps_dec->u2_vertical_size);
num_rows -= start_row;
impeg2d_format_convert(ps_dec, ps_dec->ps_disp_pic,
ps_dec->ps_disp_frm_buf,
start_row, num_rows);
}
}
}
else
{
if((NULL != ps_dec->ps_disp_pic) && ((0 == ps_dec->u4_share_disp_buf) || (IV_YUV_420P != ps_dec->i4_chromaFormat)))
impeg2d_format_convert(ps_dec, ps_dec->ps_disp_pic,
ps_dec->ps_disp_frm_buf,
0, ps_dec->u2_vertical_size);
}
}
Commit Message: Fix for handling streams which resulted in negative num_mbs_left
Bug: 26070014
Change-Id: Id9f063a2c72a802d991b92abaf00ec687db5bb0f
CWE ID: CWE-119 | void impeg2d_dec_pic_data_thread(dec_state_t *ps_dec)
{
WORD32 i4_continue_decode;
WORD32 i4_cur_row, temp;
UWORD32 u4_bits_read;
WORD32 i4_dequeue_job;
IMPEG2D_ERROR_CODES_T e_error;
i4_cur_row = ps_dec->u2_mb_y + 1;
i4_continue_decode = 1;
i4_dequeue_job = 1;
do
{
if(i4_cur_row > ps_dec->u2_num_vert_mb)
{
i4_continue_decode = 0;
break;
}
{
if((ps_dec->i4_num_cores> 1) && (i4_dequeue_job))
{
job_t s_job;
IV_API_CALL_STATUS_T e_ret;
UWORD8 *pu1_buf;
e_ret = impeg2_jobq_dequeue(ps_dec->pv_jobq, &s_job, sizeof(s_job), 1, 1);
if(e_ret != IV_SUCCESS)
break;
if(CMD_PROCESS == s_job.i4_cmd)
{
pu1_buf = ps_dec->pu1_inp_bits_buf + s_job.i4_bistream_ofst;
impeg2d_bit_stream_init(&(ps_dec->s_bit_stream), pu1_buf,
(ps_dec->u4_num_inp_bytes - s_job.i4_bistream_ofst) + 8);
i4_cur_row = s_job.i2_start_mb_y;
ps_dec->i4_start_mb_y = s_job.i2_start_mb_y;
ps_dec->i4_end_mb_y = s_job.i2_end_mb_y;
ps_dec->u2_mb_x = 0;
ps_dec->u2_mb_y = ps_dec->i4_start_mb_y;
ps_dec->u2_num_mbs_left = (ps_dec->i4_end_mb_y - ps_dec->i4_start_mb_y) * ps_dec->u2_num_horiz_mb;
}
else
{
WORD32 start_row;
WORD32 num_rows;
start_row = s_job.i2_start_mb_y << 4;
num_rows = MIN((s_job.i2_end_mb_y << 4), ps_dec->u2_vertical_size);
num_rows -= start_row;
impeg2d_format_convert(ps_dec, ps_dec->ps_disp_pic,
ps_dec->ps_disp_frm_buf,
start_row, num_rows);
break;
}
}
e_error = impeg2d_dec_slice(ps_dec);
if ((IMPEG2D_ERROR_CODES_T)IVD_ERROR_NONE != e_error)
{
impeg2d_next_start_code(ps_dec);
}
}
/* Detecting next slice start code */
while(1)
{
u4_bits_read = impeg2d_bit_stream_nxt(&ps_dec->s_bit_stream,START_CODE_LEN);
temp = u4_bits_read & 0xFF;
i4_continue_decode = (((u4_bits_read >> 8) == 0x01) && (temp) && (temp <= 0xAF));
if (1 == ps_dec->i4_num_cores && 0 == ps_dec->u2_num_mbs_left)
{
i4_continue_decode = 0;
android_errorWriteLog(0x534e4554, "26070014");
}
if(i4_continue_decode)
{
/* If the slice is from the same row, then continue decoding without dequeue */
if((temp - 1) == i4_cur_row)
{
i4_dequeue_job = 0;
break;
}
if(temp < ps_dec->i4_end_mb_y)
{
i4_cur_row = ps_dec->u2_mb_y;
}
else
{
i4_dequeue_job = 1;
}
break;
}
else
break;
}
}while(i4_continue_decode);
if(ps_dec->i4_num_cores > 1)
{
while(1)
{
job_t s_job;
IV_API_CALL_STATUS_T e_ret;
e_ret = impeg2_jobq_dequeue(ps_dec->pv_jobq, &s_job, sizeof(s_job), 1, 1);
if(e_ret != IV_SUCCESS)
break;
if(CMD_FMTCONV == s_job.i4_cmd)
{
WORD32 start_row;
WORD32 num_rows;
start_row = s_job.i2_start_mb_y << 4;
num_rows = MIN((s_job.i2_end_mb_y << 4), ps_dec->u2_vertical_size);
num_rows -= start_row;
impeg2d_format_convert(ps_dec, ps_dec->ps_disp_pic,
ps_dec->ps_disp_frm_buf,
start_row, num_rows);
}
}
}
else
{
if((NULL != ps_dec->ps_disp_pic) && ((0 == ps_dec->u4_share_disp_buf) || (IV_YUV_420P != ps_dec->i4_chromaFormat)))
impeg2d_format_convert(ps_dec, ps_dec->ps_disp_pic,
ps_dec->ps_disp_frm_buf,
0, ps_dec->u2_vertical_size);
}
}
| 3,246 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: GURL SiteInstance::GetSiteForURL(BrowserContext* browser_context,
const GURL& real_url) {
if (real_url.SchemeIs(kGuestScheme))
return real_url;
GURL url = SiteInstanceImpl::GetEffectiveURL(browser_context, real_url);
url::Origin origin = url::Origin::Create(url);
auto* policy = ChildProcessSecurityPolicyImpl::GetInstance();
url::Origin isolated_origin;
if (policy->GetMatchingIsolatedOrigin(origin, &isolated_origin))
return isolated_origin.GetURL();
if (!origin.host().empty() && origin.scheme() != url::kFileScheme) {
std::string domain = net::registry_controlled_domains::GetDomainAndRegistry(
origin.host(),
net::registry_controlled_domains::INCLUDE_PRIVATE_REGISTRIES);
std::string site = origin.scheme();
site += url::kStandardSchemeSeparator;
site += domain.empty() ? origin.host() : domain;
return GURL(site);
}
if (!origin.unique()) {
DCHECK(!origin.scheme().empty());
return GURL(origin.scheme() + ":");
} else if (url.has_scheme()) {
if (url.SchemeIsBlob()) {
if (url.has_ref()) {
GURL::Replacements replacements;
replacements.ClearRef();
url = url.ReplaceComponents(replacements);
}
return url;
}
DCHECK(!url.scheme().empty());
return GURL(url.scheme() + ":");
}
DCHECK(!url.is_valid()) << url;
return GURL();
}
Commit Message: Use unique processes for data URLs on restore.
Data URLs are usually put into the process that created them, but this
info is not tracked after a tab restore. Ensure that they do not end up
in the parent frame's process (or each other's process), in case they
are malicious.
BUG=863069
Change-Id: Ib391f90c7bdf28a0a9c057c5cc7918c10aed968b
Reviewed-on: https://chromium-review.googlesource.com/1150767
Reviewed-by: Alex Moshchuk <alexmos@chromium.org>
Reviewed-by: Lei Zhang <thestig@chromium.org>
Commit-Queue: Charlie Reis <creis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#581023}
CWE ID: CWE-285 | GURL SiteInstance::GetSiteForURL(BrowserContext* browser_context,
const GURL& real_url) {
if (real_url.SchemeIs(kGuestScheme))
return real_url;
GURL url = SiteInstanceImpl::GetEffectiveURL(browser_context, real_url);
url::Origin origin = url::Origin::Create(url);
auto* policy = ChildProcessSecurityPolicyImpl::GetInstance();
url::Origin isolated_origin;
if (policy->GetMatchingIsolatedOrigin(origin, &isolated_origin))
return isolated_origin.GetURL();
if (!origin.host().empty() && origin.scheme() != url::kFileScheme) {
std::string domain = net::registry_controlled_domains::GetDomainAndRegistry(
origin.host(),
net::registry_controlled_domains::INCLUDE_PRIVATE_REGISTRIES);
std::string site = origin.scheme();
site += url::kStandardSchemeSeparator;
site += domain.empty() ? origin.host() : domain;
return GURL(site);
}
if (!origin.unique()) {
DCHECK(!origin.scheme().empty());
return GURL(origin.scheme() + ":");
} else if (url.has_scheme()) {
// that might allow two URLs created by different sites to share a process.
// See https://crbug.com/863623 and https://crbug.com/863069.
// schemes, such as file:.
// TODO(creis): This currently causes problems with tests on Android and
// Android WebView. For now, skip it when Site Isolation is not enabled,
// since there's no need to isolate data and blob URLs from each other in
// that case.
bool is_site_isolation_enabled =
SiteIsolationPolicy::UseDedicatedProcessesForAllSites() ||
SiteIsolationPolicy::AreIsolatedOriginsEnabled();
if (is_site_isolation_enabled &&
(url.SchemeIsBlob() || url.scheme() == url::kDataScheme)) {
// origins from each other. We also get here for browser-initiated
// navigations to data URLs, which have a unique origin and should only
// share a process when they are identical. Remove hash from the URL in
// either case, since same-document navigations shouldn't use a different
// site URL.
if (url.has_ref()) {
GURL::Replacements replacements;
replacements.ClearRef();
url = url.ReplaceComponents(replacements);
}
return url;
}
DCHECK(!url.scheme().empty());
return GURL(url.scheme() + ":");
}
DCHECK(!url.is_valid()) << url;
return GURL();
}
| 19,441 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: SPL_METHOD(SplFileObject, __construct)
{
spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
zend_bool use_include_path = 0;
char *p1, *p2;
char *tmp_path;
int tmp_path_len;
zend_error_handling error_handling;
zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC);
intern->u.file.open_mode = NULL;
intern->u.file.open_mode_len = 0;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "p|sbr!",
&intern->file_name, &intern->file_name_len,
&intern->u.file.open_mode, &intern->u.file.open_mode_len,
&use_include_path, &intern->u.file.zcontext) == FAILURE) {
intern->u.file.open_mode = NULL;
intern->file_name = NULL;
zend_restore_error_handling(&error_handling TSRMLS_CC);
return;
}
if (intern->u.file.open_mode == NULL) {
intern->u.file.open_mode = "r";
intern->u.file.open_mode_len = 1;
}
if (spl_filesystem_file_open(intern, use_include_path, 0 TSRMLS_CC) == SUCCESS) {
tmp_path_len = strlen(intern->u.file.stream->orig_path);
if (tmp_path_len > 1 && IS_SLASH_AT(intern->u.file.stream->orig_path, tmp_path_len-1)) {
tmp_path_len--;
}
tmp_path = estrndup(intern->u.file.stream->orig_path, tmp_path_len);
p1 = strrchr(tmp_path, '/');
#if defined(PHP_WIN32) || defined(NETWARE)
p2 = strrchr(tmp_path, '\\');
#else
p2 = 0;
#endif
if (p1 || p2) {
intern->_path_len = (p1 > p2 ? p1 : p2) - tmp_path;
} else {
intern->_path_len = 0;
}
efree(tmp_path);
intern->_path = estrndup(intern->u.file.stream->orig_path, intern->_path_len);
}
zend_restore_error_handling(&error_handling TSRMLS_CC);
} /* }}} */
/* {{{ proto void SplTempFileObject::__construct([int max_memory])
Commit Message: Fix bug #72262 - do not overflow int
CWE ID: CWE-190 | SPL_METHOD(SplFileObject, __construct)
{
spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
zend_bool use_include_path = 0;
char *p1, *p2;
char *tmp_path;
int tmp_path_len;
zend_error_handling error_handling;
zend_replace_error_handling(EH_THROW, spl_ce_RuntimeException, &error_handling TSRMLS_CC);
intern->u.file.open_mode = NULL;
intern->u.file.open_mode_len = 0;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "p|sbr!",
&intern->file_name, &intern->file_name_len,
&intern->u.file.open_mode, &intern->u.file.open_mode_len,
&use_include_path, &intern->u.file.zcontext) == FAILURE) {
intern->u.file.open_mode = NULL;
intern->file_name = NULL;
zend_restore_error_handling(&error_handling TSRMLS_CC);
return;
}
if (intern->u.file.open_mode == NULL) {
intern->u.file.open_mode = "r";
intern->u.file.open_mode_len = 1;
}
if (spl_filesystem_file_open(intern, use_include_path, 0 TSRMLS_CC) == SUCCESS) {
tmp_path_len = strlen(intern->u.file.stream->orig_path);
if (tmp_path_len > 1 && IS_SLASH_AT(intern->u.file.stream->orig_path, tmp_path_len-1)) {
tmp_path_len--;
}
tmp_path = estrndup(intern->u.file.stream->orig_path, tmp_path_len);
p1 = strrchr(tmp_path, '/');
#if defined(PHP_WIN32) || defined(NETWARE)
p2 = strrchr(tmp_path, '\\');
#else
p2 = 0;
#endif
if (p1 || p2) {
intern->_path_len = (p1 > p2 ? p1 : p2) - tmp_path;
} else {
intern->_path_len = 0;
}
efree(tmp_path);
intern->_path = estrndup(intern->u.file.stream->orig_path, intern->_path_len);
}
zend_restore_error_handling(&error_handling TSRMLS_CC);
} /* }}} */
/* {{{ proto void SplTempFileObject::__construct([int max_memory])
| 9,968 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
pte_t *dst_pte,
struct vm_area_struct *dst_vma,
unsigned long dst_addr,
unsigned long src_addr,
struct page **pagep)
{
int vm_shared = dst_vma->vm_flags & VM_SHARED;
struct hstate *h = hstate_vma(dst_vma);
pte_t _dst_pte;
spinlock_t *ptl;
int ret;
struct page *page;
if (!*pagep) {
ret = -ENOMEM;
page = alloc_huge_page(dst_vma, dst_addr, 0);
if (IS_ERR(page))
goto out;
ret = copy_huge_page_from_user(page,
(const void __user *) src_addr,
pages_per_huge_page(h), false);
/* fallback to copy_from_user outside mmap_sem */
if (unlikely(ret)) {
ret = -EFAULT;
*pagep = page;
/* don't free the page */
goto out;
}
} else {
page = *pagep;
*pagep = NULL;
}
/*
* The memory barrier inside __SetPageUptodate makes sure that
* preceding stores to the page contents become visible before
* the set_pte_at() write.
*/
__SetPageUptodate(page);
set_page_huge_active(page);
/*
* If shared, add to page cache
*/
if (vm_shared) {
struct address_space *mapping = dst_vma->vm_file->f_mapping;
pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
ret = huge_add_to_page_cache(page, mapping, idx);
if (ret)
goto out_release_nounlock;
}
ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
spin_lock(ptl);
ret = -EEXIST;
if (!huge_pte_none(huge_ptep_get(dst_pte)))
goto out_release_unlock;
if (vm_shared) {
page_dup_rmap(page, true);
} else {
ClearPagePrivate(page);
hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
}
_dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
if (dst_vma->vm_flags & VM_WRITE)
_dst_pte = huge_pte_mkdirty(_dst_pte);
_dst_pte = pte_mkyoung(_dst_pte);
set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
(void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
dst_vma->vm_flags & VM_WRITE);
hugetlb_count_add(pages_per_huge_page(h), dst_mm);
/* No need to invalidate - it was non-present before */
update_mmu_cache(dst_vma, dst_addr, dst_pte);
spin_unlock(ptl);
if (vm_shared)
unlock_page(page);
ret = 0;
out:
return ret;
out_release_unlock:
spin_unlock(ptl);
if (vm_shared)
unlock_page(page);
out_release_nounlock:
put_page(page);
goto out;
}
Commit Message: userfaultfd: hugetlbfs: prevent UFFDIO_COPY to fill beyond the end of i_size
This oops:
kernel BUG at fs/hugetlbfs/inode.c:484!
RIP: remove_inode_hugepages+0x3d0/0x410
Call Trace:
hugetlbfs_setattr+0xd9/0x130
notify_change+0x292/0x410
do_truncate+0x65/0xa0
do_sys_ftruncate.constprop.3+0x11a/0x180
SyS_ftruncate+0xe/0x10
tracesys+0xd9/0xde
was caused by the lack of i_size check in hugetlb_mcopy_atomic_pte.
mmap() can still succeed beyond the end of the i_size after vmtruncate
zapped vmas in those ranges, but the faults must not succeed, and that
includes UFFDIO_COPY.
We could differentiate the retval to userland to represent a SIGBUS like
a page fault would do (vs SIGSEGV), but it doesn't seem very useful and
we'd need to pick a random retval as there's no meaningful syscall
retval that would differentiate from SIGSEGV and SIGBUS, there's just
-EFAULT.
Link: http://lkml.kernel.org/r/20171016223914.2421-2-aarcange@redhat.com
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
CWE ID: CWE-119 | int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
pte_t *dst_pte,
struct vm_area_struct *dst_vma,
unsigned long dst_addr,
unsigned long src_addr,
struct page **pagep)
{
struct address_space *mapping;
pgoff_t idx;
unsigned long size;
int vm_shared = dst_vma->vm_flags & VM_SHARED;
struct hstate *h = hstate_vma(dst_vma);
pte_t _dst_pte;
spinlock_t *ptl;
int ret;
struct page *page;
if (!*pagep) {
ret = -ENOMEM;
page = alloc_huge_page(dst_vma, dst_addr, 0);
if (IS_ERR(page))
goto out;
ret = copy_huge_page_from_user(page,
(const void __user *) src_addr,
pages_per_huge_page(h), false);
/* fallback to copy_from_user outside mmap_sem */
if (unlikely(ret)) {
ret = -EFAULT;
*pagep = page;
/* don't free the page */
goto out;
}
} else {
page = *pagep;
*pagep = NULL;
}
/*
* The memory barrier inside __SetPageUptodate makes sure that
* preceding stores to the page contents become visible before
* the set_pte_at() write.
*/
__SetPageUptodate(page);
set_page_huge_active(page);
mapping = dst_vma->vm_file->f_mapping;
idx = vma_hugecache_offset(h, dst_vma, dst_addr);
/*
* If shared, add to page cache
*/
if (vm_shared) {
size = i_size_read(mapping->host) >> huge_page_shift(h);
ret = -EFAULT;
if (idx >= size)
goto out_release_nounlock;
/*
* Serialization between remove_inode_hugepages() and
* huge_add_to_page_cache() below happens through the
* hugetlb_fault_mutex_table that here must be hold by
* the caller.
*/
ret = huge_add_to_page_cache(page, mapping, idx);
if (ret)
goto out_release_nounlock;
}
ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
spin_lock(ptl);
/*
* Recheck the i_size after holding PT lock to make sure not
* to leave any page mapped (as page_mapped()) beyond the end
* of the i_size (remove_inode_hugepages() is strict about
* enforcing that). If we bail out here, we'll also leave a
* page in the radix tree in the vm_shared case beyond the end
* of the i_size, but remove_inode_hugepages() will take care
* of it as soon as we drop the hugetlb_fault_mutex_table.
*/
size = i_size_read(mapping->host) >> huge_page_shift(h);
ret = -EFAULT;
if (idx >= size)
goto out_release_unlock;
ret = -EEXIST;
if (!huge_pte_none(huge_ptep_get(dst_pte)))
goto out_release_unlock;
if (vm_shared) {
page_dup_rmap(page, true);
} else {
ClearPagePrivate(page);
hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
}
_dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
if (dst_vma->vm_flags & VM_WRITE)
_dst_pte = huge_pte_mkdirty(_dst_pte);
_dst_pte = pte_mkyoung(_dst_pte);
set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
(void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
dst_vma->vm_flags & VM_WRITE);
hugetlb_count_add(pages_per_huge_page(h), dst_mm);
/* No need to invalidate - it was non-present before */
update_mmu_cache(dst_vma, dst_addr, dst_pte);
spin_unlock(ptl);
if (vm_shared)
unlock_page(page);
ret = 0;
out:
return ret;
out_release_unlock:
spin_unlock(ptl);
if (vm_shared)
unlock_page(page);
out_release_nounlock:
put_page(page);
goto out;
}
| 7,675 |
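Two bounds were added: the destination index is checked against i_size before insertion, and checked again after the page-table lock is taken, because a concurrent truncate can shrink the file in between. Re-validating a limit once the lock that stabilises it is held is the general cure for this time-of-check/time-of-use race. A sketch with invented names:

#include <atomic>
#include <cstdint>
#include <mutex>

struct File {
    std::mutex lock;                 // protects the page mapping
    std::atomic<uint64_t> size{0};   // may shrink concurrently (truncate)
};

// Insert a page at `index` only if it is still inside the file once the
// lock that truncation also takes is held.
static bool insert_page(File& f, uint64_t index)
{
    if (index >= f.size.load())      // cheap early reject; may race with truncate
        return false;
    std::lock_guard<std::mutex> g(f.lock);
    if (index >= f.size.load())      // authoritative re-check under the lock
        return false;
    // ... safe to publish the page into the mapping here ...
    return true;
}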
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: cleanup_pathname(struct archive_write_disk *a)
{
char *dest, *src;
char separator = '\0';
dest = src = a->name;
if (*src == '\0') {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Invalid empty pathname");
return (ARCHIVE_FAILED);
}
#if defined(__CYGWIN__)
cleanup_pathname_win(a);
#endif
/* Skip leading '/'. */
if (*src == '/') {
if (a->flags & ARCHIVE_EXTRACT_SECURE_NOABSOLUTEPATHS) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
"Path is absolute");
return (ARCHIVE_FAILED);
}
separator = *src++;
}
/* Scan the pathname one element at a time. */
for (;;) {
/* src points to first char after '/' */
if (src[0] == '\0') {
break;
} else if (src[0] == '/') {
/* Found '//', ignore second one. */
src++;
continue;
} else if (src[0] == '.') {
if (src[1] == '\0') {
/* Ignore trailing '.' */
break;
} else if (src[1] == '/') {
/* Skip './'. */
src += 2;
continue;
} else if (src[1] == '.') {
if (src[2] == '/' || src[2] == '\0') {
/* Conditionally warn about '..' */
if (a->flags & ARCHIVE_EXTRACT_SECURE_NODOTDOT) {
archive_set_error(&a->archive,
ARCHIVE_ERRNO_MISC,
"Path contains '..'");
return (ARCHIVE_FAILED);
}
}
/*
* Note: Under no circumstances do we
* remove '..' elements. In
* particular, restoring
* '/foo/../bar/' should create the
* 'foo' dir as a side-effect.
*/
}
}
/* Copy current element, including leading '/'. */
if (separator)
*dest++ = '/';
while (*src != '\0' && *src != '/') {
*dest++ = *src++;
}
if (*src == '\0')
break;
/* Skip '/' separator. */
separator = *src++;
}
/*
* We've just copied zero or more path elements, not including the
* final '/'.
*/
if (dest == a->name) {
/*
* Nothing got copied. The path must have been something
* like '.' or '/' or './' or '/././././/./'.
*/
if (separator)
*dest++ = '/';
else
*dest++ = '.';
}
/* Terminate the result. */
*dest = '\0';
return (ARCHIVE_OK);
}
Commit Message: Fixes for Issue #745 and Issue #746 from Doran Moppert.
CWE ID: CWE-20 | cleanup_pathname(struct archive_write_disk *a)
cleanup_pathname_fsobj(char *path, int *error_number, struct archive_string *error_string, int flags)
{
char *dest, *src;
char separator = '\0';
dest = src = path;
if (*src == '\0') {
if (error_number) *error_number = ARCHIVE_ERRNO_MISC;
if (error_string)
archive_string_sprintf(error_string,
"Invalid empty pathname");
return (ARCHIVE_FAILED);
}
#if defined(__CYGWIN__)
cleanup_pathname_win(a);
#endif
/* Skip leading '/'. */
if (*src == '/') {
if (flags & ARCHIVE_EXTRACT_SECURE_NOABSOLUTEPATHS) {
if (error_number) *error_number = ARCHIVE_ERRNO_MISC;
if (error_string)
archive_string_sprintf(error_string,
"Path is absolute");
return (ARCHIVE_FAILED);
}
separator = *src++;
}
/* Scan the pathname one element at a time. */
for (;;) {
/* src points to first char after '/' */
if (src[0] == '\0') {
break;
} else if (src[0] == '/') {
/* Found '//', ignore second one. */
src++;
continue;
} else if (src[0] == '.') {
if (src[1] == '\0') {
/* Ignore trailing '.' */
break;
} else if (src[1] == '/') {
/* Skip './'. */
src += 2;
continue;
} else if (src[1] == '.') {
if (src[2] == '/' || src[2] == '\0') {
/* Conditionally warn about '..' */
if (flags & ARCHIVE_EXTRACT_SECURE_NODOTDOT) {
if (error_number) *error_number = ARCHIVE_ERRNO_MISC;
if (error_string)
archive_string_sprintf(error_string,
"Path contains '..'");
return (ARCHIVE_FAILED);
}
}
/*
* Note: Under no circumstances do we
* remove '..' elements. In
* particular, restoring
* '/foo/../bar/' should create the
* 'foo' dir as a side-effect.
*/
}
}
/* Copy current element, including leading '/'. */
if (separator)
*dest++ = '/';
while (*src != '\0' && *src != '/') {
*dest++ = *src++;
}
if (*src == '\0')
break;
/* Skip '/' separator. */
separator = *src++;
}
/*
* We've just copied zero or more path elements, not including the
* final '/'.
*/
if (dest == path) {
/*
* Nothing got copied. The path must have been something
* like '.' or '/' or './' or '/././././/./'.
*/
if (separator)
*dest++ = '/';
else
*dest++ = '.';
}
/* Terminate the result. */
*dest = '\0';
return (ARCHIVE_OK);
}
| 22,331 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: sequential_row(standard_display *dp, png_structp pp, png_infop pi,
PNG_CONST int iImage, PNG_CONST int iDisplay)
{
PNG_CONST int npasses = dp->npasses;
PNG_CONST int do_interlace = dp->do_interlace &&
dp->interlace_type == PNG_INTERLACE_ADAM7;
PNG_CONST png_uint_32 height = standard_height(pp, dp->id);
PNG_CONST png_uint_32 width = standard_width(pp, dp->id);
PNG_CONST png_store* ps = dp->ps;
int pass;
for (pass=0; pass<npasses; ++pass)
{
png_uint_32 y;
png_uint_32 wPass = PNG_PASS_COLS(width, pass);
for (y=0; y<height; ++y)
{
if (do_interlace)
{
/* wPass may be zero or this row may not be in this pass.
* png_read_row must not be called in either case.
*/
if (wPass > 0 && PNG_ROW_IN_INTERLACE_PASS(y, pass))
{
/* Read the row into a pair of temporary buffers, then do the
* merge here into the output rows.
*/
png_byte row[STANDARD_ROWMAX], display[STANDARD_ROWMAX];
/* The following aids (to some extent) error detection - we can
* see where png_read_row wrote. Use opposite values in row and
* display to make this easier. Don't use 0xff (which is used in
* the image write code to fill unused bits) or 0 (which is a
* likely value to overwrite unused bits with).
*/
memset(row, 0xc5, sizeof row);
memset(display, 0x5c, sizeof display);
png_read_row(pp, row, display);
if (iImage >= 0)
deinterlace_row(store_image_row(ps, pp, iImage, y), row,
dp->pixel_size, dp->w, pass);
if (iDisplay >= 0)
deinterlace_row(store_image_row(ps, pp, iDisplay, y), display,
dp->pixel_size, dp->w, pass);
}
}
else
png_read_row(pp,
iImage >= 0 ? store_image_row(ps, pp, iImage, y) : NULL,
iDisplay >= 0 ? store_image_row(ps, pp, iDisplay, y) : NULL);
}
}
/* And finish the read operation (only really necessary if the caller wants
* to find additional data in png_info from chunks after the last IDAT.)
*/
png_read_end(pp, pi);
}
Commit Message: DO NOT MERGE Update libpng to 1.6.20
BUG:23265085
Change-Id: I85199805636d771f3597b691b63bc0bf46084833
(cherry picked from commit bbe98b40cda082024b669fa508931042eed18f82)
CWE ID: | sequential_row(standard_display *dp, png_structp pp, png_infop pi,
const int iImage, const int iDisplay)
{
const int npasses = dp->npasses;
const int do_interlace = dp->do_interlace &&
dp->interlace_type == PNG_INTERLACE_ADAM7;
const png_uint_32 height = standard_height(pp, dp->id);
const png_uint_32 width = standard_width(pp, dp->id);
const png_store* ps = dp->ps;
int pass;
for (pass=0; pass<npasses; ++pass)
{
png_uint_32 y;
png_uint_32 wPass = PNG_PASS_COLS(width, pass);
for (y=0; y<height; ++y)
{
if (do_interlace)
{
/* wPass may be zero or this row may not be in this pass.
* png_read_row must not be called in either case.
*/
if (wPass > 0 && PNG_ROW_IN_INTERLACE_PASS(y, pass))
{
/* Read the row into a pair of temporary buffers, then do the
* merge here into the output rows.
*/
png_byte row[STANDARD_ROWMAX], display[STANDARD_ROWMAX];
/* The following aids (to some extent) error detection - we can
* see where png_read_row wrote. Use opposite values in row and
* display to make this easier. Don't use 0xff (which is used in
* the image write code to fill unused bits) or 0 (which is a
* likely value to overwrite unused bits with).
*/
memset(row, 0xc5, sizeof row);
memset(display, 0x5c, sizeof display);
png_read_row(pp, row, display);
if (iImage >= 0)
deinterlace_row(store_image_row(ps, pp, iImage, y), row,
dp->pixel_size, dp->w, pass, dp->littleendian);
if (iDisplay >= 0)
deinterlace_row(store_image_row(ps, pp, iDisplay, y), display,
dp->pixel_size, dp->w, pass, dp->littleendian);
}
}
else
png_read_row(pp,
iImage >= 0 ? store_image_row(ps, pp, iImage, y) : NULL,
iDisplay >= 0 ? store_image_row(ps, pp, iDisplay, y) : NULL);
}
}
/* And finish the read operation (only really necessary if the caller wants
* to find additional data in png_info from chunks after the last IDAT.)
*/
png_read_end(pp, pi);
}
| 26,181 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int tga_readheader(FILE *fp, unsigned int *bits_per_pixel,
unsigned int *width, unsigned int *height, int *flip_image)
{
int palette_size;
unsigned char tga[TGA_HEADER_SIZE];
unsigned char id_len, /*cmap_type,*/ image_type;
unsigned char pixel_depth, image_desc;
unsigned short /*cmap_index,*/ cmap_len, cmap_entry_size;
unsigned short /*x_origin, y_origin,*/ image_w, image_h;
if (!bits_per_pixel || !width || !height || !flip_image) {
return 0;
}
if (fread(tga, TGA_HEADER_SIZE, 1, fp) != 1) {
fprintf(stderr,
"\nError: fread return a number of element different from the expected.\n");
return 0 ;
}
id_len = tga[0];
/*cmap_type = tga[1];*/
image_type = tga[2];
/*cmap_index = get_ushort(&tga[3]);*/
cmap_len = get_ushort(&tga[5]);
cmap_entry_size = tga[7];
#if 0
x_origin = get_ushort(&tga[8]);
y_origin = get_ushort(&tga[10]);
#endif
image_w = get_ushort(&tga[12]);
image_h = get_ushort(&tga[14]);
pixel_depth = tga[16];
image_desc = tga[17];
*bits_per_pixel = (unsigned int)pixel_depth;
*width = (unsigned int)image_w;
*height = (unsigned int)image_h;
/* Ignore tga identifier, if present ... */
if (id_len) {
unsigned char *id = (unsigned char *) malloc(id_len);
if (id == 0) {
fprintf(stderr, "tga_readheader: memory out\n");
return 0;
}
if (!fread(id, id_len, 1, fp)) {
fprintf(stderr,
"\nError: fread return a number of element different from the expected.\n");
free(id);
return 0 ;
}
free(id);
}
    /* Test for compressed formats ... not yet supported ... */
if (image_type > 8) {
fprintf(stderr, "Sorry, compressed tga files are not currently supported.\n");
return 0 ;
}
*flip_image = !(image_desc & 32);
/* Palettized formats are not yet supported, skip over the palette, if present ... */
palette_size = cmap_len * (cmap_entry_size / 8);
if (palette_size > 0) {
fprintf(stderr, "File contains a palette - not yet supported.");
fseek(fp, palette_size, SEEK_CUR);
}
return 1;
}
Commit Message: tgatoimage(): avoid excessive memory allocation attempt, and fixes unaligned load (#995)
CWE ID: CWE-787 | static int tga_readheader(FILE *fp, unsigned int *bits_per_pixel,
unsigned int *width, unsigned int *height, int *flip_image)
{
int palette_size;
unsigned char tga[TGA_HEADER_SIZE];
unsigned char id_len, /*cmap_type,*/ image_type;
unsigned char pixel_depth, image_desc;
unsigned short /*cmap_index,*/ cmap_len, cmap_entry_size;
unsigned short /*x_origin, y_origin,*/ image_w, image_h;
if (!bits_per_pixel || !width || !height || !flip_image) {
return 0;
}
if (fread(tga, TGA_HEADER_SIZE, 1, fp) != 1) {
fprintf(stderr,
"\nError: fread return a number of element different from the expected.\n");
return 0 ;
}
id_len = tga[0];
/*cmap_type = tga[1];*/
image_type = tga[2];
/*cmap_index = get_tga_ushort(&tga[3]);*/
cmap_len = get_tga_ushort(&tga[5]);
cmap_entry_size = tga[7];
#if 0
x_origin = get_tga_ushort(&tga[8]);
y_origin = get_tga_ushort(&tga[10]);
#endif
image_w = get_tga_ushort(&tga[12]);
image_h = get_tga_ushort(&tga[14]);
pixel_depth = tga[16];
image_desc = tga[17];
*bits_per_pixel = (unsigned int)pixel_depth;
*width = (unsigned int)image_w;
*height = (unsigned int)image_h;
/* Ignore tga identifier, if present ... */
if (id_len) {
unsigned char *id = (unsigned char *) malloc(id_len);
if (id == 0) {
fprintf(stderr, "tga_readheader: memory out\n");
return 0;
}
if (!fread(id, id_len, 1, fp)) {
fprintf(stderr,
"\nError: fread return a number of element different from the expected.\n");
free(id);
return 0 ;
}
free(id);
}
    /* Test for compressed formats ... not yet supported ... */
if (image_type > 8) {
fprintf(stderr, "Sorry, compressed tga files are not currently supported.\n");
return 0 ;
}
*flip_image = !(image_desc & 32);
/* Palettized formats are not yet supported, skip over the palette, if present ... */
palette_size = cmap_len * (cmap_entry_size / 8);
if (palette_size > 0) {
fprintf(stderr, "File contains a palette - not yet supported.");
fseek(fp, palette_size, SEEK_CUR);
}
return 1;
}
| 3,678 |
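A note on the "fixes unaligned load" part of the commit above: the 16-bit header fields (cmap_len, image_w, image_h) sit at odd offsets inside the tga[] buffer, so they must not be read by casting to unsigned short*. The sketch below shows the usual portable technique; it is an illustration only, not OpenJPEG's actual get_tga_ushort() implementation.

#include <stdint.h>
#include <string.h>

/* Read a little-endian 16-bit value from an arbitrarily aligned buffer.
 * Assembling the value from individual bytes avoids dereferencing a
 * misaligned unsigned short pointer, which is undefined behaviour and
 * faults outright on some architectures. */
static uint16_t read_le16(const unsigned char *p)
{
    return (uint16_t)(p[0] | ((unsigned)p[1] << 8));
}

/* memcpy-based alternative: also alignment-safe, but it yields the value
 * in host byte order, so it only matches read_le16() on little-endian
 * machines. */
static uint16_t read_u16_host(const unsigned char *p)
{
    uint16_t v;
    memcpy(&v, p, sizeof(v));
    return v;
}

Applied to the header above, image_w would be read_le16(&tga[12]) and image_h would be read_le16(&tga[14]).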
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void DevToolsUIBindings::RecordEnumeratedHistogram(const std::string& name,
int sample,
int boundary_value) {
if (!(boundary_value >= 0 && boundary_value <= 100 && sample >= 0 &&
sample < boundary_value)) {
frontend_host_->BadMessageRecieved();
return;
}
if (name == kDevToolsActionTakenHistogram)
UMA_HISTOGRAM_ENUMERATION(name, sample, boundary_value);
else if (name == kDevToolsPanelShownHistogram)
UMA_HISTOGRAM_ENUMERATION(name, sample, boundary_value);
else
frontend_host_->BadMessageRecieved();
}
Commit Message: DevTools: move front-end URL handling to DevToolsUIBindingds
BUG=662859
Review-Url: https://codereview.chromium.org/2607833002
Cr-Commit-Position: refs/heads/master@{#440926}
CWE ID: CWE-200 | void DevToolsUIBindings::RecordEnumeratedHistogram(const std::string& name,
int sample,
int boundary_value) {
if (!frontend_host_)
return;
if (!(boundary_value >= 0 && boundary_value <= 100 && sample >= 0 &&
sample < boundary_value)) {
frontend_host_->BadMessageRecieved();
return;
}
if (name == kDevToolsActionTakenHistogram)
UMA_HISTOGRAM_ENUMERATION(name, sample, boundary_value);
else if (name == kDevToolsPanelShownHistogram)
UMA_HISTOGRAM_ENUMERATION(name, sample, boundary_value);
else
frontend_host_->BadMessageRecieved();
}
| 22,077 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int main(int argc, char **argv) {
FILE *infile = NULL;
vpx_codec_ctx_t codec;
vpx_codec_enc_cfg_t cfg;
int frame_count = 0;
vpx_image_t raw;
vpx_codec_err_t res;
VpxVideoInfo info = {0};
VpxVideoWriter *writer = NULL;
const VpxInterface *encoder = NULL;
const int fps = 30; // TODO(dkovalev) add command line argument
const int bitrate = 200; // kbit/s TODO(dkovalev) add command line argument
int keyframe_interval = 0;
const char *codec_arg = NULL;
const char *width_arg = NULL;
const char *height_arg = NULL;
const char *infile_arg = NULL;
const char *outfile_arg = NULL;
const char *keyframe_interval_arg = NULL;
exec_name = argv[0];
if (argc < 7)
die("Invalid number of arguments");
codec_arg = argv[1];
width_arg = argv[2];
height_arg = argv[3];
infile_arg = argv[4];
outfile_arg = argv[5];
keyframe_interval_arg = argv[6];
encoder = get_vpx_encoder_by_name(codec_arg);
if (!encoder)
die("Unsupported codec.");
info.codec_fourcc = encoder->fourcc;
info.frame_width = strtol(width_arg, NULL, 0);
info.frame_height = strtol(height_arg, NULL, 0);
info.time_base.numerator = 1;
info.time_base.denominator = fps;
if (info.frame_width <= 0 ||
info.frame_height <= 0 ||
(info.frame_width % 2) != 0 ||
(info.frame_height % 2) != 0) {
die("Invalid frame size: %dx%d", info.frame_width, info.frame_height);
}
if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, info.frame_width,
info.frame_height, 1)) {
die("Failed to allocate image.");
}
keyframe_interval = strtol(keyframe_interval_arg, NULL, 0);
if (keyframe_interval < 0)
die("Invalid keyframe interval value.");
printf("Using %s\n", vpx_codec_iface_name(encoder->interface()));
res = vpx_codec_enc_config_default(encoder->interface(), &cfg, 0);
if (res)
die_codec(&codec, "Failed to get default codec config.");
cfg.g_w = info.frame_width;
cfg.g_h = info.frame_height;
cfg.g_timebase.num = info.time_base.numerator;
cfg.g_timebase.den = info.time_base.denominator;
cfg.rc_target_bitrate = bitrate;
cfg.g_error_resilient = argc > 7 ? strtol(argv[7], NULL, 0) : 0;
writer = vpx_video_writer_open(outfile_arg, kContainerIVF, &info);
if (!writer)
die("Failed to open %s for writing.", outfile_arg);
if (!(infile = fopen(infile_arg, "rb")))
die("Failed to open %s for reading.", infile_arg);
if (vpx_codec_enc_init(&codec, encoder->interface(), &cfg, 0))
die_codec(&codec, "Failed to initialize encoder");
while (vpx_img_read(&raw, infile)) {
int flags = 0;
if (keyframe_interval > 0 && frame_count % keyframe_interval == 0)
flags |= VPX_EFLAG_FORCE_KF;
encode_frame(&codec, &raw, frame_count++, flags, writer);
}
encode_frame(&codec, NULL, -1, 0, writer); // flush the encoder
printf("\n");
fclose(infile);
printf("Processed %d frames.\n", frame_count);
vpx_img_free(&raw);
if (vpx_codec_destroy(&codec))
die_codec(&codec, "Failed to destroy codec.");
vpx_video_writer_close(writer);
return EXIT_SUCCESS;
}
Commit Message: Merge Conflict Fix CL to lmp-mr1-release for ag/849478
DO NOT MERGE - libvpx: Pull from upstream
Current HEAD: 7105df53d7dc13d5e575bc8df714ec8d1da36b06
BUG=23452792
Change-Id: Ic78176fc369e0bacc71d423e0e2e6075d004aaec
CWE ID: CWE-119 | int main(int argc, char **argv) {
FILE *infile = NULL;
vpx_codec_ctx_t codec;
vpx_codec_enc_cfg_t cfg;
int frame_count = 0;
vpx_image_t raw;
vpx_codec_err_t res;
VpxVideoInfo info = {0};
VpxVideoWriter *writer = NULL;
const VpxInterface *encoder = NULL;
const int fps = 30; // TODO(dkovalev) add command line argument
const int bitrate = 200; // kbit/s TODO(dkovalev) add command line argument
int keyframe_interval = 0;
const char *codec_arg = NULL;
const char *width_arg = NULL;
const char *height_arg = NULL;
const char *infile_arg = NULL;
const char *outfile_arg = NULL;
const char *keyframe_interval_arg = NULL;
exec_name = argv[0];
if (argc < 7)
die("Invalid number of arguments");
codec_arg = argv[1];
width_arg = argv[2];
height_arg = argv[3];
infile_arg = argv[4];
outfile_arg = argv[5];
keyframe_interval_arg = argv[6];
encoder = get_vpx_encoder_by_name(codec_arg);
if (!encoder)
die("Unsupported codec.");
info.codec_fourcc = encoder->fourcc;
info.frame_width = strtol(width_arg, NULL, 0);
info.frame_height = strtol(height_arg, NULL, 0);
info.time_base.numerator = 1;
info.time_base.denominator = fps;
if (info.frame_width <= 0 ||
info.frame_height <= 0 ||
(info.frame_width % 2) != 0 ||
(info.frame_height % 2) != 0) {
die("Invalid frame size: %dx%d", info.frame_width, info.frame_height);
}
if (!vpx_img_alloc(&raw, VPX_IMG_FMT_I420, info.frame_width,
info.frame_height, 1)) {
die("Failed to allocate image.");
}
keyframe_interval = strtol(keyframe_interval_arg, NULL, 0);
if (keyframe_interval < 0)
die("Invalid keyframe interval value.");
printf("Using %s\n", vpx_codec_iface_name(encoder->codec_interface()));
res = vpx_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
if (res)
die_codec(&codec, "Failed to get default codec config.");
cfg.g_w = info.frame_width;
cfg.g_h = info.frame_height;
cfg.g_timebase.num = info.time_base.numerator;
cfg.g_timebase.den = info.time_base.denominator;
cfg.rc_target_bitrate = bitrate;
cfg.g_error_resilient = argc > 7 ? strtol(argv[7], NULL, 0) : 0;
writer = vpx_video_writer_open(outfile_arg, kContainerIVF, &info);
if (!writer)
die("Failed to open %s for writing.", outfile_arg);
if (!(infile = fopen(infile_arg, "rb")))
die("Failed to open %s for reading.", infile_arg);
if (vpx_codec_enc_init(&codec, encoder->codec_interface(), &cfg, 0))
die_codec(&codec, "Failed to initialize encoder");
// Encode frames.
while (vpx_img_read(&raw, infile)) {
int flags = 0;
if (keyframe_interval > 0 && frame_count % keyframe_interval == 0)
flags |= VPX_EFLAG_FORCE_KF;
encode_frame(&codec, &raw, frame_count++, flags, writer);
}
// Flush encoder.
while (encode_frame(&codec, NULL, -1, 0, writer)) {};
printf("\n");
fclose(infile);
printf("Processed %d frames.\n", frame_count);
vpx_img_free(&raw);
if (vpx_codec_destroy(&codec))
die_codec(&codec, "Failed to destroy codec.");
vpx_video_writer_close(writer);
return EXIT_SUCCESS;
}
| 24,047 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void MemoryInstrumentation::GetVmRegionsForHeapProfiler(
RequestGlobalDumpCallback callback) {
const auto& coordinator = GetCoordinatorBindingForCurrentThread();
coordinator->GetVmRegionsForHeapProfiler(callback);
}
Commit Message: memory-infra: split up memory-infra coordinator service into two
This allows for heap profiler to use its own service with correct
capabilities and all other instances to use the existing coordinator
service.
Bug: 792028
Change-Id: I84e4ec71f5f1d00991c0516b1424ce7334bcd3cd
Reviewed-on: https://chromium-review.googlesource.com/836896
Commit-Queue: Lalit Maganti <lalitm@chromium.org>
Reviewed-by: Daniel Cheng <dcheng@chromium.org>
Reviewed-by: oysteine <oysteine@chromium.org>
Reviewed-by: Albert J. Wong <ajwong@chromium.org>
Reviewed-by: Hector Dearman <hjd@chromium.org>
Cr-Commit-Position: refs/heads/master@{#529059}
CWE ID: CWE-269 | void MemoryInstrumentation::GetVmRegionsForHeapProfiler(
| 29,028 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: unsigned char *ssl_add_clienthello_tlsext(SSL *s, unsigned char *buf, unsigned char *limit)
{
int extdatalen=0;
unsigned char *orig = buf;
unsigned char *ret = buf;
/* don't add extensions for SSLv3 unless doing secure renegotiation */
if (s->client_version == SSL3_VERSION
&& !s->s3->send_connection_binding)
return orig;
ret+=2;
if (ret>=limit) return NULL; /* this really never occurs, but ... */
if (s->tlsext_hostname != NULL)
{
/* Add TLS extension servername to the Client Hello message */
unsigned long size_str;
long lenmax;
/* check for enough space.
	   4 for the servername type and extension length
2 for servernamelist length
1 for the hostname type
2 for hostname length
+ hostname length
*/
if ((lenmax = limit - ret - 9) < 0
|| (size_str = strlen(s->tlsext_hostname)) > (unsigned long)lenmax)
return NULL;
/* extension type and length */
s2n(TLSEXT_TYPE_server_name,ret);
s2n(size_str+5,ret);
/* length of servername list */
s2n(size_str+3,ret);
/* hostname type, length and hostname */
*(ret++) = (unsigned char) TLSEXT_NAMETYPE_host_name;
s2n(size_str,ret);
memcpy(ret, s->tlsext_hostname, size_str);
ret+=size_str;
}
/* Add RI if renegotiating */
if (s->renegotiate)
{
int el;
if(!ssl_add_clienthello_renegotiate_ext(s, 0, &el, 0))
{
SSLerr(SSL_F_SSL_ADD_CLIENTHELLO_TLSEXT, ERR_R_INTERNAL_ERROR);
return NULL;
}
if((limit - ret - 4 - el) < 0) return NULL;
s2n(TLSEXT_TYPE_renegotiate,ret);
s2n(el,ret);
if(!ssl_add_clienthello_renegotiate_ext(s, ret, &el, el))
{
SSLerr(SSL_F_SSL_ADD_CLIENTHELLO_TLSEXT, ERR_R_INTERNAL_ERROR);
return NULL;
}
ret += el;
}
#ifndef OPENSSL_NO_SRP
/* Add SRP username if there is one */
if (s->srp_ctx.login != NULL)
{ /* Add TLS extension SRP username to the Client Hello message */
int login_len = strlen(s->srp_ctx.login);
if (login_len > 255 || login_len == 0)
{
SSLerr(SSL_F_SSL_ADD_CLIENTHELLO_TLSEXT, ERR_R_INTERNAL_ERROR);
return NULL;
}
/* check for enough space.
	   4 for the srp type and extension length
1 for the srp user identity
+ srp user identity length
*/
if ((limit - ret - 5 - login_len) < 0) return NULL;
/* fill in the extension */
s2n(TLSEXT_TYPE_srp,ret);
s2n(login_len+1,ret);
(*ret++) = (unsigned char) login_len;
memcpy(ret, s->srp_ctx.login, login_len);
ret+=login_len;
}
#endif
#ifndef OPENSSL_NO_EC
if (s->tlsext_ecpointformatlist != NULL)
{
/* Add TLS extension ECPointFormats to the ClientHello message */
long lenmax;
if ((lenmax = limit - ret - 5) < 0) return NULL;
if (s->tlsext_ecpointformatlist_length > (unsigned long)lenmax) return NULL;
if (s->tlsext_ecpointformatlist_length > 255)
{
SSLerr(SSL_F_SSL_ADD_CLIENTHELLO_TLSEXT, ERR_R_INTERNAL_ERROR);
return NULL;
}
s2n(TLSEXT_TYPE_ec_point_formats,ret);
s2n(s->tlsext_ecpointformatlist_length + 1,ret);
*(ret++) = (unsigned char) s->tlsext_ecpointformatlist_length;
memcpy(ret, s->tlsext_ecpointformatlist, s->tlsext_ecpointformatlist_length);
ret+=s->tlsext_ecpointformatlist_length;
}
if (s->tlsext_ellipticcurvelist != NULL)
{
/* Add TLS extension EllipticCurves to the ClientHello message */
long lenmax;
if ((lenmax = limit - ret - 6) < 0) return NULL;
if (s->tlsext_ellipticcurvelist_length > (unsigned long)lenmax) return NULL;
if (s->tlsext_ellipticcurvelist_length > 65532)
{
SSLerr(SSL_F_SSL_ADD_CLIENTHELLO_TLSEXT, ERR_R_INTERNAL_ERROR);
return NULL;
}
s2n(TLSEXT_TYPE_elliptic_curves,ret);
s2n(s->tlsext_ellipticcurvelist_length + 2, ret);
/* NB: draft-ietf-tls-ecc-12.txt uses a one-byte prefix for
* elliptic_curve_list, but the examples use two bytes.
* http://www1.ietf.org/mail-archive/web/tls/current/msg00538.html
* resolves this to two bytes.
*/
s2n(s->tlsext_ellipticcurvelist_length, ret);
memcpy(ret, s->tlsext_ellipticcurvelist, s->tlsext_ellipticcurvelist_length);
ret+=s->tlsext_ellipticcurvelist_length;
}
#endif /* OPENSSL_NO_EC */
if (!(SSL_get_options(s) & SSL_OP_NO_TICKET))
{
int ticklen;
if (!s->new_session && s->session && s->session->tlsext_tick)
ticklen = s->session->tlsext_ticklen;
else if (s->session && s->tlsext_session_ticket &&
s->tlsext_session_ticket->data)
{
ticklen = s->tlsext_session_ticket->length;
s->session->tlsext_tick = OPENSSL_malloc(ticklen);
if (!s->session->tlsext_tick)
return NULL;
memcpy(s->session->tlsext_tick,
s->tlsext_session_ticket->data,
ticklen);
s->session->tlsext_ticklen = ticklen;
}
else
ticklen = 0;
if (ticklen == 0 && s->tlsext_session_ticket &&
s->tlsext_session_ticket->data == NULL)
goto skip_ext;
/* Check for enough room 2 for extension type, 2 for len
* rest for ticket
*/
if ((long)(limit - ret - 4 - ticklen) < 0) return NULL;
s2n(TLSEXT_TYPE_session_ticket,ret);
s2n(ticklen,ret);
if (ticklen)
{
memcpy(ret, s->session->tlsext_tick, ticklen);
ret += ticklen;
}
}
skip_ext:
if (TLS1_get_client_version(s) >= TLS1_2_VERSION)
{
if ((size_t)(limit - ret) < sizeof(tls12_sigalgs) + 6)
return NULL;
s2n(TLSEXT_TYPE_signature_algorithms,ret);
s2n(sizeof(tls12_sigalgs) + 2, ret);
s2n(sizeof(tls12_sigalgs), ret);
memcpy(ret, tls12_sigalgs, sizeof(tls12_sigalgs));
ret += sizeof(tls12_sigalgs);
}
#ifdef TLSEXT_TYPE_opaque_prf_input
if (s->s3->client_opaque_prf_input != NULL &&
s->version != DTLS1_VERSION)
{
size_t col = s->s3->client_opaque_prf_input_len;
if ((long)(limit - ret - 6 - col < 0))
return NULL;
if (col > 0xFFFD) /* can't happen */
return NULL;
s2n(TLSEXT_TYPE_opaque_prf_input, ret);
s2n(col + 2, ret);
s2n(col, ret);
memcpy(ret, s->s3->client_opaque_prf_input, col);
ret += col;
}
#endif
if (s->tlsext_status_type == TLSEXT_STATUSTYPE_ocsp &&
s->version != DTLS1_VERSION)
{
int i;
long extlen, idlen, itmp;
OCSP_RESPID *id;
idlen = 0;
for (i = 0; i < sk_OCSP_RESPID_num(s->tlsext_ocsp_ids); i++)
{
id = sk_OCSP_RESPID_value(s->tlsext_ocsp_ids, i);
itmp = i2d_OCSP_RESPID(id, NULL);
if (itmp <= 0)
return NULL;
idlen += itmp + 2;
}
if (s->tlsext_ocsp_exts)
{
extlen = i2d_X509_EXTENSIONS(s->tlsext_ocsp_exts, NULL);
if (extlen < 0)
return NULL;
}
else
extlen = 0;
if ((long)(limit - ret - 7 - extlen - idlen) < 0) return NULL;
s2n(TLSEXT_TYPE_status_request, ret);
if (extlen + idlen > 0xFFF0)
return NULL;
s2n(extlen + idlen + 5, ret);
*(ret++) = TLSEXT_STATUSTYPE_ocsp;
s2n(idlen, ret);
for (i = 0; i < sk_OCSP_RESPID_num(s->tlsext_ocsp_ids); i++)
{
/* save position of id len */
unsigned char *q = ret;
id = sk_OCSP_RESPID_value(s->tlsext_ocsp_ids, i);
/* skip over id len */
ret += 2;
itmp = i2d_OCSP_RESPID(id, &ret);
/* write id len */
s2n(itmp, q);
}
s2n(extlen, ret);
if (extlen > 0)
i2d_X509_EXTENSIONS(s->tlsext_ocsp_exts, &ret);
}
#ifndef OPENSSL_NO_HEARTBEATS
/* Add Heartbeat extension */
if ((limit - ret - 4 - 1) < 0)
return NULL;
s2n(TLSEXT_TYPE_heartbeat,ret);
s2n(1,ret);
/* Set mode:
* 1: peer may send requests
* 2: peer not allowed to send requests
*/
if (s->tlsext_heartbeat & SSL_TLSEXT_HB_DONT_RECV_REQUESTS)
*(ret++) = SSL_TLSEXT_HB_DONT_SEND_REQUESTS;
else
*(ret++) = SSL_TLSEXT_HB_ENABLED;
#endif
#ifndef OPENSSL_NO_NEXTPROTONEG
if (s->ctx->next_proto_select_cb && !s->s3->tmp.finish_md_len)
{
		/* The client advertises an empty extension to indicate its
* support for Next Protocol Negotiation */
if (limit - ret - 4 < 0)
return NULL;
s2n(TLSEXT_TYPE_next_proto_neg,ret);
s2n(0,ret);
}
#endif
#ifndef OPENSSL_NO_SRTP
if(SSL_get_srtp_profiles(s))
{
int el;
ssl_add_clienthello_use_srtp_ext(s, 0, &el, 0);
if((limit - ret - 4 - el) < 0) return NULL;
s2n(TLSEXT_TYPE_use_srtp,ret);
s2n(el,ret);
if(ssl_add_clienthello_use_srtp_ext(s, ret, &el, el))
{
SSLerr(SSL_F_SSL_ADD_CLIENTHELLO_TLSEXT, ERR_R_INTERNAL_ERROR);
return NULL;
}
ret += el;
}
#endif
/* Add padding to workaround bugs in F5 terminators.
* See https://tools.ietf.org/html/draft-agl-tls-padding-03
*
* NB: because this code works out the length of all existing
* extensions it MUST always appear last.
*/
if (s->options & SSL_OP_TLSEXT_PADDING)
{
int hlen = ret - (unsigned char *)s->init_buf->data;
/* The code in s23_clnt.c to build ClientHello messages
* includes the 5-byte record header in the buffer, while
* the code in s3_clnt.c does not.
*/
if (s->state == SSL23_ST_CW_CLNT_HELLO_A)
hlen -= 5;
if (hlen > 0xff && hlen < 0x200)
{
hlen = 0x200 - hlen;
if (hlen >= 4)
hlen -= 4;
else
hlen = 0;
s2n(TLSEXT_TYPE_padding, ret);
s2n(hlen, ret);
memset(ret, 0, hlen);
ret += hlen;
}
}
if ((extdatalen = ret-orig-2)== 0)
return orig;
s2n(extdatalen, orig);
return ret;
}
Commit Message:
CWE ID: CWE-20 | unsigned char *ssl_add_clienthello_tlsext(SSL *s, unsigned char *buf, unsigned char *limit)
{
int extdatalen=0;
unsigned char *orig = buf;
unsigned char *ret = buf;
/* don't add extensions for SSLv3 unless doing secure renegotiation */
if (s->client_version == SSL3_VERSION
&& !s->s3->send_connection_binding)
return orig;
ret+=2;
if (ret>=limit) return NULL; /* this really never occurs, but ... */
if (s->tlsext_hostname != NULL)
{
/* Add TLS extension servername to the Client Hello message */
unsigned long size_str;
long lenmax;
/* check for enough space.
	   4 for the servername type and extension length
2 for servernamelist length
1 for the hostname type
2 for hostname length
+ hostname length
*/
if ((lenmax = limit - ret - 9) < 0
|| (size_str = strlen(s->tlsext_hostname)) > (unsigned long)lenmax)
return NULL;
/* extension type and length */
s2n(TLSEXT_TYPE_server_name,ret);
s2n(size_str+5,ret);
/* length of servername list */
s2n(size_str+3,ret);
/* hostname type, length and hostname */
*(ret++) = (unsigned char) TLSEXT_NAMETYPE_host_name;
s2n(size_str,ret);
memcpy(ret, s->tlsext_hostname, size_str);
ret+=size_str;
}
/* Add RI if renegotiating */
if (s->renegotiate)
{
int el;
if(!ssl_add_clienthello_renegotiate_ext(s, 0, &el, 0))
{
SSLerr(SSL_F_SSL_ADD_CLIENTHELLO_TLSEXT, ERR_R_INTERNAL_ERROR);
return NULL;
}
if((limit - ret - 4 - el) < 0) return NULL;
s2n(TLSEXT_TYPE_renegotiate,ret);
s2n(el,ret);
if(!ssl_add_clienthello_renegotiate_ext(s, ret, &el, el))
{
SSLerr(SSL_F_SSL_ADD_CLIENTHELLO_TLSEXT, ERR_R_INTERNAL_ERROR);
return NULL;
}
ret += el;
}
#ifndef OPENSSL_NO_SRP
/* Add SRP username if there is one */
if (s->srp_ctx.login != NULL)
{ /* Add TLS extension SRP username to the Client Hello message */
int login_len = strlen(s->srp_ctx.login);
if (login_len > 255 || login_len == 0)
{
SSLerr(SSL_F_SSL_ADD_CLIENTHELLO_TLSEXT, ERR_R_INTERNAL_ERROR);
return NULL;
}
/* check for enough space.
	   4 for the srp type and extension length
1 for the srp user identity
+ srp user identity length
*/
if ((limit - ret - 5 - login_len) < 0) return NULL;
/* fill in the extension */
s2n(TLSEXT_TYPE_srp,ret);
s2n(login_len+1,ret);
(*ret++) = (unsigned char) login_len;
memcpy(ret, s->srp_ctx.login, login_len);
ret+=login_len;
}
#endif
#ifndef OPENSSL_NO_EC
if (s->tlsext_ecpointformatlist != NULL)
{
/* Add TLS extension ECPointFormats to the ClientHello message */
long lenmax;
if ((lenmax = limit - ret - 5) < 0) return NULL;
if (s->tlsext_ecpointformatlist_length > (unsigned long)lenmax) return NULL;
if (s->tlsext_ecpointformatlist_length > 255)
{
SSLerr(SSL_F_SSL_ADD_CLIENTHELLO_TLSEXT, ERR_R_INTERNAL_ERROR);
return NULL;
}
s2n(TLSEXT_TYPE_ec_point_formats,ret);
s2n(s->tlsext_ecpointformatlist_length + 1,ret);
*(ret++) = (unsigned char) s->tlsext_ecpointformatlist_length;
memcpy(ret, s->tlsext_ecpointformatlist, s->tlsext_ecpointformatlist_length);
ret+=s->tlsext_ecpointformatlist_length;
}
if (s->tlsext_ellipticcurvelist != NULL)
{
/* Add TLS extension EllipticCurves to the ClientHello message */
long lenmax;
if ((lenmax = limit - ret - 6) < 0) return NULL;
if (s->tlsext_ellipticcurvelist_length > (unsigned long)lenmax) return NULL;
if (s->tlsext_ellipticcurvelist_length > 65532)
{
SSLerr(SSL_F_SSL_ADD_CLIENTHELLO_TLSEXT, ERR_R_INTERNAL_ERROR);
return NULL;
}
s2n(TLSEXT_TYPE_elliptic_curves,ret);
s2n(s->tlsext_ellipticcurvelist_length + 2, ret);
/* NB: draft-ietf-tls-ecc-12.txt uses a one-byte prefix for
* elliptic_curve_list, but the examples use two bytes.
* http://www1.ietf.org/mail-archive/web/tls/current/msg00538.html
* resolves this to two bytes.
*/
s2n(s->tlsext_ellipticcurvelist_length, ret);
memcpy(ret, s->tlsext_ellipticcurvelist, s->tlsext_ellipticcurvelist_length);
ret+=s->tlsext_ellipticcurvelist_length;
}
#endif /* OPENSSL_NO_EC */
if (!(SSL_get_options(s) & SSL_OP_NO_TICKET))
{
int ticklen;
if (!s->new_session && s->session && s->session->tlsext_tick)
ticklen = s->session->tlsext_ticklen;
else if (s->session && s->tlsext_session_ticket &&
s->tlsext_session_ticket->data)
{
ticklen = s->tlsext_session_ticket->length;
s->session->tlsext_tick = OPENSSL_malloc(ticklen);
if (!s->session->tlsext_tick)
return NULL;
memcpy(s->session->tlsext_tick,
s->tlsext_session_ticket->data,
ticklen);
s->session->tlsext_ticklen = ticklen;
}
else
ticklen = 0;
if (ticklen == 0 && s->tlsext_session_ticket &&
s->tlsext_session_ticket->data == NULL)
goto skip_ext;
/* Check for enough room 2 for extension type, 2 for len
* rest for ticket
*/
if ((long)(limit - ret - 4 - ticklen) < 0) return NULL;
s2n(TLSEXT_TYPE_session_ticket,ret);
s2n(ticklen,ret);
if (ticklen)
{
memcpy(ret, s->session->tlsext_tick, ticklen);
ret += ticklen;
}
}
skip_ext:
if (TLS1_get_client_version(s) >= TLS1_2_VERSION)
{
if ((size_t)(limit - ret) < sizeof(tls12_sigalgs) + 6)
return NULL;
s2n(TLSEXT_TYPE_signature_algorithms,ret);
s2n(sizeof(tls12_sigalgs) + 2, ret);
s2n(sizeof(tls12_sigalgs), ret);
memcpy(ret, tls12_sigalgs, sizeof(tls12_sigalgs));
ret += sizeof(tls12_sigalgs);
}
#ifdef TLSEXT_TYPE_opaque_prf_input
if (s->s3->client_opaque_prf_input != NULL &&
s->version != DTLS1_VERSION)
{
size_t col = s->s3->client_opaque_prf_input_len;
if ((long)(limit - ret - 6 - col < 0))
return NULL;
if (col > 0xFFFD) /* can't happen */
return NULL;
s2n(TLSEXT_TYPE_opaque_prf_input, ret);
s2n(col + 2, ret);
s2n(col, ret);
memcpy(ret, s->s3->client_opaque_prf_input, col);
ret += col;
}
#endif
if (s->tlsext_status_type == TLSEXT_STATUSTYPE_ocsp &&
s->version != DTLS1_VERSION)
{
int i;
long extlen, idlen, itmp;
OCSP_RESPID *id;
idlen = 0;
for (i = 0; i < sk_OCSP_RESPID_num(s->tlsext_ocsp_ids); i++)
{
id = sk_OCSP_RESPID_value(s->tlsext_ocsp_ids, i);
itmp = i2d_OCSP_RESPID(id, NULL);
if (itmp <= 0)
return NULL;
idlen += itmp + 2;
}
if (s->tlsext_ocsp_exts)
{
extlen = i2d_X509_EXTENSIONS(s->tlsext_ocsp_exts, NULL);
if (extlen < 0)
return NULL;
}
else
extlen = 0;
if ((long)(limit - ret - 7 - extlen - idlen) < 0) return NULL;
s2n(TLSEXT_TYPE_status_request, ret);
if (extlen + idlen > 0xFFF0)
return NULL;
s2n(extlen + idlen + 5, ret);
*(ret++) = TLSEXT_STATUSTYPE_ocsp;
s2n(idlen, ret);
for (i = 0; i < sk_OCSP_RESPID_num(s->tlsext_ocsp_ids); i++)
{
/* save position of id len */
unsigned char *q = ret;
id = sk_OCSP_RESPID_value(s->tlsext_ocsp_ids, i);
/* skip over id len */
ret += 2;
itmp = i2d_OCSP_RESPID(id, &ret);
/* write id len */
s2n(itmp, q);
}
s2n(extlen, ret);
if (extlen > 0)
i2d_X509_EXTENSIONS(s->tlsext_ocsp_exts, &ret);
}
#ifndef OPENSSL_NO_HEARTBEATS
/* Add Heartbeat extension */
if ((limit - ret - 4 - 1) < 0)
return NULL;
s2n(TLSEXT_TYPE_heartbeat,ret);
s2n(1,ret);
/* Set mode:
* 1: peer may send requests
* 2: peer not allowed to send requests
*/
if (s->tlsext_heartbeat & SSL_TLSEXT_HB_DONT_RECV_REQUESTS)
*(ret++) = SSL_TLSEXT_HB_DONT_SEND_REQUESTS;
else
*(ret++) = SSL_TLSEXT_HB_ENABLED;
#endif
#ifndef OPENSSL_NO_NEXTPROTONEG
if (s->ctx->next_proto_select_cb && !s->s3->tmp.finish_md_len)
{
		/* The client advertises an empty extension to indicate its
* support for Next Protocol Negotiation */
if (limit - ret - 4 < 0)
return NULL;
s2n(TLSEXT_TYPE_next_proto_neg,ret);
s2n(0,ret);
}
#endif
#ifndef OPENSSL_NO_SRTP
if(SSL_IS_DTLS(s) && SSL_get_srtp_profiles(s))
{
int el;
ssl_add_clienthello_use_srtp_ext(s, 0, &el, 0);
if((limit - ret - 4 - el) < 0) return NULL;
s2n(TLSEXT_TYPE_use_srtp,ret);
s2n(el,ret);
if(ssl_add_clienthello_use_srtp_ext(s, ret, &el, el))
{
SSLerr(SSL_F_SSL_ADD_CLIENTHELLO_TLSEXT, ERR_R_INTERNAL_ERROR);
return NULL;
}
ret += el;
}
#endif
/* Add padding to workaround bugs in F5 terminators.
* See https://tools.ietf.org/html/draft-agl-tls-padding-03
*
* NB: because this code works out the length of all existing
* extensions it MUST always appear last.
*/
if (s->options & SSL_OP_TLSEXT_PADDING)
{
int hlen = ret - (unsigned char *)s->init_buf->data;
/* The code in s23_clnt.c to build ClientHello messages
* includes the 5-byte record header in the buffer, while
* the code in s3_clnt.c does not.
*/
if (s->state == SSL23_ST_CW_CLNT_HELLO_A)
hlen -= 5;
if (hlen > 0xff && hlen < 0x200)
{
hlen = 0x200 - hlen;
if (hlen >= 4)
hlen -= 4;
else
hlen = 0;
s2n(TLSEXT_TYPE_padding, ret);
s2n(hlen, ret);
memset(ret, 0, hlen);
ret += hlen;
}
}
if ((extdatalen = ret-orig-2)== 0)
return orig;
s2n(extdatalen, orig);
return ret;
}
| 17,258 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: GahpServer::Reaper(Service *,int pid,int status)
{
/* This should be much better.... for now, if our Gahp Server
goes away for any reason, we EXCEPT. */
GahpServer *dead_server = NULL;
GahpServer *next_server = NULL;
GahpServersById.startIterations();
while ( GahpServersById.iterate( next_server ) != 0 ) {
if ( pid == next_server->m_gahp_pid ) {
dead_server = next_server;
break;
}
}
std::string buf;
sprintf( buf, "Gahp Server (pid=%d) ", pid );
if( WIFSIGNALED(status) ) {
sprintf_cat( buf, "died due to %s",
daemonCore->GetExceptionString(status) );
} else {
sprintf_cat( buf, "exited with status %d", WEXITSTATUS(status) );
}
if ( dead_server ) {
sprintf_cat( buf, " unexpectedly" );
EXCEPT( buf.c_str() );
} else {
sprintf_cat( buf, "\n" );
dprintf( D_ALWAYS, buf.c_str() );
}
}
Commit Message:
CWE ID: CWE-134 | GahpServer::Reaper(Service *,int pid,int status)
{
/* This should be much better.... for now, if our Gahp Server
goes away for any reason, we EXCEPT. */
GahpServer *dead_server = NULL;
GahpServer *next_server = NULL;
GahpServersById.startIterations();
while ( GahpServersById.iterate( next_server ) != 0 ) {
if ( pid == next_server->m_gahp_pid ) {
dead_server = next_server;
break;
}
}
std::string buf;
sprintf( buf, "Gahp Server (pid=%d) ", pid );
if( WIFSIGNALED(status) ) {
sprintf_cat( buf, "died due to %s",
daemonCore->GetExceptionString(status) );
} else {
sprintf_cat( buf, "exited with status %d", WEXITSTATUS(status) );
}
if ( dead_server ) {
sprintf_cat( buf, " unexpectedly" );
EXCEPT( "%s", buf.c_str() );
} else {
sprintf_cat( buf, "\n" );
dprintf( D_ALWAYS, "%s", buf.c_str() );
}
}
| 20,347 |
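The record above carries no commit message, but the diff is the textbook CWE-134 repair: a variable string (here one that embeds signal/exit text) was passed where a printf-style format is expected, and the fix pins the format to a constant "%s". A minimal stand-alone illustration of why that matters, not Condor code:

#include <stdio.h>

/* msg may contain data the program does not fully control. */
void log_event_vulnerable(const char *msg)
{
    /* If msg contains "%x" or "%n", printf walks variadic arguments that
     * were never passed, leaking stack contents or writing through a
     * bogus pointer. */
    printf(msg);
}

void log_event_fixed(const char *msg)
{
    /* The format string is a literal, so msg is only ever treated as data. */
    printf("%s", msg);
}

Compilers flag the vulnerable form with -Wformat-security, which is an easy way to audit a codebase for the same pattern.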
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int main(void)
{
FILE *f;
char *tmpname;
f = xfmkstemp(&tmpname, NULL);
unlink(tmpname);
free(tmpname);
fclose(f);
return EXIT_FAILURE;
}
Commit Message: chsh, chfn, vipw: fix filenames collision
The utils when compiled WITHOUT libuser then mkostemp()ing
"/etc/%s.XXXXXX" where the filename prefix is argv[0] basename.
An attacker could repeatedly execute the util with modified argv[0]
and after many many attempts mkostemp() may generate suffix which
makes sense. The result maybe temporary file with name like rc.status
ld.so.preload or krb5.keytab, etc.
Note that distros usually use libuser based ch{sh,fn} or stuff from
shadow-utils.
It's probably very minor security bug.
Addresses: CVE-2015-5224
Signed-off-by: Karel Zak <kzak@redhat.com>
CWE ID: CWE-264 | int main(void)
{
FILE *f;
char *tmpname;
f = xfmkstemp(&tmpname, NULL, "test");
unlink(tmpname);
free(tmpname);
fclose(f);
return EXIT_FAILURE;
}
| 4,727 |
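To make the collision concrete: everything in the generated name except the six random characters comes from argv[0], which whoever exec()s the binary chooses freely. The sketch below contrasts the two approaches; it is illustrative only (the helper names are made up, it calls mkostemp() directly rather than util-linux's xfmkstemp() wrapper, and mkostemp() assumes a glibc/BSD libc).

#define _GNU_SOURCE
#include <fcntl.h>
#include <libgen.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Unsafe: the prefix of the temp file created in /etc is derived from
 * argv[0], so a caller of the setuid binary controls most of the name. */
static int make_tmp_from_argv0(const char *argv0)
{
    char prog[64], path[PATH_MAX];
    snprintf(prog, sizeof(prog), "%s", argv0);
    snprintf(path, sizeof(path), "/etc/%s.XXXXXX", basename(prog));
    return mkostemp(path, O_CLOEXEC);
}

/* Safer: the prefix is a compile-time constant (mirroring the "test"
 * prefix passed to xfmkstemp() in the fixed function above), so repeated
 * runs can only ever produce /etc/test.XXXXXX-style names. */
static int make_tmp_fixed_prefix(void)
{
    char path[] = "/etc/test.XXXXXX";
    return mkostemp(path, O_CLOEXEC);
}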
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void WebContentsImpl::AttachInterstitialPage(
InterstitialPageImpl* interstitial_page) {
DCHECK(interstitial_page);
render_manager_.set_interstitial_page(interstitial_page);
FOR_EACH_OBSERVER(WebContentsObserver, observers_,
DidAttachInterstitialPage());
}
Commit Message: Cancel JavaScript dialogs when an interstitial appears.
BUG=295695
TEST=See bug for repro steps.
Review URL: https://chromiumcodereview.appspot.com/24360011
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@225026 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: | void WebContentsImpl::AttachInterstitialPage(
InterstitialPageImpl* interstitial_page) {
DCHECK(interstitial_page);
render_manager_.set_interstitial_page(interstitial_page);
// Cancel any visible dialogs so that they don't interfere with the
// interstitial.
if (dialog_manager_)
dialog_manager_->CancelActiveAndPendingDialogs(this);
FOR_EACH_OBSERVER(WebContentsObserver, observers_,
DidAttachInterstitialPage());
}
| 28,252 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
{
down_write(&tty->termios_rwsem);
tty->termios.c_line = num;
up_write(&tty->termios_rwsem);
}
Commit Message: tty: Prevent ldisc drivers from re-using stale tty fields
Line discipline drivers may mistakenly misuse ldisc-related fields
when initializing. For example, a failure to initialize tty->receive_room
in the N_GIGASET_M101 line discipline was recently found and fixed [1].
Now, the N_X25 line discipline has been discovered accessing the previous
line discipline's already-freed private data [2].
Harden the ldisc interface against misuse by initializing revelant
tty fields before instancing the new line discipline.
[1]
commit fd98e9419d8d622a4de91f76b306af6aa627aa9c
Author: Tilman Schmidt <tilman@imap.cc>
Date: Tue Jul 14 00:37:13 2015 +0200
isdn/gigaset: reset tty->receive_room when attaching ser_gigaset
[2] Report from Sasha Levin <sasha.levin@oracle.com>
[ 634.336761] ==================================================================
[ 634.338226] BUG: KASAN: use-after-free in x25_asy_open_tty+0x13d/0x490 at addr ffff8800a743efd0
[ 634.339558] Read of size 4 by task syzkaller_execu/8981
[ 634.340359] =============================================================================
[ 634.341598] BUG kmalloc-512 (Not tainted): kasan: bad access detected
...
[ 634.405018] Call Trace:
[ 634.405277] dump_stack (lib/dump_stack.c:52)
[ 634.405775] print_trailer (mm/slub.c:655)
[ 634.406361] object_err (mm/slub.c:662)
[ 634.406824] kasan_report_error (mm/kasan/report.c:138 mm/kasan/report.c:236)
[ 634.409581] __asan_report_load4_noabort (mm/kasan/report.c:279)
[ 634.411355] x25_asy_open_tty (drivers/net/wan/x25_asy.c:559 (discriminator 1))
[ 634.413997] tty_ldisc_open.isra.2 (drivers/tty/tty_ldisc.c:447)
[ 634.414549] tty_set_ldisc (drivers/tty/tty_ldisc.c:567)
[ 634.415057] tty_ioctl (drivers/tty/tty_io.c:2646 drivers/tty/tty_io.c:2879)
[ 634.423524] do_vfs_ioctl (fs/ioctl.c:43 fs/ioctl.c:607)
[ 634.427491] SyS_ioctl (fs/ioctl.c:622 fs/ioctl.c:613)
[ 634.427945] entry_SYSCALL_64_fastpath (arch/x86/entry/entry_64.S:188)
Cc: Tilman Schmidt <tilman@imap.cc>
Cc: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Peter Hurley <peter@hurleysoftware.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CWE ID: CWE-200 | static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
{
down_write(&tty->termios_rwsem);
tty->termios.c_line = num;
up_write(&tty->termios_rwsem);
tty->disc_data = NULL;
tty->receive_room = 0;
}
| 13,972 |
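The hardening idea in that commit — scrub the per-owner fields of a shared structure before the next owner attaches, rather than trusting every owner to initialize them — generalizes beyond ttys. A toy illustration of the pattern (not kernel code; names invented):

#include <stddef.h>

struct channel {
    void *owner_data;     /* private state of whichever protocol currently owns the channel */
    size_t receive_room;  /* flow-control hint set by the current owner */
};

/* Called after the old owner's teardown and before the new owner's open.
 * Clearing the fields here means a new owner that reads them before
 * initializing sees NULL/0 instead of the previous owner's stale (and
 * possibly already freed) state. */
static void channel_reset_for_new_owner(struct channel *ch)
{
    ch->owner_data = NULL;
    ch->receive_room = 0;
}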
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static void WillDispatchTabUpdatedEvent(WebContents* contents,
Profile* profile,
const Extension* extension,
ListValue* event_args) {
DictionaryValue* tab_value = ExtensionTabUtil::CreateTabValue(
contents, extension);
}
Commit Message: Do not pass URLs in onUpdated events to extensions unless they have the
"tabs" permission.
BUG=168442
Review URL: https://chromiumcodereview.appspot.com/11824004
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@176406 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-264 | static void WillDispatchTabUpdatedEvent(WebContents* contents,
static void WillDispatchTabUpdatedEvent(
WebContents* contents,
const DictionaryValue* changed_properties,
Profile* profile,
const Extension* extension,
ListValue* event_args) {
// Overwrite the second argument with the appropriate properties dictionary,
// depending on extension permissions.
DictionaryValue* properties_value = changed_properties->DeepCopy();
ExtensionTabUtil::ScrubTabValueForExtension(contents, extension,
properties_value);
event_args->Set(1, properties_value);
// Overwrite the third arg with our tab value as seen by this extension.
DictionaryValue* tab_value = ExtensionTabUtil::CreateTabValue(
contents, extension);
}
| 13,475 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: my_object_rec_arrays (MyObject *obj, GPtrArray *in, GPtrArray **ret, GError **error)
{
char **strs;
GArray *ints;
guint v_UINT;
if (in->len != 2)
{
g_set_error (error,
MY_OBJECT_ERROR,
MY_OBJECT_ERROR_FOO,
"invalid array len");
return FALSE;
}
strs = g_ptr_array_index (in, 0);
if (!*strs || strcmp (*strs, "foo"))
{
g_set_error (error,
MY_OBJECT_ERROR,
MY_OBJECT_ERROR_FOO,
"invalid string 0");
return FALSE;
}
strs++;
if (!*strs || strcmp (*strs, "bar"))
{
g_set_error (error,
MY_OBJECT_ERROR,
MY_OBJECT_ERROR_FOO,
"invalid string 1");
return FALSE;
}
strs++;
if (*strs)
{
g_set_error (error,
MY_OBJECT_ERROR,
MY_OBJECT_ERROR_FOO,
"invalid string array len in pos 0");
return FALSE;
}
strs = g_ptr_array_index (in, 1);
if (!*strs || strcmp (*strs, "baz"))
{
g_set_error (error,
MY_OBJECT_ERROR,
MY_OBJECT_ERROR_FOO,
"invalid string 0");
return FALSE;
}
strs++;
if (!*strs || strcmp (*strs, "whee"))
{
g_set_error (error,
MY_OBJECT_ERROR,
MY_OBJECT_ERROR_FOO,
"invalid string 1");
return FALSE;
}
strs++;
if (!*strs || strcmp (*strs, "moo"))
{
g_set_error (error,
MY_OBJECT_ERROR,
MY_OBJECT_ERROR_FOO,
"invalid string 2");
return FALSE;
}
strs++;
if (*strs)
{
g_set_error (error,
MY_OBJECT_ERROR,
MY_OBJECT_ERROR_FOO,
"invalid string array len in pos 1");
return FALSE;
}
*ret = g_ptr_array_new ();
ints = g_array_new (TRUE, TRUE, sizeof (guint));
v_UINT = 10;
g_array_append_val (ints, v_UINT);
v_UINT = 42;
g_array_append_val (ints, v_UINT);
v_UINT = 27;
g_array_append_val (ints, v_UINT);
g_ptr_array_add (*ret, ints);
ints = g_array_new (TRUE, TRUE, sizeof (guint));
v_UINT = 30;
g_array_append_val (ints, v_UINT);
g_ptr_array_add (*ret, ints);
return TRUE;
}
Commit Message:
CWE ID: CWE-264 | my_object_rec_arrays (MyObject *obj, GPtrArray *in, GPtrArray **ret, GError **error)
| 11,363 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: bool __net_get_random_once(void *buf, int nbytes, bool *done,
struct static_key *done_key)
{
static DEFINE_SPINLOCK(lock);
unsigned long flags;
spin_lock_irqsave(&lock, flags);
if (*done) {
spin_unlock_irqrestore(&lock, flags);
return false;
}
get_random_bytes(buf, nbytes);
*done = true;
spin_unlock_irqrestore(&lock, flags);
__net_random_once_disable_jump(done_key);
return true;
}
Commit Message: net: avoid dependency of net_get_random_once on nop patching
net_get_random_once depends on the static keys infrastructure to patch up
the branch to the slow path during boot. This was realized by abusing the
static keys api and defining a new initializer to not enable the call
site while still indicating that the branch point should get patched
up. This was needed to have the fast path considered likely by gcc.
The static key initialization during boot up normally walks through all
the registered keys and either patches in ideal nops or enables the jump
site but omitted that step on x86 if ideal nops where already placed at
static_key branch points. Thus net_get_random_once branches not always
became active.
This patch switches net_get_random_once to the ordinary static_key
api and thus places the kernel fast path in the - by gcc considered -
unlikely path. Microbenchmarks on Intel and AMD x86-64 showed that
the unlikely path actually beats the likely path in terms of cycle cost
and that different nop patterns did not make much difference, thus this
switch should not be noticeable.
Fixes: a48e42920ff38b ("net: introduce new macro net_get_random_once")
Reported-by: Tuomas Räsänen <tuomasjjrasanen@tjjr.fi>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
CWE ID: CWE-200 | bool __net_get_random_once(void *buf, int nbytes, bool *done,
struct static_key *once_key)
{
static DEFINE_SPINLOCK(lock);
unsigned long flags;
spin_lock_irqsave(&lock, flags);
if (*done) {
spin_unlock_irqrestore(&lock, flags);
return false;
}
get_random_bytes(buf, nbytes);
*done = true;
spin_unlock_irqrestore(&lock, flags);
__net_random_once_disable_jump(once_key);
return true;
}
| 2,124 |
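Setting the jump-label mechanics aside, the semantics being provided are simply: generate the random bytes on first use, hand every later caller the same bytes, and keep the fast-path check cheap. A userspace analogue built on pthread_once() is sketched below; it is purely illustrative (the kernel uses static keys precisely to avoid even this much fast-path cost), and it assumes a libc that provides arc4random_buf() (BSD, or glibc 2.36+).

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static unsigned char net_secret[16];
static pthread_once_t net_secret_once = PTHREAD_ONCE_INIT;

static void net_secret_init(void)
{
    /* Stand-in for the kernel's get_random_bytes(). */
    arc4random_buf(net_secret, sizeof(net_secret));
}

/* Copy up to sizeof(net_secret) bytes of the process-wide secret into buf,
 * generating the secret exactly once on first use. */
static void get_secret_once(void *buf, size_t nbytes)
{
    pthread_once(&net_secret_once, net_secret_init);
    memcpy(buf, net_secret,
           nbytes < sizeof(net_secret) ? nbytes : sizeof(net_secret));
}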
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static ssize_t exitcode_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
char *end, buf[sizeof("nnnnn\0")];
int tmp;
if (copy_from_user(buf, buffer, count))
return -EFAULT;
tmp = simple_strtol(buf, &end, 0);
if ((*end != '\0') && !isspace(*end))
return -EINVAL;
uml_exitcode = tmp;
return count;
}
Commit Message: uml: check length in exitcode_proc_write()
We don't cap the size of buffer from the user so we could write past the
end of the array here. Only root can write to this file.
Reported-by: Nico Golde <nico@ngolde.de>
Reported-by: Fabian Yamaguchi <fabs@goesec.de>
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Cc: stable@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
CWE ID: CWE-119 | static ssize_t exitcode_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
char *end, buf[sizeof("nnnnn\0")];
size_t size;
int tmp;
size = min(count, sizeof(buf));
if (copy_from_user(buf, buffer, size))
return -EFAULT;
tmp = simple_strtol(buf, &end, 0);
if ((*end != '\0') && !isspace(*end))
return -EINVAL;
uml_exitcode = tmp;
return count;
}
| 19,258 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: MagickExport Image *SpliceImage(const Image *image,
const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define SpliceImageTag "Splice/Image"
CacheView
*image_view,
*splice_view;
Image
*splice_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
splice_geometry;
ssize_t
y;
/*
Allocate splice image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
splice_geometry=(*geometry);
splice_image=CloneImage(image,image->columns+splice_geometry.width,
image->rows+splice_geometry.height,MagickTrue,exception);
if (splice_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(splice_image,DirectClass,exception) == MagickFalse)
{
splice_image=DestroyImage(splice_image);
return((Image *) NULL);
}
if ((IsPixelInfoGray(&splice_image->background_color) == MagickFalse) &&
(IsGrayColorspace(splice_image->colorspace) != MagickFalse))
(void) SetImageColorspace(splice_image,sRGBColorspace,exception);
if ((splice_image->background_color.alpha_trait != UndefinedPixelTrait) &&
(splice_image->alpha_trait == UndefinedPixelTrait))
(void) SetImageAlpha(splice_image,OpaqueAlpha,exception);
(void) SetImageBackgroundColor(splice_image,exception);
/*
Respect image geometry.
*/
switch (image->gravity)
{
default:
case UndefinedGravity:
case NorthWestGravity:
break;
case NorthGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
break;
}
case NorthEastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
break;
}
case WestGravity:
{
splice_geometry.y+=(ssize_t) splice_geometry.width/2;
break;
}
case CenterGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case EastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case SouthWestGravity:
{
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
case SouthGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
case SouthEastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
}
/*
Splice image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
splice_view=AcquireAuthenticCacheView(splice_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,splice_image,1,1)
#endif
for (y=0; y < (ssize_t) splice_geometry.y; y++)
{
register const Quantum
*restrict p;
register ssize_t
x;
register Quantum
*restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < splice_geometry.x; x++)
{
register ssize_t
i;
if (GetPixelReadMask(image,p) == 0)
{
SetPixelBackgoundColor(splice_image,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
if ((traits == UndefinedPixelTrait) ||
(splice_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(splice_image,channel,p[i],q);
}
SetPixelRed(splice_image,GetPixelRed(image,p),q);
SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
}
for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
q+=GetPixelChannels(splice_image);
for ( ; x < (ssize_t) splice_image->columns; x++)
{
register ssize_t
i;
if (GetPixelReadMask(image,p) == 0)
{
SetPixelBackgoundColor(splice_image,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
if ((traits == UndefinedPixelTrait) ||
(splice_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(splice_image,channel,p[i],q);
}
SetPixelRed(splice_image,GetPixelRed(image,p),q);
SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
}
if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TransposeImage)
#endif
proceed=SetImageProgress(image,SpliceImageTag,progress++,
splice_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,splice_image,1,1)
#endif
for (y=(ssize_t) (splice_geometry.y+splice_geometry.height);
y < (ssize_t) splice_image->rows; y++)
{
register const Quantum
*restrict p;
register ssize_t
x;
register Quantum
*restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height,
image->columns,1,exception);
if ((y < 0) || (y >= (ssize_t) splice_image->rows))
continue;
q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < splice_geometry.x; x++)
{
register ssize_t
i;
if (GetPixelReadMask(image,q) == 0)
{
SetPixelBackgoundColor(splice_image,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
if ((traits == UndefinedPixelTrait) ||
(splice_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(splice_image,channel,p[i],q);
}
SetPixelRed(splice_image,GetPixelRed(image,p),q);
SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
}
for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
q+=GetPixelChannels(splice_image);
for ( ; x < (ssize_t) splice_image->columns; x++)
{
register ssize_t
i;
if (GetPixelReadMask(image,q) == 0)
{
SetPixelBackgoundColor(splice_image,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
if ((traits == UndefinedPixelTrait) ||
(splice_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(splice_image,channel,p[i],q);
}
SetPixelRed(splice_image,GetPixelRed(image,p),q);
SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
}
if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TransposeImage)
#endif
proceed=SetImageProgress(image,SpliceImageTag,progress++,
splice_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
splice_view=DestroyCacheView(splice_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
splice_image=DestroyImage(splice_image);
return(splice_image);
}
Commit Message: Fixed out of bounds error in SpliceImage.
CWE ID: CWE-125 | MagickExport Image *SpliceImage(const Image *image,
const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define SpliceImageTag "Splice/Image"
CacheView
*image_view,
*splice_view;
Image
*splice_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
splice_geometry;
ssize_t
columns,
y;
/*
Allocate splice image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(geometry != (const RectangleInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
splice_geometry=(*geometry);
splice_image=CloneImage(image,image->columns+splice_geometry.width,
image->rows+splice_geometry.height,MagickTrue,exception);
if (splice_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(splice_image,DirectClass,exception) == MagickFalse)
{
splice_image=DestroyImage(splice_image);
return((Image *) NULL);
}
if ((IsPixelInfoGray(&splice_image->background_color) == MagickFalse) &&
(IsGrayColorspace(splice_image->colorspace) != MagickFalse))
(void) SetImageColorspace(splice_image,sRGBColorspace,exception);
if ((splice_image->background_color.alpha_trait != UndefinedPixelTrait) &&
(splice_image->alpha_trait == UndefinedPixelTrait))
(void) SetImageAlpha(splice_image,OpaqueAlpha,exception);
(void) SetImageBackgroundColor(splice_image,exception);
/*
Respect image geometry.
*/
switch (image->gravity)
{
default:
case UndefinedGravity:
case NorthWestGravity:
break;
case NorthGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
break;
}
case NorthEastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
break;
}
case WestGravity:
{
splice_geometry.y+=(ssize_t) splice_geometry.width/2;
break;
}
case CenterGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case EastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
splice_geometry.y+=(ssize_t) splice_geometry.height/2;
break;
}
case SouthWestGravity:
{
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
case SouthGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width/2;
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
case SouthEastGravity:
{
splice_geometry.x+=(ssize_t) splice_geometry.width;
splice_geometry.y+=(ssize_t) splice_geometry.height;
break;
}
}
/*
Splice image.
*/
status=MagickTrue;
progress=0;
columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
splice_view=AcquireAuthenticCacheView(splice_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,splice_image,1,1)
#endif
for (y=0; y < (ssize_t) splice_geometry.y; y++)
{
register const Quantum
*restrict p;
register ssize_t
x;
register Quantum
*restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < columns; x++)
{
register ssize_t
i;
if (GetPixelReadMask(image,p) == 0)
{
SetPixelBackgoundColor(splice_image,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
if ((traits == UndefinedPixelTrait) ||
(splice_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(splice_image,channel,p[i],q);
}
SetPixelRed(splice_image,GetPixelRed(image,p),q);
SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
}
for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
q+=GetPixelChannels(splice_image);
for ( ; x < (ssize_t) splice_image->columns; x++)
{
register ssize_t
i;
if (GetPixelReadMask(image,p) == 0)
{
SetPixelBackgoundColor(splice_image,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
if ((traits == UndefinedPixelTrait) ||
(splice_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(splice_image,channel,p[i],q);
}
SetPixelRed(splice_image,GetPixelRed(image,p),q);
SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
}
if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TransposeImage)
#endif
proceed=SetImageProgress(image,SpliceImageTag,progress++,
splice_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,splice_image,1,1)
#endif
for (y=(ssize_t) (splice_geometry.y+splice_geometry.height);
y < (ssize_t) splice_image->rows; y++)
{
register const Quantum
*restrict p;
register ssize_t
x;
register Quantum
*restrict q;
if (status == MagickFalse)
continue;
if ((y < 0) || (y >= (ssize_t)splice_image->rows))
continue;
p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height,
splice_image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < columns; x++)
{
register ssize_t
i;
if (GetPixelReadMask(image,q) == 0)
{
SetPixelBackgoundColor(splice_image,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
if ((traits == UndefinedPixelTrait) ||
(splice_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(splice_image,channel,p[i],q);
}
SetPixelRed(splice_image,GetPixelRed(image,p),q);
SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
}
for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
q+=GetPixelChannels(splice_image);
for ( ; x < (ssize_t) splice_image->columns; x++)
{
register ssize_t
i;
if (GetPixelReadMask(image,q) == 0)
{
SetPixelBackgoundColor(splice_image,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
if ((traits == UndefinedPixelTrait) ||
(splice_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(splice_image,channel,p[i],q);
}
SetPixelRed(splice_image,GetPixelRed(image,p),q);
SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(splice_image);
}
if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TransposeImage)
#endif
proceed=SetImageProgress(image,SpliceImageTag,progress++,
splice_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
splice_view=DestroyCacheView(splice_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
splice_image=DestroyImage(splice_image);
return(splice_image);
}
| 15,977 |
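
The fix above caps the per-row copy width with MagickMin(splice_geometry.x, splice_image->columns), so the inner loops can no longer walk past either pixel buffer. A minimal stand-alone C sketch of that clamping pattern (illustrative only; the buffer names are invented, not ImageMagick's API):

#include <stddef.h>
#include <string.h>

/* Copy up to `width` bytes of one row, clamped so that neither the source
 * row (src_w bytes) nor the destination row (dst_w bytes) is overrun. */
static void copy_row_clamped(const unsigned char *src, size_t src_w,
                             unsigned char *dst, size_t dst_w, size_t width)
{
    size_t n = width;
    if (n > src_w)   /* never read past the end of the source row */
        n = src_w;
    if (n > dst_w)   /* never write past the end of the destination row */
        n = dst_w;
    memcpy(dst, src, n);
}

int main(void)
{
    unsigned char src[4] = {1, 2, 3, 4}, dst[2] = {0, 0};
    copy_row_clamped(src, sizeof src, dst, sizeof dst, 100); /* clamps to 2 */
    return dst[1] == 2 ? 0 : 1;
}
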
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context,
AVFrame* frame) {
VideoFrame::Format format = PixelFormatToVideoFormat(codec_context->pix_fmt);
if (format == VideoFrame::UNKNOWN)
return AVERROR(EINVAL);
DCHECK(format == VideoFrame::YV12 || format == VideoFrame::YV16 ||
format == VideoFrame::YV12J);
gfx::Size size(codec_context->width, codec_context->height);
int ret;
if ((ret = av_image_check_size(size.width(), size.height(), 0, NULL)) < 0)
return ret;
gfx::Size natural_size;
if (codec_context->sample_aspect_ratio.num > 0) {
natural_size = GetNaturalSize(size,
codec_context->sample_aspect_ratio.num,
codec_context->sample_aspect_ratio.den);
} else {
natural_size = config_.natural_size();
}
if (!VideoFrame::IsValidConfig(format, size, gfx::Rect(size), natural_size))
return AVERROR(EINVAL);
scoped_refptr<VideoFrame> video_frame =
frame_pool_.CreateFrame(format, size, gfx::Rect(size),
natural_size, kNoTimestamp());
for (int i = 0; i < 3; i++) {
frame->base[i] = video_frame->data(i);
frame->data[i] = video_frame->data(i);
frame->linesize[i] = video_frame->stride(i);
}
frame->opaque = NULL;
video_frame.swap(reinterpret_cast<VideoFrame**>(&frame->opaque));
frame->type = FF_BUFFER_TYPE_USER;
frame->width = codec_context->width;
frame->height = codec_context->height;
frame->format = codec_context->pix_fmt;
return 0;
}
Commit Message: Replicate FFmpeg's video frame allocation strategy.
This should avoid accidental overreads and overwrites due to our
VideoFrame's not being as large as FFmpeg expects.
BUG=368980
TEST=new regression test
Review URL: https://codereview.chromium.org/270193002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@268831 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-119 | int FFmpegVideoDecoder::GetVideoBuffer(AVCodecContext* codec_context,
AVFrame* frame) {
VideoFrame::Format format = PixelFormatToVideoFormat(codec_context->pix_fmt);
if (format == VideoFrame::UNKNOWN)
return AVERROR(EINVAL);
DCHECK(format == VideoFrame::YV12 || format == VideoFrame::YV16 ||
format == VideoFrame::YV12J);
gfx::Size size(codec_context->width, codec_context->height);
const int ret = av_image_check_size(size.width(), size.height(), 0, NULL);
if (ret < 0)
return ret;
gfx::Size natural_size;
if (codec_context->sample_aspect_ratio.num > 0) {
natural_size = GetNaturalSize(size,
codec_context->sample_aspect_ratio.num,
codec_context->sample_aspect_ratio.den);
} else {
natural_size = config_.natural_size();
}
// FFmpeg has specific requirements on the allocation size of the frame. The
// following logic replicates FFmpeg's allocation strategy to ensure buffers
// are not overread / overwritten. See ff_init_buffer_info() for details.
//
// When lowres is non-zero, dimensions should be divided by 2^(lowres), but
// since we don't use this, just DCHECK that it's zero.
DCHECK_EQ(codec_context->lowres, 0);
gfx::Size coded_size(std::max(size.width(), codec_context->coded_width),
std::max(size.height(), codec_context->coded_height));
if (!VideoFrame::IsValidConfig(
format, coded_size, gfx::Rect(size), natural_size))
return AVERROR(EINVAL);
scoped_refptr<VideoFrame> video_frame = frame_pool_.CreateFrame(
format, coded_size, gfx::Rect(size), natural_size, kNoTimestamp());
for (int i = 0; i < 3; i++) {
frame->base[i] = video_frame->data(i);
frame->data[i] = video_frame->data(i);
frame->linesize[i] = video_frame->stride(i);
}
frame->opaque = NULL;
video_frame.swap(reinterpret_cast<VideoFrame**>(&frame->opaque));
frame->type = FF_BUFFER_TYPE_USER;
frame->width = coded_size.width();
frame->height = coded_size.height();
frame->format = codec_context->pix_fmt;
return 0;
}
| 11,364 |
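
The fix above sizes the frame from the codec's coded (padded) dimensions rather than the visible size alone, so decoder writes stay inside the allocation. A short C sketch of that sizing rule (the types are invented for illustration and are not FFmpeg's or Chromium's):

#include <stdlib.h>

struct dims { int w, h; };

static int imax(int a, int b) { return a > b ? a : b; }

/* Size a single-plane, 8-bit buffer for whichever is larger: the visible
 * size or the codec's coded (padded) size, and report the size used. */
static unsigned char *alloc_frame(struct dims visible, struct dims coded,
                                  struct dims *used)
{
    used->w = imax(visible.w, coded.w);
    used->h = imax(visible.h, coded.h);
    return malloc((size_t)used->w * (size_t)used->h);
}

int main(void)
{
    struct dims visible = {1920, 1080}, coded = {1920, 1088}, used;
    unsigned char *buf = alloc_frame(visible, coded, &used);
    free(buf);
    return used.h == 1088 ? 0 : 1;
}
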
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: frag6_print(netdissect_options *ndo, register const u_char *bp, register const u_char *bp2)
{
register const struct ip6_frag *dp;
register const struct ip6_hdr *ip6;
dp = (const struct ip6_frag *)bp;
ip6 = (const struct ip6_hdr *)bp2;
ND_TCHECK(dp->ip6f_offlg);
if (ndo->ndo_vflag) {
ND_PRINT((ndo, "frag (0x%08x:%d|%ld)",
EXTRACT_32BITS(&dp->ip6f_ident),
EXTRACT_16BITS(&dp->ip6f_offlg) & IP6F_OFF_MASK,
sizeof(struct ip6_hdr) + EXTRACT_16BITS(&ip6->ip6_plen) -
(long)(bp - bp2) - sizeof(struct ip6_frag)));
} else {
ND_PRINT((ndo, "frag (%d|%ld)",
EXTRACT_16BITS(&dp->ip6f_offlg) & IP6F_OFF_MASK,
sizeof(struct ip6_hdr) + EXTRACT_16BITS(&ip6->ip6_plen) -
(long)(bp - bp2) - sizeof(struct ip6_frag)));
}
/* it is meaningless to decode non-first fragment */
if ((EXTRACT_16BITS(&dp->ip6f_offlg) & IP6F_OFF_MASK) != 0)
return -1;
else
{
ND_PRINT((ndo, " "));
return sizeof(struct ip6_frag);
}
trunc:
ND_PRINT((ndo, "[|frag]"));
return -1;
}
Commit Message: CVE-2017-13031/Check for the presence of the entire IPv6 fragment header.
This fixes a buffer over-read discovered by Bhargava Shastry,
SecT/TU Berlin.
Add a test using the capture file supplied by the reporter(s), modified
so the capture file won't be rejected as an invalid capture.
Clean up some whitespace in tests/TESTLIST while we're at it.
CWE ID: CWE-125 | frag6_print(netdissect_options *ndo, register const u_char *bp, register const u_char *bp2)
{
register const struct ip6_frag *dp;
register const struct ip6_hdr *ip6;
dp = (const struct ip6_frag *)bp;
ip6 = (const struct ip6_hdr *)bp2;
ND_TCHECK(*dp);
if (ndo->ndo_vflag) {
ND_PRINT((ndo, "frag (0x%08x:%d|%ld)",
EXTRACT_32BITS(&dp->ip6f_ident),
EXTRACT_16BITS(&dp->ip6f_offlg) & IP6F_OFF_MASK,
sizeof(struct ip6_hdr) + EXTRACT_16BITS(&ip6->ip6_plen) -
(long)(bp - bp2) - sizeof(struct ip6_frag)));
} else {
ND_PRINT((ndo, "frag (%d|%ld)",
EXTRACT_16BITS(&dp->ip6f_offlg) & IP6F_OFF_MASK,
sizeof(struct ip6_hdr) + EXTRACT_16BITS(&ip6->ip6_plen) -
(long)(bp - bp2) - sizeof(struct ip6_frag)));
}
/* it is meaningless to decode non-first fragment */
if ((EXTRACT_16BITS(&dp->ip6f_offlg) & IP6F_OFF_MASK) != 0)
return -1;
else
{
ND_PRINT((ndo, " "));
return sizeof(struct ip6_frag);
}
trunc:
ND_PRINT((ndo, "[|frag]"));
return -1;
}
| 372 |
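
The one-line fix above widens the truncation check from a single field to the whole fragment header (ND_TCHECK(*dp)) before anything is read from it. A small self-contained C sketch of the same pattern — verify that the entire fixed-size header is present before touching it — using a hypothetical header, not tcpdump's:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct frag_hdr {            /* hypothetical fixed-size header */
    uint8_t  next;
    uint8_t  reserved;
    uint16_t off_flags;
    uint32_t ident;
};

/* Print nothing unless the full header lies within the captured bytes. */
static int print_frag(const unsigned char *p, size_t caplen)
{
    struct frag_hdr h;
    if (caplen < sizeof h)    /* check the whole header, not just one field */
        return 0;
    memcpy(&h, p, sizeof h);  /* safe: length was verified first */
    printf("frag ident=0x%08x\n", (unsigned)h.ident);
    return 1;
}

int main(void)
{
    unsigned char pkt[3] = {0};          /* deliberately truncated */
    return print_frag(pkt, sizeof pkt);  /* returns 0: too short to parse */
}
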
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int _snd_timer_stop(struct snd_timer_instance * timeri,
int keep_flag, int event)
{
struct snd_timer *timer;
unsigned long flags;
if (snd_BUG_ON(!timeri))
return -ENXIO;
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
if (!keep_flag) {
spin_lock_irqsave(&slave_active_lock, flags);
timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
spin_unlock_irqrestore(&slave_active_lock, flags);
}
goto __end;
}
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
!(--timer->running)) {
timer->hw.stop(timer);
if (timer->flags & SNDRV_TIMER_FLG_RESCHED) {
timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
snd_timer_reschedule(timer, 0);
if (timer->flags & SNDRV_TIMER_FLG_CHANGE) {
timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
timer->hw.start(timer);
}
}
}
if (!keep_flag)
timeri->flags &=
~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
spin_unlock_irqrestore(&timer->lock, flags);
__end:
if (event != SNDRV_TIMER_EVENT_RESOLUTION)
snd_timer_notify1(timeri, event);
return 0;
}
Commit Message: ALSA: timer: Harden slave timer list handling
A slave timer instance might be still accessible in a racy way while
operating the master instance as it lacks of locking. Since the
master operation is mostly protected with timer->lock, we should cope
with it while changing the slave instance, too. Also, some linked
lists (active_list and ack_list) of slave instances aren't unlinked
immediately at stopping or closing, and this may lead to unexpected
accesses.
This patch tries to address these issues. It adds spin lock of
timer->lock (either from master or slave, which is equivalent) in a
few places. For avoiding a deadlock, we ensure that the global
slave_active_lock is always locked at first before each timer lock.
Also, ack and active_list of slave instances are properly unlinked at
snd_timer_stop() and snd_timer_close().
Last but not least, remove the superfluous call of _snd_timer_stop()
at removing slave links. This is a noop, and calling it may confuse
readers wrt locking. Further cleanup will follow in a later patch.
Actually we've got reports of use-after-free by syzkaller fuzzer, and
this hopefully fixes these issues.
Reported-by: Dmitry Vyukov <dvyukov@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Takashi Iwai <tiwai@suse.de>
CWE ID: CWE-20 | static int _snd_timer_stop(struct snd_timer_instance * timeri,
int keep_flag, int event)
{
struct snd_timer *timer;
unsigned long flags;
if (snd_BUG_ON(!timeri))
return -ENXIO;
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
if (!keep_flag) {
spin_lock_irqsave(&slave_active_lock, flags);
timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
spin_unlock_irqrestore(&slave_active_lock, flags);
}
goto __end;
}
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
!(--timer->running)) {
timer->hw.stop(timer);
if (timer->flags & SNDRV_TIMER_FLG_RESCHED) {
timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
snd_timer_reschedule(timer, 0);
if (timer->flags & SNDRV_TIMER_FLG_CHANGE) {
timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
timer->hw.start(timer);
}
}
}
if (!keep_flag)
timeri->flags &=
~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
spin_unlock_irqrestore(&timer->lock, flags);
__end:
if (event != SNDRV_TIMER_EVENT_RESOLUTION)
snd_timer_notify1(timeri, event);
return 0;
}
| 1,202 |
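
The commit message above describes two changes: unlink the slave's ack_list/active_list entries while the right lock is held, and always take the global slave_active_lock before any per-timer lock so the two orders cannot deadlock. A user-space pthread sketch of that lock ordering (pattern only, not the kernel code):

#include <pthread.h>

static pthread_mutex_t slave_active_lock = PTHREAD_MUTEX_INITIALIZER;

struct timer_inst {
    pthread_mutex_t lock;
    int running;
    struct timer_inst *next_active;   /* stand-in for the active list link */
};

/* Every path that needs both locks takes the global slave lock first and
 * the per-timer lock second, so no two threads can deadlock on them. */
static void stop_slave(struct timer_inst *t)
{
    pthread_mutex_lock(&slave_active_lock);
    pthread_mutex_lock(&t->lock);
    t->running = 0;
    t->next_active = 0;               /* unlink while both locks are held */
    pthread_mutex_unlock(&t->lock);
    pthread_mutex_unlock(&slave_active_lock);
}

int main(void)
{
    struct timer_inst t = { PTHREAD_MUTEX_INITIALIZER, 1, 0 };
    stop_slave(&t);
    return t.running;
}
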
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: std::vector<FilePath> GDataCache::GetCachePaths(
const FilePath& cache_root_path) {
std::vector<FilePath> cache_paths;
cache_paths.push_back(cache_root_path.Append(kGDataCacheMetaDir));
cache_paths.push_back(cache_root_path.Append(kGDataCachePinnedDir));
cache_paths.push_back(cache_root_path.Append(kGDataCacheOutgoingDir));
cache_paths.push_back(cache_root_path.Append(kGDataCachePersistentDir));
cache_paths.push_back(cache_root_path.Append(kGDataCacheTmpDir));
cache_paths.push_back(cache_root_path.Append(kGDataCacheTmpDownloadsDir));
cache_paths.push_back(cache_root_path.Append(kGDataCacheTmpDocumentsDir));
return cache_paths;
}
Commit Message: Revert 144993 - gdata: Remove invalid files in the cache directories
Broke linux_chromeos_valgrind:
http://build.chromium.org/p/chromium.memory.fyi/builders/Chromium%20OS%20%28valgrind%29%285%29/builds/8628/steps/memory%20test%3A%20unit/logs/stdio
In theory, we shouldn't have any invalid files left in the
cache directories, but things can go wrong and invalid files
may be left if the device shuts down unexpectedly, for instance.
Besides, it's good to be defensive.
BUG=134862
TEST=added unit tests
Review URL: https://chromiumcodereview.appspot.com/10693020
TBR=satorux@chromium.org
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@145029 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-119 | std::vector<FilePath> GDataCache::GetCachePaths(
| 8,045 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int modbus_reply(modbus_t *ctx, const uint8_t *req,
int req_length, modbus_mapping_t *mb_mapping)
{
int offset;
int slave;
int function;
uint16_t address;
uint8_t rsp[MAX_MESSAGE_LENGTH];
int rsp_length = 0;
sft_t sft;
if (ctx == NULL) {
errno = EINVAL;
return -1;
}
offset = ctx->backend->header_length;
slave = req[offset - 1];
function = req[offset];
address = (req[offset + 1] << 8) + req[offset + 2];
sft.slave = slave;
sft.function = function;
sft.t_id = ctx->backend->prepare_response_tid(req, &req_length);
/* Data are flushed on illegal number of values errors. */
switch (function) {
case MODBUS_FC_READ_COILS:
case MODBUS_FC_READ_DISCRETE_INPUTS: {
unsigned int is_input = (function == MODBUS_FC_READ_DISCRETE_INPUTS);
int start_bits = is_input ? mb_mapping->start_input_bits : mb_mapping->start_bits;
int nb_bits = is_input ? mb_mapping->nb_input_bits : mb_mapping->nb_bits;
uint8_t *tab_bits = is_input ? mb_mapping->tab_input_bits : mb_mapping->tab_bits;
const char * const name = is_input ? "read_input_bits" : "read_bits";
int nb = (req[offset + 3] << 8) + req[offset + 4];
/* The mapping can be shifted to reduce memory consumption and it
doesn't always start at address zero. */
int mapping_address = address - start_bits;
if (nb < 1 || MODBUS_MAX_READ_BITS < nb) {
rsp_length = response_exception(
ctx, &sft, MODBUS_EXCEPTION_ILLEGAL_DATA_VALUE, rsp, TRUE,
"Illegal nb of values %d in %s (max %d)\n",
nb, name, MODBUS_MAX_READ_BITS);
} else if (mapping_address < 0 || (mapping_address + nb) > nb_bits) {
rsp_length = response_exception(
ctx, &sft,
MODBUS_EXCEPTION_ILLEGAL_DATA_ADDRESS, rsp, FALSE,
"Illegal data address 0x%0X in %s\n",
mapping_address < 0 ? address : address + nb, name);
} else {
rsp_length = ctx->backend->build_response_basis(&sft, rsp);
rsp[rsp_length++] = (nb / 8) + ((nb % 8) ? 1 : 0);
rsp_length = response_io_status(tab_bits, mapping_address, nb,
rsp, rsp_length);
}
}
break;
case MODBUS_FC_READ_HOLDING_REGISTERS:
case MODBUS_FC_READ_INPUT_REGISTERS: {
unsigned int is_input = (function == MODBUS_FC_READ_INPUT_REGISTERS);
int start_registers = is_input ? mb_mapping->start_input_registers : mb_mapping->start_registers;
int nb_registers = is_input ? mb_mapping->nb_input_registers : mb_mapping->nb_registers;
uint16_t *tab_registers = is_input ? mb_mapping->tab_input_registers : mb_mapping->tab_registers;
const char * const name = is_input ? "read_input_registers" : "read_registers";
int nb = (req[offset + 3] << 8) + req[offset + 4];
/* The mapping can be shifted to reduce memory consumption and it
doesn't always start at address zero. */
int mapping_address = address - start_registers;
if (nb < 1 || MODBUS_MAX_READ_REGISTERS < nb) {
rsp_length = response_exception(
ctx, &sft, MODBUS_EXCEPTION_ILLEGAL_DATA_VALUE, rsp, TRUE,
"Illegal nb of values %d in %s (max %d)\n",
nb, name, MODBUS_MAX_READ_REGISTERS);
} else if (mapping_address < 0 || (mapping_address + nb) > nb_registers) {
rsp_length = response_exception(
ctx, &sft, MODBUS_EXCEPTION_ILLEGAL_DATA_ADDRESS, rsp, FALSE,
"Illegal data address 0x%0X in %s\n",
mapping_address < 0 ? address : address + nb, name);
} else {
int i;
rsp_length = ctx->backend->build_response_basis(&sft, rsp);
rsp[rsp_length++] = nb << 1;
for (i = mapping_address; i < mapping_address + nb; i++) {
rsp[rsp_length++] = tab_registers[i] >> 8;
rsp[rsp_length++] = tab_registers[i] & 0xFF;
}
}
}
break;
case MODBUS_FC_WRITE_SINGLE_COIL: {
int mapping_address = address - mb_mapping->start_bits;
if (mapping_address < 0 || mapping_address >= mb_mapping->nb_bits) {
rsp_length = response_exception(
ctx, &sft, MODBUS_EXCEPTION_ILLEGAL_DATA_ADDRESS, rsp, FALSE,
"Illegal data address 0x%0X in write_bit\n",
address);
} else {
int data = (req[offset + 3] << 8) + req[offset + 4];
if (data == 0xFF00 || data == 0x0) {
mb_mapping->tab_bits[mapping_address] = data ? ON : OFF;
memcpy(rsp, req, req_length);
rsp_length = req_length;
} else {
rsp_length = response_exception(
ctx, &sft,
MODBUS_EXCEPTION_ILLEGAL_DATA_VALUE, rsp, FALSE,
"Illegal data value 0x%0X in write_bit request at address %0X\n",
data, address);
}
}
}
break;
case MODBUS_FC_WRITE_SINGLE_REGISTER: {
int mapping_address = address - mb_mapping->start_registers;
if (mapping_address < 0 || mapping_address >= mb_mapping->nb_registers) {
rsp_length = response_exception(
ctx, &sft,
MODBUS_EXCEPTION_ILLEGAL_DATA_ADDRESS, rsp, FALSE,
"Illegal data address 0x%0X in write_register\n",
address);
} else {
int data = (req[offset + 3] << 8) + req[offset + 4];
mb_mapping->tab_registers[mapping_address] = data;
memcpy(rsp, req, req_length);
rsp_length = req_length;
}
}
break;
case MODBUS_FC_WRITE_MULTIPLE_COILS: {
int nb = (req[offset + 3] << 8) + req[offset + 4];
int mapping_address = address - mb_mapping->start_bits;
if (nb < 1 || MODBUS_MAX_WRITE_BITS < nb) {
/* May be the indication has been truncated on reading because of
* invalid address (eg. nb is 0 but the request contains values to
* write) so it's necessary to flush. */
rsp_length = response_exception(
ctx, &sft, MODBUS_EXCEPTION_ILLEGAL_DATA_VALUE, rsp, TRUE,
"Illegal number of values %d in write_bits (max %d)\n",
nb, MODBUS_MAX_WRITE_BITS);
} else if (mapping_address < 0 ||
(mapping_address + nb) > mb_mapping->nb_bits) {
rsp_length = response_exception(
ctx, &sft,
MODBUS_EXCEPTION_ILLEGAL_DATA_ADDRESS, rsp, FALSE,
"Illegal data address 0x%0X in write_bits\n",
mapping_address < 0 ? address : address + nb);
} else {
/* 6 = byte count */
modbus_set_bits_from_bytes(mb_mapping->tab_bits, mapping_address, nb,
&req[offset + 6]);
rsp_length = ctx->backend->build_response_basis(&sft, rsp);
/* 4 to copy the bit address (2) and the quantity of bits */
memcpy(rsp + rsp_length, req + rsp_length, 4);
rsp_length += 4;
}
}
break;
case MODBUS_FC_WRITE_MULTIPLE_REGISTERS: {
int nb = (req[offset + 3] << 8) + req[offset + 4];
int mapping_address = address - mb_mapping->start_registers;
if (nb < 1 || MODBUS_MAX_WRITE_REGISTERS < nb) {
rsp_length = response_exception(
ctx, &sft, MODBUS_EXCEPTION_ILLEGAL_DATA_VALUE, rsp, TRUE,
"Illegal number of values %d in write_registers (max %d)\n",
nb, MODBUS_MAX_WRITE_REGISTERS);
} else if (mapping_address < 0 ||
(mapping_address + nb) > mb_mapping->nb_registers) {
rsp_length = response_exception(
ctx, &sft, MODBUS_EXCEPTION_ILLEGAL_DATA_ADDRESS, rsp, FALSE,
"Illegal data address 0x%0X in write_registers\n",
mapping_address < 0 ? address : address + nb);
} else {
int i, j;
for (i = mapping_address, j = 6; i < mapping_address + nb; i++, j += 2) {
/* 6 and 7 = first value */
mb_mapping->tab_registers[i] =
(req[offset + j] << 8) + req[offset + j + 1];
}
rsp_length = ctx->backend->build_response_basis(&sft, rsp);
/* 4 to copy the address (2) and the no. of registers */
memcpy(rsp + rsp_length, req + rsp_length, 4);
rsp_length += 4;
}
}
break;
case MODBUS_FC_REPORT_SLAVE_ID: {
int str_len;
int byte_count_pos;
rsp_length = ctx->backend->build_response_basis(&sft, rsp);
/* Skip byte count for now */
byte_count_pos = rsp_length++;
rsp[rsp_length++] = _REPORT_SLAVE_ID;
/* Run indicator status to ON */
rsp[rsp_length++] = 0xFF;
/* LMB + length of LIBMODBUS_VERSION_STRING */
str_len = 3 + strlen(LIBMODBUS_VERSION_STRING);
memcpy(rsp + rsp_length, "LMB" LIBMODBUS_VERSION_STRING, str_len);
rsp_length += str_len;
rsp[byte_count_pos] = rsp_length - byte_count_pos - 1;
}
break;
case MODBUS_FC_READ_EXCEPTION_STATUS:
if (ctx->debug) {
fprintf(stderr, "FIXME Not implemented\n");
}
errno = ENOPROTOOPT;
return -1;
break;
case MODBUS_FC_MASK_WRITE_REGISTER: {
int mapping_address = address - mb_mapping->start_registers;
if (mapping_address < 0 || mapping_address >= mb_mapping->nb_registers) {
rsp_length = response_exception(
ctx, &sft, MODBUS_EXCEPTION_ILLEGAL_DATA_ADDRESS, rsp, FALSE,
"Illegal data address 0x%0X in write_register\n",
address);
} else {
uint16_t data = mb_mapping->tab_registers[mapping_address];
uint16_t and = (req[offset + 3] << 8) + req[offset + 4];
uint16_t or = (req[offset + 5] << 8) + req[offset + 6];
data = (data & and) | (or & (~and));
mb_mapping->tab_registers[mapping_address] = data;
memcpy(rsp, req, req_length);
rsp_length = req_length;
}
}
break;
case MODBUS_FC_WRITE_AND_READ_REGISTERS: {
int nb = (req[offset + 3] << 8) + req[offset + 4];
uint16_t address_write = (req[offset + 5] << 8) + req[offset + 6];
int nb_write = (req[offset + 7] << 8) + req[offset + 8];
int nb_write_bytes = req[offset + 9];
int mapping_address = address - mb_mapping->start_registers;
int mapping_address_write = address_write - mb_mapping->start_registers;
if (nb_write < 1 || MODBUS_MAX_WR_WRITE_REGISTERS < nb_write ||
nb < 1 || MODBUS_MAX_WR_READ_REGISTERS < nb ||
nb_write_bytes != nb_write * 2) {
rsp_length = response_exception(
ctx, &sft, MODBUS_EXCEPTION_ILLEGAL_DATA_VALUE, rsp, TRUE,
"Illegal nb of values (W%d, R%d) in write_and_read_registers (max W%d, R%d)\n",
nb_write, nb, MODBUS_MAX_WR_WRITE_REGISTERS, MODBUS_MAX_WR_READ_REGISTERS);
} else if (mapping_address < 0 ||
(mapping_address + nb) > mb_mapping->nb_registers ||
mapping_address < 0 ||
(mapping_address_write + nb_write) > mb_mapping->nb_registers) {
rsp_length = response_exception(
ctx, &sft, MODBUS_EXCEPTION_ILLEGAL_DATA_ADDRESS, rsp, FALSE,
"Illegal data read address 0x%0X or write address 0x%0X write_and_read_registers\n",
mapping_address < 0 ? address : address + nb,
mapping_address_write < 0 ? address_write : address_write + nb_write);
} else {
int i, j;
rsp_length = ctx->backend->build_response_basis(&sft, rsp);
rsp[rsp_length++] = nb << 1;
/* Write first.
10 and 11 are the offset of the first values to write */
for (i = mapping_address_write, j = 10;
i < mapping_address_write + nb_write; i++, j += 2) {
mb_mapping->tab_registers[i] =
(req[offset + j] << 8) + req[offset + j + 1];
}
/* and read the data for the response */
for (i = mapping_address; i < mapping_address + nb; i++) {
rsp[rsp_length++] = mb_mapping->tab_registers[i] >> 8;
rsp[rsp_length++] = mb_mapping->tab_registers[i] & 0xFF;
}
}
}
break;
default:
rsp_length = response_exception(
ctx, &sft, MODBUS_EXCEPTION_ILLEGAL_FUNCTION, rsp, TRUE,
"Unknown Modbus function code: 0x%0X\n", function);
break;
}
/* Suppress any responses when the request was a broadcast */
return (ctx->backend->backend_type == _MODBUS_BACKEND_TYPE_RTU &&
slave == MODBUS_BROADCAST_ADDRESS) ? 0 : send_msg(ctx, rsp, rsp_length);
}
Commit Message: Fix VD-1301 and VD-1302 vulnerabilities
This patch was contributed by Maor Vermucht and Or Peles from
VDOO Connected Trust.
CWE ID: CWE-125 | int modbus_reply(modbus_t *ctx, const uint8_t *req,
int req_length, modbus_mapping_t *mb_mapping)
{
int offset;
int slave;
int function;
uint16_t address;
uint8_t rsp[MAX_MESSAGE_LENGTH];
int rsp_length = 0;
sft_t sft;
if (ctx == NULL) {
errno = EINVAL;
return -1;
}
offset = ctx->backend->header_length;
slave = req[offset - 1];
function = req[offset];
address = (req[offset + 1] << 8) + req[offset + 2];
sft.slave = slave;
sft.function = function;
sft.t_id = ctx->backend->prepare_response_tid(req, &req_length);
/* Data are flushed on illegal number of values errors. */
switch (function) {
case MODBUS_FC_READ_COILS:
case MODBUS_FC_READ_DISCRETE_INPUTS: {
unsigned int is_input = (function == MODBUS_FC_READ_DISCRETE_INPUTS);
int start_bits = is_input ? mb_mapping->start_input_bits : mb_mapping->start_bits;
int nb_bits = is_input ? mb_mapping->nb_input_bits : mb_mapping->nb_bits;
uint8_t *tab_bits = is_input ? mb_mapping->tab_input_bits : mb_mapping->tab_bits;
const char * const name = is_input ? "read_input_bits" : "read_bits";
int nb = (req[offset + 3] << 8) + req[offset + 4];
/* The mapping can be shifted to reduce memory consumption and it
doesn't always start at address zero. */
int mapping_address = address - start_bits;
if (nb < 1 || MODBUS_MAX_READ_BITS < nb) {
rsp_length = response_exception(
ctx, &sft, MODBUS_EXCEPTION_ILLEGAL_DATA_VALUE, rsp, TRUE,
"Illegal nb of values %d in %s (max %d)\n",
nb, name, MODBUS_MAX_READ_BITS);
} else if (mapping_address < 0 || (mapping_address + nb) > nb_bits) {
rsp_length = response_exception(
ctx, &sft,
MODBUS_EXCEPTION_ILLEGAL_DATA_ADDRESS, rsp, FALSE,
"Illegal data address 0x%0X in %s\n",
mapping_address < 0 ? address : address + nb, name);
} else {
rsp_length = ctx->backend->build_response_basis(&sft, rsp);
rsp[rsp_length++] = (nb / 8) + ((nb % 8) ? 1 : 0);
rsp_length = response_io_status(tab_bits, mapping_address, nb,
rsp, rsp_length);
}
}
break;
case MODBUS_FC_READ_HOLDING_REGISTERS:
case MODBUS_FC_READ_INPUT_REGISTERS: {
unsigned int is_input = (function == MODBUS_FC_READ_INPUT_REGISTERS);
int start_registers = is_input ? mb_mapping->start_input_registers : mb_mapping->start_registers;
int nb_registers = is_input ? mb_mapping->nb_input_registers : mb_mapping->nb_registers;
uint16_t *tab_registers = is_input ? mb_mapping->tab_input_registers : mb_mapping->tab_registers;
const char * const name = is_input ? "read_input_registers" : "read_registers";
int nb = (req[offset + 3] << 8) + req[offset + 4];
/* The mapping can be shifted to reduce memory consumption and it
doesn't always start at address zero. */
int mapping_address = address - start_registers;
if (nb < 1 || MODBUS_MAX_READ_REGISTERS < nb) {
rsp_length = response_exception(
ctx, &sft, MODBUS_EXCEPTION_ILLEGAL_DATA_VALUE, rsp, TRUE,
"Illegal nb of values %d in %s (max %d)\n",
nb, name, MODBUS_MAX_READ_REGISTERS);
} else if (mapping_address < 0 || (mapping_address + nb) > nb_registers) {
rsp_length = response_exception(
ctx, &sft, MODBUS_EXCEPTION_ILLEGAL_DATA_ADDRESS, rsp, FALSE,
"Illegal data address 0x%0X in %s\n",
mapping_address < 0 ? address : address + nb, name);
} else {
int i;
rsp_length = ctx->backend->build_response_basis(&sft, rsp);
rsp[rsp_length++] = nb << 1;
for (i = mapping_address; i < mapping_address + nb; i++) {
rsp[rsp_length++] = tab_registers[i] >> 8;
rsp[rsp_length++] = tab_registers[i] & 0xFF;
}
}
}
break;
case MODBUS_FC_WRITE_SINGLE_COIL: {
int mapping_address = address - mb_mapping->start_bits;
if (mapping_address < 0 || mapping_address >= mb_mapping->nb_bits) {
rsp_length = response_exception(
ctx, &sft, MODBUS_EXCEPTION_ILLEGAL_DATA_ADDRESS, rsp, FALSE,
"Illegal data address 0x%0X in write_bit\n",
address);
} else {
int data = (req[offset + 3] << 8) + req[offset + 4];
if (data == 0xFF00 || data == 0x0) {
mb_mapping->tab_bits[mapping_address] = data ? ON : OFF;
memcpy(rsp, req, req_length);
rsp_length = req_length;
} else {
rsp_length = response_exception(
ctx, &sft,
MODBUS_EXCEPTION_ILLEGAL_DATA_VALUE, rsp, FALSE,
"Illegal data value 0x%0X in write_bit request at address %0X\n",
data, address);
}
}
}
break;
case MODBUS_FC_WRITE_SINGLE_REGISTER: {
int mapping_address = address - mb_mapping->start_registers;
if (mapping_address < 0 || mapping_address >= mb_mapping->nb_registers) {
rsp_length = response_exception(
ctx, &sft,
MODBUS_EXCEPTION_ILLEGAL_DATA_ADDRESS, rsp, FALSE,
"Illegal data address 0x%0X in write_register\n",
address);
} else {
int data = (req[offset + 3] << 8) + req[offset + 4];
mb_mapping->tab_registers[mapping_address] = data;
memcpy(rsp, req, req_length);
rsp_length = req_length;
}
}
break;
case MODBUS_FC_WRITE_MULTIPLE_COILS: {
int nb = (req[offset + 3] << 8) + req[offset + 4];
int nb_bits = req[offset + 5];
int mapping_address = address - mb_mapping->start_bits;
if (nb < 1 || MODBUS_MAX_WRITE_BITS < nb || nb_bits * 8 < nb) {
/* May be the indication has been truncated on reading because of
* invalid address (eg. nb is 0 but the request contains values to
* write) so it's necessary to flush. */
rsp_length = response_exception(
ctx, &sft, MODBUS_EXCEPTION_ILLEGAL_DATA_VALUE, rsp, TRUE,
"Illegal number of values %d in write_bits (max %d)\n",
nb, MODBUS_MAX_WRITE_BITS);
} else if (mapping_address < 0 ||
(mapping_address + nb) > mb_mapping->nb_bits) {
rsp_length = response_exception(
ctx, &sft,
MODBUS_EXCEPTION_ILLEGAL_DATA_ADDRESS, rsp, FALSE,
"Illegal data address 0x%0X in write_bits\n",
mapping_address < 0 ? address : address + nb);
} else {
/* 6 = byte count */
modbus_set_bits_from_bytes(mb_mapping->tab_bits, mapping_address, nb,
&req[offset + 6]);
rsp_length = ctx->backend->build_response_basis(&sft, rsp);
/* 4 to copy the bit address (2) and the quantity of bits */
memcpy(rsp + rsp_length, req + rsp_length, 4);
rsp_length += 4;
}
}
break;
case MODBUS_FC_WRITE_MULTIPLE_REGISTERS: {
int nb = (req[offset + 3] << 8) + req[offset + 4];
int nb_bytes = req[offset + 5];
int mapping_address = address - mb_mapping->start_registers;
if (nb < 1 || MODBUS_MAX_WRITE_REGISTERS < nb || nb_bytes * 8 < nb) {
rsp_length = response_exception(
ctx, &sft, MODBUS_EXCEPTION_ILLEGAL_DATA_VALUE, rsp, TRUE,
"Illegal number of values %d in write_registers (max %d)\n",
nb, MODBUS_MAX_WRITE_REGISTERS);
} else if (mapping_address < 0 ||
(mapping_address + nb) > mb_mapping->nb_registers) {
rsp_length = response_exception(
ctx, &sft, MODBUS_EXCEPTION_ILLEGAL_DATA_ADDRESS, rsp, FALSE,
"Illegal data address 0x%0X in write_registers\n",
mapping_address < 0 ? address : address + nb);
} else {
int i, j;
for (i = mapping_address, j = 6; i < mapping_address + nb; i++, j += 2) {
/* 6 and 7 = first value */
mb_mapping->tab_registers[i] =
(req[offset + j] << 8) + req[offset + j + 1];
}
rsp_length = ctx->backend->build_response_basis(&sft, rsp);
/* 4 to copy the address (2) and the no. of registers */
memcpy(rsp + rsp_length, req + rsp_length, 4);
rsp_length += 4;
}
}
break;
case MODBUS_FC_REPORT_SLAVE_ID: {
int str_len;
int byte_count_pos;
rsp_length = ctx->backend->build_response_basis(&sft, rsp);
/* Skip byte count for now */
byte_count_pos = rsp_length++;
rsp[rsp_length++] = _REPORT_SLAVE_ID;
/* Run indicator status to ON */
rsp[rsp_length++] = 0xFF;
/* LMB + length of LIBMODBUS_VERSION_STRING */
str_len = 3 + strlen(LIBMODBUS_VERSION_STRING);
memcpy(rsp + rsp_length, "LMB" LIBMODBUS_VERSION_STRING, str_len);
rsp_length += str_len;
rsp[byte_count_pos] = rsp_length - byte_count_pos - 1;
}
break;
case MODBUS_FC_READ_EXCEPTION_STATUS:
if (ctx->debug) {
fprintf(stderr, "FIXME Not implemented\n");
}
errno = ENOPROTOOPT;
return -1;
break;
case MODBUS_FC_MASK_WRITE_REGISTER: {
int mapping_address = address - mb_mapping->start_registers;
if (mapping_address < 0 || mapping_address >= mb_mapping->nb_registers) {
rsp_length = response_exception(
ctx, &sft, MODBUS_EXCEPTION_ILLEGAL_DATA_ADDRESS, rsp, FALSE,
"Illegal data address 0x%0X in write_register\n",
address);
} else {
uint16_t data = mb_mapping->tab_registers[mapping_address];
uint16_t and = (req[offset + 3] << 8) + req[offset + 4];
uint16_t or = (req[offset + 5] << 8) + req[offset + 6];
data = (data & and) | (or & (~and));
mb_mapping->tab_registers[mapping_address] = data;
memcpy(rsp, req, req_length);
rsp_length = req_length;
}
}
break;
case MODBUS_FC_WRITE_AND_READ_REGISTERS: {
int nb = (req[offset + 3] << 8) + req[offset + 4];
uint16_t address_write = (req[offset + 5] << 8) + req[offset + 6];
int nb_write = (req[offset + 7] << 8) + req[offset + 8];
int nb_write_bytes = req[offset + 9];
int mapping_address = address - mb_mapping->start_registers;
int mapping_address_write = address_write - mb_mapping->start_registers;
if (nb_write < 1 || MODBUS_MAX_WR_WRITE_REGISTERS < nb_write ||
nb < 1 || MODBUS_MAX_WR_READ_REGISTERS < nb ||
nb_write_bytes != nb_write * 2) {
rsp_length = response_exception(
ctx, &sft, MODBUS_EXCEPTION_ILLEGAL_DATA_VALUE, rsp, TRUE,
"Illegal nb of values (W%d, R%d) in write_and_read_registers (max W%d, R%d)\n",
nb_write, nb, MODBUS_MAX_WR_WRITE_REGISTERS, MODBUS_MAX_WR_READ_REGISTERS);
} else if (mapping_address < 0 ||
(mapping_address + nb) > mb_mapping->nb_registers ||
mapping_address < 0 ||
(mapping_address_write + nb_write) > mb_mapping->nb_registers) {
rsp_length = response_exception(
ctx, &sft, MODBUS_EXCEPTION_ILLEGAL_DATA_ADDRESS, rsp, FALSE,
"Illegal data read address 0x%0X or write address 0x%0X write_and_read_registers\n",
mapping_address < 0 ? address : address + nb,
mapping_address_write < 0 ? address_write : address_write + nb_write);
} else {
int i, j;
rsp_length = ctx->backend->build_response_basis(&sft, rsp);
rsp[rsp_length++] = nb << 1;
/* Write first.
10 and 11 are the offset of the first values to write */
for (i = mapping_address_write, j = 10;
i < mapping_address_write + nb_write; i++, j += 2) {
mb_mapping->tab_registers[i] =
(req[offset + j] << 8) + req[offset + j + 1];
}
/* and read the data for the response */
for (i = mapping_address; i < mapping_address + nb; i++) {
rsp[rsp_length++] = mb_mapping->tab_registers[i] >> 8;
rsp[rsp_length++] = mb_mapping->tab_registers[i] & 0xFF;
}
}
}
break;
default:
rsp_length = response_exception(
ctx, &sft, MODBUS_EXCEPTION_ILLEGAL_FUNCTION, rsp, TRUE,
"Unknown Modbus function code: 0x%0X\n", function);
break;
}
/* Suppress any responses when the request was a broadcast */
return (ctx->backend->backend_type == _MODBUS_BACKEND_TYPE_RTU &&
slave == MODBUS_BROADCAST_ADDRESS) ? 0 : send_msg(ctx, rsp, rsp_length);
}
| 10,617 |
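
The fix above cross-checks the request's declared quantity against its declared byte count and rejects frames whose payload cannot hold that many values before the payload is used. A brief C sketch of validating such an untrusted length pair (limits and layout simplified, not libmodbus's):

#include <stdint.h>

#define MAX_WRITE_BITS 1968   /* illustrative protocol-style upper bound */

/* A "write multiple bits" request is sane only if the quantity is in range
 * and the declared byte count can actually carry that many bits. */
static int write_bits_request_ok(uint16_t quantity, uint8_t byte_count)
{
    if (quantity < 1 || quantity > MAX_WRITE_BITS)
        return 0;
    if ((unsigned)byte_count * 8u < quantity)  /* payload shorter than claimed */
        return 0;
    return 1;
}

int main(void)
{
    /* 16 coils need 2 bytes; a frame claiming only 1 byte must be rejected. */
    return (write_bits_request_ok(16, 1) == 0 &&
            write_bits_request_ok(16, 2) == 1) ? 0 : 1;
}
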
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void RenderViewImpl::OnSwapOut(const ViewMsg_SwapOut_Params& params) {
OnStop();
if (!is_swapped_out_) {
SyncNavigationState();
webview()->dispatchUnloadEvent();
SetSwappedOut(true);
WebURLRequest request(GURL("about:swappedout"));
webview()->mainFrame()->loadRequest(request);
}
Send(new ViewHostMsg_SwapOut_ACK(routing_id_, params));
}
Commit Message: Use a new scheme for swapping out RenderViews.
BUG=118664
TEST=none
Review URL: http://codereview.chromium.org/9720004
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@127986 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: | void RenderViewImpl::OnSwapOut(const ViewMsg_SwapOut_Params& params) {
OnStop();
if (!is_swapped_out_) {
SyncNavigationState();
webview()->dispatchUnloadEvent();
SetSwappedOut(true);
// to chrome::kSwappedOutURL. If that happens to be to the page we had been
GURL swappedOutURL(chrome::kSwappedOutURL);
WebURLRequest request(swappedOutURL);
webview()->mainFrame()->loadRequest(request);
}
Send(new ViewHostMsg_SwapOut_ACK(routing_id_, params));
}
| 12,544 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: spnego_gss_inquire_sec_context_by_oid(
OM_uint32 *minor_status,
const gss_ctx_id_t context_handle,
const gss_OID desired_object,
gss_buffer_set_t *data_set)
{
OM_uint32 ret;
ret = gss_inquire_sec_context_by_oid(minor_status,
context_handle,
desired_object,
data_set);
return (ret);
}
Commit Message: Fix SPNEGO context aliasing bugs [CVE-2015-2695]
The SPNEGO mechanism currently replaces its context handle with the
mechanism context handle upon establishment, under the assumption that
most GSS functions are only called after context establishment. This
assumption is incorrect, and can lead to aliasing violations for some
programs. Maintain the SPNEGO context structure after context
establishment and refer to it in all GSS methods. Add initiate and
opened flags to the SPNEGO context structure for use in
gss_inquire_context() prior to context establishment.
CVE-2015-2695:
In MIT krb5 1.5 and later, applications which call
gss_inquire_context() on a partially-established SPNEGO context can
cause the GSS-API library to read from a pointer using the wrong type,
generally causing a process crash. This bug may go unnoticed, because
the most common SPNEGO authentication scenario establishes the context
after just one call to gss_accept_sec_context(). Java server
applications using the native JGSS provider are vulnerable to this
bug. A carefully crafted SPNEGO packet might allow the
gss_inquire_context() call to succeed with attacker-determined
results, but applications should not make access control decisions
based on gss_inquire_context() results prior to context establishment.
CVSSv2 Vector: AV:N/AC:M/Au:N/C:N/I:N/A:C/E:POC/RL:OF/RC:C
[ghudson@mit.edu: several bugfixes, style changes, and edge-case
behavior changes; commit message and CVE description]
ticket: 8244
target_version: 1.14
tags: pullup
CWE ID: CWE-18 | spnego_gss_inquire_sec_context_by_oid(
OM_uint32 *minor_status,
const gss_ctx_id_t context_handle,
const gss_OID desired_object,
gss_buffer_set_t *data_set)
{
OM_uint32 ret;
spnego_gss_ctx_id_t sc = (spnego_gss_ctx_id_t)context_handle;
/* There are no SPNEGO-specific OIDs for this function. */
if (sc->ctx_handle == GSS_C_NO_CONTEXT)
return (GSS_S_UNAVAILABLE);
ret = gss_inquire_sec_context_by_oid(minor_status,
sc->ctx_handle,
desired_object,
data_set);
return (ret);
}
| 17,488 |
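
The fix above treats the caller's handle as the SPNEGO wrapper structure, forwards the wrapped mechanism handle, and returns GSS_S_UNAVAILABLE while no inner context exists, instead of aliasing the handle to the wrong type. A C sketch of that wrapper-handle pattern (types and return codes are invented, not GSS-API's):

#include <stddef.h>

typedef void *inner_ctx_t;        /* mechanism-specific context handle */

struct wrapper_ctx {              /* what the opaque handle really points to */
    inner_ctx_t inner;            /* NULL until establishment completes */
    int opened;
};

enum { OK = 0, ERR_UNAVAILABLE = 1 };

/* Always reinterpret the opaque handle as the wrapper type and refuse
 * queries that need the inner context before it exists. */
static int inquire_by_oid(void *handle, int *result)
{
    struct wrapper_ctx *wc = handle;
    if (wc == NULL || wc->inner == NULL)
        return ERR_UNAVAILABLE;
    *result = wc->opened;         /* stand-in for the real mechanism query */
    return OK;
}

int main(void)
{
    struct wrapper_ctx half_built = { NULL, 0 };
    int out = 0;
    return inquire_by_oid(&half_built, &out) == ERR_UNAVAILABLE ? 0 : 1;
}
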
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void DownloadCoreServiceImpl::SetDownloadManagerDelegateForTesting(
std::unique_ptr<ChromeDownloadManagerDelegate> new_delegate) {
manager_delegate_.swap(new_delegate);
DownloadManager* dm = BrowserContext::GetDownloadManager(profile_);
dm->SetDelegate(manager_delegate_.get());
manager_delegate_->SetDownloadManager(dm);
if (new_delegate)
new_delegate->Shutdown();
}
Commit Message: Don't downcast DownloadManagerDelegate to ChromeDownloadManagerDelegate.
DownloadManager has a public SetDelegate method, and tests or other subsystems
can install their own implementations of the delegate.
Bug: 805905
Change-Id: Iecf1e0aceada0e1048bed1e2d2ceb29ca64295b8
TBR: tests updated to follow the API change.
Reviewed-on: https://chromium-review.googlesource.com/894702
Reviewed-by: David Vallet <dvallet@chromium.org>
Reviewed-by: Min Qin <qinmin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#533515}
CWE ID: CWE-125 | void DownloadCoreServiceImpl::SetDownloadManagerDelegateForTesting(
std::unique_ptr<ChromeDownloadManagerDelegate> new_delegate) {
manager_delegate_.swap(new_delegate);
DownloadManager* dm = BrowserContext::GetDownloadManager(profile_);
dm->SetDelegate(manager_delegate_.get());
if (manager_delegate_)
manager_delegate_->SetDownloadManager(dm);
download_manager_created_ = !!manager_delegate_;
if (new_delegate)
new_delegate->Shutdown();
}
| 4,024 |
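
The fix above stops assuming a delegate is always installed: the SetDownloadManager call is guarded and a flag records whether a delegate exists, since tests or other subsystems may install their own or none. A tiny C sketch of guarding an optional, replaceable delegate (all names illustrative):

#include <stddef.h>

struct delegate {
    void (*attach)(void *manager);   /* supplied by whoever installs it */
};

struct manager {
    struct delegate *delegate;       /* may legitimately be NULL */
    int delegate_installed;
};

/* Install (or clear) the delegate without ever dereferencing it blindly. */
static void set_delegate(struct manager *m, struct delegate *d)
{
    m->delegate = d;
    if (m->delegate && m->delegate->attach)
        m->delegate->attach(m);
    m->delegate_installed = (m->delegate != NULL);
}

int main(void)
{
    struct manager m = { NULL, 0 };
    set_delegate(&m, NULL);          /* clearing must be safe, too */
    return m.delegate_installed;
}
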
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static MagickBooleanType WriteGROUP4Image(const ImageInfo *image_info,
Image *image)
{
char
filename[MaxTextExtent];
FILE
*file;
Image
*huffman_image;
ImageInfo
*write_info;
int
unique_file;
MagickBooleanType
status;
register ssize_t
i;
ssize_t
count;
TIFF
*tiff;
toff_t
*byte_count,
strip_size;
unsigned char
*buffer;
/*
Write image as CCITT Group4 TIFF image to a temporary file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
if (status == MagickFalse)
return(status);
huffman_image=CloneImage(image,0,0,MagickTrue,&image->exception);
if (huffman_image == (Image *) NULL)
{
(void) CloseBlob(image);
return(MagickFalse);
}
huffman_image->endian=MSBEndian;
file=(FILE *) NULL;
unique_file=AcquireUniqueFileResource(filename);
if (unique_file != -1)
file=fdopen(unique_file,"wb");
if ((unique_file == -1) || (file == (FILE *) NULL))
{
ThrowFileException(&image->exception,FileOpenError,
"UnableToCreateTemporaryFile",filename);
return(MagickFalse);
}
(void) FormatLocaleString(huffman_image->filename,MaxTextExtent,"tiff:%s",
filename);
(void) SetImageType(huffman_image,BilevelType);
write_info=CloneImageInfo((ImageInfo *) NULL);
SetImageInfoFile(write_info,file);
(void) SetImageType(image,BilevelType);
(void) SetImageDepth(image,1);
write_info->compression=Group4Compression;
write_info->type=BilevelType;
(void) SetImageOption(write_info,"quantum:polarity","min-is-white");
status=WriteTIFFImage(write_info,huffman_image);
(void) fflush(file);
write_info=DestroyImageInfo(write_info);
if (status == MagickFalse)
{
InheritException(&image->exception,&huffman_image->exception);
huffman_image=DestroyImage(huffman_image);
(void) fclose(file);
(void) RelinquishUniqueFileResource(filename);
return(MagickFalse);
}
tiff=TIFFOpen(filename,"rb");
if (tiff == (TIFF *) NULL)
{
huffman_image=DestroyImage(huffman_image);
(void) fclose(file);
(void) RelinquishUniqueFileResource(filename);
ThrowFileException(&image->exception,FileOpenError,"UnableToOpenFile",
image_info->filename);
return(MagickFalse);
}
/*
Allocate raw strip buffer.
*/
if (TIFFGetField(tiff,TIFFTAG_STRIPBYTECOUNTS,&byte_count) != 1)
{
TIFFClose(tiff);
huffman_image=DestroyImage(huffman_image);
(void) fclose(file);
(void) RelinquishUniqueFileResource(filename);
return(MagickFalse);
}
strip_size=byte_count[0];
for (i=1; i < (ssize_t) TIFFNumberOfStrips(tiff); i++)
if (byte_count[i] > strip_size)
strip_size=byte_count[i];
buffer=(unsigned char *) AcquireQuantumMemory((size_t) strip_size,
sizeof(*buffer));
if (buffer == (unsigned char *) NULL)
{
TIFFClose(tiff);
huffman_image=DestroyImage(huffman_image);
(void) fclose(file);
(void) RelinquishUniqueFileResource(filename);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image_info->filename);
}
/*
Compress runlength encoded to 2D Huffman pixels.
*/
for (i=0; i < (ssize_t) TIFFNumberOfStrips(tiff); i++)
{
count=(ssize_t) TIFFReadRawStrip(tiff,(uint32) i,buffer,strip_size);
if (WriteBlob(image,(size_t) count,buffer) != count)
status=MagickFalse;
}
buffer=(unsigned char *) RelinquishMagickMemory(buffer);
TIFFClose(tiff);
huffman_image=DestroyImage(huffman_image);
(void) fclose(file);
(void) RelinquishUniqueFileResource(filename);
(void) CloseBlob(image);
return(status);
}
Commit Message: Prevent buffer overflow in SIXEL, PDB, MAP, and CALS coders (bug report from Donghai Zhu)
CWE ID: CWE-119 | static MagickBooleanType WriteGROUP4Image(const ImageInfo *image_info,
Image *image)
{
char
filename[MaxTextExtent];
FILE
*file;
Image
*huffman_image;
ImageInfo
*write_info;
int
unique_file;
MagickBooleanType
status;
register ssize_t
i;
ssize_t
count;
TIFF
*tiff;
toff_t
*byte_count,
strip_size;
unsigned char
*buffer;
/*
Write image as CCITT Group4 TIFF image to a temporary file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
if (status == MagickFalse)
return(status);
huffman_image=CloneImage(image,0,0,MagickTrue,&image->exception);
if (huffman_image == (Image *) NULL)
{
(void) CloseBlob(image);
return(MagickFalse);
}
huffman_image->endian=MSBEndian;
file=(FILE *) NULL;
unique_file=AcquireUniqueFileResource(filename);
if (unique_file != -1)
file=fdopen(unique_file,"wb");
if ((unique_file == -1) || (file == (FILE *) NULL))
{
ThrowFileException(&image->exception,FileOpenError,
"UnableToCreateTemporaryFile",filename);
return(MagickFalse);
}
(void) FormatLocaleString(huffman_image->filename,MaxTextExtent,"tiff:%s",
filename);
(void) SetImageType(huffman_image,BilevelType);
write_info=CloneImageInfo((ImageInfo *) NULL);
SetImageInfoFile(write_info,file);
(void) SetImageDepth(image,1);
(void) SetImageType(image,BilevelType);
write_info->compression=Group4Compression;
write_info->type=BilevelType;
(void) SetImageOption(write_info,"quantum:polarity","min-is-white");
status=WriteTIFFImage(write_info,huffman_image);
(void) fflush(file);
write_info=DestroyImageInfo(write_info);
if (status == MagickFalse)
{
InheritException(&image->exception,&huffman_image->exception);
huffman_image=DestroyImage(huffman_image);
(void) fclose(file);
(void) RelinquishUniqueFileResource(filename);
return(MagickFalse);
}
tiff=TIFFOpen(filename,"rb");
if (tiff == (TIFF *) NULL)
{
huffman_image=DestroyImage(huffman_image);
(void) fclose(file);
(void) RelinquishUniqueFileResource(filename);
ThrowFileException(&image->exception,FileOpenError,"UnableToOpenFile",
image_info->filename);
return(MagickFalse);
}
/*
Allocate raw strip buffer.
*/
if (TIFFGetField(tiff,TIFFTAG_STRIPBYTECOUNTS,&byte_count) != 1)
{
TIFFClose(tiff);
huffman_image=DestroyImage(huffman_image);
(void) fclose(file);
(void) RelinquishUniqueFileResource(filename);
return(MagickFalse);
}
strip_size=byte_count[0];
for (i=1; i < (ssize_t) TIFFNumberOfStrips(tiff); i++)
if (byte_count[i] > strip_size)
strip_size=byte_count[i];
buffer=(unsigned char *) AcquireQuantumMemory((size_t) strip_size,
sizeof(*buffer));
if (buffer == (unsigned char *) NULL)
{
TIFFClose(tiff);
huffman_image=DestroyImage(huffman_image);
(void) fclose(file);
(void) RelinquishUniqueFileResource(filename);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image_info->filename);
}
/*
Compress runlength encoded to 2D Huffman pixels.
*/
for (i=0; i < (ssize_t) TIFFNumberOfStrips(tiff); i++)
{
count=(ssize_t) TIFFReadRawStrip(tiff,(uint32) i,buffer,strip_size);
if (WriteBlob(image,(size_t) count,buffer) != count)
status=MagickFalse;
}
buffer=(unsigned char *) RelinquishMagickMemory(buffer);
TIFFClose(tiff);
huffman_image=DestroyImage(huffman_image);
(void) fclose(file);
(void) RelinquishUniqueFileResource(filename);
(void) CloseBlob(image);
return(status);
}
| 28,381 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void Chapters::Display::ShallowCopy(Display& rhs) const
{
rhs.m_string = m_string;
rhs.m_language = m_language;
rhs.m_country = m_country;
}
Commit Message: libwebm: Pull from upstream
Rolling mkvparser from upstream. Primarily for fixing a bug on parsing
failures with certain Opus WebM files.
Upstream commit hash of this pull: 574045edd4ecbeb802ee3f1d214b5510269852ae
The diff is so huge because there were some style clean ups upstream.
But it was ensured that there were no breaking changes when the style
clean ups were done upstream.
Change-Id: Ib6e907175484b4b0ae1b55ab39522ea3188ad039
CWE ID: CWE-119 | void Chapters::Display::ShallowCopy(Display& rhs) const
| 12,557 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void UsbFindDevicesFunction::OnGetDevicesComplete(
const std::vector<scoped_refptr<UsbDevice>>& devices) {
result_.reset(new base::ListValue());
barrier_ = base::BarrierClosure(
devices.size(), base::Bind(&UsbFindDevicesFunction::OpenComplete, this));
for (const scoped_refptr<UsbDevice>& device : devices) {
if (device->vendor_id() != vendor_id_ ||
device->product_id() != product_id_) {
barrier_.Run();
} else {
device->OpenInterface(
interface_id_,
base::Bind(&UsbFindDevicesFunction::OnDeviceOpened, this));
}
}
}
Commit Message: Remove fallback when requesting a single USB interface.
This reverts commit 2d475d0ed37bf8f19385537ad31e361f1b21624b. The
permission broker now supports opening devices that are partially
claimed through the OpenPath method, and RequestPathAccess will always
fail for these devices, so the fallback path from RequestPathAccess to
OpenPath is always taken.
BUG=500057
Review URL: https://codereview.chromium.org/1227313003
Cr-Commit-Position: refs/heads/master@{#338354}
CWE ID: CWE-399 | void UsbFindDevicesFunction::OnGetDevicesComplete(
const std::vector<scoped_refptr<UsbDevice>>& devices) {
result_.reset(new base::ListValue());
barrier_ = base::BarrierClosure(
devices.size(), base::Bind(&UsbFindDevicesFunction::OpenComplete, this));
for (const scoped_refptr<UsbDevice>& device : devices) {
if (device->vendor_id() != vendor_id_ ||
device->product_id() != product_id_) {
barrier_.Run();
} else {
device->Open(base::Bind(&UsbFindDevicesFunction::OnDeviceOpened, this));
}
}
}
| 19,202 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int dex_loadcode(RBinFile *arch, RBinDexObj *bin) {
struct r_bin_t *rbin = arch->rbin;
int i;
int *methods = NULL;
int sym_count = 0;
if (!bin || bin->methods_list) {
return false;
}
bin->code_from = UT64_MAX;
bin->code_to = 0;
bin->methods_list = r_list_newf ((RListFree)free);
if (!bin->methods_list) {
return false;
}
bin->imports_list = r_list_newf ((RListFree)free);
if (!bin->imports_list) {
r_list_free (bin->methods_list);
return false;
}
bin->classes_list = r_list_newf ((RListFree)__r_bin_class_free);
if (!bin->classes_list) {
r_list_free (bin->methods_list);
r_list_free (bin->imports_list);
return false;
}
if (bin->header.method_size>bin->size) {
bin->header.method_size = 0;
return false;
}
/* WrapDown the header sizes to avoid huge allocations */
bin->header.method_size = R_MIN (bin->header.method_size, bin->size);
bin->header.class_size = R_MIN (bin->header.class_size, bin->size);
bin->header.strings_size = R_MIN (bin->header.strings_size, bin->size);
if (bin->header.strings_size > bin->size) {
eprintf ("Invalid strings size\n");
return false;
}
if (bin->classes) {
ut64 amount = sizeof (int) * bin->header.method_size;
if (amount > UT32_MAX || amount < bin->header.method_size) {
return false;
}
methods = calloc (1, amount + 1);
for (i = 0; i < bin->header.class_size; i++) {
char *super_name, *class_name;
struct dex_class_t *c = &bin->classes[i];
class_name = dex_class_name (bin, c);
super_name = dex_class_super_name (bin, c);
if (dexdump) {
rbin->cb_printf ("Class #%d -\n", i);
}
parse_class (arch, bin, c, i, methods, &sym_count);
free (class_name);
free (super_name);
}
}
if (methods) {
int import_count = 0;
int sym_count = bin->methods_list->length;
for (i = 0; i < bin->header.method_size; i++) {
int len = 0;
if (methods[i]) {
continue;
}
if (bin->methods[i].class_id > bin->header.types_size - 1) {
continue;
}
if (is_class_idx_in_code_classes(bin, bin->methods[i].class_id)) {
continue;
}
char *class_name = getstr (
bin, bin->types[bin->methods[i].class_id]
.descriptor_id);
if (!class_name) {
free (class_name);
continue;
}
len = strlen (class_name);
if (len < 1) {
continue;
}
class_name[len - 1] = 0; // remove last char ";"
char *method_name = dex_method_name (bin, i);
char *signature = dex_method_signature (bin, i);
if (method_name && *method_name) {
RBinImport *imp = R_NEW0 (RBinImport);
imp->name = r_str_newf ("%s.method.%s%s", class_name, method_name, signature);
imp->type = r_str_const ("FUNC");
imp->bind = r_str_const ("NONE");
imp->ordinal = import_count++;
r_list_append (bin->imports_list, imp);
RBinSymbol *sym = R_NEW0 (RBinSymbol);
sym->name = r_str_newf ("imp.%s", imp->name);
sym->type = r_str_const ("FUNC");
sym->bind = r_str_const ("NONE");
sym->paddr = sym->vaddr = bin->b->base + bin->header.method_offset + (sizeof (struct dex_method_t) * i) ;
sym->ordinal = sym_count++;
r_list_append (bin->methods_list, sym);
sdb_num_set (mdb, sdb_fmt (0, "method.%d", i), sym->paddr, 0);
}
free (method_name);
free (signature);
free (class_name);
}
free (methods);
}
return true;
}
Commit Message: fix #6857
CWE ID: CWE-125 | static int dex_loadcode(RBinFile *arch, RBinDexObj *bin) {
struct r_bin_t *rbin = arch->rbin;
int i;
int *methods = NULL;
int sym_count = 0;
if (!bin || bin->methods_list) {
return false;
}
bin->code_from = UT64_MAX;
bin->code_to = 0;
bin->methods_list = r_list_newf ((RListFree)free);
if (!bin->methods_list) {
return false;
}
bin->imports_list = r_list_newf ((RListFree)free);
if (!bin->imports_list) {
r_list_free (bin->methods_list);
return false;
}
bin->classes_list = r_list_newf ((RListFree)__r_bin_class_free);
if (!bin->classes_list) {
r_list_free (bin->methods_list);
r_list_free (bin->imports_list);
return false;
}
if (bin->header.method_size>bin->size) {
bin->header.method_size = 0;
return false;
}
/* WrapDown the header sizes to avoid huge allocations */
bin->header.method_size = R_MIN (bin->header.method_size, bin->size);
bin->header.class_size = R_MIN (bin->header.class_size, bin->size);
bin->header.strings_size = R_MIN (bin->header.strings_size, bin->size);
if (bin->header.strings_size > bin->size) {
eprintf ("Invalid strings size\n");
return false;
}
if (bin->classes) {
ut64 amount = sizeof (int) * bin->header.method_size;
if (amount > UT32_MAX || amount < bin->header.method_size) {
return false;
}
methods = calloc (1, amount + 1);
for (i = 0; i < bin->header.class_size; i++) {
char *super_name, *class_name;
struct dex_class_t *c = &bin->classes[i];
class_name = dex_class_name (bin, c);
super_name = dex_class_super_name (bin, c);
if (dexdump) {
rbin->cb_printf ("Class #%d -\n", i);
}
parse_class (arch, bin, c, i, methods, &sym_count);
free (class_name);
free (super_name);
}
}
if (methods) {
int import_count = 0;
int sym_count = bin->methods_list->length;
for (i = 0; i < bin->header.method_size; i++) {
int len = 0;
if (methods[i]) {
continue;
}
if (bin->methods[i].class_id > bin->header.types_size) {
continue;
}
if (is_class_idx_in_code_classes(bin, bin->methods[i].class_id)) {
continue;
}
char *class_name = getstr (
bin, bin->types[bin->methods[i].class_id]
.descriptor_id);
if (!class_name) {
free (class_name);
continue;
}
len = strlen (class_name);
if (len < 1) {
continue;
}
class_name[len - 1] = 0; // remove last char ";"
char *method_name = dex_method_name (bin, i);
char *signature = dex_method_signature (bin, i);
if (method_name && *method_name) {
RBinImport *imp = R_NEW0 (RBinImport);
imp->name = r_str_newf ("%s.method.%s%s", class_name, method_name, signature);
imp->type = r_str_const ("FUNC");
imp->bind = r_str_const ("NONE");
imp->ordinal = import_count++;
r_list_append (bin->imports_list, imp);
RBinSymbol *sym = R_NEW0 (RBinSymbol);
sym->name = r_str_newf ("imp.%s", imp->name);
sym->type = r_str_const ("FUNC");
sym->bind = r_str_const ("NONE");
sym->paddr = sym->vaddr = bin->b->base + bin->header.method_offset + (sizeof (struct dex_method_t) * i) ;
sym->ordinal = sym_count++;
r_list_append (bin->methods_list, sym);
sdb_num_set (mdb, sdb_fmt (0, "method.%d", i), sym->paddr, 0);
}
free (method_name);
free (signature);
free (class_name);
}
free (methods);
}
return true;
}
| 216 |
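A note on the bounds check patched in the entry above: comparisons of the form index > size - 1 on unsigned integers are a recurring source of out-of-bounds reads, because size - 1 wraps to the maximum value when size is 0 and the guard then never rejects anything. The commit message ("fix #6857") does not spell out the exact trigger, so the sketch below is only a self-contained illustration of that pitfall class, with invented variable names rather than the radare2 ones:

#include <cstdint>
#include <cstdio>

int main() {
    // Both values are unsigned, as the corresponding DEX header fields are.
    std::uint32_t types_size = 0;     // file declares zero type entries
    std::uint32_t class_id   = 1234;  // attacker-controlled index

    // "size - 1" wraps to 0xFFFFFFFF here, so this guard never fires.
    if (class_id > types_size - 1)
        std::puts("rejected by the wrap-prone check");
    else
        std::puts("accepted by the wrap-prone check -> out-of-bounds read");

    // Comparing against the size itself avoids the wrap-around entirely.
    if (class_id >= types_size)
        std::puts("rejected by the safe check");
    return 0;
}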
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: horizontalDifferenceF(float *ip, int n, int stride, uint16 *wp, uint16 *FromLT2)
{
int32 r1, g1, b1, a1, r2, g2, b2, a2, mask;
float fltsize = Fltsize;
#define CLAMP(v) ( (v<(float)0.) ? 0 \
: (v<(float)2.) ? FromLT2[(int)(v*fltsize)] \
: (v>(float)24.2) ? 2047 \
: LogK1*log(v*LogK2) + 0.5 )
mask = CODE_MASK;
if (n >= stride) {
if (stride == 3) {
r2 = wp[0] = (uint16) CLAMP(ip[0]);
g2 = wp[1] = (uint16) CLAMP(ip[1]);
b2 = wp[2] = (uint16) CLAMP(ip[2]);
n -= 3;
while (n > 0) {
n -= 3;
wp += 3;
ip += 3;
r1 = (int32) CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1;
g1 = (int32) CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1;
b1 = (int32) CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1;
}
} else if (stride == 4) {
r2 = wp[0] = (uint16) CLAMP(ip[0]);
g2 = wp[1] = (uint16) CLAMP(ip[1]);
b2 = wp[2] = (uint16) CLAMP(ip[2]);
a2 = wp[3] = (uint16) CLAMP(ip[3]);
n -= 4;
while (n > 0) {
n -= 4;
wp += 4;
ip += 4;
r1 = (int32) CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1;
g1 = (int32) CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1;
b1 = (int32) CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1;
a1 = (int32) CLAMP(ip[3]); wp[3] = (uint16)((a1-a2) & mask); a2 = a1;
}
} else {
ip += n - 1; /* point to last one */
wp += n - 1; /* point to last one */
n -= stride;
while (n > 0) {
REPEAT(stride, wp[0] = (uint16) CLAMP(ip[0]);
wp[stride] -= wp[0];
wp[stride] &= mask;
wp--; ip--)
n -= stride;
}
REPEAT(stride, wp[0] = (uint16) CLAMP(ip[0]); wp--; ip--)
}
}
}
Commit Message: * tools/tiffcrop.c: fix various out-of-bounds write vulnerabilities
in heap or stack allocated buffers. Reported as MSVR 35093,
MSVR 35096 and MSVR 35097. Discovered by Axel Souchet and Vishal
Chauhan from the MSRC Vulnerabilities & Mitigations team.
* tools/tiff2pdf.c: fix out-of-bounds write vulnerabilities in
heap allocate buffer in t2p_process_jpeg_strip(). Reported as MSVR
35098. Discovered by Axel Souchet and Vishal Chauhan from the MSRC
Vulnerabilities & Mitigations team.
* libtiff/tif_pixarlog.c: fix out-of-bounds write vulnerabilities
in heap allocated buffers. Reported as MSVR 35094. Discovered by
Axel Souchet and Vishal Chauhan from the MSRC Vulnerabilities &
Mitigations team.
* libtiff/tif_write.c: fix issue in error code path of TIFFFlushData1()
that didn't reset the tif_rawcc and tif_rawcp members. I'm not
completely sure if that could happen in practice outside of the odd
behaviour of t2p_seekproc() of tiff2pdf). The report points that a
better fix could be to check the return value of TIFFFlushData1() in
places where it isn't done currently, but it seems this patch is enough.
Reported as MSVR 35095. Discovered by Axel Souchet & Vishal Chauhan &
Suha Can from the MSRC Vulnerabilities & Mitigations team.
CWE ID: CWE-787 | horizontalDifferenceF(float *ip, int n, int stride, uint16 *wp, uint16 *FromLT2)
{
int32 r1, g1, b1, a1, r2, g2, b2, a2, mask;
float fltsize = Fltsize;
#define CLAMP(v) ( (v<(float)0.) ? 0 \
: (v<(float)2.) ? FromLT2[(int)(v*fltsize)] \
: (v>(float)24.2) ? 2047 \
: LogK1*log(v*LogK2) + 0.5 )
mask = CODE_MASK;
if (n >= stride) {
if (stride == 3) {
r2 = wp[0] = (uint16) CLAMP(ip[0]);
g2 = wp[1] = (uint16) CLAMP(ip[1]);
b2 = wp[2] = (uint16) CLAMP(ip[2]);
n -= 3;
while (n > 0) {
n -= 3;
wp += 3;
ip += 3;
r1 = (int32) CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1;
g1 = (int32) CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1;
b1 = (int32) CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1;
}
} else if (stride == 4) {
r2 = wp[0] = (uint16) CLAMP(ip[0]);
g2 = wp[1] = (uint16) CLAMP(ip[1]);
b2 = wp[2] = (uint16) CLAMP(ip[2]);
a2 = wp[3] = (uint16) CLAMP(ip[3]);
n -= 4;
while (n > 0) {
n -= 4;
wp += 4;
ip += 4;
r1 = (int32) CLAMP(ip[0]); wp[0] = (uint16)((r1-r2) & mask); r2 = r1;
g1 = (int32) CLAMP(ip[1]); wp[1] = (uint16)((g1-g2) & mask); g2 = g1;
b1 = (int32) CLAMP(ip[2]); wp[2] = (uint16)((b1-b2) & mask); b2 = b1;
a1 = (int32) CLAMP(ip[3]); wp[3] = (uint16)((a1-a2) & mask); a2 = a1;
}
} else {
REPEAT(stride, wp[0] = (uint16) CLAMP(ip[0]); wp++; ip++)
n -= stride;
while (n > 0) {
REPEAT(stride,
wp[0] = (uint16)(((int32)CLAMP(ip[0])-(int32)CLAMP(ip[-stride])) & mask);
wp++; ip++)
n -= stride;
}
}
}
}
| 27,657 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static void balloon_process(struct work_struct *work)
{
enum bp_state state = BP_DONE;
long credit;
do {
mutex_lock(&balloon_mutex);
credit = current_credit();
if (credit > 0) {
if (balloon_is_inflated())
state = increase_reservation(credit);
else
state = reserve_additional_memory();
}
if (credit < 0)
state = decrease_reservation(-credit, GFP_BALLOON);
state = update_schedule(state);
mutex_unlock(&balloon_mutex);
cond_resched();
} while (credit && state == BP_DONE);
/* Schedule more work if there is some still to be done. */
if (state == BP_EAGAIN)
schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
}
Commit Message: xen: let alloc_xenballooned_pages() fail if not enough memory free
commit a1078e821b605813b63bf6bca414a85f804d5c66 upstream.
Instead of trying to allocate pages with GFP_USER in
add_ballooned_pages() check the available free memory via
si_mem_available(). GFP_USER is far less limiting memory exhaustion
than the test via si_mem_available().
This will avoid dom0 running out of memory due to excessive foreign
page mappings especially on ARM and on x86 in PVH mode, as those don't
have a pre-ballooned area which can be used for foreign mappings.
As the normal ballooning suffers from the same problem don't balloon
down more than si_mem_available() pages in one iteration. At the same
time limit the default maximum number of retries.
This is part of XSA-300.
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CWE ID: CWE-400 | static void balloon_process(struct work_struct *work)
{
enum bp_state state = BP_DONE;
long credit;
do {
mutex_lock(&balloon_mutex);
credit = current_credit();
if (credit > 0) {
if (balloon_is_inflated())
state = increase_reservation(credit);
else
state = reserve_additional_memory();
}
if (credit < 0) {
long n_pages;
n_pages = min(-credit, si_mem_available());
state = decrease_reservation(n_pages, GFP_BALLOON);
if (state == BP_DONE && n_pages != -credit &&
n_pages < totalreserve_pages)
state = BP_EAGAIN;
}
state = update_schedule(state);
mutex_unlock(&balloon_mutex);
cond_resched();
} while (credit && state == BP_DONE);
/* Schedule more work if there is some still to be done. */
if (state == BP_EAGAIN)
schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
}
| 1,704 |
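The central change in the entry above is clamping a single deflation pass to si_mem_available(), so one oversized request cannot drain all free memory at once, which is the resource-exhaustion angle of the CWE-400 tag. A hedged userspace sketch of the same clamping idea, using std::min as a stand-in for the kernel's min() and plain longs for the page counters:

#include <algorithm>
#include <cstdio>

int main() {
    long requested_pages = 1000000;  // pages the balloon is asked to give back
    long available_pages = 12345;    // pages the system can actually spare now

    // Never take more in one pass than is currently free; retry later instead.
    long granted = std::min(requested_pages, available_pages);
    std::printf("granting %ld of %ld requested pages this pass\n",
                granted, requested_pages);
    return 0;
}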
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void WorkerProcessLauncher::Core::StopWorker() {
DCHECK(caller_task_runner_->BelongsToCurrentThread());
if (launch_success_timer_->IsRunning()) {
launch_success_timer_->Stop();
launch_backoff_.InformOfRequest(false);
}
self_ = this;
ipc_enabled_ = false;
if (process_watcher_.GetWatchedObject() != NULL) {
launcher_delegate_->KillProcess(CONTROL_C_EXIT);
return;
}
DCHECK(process_watcher_.GetWatchedObject() == NULL);
ipc_error_timer_->Stop();
process_exit_event_.Close();
if (stopping_) {
ipc_error_timer_.reset();
launch_timer_.reset();
self_ = NULL;
return;
}
self_ = NULL;
DWORD exit_code = launcher_delegate_->GetExitCode();
if (kMinPermanentErrorExitCode <= exit_code &&
exit_code <= kMaxPermanentErrorExitCode) {
worker_delegate_->OnPermanentError();
return;
}
launch_timer_->Start(FROM_HERE, launch_backoff_.GetTimeUntilRelease(),
this, &Core::LaunchWorker);
}
Commit Message: Validate and report peer's PID to WorkerProcessIpcDelegate so it will be able to duplicate handles to and from the worker process.
As a side effect WorkerProcessLauncher::Delegate is now responsible for retrieving the client's PID and deciding whether a launch failed due to a permanent error condition.
BUG=134694
Review URL: https://chromiumcodereview.appspot.com/11143025
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@162778 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-399 | void WorkerProcessLauncher::Core::StopWorker() {
DCHECK(caller_task_runner_->BelongsToCurrentThread());
// Keep the object alive in case one of delegates decides to delete |this|.
scoped_refptr<Core> self = this;
if (launch_success_timer_->IsRunning()) {
launch_success_timer_->Stop();
launch_backoff_.InformOfRequest(false);
}
ipc_enabled_ = false;
if (process_watcher_.GetWatchedObject() != NULL) {
launcher_delegate_->KillProcess(CONTROL_C_EXIT);
// Wait until the process is actually stopped if the caller keeps
// a reference to |this|. Otherwise terminate everything right now - there
// won't be a second chance.
if (!stopping_)
return;
process_watcher_.StopWatching();
}
ipc_error_timer_->Stop();
process_exit_event_.Close();
if (stopping_) {
ipc_error_timer_.reset();
launch_timer_.reset();
return;
}
if (launcher_delegate_->IsPermanentError(launch_backoff_.failure_count())) {
if (!stopping_)
worker_delegate_->OnPermanentError();
} else {
// Schedule the next attempt to launch the worker process.
launch_timer_->Start(FROM_HERE, launch_backoff_.GetTimeUntilRelease(),
this, &Core::LaunchWorker);
}
}
| 3,185 |
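The heart of the fix above is holding a stack-local scoped_refptr<Core> for the whole method instead of toggling a self_ member, so a delegate callback cannot destroy the object mid-call. The sketch below shows the same keep-alive idiom with standard shared_ptr; Chromium's scoped_refptr behaves analogously, and none of the names here come from the original code:

#include <cstdio>
#include <functional>
#include <memory>

struct Worker : std::enable_shared_from_this<Worker> {
    std::function<void()> on_error;  // may drop the last external reference to us

    void Stop() {
        // Pin ourselves for the duration of Stop(), even if the callback
        // below releases every other reference.
        std::shared_ptr<Worker> self = shared_from_this();
        if (on_error) on_error();
        std::printf("Stop() finished safely, use_count=%ld\n",
                    static_cast<long>(self.use_count()));
    }
};

int main() {
    auto w = std::make_shared<Worker>();
    Worker* raw = w.get();
    w->on_error = [&w] { w.reset(); };  // simulates a delegate deleting its owner
    raw->Stop();                        // safe only because Stop() pins itself first
    return 0;
}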
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: SPL_METHOD(FilesystemIterator, current)
{
spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if (SPL_FILE_DIR_CURRENT(intern, SPL_FILE_DIR_CURRENT_AS_PATHNAME)) {
spl_filesystem_object_get_file_name(intern TSRMLS_CC);
RETURN_STRINGL(intern->file_name, intern->file_name_len, 1);
} else if (SPL_FILE_DIR_CURRENT(intern, SPL_FILE_DIR_CURRENT_AS_FILEINFO)) {
spl_filesystem_object_get_file_name(intern TSRMLS_CC);
spl_filesystem_object_create_type(0, intern, SPL_FS_INFO, NULL, return_value TSRMLS_CC);
} else {
RETURN_ZVAL(getThis(), 1, 0);
/*RETURN_STRING(intern->u.dir.entry.d_name, 1);*/
}
}
Commit Message: Fix bug #72262 - do not overflow int
CWE ID: CWE-190 | SPL_METHOD(FilesystemIterator, current)
{
spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(getThis() TSRMLS_CC);
if (zend_parse_parameters_none() == FAILURE) {
return;
}
if (SPL_FILE_DIR_CURRENT(intern, SPL_FILE_DIR_CURRENT_AS_PATHNAME)) {
spl_filesystem_object_get_file_name(intern TSRMLS_CC);
RETURN_STRINGL(intern->file_name, intern->file_name_len, 1);
} else if (SPL_FILE_DIR_CURRENT(intern, SPL_FILE_DIR_CURRENT_AS_FILEINFO)) {
spl_filesystem_object_get_file_name(intern TSRMLS_CC);
spl_filesystem_object_create_type(0, intern, SPL_FS_INFO, NULL, return_value TSRMLS_CC);
} else {
RETURN_ZVAL(getThis(), 1, 0);
/*RETURN_STRING(intern->u.dir.entry.d_name, 1);*/
}
}
| 15,281 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
register const unsigned char
*p;
size_t
length;
unsigned char
*datum;
unsigned int
count,
long_sans;
unsigned short
id,
short_sans;
length=GetStringInfoLength(bim_profile);
if (length < 16)
return;
datum=GetStringInfoDatum(bim_profile);
for (p=datum; (p >= datum) && (p < (datum+length-16)); )
{
register unsigned char
*q;
q=(unsigned char *) p;
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
break;
p=PushLongPixel(MSBEndian,p,&long_sans);
p=PushShortPixel(MSBEndian,p,&id);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushLongPixel(MSBEndian,p,&count);
if ((id == 0x000003ed) && (PSDQuantum(count) < (ssize_t) (length-12)))
{
(void) CopyMagickMemory(q,q+PSDQuantum(count)+12,length-
(PSDQuantum(count)+12)-(q-datum));
SetStringInfoLength(bim_profile,length-(PSDQuantum(count)+12));
break;
}
p+=count;
if ((count & 0x01) != 0)
p++;
}
}
Commit Message: Added check for out of bounds read (https://github.com/ImageMagick/ImageMagick/issues/108).
CWE ID: CWE-125 | static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
register const unsigned char
*p;
size_t
length;
unsigned char
*datum;
unsigned int
count,
long_sans;
unsigned short
id,
short_sans;
length=GetStringInfoLength(bim_profile);
if (length < 16)
return;
datum=GetStringInfoDatum(bim_profile);
for (p=datum; (p >= datum) && (p < (datum+length-16)); )
{
register unsigned char
*q;
ssize_t
cnt;
q=(unsigned char *) p;
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
return;
p=PushLongPixel(MSBEndian,p,&long_sans);
p=PushShortPixel(MSBEndian,p,&id);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushLongPixel(MSBEndian,p,&count);
cnt=PSDQuantum(count);
if (cnt < 0)
return;
if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)))
{
(void) CopyMagickMemory(q,q+cnt+12,length-(cnt+12)-(q-datum));
SetStringInfoLength(bim_profile,length-(cnt+12));
break;
}
p+=count;
if ((count & 0x01) != 0)
p++;
}
}
| 1,889 |
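The decisive change in the entry above is that the 8BIM resource length is materialised once into a signed ssize_t and rejected when negative before it feeds the copy arithmetic. PSDQuantum itself is not shown in the snippet, so the sketch below is only a generic illustration of validating an untrusted length before buffer arithmetic, with invented names and without any claim about PSDQuantum's exact semantics:

#include <cstdio>
#include <cstring>

int main() {
    unsigned char block[64] = {0};
    unsigned int count = 0xFFFFFFF0u;  // length field as read from the file

    // Widen to a signed type and range-check before any size arithmetic
    // that could wrap or run past the containing block.
    long long cnt = static_cast<long long>(count);
    if (cnt < 0 || cnt + 12 > static_cast<long long>(sizeof(block))) {
        std::puts("rejected: resource length exceeds the containing block");
        return 0;
    }
    // Only reached for sane lengths: compact the block past this resource.
    std::memmove(block, block + cnt + 12,
                 sizeof(block) - static_cast<std::size_t>(cnt + 12));
    return 0;
}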
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void AssertObserverCount(int added_count, int removed_count,
int changed_count) {
ASSERT_EQ(added_count, added_count_);
ASSERT_EQ(removed_count, removed_count_);
ASSERT_EQ(changed_count, changed_count_);
}
Commit Message: Add OVERRIDE to ui::TreeModelObserver overridden methods.
BUG=None
TEST=None
R=sky@chromium.org
Review URL: http://codereview.chromium.org/7046093
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@88827 0039d316-1c4b-4281-b951-d872f2087c98
CWE ID: CWE-119 | void AssertObserverCount(int added_count, int removed_count,
void AssertObserverCount(int added_count,
int removed_count,
int changed_count) {
ASSERT_EQ(added_count, added_count_);
ASSERT_EQ(removed_count, removed_count_);
ASSERT_EQ(changed_count, changed_count_);
}
| 24,263 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: ZEND_API zend_op_array *compile_file(zend_file_handle *file_handle, int type TSRMLS_DC)
{
zend_lex_state original_lex_state;
zend_op_array *op_array = (zend_op_array *) emalloc(sizeof(zend_op_array));
zend_op_array *original_active_op_array = CG(active_op_array);
zend_op_array *retval=NULL;
int compiler_result;
zend_bool compilation_successful=0;
znode retval_znode;
zend_bool original_in_compilation = CG(in_compilation);
retval_znode.op_type = IS_CONST;
retval_znode.u.constant.type = IS_LONG;
retval_znode.u.constant.value.lval = 1;
Z_UNSET_ISREF(retval_znode.u.constant);
Z_SET_REFCOUNT(retval_znode.u.constant, 1);
zend_save_lexical_state(&original_lex_state TSRMLS_CC);
retval = op_array; /* success oriented */
if (open_file_for_scanning(file_handle TSRMLS_CC)==FAILURE) {
if (type==ZEND_REQUIRE) {
zend_message_dispatcher(ZMSG_FAILED_REQUIRE_FOPEN, file_handle->filename TSRMLS_CC);
zend_bailout();
} else {
zend_message_dispatcher(ZMSG_FAILED_INCLUDE_FOPEN, file_handle->filename TSRMLS_CC);
}
compilation_successful=0;
} else {
init_op_array(op_array, ZEND_USER_FUNCTION, INITIAL_OP_ARRAY_SIZE TSRMLS_CC);
CG(in_compilation) = 1;
CG(active_op_array) = op_array;
zend_stack_push(&CG(context_stack), (void *) &CG(context), sizeof(CG(context)));
zend_init_compiler_context(TSRMLS_C);
compiler_result = zendparse(TSRMLS_C);
zend_do_return(&retval_znode, 0 TSRMLS_CC);
CG(in_compilation) = original_in_compilation;
if (compiler_result==1) { /* parser error */
zend_bailout();
}
compilation_successful=1;
}
if (retval) {
CG(active_op_array) = original_active_op_array;
if (compilation_successful) {
pass_two(op_array TSRMLS_CC);
zend_release_labels(0 TSRMLS_CC);
} else {
efree(op_array);
retval = NULL;
}
}
zend_restore_lexical_state(&original_lex_state TSRMLS_CC);
return retval;
}
Commit Message: fix bug #64660 - yyparse can return 2, not only 1
CWE ID: CWE-20 | ZEND_API zend_op_array *compile_file(zend_file_handle *file_handle, int type TSRMLS_DC)
{
zend_lex_state original_lex_state;
zend_op_array *op_array = (zend_op_array *) emalloc(sizeof(zend_op_array));
zend_op_array *original_active_op_array = CG(active_op_array);
zend_op_array *retval=NULL;
int compiler_result;
zend_bool compilation_successful=0;
znode retval_znode;
zend_bool original_in_compilation = CG(in_compilation);
retval_znode.op_type = IS_CONST;
retval_znode.u.constant.type = IS_LONG;
retval_znode.u.constant.value.lval = 1;
Z_UNSET_ISREF(retval_znode.u.constant);
Z_SET_REFCOUNT(retval_znode.u.constant, 1);
zend_save_lexical_state(&original_lex_state TSRMLS_CC);
retval = op_array; /* success oriented */
if (open_file_for_scanning(file_handle TSRMLS_CC)==FAILURE) {
if (type==ZEND_REQUIRE) {
zend_message_dispatcher(ZMSG_FAILED_REQUIRE_FOPEN, file_handle->filename TSRMLS_CC);
zend_bailout();
} else {
zend_message_dispatcher(ZMSG_FAILED_INCLUDE_FOPEN, file_handle->filename TSRMLS_CC);
}
compilation_successful=0;
} else {
init_op_array(op_array, ZEND_USER_FUNCTION, INITIAL_OP_ARRAY_SIZE TSRMLS_CC);
CG(in_compilation) = 1;
CG(active_op_array) = op_array;
zend_stack_push(&CG(context_stack), (void *) &CG(context), sizeof(CG(context)));
zend_init_compiler_context(TSRMLS_C);
compiler_result = zendparse(TSRMLS_C);
zend_do_return(&retval_znode, 0 TSRMLS_CC);
CG(in_compilation) = original_in_compilation;
if (compiler_result != 0) { /* parser error */
zend_bailout();
}
compilation_successful=1;
}
if (retval) {
CG(active_op_array) = original_active_op_array;
if (compilation_successful) {
pass_two(op_array TSRMLS_CC);
zend_release_labels(0 TSRMLS_CC);
} else {
efree(op_array);
retval = NULL;
}
}
zend_restore_lexical_state(&original_lex_state TSRMLS_CC);
return retval;
}
| 16,299 |
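The one-line change above matters because a Bison-generated yyparse() returns 0 on success, 1 on a syntax error and 2 on memory exhaustion, so testing only for the value 1 lets the out-of-memory case fall through as if compilation had succeeded. A small self-contained illustration of checking every failure mode (the parser here is a stub, not the Zend one):

#include <cstdio>
#include <initializer_list>

// Stand-in for a Bison-generated parser: 0 = success,
// 1 = syntax error, 2 = memory exhausted (Bison's convention).
static int stub_yyparse(int outcome) { return outcome; }

int main() {
    for (int outcome : {0, 1, 2}) {
        int rc = stub_yyparse(outcome);
        // Testing rc == 1 would silently accept rc == 2;
        // rc != 0 rejects every failure mode.
        std::printf("rc=%d -> %s\n", rc, rc != 0 ? "bail out" : "continue");
    }
    return 0;
}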
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void ChunkedUploadDataStream::AppendData(
const char* data, int data_len, bool is_done) {
DCHECK(!all_data_appended_);
DCHECK(data_len > 0 || is_done);
if (data_len > 0) {
DCHECK(data);
upload_data_.push_back(
base::MakeUnique<std::vector<char>>(data, data + data_len));
}
all_data_appended_ = is_done;
if (!read_buffer_.get())
return;
int result = ReadChunk(read_buffer_.get(), read_buffer_len_);
DCHECK_GE(result, 0);
read_buffer_ = NULL;
read_buffer_len_ = 0;
OnReadCompleted(result);
}
Commit Message: Replace base::MakeUnique with std::make_unique in net/.
base/memory/ptr_util.h includes will be cleaned up later.
Bug: 755727
Change-Id: Ibaf46f05c9b02b76f9a91e819984b087a8c0d434
Reviewed-on: https://chromium-review.googlesource.com/627300
Commit-Queue: Jeremy Roman <jbroman@chromium.org>
Reviewed-by: Daniel Cheng <dcheng@chromium.org>
Reviewed-by: Bence Béky <bnc@chromium.org>
Cr-Commit-Position: refs/heads/master@{#498123}
CWE ID: CWE-311 | void ChunkedUploadDataStream::AppendData(
const char* data, int data_len, bool is_done) {
DCHECK(!all_data_appended_);
DCHECK(data_len > 0 || is_done);
if (data_len > 0) {
DCHECK(data);
upload_data_.push_back(
std::make_unique<std::vector<char>>(data, data + data_len));
}
all_data_appended_ = is_done;
if (!read_buffer_.get())
return;
int result = ReadChunk(read_buffer_.get(), read_buffer_len_);
DCHECK_GE(result, 0);
read_buffer_ = NULL;
read_buffer_len_ = 0;
OnReadCompleted(result);
}
| 5,229 |
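For context on the entry above: base::MakeUnique was Chromium's stand-in for std::make_unique before C++14 was available, so the fix is a mechanical substitution rather than a behavioural change. A minimal standalone use of the standard form, mirroring the vector-of-chars construction in the fixed code:

#include <memory>
#include <vector>

int main() {
    const char data[] = {'a', 'b', 'c'};
    // Same shape as the fixed code: copy [data, data + len) into a new vector.
    auto chunk = std::make_unique<std::vector<char>>(data, data + sizeof(data));
    return chunk->size() == 3 ? 0 : 1;
}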
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: bool WebMediaPlayerImpl::DidGetOpaqueResponseFromServiceWorker() const {
if (data_source_)
return data_source_->DidGetOpaqueResponseViaServiceWorker();
return false;
}
Commit Message: Simplify "WouldTaintOrigin" concept in media/blink
Currently WebMediaPlayer has three predicates:
- DidGetOpaqueResponseFromServiceWorker
- HasSingleSecurityOrigin
- DidPassCORSAccessCheck
. These are used to determine whether the response body is available
for scripts. They are known to be confusing, and actually
MediaElementAudioSourceHandler::WouldTaintOrigin misuses them.
This CL merges the three predicates to one, WouldTaintOrigin, to remove
the confusion. Now the "response type" concept is available and we
don't need a custom CORS check, so this CL removes
BaseAudioContext::WouldTaintOrigin. This CL also renames
URLData::has_opaque_data_ and its (direct and indirect) data accessors
to match the spec.
Bug: 849942, 875153
Change-Id: I6acf50169d7445c4ff614e80ac606f79ee577d2a
Reviewed-on: https://chromium-review.googlesource.com/c/1238098
Reviewed-by: Fredrik Hubinette <hubbe@chromium.org>
Reviewed-by: Kinuko Yasuda <kinuko@chromium.org>
Reviewed-by: Raymond Toy <rtoy@chromium.org>
Commit-Queue: Yutaka Hirano <yhirano@chromium.org>
Cr-Commit-Position: refs/heads/master@{#598258}
CWE ID: CWE-732 | bool WebMediaPlayerImpl::DidGetOpaqueResponseFromServiceWorker() const {
| 5,660 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: OMX_ERRORTYPE SoftAVCEncoder::internalSetParameter(
OMX_INDEXTYPE index, const OMX_PTR params) {
int32_t indexFull = index;
switch (indexFull) {
case OMX_IndexParamVideoBitrate:
{
OMX_VIDEO_PARAM_BITRATETYPE *bitRate =
(OMX_VIDEO_PARAM_BITRATETYPE *) params;
if (bitRate->nPortIndex != 1 ||
bitRate->eControlRate != OMX_Video_ControlRateVariable) {
return OMX_ErrorUndefined;
}
mBitrate = bitRate->nTargetBitrate;
return OMX_ErrorNone;
}
case OMX_IndexParamVideoAvc:
{
OMX_VIDEO_PARAM_AVCTYPE *avcType =
(OMX_VIDEO_PARAM_AVCTYPE *)params;
if (avcType->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
if (avcType->eProfile != OMX_VIDEO_AVCProfileBaseline ||
avcType->nRefFrames != 1 ||
avcType->nBFrames != 0 ||
avcType->bUseHadamard != OMX_TRUE ||
(avcType->nAllowedPictureTypes & OMX_VIDEO_PictureTypeB) != 0 ||
avcType->nRefIdx10ActiveMinus1 != 0 ||
avcType->nRefIdx11ActiveMinus1 != 0 ||
avcType->bWeightedPPrediction != OMX_FALSE ||
avcType->bEntropyCodingCABAC != OMX_FALSE ||
avcType->bconstIpred != OMX_FALSE ||
avcType->bDirect8x8Inference != OMX_FALSE ||
avcType->bDirectSpatialTemporal != OMX_FALSE ||
avcType->nCabacInitIdc != 0) {
return OMX_ErrorUndefined;
}
if (OK != ConvertOmxAvcLevelToAvcSpecLevel(avcType->eLevel, &mAVCEncLevel)) {
return OMX_ErrorUndefined;
}
return OMX_ErrorNone;
}
default:
return SoftVideoEncoderOMXComponent::internalSetParameter(index, params);
}
}
Commit Message: DO NOT MERGE Verify OMX buffer sizes prior to access
Bug: 27207275
Change-Id: I4412825d1ee233d993af0a67708bea54304ff62d
CWE ID: CWE-119 | OMX_ERRORTYPE SoftAVCEncoder::internalSetParameter(
OMX_INDEXTYPE index, const OMX_PTR params) {
int32_t indexFull = index;
switch (indexFull) {
case OMX_IndexParamVideoBitrate:
{
OMX_VIDEO_PARAM_BITRATETYPE *bitRate =
(OMX_VIDEO_PARAM_BITRATETYPE *) params;
if (!isValidOMXParam(bitRate)) {
return OMX_ErrorBadParameter;
}
if (bitRate->nPortIndex != 1 ||
bitRate->eControlRate != OMX_Video_ControlRateVariable) {
return OMX_ErrorUndefined;
}
mBitrate = bitRate->nTargetBitrate;
return OMX_ErrorNone;
}
case OMX_IndexParamVideoAvc:
{
OMX_VIDEO_PARAM_AVCTYPE *avcType =
(OMX_VIDEO_PARAM_AVCTYPE *)params;
if (!isValidOMXParam(avcType)) {
return OMX_ErrorBadParameter;
}
if (avcType->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
if (avcType->eProfile != OMX_VIDEO_AVCProfileBaseline ||
avcType->nRefFrames != 1 ||
avcType->nBFrames != 0 ||
avcType->bUseHadamard != OMX_TRUE ||
(avcType->nAllowedPictureTypes & OMX_VIDEO_PictureTypeB) != 0 ||
avcType->nRefIdx10ActiveMinus1 != 0 ||
avcType->nRefIdx11ActiveMinus1 != 0 ||
avcType->bWeightedPPrediction != OMX_FALSE ||
avcType->bEntropyCodingCABAC != OMX_FALSE ||
avcType->bconstIpred != OMX_FALSE ||
avcType->bDirect8x8Inference != OMX_FALSE ||
avcType->bDirectSpatialTemporal != OMX_FALSE ||
avcType->nCabacInitIdc != 0) {
return OMX_ErrorUndefined;
}
if (OK != ConvertOmxAvcLevelToAvcSpecLevel(avcType->eLevel, &mAVCEncLevel)) {
return OMX_ErrorUndefined;
}
return OMX_ErrorNone;
}
default:
return SoftVideoEncoderOMXComponent::internalSetParameter(index, params);
}
}
| 17,876 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: OMX_ERRORTYPE SoftAAC2::internalGetParameter(
OMX_INDEXTYPE index, OMX_PTR params) {
switch (index) {
case OMX_IndexParamAudioAac:
{
OMX_AUDIO_PARAM_AACPROFILETYPE *aacParams =
(OMX_AUDIO_PARAM_AACPROFILETYPE *)params;
if (aacParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
aacParams->nBitRate = 0;
aacParams->nAudioBandWidth = 0;
aacParams->nAACtools = 0;
aacParams->nAACERtools = 0;
aacParams->eAACProfile = OMX_AUDIO_AACObjectMain;
aacParams->eAACStreamFormat =
mIsADTS
? OMX_AUDIO_AACStreamFormatMP4ADTS
: OMX_AUDIO_AACStreamFormatMP4FF;
aacParams->eChannelMode = OMX_AUDIO_ChannelModeStereo;
if (!isConfigured()) {
aacParams->nChannels = 1;
aacParams->nSampleRate = 44100;
aacParams->nFrameLength = 0;
} else {
aacParams->nChannels = mStreamInfo->numChannels;
aacParams->nSampleRate = mStreamInfo->sampleRate;
aacParams->nFrameLength = mStreamInfo->frameSize;
}
return OMX_ErrorNone;
}
case OMX_IndexParamAudioPcm:
{
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
if (pcmParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
pcmParams->eNumData = OMX_NumericalDataSigned;
pcmParams->eEndian = OMX_EndianBig;
pcmParams->bInterleaved = OMX_TRUE;
pcmParams->nBitPerSample = 16;
pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelLF;
pcmParams->eChannelMapping[1] = OMX_AUDIO_ChannelRF;
pcmParams->eChannelMapping[2] = OMX_AUDIO_ChannelCF;
pcmParams->eChannelMapping[3] = OMX_AUDIO_ChannelLFE;
pcmParams->eChannelMapping[4] = OMX_AUDIO_ChannelLS;
pcmParams->eChannelMapping[5] = OMX_AUDIO_ChannelRS;
if (!isConfigured()) {
pcmParams->nChannels = 1;
pcmParams->nSamplingRate = 44100;
} else {
pcmParams->nChannels = mStreamInfo->numChannels;
pcmParams->nSamplingRate = mStreamInfo->sampleRate;
}
return OMX_ErrorNone;
}
default:
return SimpleSoftOMXComponent::internalGetParameter(index, params);
}
}
Commit Message: DO NOT MERGE Verify OMX buffer sizes prior to access
Bug: 27207275
Change-Id: I4412825d1ee233d993af0a67708bea54304ff62d
CWE ID: CWE-119 | OMX_ERRORTYPE SoftAAC2::internalGetParameter(
OMX_INDEXTYPE index, OMX_PTR params) {
switch (index) {
case OMX_IndexParamAudioAac:
{
OMX_AUDIO_PARAM_AACPROFILETYPE *aacParams =
(OMX_AUDIO_PARAM_AACPROFILETYPE *)params;
if (!isValidOMXParam(aacParams)) {
return OMX_ErrorBadParameter;
}
if (aacParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
aacParams->nBitRate = 0;
aacParams->nAudioBandWidth = 0;
aacParams->nAACtools = 0;
aacParams->nAACERtools = 0;
aacParams->eAACProfile = OMX_AUDIO_AACObjectMain;
aacParams->eAACStreamFormat =
mIsADTS
? OMX_AUDIO_AACStreamFormatMP4ADTS
: OMX_AUDIO_AACStreamFormatMP4FF;
aacParams->eChannelMode = OMX_AUDIO_ChannelModeStereo;
if (!isConfigured()) {
aacParams->nChannels = 1;
aacParams->nSampleRate = 44100;
aacParams->nFrameLength = 0;
} else {
aacParams->nChannels = mStreamInfo->numChannels;
aacParams->nSampleRate = mStreamInfo->sampleRate;
aacParams->nFrameLength = mStreamInfo->frameSize;
}
return OMX_ErrorNone;
}
case OMX_IndexParamAudioPcm:
{
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
if (!isValidOMXParam(pcmParams)) {
return OMX_ErrorBadParameter;
}
if (pcmParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
pcmParams->eNumData = OMX_NumericalDataSigned;
pcmParams->eEndian = OMX_EndianBig;
pcmParams->bInterleaved = OMX_TRUE;
pcmParams->nBitPerSample = 16;
pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelLF;
pcmParams->eChannelMapping[1] = OMX_AUDIO_ChannelRF;
pcmParams->eChannelMapping[2] = OMX_AUDIO_ChannelCF;
pcmParams->eChannelMapping[3] = OMX_AUDIO_ChannelLFE;
pcmParams->eChannelMapping[4] = OMX_AUDIO_ChannelLS;
pcmParams->eChannelMapping[5] = OMX_AUDIO_ChannelRS;
if (!isConfigured()) {
pcmParams->nChannels = 1;
pcmParams->nSamplingRate = 44100;
} else {
pcmParams->nChannels = mStreamInfo->numChannels;
pcmParams->nSamplingRate = mStreamInfo->sampleRate;
}
return OMX_ErrorNone;
}
default:
return SimpleSoftOMXComponent::internalGetParameter(index, params);
}
}
| 25,762 |
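Both OMX fixes above guard every cast parameter with isValidOMXParam(), a helper whose definition is not part of the snippets. As a hedged sketch only, since the real helper's exact checks are not shown here, a minimal version of such a size guard could rely on the OMX convention that every parameter struct begins with an nSize field describing the caller-supplied buffer:

#include <cstdint>

// Hypothetical stand-in for an OMX parameter header.
struct OmxParamHeader {
    std::uint32_t nSize;     // size the caller claims to have allocated
    std::uint32_t nVersion;  // version word, unused in this sketch
};

// Minimal sketch in the spirit of isValidOMXParam(): reject buffers that
// are declared smaller than the struct about to be read or written, which
// is what blocks the out-of-bounds access the commit message describes.
template <typename T>
bool isValidOMXParamSketch(const T* param) {
    return param != nullptr && param->nSize >= sizeof(T);
}

int main() {
    OmxParamHeader hdr{static_cast<std::uint32_t>(sizeof(OmxParamHeader)), 1u};
    return isValidOMXParamSketch(&hdr) ? 0 : 1;
}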
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static ext3_fsblk_t get_sb_block(void **data, struct super_block *sb)
{
ext3_fsblk_t sb_block;
char *options = (char *) *data;
if (!options || strncmp(options, "sb=", 3) != 0)
return 1; /* Default location */
options += 3;
/*todo: use simple_strtoll with >32bit ext3 */
sb_block = simple_strtoul(options, &options, 0);
if (*options && *options != ',') {
ext3_msg(sb, "error: invalid sb specification: %s",
(char *) *data);
return 1;
}
if (*options == ',')
options++;
*data = (void *) options;
return sb_block;
}
Commit Message: ext3: Fix format string issues
ext3_msg() takes the printk prefix as the second parameter and the
format string as the third parameter. Two callers of ext3_msg omit the
prefix and pass the format string as the second parameter and the first
parameter to the format string as the third parameter. In both cases
this string comes from an arbitrary source. Which means the string may
contain format string characters, which will
lead to undefined and potentially harmful behavior.
The issue was introduced in commit 4cf46b67eb("ext3: Unify log messages
in ext3") and is fixed by this patch.
CC: stable@vger.kernel.org
Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Jan Kara <jack@suse.cz>
CWE ID: CWE-20 | static ext3_fsblk_t get_sb_block(void **data, struct super_block *sb)
{
ext3_fsblk_t sb_block;
char *options = (char *) *data;
if (!options || strncmp(options, "sb=", 3) != 0)
return 1; /* Default location */
options += 3;
/*todo: use simple_strtoll with >32bit ext3 */
sb_block = simple_strtoul(options, &options, 0);
if (*options && *options != ',') {
ext3_msg(sb, KERN_ERR, "error: invalid sb specification: %s",
(char *) *data);
return 1;
}
if (*options == ',')
options++;
*data = (void *) options;
return sb_block;
}
| 5,412 |
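The ext3 change above comes down to the classic format-string rule: externally influenced text (here the mount options echoed back in an error message) must be passed as an argument to a fixed format, never as the format itself. A small self-contained illustration using plain printf, since ext3_msg is kernel-internal:

#include <cstdio>

int main() {
    const char* user_input = "sb=%x%x%x%n";  // hostile mount-option string

    // Wrong: the user text would be interpreted as a format string.
    // std::printf(user_input);

    // Right: the user text is only data for a fixed format.
    std::printf("error: invalid sb specification: %s\n", user_input);
    return 0;
}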
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void Editor::Transpose() {
if (!CanEdit())
//// TODO(yosin): We should move |Transpose()| into |ExecuteTranspose()| in
//// "EditorCommand.cpp"
return;
GetFrame().GetDocument()->UpdateStyleAndLayoutIgnorePendingStylesheets();
const EphemeralRange& range = ComputeRangeForTranspose(GetFrame());
if (range.IsNull())
return;
const String& text = PlainText(range);
if (text.length() != 2)
return;
const String& transposed = text.Right(1) + text.Left(1);
if (DispatchBeforeInputInsertText(
EventTargetNodeForDocument(GetFrame().GetDocument()), transposed,
InputEvent::InputType::kInsertTranspose,
new StaticRangeVector(1, StaticRange::Create(range))) !=
DispatchEventResult::kNotCanceled)
return;
if (frame_->GetDocument()->GetFrame() != frame_)
return;
GetFrame().GetDocument()->UpdateStyleAndLayoutIgnorePendingStylesheets();
const EphemeralRange& new_range = ComputeRangeForTranspose(GetFrame());
if (new_range.IsNull())
return;
const String& new_text = PlainText(new_range);
if (new_text.length() != 2)
return;
const String& new_transposed = new_text.Right(1) + new_text.Left(1);
const SelectionInDOMTree& new_selection =
SelectionInDOMTree::Builder().SetBaseAndExtent(new_range).Build();
if (CreateVisibleSelection(new_selection) !=
GetFrame().Selection().ComputeVisibleSelectionInDOMTree())
GetFrame().Selection().SetSelection(new_selection);
ReplaceSelectionWithText(new_transposed, false, false,
InputEvent::InputType::kInsertTranspose);
}
Commit Message: Move Editor::Transpose() out of Editor class
This patch moves |Editor::Transpose()| out of |Editor| class as preparation of
expanding it into |ExecutTranspose()| in "EditorCommand.cpp" to make |Editor|
class simpler for improving code health.
Following patch will expand |Transpose()| into |ExecutTranspose()|.
Bug: 672405
Change-Id: Icde253623f31813d2b4517c4da7d4798bd5fadf6
Reviewed-on: https://chromium-review.googlesource.com/583880
Reviewed-by: Xiaocheng Hu <xiaochengh@chromium.org>
Commit-Queue: Yoshifumi Inoue <yosin@chromium.org>
Cr-Commit-Position: refs/heads/master@{#489518}
CWE ID: | void Editor::Transpose() {
//// TODO(yosin): We should move |Transpose()| into |ExecuteTranspose()| in
//// "EditorCommand.cpp"
void Transpose(LocalFrame& frame) {
Editor& editor = frame.GetEditor();
if (!editor.CanEdit())
return;
Document* const document = frame.GetDocument();
document->UpdateStyleAndLayoutIgnorePendingStylesheets();
const EphemeralRange& range = ComputeRangeForTranspose(frame);
if (range.IsNull())
return;
const String& text = PlainText(range);
if (text.length() != 2)
return;
const String& transposed = text.Right(1) + text.Left(1);
if (DispatchBeforeInputInsertText(
EventTargetNodeForDocument(document), transposed,
InputEvent::InputType::kInsertTranspose,
new StaticRangeVector(1, StaticRange::Create(range))) !=
DispatchEventResult::kNotCanceled)
return;
// 'beforeinput' event handler may destroy document->
if (frame.GetDocument() != document)
return;
document->UpdateStyleAndLayoutIgnorePendingStylesheets();
const EphemeralRange& new_range = ComputeRangeForTranspose(frame);
if (new_range.IsNull())
return;
const String& new_text = PlainText(new_range);
if (new_text.length() != 2)
return;
const String& new_transposed = new_text.Right(1) + new_text.Left(1);
const SelectionInDOMTree& new_selection =
SelectionInDOMTree::Builder().SetBaseAndExtent(new_range).Build();
if (CreateVisibleSelection(new_selection) !=
frame.Selection().ComputeVisibleSelectionInDOMTree())
frame.Selection().SetSelection(new_selection);
editor.ReplaceSelectionWithText(new_transposed, false, false,
InputEvent::InputType::kInsertTranspose);
}
| 7,912 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: find_referral_tgs(kdc_realm_t *kdc_active_realm, krb5_kdc_req *request,
krb5_principal *krbtgt_princ)
{
krb5_error_code retval = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN;
char **realms = NULL, *hostname = NULL;
krb5_data srealm = request->server->realm;
if (!is_referral_req(kdc_active_realm, request))
goto cleanup;
hostname = data2string(krb5_princ_component(kdc_context,
request->server, 1));
if (hostname == NULL) {
retval = ENOMEM;
goto cleanup;
}
/* If the hostname doesn't contain a '.', it's not a FQDN. */
if (strchr(hostname, '.') == NULL)
goto cleanup;
retval = krb5_get_host_realm(kdc_context, hostname, &realms);
if (retval) {
/* no match found */
kdc_err(kdc_context, retval, "unable to find realm of host");
goto cleanup;
}
/* Don't return a referral to the empty realm or the service realm. */
if (realms == NULL || realms[0] == '\0' ||
data_eq_string(srealm, realms[0])) {
retval = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN;
goto cleanup;
}
retval = krb5_build_principal(kdc_context, krbtgt_princ,
srealm.length, srealm.data,
"krbtgt", realms[0], (char *)0);
cleanup:
krb5_free_host_realm(kdc_context, realms);
free(hostname);
return retval;
}
Commit Message: KDC null deref due to referrals [CVE-2013-1417]
An authenticated remote client can cause a KDC to crash by making a
valid TGS-REQ to a KDC serving a realm with a single-component name.
The process_tgs_req() function dereferences a null pointer because an
unusual failure condition causes a helper function to return success.
While attempting to provide cross-realm referrals for host-based
service principals, the find_referral_tgs() function could return a
TGS principal for a zero-length realm name (indicating that the
hostname in the service principal has no known realm associated with
it).
Subsequently, the find_alternate_tgs() function would attempt to
construct a path to this empty-string realm, and return success along
with a null pointer in its output parameter. This happens because
krb5_walk_realm_tree() returns a list of length one when it attempts
to construct a transit path between a single-component realm and the
empty-string realm. This list causes a loop in find_alternate_tgs()
to iterate over zero elements, resulting in the unexpected output of a
null pointer, which process_tgs_req() proceeds to dereference because
there is no error condition.
Add an error condition to find_referral_tgs() when
krb5_get_host_realm() returns an empty realm name. Also add an error
condition to find_alternate_tgs() to handle the length-one output from
krb5_walk_realm_tree().
The vulnerable configuration is not likely to arise in practice.
(Realm names that have a single component are likely to be test
realms.) Releases prior to krb5-1.11 are not vulnerable.
Thanks to Sol Jerome for reporting this problem.
CVSSv2: AV:N/AC:M/Au:S/C:N/I:N/A:P/E:H/RL:O/RC:C
(cherry picked from commit 3c7f1c21ffaaf6c90f1045f0f5440303c766acc0)
ticket: 7668
version_fixed: 1.11.4
status: resolved
CWE ID: CWE-20 | find_referral_tgs(kdc_realm_t *kdc_active_realm, krb5_kdc_req *request,
krb5_principal *krbtgt_princ)
{
krb5_error_code retval = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN;
char **realms = NULL, *hostname = NULL;
krb5_data srealm = request->server->realm;
if (!is_referral_req(kdc_active_realm, request))
goto cleanup;
hostname = data2string(krb5_princ_component(kdc_context,
request->server, 1));
if (hostname == NULL) {
retval = ENOMEM;
goto cleanup;
}
/* If the hostname doesn't contain a '.', it's not a FQDN. */
if (strchr(hostname, '.') == NULL)
goto cleanup;
retval = krb5_get_host_realm(kdc_context, hostname, &realms);
if (retval) {
/* no match found */
kdc_err(kdc_context, retval, "unable to find realm of host");
goto cleanup;
}
/* Don't return a referral to the empty realm or the service realm. */
if (realms == NULL || realms[0] == NULL || *realms[0] == '\0' ||
data_eq_string(srealm, realms[0])) {
retval = KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN;
goto cleanup;
}
retval = krb5_build_principal(kdc_context, krbtgt_princ,
srealm.length, srealm.data,
"krbtgt", realms[0], (char *)0);
cleanup:
krb5_free_host_realm(kdc_context, realms);
free(hostname);
return retval;
}
| 2,360 |
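The subtle part of the krb5 patch above is the empty-realm test: in the vulnerable version, realms[0] == '\0' compares the char pointer itself against a null pointer constant, so a realm that is a valid pointer to an empty string slips through, which is exactly the case the commit message describes. The fix adds the missing dereference (*realms[0] == '\0') next to the pointer check. A minimal standalone illustration of the difference:

#include <cstdio>

int main() {
    const char* realm = "";  // valid pointer to an empty realm name

    // Old-style check: only catches a null pointer ('\0' acts as plain 0),
    // so the empty string is NOT rejected.
    if (realm == nullptr)
        std::puts("old check: rejected");
    else
        std::puts("old check: accepted, the empty realm slips through");

    // Fixed check: also dereferences to catch the empty string.
    if (realm == nullptr || *realm == '\0')
        std::puts("new check: rejected");
    return 0;
}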
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: PHP_FUNCTION(pg_get_notify)
{
zval *pgsql_link;
int id = -1;
long result_type = PGSQL_ASSOC;
PGconn *pgsql;
PGnotify *pgsql_notify;
if (zend_parse_parameters_ex(ZEND_PARSE_PARAMS_QUIET, ZEND_NUM_ARGS() TSRMLS_CC, "r|l",
&pgsql_link, &result_type) == FAILURE) {
RETURN_FALSE;
}
ZEND_FETCH_RESOURCE2(pgsql, PGconn *, &pgsql_link, id, "PostgreSQL link", le_link, le_plink);
if (!(result_type & PGSQL_BOTH)) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid result type");
RETURN_FALSE;
}
PQconsumeInput(pgsql);
pgsql_notify = PQnotifies(pgsql);
if (!pgsql_notify) {
/* no notify message */
RETURN_FALSE;
}
array_init(return_value);
if (result_type & PGSQL_NUM) {
add_index_string(return_value, 0, pgsql_notify->relname, 1);
add_index_long(return_value, 1, pgsql_notify->be_pid);
#if HAVE_PQPROTOCOLVERSION && HAVE_PQPARAMETERSTATUS
if (PQprotocolVersion(pgsql) >= 3 && atof(PQparameterStatus(pgsql, "server_version")) >= 9.0) {
#else
if (atof(PG_VERSION) >= 9.0) {
#endif
#if HAVE_PQPARAMETERSTATUS
add_index_string(return_value, 2, pgsql_notify->extra, 1);
#endif
}
}
if (result_type & PGSQL_ASSOC) {
add_assoc_string(return_value, "message", pgsql_notify->relname, 1);
add_assoc_long(return_value, "pid", pgsql_notify->be_pid);
#if HAVE_PQPROTOCOLVERSION && HAVE_PQPARAMETERSTATUS
if (PQprotocolVersion(pgsql) >= 3 && atof(PQparameterStatus(pgsql, "server_version")) >= 9.0) {
#else
if (atof(PG_VERSION) >= 9.0) {
#endif
#if HAVE_PQPARAMETERSTATUS
add_assoc_string(return_value, "payload", pgsql_notify->extra, 1);
#endif
}
}
PQfreemem(pgsql_notify);
}
/* }}} */
/* {{{ proto int pg_get_pid([resource connection)
Get backend(server) pid */
PHP_FUNCTION(pg_get_pid)
{
zval *pgsql_link;
int id = -1;
PGconn *pgsql;
if (zend_parse_parameters_ex(ZEND_PARSE_PARAMS_QUIET, ZEND_NUM_ARGS() TSRMLS_CC, "r",
&pgsql_link) == FAILURE) {
RETURN_FALSE;
}
ZEND_FETCH_RESOURCE2(pgsql, PGconn *, &pgsql_link, id, "PostgreSQL link", le_link, le_plink);
RETURN_LONG(PQbackendPID(pgsql));
}
/* }}} */
/* {{{ php_pgsql_meta_data
* TODO: Add meta_data cache for better performance
*/
PHP_PGSQL_API int php_pgsql_meta_data(PGconn *pg_link, const char *table_name, zval *meta TSRMLS_DC)
{
PGresult *pg_result;
char *src, *tmp_name, *tmp_name2 = NULL;
char *escaped;
smart_str querystr = {0};
size_t new_len;
int i, num_rows;
zval *elem;
if (!*table_name) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "The table name must be specified");
return FAILURE;
}
src = estrdup(table_name);
tmp_name = php_strtok_r(src, ".", &tmp_name2);
if (!tmp_name2 || !*tmp_name2) {
/* Default schema */
		tmp_name2 = tmp_name;
		tmp_name = "public";
	}

	smart_str_appends(&querystr,
			"SELECT a.attname, a.attnum, t.typname, a.attlen, a.attnotnull, a.atthasdef, a.attndims, t.typtype = 'e' "
"FROM pg_class as c, pg_attribute a, pg_type t, pg_namespace n "
"WHERE a.attnum > 0 AND a.attrelid = c.oid AND c.relname = '");
escaped = (char *)safe_emalloc(strlen(tmp_name2), 2, 1);
new_len = PQescapeStringConn(pg_link, escaped, tmp_name2, strlen(tmp_name2), NULL);
if (new_len) {
smart_str_appendl(&querystr, escaped, new_len);
}
efree(escaped);
smart_str_appends(&querystr, "' AND c.relnamespace = n.oid AND n.nspname = '");
escaped = (char *)safe_emalloc(strlen(tmp_name), 2, 1);
new_len = PQescapeStringConn(pg_link, escaped, tmp_name, strlen(tmp_name), NULL);
if (new_len) {
smart_str_appendl(&querystr, escaped, new_len);
}
efree(escaped);
smart_str_appends(&querystr, "' AND a.atttypid = t.oid ORDER BY a.attnum;");
smart_str_0(&querystr);
efree(src);
pg_result = PQexec(pg_link, querystr.c);
if (PQresultStatus(pg_result) != PGRES_TUPLES_OK || (num_rows = PQntuples(pg_result)) == 0) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Table '%s' doesn't exists", table_name);
smart_str_free(&querystr);
PQclear(pg_result);
return FAILURE;
}
smart_str_free(&querystr);
for (i = 0; i < num_rows; i++) {
char *name;
MAKE_STD_ZVAL(elem);
array_init(elem);
add_assoc_long(elem, "num", atoi(PQgetvalue(pg_result,i,1)));
add_assoc_string(elem, "type", PQgetvalue(pg_result,i,2), 1);
add_assoc_long(elem, "len", atoi(PQgetvalue(pg_result,i,3)));
if (!strcmp(PQgetvalue(pg_result,i,4), "t")) {
add_assoc_bool(elem, "not null", 1);
}
else {
add_assoc_bool(elem, "not null", 0);
}
if (!strcmp(PQgetvalue(pg_result,i,5), "t")) {
add_assoc_bool(elem, "has default", 1);
}
else {
add_assoc_bool(elem, "has default", 0);
}
add_assoc_long(elem, "array dims", atoi(PQgetvalue(pg_result,i,6)));
if (!strcmp(PQgetvalue(pg_result,i,7), "t")) {
add_assoc_bool(elem, "is enum", 1);
}
else {
add_assoc_bool(elem, "is enum", 0);
}
name = PQgetvalue(pg_result,i,0);
add_assoc_zval(meta, name, elem);
}
PQclear(pg_result);
return SUCCESS;
}
/* }}} */
Commit Message:
CWE ID: | PHP_FUNCTION(pg_get_notify)
{
zval *pgsql_link;
int id = -1;
long result_type = PGSQL_ASSOC;
PGconn *pgsql;
PGnotify *pgsql_notify;
if (zend_parse_parameters_ex(ZEND_PARSE_PARAMS_QUIET, ZEND_NUM_ARGS() TSRMLS_CC, "r|l",
&pgsql_link, &result_type) == FAILURE) {
RETURN_FALSE;
}
ZEND_FETCH_RESOURCE2(pgsql, PGconn *, &pgsql_link, id, "PostgreSQL link", le_link, le_plink);
if (!(result_type & PGSQL_BOTH)) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Invalid result type");
RETURN_FALSE;
}
PQconsumeInput(pgsql);
pgsql_notify = PQnotifies(pgsql);
if (!pgsql_notify) {
/* no notify message */
RETURN_FALSE;
}
array_init(return_value);
if (result_type & PGSQL_NUM) {
add_index_string(return_value, 0, pgsql_notify->relname, 1);
add_index_long(return_value, 1, pgsql_notify->be_pid);
#if HAVE_PQPROTOCOLVERSION && HAVE_PQPARAMETERSTATUS
if (PQprotocolVersion(pgsql) >= 3 && atof(PQparameterStatus(pgsql, "server_version")) >= 9.0) {
#else
if (atof(PG_VERSION) >= 9.0) {
#endif
#if HAVE_PQPARAMETERSTATUS
add_index_string(return_value, 2, pgsql_notify->extra, 1);
#endif
}
}
if (result_type & PGSQL_ASSOC) {
add_assoc_string(return_value, "message", pgsql_notify->relname, 1);
add_assoc_long(return_value, "pid", pgsql_notify->be_pid);
#if HAVE_PQPROTOCOLVERSION && HAVE_PQPARAMETERSTATUS
if (PQprotocolVersion(pgsql) >= 3 && atof(PQparameterStatus(pgsql, "server_version")) >= 9.0) {
#else
if (atof(PG_VERSION) >= 9.0) {
#endif
#if HAVE_PQPARAMETERSTATUS
add_assoc_string(return_value, "payload", pgsql_notify->extra, 1);
#endif
}
}
PQfreemem(pgsql_notify);
}
/* }}} */
/* {{{ proto int pg_get_pid([resource connection)
Get backend(server) pid */
PHP_FUNCTION(pg_get_pid)
{
zval *pgsql_link;
int id = -1;
PGconn *pgsql;
if (zend_parse_parameters_ex(ZEND_PARSE_PARAMS_QUIET, ZEND_NUM_ARGS() TSRMLS_CC, "r",
&pgsql_link) == FAILURE) {
RETURN_FALSE;
}
ZEND_FETCH_RESOURCE2(pgsql, PGconn *, &pgsql_link, id, "PostgreSQL link", le_link, le_plink);
RETURN_LONG(PQbackendPID(pgsql));
}
/* }}} */
/* {{{ php_pgsql_meta_data
* TODO: Add meta_data cache for better performance
*/
PHP_PGSQL_API int php_pgsql_meta_data(PGconn *pg_link, const char *table_name, zval *meta TSRMLS_DC)
{
PGresult *pg_result;
char *src, *tmp_name, *tmp_name2 = NULL;
char *escaped;
smart_str querystr = {0};
size_t new_len;
int i, num_rows;
zval *elem;
if (!*table_name) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "The table name must be specified");
return FAILURE;
}
src = estrdup(table_name);
tmp_name = php_strtok_r(src, ".", &tmp_name2);
if (!tmp_name) {
efree(src);
php_error_docref(NULL TSRMLS_CC, E_WARNING, "The table name must be specified");
return FAILURE;
}
if (!tmp_name2 || !*tmp_name2) {
/* Default schema */
		tmp_name2 = tmp_name;
		tmp_name = "public";
	}

	smart_str_appends(&querystr,
			"SELECT a.attname, a.attnum, t.typname, a.attlen, a.attnotnull, a.atthasdef, a.attndims, t.typtype = 'e' "
"FROM pg_class as c, pg_attribute a, pg_type t, pg_namespace n "
"WHERE a.attnum > 0 AND a.attrelid = c.oid AND c.relname = '");
escaped = (char *)safe_emalloc(strlen(tmp_name2), 2, 1);
new_len = PQescapeStringConn(pg_link, escaped, tmp_name2, strlen(tmp_name2), NULL);
if (new_len) {
smart_str_appendl(&querystr, escaped, new_len);
}
efree(escaped);
smart_str_appends(&querystr, "' AND c.relnamespace = n.oid AND n.nspname = '");
escaped = (char *)safe_emalloc(strlen(tmp_name), 2, 1);
new_len = PQescapeStringConn(pg_link, escaped, tmp_name, strlen(tmp_name), NULL);
if (new_len) {
smart_str_appendl(&querystr, escaped, new_len);
}
efree(escaped);
smart_str_appends(&querystr, "' AND a.atttypid = t.oid ORDER BY a.attnum;");
smart_str_0(&querystr);
efree(src);
pg_result = PQexec(pg_link, querystr.c);
if (PQresultStatus(pg_result) != PGRES_TUPLES_OK || (num_rows = PQntuples(pg_result)) == 0) {
php_error_docref(NULL TSRMLS_CC, E_WARNING, "Table '%s' doesn't exists", table_name);
smart_str_free(&querystr);
PQclear(pg_result);
return FAILURE;
}
smart_str_free(&querystr);
for (i = 0; i < num_rows; i++) {
char *name;
MAKE_STD_ZVAL(elem);
array_init(elem);
add_assoc_long(elem, "num", atoi(PQgetvalue(pg_result,i,1)));
add_assoc_string(elem, "type", PQgetvalue(pg_result,i,2), 1);
add_assoc_long(elem, "len", atoi(PQgetvalue(pg_result,i,3)));
if (!strcmp(PQgetvalue(pg_result,i,4), "t")) {
add_assoc_bool(elem, "not null", 1);
}
else {
add_assoc_bool(elem, "not null", 0);
}
if (!strcmp(PQgetvalue(pg_result,i,5), "t")) {
add_assoc_bool(elem, "has default", 1);
}
else {
add_assoc_bool(elem, "has default", 0);
}
add_assoc_long(elem, "array dims", atoi(PQgetvalue(pg_result,i,6)));
if (!strcmp(PQgetvalue(pg_result,i,7), "t")) {
add_assoc_bool(elem, "is enum", 1);
}
else {
add_assoc_bool(elem, "is enum", 0);
}
name = PQgetvalue(pg_result,i,0);
add_assoc_zval(meta, name, elem);
}
PQclear(pg_result);
return SUCCESS;
}
/* }}} */
| 4,068 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static v8::Handle<v8::Value> methodWithSequenceArgCallback(const v8::Arguments& args)
{
INC_STATS("DOM.TestObj.methodWithSequenceArg");
if (args.Length() < 1)
return V8Proxy::throwNotEnoughArgumentsError();
TestObj* imp = V8TestObj::toNative(args.Holder());
EXCEPTION_BLOCK(sequence<ScriptProfile>*, sequenceArg, toNativeArray<ScriptProfile>(MAYBE_MISSING_PARAMETER(args, 0, DefaultIsUndefined)));
imp->methodWithSequenceArg(sequenceArg);
return v8::Handle<v8::Value>();
}
Commit Message: [V8] Pass Isolate to throwNotEnoughArgumentsError()
https://bugs.webkit.org/show_bug.cgi?id=86983
Reviewed by Adam Barth.
The objective is to pass Isolate around in V8 bindings.
This patch passes Isolate to throwNotEnoughArgumentsError().
No tests. No change in behavior.
* bindings/scripts/CodeGeneratorV8.pm:
(GenerateArgumentsCountCheck):
(GenerateEventConstructorCallback):
* bindings/scripts/test/V8/V8Float64Array.cpp:
(WebCore::Float64ArrayV8Internal::fooCallback):
* bindings/scripts/test/V8/V8TestActiveDOMObject.cpp:
(WebCore::TestActiveDOMObjectV8Internal::excitingFunctionCallback):
(WebCore::TestActiveDOMObjectV8Internal::postMessageCallback):
* bindings/scripts/test/V8/V8TestCustomNamedGetter.cpp:
(WebCore::TestCustomNamedGetterV8Internal::anotherFunctionCallback):
* bindings/scripts/test/V8/V8TestEventConstructor.cpp:
(WebCore::V8TestEventConstructor::constructorCallback):
* bindings/scripts/test/V8/V8TestEventTarget.cpp:
(WebCore::TestEventTargetV8Internal::itemCallback):
(WebCore::TestEventTargetV8Internal::dispatchEventCallback):
* bindings/scripts/test/V8/V8TestInterface.cpp:
(WebCore::TestInterfaceV8Internal::supplementalMethod2Callback):
(WebCore::V8TestInterface::constructorCallback):
* bindings/scripts/test/V8/V8TestMediaQueryListListener.cpp:
(WebCore::TestMediaQueryListListenerV8Internal::methodCallback):
* bindings/scripts/test/V8/V8TestNamedConstructor.cpp:
(WebCore::V8TestNamedConstructorConstructorCallback):
* bindings/scripts/test/V8/V8TestObj.cpp:
(WebCore::TestObjV8Internal::voidMethodWithArgsCallback):
(WebCore::TestObjV8Internal::intMethodWithArgsCallback):
(WebCore::TestObjV8Internal::objMethodWithArgsCallback):
(WebCore::TestObjV8Internal::methodWithSequenceArgCallback):
(WebCore::TestObjV8Internal::methodReturningSequenceCallback):
(WebCore::TestObjV8Internal::methodThatRequiresAllArgsAndThrowsCallback):
(WebCore::TestObjV8Internal::serializedValueCallback):
(WebCore::TestObjV8Internal::idbKeyCallback):
(WebCore::TestObjV8Internal::optionsObjectCallback):
(WebCore::TestObjV8Internal::methodWithNonOptionalArgAndOptionalArgCallback):
(WebCore::TestObjV8Internal::methodWithNonOptionalArgAndTwoOptionalArgsCallback):
(WebCore::TestObjV8Internal::methodWithCallbackArgCallback):
(WebCore::TestObjV8Internal::methodWithNonCallbackArgAndCallbackArgCallback):
(WebCore::TestObjV8Internal::overloadedMethod1Callback):
(WebCore::TestObjV8Internal::overloadedMethod2Callback):
(WebCore::TestObjV8Internal::overloadedMethod3Callback):
(WebCore::TestObjV8Internal::overloadedMethod4Callback):
(WebCore::TestObjV8Internal::overloadedMethod5Callback):
(WebCore::TestObjV8Internal::overloadedMethod6Callback):
(WebCore::TestObjV8Internal::overloadedMethod7Callback):
(WebCore::TestObjV8Internal::overloadedMethod11Callback):
(WebCore::TestObjV8Internal::overloadedMethod12Callback):
(WebCore::TestObjV8Internal::enabledAtRuntimeMethod1Callback):
(WebCore::TestObjV8Internal::enabledAtRuntimeMethod2Callback):
(WebCore::TestObjV8Internal::convert1Callback):
(WebCore::TestObjV8Internal::convert2Callback):
(WebCore::TestObjV8Internal::convert3Callback):
(WebCore::TestObjV8Internal::convert4Callback):
(WebCore::TestObjV8Internal::convert5Callback):
(WebCore::TestObjV8Internal::strictFunctionCallback):
(WebCore::V8TestObj::constructorCallback):
* bindings/scripts/test/V8/V8TestSerializedScriptValueInterface.cpp:
(WebCore::TestSerializedScriptValueInterfaceV8Internal::acceptTransferListCallback):
(WebCore::V8TestSerializedScriptValueInterface::constructorCallback):
* bindings/v8/ScriptController.cpp:
(WebCore::setValueAndClosePopupCallback):
* bindings/v8/V8Proxy.cpp:
(WebCore::V8Proxy::throwNotEnoughArgumentsError):
* bindings/v8/V8Proxy.h:
(V8Proxy):
* bindings/v8/custom/V8AudioContextCustom.cpp:
(WebCore::V8AudioContext::constructorCallback):
* bindings/v8/custom/V8DataViewCustom.cpp:
(WebCore::V8DataView::getInt8Callback):
(WebCore::V8DataView::getUint8Callback):
(WebCore::V8DataView::setInt8Callback):
(WebCore::V8DataView::setUint8Callback):
* bindings/v8/custom/V8DirectoryEntryCustom.cpp:
(WebCore::V8DirectoryEntry::getDirectoryCallback):
(WebCore::V8DirectoryEntry::getFileCallback):
* bindings/v8/custom/V8IntentConstructor.cpp:
(WebCore::V8Intent::constructorCallback):
* bindings/v8/custom/V8SVGLengthCustom.cpp:
(WebCore::V8SVGLength::convertToSpecifiedUnitsCallback):
* bindings/v8/custom/V8WebGLRenderingContextCustom.cpp:
(WebCore::getObjectParameter):
(WebCore::V8WebGLRenderingContext::getAttachedShadersCallback):
(WebCore::V8WebGLRenderingContext::getExtensionCallback):
(WebCore::V8WebGLRenderingContext::getFramebufferAttachmentParameterCallback):
(WebCore::V8WebGLRenderingContext::getParameterCallback):
(WebCore::V8WebGLRenderingContext::getProgramParameterCallback):
(WebCore::V8WebGLRenderingContext::getShaderParameterCallback):
(WebCore::V8WebGLRenderingContext::getUniformCallback):
(WebCore::vertexAttribAndUniformHelperf):
(WebCore::uniformHelperi):
(WebCore::uniformMatrixHelper):
* bindings/v8/custom/V8WebKitMutationObserverCustom.cpp:
(WebCore::V8WebKitMutationObserver::constructorCallback):
(WebCore::V8WebKitMutationObserver::observeCallback):
* bindings/v8/custom/V8WebSocketCustom.cpp:
(WebCore::V8WebSocket::constructorCallback):
(WebCore::V8WebSocket::sendCallback):
* bindings/v8/custom/V8XMLHttpRequestCustom.cpp:
(WebCore::V8XMLHttpRequest::openCallback):
git-svn-id: svn://svn.chromium.org/blink/trunk@117736 bbb929c8-8fbe-4397-9dbb-9b2b20218538
CWE ID: | static v8::Handle<v8::Value> methodWithSequenceArgCallback(const v8::Arguments& args)
{
INC_STATS("DOM.TestObj.methodWithSequenceArg");
if (args.Length() < 1)
return V8Proxy::throwNotEnoughArgumentsError(args.GetIsolate());
TestObj* imp = V8TestObj::toNative(args.Holder());
EXCEPTION_BLOCK(sequence<ScriptProfile>*, sequenceArg, toNativeArray<ScriptProfile>(MAYBE_MISSING_PARAMETER(args, 0, DefaultIsUndefined)));
imp->methodWithSequenceArg(sequenceArg);
return v8::Handle<v8::Value>();
}
| 29,578 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: xmlDocPtr soap_xmlParseMemory(const void *buf, size_t buf_size)
{
xmlParserCtxtPtr ctxt = NULL;
xmlDocPtr ret;
/*
xmlInitParser();
*/
ctxt = xmlCreateMemoryParserCtxt(buf, buf_size);
if (ctxt) {
ctxt->options -= XML_PARSE_DTDLOAD;
ctxt->sax->ignorableWhitespace = soap_ignorableWhitespace;
ctxt->sax->comment = soap_Comment;
ctxt->sax->warning = NULL;
ctxt->sax->error = NULL;
/*ctxt->sax->fatalError = NULL;*/
#if LIBXML_VERSION >= 20703
ctxt->options |= XML_PARSE_HUGE;
#endif
xmlParseDocument(ctxt);
if (ctxt->wellFormed) {
ret = ctxt->myDoc;
if (ret->URL == NULL && ctxt->directory != NULL) {
ret->URL = xmlCharStrdup(ctxt->directory);
}
} else {
ret = NULL;
xmlFreeDoc(ctxt->myDoc);
ctxt->myDoc = NULL;
}
xmlFreeParserCtxt(ctxt);
} else {
ret = NULL;
}
/*
xmlCleanupParser();
*/
/*
if (ret) {
cleanup_xml_node((xmlNodePtr)ret);
}
*/
return ret;
}
Commit Message:
CWE ID: CWE-200 | xmlDocPtr soap_xmlParseMemory(const void *buf, size_t buf_size)
{
xmlParserCtxtPtr ctxt = NULL;
xmlDocPtr ret;
/*
xmlInitParser();
*/
ctxt = xmlCreateMemoryParserCtxt(buf, buf_size);
if (ctxt) {
ctxt->options &= ~XML_PARSE_DTDLOAD;
ctxt->sax->ignorableWhitespace = soap_ignorableWhitespace;
ctxt->sax->comment = soap_Comment;
ctxt->sax->warning = NULL;
ctxt->sax->error = NULL;
/*ctxt->sax->fatalError = NULL;*/
#if LIBXML_VERSION >= 20703
ctxt->options |= XML_PARSE_HUGE;
#endif
xmlParseDocument(ctxt);
if (ctxt->wellFormed) {
ret = ctxt->myDoc;
if (ret->URL == NULL && ctxt->directory != NULL) {
ret->URL = xmlCharStrdup(ctxt->directory);
}
} else {
ret = NULL;
xmlFreeDoc(ctxt->myDoc);
ctxt->myDoc = NULL;
}
xmlFreeParserCtxt(ctxt);
} else {
ret = NULL;
}
/*
xmlCleanupParser();
*/
/*
if (ret) {
cleanup_xml_node((xmlNodePtr)ret);
}
*/
return ret;
}
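The one-character heart of this fix is easy to miss: the vulnerable code removed the DTD-loading flag with arithmetic subtraction (ctxt->options -= XML_PARSE_DTDLOAD), which borrows from neighbouring bits whenever the flag happens to be clear, while the fixed code uses a bitwise clear (&= ~XML_PARSE_DTDLOAD) that only ever touches the intended bit. A minimal standalone C sketch of the difference, using made-up flag values rather than libxml2's real constants:

#include <assert.h>
#include <stdio.h>

/* Hypothetical option bits, standing in for libxml2's real parser flags. */
#define OPT_DTDLOAD 0x04u
#define OPT_HUGE    0x80u

int main(void)
{
    /* Start from an options word in which OPT_DTDLOAD is NOT set. */
    unsigned with_sub  = OPT_HUGE;
    unsigned with_mask = OPT_HUGE;

    with_sub  -= OPT_DTDLOAD;   /* arithmetic borrow corrupts other bits: 0x7C */
    with_mask &= ~OPT_DTDLOAD;  /* bitwise clear leaves unrelated bits alone: 0x80 */

    printf("subtraction: 0x%02X  and-not: 0x%02X\n", with_sub, with_mask);
    assert(with_mask == OPT_HUGE);  /* still intact */
    assert(with_sub  != OPT_HUGE);  /* silently broken */
    return 0;
}

The same reasoning applies to any flags word: subtraction is only equivalent to clearing a flag when the flag is already known to be set.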
| 13,261 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int crypto_rng_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_rng *rng = __crypto_rng_cast(tfm);
struct rng_alg *alg = crypto_rng_alg(rng);
struct old_rng_alg *oalg = crypto_old_rng_alg(rng);
if (oalg->rng_make_random) {
rng->generate = generate;
rng->seed = rngapi_reset;
rng->seedsize = oalg->seedsize;
return 0;
}
rng->generate = alg->generate;
rng->seed = alg->seed;
rng->seedsize = alg->seedsize;
return 0;
}
Commit Message: crypto: rng - Remove old low-level rng interface
Now that all rng implementations have switched over to the new
interface, we can remove the old low-level interface.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
CWE ID: CWE-476 | static int crypto_rng_init_tfm(struct crypto_tfm *tfm)
{
return 0;
}
| 5,550 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static Image *ReadSUNImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
#define RMT_EQUAL_RGB 1
#define RMT_NONE 0
#define RMT_RAW 2
#define RT_STANDARD 1
#define RT_ENCODED 2
#define RT_FORMAT_RGB 3
typedef struct _SUNInfo
{
unsigned int
magic,
width,
height,
depth,
length,
type,
maptype,
maplength;
} SUNInfo;
Image
*image;
int
bit;
MagickBooleanType
status;
MagickSizeType
number_pixels;
register Quantum
*q;
register ssize_t
i,
x;
register unsigned char
*p;
size_t
bytes_per_line,
extent,
length;
ssize_t
count,
y;
SUNInfo
sun_info;
unsigned char
*sun_data,
*sun_pixels;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read SUN raster header.
*/
(void) ResetMagickMemory(&sun_info,0,sizeof(sun_info));
sun_info.magic=ReadBlobMSBLong(image);
do
{
/*
Verify SUN identifier.
*/
if (sun_info.magic != 0x59a66a95)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
sun_info.width=ReadBlobMSBLong(image);
sun_info.height=ReadBlobMSBLong(image);
sun_info.depth=ReadBlobMSBLong(image);
sun_info.length=ReadBlobMSBLong(image);
sun_info.type=ReadBlobMSBLong(image);
sun_info.maptype=ReadBlobMSBLong(image);
sun_info.maplength=ReadBlobMSBLong(image);
extent=sun_info.height*sun_info.width;
if ((sun_info.height != 0) && (sun_info.width != extent/sun_info.height))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if ((sun_info.type != RT_STANDARD) && (sun_info.type != RT_ENCODED) &&
(sun_info.type != RT_FORMAT_RGB))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if ((sun_info.maptype == RMT_NONE) && (sun_info.maplength != 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if ((sun_info.depth == 0) || (sun_info.depth > 32))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if ((sun_info.maptype != RMT_NONE) && (sun_info.maptype != RMT_EQUAL_RGB) &&
(sun_info.maptype != RMT_RAW))
ThrowReaderException(CoderError,"ColormapTypeNotSupported");
image->columns=sun_info.width;
image->rows=sun_info.height;
image->depth=sun_info.depth <= 8 ? sun_info.depth :
MAGICKCORE_QUANTUM_DEPTH;
if (sun_info.depth < 24)
{
size_t
one;
image->storage_class=PseudoClass;
image->colors=sun_info.maplength;
one=1;
if (sun_info.maptype == RMT_NONE)
image->colors=one << sun_info.depth;
if (sun_info.maptype == RMT_EQUAL_RGB)
image->colors=sun_info.maplength/3;
}
switch (sun_info.maptype)
{
case RMT_NONE:
{
if (sun_info.depth < 24)
{
/*
Create linear color ramp.
*/
if (AcquireImageColormap(image,image->colors,exception) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
break;
}
case RMT_EQUAL_RGB:
{
unsigned char
*sun_colormap;
/*
Read SUN raster colormap.
*/
if (AcquireImageColormap(image,image->colors,exception) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
sun_colormap=(unsigned char *) AcquireQuantumMemory(image->colors,
sizeof(*sun_colormap));
if (sun_colormap == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,image->colors,sun_colormap);
if (count != (ssize_t) image->colors)
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(
sun_colormap[i]);
count=ReadBlob(image,image->colors,sun_colormap);
if (count != (ssize_t) image->colors)
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(
sun_colormap[i]);
count=ReadBlob(image,image->colors,sun_colormap);
if (count != (ssize_t) image->colors)
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(
sun_colormap[i]);
sun_colormap=(unsigned char *) RelinquishMagickMemory(sun_colormap);
break;
}
case RMT_RAW:
{
unsigned char
*sun_colormap;
/*
Read SUN raster colormap.
*/
sun_colormap=(unsigned char *) AcquireQuantumMemory(sun_info.maplength,
sizeof(*sun_colormap));
if (sun_colormap == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,sun_info.maplength,sun_colormap);
if (count != (ssize_t) sun_info.maplength)
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
sun_colormap=(unsigned char *) RelinquishMagickMemory(sun_colormap);
break;
}
default:
ThrowReaderException(CoderError,"ColormapTypeNotSupported");
}
image->alpha_trait=sun_info.depth == 32 ? BlendPixelTrait :
UndefinedPixelTrait;
image->columns=sun_info.width;
image->rows=sun_info.height;
if (image_info->ping != MagickFalse)
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
if ((sun_info.length*sizeof(*sun_data))/sizeof(*sun_data) !=
sun_info.length || !sun_info.length)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
number_pixels=(MagickSizeType) image->columns*image->rows;
if ((sun_info.type != RT_ENCODED) && (sun_info.depth >= 8) &&
((number_pixels*((sun_info.depth+7)/8)) > sun_info.length))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
bytes_per_line=sun_info.width*sun_info.depth;
sun_data=(unsigned char *) AcquireQuantumMemory((size_t) MagickMax(
sun_info.length,bytes_per_line*sun_info.width),sizeof(*sun_data));
if (sun_data == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=(ssize_t) ReadBlob(image,sun_info.length,sun_data);
if (count != (ssize_t) sun_info.length)
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
sun_pixels=sun_data;
bytes_per_line=0;
if (sun_info.type == RT_ENCODED)
{
size_t
height;
/*
Read run-length encoded raster pixels.
*/
height=sun_info.height;
if ((height == 0) || (sun_info.width == 0) || (sun_info.depth == 0) ||
((bytes_per_line/sun_info.depth) != sun_info.width))
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
bytes_per_line+=15;
bytes_per_line<<=1;
if ((bytes_per_line >> 1) != (sun_info.width*sun_info.depth+15))
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
bytes_per_line>>=4;
sun_pixels=(unsigned char *) AcquireQuantumMemory(height,
bytes_per_line*sizeof(*sun_pixels));
if (sun_pixels == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
(void) DecodeImage(sun_data,sun_info.length,sun_pixels,bytes_per_line*
height);
sun_data=(unsigned char *) RelinquishMagickMemory(sun_data);
}
/*
Convert SUN raster image to pixel packets.
*/
p=sun_pixels;
if (sun_info.depth == 1)
for (y=0; y < (ssize_t) image->rows; y++)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < ((ssize_t) image->columns-7); x+=8)
{
for (bit=7; bit >= 0; bit--)
{
SetPixelIndex(image,(Quantum) ((*p) & (0x01 << bit) ? 0x00 : 0x01),
q);
q+=GetPixelChannels(image);
}
p++;
}
if ((image->columns % 8) != 0)
{
for (bit=7; bit >= (int) (8-(image->columns % 8)); bit--)
{
SetPixelIndex(image,(Quantum) ((*p) & (0x01 << bit) ? 0x00 :
0x01),q);
q+=GetPixelChannels(image);
}
p++;
}
if ((((image->columns/8)+(image->columns % 8 ? 1 : 0)) % 2) != 0)
p++;
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
else
if (image->storage_class == PseudoClass)
{
if (bytes_per_line == 0)
bytes_per_line=image->columns;
length=image->rows*(image->columns+image->columns % 2);
if (((sun_info.type == RT_ENCODED) &&
(length > (bytes_per_line*image->rows))) ||
((sun_info.type != RT_ENCODED) && (length > sun_info.length)))
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
for (y=0; y < (ssize_t) image->rows; y++)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(image,*p++,q);
q+=GetPixelChannels(image);
}
if ((image->columns % 2) != 0)
p++;
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
}
else
{
size_t
bytes_per_pixel;
bytes_per_pixel=3;
if (image->alpha_trait != UndefinedPixelTrait)
bytes_per_pixel++;
if (bytes_per_line == 0)
bytes_per_line=bytes_per_pixel*image->columns;
length=image->rows*(bytes_per_line+image->columns % 2);
if (((sun_info.type == RT_ENCODED) &&
(length > (bytes_per_line*image->rows))) ||
((sun_info.type != RT_ENCODED) && (length > sun_info.length)))
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
for (y=0; y < (ssize_t) image->rows; y++)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,ScaleCharToQuantum(*p++),q);
if (sun_info.type == RT_STANDARD)
{
SetPixelBlue(image,ScaleCharToQuantum(*p++),q);
SetPixelGreen(image,ScaleCharToQuantum(*p++),q);
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
}
else
{
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
SetPixelGreen(image,ScaleCharToQuantum(*p++),q);
SetPixelBlue(image,ScaleCharToQuantum(*p++),q);
}
if (image->colors != 0)
{
SetPixelRed(image,ClampToQuantum(image->colormap[(ssize_t)
GetPixelRed(image,q)].red),q);
SetPixelGreen(image,ClampToQuantum(image->colormap[(ssize_t)
GetPixelGreen(image,q)].green),q);
SetPixelBlue(image,ClampToQuantum(image->colormap[(ssize_t)
GetPixelBlue(image,q)].blue),q);
}
q+=GetPixelChannels(image);
}
if (((bytes_per_pixel*image->columns) % 2) != 0)
p++;
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
}
if (image->storage_class == PseudoClass)
(void) SyncImage(image,exception);
sun_pixels=(unsigned char *) RelinquishMagickMemory(sun_pixels);
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
sun_info.magic=ReadBlobMSBLong(image);
if (sun_info.magic == 0x59a66a95)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while (sun_info.magic == 0x59a66a95);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
Commit Message: http://www.imagemagick.org/discourse-server/viewtopic.php?f=3&t=26848
CWE ID: CWE-119 | static Image *ReadSUNImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
#define RMT_EQUAL_RGB 1
#define RMT_NONE 0
#define RMT_RAW 2
#define RT_STANDARD 1
#define RT_ENCODED 2
#define RT_FORMAT_RGB 3
typedef struct _SUNInfo
{
unsigned int
magic,
width,
height,
depth,
length,
type,
maptype,
maplength;
} SUNInfo;
Image
*image;
int
bit;
MagickBooleanType
status;
MagickSizeType
number_pixels;
register Quantum
*q;
register ssize_t
i,
x;
register unsigned char
*p;
size_t
bytes_per_line,
extent,
length;
ssize_t
count,
y;
SUNInfo
sun_info;
unsigned char
*sun_data,
*sun_pixels;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read SUN raster header.
*/
(void) ResetMagickMemory(&sun_info,0,sizeof(sun_info));
sun_info.magic=ReadBlobMSBLong(image);
do
{
/*
Verify SUN identifier.
*/
if (sun_info.magic != 0x59a66a95)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
sun_info.width=ReadBlobMSBLong(image);
sun_info.height=ReadBlobMSBLong(image);
sun_info.depth=ReadBlobMSBLong(image);
sun_info.length=ReadBlobMSBLong(image);
sun_info.type=ReadBlobMSBLong(image);
sun_info.maptype=ReadBlobMSBLong(image);
sun_info.maplength=ReadBlobMSBLong(image);
extent=sun_info.height*sun_info.width;
if ((sun_info.height != 0) && (sun_info.width != extent/sun_info.height))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if ((sun_info.type != RT_STANDARD) && (sun_info.type != RT_ENCODED) &&
(sun_info.type != RT_FORMAT_RGB))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if ((sun_info.maptype == RMT_NONE) && (sun_info.maplength != 0))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if ((sun_info.depth == 0) || (sun_info.depth > 32))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if ((sun_info.maptype != RMT_NONE) && (sun_info.maptype != RMT_EQUAL_RGB) &&
(sun_info.maptype != RMT_RAW))
ThrowReaderException(CoderError,"ColormapTypeNotSupported");
image->columns=sun_info.width;
image->rows=sun_info.height;
image->depth=sun_info.depth <= 8 ? sun_info.depth :
MAGICKCORE_QUANTUM_DEPTH;
if (sun_info.depth < 24)
{
size_t
one;
image->colors=sun_info.maplength;
one=1;
if (sun_info.maptype == RMT_NONE)
image->colors=one << sun_info.depth;
if (sun_info.maptype == RMT_EQUAL_RGB)
image->colors=sun_info.maplength/3;
if (AcquireImageColormap(image,image->colors,exception) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
switch (sun_info.maptype)
{
case RMT_EQUAL_RGB:
{
unsigned char
*sun_colormap;
/*
Read SUN raster colormap.
*/
sun_colormap=(unsigned char *) AcquireQuantumMemory(image->colors,
sizeof(*sun_colormap));
if (sun_colormap == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,image->colors,sun_colormap);
if (count != (ssize_t) image->colors)
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(
sun_colormap[i]);
count=ReadBlob(image,image->colors,sun_colormap);
if (count != (ssize_t) image->colors)
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(
sun_colormap[i]);
count=ReadBlob(image,image->colors,sun_colormap);
if (count != (ssize_t) image->colors)
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(
sun_colormap[i]);
sun_colormap=(unsigned char *) RelinquishMagickMemory(sun_colormap);
break;
}
case RMT_RAW:
{
unsigned char
*sun_colormap;
/*
Read SUN raster colormap.
*/
sun_colormap=(unsigned char *) AcquireQuantumMemory(sun_info.maplength,
sizeof(*sun_colormap));
if (sun_colormap == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=ReadBlob(image,sun_info.maplength,sun_colormap);
if (count != (ssize_t) sun_info.maplength)
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
sun_colormap=(unsigned char *) RelinquishMagickMemory(sun_colormap);
break;
}
default:
ThrowReaderException(CoderError,"ColormapTypeNotSupported");
}
image->alpha_trait=sun_info.depth == 32 ? BlendPixelTrait :
UndefinedPixelTrait;
image->columns=sun_info.width;
image->rows=sun_info.height;
if (image_info->ping != MagickFalse)
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
if ((sun_info.length*sizeof(*sun_data))/sizeof(*sun_data) !=
sun_info.length || !sun_info.length)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
number_pixels=(MagickSizeType) image->columns*image->rows;
if ((sun_info.type != RT_ENCODED) && (sun_info.depth >= 8) &&
((number_pixels*((sun_info.depth+7)/8)) > sun_info.length))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
bytes_per_line=sun_info.width*sun_info.depth;
sun_data=(unsigned char *) AcquireQuantumMemory((size_t) MagickMax(
sun_info.length,bytes_per_line*sun_info.width),sizeof(*sun_data));
if (sun_data == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
count=(ssize_t) ReadBlob(image,sun_info.length,sun_data);
if (count != (ssize_t) sun_info.length)
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
sun_pixels=sun_data;
bytes_per_line=0;
if (sun_info.type == RT_ENCODED)
{
size_t
height;
/*
Read run-length encoded raster pixels.
*/
height=sun_info.height;
if ((height == 0) || (sun_info.width == 0) || (sun_info.depth == 0) ||
((bytes_per_line/sun_info.depth) != sun_info.width))
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
bytes_per_line+=15;
bytes_per_line<<=1;
if ((bytes_per_line >> 1) != (sun_info.width*sun_info.depth+15))
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
bytes_per_line>>=4;
sun_pixels=(unsigned char *) AcquireQuantumMemory(height,
bytes_per_line*sizeof(*sun_pixels));
if (sun_pixels == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
(void) DecodeImage(sun_data,sun_info.length,sun_pixels,bytes_per_line*
height);
sun_data=(unsigned char *) RelinquishMagickMemory(sun_data);
}
/*
Convert SUN raster image to pixel packets.
*/
p=sun_pixels;
if (sun_info.depth == 1)
for (y=0; y < (ssize_t) image->rows; y++)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < ((ssize_t) image->columns-7); x+=8)
{
for (bit=7; bit >= 0; bit--)
{
SetPixelIndex(image,(Quantum) ((*p) & (0x01 << bit) ? 0x00 : 0x01),
q);
q+=GetPixelChannels(image);
}
p++;
}
if ((image->columns % 8) != 0)
{
for (bit=7; bit >= (int) (8-(image->columns % 8)); bit--)
{
SetPixelIndex(image,(Quantum) ((*p) & (0x01 << bit) ? 0x00 :
0x01),q);
q+=GetPixelChannels(image);
}
p++;
}
if ((((image->columns/8)+(image->columns % 8 ? 1 : 0)) % 2) != 0)
p++;
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
else
if (image->storage_class == PseudoClass)
{
if (bytes_per_line == 0)
bytes_per_line=image->columns;
length=image->rows*(image->columns+image->columns % 2);
if (((sun_info.type == RT_ENCODED) &&
(length > (bytes_per_line*image->rows))) ||
((sun_info.type != RT_ENCODED) && (length > sun_info.length)))
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
for (y=0; y < (ssize_t) image->rows; y++)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(image,*p++,q);
q+=GetPixelChannels(image);
}
if ((image->columns % 2) != 0)
p++;
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
}
else
{
size_t
bytes_per_pixel;
bytes_per_pixel=3;
if (image->alpha_trait != UndefinedPixelTrait)
bytes_per_pixel++;
if (bytes_per_line == 0)
bytes_per_line=bytes_per_pixel*image->columns;
length=image->rows*(bytes_per_line+image->columns % 2);
if (((sun_info.type == RT_ENCODED) &&
(length > (bytes_per_line*image->rows))) ||
((sun_info.type != RT_ENCODED) && (length > sun_info.length)))
ThrowReaderException(CorruptImageError,"UnableToReadImageData");
for (y=0; y < (ssize_t) image->rows; y++)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,ScaleCharToQuantum(*p++),q);
if (sun_info.type == RT_STANDARD)
{
SetPixelBlue(image,ScaleCharToQuantum(*p++),q);
SetPixelGreen(image,ScaleCharToQuantum(*p++),q);
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
}
else
{
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
SetPixelGreen(image,ScaleCharToQuantum(*p++),q);
SetPixelBlue(image,ScaleCharToQuantum(*p++),q);
}
if (image->colors != 0)
{
SetPixelRed(image,ClampToQuantum(image->colormap[(ssize_t)
GetPixelRed(image,q)].red),q);
SetPixelGreen(image,ClampToQuantum(image->colormap[(ssize_t)
GetPixelGreen(image,q)].green),q);
SetPixelBlue(image,ClampToQuantum(image->colormap[(ssize_t)
GetPixelBlue(image,q)].blue),q);
}
q+=GetPixelChannels(image);
}
if (((bytes_per_pixel*image->columns) % 2) != 0)
p++;
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
}
if (image->storage_class == PseudoClass)
(void) SyncImage(image,exception);
sun_pixels=(unsigned char *) RelinquishMagickMemory(sun_pixels);
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
sun_info.magic=ReadBlobMSBLong(image);
if (sun_info.magic == 0x59a66a95)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while (sun_info.magic == 0x59a66a95);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
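Both the vulnerable and the fixed ReadSUNImage lean on the same division-based guard to reject untrusted dimensions whose product would wrap (extent = height*width followed by a check that width != extent/height). A minimal standalone sketch of that idiom, with plain malloc standing in for ImageMagick's AcquireQuantumMemory:

#include <stdio.h>
#include <stdlib.h>

/* Allocate width*height bytes from untrusted dimensions, or return NULL
 * if either dimension is zero or the product would overflow size_t. */
static unsigned char *alloc_pixels(size_t width, size_t height)
{
    size_t extent = width * height;      /* unsigned wraparound is well defined */
    if (width == 0 || height == 0)
        return NULL;
    if (extent / height != width)        /* product wrapped: reject the header */
        return NULL;
    return malloc(extent);
}

int main(void)
{
    unsigned char *ok  = alloc_pixels(640, 480);
    unsigned char *bad = alloc_pixels((size_t)-1, 2);   /* guaranteed to wrap */
    printf("sane request: %s, wrapping request: %s\n",
           ok ? "allocated" : "rejected",
           bad ? "allocated" : "rejected");
    free(ok);
    free(bad);
    return 0;
}

Without such a guard, a crafted header can make the allocation far smaller than the data later written into it, which is exactly the CWE-119 condition this reader defends against.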
| 1,500 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct yam_port *yp = netdev_priv(dev);
struct yamdrv_ioctl_cfg yi;
struct yamdrv_ioctl_mcs *ym;
int ioctl_cmd;
if (copy_from_user(&ioctl_cmd, ifr->ifr_data, sizeof(int)))
return -EFAULT;
if (yp->magic != YAM_MAGIC)
return -EINVAL;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (cmd != SIOCDEVPRIVATE)
return -EINVAL;
switch (ioctl_cmd) {
case SIOCYAMRESERVED:
return -EINVAL; /* unused */
case SIOCYAMSMCS:
if (netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if ((ym = kmalloc(sizeof(struct yamdrv_ioctl_mcs), GFP_KERNEL)) == NULL)
return -ENOBUFS;
if (copy_from_user(ym, ifr->ifr_data, sizeof(struct yamdrv_ioctl_mcs))) {
kfree(ym);
return -EFAULT;
}
if (ym->bitrate > YAM_MAXBITRATE) {
kfree(ym);
return -EINVAL;
}
/* setting predef as 0 for loading userdefined mcs data */
add_mcs(ym->bits, ym->bitrate, 0);
kfree(ym);
break;
case SIOCYAMSCFG:
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
if (copy_from_user(&yi, ifr->ifr_data, sizeof(struct yamdrv_ioctl_cfg)))
return -EFAULT;
if ((yi.cfg.mask & YAM_IOBASE) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if ((yi.cfg.mask & YAM_IRQ) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if ((yi.cfg.mask & YAM_BITRATE) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if ((yi.cfg.mask & YAM_BAUDRATE) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if (yi.cfg.mask & YAM_IOBASE) {
yp->iobase = yi.cfg.iobase;
dev->base_addr = yi.cfg.iobase;
}
if (yi.cfg.mask & YAM_IRQ) {
if (yi.cfg.irq > 15)
return -EINVAL;
yp->irq = yi.cfg.irq;
dev->irq = yi.cfg.irq;
}
if (yi.cfg.mask & YAM_BITRATE) {
if (yi.cfg.bitrate > YAM_MAXBITRATE)
return -EINVAL;
yp->bitrate = yi.cfg.bitrate;
}
if (yi.cfg.mask & YAM_BAUDRATE) {
if (yi.cfg.baudrate > YAM_MAXBAUDRATE)
return -EINVAL;
yp->baudrate = yi.cfg.baudrate;
}
if (yi.cfg.mask & YAM_MODE) {
if (yi.cfg.mode > YAM_MAXMODE)
return -EINVAL;
yp->dupmode = yi.cfg.mode;
}
if (yi.cfg.mask & YAM_HOLDDLY) {
if (yi.cfg.holddly > YAM_MAXHOLDDLY)
return -EINVAL;
yp->holdd = yi.cfg.holddly;
}
if (yi.cfg.mask & YAM_TXDELAY) {
if (yi.cfg.txdelay > YAM_MAXTXDELAY)
return -EINVAL;
yp->txd = yi.cfg.txdelay;
}
if (yi.cfg.mask & YAM_TXTAIL) {
if (yi.cfg.txtail > YAM_MAXTXTAIL)
return -EINVAL;
yp->txtail = yi.cfg.txtail;
}
if (yi.cfg.mask & YAM_PERSIST) {
if (yi.cfg.persist > YAM_MAXPERSIST)
return -EINVAL;
yp->pers = yi.cfg.persist;
}
if (yi.cfg.mask & YAM_SLOTTIME) {
if (yi.cfg.slottime > YAM_MAXSLOTTIME)
return -EINVAL;
yp->slot = yi.cfg.slottime;
yp->slotcnt = yp->slot / 10;
}
break;
case SIOCYAMGCFG:
yi.cfg.mask = 0xffffffff;
yi.cfg.iobase = yp->iobase;
yi.cfg.irq = yp->irq;
yi.cfg.bitrate = yp->bitrate;
yi.cfg.baudrate = yp->baudrate;
yi.cfg.mode = yp->dupmode;
yi.cfg.txdelay = yp->txd;
yi.cfg.holddly = yp->holdd;
yi.cfg.txtail = yp->txtail;
yi.cfg.persist = yp->pers;
yi.cfg.slottime = yp->slot;
if (copy_to_user(ifr->ifr_data, &yi, sizeof(struct yamdrv_ioctl_cfg)))
return -EFAULT;
break;
default:
return -EINVAL;
}
return 0;
}
Commit Message: hamradio/yam: fix info leak in ioctl
The yam_ioctl() code fails to initialise the cmd field
of the struct yamdrv_ioctl_cfg. Add an explicit memset(0)
before filling the structure to avoid the 4-byte info leak.
Signed-off-by: Salva Peiró <speiro@ai2.upv.es>
Signed-off-by: David S. Miller <davem@davemloft.net>
CWE ID: CWE-399 | static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct yam_port *yp = netdev_priv(dev);
struct yamdrv_ioctl_cfg yi;
struct yamdrv_ioctl_mcs *ym;
int ioctl_cmd;
if (copy_from_user(&ioctl_cmd, ifr->ifr_data, sizeof(int)))
return -EFAULT;
if (yp->magic != YAM_MAGIC)
return -EINVAL;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (cmd != SIOCDEVPRIVATE)
return -EINVAL;
switch (ioctl_cmd) {
case SIOCYAMRESERVED:
return -EINVAL; /* unused */
case SIOCYAMSMCS:
if (netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if ((ym = kmalloc(sizeof(struct yamdrv_ioctl_mcs), GFP_KERNEL)) == NULL)
return -ENOBUFS;
if (copy_from_user(ym, ifr->ifr_data, sizeof(struct yamdrv_ioctl_mcs))) {
kfree(ym);
return -EFAULT;
}
if (ym->bitrate > YAM_MAXBITRATE) {
kfree(ym);
return -EINVAL;
}
/* setting predef as 0 for loading userdefined mcs data */
add_mcs(ym->bits, ym->bitrate, 0);
kfree(ym);
break;
case SIOCYAMSCFG:
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
if (copy_from_user(&yi, ifr->ifr_data, sizeof(struct yamdrv_ioctl_cfg)))
return -EFAULT;
if ((yi.cfg.mask & YAM_IOBASE) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if ((yi.cfg.mask & YAM_IRQ) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if ((yi.cfg.mask & YAM_BITRATE) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if ((yi.cfg.mask & YAM_BAUDRATE) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if (yi.cfg.mask & YAM_IOBASE) {
yp->iobase = yi.cfg.iobase;
dev->base_addr = yi.cfg.iobase;
}
if (yi.cfg.mask & YAM_IRQ) {
if (yi.cfg.irq > 15)
return -EINVAL;
yp->irq = yi.cfg.irq;
dev->irq = yi.cfg.irq;
}
if (yi.cfg.mask & YAM_BITRATE) {
if (yi.cfg.bitrate > YAM_MAXBITRATE)
return -EINVAL;
yp->bitrate = yi.cfg.bitrate;
}
if (yi.cfg.mask & YAM_BAUDRATE) {
if (yi.cfg.baudrate > YAM_MAXBAUDRATE)
return -EINVAL;
yp->baudrate = yi.cfg.baudrate;
}
if (yi.cfg.mask & YAM_MODE) {
if (yi.cfg.mode > YAM_MAXMODE)
return -EINVAL;
yp->dupmode = yi.cfg.mode;
}
if (yi.cfg.mask & YAM_HOLDDLY) {
if (yi.cfg.holddly > YAM_MAXHOLDDLY)
return -EINVAL;
yp->holdd = yi.cfg.holddly;
}
if (yi.cfg.mask & YAM_TXDELAY) {
if (yi.cfg.txdelay > YAM_MAXTXDELAY)
return -EINVAL;
yp->txd = yi.cfg.txdelay;
}
if (yi.cfg.mask & YAM_TXTAIL) {
if (yi.cfg.txtail > YAM_MAXTXTAIL)
return -EINVAL;
yp->txtail = yi.cfg.txtail;
}
if (yi.cfg.mask & YAM_PERSIST) {
if (yi.cfg.persist > YAM_MAXPERSIST)
return -EINVAL;
yp->pers = yi.cfg.persist;
}
if (yi.cfg.mask & YAM_SLOTTIME) {
if (yi.cfg.slottime > YAM_MAXSLOTTIME)
return -EINVAL;
yp->slot = yi.cfg.slottime;
yp->slotcnt = yp->slot / 10;
}
break;
case SIOCYAMGCFG:
memset(&yi, 0, sizeof(yi));
yi.cfg.mask = 0xffffffff;
yi.cfg.iobase = yp->iobase;
yi.cfg.irq = yp->irq;
yi.cfg.bitrate = yp->bitrate;
yi.cfg.baudrate = yp->baudrate;
yi.cfg.mode = yp->dupmode;
yi.cfg.txdelay = yp->txd;
yi.cfg.holddly = yp->holdd;
yi.cfg.txtail = yp->txtail;
yi.cfg.persist = yp->pers;
yi.cfg.slottime = yp->slot;
if (copy_to_user(ifr->ifr_data, &yi, sizeof(struct yamdrv_ioctl_cfg)))
return -EFAULT;
break;
default:
return -EINVAL;
}
return 0;
}
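The fix above is the canonical cure for kernel infoleaks of this kind: zero the whole reply structure before selectively filling it, so that unset members and padding never carry stale stack bytes through copy_to_user. A small user-space analogue of the pattern (the struct and field names are invented for illustration, not the real yam driver types):

#include <stdio.h>
#include <string.h>

struct cfg_reply {
    int  cmd;        /* never assigned by the "driver" code below */
    int  iobase;
    char pad[4];     /* explicit stand-in for implicit struct padding */
};

/* Buggy shape: only some members are filled; whatever was on the stack
 * in the rest of the struct would be copied out to the caller. */
static void fill_reply_leaky(struct cfg_reply *r)
{
    r->iobase = 0x3f8;
}

/* Fixed shape, mirroring the memset(&yi, 0, sizeof(yi)) added above. */
static void fill_reply_safe(struct cfg_reply *r)
{
    memset(r, 0, sizeof(*r));
    r->iobase = 0x3f8;
}

int main(void)
{
    struct cfg_reply reply;
    fill_reply_safe(&reply);           /* every byte of reply is now defined */
    printf("cmd=%d iobase=%#x\n", reply.cmd, reply.iobase);
    (void)fill_reply_leaky;            /* kept only to show the contrast */
    return 0;
}

In the driver, the uninitialised cmd field was what escaped to user space, which is why the commit message describes it as a 4-byte info leak.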
| 28,385 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: int migrate_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page,
struct buffer_head *head, enum migrate_mode mode,
int extra_count)
{
int expected_count = 1 + extra_count;
void **pslot;
if (!mapping) {
/* Anonymous page without mapping */
if (page_count(page) != expected_count)
return -EAGAIN;
/* No turning back from here */
set_page_memcg(newpage, page_memcg(page));
newpage->index = page->index;
newpage->mapping = page->mapping;
if (PageSwapBacked(page))
SetPageSwapBacked(newpage);
return MIGRATEPAGE_SUCCESS;
}
spin_lock_irq(&mapping->tree_lock);
pslot = radix_tree_lookup_slot(&mapping->page_tree,
page_index(page));
expected_count += 1 + page_has_private(page);
if (page_count(page) != expected_count ||
radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
spin_unlock_irq(&mapping->tree_lock);
return -EAGAIN;
}
if (!page_freeze_refs(page, expected_count)) {
spin_unlock_irq(&mapping->tree_lock);
return -EAGAIN;
}
/*
* In the async migration case of moving a page with buffers, lock the
* buffers using trylock before the mapping is moved. If the mapping
* was moved, we later failed to lock the buffers and could not move
* the mapping back due to an elevated page count, we would have to
* block waiting on other references to be dropped.
*/
if (mode == MIGRATE_ASYNC && head &&
!buffer_migrate_lock_buffers(head, mode)) {
page_unfreeze_refs(page, expected_count);
spin_unlock_irq(&mapping->tree_lock);
return -EAGAIN;
}
/*
* Now we know that no one else is looking at the page:
* no turning back from here.
*/
set_page_memcg(newpage, page_memcg(page));
newpage->index = page->index;
newpage->mapping = page->mapping;
if (PageSwapBacked(page))
SetPageSwapBacked(newpage);
get_page(newpage); /* add cache reference */
if (PageSwapCache(page)) {
SetPageSwapCache(newpage);
set_page_private(newpage, page_private(page));
}
radix_tree_replace_slot(pslot, newpage);
/*
* Drop cache reference from old page by unfreezing
* to one less reference.
* We know this isn't the last reference.
*/
page_unfreeze_refs(page, expected_count - 1);
/*
* If moved to a different zone then also account
* the page for that zone. Other VM counters will be
* taken care of when we establish references to the
* new page and drop references to the old page.
*
* Note that anonymous pages are accounted for
* via NR_FILE_PAGES and NR_ANON_PAGES if they
* are mapped to swap space.
*/
__dec_zone_page_state(page, NR_FILE_PAGES);
__inc_zone_page_state(newpage, NR_FILE_PAGES);
if (!PageSwapCache(page) && PageSwapBacked(page)) {
__dec_zone_page_state(page, NR_SHMEM);
__inc_zone_page_state(newpage, NR_SHMEM);
}
spin_unlock_irq(&mapping->tree_lock);
return MIGRATEPAGE_SUCCESS;
}
Commit Message: mm: migrate dirty page without clear_page_dirty_for_io etc
clear_page_dirty_for_io() has accumulated writeback and memcg subtleties
since v2.6.16 first introduced page migration; and the set_page_dirty()
which completed its migration of PageDirty, later had to be moderated to
__set_page_dirty_nobuffers(); then PageSwapBacked had to skip that too.
No actual problems seen with this procedure recently, but if you look into
what the clear_page_dirty_for_io(page)+set_page_dirty(newpage) is actually
achieving, it turns out to be nothing more than moving the PageDirty flag,
and its NR_FILE_DIRTY stat from one zone to another.
It would be good to avoid a pile of irrelevant decrementations and
incrementations, and improper event counting, and unnecessary descent of
the radix_tree under tree_lock (to set the PAGECACHE_TAG_DIRTY which
radix_tree_replace_slot() left in place anyway).
Do the NR_FILE_DIRTY movement, like the other stats movements, while
interrupts still disabled in migrate_page_move_mapping(); and don't even
bother if the zone is the same. Do the PageDirty movement there under
tree_lock too, where old page is frozen and newpage not yet visible:
bearing in mind that as soon as newpage becomes visible in radix_tree, an
un-page-locked set_page_dirty() might interfere (or perhaps that's just
not possible: anything doing so should already hold an additional
reference to the old page, preventing its migration; but play safe).
But we do still need to transfer PageDirty in migrate_page_copy(), for
those who don't go the mapping route through migrate_page_move_mapping().
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
CWE ID: CWE-476 | int migrate_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page,
struct buffer_head *head, enum migrate_mode mode,
int extra_count)
{
struct zone *oldzone, *newzone;
int dirty;
int expected_count = 1 + extra_count;
void **pslot;
if (!mapping) {
/* Anonymous page without mapping */
if (page_count(page) != expected_count)
return -EAGAIN;
/* No turning back from here */
set_page_memcg(newpage, page_memcg(page));
newpage->index = page->index;
newpage->mapping = page->mapping;
if (PageSwapBacked(page))
SetPageSwapBacked(newpage);
return MIGRATEPAGE_SUCCESS;
}
oldzone = page_zone(page);
newzone = page_zone(newpage);
spin_lock_irq(&mapping->tree_lock);
pslot = radix_tree_lookup_slot(&mapping->page_tree,
page_index(page));
expected_count += 1 + page_has_private(page);
if (page_count(page) != expected_count ||
radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
spin_unlock_irq(&mapping->tree_lock);
return -EAGAIN;
}
if (!page_freeze_refs(page, expected_count)) {
spin_unlock_irq(&mapping->tree_lock);
return -EAGAIN;
}
/*
* In the async migration case of moving a page with buffers, lock the
* buffers using trylock before the mapping is moved. If the mapping
* was moved, we later failed to lock the buffers and could not move
* the mapping back due to an elevated page count, we would have to
* block waiting on other references to be dropped.
*/
if (mode == MIGRATE_ASYNC && head &&
!buffer_migrate_lock_buffers(head, mode)) {
page_unfreeze_refs(page, expected_count);
spin_unlock_irq(&mapping->tree_lock);
return -EAGAIN;
}
/*
* Now we know that no one else is looking at the page:
* no turning back from here.
*/
set_page_memcg(newpage, page_memcg(page));
newpage->index = page->index;
newpage->mapping = page->mapping;
if (PageSwapBacked(page))
SetPageSwapBacked(newpage);
get_page(newpage); /* add cache reference */
if (PageSwapCache(page)) {
SetPageSwapCache(newpage);
set_page_private(newpage, page_private(page));
}
/* Move dirty while page refs frozen and newpage not yet exposed */
dirty = PageDirty(page);
if (dirty) {
ClearPageDirty(page);
SetPageDirty(newpage);
}
radix_tree_replace_slot(pslot, newpage);
/*
* Drop cache reference from old page by unfreezing
* to one less reference.
* We know this isn't the last reference.
*/
page_unfreeze_refs(page, expected_count - 1);
spin_unlock(&mapping->tree_lock);
/* Leave irq disabled to prevent preemption while updating stats */
/*
* If moved to a different zone then also account
* the page for that zone. Other VM counters will be
* taken care of when we establish references to the
* new page and drop references to the old page.
*
* Note that anonymous pages are accounted for
* via NR_FILE_PAGES and NR_ANON_PAGES if they
* are mapped to swap space.
*/
if (newzone != oldzone) {
__dec_zone_state(oldzone, NR_FILE_PAGES);
__inc_zone_state(newzone, NR_FILE_PAGES);
if (PageSwapBacked(page) && !PageSwapCache(page)) {
__dec_zone_state(oldzone, NR_SHMEM);
__inc_zone_state(newzone, NR_SHMEM);
}
if (dirty && mapping_cap_account_dirty(mapping)) {
__dec_zone_state(oldzone, NR_FILE_DIRTY);
__inc_zone_state(newzone, NR_FILE_DIRTY);
}
}
local_irq_enable();
return MIGRATEPAGE_SUCCESS;
}
| 14,143 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: void TabStrip::ChangeTabGroup(int model_index,
base::Optional<int> old_group,
base::Optional<int> new_group) {
if (new_group.has_value() && !group_headers_[new_group.value()]) {
const TabGroupData* group_data =
controller_->GetDataForGroup(new_group.value());
auto header = std::make_unique<TabGroupHeader>(group_data->title());
header->set_owned_by_client();
AddChildView(header.get());
group_headers_[new_group.value()] = std::move(header);
}
if (old_group.has_value() &&
controller_->ListTabsInGroup(old_group.value()).size() == 0) {
group_headers_.erase(old_group.value());
}
UpdateIdealBounds();
AnimateToIdealBounds();
}
Commit Message: Paint tab groups with the group color.
* The background of TabGroupHeader now uses the group color.
* The backgrounds of tabs in the group are tinted with the group color.
This treatment, along with the colors chosen, are intended to be
a placeholder.
Bug: 905491
Change-Id: Ic808548f8eba23064606e7fb8c9bba281d0d117f
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1610504
Commit-Queue: Bret Sepulveda <bsep@chromium.org>
Reviewed-by: Taylor Bergquist <tbergquist@chromium.org>
Cr-Commit-Position: refs/heads/master@{#660498}
CWE ID: CWE-20 | void TabStrip::ChangeTabGroup(int model_index,
base::Optional<int> old_group,
base::Optional<int> new_group) {
tab_at(model_index)->SetGroup(new_group);
if (new_group.has_value() && !group_headers_[new_group.value()]) {
auto header = std::make_unique<TabGroupHeader>(this, new_group.value());
header->set_owned_by_client();
AddChildView(header.get());
group_headers_[new_group.value()] = std::move(header);
}
if (old_group.has_value() &&
controller_->ListTabsInGroup(old_group.value()).size() == 0) {
group_headers_.erase(old_group.value());
}
UpdateIdealBounds();
AnimateToIdealBounds();
}
| 4,703 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
{
if (file->f_flags & O_DSYNC)
return 0;
if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
return 1;
if (nfs_write_pageuptodate(page, inode) && (inode->i_flock == NULL ||
(inode->i_flock->fl_start == 0 &&
inode->i_flock->fl_end == OFFSET_MAX &&
inode->i_flock->fl_type != F_RDLCK)))
return 1;
return 0;
}
Commit Message: nfs: always make sure page is up-to-date before extending a write to cover the entire page
We should always make sure the cached page is up-to-date when we're
determining whether we can extend a write to cover the full page -- even
if we've received a write delegation from the server.
Commit c7559663 added logic to skip this check if we have a write
delegation, which can lead to data corruption such as the following
scenario if client B receives a write delegation from the NFS server:
Client A:
# echo 123456789 > /mnt/file
Client B:
# echo abcdefghi >> /mnt/file
# cat /mnt/file
0�D0�abcdefghi
Just because we hold a write delegation doesn't mean that we've read in
the entire page contents.
Cc: <stable@vger.kernel.org> # v3.11+
Signed-off-by: Scott Mayhew <smayhew@redhat.com>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
CWE ID: CWE-20 | static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
{
if (file->f_flags & O_DSYNC)
return 0;
if (!nfs_write_pageuptodate(page, inode))
return 0;
if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
return 1;
if (inode->i_flock == NULL || (inode->i_flock->fl_start == 0 &&
inode->i_flock->fl_end == OFFSET_MAX &&
inode->i_flock->fl_type != F_RDLCK))
return 1;
return 0;
}
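Structurally, this fix is a reordering of guards: the page-up-to-date check, which is what actually protects the cached data, now runs before the delegation fast path instead of being skipped by it. A compressed sketch of the two shapes (plain booleans replace the real NFS and inode state, so this is only the control-flow skeleton):

#include <stdbool.h>
#include <stdio.h>

struct page_state {
    bool uptodate;          /* cached page holds current file contents */
    bool have_delegation;   /* server granted a write delegation */
};

/* Vulnerable shape: a delegation short-circuits every other check,
 * so a stale cached page can be extended and written back. */
static bool can_extend_buggy(const struct page_state *p)
{
    if (p->have_delegation)
        return true;
    return p->uptodate;
}

/* Fixed shape: the correctness gate is unconditional; privileges such
 * as a delegation only matter once the page is known to be current. */
static bool can_extend_fixed(const struct page_state *p)
{
    if (!p->uptodate)
        return false;
    if (p->have_delegation)
        return true;
    return false;   /* the real function also inspects file locks here */
}

int main(void)
{
    struct page_state stale = { .uptodate = false, .have_delegation = true };
    printf("stale page with delegation -> buggy: %d, fixed: %d\n",
           can_extend_buggy(&stale), can_extend_fixed(&stale));
    return 0;
}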
| 24,859 |
Analyze the following vulnerable code snippet, the commit message, and the CWE ID. Determine the impact category of this vulnerability. Output the fixed function only. | Code: R_API RConfigNode* r_config_set(RConfig *cfg, const char *name, const char *value) {
RConfigNode *node = NULL;
char *ov = NULL;
ut64 oi;
if (!cfg || STRNULL (name)) {
return NULL;
}
node = r_config_node_get (cfg, name);
if (node) {
if (node->flags & CN_RO) {
eprintf ("(error: '%s' config key is read only)\n", name);
return node;
}
oi = node->i_value;
if (node->value) {
ov = strdup (node->value);
if (!ov) {
goto beach;
}
} else {
free (node->value);
node->value = strdup ("");
}
if (node->flags & CN_BOOL) {
bool b = is_true (value);
node->i_value = (ut64) b? 1: 0;
char *value = strdup (r_str_bool (b));
if (value) {
free (node->value);
node->value = value;
}
} else {
if (!value) {
free (node->value);
node->value = strdup ("");
node->i_value = 0;
} else {
if (node->value == value) {
goto beach;
}
free (node->value);
node->value = strdup (value);
if (IS_DIGIT (*value)) {
if (strchr (value, '/')) {
node->i_value = r_num_get (cfg->num, value);
} else {
node->i_value = r_num_math (cfg->num, value);
}
} else {
node->i_value = 0;
}
node->flags |= CN_INT;
}
}
} else { // Create a new RConfigNode
oi = UT64_MAX;
if (!cfg->lock) {
node = r_config_node_new (name, value);
if (node) {
if (value && is_bool (value)) {
node->flags |= CN_BOOL;
node->i_value = is_true (value)? 1: 0;
}
if (cfg->ht) {
ht_insert (cfg->ht, node->name, node);
r_list_append (cfg->nodes, node);
cfg->n_nodes++;
}
} else {
eprintf ("r_config_set: unable to create a new RConfigNode\n");
}
} else {
eprintf ("r_config_set: variable '%s' not found\n", name);
}
}
if (node && node->setter) {
int ret = node->setter (cfg->user, node);
if (ret == false) {
if (oi != UT64_MAX) {
node->i_value = oi;
}
free (node->value);
node->value = strdup (ov? ov: "");
}
}
beach:
free (ov);
return node;
}
Commit Message: Fix #7698 - UAF in r_config_set when loading a dex
CWE ID: CWE-416 | R_API RConfigNode* r_config_set(RConfig *cfg, const char *name, const char *value) {
RConfigNode *node = NULL;
char *ov = NULL;
ut64 oi;
if (!cfg || STRNULL (name)) {
return NULL;
}
node = r_config_node_get (cfg, name);
if (node) {
if (node->flags & CN_RO) {
eprintf ("(error: '%s' config key is read only)\n", name);
return node;
}
oi = node->i_value;
if (node->value) {
ov = strdup (node->value);
if (!ov) {
goto beach;
}
} else {
free (node->value);
node->value = strdup ("");
}
if (node->flags & CN_BOOL) {
bool b = is_true (value);
node->i_value = (ut64) b? 1: 0;
char *value = strdup (r_str_bool (b));
if (value) {
free (node->value);
node->value = value;
}
} else {
if (!value) {
free (node->value);
node->value = strdup ("");
node->i_value = 0;
} else {
if (node->value == value) {
goto beach;
}
char *tmp = node->value;
node->value = strdup (value);
free (tmp);
if (IS_DIGIT (*value)) {
if (strchr (value, '/')) {
node->i_value = r_num_get (cfg->num, value);
} else {
node->i_value = r_num_math (cfg->num, value);
}
} else {
node->i_value = 0;
}
node->flags |= CN_INT;
}
}
} else { // Create a new RConfigNode
oi = UT64_MAX;
if (!cfg->lock) {
node = r_config_node_new (name, value);
if (node) {
if (value && is_bool (value)) {
node->flags |= CN_BOOL;
node->i_value = is_true (value)? 1: 0;
}
if (cfg->ht) {
ht_insert (cfg->ht, node->name, node);
r_list_append (cfg->nodes, node);
cfg->n_nodes++;
}
} else {
eprintf ("r_config_set: unable to create a new RConfigNode\n");
}
} else {
eprintf ("r_config_set: variable '%s' not found\n", name);
}
}
if (node && node->setter) {
int ret = node->setter (cfg->user, node);
if (ret == false) {
if (oi != UT64_MAX) {
node->i_value = oi;
}
free (node->value);
node->value = strdup (ov? ov: "");
}
}
beach:
free (ov);
return node;
}
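The use-after-free fix in r_config_set is purely an ordering change: duplicate the incoming string before freeing the old one, because the caller's value may point into the very buffer about to be released (for instance when a config value is set from a substring of itself). A minimal sketch of the unsafe and safe setter shapes, with a toy node type in place of RConfigNode:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct node { char *value; };

/* Unsafe ordering: if value aliases n->value, free() invalidates the
 * bytes before strdup() gets to read them. */
static void set_value_buggy(struct node *n, const char *value)
{
    free(n->value);
    n->value = strdup(value);
}

/* Safe ordering used by the fix: copy first, release the old buffer last. */
static void set_value_safe(struct node *n, const char *value)
{
    char *tmp = n->value;
    n->value = strdup(value);
    free(tmp);
}

int main(void)
{
    struct node n = { .value = strdup("asm.bits=64") };
    set_value_safe(&n, n.value + 4);   /* argument aliases the old value */
    printf("%s\n", n.value);           /* prints "bits=64" */
    free(n.value);
    (void)set_value_buggy;             /* deliberately never called with an alias */
    return 0;
}

Calling the unsafe variant with that same aliasing argument would read freed memory, which is the CWE-416 condition the commit closes.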
| 6,361 |