CVE ID | CVE Page | CWE ID | codeLink | commit_id | commit_message | func_after | func_before | lang | project | vul
---|---|---|---|---|---|---|---|---|---|---|
CVE-2017-7586 | https://www.cvedetails.com/cve/CVE-2017-7586/ | CWE-119 | https://github.com/erikd/libsndfile/commit/708e996c87c5fae77b104ccfeb8f6db784c32074 | 708e996c87c5fae77b104ccfeb8f6db784c32074 | src/ : Move to a variable length header buffer
Previously, the `psf->header` buffer was a fixed length specified by
`SF_HEADER_LEN` which was set to `12292`. This was problematic for
two reasons; this value was un-necessarily large for the majority
of files and too small for some others.
Now the size of the header buffer starts at 256 bytes and grows as
necessary up to a maximum of 100k. | psf_get_date_str (char *str, int maxlen)
{ time_t current ;
struct tm timedata, *tmptr ;
time (&current) ;
#if defined (HAVE_GMTIME_R)
/* If the re-entrant version is available, use it. */
tmptr = gmtime_r (&current, &timedata) ;
#elif defined (HAVE_GMTIME)
/* Otherwise use the standard one and copy the data to local storage. */
tmptr = gmtime (&current) ;
memcpy (&timedata, tmptr, sizeof (timedata)) ;
#else
tmptr = NULL ;
#endif
if (tmptr)
snprintf (str, maxlen, "%4d-%02d-%02d %02d:%02d:%02d UTC",
1900 + timedata.tm_year, timedata.tm_mon, timedata.tm_mday,
timedata.tm_hour, timedata.tm_min, timedata.tm_sec) ;
else
snprintf (str, maxlen, "Unknown date") ;
return ;
} /* psf_get_date_str */
| psf_get_date_str (char *str, int maxlen)
{ time_t current ;
struct tm timedata, *tmptr ;
time (&current) ;
#if defined (HAVE_GMTIME_R)
/* If the re-entrant version is available, use it. */
tmptr = gmtime_r (&current, &timedata) ;
#elif defined (HAVE_GMTIME)
/* Otherwise use the standard one and copy the data to local storage. */
tmptr = gmtime (&current) ;
memcpy (&timedata, tmptr, sizeof (timedata)) ;
#else
tmptr = NULL ;
#endif
if (tmptr)
snprintf (str, maxlen, "%4d-%02d-%02d %02d:%02d:%02d UTC",
1900 + timedata.tm_year, timedata.tm_mon, timedata.tm_mday,
timedata.tm_hour, timedata.tm_min, timedata.tm_sec) ;
else
snprintf (str, maxlen, "Unknown date") ;
return ;
} /* psf_get_date_str */
| C | libsndfile | 0 |
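The libsndfile commit in the row above replaces a fixed 12292-byte `psf->header` with a buffer that starts at 256 bytes and grows on demand up to 100k, but the `func_after`/`func_before` pair stored in the row (`psf_get_date_str`) does not contain that change. Below is a minimal hypothetical C sketch of such a grow-on-demand buffer; it is not part of the dataset row, the type `header_buf_t`, the helper `header_reserve`, and the doubling policy are assumptions, and only the 256-byte start and 100k cap come from the commit message.

```c
#include <stdlib.h>

#define HEADER_INITIAL_LEN	256				/* starting size, per the commit message */
#define HEADER_MAX_LEN		(100 * 1024)	/* upper bound, per the commit message */

typedef struct
{	unsigned char	*ptr ;
	size_t			len ;
} header_buf_t ;

/* Grow the header buffer (by doubling) until it can hold `needed` bytes.
** Returns 0 on success, -1 if the request exceeds the cap or realloc fails.
*/
static int
header_reserve (header_buf_t *h, size_t needed)
{	unsigned char *new_ptr ;
	size_t new_len = h->len ? h->len : HEADER_INITIAL_LEN ;

	if (needed > HEADER_MAX_LEN)
		return -1 ;
	while (new_len < needed)
		new_len *= 2 ;
	if (new_len > HEADER_MAX_LEN)
		new_len = HEADER_MAX_LEN ;
	if (new_len == h->len)
		return 0 ;

	if ((new_ptr = realloc (h->ptr, new_len)) == NULL)
		return -1 ;
	h->ptr = new_ptr ;
	h->len = new_len ;
	return 0 ;
} /* header_reserve */
```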
CVE-2018-18352 | https://www.cvedetails.com/cve/CVE-2018-18352/ | CWE-732 | https://github.com/chromium/chromium/commit/a9cbaa7a40e2b2723cfc2f266c42f4980038a949 | a9cbaa7a40e2b2723cfc2f266c42f4980038a949 | Simplify "WouldTaintOrigin" concept in media/blink
Currently WebMediaPlayer has three predicates:
- DidGetOpaqueResponseFromServiceWorker
- HasSingleSecurityOrigin
- DidPassCORSAccessCheck
. These are used to determine whether the response body is available
for scripts. They are known to be confusing, and actually
MediaElementAudioSourceHandler::WouldTaintOrigin misuses them.
This CL merges the three predicates to one, WouldTaintOrigin, to remove
the confusion. Now the "response type" concept is available and we
don't need a custom CORS check, so this CL removes
BaseAudioContext::WouldTaintOrigin. This CL also renames
URLData::has_opaque_data_ and its (direct and indirect) data accessors
to match the spec.
Bug: 849942, 875153
Change-Id: I6acf50169d7445c4ff614e80ac606f79ee577d2a
Reviewed-on: https://chromium-review.googlesource.com/c/1238098
Reviewed-by: Fredrik Hubinette <hubbe@chromium.org>
Reviewed-by: Kinuko Yasuda <kinuko@chromium.org>
Reviewed-by: Raymond Toy <rtoy@chromium.org>
Commit-Queue: Yutaka Hirano <yhirano@chromium.org>
Cr-Commit-Position: refs/heads/master@{#598258} | void MultibufferDataSource::CreateResourceLoader(int64_t first_byte_position,
int64_t last_byte_position) {
DCHECK(render_task_runner_->BelongsToCurrentThread());
SetReader(new MultiBufferReader(
url_data()->multibuffer(), first_byte_position, last_byte_position,
base::Bind(&MultibufferDataSource::ProgressCallback, weak_ptr_)));
reader_->SetIsClientAudioElement(is_client_audio_element_);
UpdateBufferSizes();
}
| void MultibufferDataSource::CreateResourceLoader(int64_t first_byte_position,
int64_t last_byte_position) {
DCHECK(render_task_runner_->BelongsToCurrentThread());
SetReader(new MultiBufferReader(
url_data()->multibuffer(), first_byte_position, last_byte_position,
base::Bind(&MultibufferDataSource::ProgressCallback, weak_ptr_)));
reader_->SetIsClientAudioElement(is_client_audio_element_);
UpdateBufferSizes();
}
| C | Chrome | 0 |
CVE-2010-1166 | https://www.cvedetails.com/cve/CVE-2010-1166/ | CWE-189 | https://cgit.freedesktop.org/xorg/xserver/commit/?id=d2f813f7db | d2f813f7db157fc83abc4b3726821c36ee7e40b1 | null | fbStore_a2r2g2b2 (FbBits *bits, const CARD32 *values, int x, int width, miIndexedPtr indexed)
{
int i;
CARD8 *pixel = ((CARD8 *) bits) + x;
for (i = 0; i < width; ++i) {
Splita(READ(values + i));
WRITE(pixel++, ((a ) & 0xc0) |
((r >> 2) & 0x30) |
((g >> 4) & 0x0c) |
((b >> 6) ));
}
}
| fbStore_a2r2g2b2 (FbBits *bits, const CARD32 *values, int x, int width, miIndexedPtr indexed)
{
int i;
CARD8 *pixel = ((CARD8 *) bits) + x;
for (i = 0; i < width; ++i) {
Splita(READ(values + i));
WRITE(pixel++, ((a ) & 0xc0) |
((r >> 2) & 0x30) |
((g >> 4) & 0x0c) |
((b >> 6) ));
}
}
| C | xserver | 0 |
null | null | null | https://github.com/chromium/chromium/commit/610f904d8215075c4681be4eb413f4348860bf9f | 610f904d8215075c4681be4eb413f4348860bf9f | Retrieve per host storage usage from QuotaManager.
R=kinuko@chromium.org
BUG=none
TEST=QuotaManagerTest.GetUsage
Review URL: http://codereview.chromium.org/8079004
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@103921 0039d316-1c4b-4281-b951-d872f2087c98 | void UsageTracker::DidGetClientGlobalUsage(StorageType type,
int64 usage,
int64 unlimited_usage) {
DCHECK_EQ(type, type_);
global_usage_.usage += usage;
global_usage_.unlimited_usage += unlimited_usage;
if (--global_usage_.pending_clients == 0) {
if (global_usage_.usage < 0)
global_usage_.usage = 0;
if (global_usage_.unlimited_usage > global_usage_.usage)
global_usage_.unlimited_usage = global_usage_.usage;
else if (global_usage_.unlimited_usage < 0)
global_usage_.unlimited_usage = 0;
global_usage_callbacks_.Run(type, global_usage_.usage,
global_usage_.unlimited_usage);
}
}
| void UsageTracker::DidGetClientGlobalUsage(StorageType type,
int64 usage,
int64 unlimited_usage) {
DCHECK_EQ(type, type_);
global_usage_.usage += usage;
global_usage_.unlimited_usage += unlimited_usage;
if (--global_usage_.pending_clients == 0) {
if (global_usage_.usage < 0)
global_usage_.usage = 0;
if (global_usage_.unlimited_usage > global_usage_.usage)
global_usage_.unlimited_usage = global_usage_.usage;
else if (global_usage_.unlimited_usage < 0)
global_usage_.unlimited_usage = 0;
global_usage_callbacks_.Run(type, global_usage_.usage,
global_usage_.unlimited_usage);
}
}
| C | Chrome | 0 |
null | null | null | https://github.com/chromium/chromium/commit/957973753ec4159003ff7930d946b7e89c7e09f3 | 957973753ec4159003ff7930d946b7e89c7e09f3 | Make NotifyHeadersComplete the last call in the function.
BUG=82903
Review URL: http://codereview.chromium.org/7038017
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@85719 0039d316-1c4b-4281-b951-d872f2087c98 | void BlobURLRequestJob::DidRead(int result) {
if (result < 0) {
NotifyFailure(net::ERR_FAILED);
return;
}
SetStatus(net::URLRequestStatus()); // Clear the IO_PENDING status
AdvanceBytesRead(result);
if (!read_buf_remaining_bytes_) {
int bytes_read = ReadCompleted();
NotifyReadComplete(bytes_read);
return;
}
int bytes_read = 0;
if (ReadLoop(&bytes_read))
NotifyReadComplete(bytes_read);
}
| void BlobURLRequestJob::DidRead(int result) {
if (result < 0) {
NotifyFailure(net::ERR_FAILED);
return;
}
SetStatus(net::URLRequestStatus()); // Clear the IO_PENDING status
AdvanceBytesRead(result);
if (!read_buf_remaining_bytes_) {
int bytes_read = ReadCompleted();
NotifyReadComplete(bytes_read);
return;
}
int bytes_read = 0;
if (ReadLoop(&bytes_read))
NotifyReadComplete(bytes_read);
}
| C | Chrome | 0 |
CVE-2017-11462 | https://www.cvedetails.com/cve/CVE-2017-11462/ | CWE-415 | https://github.com/krb5/krb5/commit/56f7b1bc95a2a3eeb420e069e7655fb181ade5cf | 56f7b1bc95a2a3eeb420e069e7655fb181ade5cf | Preserve GSS context on init/accept failure
After gss_init_sec_context() or gss_accept_sec_context() has created a
context, don't delete the mechglue context on failures from subsequent
calls, even if the mechanism deletes the mech-specific context (which
is allowed by RFC 2744 but not preferred). Check for union contexts
with no mechanism context in each GSS function which accepts a
gss_ctx_id_t.
CVE-2017-11462:
RFC 2744 permits a GSS-API implementation to delete an existing
security context on a second or subsequent call to
gss_init_sec_context() or gss_accept_sec_context() if the call results
in an error. This API behavior has been found to be dangerous,
leading to the possibility of memory errors in some callers. For
safety, GSS-API implementations should instead preserve existing
security contexts on error until the caller deletes them.
All versions of MIT krb5 prior to this change may delete acceptor
contexts on error. Versions 1.13.4 through 1.13.7, 1.14.1 through
1.14.5, and 1.15 through 1.15.1 may also delete initiator contexts on
error.
ticket: 8598 (new)
target_version: 1.15-next
target_version: 1.14-next
tags: pullup | gss_get_mic (minor_status,
context_handle,
qop_req,
message_buffer,
msg_token)
OM_uint32 * minor_status;
gss_ctx_id_t context_handle;
gss_qop_t qop_req;
gss_buffer_t message_buffer;
gss_buffer_t msg_token;
{
OM_uint32 status;
gss_union_ctx_id_t ctx;
gss_mechanism mech;
status = val_get_mic_args(minor_status, context_handle,
qop_req, message_buffer, msg_token);
if (status != GSS_S_COMPLETE)
return (status);
/*
* select the approprate underlying mechanism routine and
* call it.
*/
ctx = (gss_union_ctx_id_t) context_handle;
if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT)
return (GSS_S_NO_CONTEXT);
mech = gssint_get_mechanism (ctx->mech_type);
if (mech) {
if (mech->gss_get_mic) {
status = mech->gss_get_mic(
minor_status,
ctx->internal_ctx_id,
qop_req,
message_buffer,
msg_token);
if (status != GSS_S_COMPLETE)
map_error(minor_status, mech);
} else
status = GSS_S_UNAVAILABLE;
return(status);
}
return (GSS_S_BAD_MECH);
}
| gss_get_mic (minor_status,
context_handle,
qop_req,
message_buffer,
msg_token)
OM_uint32 * minor_status;
gss_ctx_id_t context_handle;
gss_qop_t qop_req;
gss_buffer_t message_buffer;
gss_buffer_t msg_token;
{
OM_uint32 status;
gss_union_ctx_id_t ctx;
gss_mechanism mech;
status = val_get_mic_args(minor_status, context_handle,
qop_req, message_buffer, msg_token);
if (status != GSS_S_COMPLETE)
return (status);
/*
* select the approprate underlying mechanism routine and
* call it.
*/
ctx = (gss_union_ctx_id_t) context_handle;
mech = gssint_get_mechanism (ctx->mech_type);
if (mech) {
if (mech->gss_get_mic) {
status = mech->gss_get_mic(
minor_status,
ctx->internal_ctx_id,
qop_req,
message_buffer,
msg_token);
if (status != GSS_S_COMPLETE)
map_error(minor_status, mech);
} else
status = GSS_S_UNAVAILABLE;
return(status);
}
return (GSS_S_BAD_MECH);
}
| C | krb5 | 1 |
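This row is marked vul = 1: the patched `func_after` differs from `func_before` by a single guard after `ctx = (gss_union_ctx_id_t) context_handle;`, returning GSS_S_NO_CONTEXT when `ctx->internal_ctx_id == GSS_C_NO_CONTEXT`, since a failed init/accept now preserves the union context even though the mechanism context is gone. A hedged sketch of that guard factored into a helper follows; it is illustrative only, and the helper name and the `"mglueP.h"` include are assumptions.

```c
#include <gssapi/gssapi.h>
#include "mglueP.h"	/* assumed: krb5 mechglue-internal header defining gss_union_ctx_id_t */

/* Sketch of the check the commit adds to each mechglue entry point that
 * accepts a gss_ctx_id_t: the union context may outlive a failed
 * init/accept, so its mechanism-specific half can legitimately be absent. */
static OM_uint32
validate_union_ctx(gss_ctx_id_t context_handle)
{
    gss_union_ctx_id_t ctx = (gss_union_ctx_id_t) context_handle;

    if (ctx == NULL || ctx->internal_ctx_id == GSS_C_NO_CONTEXT)
        return (GSS_S_NO_CONTEXT);
    return (GSS_S_COMPLETE);
}
```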
CVE-2013-0910 | https://www.cvedetails.com/cve/CVE-2013-0910/ | CWE-287 | https://github.com/chromium/chromium/commit/ac8bd041b81e46e4e4fcd5021aaa5499703952e6 | ac8bd041b81e46e4e4fcd5021aaa5499703952e6 | Follow-on fixes and naming changes for https://codereview.chromium.org/12086077/
BUG=172573
Review URL: https://codereview.chromium.org/12177018
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@180600 0039d316-1c4b-4281-b951-d872f2087c98 | void PluginServiceImpl::RegisterFilePathWatcher(FilePathWatcher* watcher,
const FilePath& path) {
bool result = watcher->Watch(path, false,
base::Bind(&NotifyPluginDirChanged));
DCHECK(result);
}
| void PluginServiceImpl::RegisterFilePathWatcher(FilePathWatcher* watcher,
const FilePath& path) {
bool result = watcher->Watch(path, false,
base::Bind(&NotifyPluginDirChanged));
DCHECK(result);
}
| C | Chrome | 0 |
CVE-2015-5195 | https://www.cvedetails.com/cve/CVE-2015-5195/ | CWE-20 | https://github.com/ntp-project/ntp/commit/52e977d79a0c4ace997e5c74af429844da2f27be | 52e977d79a0c4ace997e5c74af429844da2f27be | [Bug 1773] openssl not detected during ./configure.
[Bug 1774] Segfaults if cryptostats enabled and built without OpenSSL. | record_loop_stats(
double offset, /* offset */
double freq, /* frequency (PPM) */
double jitter, /* jitter */
double wander, /* wander (PPM) */
int spoll
)
{
l_fp now;
u_long day;
if (!stats_control)
return;
get_systime(&now);
filegen_setup(&loopstats, now.l_ui);
day = now.l_ui / 86400 + MJD_1900;
now.l_ui %= 86400;
if (loopstats.fp != NULL) {
fprintf(loopstats.fp, "%lu %s %.9f %.3f %.9f %.6f %d\n",
day, ulfptoa(&now, 3), offset, freq * 1e6, jitter,
wander * 1e6, spoll);
fflush(loopstats.fp);
}
}
| record_loop_stats(
double offset, /* offset */
double freq, /* frequency (PPM) */
double jitter, /* jitter */
double wander, /* wander (PPM) */
int spoll
)
{
l_fp now;
u_long day;
if (!stats_control)
return;
get_systime(&now);
filegen_setup(&loopstats, now.l_ui);
day = now.l_ui / 86400 + MJD_1900;
now.l_ui %= 86400;
if (loopstats.fp != NULL) {
fprintf(loopstats.fp, "%lu %s %.9f %.3f %.9f %.6f %d\n",
day, ulfptoa(&now, 3), offset, freq * 1e6, jitter,
wander * 1e6, spoll);
fflush(loopstats.fp);
}
}
| C | ntp | 0 |
CVE-2017-18222 | https://www.cvedetails.com/cve/CVE-2017-18222/ | CWE-119 | https://github.com/torvalds/linux/commit/412b65d15a7f8a93794653968308fc100f2aa87c | 412b65d15a7f8a93794653968308fc100f2aa87c | net: hns: fix ethtool_get_strings overflow in hns driver
hns_get_sset_count() returns HNS_NET_STATS_CNT and the data space allocated
is not enough for ethtool_get_strings(), which will cause random memory
corruption.
When SLAB and DEBUG_SLAB are both enabled, memory corruptions like the
the following can be observed without this patch:
[ 43.115200] Slab corruption (Not tainted): Acpi-ParseExt start=ffff801fb0b69030, len=80
[ 43.115206] Redzone: 0x9f911029d006462/0x5f78745f31657070.
[ 43.115208] Last user: [<5f7272655f746b70>](0x5f7272655f746b70)
[ 43.115214] 010: 70 70 65 31 5f 74 78 5f 70 6b 74 00 6b 6b 6b 6b ppe1_tx_pkt.kkkk
[ 43.115217] 030: 70 70 65 31 5f 74 78 5f 70 6b 74 5f 6f 6b 00 6b ppe1_tx_pkt_ok.k
[ 43.115218] Next obj: start=ffff801fb0b69098, len=80
[ 43.115220] Redzone: 0x706d655f6f666966/0x9f911029d74e35b.
[ 43.115229] Last user: [<ffff0000084b11b0>](acpi_os_release_object+0x28/0x38)
[ 43.115231] 000: 74 79 00 6b 6b 6b 6b 6b 70 70 65 31 5f 74 78 5f ty.kkkkkppe1_tx_
[ 43.115232] 010: 70 6b 74 5f 65 72 72 5f 63 73 75 6d 5f 66 61 69 pkt_err_csum_fai
Signed-off-by: Timmy Li <lixiaoping3@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | static void hns_xgmac_exc_irq_en(struct mac_driver *drv, u32 en)
{
u32 clr_vlue = 0xfffffffful;
u32 msk_vlue = en ? 0xfffffffful : 0; /*1 is en, 0 is dis*/
dsaf_write_dev(drv, XGMAC_INT_STATUS_REG, clr_vlue);
dsaf_write_dev(drv, XGMAC_INT_ENABLE_REG, msk_vlue);
}
| static void hns_xgmac_exc_irq_en(struct mac_driver *drv, u32 en)
{
u32 clr_vlue = 0xfffffffful;
u32 msk_vlue = en ? 0xfffffffful : 0; /*1 is en, 0 is dis*/
dsaf_write_dev(drv, XGMAC_INT_STATUS_REG, clr_vlue);
dsaf_write_dev(drv, XGMAC_INT_ENABLE_REG, msk_vlue);
}
| C | linux | 0 |
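The hns commit above fixes a count/strings mismatch: hns_get_sset_count() reported a number of statistics entries that did not match what ethtool_get_strings() later wrote, so the buffer sized from the count was overrun. The functions stored in this row do not show that change. Below is a generic, hypothetical sketch of keeping the two ethtool callbacks consistent by deriving both from one table; `demo_stat_names` and the `demo_*` callbacks are invented names, and only the get_sset_count/get_strings pairing reflects the commit.

```c
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/* One table drives both callbacks, so the count handed to the ethtool core
 * always matches the number of ETH_GSTRING_LEN entries written later. */
static const char demo_stat_names[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"tx_packets",
	"rx_errors",
};

static int demo_get_sset_count(struct net_device *ndev, int stringset)
{
	if (stringset != ETH_SS_STATS)
		return -EOPNOTSUPP;
	return ARRAY_SIZE(demo_stat_names);
}

static void demo_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, demo_stat_names, sizeof(demo_stat_names));
}
```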
CVE-2018-6077 | https://www.cvedetails.com/cve/CVE-2018-6077/ | CWE-200 | https://github.com/chromium/chromium/commit/6ed26f014f76f10e76e80636027a2db9dcbe1664 | 6ed26f014f76f10e76e80636027a2db9dcbe1664 | [PE] Distinguish between tainting due to canvas content and filter.
A filter on a canvas can itself lead to origin tainting, for reasons
other than that the canvas contents are tainted. This CL changes
to distinguish these two causes, so that we recompute filters
on content-tainting change.
Bug: 778506
Change-Id: I3cec8ef3b2772f2af78cdd4b290520113092cca6
Reviewed-on: https://chromium-review.googlesource.com/811767
Reviewed-by: Fredrik Söderquist <fs@opera.com>
Commit-Queue: Chris Harrelson <chrishtr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#522274} | void BaseRenderingContext2D::clip(Path2D* dom_path,
const String& winding_rule_string) {
ClipInternal(dom_path->GetPath(), winding_rule_string);
}
| void BaseRenderingContext2D::clip(Path2D* dom_path,
const String& winding_rule_string) {
ClipInternal(dom_path->GetPath(), winding_rule_string);
}
| C | Chrome | 0 |
CVE-2014-6269 | https://www.cvedetails.com/cve/CVE-2014-6269/ | CWE-189 | https://git.haproxy.org/?p=haproxy-1.5.git;a=commitdiff;h=b4d05093bc89f71377230228007e69a1434c1a0c | b4d05093bc89f71377230228007e69a1434c1a0c | null | int http_process_request(struct session *s, struct channel *req, int an_bit)
{
struct http_txn *txn = &s->txn;
struct http_msg *msg = &txn->req;
struct connection *cli_conn = objt_conn(req->prod->end);
if (unlikely(msg->msg_state < HTTP_MSG_BODY)) {
/* we need more data */
channel_dont_connect(req);
return 0;
}
DPRINTF(stderr,"[%u] %s: session=%p b=%p, exp(r,w)=%u,%u bf=%08x bh=%d analysers=%02x\n",
now_ms, __FUNCTION__,
s,
req,
req->rex, req->wex,
req->flags,
req->buf->i,
req->analysers);
if (s->fe->comp || s->be->comp)
select_compression_request_header(s, req->buf);
/*
* Right now, we know that we have processed the entire headers
* and that unwanted requests have been filtered out. We can do
* whatever we want with the remaining request. Also, now we
* may have separate values for ->fe, ->be.
*/
/*
* If HTTP PROXY is set we simply get remote server address parsing
* incoming request. Note that this requires that a connection is
* allocated on the server side.
*/
if ((s->be->options & PR_O_HTTP_PROXY) && !(s->flags & SN_ADDR_SET)) {
struct connection *conn;
char *path;
/* Note that for now we don't reuse existing proxy connections */
if (unlikely((conn = si_alloc_conn(req->cons, 0)) == NULL)) {
txn->req.msg_state = HTTP_MSG_ERROR;
txn->status = 500;
req->analysers = 0;
stream_int_retnclose(req->prod, http_error_message(s, HTTP_ERR_500));
if (!(s->flags & SN_ERR_MASK))
s->flags |= SN_ERR_RESOURCE;
if (!(s->flags & SN_FINST_MASK))
s->flags |= SN_FINST_R;
return 0;
}
path = http_get_path(txn);
url2sa(req->buf->p + msg->sl.rq.u,
path ? path - (req->buf->p + msg->sl.rq.u) : msg->sl.rq.u_l,
&conn->addr.to, NULL);
/* if the path was found, we have to remove everything between
* req->buf->p + msg->sl.rq.u and path (excluded). If it was not
* found, we need to replace from req->buf->p + msg->sl.rq.u for
* u_l characters by a single "/".
*/
if (path) {
char *cur_ptr = req->buf->p;
char *cur_end = cur_ptr + txn->req.sl.rq.l;
int delta;
delta = buffer_replace2(req->buf, req->buf->p + msg->sl.rq.u, path, NULL, 0);
http_msg_move_end(&txn->req, delta);
cur_end += delta;
if (http_parse_reqline(&txn->req, HTTP_MSG_RQMETH, cur_ptr, cur_end + 1, NULL, NULL) == NULL)
goto return_bad_req;
}
else {
char *cur_ptr = req->buf->p;
char *cur_end = cur_ptr + txn->req.sl.rq.l;
int delta;
delta = buffer_replace2(req->buf, req->buf->p + msg->sl.rq.u,
req->buf->p + msg->sl.rq.u + msg->sl.rq.u_l, "/", 1);
http_msg_move_end(&txn->req, delta);
cur_end += delta;
if (http_parse_reqline(&txn->req, HTTP_MSG_RQMETH, cur_ptr, cur_end + 1, NULL, NULL) == NULL)
goto return_bad_req;
}
}
/*
* 7: Now we can work with the cookies.
* Note that doing so might move headers in the request, but
* the fields will stay coherent and the URI will not move.
* This should only be performed in the backend.
*/
if ((s->be->cookie_name || s->be->appsession_name || s->fe->capture_name)
&& !(txn->flags & (TX_CLDENY|TX_CLTARPIT)))
manage_client_side_cookies(s, req);
/*
* 8: the appsession cookie was looked up very early in 1.2,
* so let's do the same now.
*/
/* It needs to look into the URI unless persistence must be ignored */
if ((txn->sessid == NULL) && s->be->appsession_name && !(s->flags & SN_IGNORE_PRST)) {
get_srv_from_appsession(s, req->buf->p + msg->sl.rq.u, msg->sl.rq.u_l);
}
/* add unique-id if "header-unique-id" is specified */
if (!LIST_ISEMPTY(&s->fe->format_unique_id)) {
if ((s->unique_id = pool_alloc2(pool2_uniqueid)) == NULL)
goto return_bad_req;
s->unique_id[0] = '\0';
build_logline(s, s->unique_id, UNIQUEID_LEN, &s->fe->format_unique_id);
}
if (s->fe->header_unique_id && s->unique_id) {
chunk_printf(&trash, "%s: %s", s->fe->header_unique_id, s->unique_id);
if (trash.len < 0)
goto return_bad_req;
if (unlikely(http_header_add_tail2(&txn->req, &txn->hdr_idx, trash.str, trash.len) < 0))
goto return_bad_req;
}
/*
* 9: add X-Forwarded-For if either the frontend or the backend
* asks for it.
*/
if ((s->fe->options | s->be->options) & PR_O_FWDFOR) {
struct hdr_ctx ctx = { .idx = 0 };
if (!((s->fe->options | s->be->options) & PR_O_FF_ALWAYS) &&
http_find_header2(s->be->fwdfor_hdr_len ? s->be->fwdfor_hdr_name : s->fe->fwdfor_hdr_name,
s->be->fwdfor_hdr_len ? s->be->fwdfor_hdr_len : s->fe->fwdfor_hdr_len,
req->buf->p, &txn->hdr_idx, &ctx)) {
/* The header is set to be added only if none is present
* and we found it, so don't do anything.
*/
}
else if (cli_conn && cli_conn->addr.from.ss_family == AF_INET) {
/* Add an X-Forwarded-For header unless the source IP is
* in the 'except' network range.
*/
if ((!s->fe->except_mask.s_addr ||
(((struct sockaddr_in *)&cli_conn->addr.from)->sin_addr.s_addr & s->fe->except_mask.s_addr)
!= s->fe->except_net.s_addr) &&
(!s->be->except_mask.s_addr ||
(((struct sockaddr_in *)&cli_conn->addr.from)->sin_addr.s_addr & s->be->except_mask.s_addr)
!= s->be->except_net.s_addr)) {
int len;
unsigned char *pn;
pn = (unsigned char *)&((struct sockaddr_in *)&cli_conn->addr.from)->sin_addr;
/* Note: we rely on the backend to get the header name to be used for
* x-forwarded-for, because the header is really meant for the backends.
* However, if the backend did not specify any option, we have to rely
* on the frontend's header name.
*/
if (s->be->fwdfor_hdr_len) {
len = s->be->fwdfor_hdr_len;
memcpy(trash.str, s->be->fwdfor_hdr_name, len);
} else {
len = s->fe->fwdfor_hdr_len;
memcpy(trash.str, s->fe->fwdfor_hdr_name, len);
}
len += snprintf(trash.str + len, trash.size - len, ": %d.%d.%d.%d", pn[0], pn[1], pn[2], pn[3]);
if (unlikely(http_header_add_tail2(&txn->req, &txn->hdr_idx, trash.str, len) < 0))
goto return_bad_req;
}
}
else if (cli_conn && cli_conn->addr.from.ss_family == AF_INET6) {
/* FIXME: for the sake of completeness, we should also support
* 'except' here, although it is mostly useless in this case.
*/
int len;
char pn[INET6_ADDRSTRLEN];
inet_ntop(AF_INET6,
(const void *)&((struct sockaddr_in6 *)(&cli_conn->addr.from))->sin6_addr,
pn, sizeof(pn));
/* Note: we rely on the backend to get the header name to be used for
* x-forwarded-for, because the header is really meant for the backends.
* However, if the backend did not specify any option, we have to rely
* on the frontend's header name.
*/
if (s->be->fwdfor_hdr_len) {
len = s->be->fwdfor_hdr_len;
memcpy(trash.str, s->be->fwdfor_hdr_name, len);
} else {
len = s->fe->fwdfor_hdr_len;
memcpy(trash.str, s->fe->fwdfor_hdr_name, len);
}
len += snprintf(trash.str + len, trash.size - len, ": %s", pn);
if (unlikely(http_header_add_tail2(&txn->req, &txn->hdr_idx, trash.str, len) < 0))
goto return_bad_req;
}
}
/*
* 10: add X-Original-To if either the frontend or the backend
* asks for it.
*/
if ((s->fe->options | s->be->options) & PR_O_ORGTO) {
/* FIXME: don't know if IPv6 can handle that case too. */
if (cli_conn && cli_conn->addr.from.ss_family == AF_INET) {
/* Add an X-Original-To header unless the destination IP is
* in the 'except' network range.
*/
conn_get_to_addr(cli_conn);
if (cli_conn->addr.to.ss_family == AF_INET &&
((!s->fe->except_mask_to.s_addr ||
(((struct sockaddr_in *)&cli_conn->addr.to)->sin_addr.s_addr & s->fe->except_mask_to.s_addr)
!= s->fe->except_to.s_addr) &&
(!s->be->except_mask_to.s_addr ||
(((struct sockaddr_in *)&cli_conn->addr.to)->sin_addr.s_addr & s->be->except_mask_to.s_addr)
!= s->be->except_to.s_addr))) {
int len;
unsigned char *pn;
pn = (unsigned char *)&((struct sockaddr_in *)&cli_conn->addr.to)->sin_addr;
/* Note: we rely on the backend to get the header name to be used for
* x-original-to, because the header is really meant for the backends.
* However, if the backend did not specify any option, we have to rely
* on the frontend's header name.
*/
if (s->be->orgto_hdr_len) {
len = s->be->orgto_hdr_len;
memcpy(trash.str, s->be->orgto_hdr_name, len);
} else {
len = s->fe->orgto_hdr_len;
memcpy(trash.str, s->fe->orgto_hdr_name, len);
}
len += snprintf(trash.str + len, trash.size - len, ": %d.%d.%d.%d", pn[0], pn[1], pn[2], pn[3]);
if (unlikely(http_header_add_tail2(&txn->req, &txn->hdr_idx, trash.str, len) < 0))
goto return_bad_req;
}
}
}
/* 11: add "Connection: close" or "Connection: keep-alive" if needed and not yet set.
* If an "Upgrade" token is found, the header is left untouched in order not to have
* to deal with some servers bugs : some of them fail an Upgrade if anything but
* "Upgrade" is present in the Connection header.
*/
if (!(txn->flags & TX_HDR_CONN_UPG) &&
(((txn->flags & TX_CON_WANT_MSK) != TX_CON_WANT_TUN) ||
((s->fe->options & PR_O_HTTP_MODE) == PR_O_HTTP_PCL ||
(s->be->options & PR_O_HTTP_MODE) == PR_O_HTTP_PCL))) {
unsigned int want_flags = 0;
if (msg->flags & HTTP_MSGF_VER_11) {
if (((txn->flags & TX_CON_WANT_MSK) >= TX_CON_WANT_SCL ||
((s->fe->options & PR_O_HTTP_MODE) == PR_O_HTTP_PCL ||
(s->be->options & PR_O_HTTP_MODE) == PR_O_HTTP_PCL)) &&
!((s->fe->options2|s->be->options2) & PR_O2_FAKE_KA))
want_flags |= TX_CON_CLO_SET;
} else {
if (((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_KAL &&
((s->fe->options & PR_O_HTTP_MODE) != PR_O_HTTP_PCL &&
(s->be->options & PR_O_HTTP_MODE) != PR_O_HTTP_PCL)) ||
((s->fe->options2|s->be->options2) & PR_O2_FAKE_KA))
want_flags |= TX_CON_KAL_SET;
}
if (want_flags != (txn->flags & (TX_CON_CLO_SET|TX_CON_KAL_SET)))
http_change_connection_header(txn, msg, want_flags);
}
/* If we have no server assigned yet and we're balancing on url_param
* with a POST request, we may be interested in checking the body for
* that parameter. This will be done in another analyser.
*/
if (!(s->flags & (SN_ASSIGNED|SN_DIRECT)) &&
s->txn.meth == HTTP_METH_POST && s->be->url_param_name != NULL &&
(msg->flags & (HTTP_MSGF_CNT_LEN|HTTP_MSGF_TE_CHNK))) {
channel_dont_connect(req);
req->analysers |= AN_REQ_HTTP_BODY;
}
if (msg->flags & HTTP_MSGF_XFER_LEN) {
req->analysers |= AN_REQ_HTTP_XFER_BODY;
#ifdef TCP_QUICKACK
/* We expect some data from the client. Unless we know for sure
* we already have a full request, we have to re-enable quick-ack
* in case we previously disabled it, otherwise we might cause
* the client to delay further data.
*/
if ((s->listener->options & LI_O_NOQUICKACK) &&
cli_conn && conn_ctrl_ready(cli_conn) &&
((msg->flags & HTTP_MSGF_TE_CHNK) ||
(msg->body_len > req->buf->i - txn->req.eoh - 2)))
setsockopt(cli_conn->t.sock.fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
#endif
}
/*************************************************************
* OK, that's finished for the headers. We have done what we *
* could. Let's switch to the DATA state. *
************************************************************/
req->analyse_exp = TICK_ETERNITY;
req->analysers &= ~an_bit;
/* if the server closes the connection, we want to immediately react
* and close the socket to save packets and syscalls.
*/
if (!(req->analysers & AN_REQ_HTTP_XFER_BODY))
req->cons->flags |= SI_FL_NOHALF;
s->logs.tv_request = now;
/* OK let's go on with the BODY now */
return 1;
return_bad_req: /* let's centralize all bad requests */
if (unlikely(msg->msg_state == HTTP_MSG_ERROR) || msg->err_pos >= 0) {
/* we detected a parsing error. We want to archive this request
* in the dedicated proxy area for later troubleshooting.
*/
http_capture_bad_message(&s->fe->invalid_req, s, msg, msg->msg_state, s->fe);
}
txn->req.msg_state = HTTP_MSG_ERROR;
txn->status = 400;
req->analysers = 0;
stream_int_retnclose(req->prod, http_error_message(s, HTTP_ERR_400));
s->fe->fe_counters.failed_req++;
if (s->listener->counters)
s->listener->counters->failed_req++;
if (!(s->flags & SN_ERR_MASK))
s->flags |= SN_ERR_PRXCOND;
if (!(s->flags & SN_FINST_MASK))
s->flags |= SN_FINST_R;
return 0;
}
| int http_process_request(struct session *s, struct channel *req, int an_bit)
{
struct http_txn *txn = &s->txn;
struct http_msg *msg = &txn->req;
struct connection *cli_conn = objt_conn(req->prod->end);
if (unlikely(msg->msg_state < HTTP_MSG_BODY)) {
/* we need more data */
channel_dont_connect(req);
return 0;
}
DPRINTF(stderr,"[%u] %s: session=%p b=%p, exp(r,w)=%u,%u bf=%08x bh=%d analysers=%02x\n",
now_ms, __FUNCTION__,
s,
req,
req->rex, req->wex,
req->flags,
req->buf->i,
req->analysers);
if (s->fe->comp || s->be->comp)
select_compression_request_header(s, req->buf);
/*
* Right now, we know that we have processed the entire headers
* and that unwanted requests have been filtered out. We can do
* whatever we want with the remaining request. Also, now we
* may have separate values for ->fe, ->be.
*/
/*
* If HTTP PROXY is set we simply get remote server address parsing
* incoming request. Note that this requires that a connection is
* allocated on the server side.
*/
if ((s->be->options & PR_O_HTTP_PROXY) && !(s->flags & SN_ADDR_SET)) {
struct connection *conn;
char *path;
/* Note that for now we don't reuse existing proxy connections */
if (unlikely((conn = si_alloc_conn(req->cons, 0)) == NULL)) {
txn->req.msg_state = HTTP_MSG_ERROR;
txn->status = 500;
req->analysers = 0;
stream_int_retnclose(req->prod, http_error_message(s, HTTP_ERR_500));
if (!(s->flags & SN_ERR_MASK))
s->flags |= SN_ERR_RESOURCE;
if (!(s->flags & SN_FINST_MASK))
s->flags |= SN_FINST_R;
return 0;
}
path = http_get_path(txn);
url2sa(req->buf->p + msg->sl.rq.u,
path ? path - (req->buf->p + msg->sl.rq.u) : msg->sl.rq.u_l,
&conn->addr.to, NULL);
/* if the path was found, we have to remove everything between
* req->buf->p + msg->sl.rq.u and path (excluded). If it was not
* found, we need to replace from req->buf->p + msg->sl.rq.u for
* u_l characters by a single "/".
*/
if (path) {
char *cur_ptr = req->buf->p;
char *cur_end = cur_ptr + txn->req.sl.rq.l;
int delta;
delta = buffer_replace2(req->buf, req->buf->p + msg->sl.rq.u, path, NULL, 0);
http_msg_move_end(&txn->req, delta);
cur_end += delta;
if (http_parse_reqline(&txn->req, HTTP_MSG_RQMETH, cur_ptr, cur_end + 1, NULL, NULL) == NULL)
goto return_bad_req;
}
else {
char *cur_ptr = req->buf->p;
char *cur_end = cur_ptr + txn->req.sl.rq.l;
int delta;
delta = buffer_replace2(req->buf, req->buf->p + msg->sl.rq.u,
req->buf->p + msg->sl.rq.u + msg->sl.rq.u_l, "/", 1);
http_msg_move_end(&txn->req, delta);
cur_end += delta;
if (http_parse_reqline(&txn->req, HTTP_MSG_RQMETH, cur_ptr, cur_end + 1, NULL, NULL) == NULL)
goto return_bad_req;
}
}
/*
* 7: Now we can work with the cookies.
* Note that doing so might move headers in the request, but
* the fields will stay coherent and the URI will not move.
* This should only be performed in the backend.
*/
if ((s->be->cookie_name || s->be->appsession_name || s->fe->capture_name)
&& !(txn->flags & (TX_CLDENY|TX_CLTARPIT)))
manage_client_side_cookies(s, req);
/*
* 8: the appsession cookie was looked up very early in 1.2,
* so let's do the same now.
*/
/* It needs to look into the URI unless persistence must be ignored */
if ((txn->sessid == NULL) && s->be->appsession_name && !(s->flags & SN_IGNORE_PRST)) {
get_srv_from_appsession(s, req->buf->p + msg->sl.rq.u, msg->sl.rq.u_l);
}
/* add unique-id if "header-unique-id" is specified */
if (!LIST_ISEMPTY(&s->fe->format_unique_id)) {
if ((s->unique_id = pool_alloc2(pool2_uniqueid)) == NULL)
goto return_bad_req;
s->unique_id[0] = '\0';
build_logline(s, s->unique_id, UNIQUEID_LEN, &s->fe->format_unique_id);
}
if (s->fe->header_unique_id && s->unique_id) {
chunk_printf(&trash, "%s: %s", s->fe->header_unique_id, s->unique_id);
if (trash.len < 0)
goto return_bad_req;
if (unlikely(http_header_add_tail2(&txn->req, &txn->hdr_idx, trash.str, trash.len) < 0))
goto return_bad_req;
}
/*
* 9: add X-Forwarded-For if either the frontend or the backend
* asks for it.
*/
if ((s->fe->options | s->be->options) & PR_O_FWDFOR) {
struct hdr_ctx ctx = { .idx = 0 };
if (!((s->fe->options | s->be->options) & PR_O_FF_ALWAYS) &&
http_find_header2(s->be->fwdfor_hdr_len ? s->be->fwdfor_hdr_name : s->fe->fwdfor_hdr_name,
s->be->fwdfor_hdr_len ? s->be->fwdfor_hdr_len : s->fe->fwdfor_hdr_len,
req->buf->p, &txn->hdr_idx, &ctx)) {
/* The header is set to be added only if none is present
* and we found it, so don't do anything.
*/
}
else if (cli_conn && cli_conn->addr.from.ss_family == AF_INET) {
/* Add an X-Forwarded-For header unless the source IP is
* in the 'except' network range.
*/
if ((!s->fe->except_mask.s_addr ||
(((struct sockaddr_in *)&cli_conn->addr.from)->sin_addr.s_addr & s->fe->except_mask.s_addr)
!= s->fe->except_net.s_addr) &&
(!s->be->except_mask.s_addr ||
(((struct sockaddr_in *)&cli_conn->addr.from)->sin_addr.s_addr & s->be->except_mask.s_addr)
!= s->be->except_net.s_addr)) {
int len;
unsigned char *pn;
pn = (unsigned char *)&((struct sockaddr_in *)&cli_conn->addr.from)->sin_addr;
/* Note: we rely on the backend to get the header name to be used for
* x-forwarded-for, because the header is really meant for the backends.
* However, if the backend did not specify any option, we have to rely
* on the frontend's header name.
*/
if (s->be->fwdfor_hdr_len) {
len = s->be->fwdfor_hdr_len;
memcpy(trash.str, s->be->fwdfor_hdr_name, len);
} else {
len = s->fe->fwdfor_hdr_len;
memcpy(trash.str, s->fe->fwdfor_hdr_name, len);
}
len += snprintf(trash.str + len, trash.size - len, ": %d.%d.%d.%d", pn[0], pn[1], pn[2], pn[3]);
if (unlikely(http_header_add_tail2(&txn->req, &txn->hdr_idx, trash.str, len) < 0))
goto return_bad_req;
}
}
else if (cli_conn && cli_conn->addr.from.ss_family == AF_INET6) {
/* FIXME: for the sake of completeness, we should also support
* 'except' here, although it is mostly useless in this case.
*/
int len;
char pn[INET6_ADDRSTRLEN];
inet_ntop(AF_INET6,
(const void *)&((struct sockaddr_in6 *)(&cli_conn->addr.from))->sin6_addr,
pn, sizeof(pn));
/* Note: we rely on the backend to get the header name to be used for
* x-forwarded-for, because the header is really meant for the backends.
* However, if the backend did not specify any option, we have to rely
* on the frontend's header name.
*/
if (s->be->fwdfor_hdr_len) {
len = s->be->fwdfor_hdr_len;
memcpy(trash.str, s->be->fwdfor_hdr_name, len);
} else {
len = s->fe->fwdfor_hdr_len;
memcpy(trash.str, s->fe->fwdfor_hdr_name, len);
}
len += snprintf(trash.str + len, trash.size - len, ": %s", pn);
if (unlikely(http_header_add_tail2(&txn->req, &txn->hdr_idx, trash.str, len) < 0))
goto return_bad_req;
}
}
/*
* 10: add X-Original-To if either the frontend or the backend
* asks for it.
*/
if ((s->fe->options | s->be->options) & PR_O_ORGTO) {
/* FIXME: don't know if IPv6 can handle that case too. */
if (cli_conn && cli_conn->addr.from.ss_family == AF_INET) {
/* Add an X-Original-To header unless the destination IP is
* in the 'except' network range.
*/
conn_get_to_addr(cli_conn);
if (cli_conn->addr.to.ss_family == AF_INET &&
((!s->fe->except_mask_to.s_addr ||
(((struct sockaddr_in *)&cli_conn->addr.to)->sin_addr.s_addr & s->fe->except_mask_to.s_addr)
!= s->fe->except_to.s_addr) &&
(!s->be->except_mask_to.s_addr ||
(((struct sockaddr_in *)&cli_conn->addr.to)->sin_addr.s_addr & s->be->except_mask_to.s_addr)
!= s->be->except_to.s_addr))) {
int len;
unsigned char *pn;
pn = (unsigned char *)&((struct sockaddr_in *)&cli_conn->addr.to)->sin_addr;
/* Note: we rely on the backend to get the header name to be used for
* x-original-to, because the header is really meant for the backends.
* However, if the backend did not specify any option, we have to rely
* on the frontend's header name.
*/
if (s->be->orgto_hdr_len) {
len = s->be->orgto_hdr_len;
memcpy(trash.str, s->be->orgto_hdr_name, len);
} else {
len = s->fe->orgto_hdr_len;
memcpy(trash.str, s->fe->orgto_hdr_name, len);
}
len += snprintf(trash.str + len, trash.size - len, ": %d.%d.%d.%d", pn[0], pn[1], pn[2], pn[3]);
if (unlikely(http_header_add_tail2(&txn->req, &txn->hdr_idx, trash.str, len) < 0))
goto return_bad_req;
}
}
}
/* 11: add "Connection: close" or "Connection: keep-alive" if needed and not yet set.
* If an "Upgrade" token is found, the header is left untouched in order not to have
* to deal with some servers bugs : some of them fail an Upgrade if anything but
* "Upgrade" is present in the Connection header.
*/
if (!(txn->flags & TX_HDR_CONN_UPG) &&
(((txn->flags & TX_CON_WANT_MSK) != TX_CON_WANT_TUN) ||
((s->fe->options & PR_O_HTTP_MODE) == PR_O_HTTP_PCL ||
(s->be->options & PR_O_HTTP_MODE) == PR_O_HTTP_PCL))) {
unsigned int want_flags = 0;
if (msg->flags & HTTP_MSGF_VER_11) {
if (((txn->flags & TX_CON_WANT_MSK) >= TX_CON_WANT_SCL ||
((s->fe->options & PR_O_HTTP_MODE) == PR_O_HTTP_PCL ||
(s->be->options & PR_O_HTTP_MODE) == PR_O_HTTP_PCL)) &&
!((s->fe->options2|s->be->options2) & PR_O2_FAKE_KA))
want_flags |= TX_CON_CLO_SET;
} else {
if (((txn->flags & TX_CON_WANT_MSK) == TX_CON_WANT_KAL &&
((s->fe->options & PR_O_HTTP_MODE) != PR_O_HTTP_PCL &&
(s->be->options & PR_O_HTTP_MODE) != PR_O_HTTP_PCL)) ||
((s->fe->options2|s->be->options2) & PR_O2_FAKE_KA))
want_flags |= TX_CON_KAL_SET;
}
if (want_flags != (txn->flags & (TX_CON_CLO_SET|TX_CON_KAL_SET)))
http_change_connection_header(txn, msg, want_flags);
}
/* If we have no server assigned yet and we're balancing on url_param
* with a POST request, we may be interested in checking the body for
* that parameter. This will be done in another analyser.
*/
if (!(s->flags & (SN_ASSIGNED|SN_DIRECT)) &&
s->txn.meth == HTTP_METH_POST && s->be->url_param_name != NULL &&
(msg->flags & (HTTP_MSGF_CNT_LEN|HTTP_MSGF_TE_CHNK))) {
channel_dont_connect(req);
req->analysers |= AN_REQ_HTTP_BODY;
}
if (msg->flags & HTTP_MSGF_XFER_LEN) {
req->analysers |= AN_REQ_HTTP_XFER_BODY;
#ifdef TCP_QUICKACK
/* We expect some data from the client. Unless we know for sure
* we already have a full request, we have to re-enable quick-ack
* in case we previously disabled it, otherwise we might cause
* the client to delay further data.
*/
if ((s->listener->options & LI_O_NOQUICKACK) &&
cli_conn && conn_ctrl_ready(cli_conn) &&
((msg->flags & HTTP_MSGF_TE_CHNK) ||
(msg->body_len > req->buf->i - txn->req.eoh - 2)))
setsockopt(cli_conn->t.sock.fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
#endif
}
/*************************************************************
* OK, that's finished for the headers. We have done what we *
* could. Let's switch to the DATA state. *
************************************************************/
req->analyse_exp = TICK_ETERNITY;
req->analysers &= ~an_bit;
/* if the server closes the connection, we want to immediately react
* and close the socket to save packets and syscalls.
*/
if (!(req->analysers & AN_REQ_HTTP_XFER_BODY))
req->cons->flags |= SI_FL_NOHALF;
s->logs.tv_request = now;
/* OK let's go on with the BODY now */
return 1;
return_bad_req: /* let's centralize all bad requests */
if (unlikely(msg->msg_state == HTTP_MSG_ERROR) || msg->err_pos >= 0) {
/* we detected a parsing error. We want to archive this request
* in the dedicated proxy area for later troubleshooting.
*/
http_capture_bad_message(&s->fe->invalid_req, s, msg, msg->msg_state, s->fe);
}
txn->req.msg_state = HTTP_MSG_ERROR;
txn->status = 400;
req->analysers = 0;
stream_int_retnclose(req->prod, http_error_message(s, HTTP_ERR_400));
s->fe->fe_counters.failed_req++;
if (s->listener->counters)
s->listener->counters->failed_req++;
if (!(s->flags & SN_ERR_MASK))
s->flags |= SN_ERR_PRXCOND;
if (!(s->flags & SN_FINST_MASK))
s->flags |= SN_FINST_R;
return 0;
}
| C | haproxy | 0 |
CVE-2013-7421 | https://www.cvedetails.com/cve/CVE-2013-7421/ | CWE-264 | https://github.com/torvalds/linux/commit/5d26a105b5a73e5635eae0629b42fa0a90e07b7b | 5d26a105b5a73e5635eae0629b42fa0a90e07b7b | crypto: prefix module autoloading with "crypto-"
This prefixes all crypto module loading with "crypto-" so we never run
the risk of exposing module auto-loading to userspace via a crypto API,
as demonstrated by Mathias Krause:
https://lkml.org/lkml/2013/3/4/70
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> | void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
{
aes_enc_blk(ctx, dst, src);
}
| void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
{
aes_enc_blk(ctx, dst, src);
}
| C | linux | 0 |
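The patch series above closes a module-autoloading hole by making the crypto API request modules as "crypto-&lt;name&gt;", so only modules that explicitly declare a crypto alias can be loaded through it; the function stored in this row is itself unchanged by the patch. The visible part of such a change in a cipher module is the alias declarations, roughly as sketched below. MODULE_ALIAS_CRYPTO is the macro introduced by the series; the specific alias strings here are illustrative.

```c
#include <linux/module.h>
#include <linux/crypto.h>

/* Declares both the bare and the "crypto-" prefixed alias, so
 * request_module("crypto-aes") issued by the crypto API can still find
 * this module while arbitrary userspace-driven autoloading cannot. */
MODULE_ALIAS_CRYPTO("aes");
MODULE_ALIAS_CRYPTO("aes-asm");
```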
CVE-2016-5219 | https://www.cvedetails.com/cve/CVE-2016-5219/ | CWE-416 | https://github.com/chromium/chromium/commit/a4150b688a754d3d10d2ca385155b1c95d77d6ae | a4150b688a754d3d10d2ca385155b1c95d77d6ae | Add GL_PROGRAM_COMPLETION_QUERY_CHROMIUM
This makes the query of GL_COMPLETION_STATUS_KHR to programs much
cheaper by minimizing the round-trip to the GPU thread.
Bug: 881152, 957001
Change-Id: Iadfa798af29225e752c710ca5c25f50b3dd3101a
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1586630
Commit-Queue: Kenneth Russell <kbr@chromium.org>
Reviewed-by: Kentaro Hara <haraken@chromium.org>
Reviewed-by: Geoff Lang <geofflang@chromium.org>
Reviewed-by: Kenneth Russell <kbr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#657568} | error::Error GLES2DecoderPassthroughImpl::DoDeletePathsCHROMIUM(GLuint path,
GLsizei range) {
NOTIMPLEMENTED();
return error::kNoError;
}
| error::Error GLES2DecoderPassthroughImpl::DoDeletePathsCHROMIUM(GLuint path,
GLsizei range) {
NOTIMPLEMENTED();
return error::kNoError;
}
| C | Chrome | 0 |
CVE-2018-6063 | https://www.cvedetails.com/cve/CVE-2018-6063/ | CWE-787 | https://github.com/chromium/chromium/commit/673ce95d481ea9368c4d4d43ac756ba1d6d9e608 | 673ce95d481ea9368c4d4d43ac756ba1d6d9e608 | Correct mojo::WrapSharedMemoryHandle usage
Fixes some incorrect uses of mojo::WrapSharedMemoryHandle which
were assuming that the call actually has any control over the memory
protection applied to a handle when mapped.
Where fixing usage is infeasible for this CL, TODOs are added to
annotate follow-up work.
Also updates the API and documentation to (hopefully) improve clarity
and avoid similar mistakes from being made in the future.
BUG=792900
Cq-Include-Trybots: master.tryserver.chromium.android:android_optional_gpu_tests_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel
Change-Id: I0578aaa9ca3bfcb01aaf2451315d1ede95458477
Reviewed-on: https://chromium-review.googlesource.com/818282
Reviewed-by: Wei Li <weili@chromium.org>
Reviewed-by: Lei Zhang <thestig@chromium.org>
Reviewed-by: John Abd-El-Malek <jam@chromium.org>
Reviewed-by: Daniel Cheng <dcheng@chromium.org>
Reviewed-by: Sadrul Chowdhury <sadrul@chromium.org>
Reviewed-by: Yuzhu Shen <yzshen@chromium.org>
Reviewed-by: Robert Sesek <rsesek@chromium.org>
Commit-Queue: Ken Rockot <rockot@chromium.org>
Cr-Commit-Position: refs/heads/master@{#530268} | void VideoCaptureImpl::OnBufferCreated(int32_t buffer_id,
mojo::ScopedSharedBufferHandle handle) {
DVLOG(1) << __func__ << " buffer_id: " << buffer_id;
DCHECK(io_thread_checker_.CalledOnValidThread());
DCHECK(handle.is_valid());
base::SharedMemoryHandle memory_handle;
size_t memory_size = 0;
mojo::UnwrappedSharedMemoryHandleProtection protection;
const MojoResult result = mojo::UnwrapSharedMemoryHandle(
std::move(handle), &memory_handle, &memory_size, &protection);
DCHECK_EQ(MOJO_RESULT_OK, result);
DCHECK_GT(memory_size, 0u);
// TODO(https://crbug.com/803136): We should also be able to assert that the
// unwrapped handle was shared for read-only mapping. That condition is not
// currently guaranteed to be met.
std::unique_ptr<base::SharedMemory> shm(
new base::SharedMemory(memory_handle, true /* read_only */));
if (!shm->Map(memory_size)) {
DLOG(ERROR) << "OnBufferCreated: Map failed.";
return;
}
const bool inserted =
client_buffers_
.insert(std::make_pair(buffer_id,
new ClientBuffer(std::move(shm), memory_size)))
.second;
DCHECK(inserted);
}
| void VideoCaptureImpl::OnBufferCreated(int32_t buffer_id,
mojo::ScopedSharedBufferHandle handle) {
DVLOG(1) << __func__ << " buffer_id: " << buffer_id;
DCHECK(io_thread_checker_.CalledOnValidThread());
DCHECK(handle.is_valid());
base::SharedMemoryHandle memory_handle;
size_t memory_size = 0;
bool read_only_flag = false;
const MojoResult result = mojo::UnwrapSharedMemoryHandle(
std::move(handle), &memory_handle, &memory_size, &read_only_flag);
DCHECK_EQ(MOJO_RESULT_OK, result);
DCHECK_GT(memory_size, 0u);
std::unique_ptr<base::SharedMemory> shm(
new base::SharedMemory(memory_handle, true /* read_only */));
if (!shm->Map(memory_size)) {
DLOG(ERROR) << "OnBufferCreated: Map failed.";
return;
}
const bool inserted =
client_buffers_
.insert(std::make_pair(buffer_id,
new ClientBuffer(std::move(shm), memory_size)))
.second;
DCHECK(inserted);
}
| C | Chrome | 1 |
CVE-2011-2843 | https://www.cvedetails.com/cve/CVE-2011-2843/ | CWE-119 | https://github.com/chromium/chromium/commit/d304b5ec1b16766ea2cb552a27dc14df848d6a0e | d304b5ec1b16766ea2cb552a27dc14df848d6a0e | Don't forget the ffmpeg input buffer padding when allocating a codec's
extradata buffer.
BUG=82438
Review URL: http://codereview.chromium.org/7137002
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@88354 0039d316-1c4b-4281-b951-d872f2087c98 | void FFmpegVideoDecodeEngine::DecodeFrame(scoped_refptr<Buffer> buffer) {
scoped_refptr<VideoFrame> video_frame;
AVPacket packet;
av_init_packet(&packet);
packet.data = const_cast<uint8*>(buffer->GetData());
packet.size = buffer->GetDataSize();
PipelineStatistics statistics;
statistics.video_bytes_decoded = buffer->GetDataSize();
codec_context_->reordered_opaque = buffer->GetTimestamp().InMicroseconds();
av_frame_->reordered_opaque = codec_context_->reordered_opaque;
int frame_decoded = 0;
int result = avcodec_decode_video2(codec_context_,
av_frame_.get(),
&frame_decoded,
&packet);
if (result < 0) {
LOG(ERROR) << "Error decoding a video frame with timestamp: "
<< buffer->GetTimestamp().InMicroseconds() << " us, duration: "
<< buffer->GetDuration().InMicroseconds() << " us, packet size: "
<< buffer->GetDataSize() << " bytes";
event_handler_->OnError();
return;
}
if (frame_decoded == 0) {
if (buffer->IsEndOfStream()) { // We had started flushing.
event_handler_->ConsumeVideoFrame(video_frame, statistics);
output_eos_reached_ = true;
} else {
ReadInput();
}
return;
}
if (!av_frame_->data[VideoFrame::kYPlane] ||
!av_frame_->data[VideoFrame::kUPlane] ||
!av_frame_->data[VideoFrame::kVPlane]) {
event_handler_->OnError();
return;
}
DCHECK_LE(av_frame_->repeat_pict, 2); // Sanity check.
AVRational doubled_time_base;
doubled_time_base.num = frame_rate_denominator_;
doubled_time_base.den = frame_rate_numerator_ * 2;
base::TimeDelta timestamp =
base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque);
base::TimeDelta duration =
ConvertFromTimeBase(doubled_time_base, 2 + av_frame_->repeat_pict);
DCHECK(frame_queue_available_.size());
video_frame = frame_queue_available_.front();
frame_queue_available_.pop_front();
size_t height = codec_context_->height;
CopyPlane(VideoFrame::kYPlane, video_frame.get(), av_frame_.get(), height);
CopyPlane(VideoFrame::kUPlane, video_frame.get(), av_frame_.get(), height);
CopyPlane(VideoFrame::kVPlane, video_frame.get(), av_frame_.get(), height);
video_frame->SetTimestamp(timestamp);
video_frame->SetDuration(duration);
pending_output_buffers_--;
event_handler_->ConsumeVideoFrame(video_frame, statistics);
}
| void FFmpegVideoDecodeEngine::DecodeFrame(scoped_refptr<Buffer> buffer) {
scoped_refptr<VideoFrame> video_frame;
AVPacket packet;
av_init_packet(&packet);
packet.data = const_cast<uint8*>(buffer->GetData());
packet.size = buffer->GetDataSize();
PipelineStatistics statistics;
statistics.video_bytes_decoded = buffer->GetDataSize();
codec_context_->reordered_opaque = buffer->GetTimestamp().InMicroseconds();
av_frame_->reordered_opaque = codec_context_->reordered_opaque;
int frame_decoded = 0;
int result = avcodec_decode_video2(codec_context_,
av_frame_.get(),
&frame_decoded,
&packet);
if (result < 0) {
LOG(ERROR) << "Error decoding a video frame with timestamp: "
<< buffer->GetTimestamp().InMicroseconds() << " us, duration: "
<< buffer->GetDuration().InMicroseconds() << " us, packet size: "
<< buffer->GetDataSize() << " bytes";
event_handler_->OnError();
return;
}
if (frame_decoded == 0) {
if (buffer->IsEndOfStream()) { // We had started flushing.
event_handler_->ConsumeVideoFrame(video_frame, statistics);
output_eos_reached_ = true;
} else {
ReadInput();
}
return;
}
if (!av_frame_->data[VideoFrame::kYPlane] ||
!av_frame_->data[VideoFrame::kUPlane] ||
!av_frame_->data[VideoFrame::kVPlane]) {
event_handler_->OnError();
return;
}
DCHECK_LE(av_frame_->repeat_pict, 2); // Sanity check.
AVRational doubled_time_base;
doubled_time_base.num = frame_rate_denominator_;
doubled_time_base.den = frame_rate_numerator_ * 2;
base::TimeDelta timestamp =
base::TimeDelta::FromMicroseconds(av_frame_->reordered_opaque);
base::TimeDelta duration =
ConvertFromTimeBase(doubled_time_base, 2 + av_frame_->repeat_pict);
DCHECK(frame_queue_available_.size());
video_frame = frame_queue_available_.front();
frame_queue_available_.pop_front();
size_t height = codec_context_->height;
CopyPlane(VideoFrame::kYPlane, video_frame.get(), av_frame_.get(), height);
CopyPlane(VideoFrame::kUPlane, video_frame.get(), av_frame_.get(), height);
CopyPlane(VideoFrame::kVPlane, video_frame.get(), av_frame_.get(), height);
video_frame->SetTimestamp(timestamp);
video_frame->SetDuration(duration);
pending_output_buffers_--;
event_handler_->ConsumeVideoFrame(video_frame, statistics);
}
| C | Chrome | 0 |
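The Chromium commit above concerns allocating a codec's extradata buffer with FFmpeg's required input-buffer padding; the DecodeFrame function stored in this row is unaffected. A minimal hedged sketch of that allocation pattern follows; the constant name matches the FFmpeg API of that era (FF_INPUT_BUFFER_PADDING_SIZE, later renamed AV_INPUT_BUFFER_PADDING_SIZE), and the helper name and surrounding variables are assumptions.

```c
#include <string.h>
#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>

/* Allocate extradata with the padding libavcodec's parsers expect, and
 * zero the padding so optimized readers never touch uninitialized bytes. */
static int set_extradata(AVCodecContext *ctx, const uint8_t *src, int size)
{
	ctx->extradata = av_mallocz(size + FF_INPUT_BUFFER_PADDING_SIZE);
	if (!ctx->extradata)
		return -1;
	memcpy(ctx->extradata, src, size);
	ctx->extradata_size = size;
	return 0;
}
```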
CVE-2018-6159 | https://www.cvedetails.com/cve/CVE-2018-6159/ | CWE-200 | https://github.com/chromium/chromium/commit/01b42e2bc2aac531b17596729ae4e5c223ae7124 | 01b42e2bc2aac531b17596729ae4e5c223ae7124 | Fix timing allow check algorithm for service workers
This CL uses the OriginalURLViaServiceWorker() in the timing allow check
algorithm if the response WasFetchedViaServiceWorker(). This way, if a
service worker changes a same origin request to become cross origin,
then the timing allow check algorithm will still fail.
resource-timing-worker.js is changed so it avoids an empty Response,
which is an odd case in terms of same origin checks.
Bug: 837275
Change-Id: I7e497a6fcc2ee14244121b915ca5f5cceded417a
Reviewed-on: https://chromium-review.googlesource.com/1038229
Commit-Queue: Nicolás Peña Moreno <npm@chromium.org>
Reviewed-by: Yoav Weiss <yoav@yoav.ws>
Reviewed-by: Timothy Dresser <tdresser@chromium.org>
Cr-Commit-Position: refs/heads/master@{#555476} | void Performance::BuildJSONValue(V8ObjectBuilder& builder) const {
builder.AddNumber("timeOrigin", timeOrigin());
}
| void Performance::BuildJSONValue(V8ObjectBuilder& builder) const {
builder.AddNumber("timeOrigin", timeOrigin());
}
| C | Chrome | 0 |
CVE-2011-2861 | https://www.cvedetails.com/cve/CVE-2011-2861/ | CWE-20 | https://github.com/chromium/chromium/commit/8262245d384be025f13e2a5b3a03b7e5c98374ce | 8262245d384be025f13e2a5b3a03b7e5c98374ce | DevTools: move DevToolsAgent/Client into content.
BUG=84078
TEST=
Review URL: http://codereview.chromium.org/7461019
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@93596 0039d316-1c4b-4281-b951-d872f2087c98 | void RenderView::OnRedo() {
if (!webview())
return;
webview()->focusedFrame()->executeCommand(WebString::fromUTF8("Redo"));
}
| void RenderView::OnRedo() {
if (!webview())
return;
webview()->focusedFrame()->executeCommand(WebString::fromUTF8("Redo"));
}
| C | Chrome | 0 |
CVE-2016-1647 | https://www.cvedetails.com/cve/CVE-2016-1647/ | null | https://github.com/chromium/chromium/commit/e5787005a9004d7be289cc649c6ae4f3051996cd | e5787005a9004d7be289cc649c6ae4f3051996cd | Check that RWHI isn't deleted manually while owned by a scoped_ptr in RVHI
BUG=590284
Review URL: https://codereview.chromium.org/1747183002
Cr-Commit-Position: refs/heads/master@{#378844} | void RenderWidgetHostImpl::AddMouseEventCallback(
const MouseEventCallback& callback) {
mouse_event_callbacks_.push_back(callback);
}
| void RenderWidgetHostImpl::AddMouseEventCallback(
const MouseEventCallback& callback) {
mouse_event_callbacks_.push_back(callback);
}
| C | Chrome | 0 |
CVE-2017-5016 | https://www.cvedetails.com/cve/CVE-2017-5016/ | CWE-1021 | https://github.com/chromium/chromium/commit/a8e17a3031b6ad69c399e5e04dd0084e577097fc | a8e17a3031b6ad69c399e5e04dd0084e577097fc | Form validation: Do not show validation bubble if the page is invisible.
BUG=673163
Review-Url: https://codereview.chromium.org/2572813003
Cr-Commit-Position: refs/heads/master@{#438476} | void HTMLFormControlElement::didChangeForm() {
ListedElement::didChangeForm();
formOwnerSetNeedsValidityCheck();
if (formOwner() && isConnected() && canBeSuccessfulSubmitButton())
formOwner()->invalidateDefaultButtonStyle();
}
| void HTMLFormControlElement::didChangeForm() {
ListedElement::didChangeForm();
formOwnerSetNeedsValidityCheck();
if (formOwner() && isConnected() && canBeSuccessfulSubmitButton())
formOwner()->invalidateDefaultButtonStyle();
}
| C | Chrome | 0 |
CVE-2016-0824 | https://www.cvedetails.com/cve/CVE-2016-0824/ | CWE-254 | https://android.googlesource.com/platform/external/libmpeg2/+/ffab15eb80630dc799eb410855c93525b75233c3 | ffab15eb80630dc799eb410855c93525b75233c3 | Fixed bit stream access to make sure that it is not read beyond the allocated size.
Bug: 25765591
Change-Id: I98c23a3c3f84f6710f29bffe5ed73adcf51d47f6
| IMPEG2D_ERROR_CODES_T impeg2d_pre_pic_dec_proc(dec_state_t *ps_dec)
{
WORD32 u4_get_disp;
pic_buf_t *ps_disp_pic;
IMPEG2D_ERROR_CODES_T e_error = (IMPEG2D_ERROR_CODES_T)IVD_ERROR_NONE;
u4_get_disp = 0;
ps_disp_pic = NULL;
/* Field Picture */
if(ps_dec->u2_picture_structure != FRAME_PICTURE)
{
ps_dec->u2_num_vert_mb = (ps_dec->u2_vertical_size + 31) >> 5;
if(ps_dec->u2_num_flds_decoded == 0)
{
pic_buf_t *ps_pic_buf;
u4_get_disp = 1;
ps_pic_buf = impeg2_buf_mgr_get_next_free(ps_dec->pv_pic_buf_mg, &ps_dec->i4_cur_buf_id);
if (NULL == ps_pic_buf)
{
return IMPEG2D_NO_FREE_BUF_ERR;
}
impeg2_buf_mgr_set_status((buf_mgr_t *)ps_dec->pv_pic_buf_mg, ps_dec->i4_cur_buf_id, BUF_MGR_DISP);
impeg2_buf_mgr_set_status((buf_mgr_t *)ps_dec->pv_pic_buf_mg, ps_dec->i4_cur_buf_id, BUF_MGR_REF);
ps_pic_buf->u4_ts = ps_dec->u4_inp_ts;
ps_pic_buf->e_pic_type = ps_dec->e_pic_type;
ps_dec->ps_cur_pic = ps_pic_buf;
ps_dec->s_cur_frm_buf.pu1_y = ps_pic_buf->pu1_y;
ps_dec->s_cur_frm_buf.pu1_u = ps_pic_buf->pu1_u;
ps_dec->s_cur_frm_buf.pu1_v = ps_pic_buf->pu1_v;
}
if(ps_dec->u2_picture_structure == TOP_FIELD)
{
ps_dec->u2_fld_parity = TOP;
}
else
{
ps_dec->u2_fld_parity = BOTTOM;
}
ps_dec->u2_field_dct = 0;
ps_dec->u2_read_dct_type = 0;
ps_dec->u2_read_motion_type = 1;
ps_dec->u2_fld_pic = 1;
ps_dec->u2_frm_pic = 0;
ps_dec->ps_func_forw_or_back = gas_impeg2d_func_fld_fw_or_bk;
ps_dec->ps_func_bi_direct = gas_impeg2d_func_fld_bi_direct;
}
/* Frame Picture */
else
{
pic_buf_t *ps_pic_buf;
ps_dec->u2_num_vert_mb = (ps_dec->u2_vertical_size + 15) >> 4;
u4_get_disp = 1;
ps_pic_buf = impeg2_buf_mgr_get_next_free(ps_dec->pv_pic_buf_mg, &ps_dec->i4_cur_buf_id);
if (NULL == ps_pic_buf)
{
return IMPEG2D_NO_FREE_BUF_ERR;
}
impeg2_buf_mgr_set_status((buf_mgr_t *)ps_dec->pv_pic_buf_mg, ps_dec->i4_cur_buf_id, BUF_MGR_DISP);
impeg2_buf_mgr_set_status((buf_mgr_t *)ps_dec->pv_pic_buf_mg, ps_dec->i4_cur_buf_id, BUF_MGR_REF);
ps_pic_buf->u4_ts = ps_dec->u4_inp_ts;
ps_pic_buf->e_pic_type = ps_dec->e_pic_type;
ps_dec->ps_cur_pic = ps_pic_buf;
ps_dec->s_cur_frm_buf.pu1_y = ps_pic_buf->pu1_y;
ps_dec->s_cur_frm_buf.pu1_u = ps_pic_buf->pu1_u;
ps_dec->s_cur_frm_buf.pu1_v = ps_pic_buf->pu1_v;
if(ps_dec->u2_frame_pred_frame_dct == 0)
{
ps_dec->u2_read_dct_type = 1;
ps_dec->u2_read_motion_type = 1;
}
else
{
ps_dec->u2_read_dct_type = 0;
ps_dec->u2_read_motion_type = 0;
ps_dec->u2_motion_type = 2;
ps_dec->u2_field_dct = 0;
}
ps_dec->u2_fld_parity = TOP;
ps_dec->u2_fld_pic = 0;
ps_dec->u2_frm_pic = 1;
ps_dec->ps_func_forw_or_back = gas_impeg2d_func_frm_fw_or_bk;
ps_dec->ps_func_bi_direct = gas_impeg2d_func_frm_bi_direct;
}
ps_dec->u2_def_dc_pred[Y_LUMA] = 128 << ps_dec->u2_intra_dc_precision;
ps_dec->u2_def_dc_pred[U_CHROMA] = 128 << ps_dec->u2_intra_dc_precision;
ps_dec->u2_def_dc_pred[V_CHROMA] = 128 << ps_dec->u2_intra_dc_precision;
ps_dec->u2_num_mbs_left = ps_dec->u2_num_horiz_mb * ps_dec->u2_num_vert_mb;
if(u4_get_disp)
{
if(ps_dec->u4_num_frames_decoded > 1)
{
ps_disp_pic = impeg2_disp_mgr_get(&ps_dec->s_disp_mgr, &ps_dec->i4_disp_buf_id);
}
ps_dec->ps_disp_pic = ps_disp_pic;
if(ps_disp_pic)
{
if(1 == ps_dec->u4_share_disp_buf)
{
ps_dec->ps_disp_frm_buf->pv_y_buf = ps_disp_pic->pu1_y;
if(IV_YUV_420P == ps_dec->i4_chromaFormat)
{
ps_dec->ps_disp_frm_buf->pv_u_buf = ps_disp_pic->pu1_u;
ps_dec->ps_disp_frm_buf->pv_v_buf = ps_disp_pic->pu1_v;
}
else
{
UWORD8 *pu1_buf;
pu1_buf = ps_dec->as_disp_buffers[ps_disp_pic->i4_buf_id].pu1_bufs[1];
ps_dec->ps_disp_frm_buf->pv_u_buf = pu1_buf;
pu1_buf = ps_dec->as_disp_buffers[ps_disp_pic->i4_buf_id].pu1_bufs[2];
ps_dec->ps_disp_frm_buf->pv_v_buf = pu1_buf;
}
}
}
}
switch(ps_dec->e_pic_type)
{
case I_PIC:
{
ps_dec->pf_decode_slice = impeg2d_dec_i_slice;
break;
}
case D_PIC:
{
ps_dec->pf_decode_slice = impeg2d_dec_d_slice;
break;
}
case P_PIC:
{
ps_dec->pf_decode_slice = impeg2d_dec_p_b_slice;
ps_dec->pu2_mb_type = gau2_impeg2d_p_mb_type;
break;
}
case B_PIC:
{
ps_dec->pf_decode_slice = impeg2d_dec_p_b_slice;
ps_dec->pu2_mb_type = gau2_impeg2d_b_mb_type;
break;
}
default:
return IMPEG2D_INVALID_PIC_TYPE;
}
/*************************************************************************/
/* Set the reference pictures */
/*************************************************************************/
/* Error resilience: If forward and backward pictures are going to be NULL*/
/* then assign both to the current */
/* if one of them NULL then we will assign the non null to the NULL one */
if(ps_dec->e_pic_type == P_PIC)
{
if (NULL == ps_dec->as_recent_fld[1][0].pu1_y)
{
ps_dec->as_recent_fld[1][0] = ps_dec->s_cur_frm_buf;
}
if (NULL == ps_dec->as_recent_fld[1][1].pu1_y)
{
impeg2d_get_bottom_field_buf(&ps_dec->s_cur_frm_buf, &ps_dec->as_recent_fld[1][1],
ps_dec->u2_frame_width);
}
ps_dec->as_ref_buf[FORW][TOP] = ps_dec->as_recent_fld[1][0];
ps_dec->as_ref_buf[FORW][BOTTOM] = ps_dec->as_recent_fld[1][1];
}
else if(ps_dec->e_pic_type == B_PIC)
{
if((NULL == ps_dec->as_recent_fld[1][0].pu1_y) && (NULL == ps_dec->as_recent_fld[0][0].pu1_y))
{
ps_dec->as_recent_fld[1][0] = ps_dec->s_cur_frm_buf;
impeg2d_get_bottom_field_buf(&ps_dec->s_cur_frm_buf, &ps_dec->as_recent_fld[1][1],
ps_dec->u2_frame_width);
ps_dec->as_recent_fld[0][0] = ps_dec->s_cur_frm_buf;
ps_dec->as_recent_fld[0][1] = ps_dec->as_recent_fld[1][1];
}
else if ((NULL != ps_dec->as_recent_fld[1][0].pu1_y) && (NULL == ps_dec->as_recent_fld[0][0].pu1_y))
{
ps_dec->as_recent_fld[0][0] = ps_dec->as_recent_fld[1][0];
ps_dec->as_recent_fld[0][1] = ps_dec->as_recent_fld[1][1];
}
else if ((NULL == ps_dec->as_recent_fld[1][0].pu1_y) && (NULL != ps_dec->as_recent_fld[0][0].pu1_y))
{
ps_dec->as_recent_fld[1][0] = ps_dec->as_recent_fld[0][0];
ps_dec->as_recent_fld[1][1] = ps_dec->as_recent_fld[0][1];
}
ps_dec->as_ref_buf[FORW][TOP] = ps_dec->as_recent_fld[0][0];
ps_dec->as_ref_buf[FORW][BOTTOM] = ps_dec->as_recent_fld[0][1];
ps_dec->as_ref_buf[BACK][TOP] = ps_dec->as_recent_fld[1][0];
ps_dec->as_ref_buf[BACK][BOTTOM] = ps_dec->as_recent_fld[1][1];
}
return e_error;
}
| IMPEG2D_ERROR_CODES_T impeg2d_pre_pic_dec_proc(dec_state_t *ps_dec)
{
WORD32 u4_get_disp;
pic_buf_t *ps_disp_pic;
IMPEG2D_ERROR_CODES_T e_error = (IMPEG2D_ERROR_CODES_T)IVD_ERROR_NONE;
u4_get_disp = 0;
ps_disp_pic = NULL;
/* Field Picture */
if(ps_dec->u2_picture_structure != FRAME_PICTURE)
{
ps_dec->u2_num_vert_mb = (ps_dec->u2_vertical_size + 31) >> 5;
if(ps_dec->u2_num_flds_decoded == 0)
{
pic_buf_t *ps_pic_buf;
u4_get_disp = 1;
ps_pic_buf = impeg2_buf_mgr_get_next_free(ps_dec->pv_pic_buf_mg, &ps_dec->i4_cur_buf_id);
if (NULL == ps_pic_buf)
{
return IMPEG2D_NO_FREE_BUF_ERR;
}
impeg2_buf_mgr_set_status((buf_mgr_t *)ps_dec->pv_pic_buf_mg, ps_dec->i4_cur_buf_id, BUF_MGR_DISP);
impeg2_buf_mgr_set_status((buf_mgr_t *)ps_dec->pv_pic_buf_mg, ps_dec->i4_cur_buf_id, BUF_MGR_REF);
ps_pic_buf->u4_ts = ps_dec->u4_inp_ts;
ps_pic_buf->e_pic_type = ps_dec->e_pic_type;
ps_dec->ps_cur_pic = ps_pic_buf;
ps_dec->s_cur_frm_buf.pu1_y = ps_pic_buf->pu1_y;
ps_dec->s_cur_frm_buf.pu1_u = ps_pic_buf->pu1_u;
ps_dec->s_cur_frm_buf.pu1_v = ps_pic_buf->pu1_v;
}
if(ps_dec->u2_picture_structure == TOP_FIELD)
{
ps_dec->u2_fld_parity = TOP;
}
else
{
ps_dec->u2_fld_parity = BOTTOM;
}
ps_dec->u2_field_dct = 0;
ps_dec->u2_read_dct_type = 0;
ps_dec->u2_read_motion_type = 1;
ps_dec->u2_fld_pic = 1;
ps_dec->u2_frm_pic = 0;
ps_dec->ps_func_forw_or_back = gas_impeg2d_func_fld_fw_or_bk;
ps_dec->ps_func_bi_direct = gas_impeg2d_func_fld_bi_direct;
}
/* Frame Picture */
else
{
pic_buf_t *ps_pic_buf;
ps_dec->u2_num_vert_mb = (ps_dec->u2_vertical_size + 15) >> 4;
u4_get_disp = 1;
ps_pic_buf = impeg2_buf_mgr_get_next_free(ps_dec->pv_pic_buf_mg, &ps_dec->i4_cur_buf_id);
if (NULL == ps_pic_buf)
{
return IMPEG2D_NO_FREE_BUF_ERR;
}
impeg2_buf_mgr_set_status((buf_mgr_t *)ps_dec->pv_pic_buf_mg, ps_dec->i4_cur_buf_id, BUF_MGR_DISP);
impeg2_buf_mgr_set_status((buf_mgr_t *)ps_dec->pv_pic_buf_mg, ps_dec->i4_cur_buf_id, BUF_MGR_REF);
ps_pic_buf->u4_ts = ps_dec->u4_inp_ts;
ps_pic_buf->e_pic_type = ps_dec->e_pic_type;
ps_dec->ps_cur_pic = ps_pic_buf;
ps_dec->s_cur_frm_buf.pu1_y = ps_pic_buf->pu1_y;
ps_dec->s_cur_frm_buf.pu1_u = ps_pic_buf->pu1_u;
ps_dec->s_cur_frm_buf.pu1_v = ps_pic_buf->pu1_v;
if(ps_dec->u2_frame_pred_frame_dct == 0)
{
ps_dec->u2_read_dct_type = 1;
ps_dec->u2_read_motion_type = 1;
}
else
{
ps_dec->u2_read_dct_type = 0;
ps_dec->u2_read_motion_type = 0;
ps_dec->u2_motion_type = 2;
ps_dec->u2_field_dct = 0;
}
ps_dec->u2_fld_parity = TOP;
ps_dec->u2_fld_pic = 0;
ps_dec->u2_frm_pic = 1;
ps_dec->ps_func_forw_or_back = gas_impeg2d_func_frm_fw_or_bk;
ps_dec->ps_func_bi_direct = gas_impeg2d_func_frm_bi_direct;
}
ps_dec->u2_def_dc_pred[Y_LUMA] = 128 << ps_dec->u2_intra_dc_precision;
ps_dec->u2_def_dc_pred[U_CHROMA] = 128 << ps_dec->u2_intra_dc_precision;
ps_dec->u2_def_dc_pred[V_CHROMA] = 128 << ps_dec->u2_intra_dc_precision;
ps_dec->u2_num_mbs_left = ps_dec->u2_num_horiz_mb * ps_dec->u2_num_vert_mb;
if(u4_get_disp)
{
if(ps_dec->u4_num_frames_decoded > 1)
{
ps_disp_pic = impeg2_disp_mgr_get(&ps_dec->s_disp_mgr, &ps_dec->i4_disp_buf_id);
}
ps_dec->ps_disp_pic = ps_disp_pic;
if(ps_disp_pic)
{
if(1 == ps_dec->u4_share_disp_buf)
{
ps_dec->ps_disp_frm_buf->pv_y_buf = ps_disp_pic->pu1_y;
if(IV_YUV_420P == ps_dec->i4_chromaFormat)
{
ps_dec->ps_disp_frm_buf->pv_u_buf = ps_disp_pic->pu1_u;
ps_dec->ps_disp_frm_buf->pv_v_buf = ps_disp_pic->pu1_v;
}
else
{
UWORD8 *pu1_buf;
pu1_buf = ps_dec->as_disp_buffers[ps_disp_pic->i4_buf_id].pu1_bufs[1];
ps_dec->ps_disp_frm_buf->pv_u_buf = pu1_buf;
pu1_buf = ps_dec->as_disp_buffers[ps_disp_pic->i4_buf_id].pu1_bufs[2];
ps_dec->ps_disp_frm_buf->pv_v_buf = pu1_buf;
}
}
}
}
switch(ps_dec->e_pic_type)
{
case I_PIC:
{
ps_dec->pf_decode_slice = impeg2d_dec_i_slice;
break;
}
case D_PIC:
{
ps_dec->pf_decode_slice = impeg2d_dec_d_slice;
break;
}
case P_PIC:
{
ps_dec->pf_decode_slice = impeg2d_dec_p_b_slice;
ps_dec->pu2_mb_type = gau2_impeg2d_p_mb_type;
break;
}
case B_PIC:
{
ps_dec->pf_decode_slice = impeg2d_dec_p_b_slice;
ps_dec->pu2_mb_type = gau2_impeg2d_b_mb_type;
break;
}
default:
return IMPEG2D_INVALID_PIC_TYPE;
}
/*************************************************************************/
/* Set the reference pictures */
/*************************************************************************/
/* Error resilience: If forward and backward pictures are going to be NULL*/
/* then assign both to the current */
/* if one of them NULL then we will assign the non null to the NULL one */
if(ps_dec->e_pic_type == P_PIC)
{
if (NULL == ps_dec->as_recent_fld[1][0].pu1_y)
{
ps_dec->as_recent_fld[1][0] = ps_dec->s_cur_frm_buf;
}
if (NULL == ps_dec->as_recent_fld[1][1].pu1_y)
{
impeg2d_get_bottom_field_buf(&ps_dec->s_cur_frm_buf, &ps_dec->as_recent_fld[1][1],
ps_dec->u2_frame_width);
}
ps_dec->as_ref_buf[FORW][TOP] = ps_dec->as_recent_fld[1][0];
ps_dec->as_ref_buf[FORW][BOTTOM] = ps_dec->as_recent_fld[1][1];
}
else if(ps_dec->e_pic_type == B_PIC)
{
if((NULL == ps_dec->as_recent_fld[1][0].pu1_y) && (NULL == ps_dec->as_recent_fld[0][0].pu1_y))
{
ps_dec->as_recent_fld[1][0] = ps_dec->s_cur_frm_buf;
impeg2d_get_bottom_field_buf(&ps_dec->s_cur_frm_buf, &ps_dec->as_recent_fld[1][1],
ps_dec->u2_frame_width);
ps_dec->as_recent_fld[0][0] = ps_dec->s_cur_frm_buf;
ps_dec->as_recent_fld[0][1] = ps_dec->as_recent_fld[1][1];
}
else if ((NULL != ps_dec->as_recent_fld[1][0].pu1_y) && (NULL == ps_dec->as_recent_fld[0][0].pu1_y))
{
ps_dec->as_recent_fld[0][0] = ps_dec->as_recent_fld[1][0];
ps_dec->as_recent_fld[0][1] = ps_dec->as_recent_fld[1][1];
}
else if ((NULL == ps_dec->as_recent_fld[1][0].pu1_y) && (NULL != ps_dec->as_recent_fld[0][0].pu1_y))
{
ps_dec->as_recent_fld[1][0] = ps_dec->as_recent_fld[0][0];
ps_dec->as_recent_fld[1][1] = ps_dec->as_recent_fld[0][1];
}
ps_dec->as_ref_buf[FORW][TOP] = ps_dec->as_recent_fld[0][0];
ps_dec->as_ref_buf[FORW][BOTTOM] = ps_dec->as_recent_fld[0][1];
ps_dec->as_ref_buf[BACK][TOP] = ps_dec->as_recent_fld[1][0];
ps_dec->as_ref_buf[BACK][BOTTOM] = ps_dec->as_recent_fld[1][1];
}
return e_error;
}
| C | Android | 0 |
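The commit message in the row above describes the fix ("make sure that [the bit stream] is not read beyond the allocated size"), but the previewed function is one the commit left unchanged (`vul` is 0), so the bounds check itself is not visible here. As a rough, self-contained C sketch of that general technique — refusing to read bits past the end of the allocated buffer — consider the following; the `bitstream_t` layout, the function names, and the policy of returning 0 on exhaustion are assumptions made for the example, not the libmpeg2 code.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical bitstream reader; the real libmpeg2 structure differs. */
typedef struct {
    const uint8_t *buf;   /* start of the allocated buffer        */
    size_t         size;  /* allocated size in bytes              */
    size_t         pos;   /* current bit offset from start of buf */
} bitstream_t;

/* Read n bits (n <= 32), stopping at the end of the allocation instead
 * of reading beyond it.  An exhausted stream returns 0 and clears *ok. */
static unsigned bs_get_bits(bitstream_t *bs, unsigned n, int *ok)
{
    unsigned value = 0;

    for (unsigned i = 0; i < n; i++) {
        size_t byte = bs->pos >> 3;

        if (byte >= bs->size) {        /* would read past the buffer */
            *ok = 0;
            return 0;
        }
        unsigned bit = 7u - (unsigned)(bs->pos & 7u);
        value = (value << 1) | ((bs->buf[byte] >> bit) & 1u);
        bs->pos++;
    }
    *ok = 1;
    return value;
}

int main(void)
{
    const uint8_t data[2] = { 0xAB, 0xCD };
    bitstream_t bs = { data, sizeof(data), 0 };
    int ok;

    printf("first byte: 0x%02X\n", bs_get_bits(&bs, 8, &ok)); /* 0xAB */
    bs_get_bits(&bs, 8, &ok);                                 /* 0xCD */
    bs_get_bits(&bs, 8, &ok);                                 /* past the end */
    printf("read past end rejected: %s\n", ok ? "no" : "yes");
    return 0;
}
```

The point of the check is that `byte >= bs->size` is evaluated before every access, so a truncated or malicious stream ends the read cleanly instead of walking off the allocation.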
CVE-2013-1929 | https://www.cvedetails.com/cve/CVE-2013-1929/ | CWE-119 | https://github.com/torvalds/linux/commit/715230a44310a8cf66fbfb5a46f9a62a9b2de424 | 715230a44310a8cf66fbfb5a46f9a62a9b2de424 | tg3: fix length overflow in VPD firmware parsing
Commit 184b89044fb6e2a74611dafa69b1dce0d98612c6 ("tg3: Use VPD fw version
when present") introduced VPD parsing that contained a potential length
overflow.
Limit the hardware's reported firmware string length (max 255 bytes) to
stay inside the driver's firmware string length (32 bytes). On overflow,
truncate the formatted firmware string instead of potentially overwriting
portions of the tg3 struct.
http://cansecwest.com/slides/2013/PrivateCore%20CSW%202013.pdf
Signed-off-by: Kees Cook <keescook@chromium.org>
Reported-by: Oded Horovitz <oded@privatecore.com>
Reported-by: Brad Spengler <spender@grsecurity.net>
Cc: stable@vger.kernel.org
Cc: Matt Carlson <mcarlson@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net> | static void tg3_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
if (dev) {
struct tg3 *tp = netdev_priv(dev);
release_firmware(tp->fw);
tg3_reset_task_cancel(tp);
if (tg3_flag(tp, USE_PHYLIB)) {
tg3_phy_fini(tp);
tg3_mdio_fini(tp);
}
unregister_netdev(dev);
if (tp->aperegs) {
iounmap(tp->aperegs);
tp->aperegs = NULL;
}
if (tp->regs) {
iounmap(tp->regs);
tp->regs = NULL;
}
free_netdev(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
}
| static void tg3_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
if (dev) {
struct tg3 *tp = netdev_priv(dev);
release_firmware(tp->fw);
tg3_reset_task_cancel(tp);
if (tg3_flag(tp, USE_PHYLIB)) {
tg3_phy_fini(tp);
tg3_mdio_fini(tp);
}
unregister_netdev(dev);
if (tp->aperegs) {
iounmap(tp->aperegs);
tp->aperegs = NULL;
}
if (tp->regs) {
iounmap(tp->regs);
tp->regs = NULL;
}
free_netdev(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
}
| C | linux | 0 |
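As with the previous row, `tg3_remove_one` is shown only because the commit touched it; the overflow fix itself was in the VPD parsing path. The message is specific about the pattern, though: a device-reported length (up to 255 bytes) must be clamped to the driver's 32-byte firmware string, truncating on overflow instead of writing past it. A small user-space C sketch of that clamp follows; the `fake_nic` structure, `FW_VER_SIZE`, and the `snprintf` formatting are illustrative stand-ins, not the tg3 driver's actual fields or code.

```c
#include <stdio.h>
#include <string.h>

#define FW_VER_SIZE 32            /* driver-side buffer, as in the message */

struct fake_nic {
    char fw_ver[FW_VER_SIZE];
    int  irq;                     /* stands in for fields that an
                                     overflowing copy would clobber        */
};

/* Copy a device-reported firmware string whose length field is untrusted
 * (up to 255 bytes).  The explicit clamp plus the "%.*s" precision make
 * the result always fit fw_ver, truncating instead of overflowing.       */
static void set_fw_ver(struct fake_nic *nic, const char *vpd, size_t vpd_len)
{
    size_t len = vpd_len;

    if (len > FW_VER_SIZE - 1)    /* hardware may claim up to 255 bytes */
        len = FW_VER_SIZE - 1;

    snprintf(nic->fw_ver, sizeof(nic->fw_ver), "%.*s", (int)len, vpd);
}

int main(void)
{
    struct fake_nic nic = { .fw_ver = "", .irq = 42 };
    char evil[255];

    memset(evil, 'A', sizeof(evil));          /* hostile 255-byte field */
    set_fw_ver(&nic, evil, sizeof(evil));

    printf("fw_ver (truncated to %zu chars): %s\n",
           strlen(nic.fw_ver), nic.fw_ver);
    printf("neighbouring field intact: irq=%d\n", nic.irq);
    return 0;
}
```

Using an explicit clamp together with a length-bounded format (`%.*s` into a `sizeof`-bounded `snprintf`) means even a hostile 255-byte field cannot reach the neighbouring members of the structure.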
CVE-2016-10165 | https://www.cvedetails.com/cve/CVE-2016-10165/ | CWE-125 | https://github.com/mm2/Little-CMS/commit/5ca71a7bc18b6897ab21d815d15e218e204581e2 | 5ca71a7bc18b6897ab21d815d15e218e204581e2 | Added an extra check to MLU bounds
Thanks to Ibrahim el-sayed for spotting the bug | void *Type_LUT16_Read(struct _cms_typehandler_struct* self, cmsIOHANDLER* io, cmsUInt32Number* nItems, cmsUInt32Number SizeOfTag)
{
cmsUInt8Number InputChannels, OutputChannels, CLUTpoints;
cmsPipeline* NewLUT = NULL;
cmsUInt32Number nTabSize;
cmsFloat64Number Matrix[3*3];
cmsUInt16Number InputEntries, OutputEntries;
*nItems = 0;
if (!_cmsReadUInt8Number(io, &InputChannels)) return NULL;
if (!_cmsReadUInt8Number(io, &OutputChannels)) return NULL;
if (!_cmsReadUInt8Number(io, &CLUTpoints)) return NULL; // 255 maximum
if (!_cmsReadUInt8Number(io, NULL)) return NULL;
if (InputChannels > cmsMAXCHANNELS) goto Error;
if (OutputChannels > cmsMAXCHANNELS) goto Error;
NewLUT = cmsPipelineAlloc(self ->ContextID, InputChannels, OutputChannels);
if (NewLUT == NULL) goto Error;
if (!_cmsRead15Fixed16Number(io, &Matrix[0])) goto Error;
if (!_cmsRead15Fixed16Number(io, &Matrix[1])) goto Error;
if (!_cmsRead15Fixed16Number(io, &Matrix[2])) goto Error;
if (!_cmsRead15Fixed16Number(io, &Matrix[3])) goto Error;
if (!_cmsRead15Fixed16Number(io, &Matrix[4])) goto Error;
if (!_cmsRead15Fixed16Number(io, &Matrix[5])) goto Error;
if (!_cmsRead15Fixed16Number(io, &Matrix[6])) goto Error;
if (!_cmsRead15Fixed16Number(io, &Matrix[7])) goto Error;
if (!_cmsRead15Fixed16Number(io, &Matrix[8])) goto Error;
if ((InputChannels == 3) && !_cmsMAT3isIdentity((cmsMAT3*) Matrix)) {
if (!cmsPipelineInsertStage(NewLUT, cmsAT_END, cmsStageAllocMatrix(self ->ContextID, 3, 3, Matrix, NULL)))
goto Error;
}
if (!_cmsReadUInt16Number(io, &InputEntries)) goto Error;
if (!_cmsReadUInt16Number(io, &OutputEntries)) goto Error;
if (InputEntries > 0x7FFF || OutputEntries > 0x7FFF) goto Error;
if (CLUTpoints == 1) goto Error; // Impossible value, 0 for no CLUT and then 2 at least
if (!Read16bitTables(self ->ContextID, io, NewLUT, InputChannels, InputEntries)) goto Error;
nTabSize = uipow(OutputChannels, CLUTpoints, InputChannels);
if (nTabSize == (cmsUInt32Number) -1) goto Error;
if (nTabSize > 0) {
cmsUInt16Number *T;
T = (cmsUInt16Number*) _cmsCalloc(self ->ContextID, nTabSize, sizeof(cmsUInt16Number));
if (T == NULL) goto Error;
if (!_cmsReadUInt16Array(io, nTabSize, T)) {
_cmsFree(self ->ContextID, T);
goto Error;
}
if (!cmsPipelineInsertStage(NewLUT, cmsAT_END, cmsStageAllocCLut16bit(self ->ContextID, CLUTpoints, InputChannels, OutputChannels, T))) {
_cmsFree(self ->ContextID, T);
goto Error;
}
_cmsFree(self ->ContextID, T);
}
if (!Read16bitTables(self ->ContextID, io, NewLUT, OutputChannels, OutputEntries)) goto Error;
*nItems = 1;
return NewLUT;
Error:
if (NewLUT != NULL) cmsPipelineFree(NewLUT);
return NULL;
cmsUNUSED_PARAMETER(SizeOfTag);
}
| void *Type_LUT16_Read(struct _cms_typehandler_struct* self, cmsIOHANDLER* io, cmsUInt32Number* nItems, cmsUInt32Number SizeOfTag)
{
cmsUInt8Number InputChannels, OutputChannels, CLUTpoints;
cmsPipeline* NewLUT = NULL;
cmsUInt32Number nTabSize;
cmsFloat64Number Matrix[3*3];
cmsUInt16Number InputEntries, OutputEntries;
*nItems = 0;
if (!_cmsReadUInt8Number(io, &InputChannels)) return NULL;
if (!_cmsReadUInt8Number(io, &OutputChannels)) return NULL;
if (!_cmsReadUInt8Number(io, &CLUTpoints)) return NULL; // 255 maximum
if (!_cmsReadUInt8Number(io, NULL)) return NULL;
if (InputChannels > cmsMAXCHANNELS) goto Error;
if (OutputChannels > cmsMAXCHANNELS) goto Error;
NewLUT = cmsPipelineAlloc(self ->ContextID, InputChannels, OutputChannels);
if (NewLUT == NULL) goto Error;
if (!_cmsRead15Fixed16Number(io, &Matrix[0])) goto Error;
if (!_cmsRead15Fixed16Number(io, &Matrix[1])) goto Error;
if (!_cmsRead15Fixed16Number(io, &Matrix[2])) goto Error;
if (!_cmsRead15Fixed16Number(io, &Matrix[3])) goto Error;
if (!_cmsRead15Fixed16Number(io, &Matrix[4])) goto Error;
if (!_cmsRead15Fixed16Number(io, &Matrix[5])) goto Error;
if (!_cmsRead15Fixed16Number(io, &Matrix[6])) goto Error;
if (!_cmsRead15Fixed16Number(io, &Matrix[7])) goto Error;
if (!_cmsRead15Fixed16Number(io, &Matrix[8])) goto Error;
if ((InputChannels == 3) && !_cmsMAT3isIdentity((cmsMAT3*) Matrix)) {
if (!cmsPipelineInsertStage(NewLUT, cmsAT_END, cmsStageAllocMatrix(self ->ContextID, 3, 3, Matrix, NULL)))
goto Error;
}
if (!_cmsReadUInt16Number(io, &InputEntries)) goto Error;
if (!_cmsReadUInt16Number(io, &OutputEntries)) goto Error;
if (InputEntries > 0x7FFF || OutputEntries > 0x7FFF) goto Error;
if (CLUTpoints == 1) goto Error; // Impossible value, 0 for no CLUT and then 2 at least
if (!Read16bitTables(self ->ContextID, io, NewLUT, InputChannels, InputEntries)) goto Error;
nTabSize = uipow(OutputChannels, CLUTpoints, InputChannels);
if (nTabSize == (cmsUInt32Number) -1) goto Error;
if (nTabSize > 0) {
cmsUInt16Number *T;
T = (cmsUInt16Number*) _cmsCalloc(self ->ContextID, nTabSize, sizeof(cmsUInt16Number));
if (T == NULL) goto Error;
if (!_cmsReadUInt16Array(io, nTabSize, T)) {
_cmsFree(self ->ContextID, T);
goto Error;
}
if (!cmsPipelineInsertStage(NewLUT, cmsAT_END, cmsStageAllocCLut16bit(self ->ContextID, CLUTpoints, InputChannels, OutputChannels, T))) {
_cmsFree(self ->ContextID, T);
goto Error;
}
_cmsFree(self ->ContextID, T);
}
if (!Read16bitTables(self ->ContextID, io, NewLUT, OutputChannels, OutputEntries)) goto Error;
*nItems = 1;
return NewLUT;
Error:
if (NewLUT != NULL) cmsPipelineFree(NewLUT);
return NULL;
cmsUNUSED_PARAMETER(SizeOfTag);
}
| C | Little-CMS | 0 |
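Once more the previewed function (`Type_LUT16_Read`) is not where the fix landed; the message refers to an extra bounds check on the MLU (multi-localized Unicode) tag reader. A generic sketch of that kind of check — rejecting a directory record whose offset plus length would run past the tag that contains it — is below; the `record_t` layout and field names are invented for the illustration and do not match the lcms2 structures.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical directory record inside a variable-sized tag: each entry
 * points at a string stored elsewhere in the same tag.                  */
typedef struct {
    uint32_t offset;   /* byte offset of the string, relative to the tag */
    uint32_t length;   /* byte length of the string                      */
} record_t;

/* Accept the record only if [offset, offset + length) lies entirely
 * inside the tag.  The addition is done in 64 bits so a crafted
 * offset/length pair cannot wrap around and sneak past the comparison.  */
static int record_in_bounds(const record_t *r, uint32_t tag_size)
{
    uint64_t end = (uint64_t)r->offset + (uint64_t)r->length;
    return r->offset <= tag_size && end <= tag_size;
}

int main(void)
{
    uint32_t tag_size = 1024;

    record_t ok_rec  = {  64, 100 };            /* fits                 */
    record_t bad_rec = { 900, 400 };            /* runs past the end    */
    record_t wrap    = { 0xFFFFFFF0u, 0x20u };  /* would wrap in 32 bit */

    printf("ok_rec  in bounds: %d\n", record_in_bounds(&ok_rec,  tag_size));
    printf("bad_rec in bounds: %d\n", record_in_bounds(&bad_rec, tag_size));
    printf("wrap    in bounds: %d\n", record_in_bounds(&wrap,    tag_size));
    return 0;
}
```

Doing the addition in 64 bits is the detail worth noting: with 32-bit arithmetic a crafted `offset`/`length` pair could wrap around and still pass the comparison.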
CVE-2016-3839 | https://www.cvedetails.com/cve/CVE-2016-3839/ | CWE-284 | https://android.googlesource.com/platform/system/bt/+/472271b153c5dc53c28beac55480a8d8434b2d5c | 472271b153c5dc53c28beac55480a8d8434b2d5c | DO NOT MERGE Fix potential DoS caused by delivering signal to BT process
Bug: 28885210
Change-Id: I63866d894bfca47464d6e42e3fb0357c4f94d360
Conflicts:
btif/co/bta_hh_co.c
btif/src/btif_core.c
Merge conflict resolution of ag/1161415 (referencing ag/1164670)
- Directly into mnc-mr2-release
| static int start_audio_datapath(struct a2dp_stream_common *common)
{
INFO("state %d", common->state);
if (common->ctrl_fd == AUDIO_SKT_DISCONNECTED) {
INFO("%s AUDIO_SKT_DISCONNECTED", __func__);
return -1;
}
int oldstate = common->state;
common->state = AUDIO_A2DP_STATE_STARTING;
int a2dp_status = a2dp_command(common, A2DP_CTRL_CMD_START);
if (a2dp_status < 0)
{
ERROR("%s Audiopath start failed (status %d)", __func__, a2dp_status);
common->state = oldstate;
return -1;
}
else if (a2dp_status == A2DP_CTRL_ACK_INCALL_FAILURE)
{
ERROR("%s Audiopath start failed - in call, move to suspended", __func__);
common->state = oldstate;
return -1;
}
/* connect socket if not yet connected */
if (common->audio_fd == AUDIO_SKT_DISCONNECTED)
{
common->audio_fd = skt_connect(A2DP_DATA_PATH, common->buffer_sz);
if (common->audio_fd < 0)
{
common->state = oldstate;
return -1;
}
common->state = AUDIO_A2DP_STATE_STARTED;
}
return 0;
}
| static int start_audio_datapath(struct a2dp_stream_common *common)
{
INFO("state %d", common->state);
if (common->ctrl_fd == AUDIO_SKT_DISCONNECTED) {
INFO("%s AUDIO_SKT_DISCONNECTED", __func__);
return -1;
}
int oldstate = common->state;
common->state = AUDIO_A2DP_STATE_STARTING;
int a2dp_status = a2dp_command(common, A2DP_CTRL_CMD_START);
if (a2dp_status < 0)
{
ERROR("%s Audiopath start failed (status %d)", __func__, a2dp_status);
common->state = oldstate;
return -1;
}
else if (a2dp_status == A2DP_CTRL_ACK_INCALL_FAILURE)
{
ERROR("%s Audiopath start failed - in call, move to suspended", __func__);
common->state = oldstate;
return -1;
}
/* connect socket if not yet connected */
if (common->audio_fd == AUDIO_SKT_DISCONNECTED)
{
common->audio_fd = skt_connect(A2DP_DATA_PATH, common->buffer_sz);
if (common->audio_fd < 0)
{
common->state = oldstate;
return -1;
}
common->state = AUDIO_A2DP_STATE_STARTED;
}
return 0;
}
| C | Android | 0 |
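The commit title above only says that a signal delivered to the Bluetooth process could cause a denial of service; the row does not show the hardened code, and the exact mechanism of the patch is not stated here. One common way this class of bug is hardened is to retry system calls that fail with `EINTR` rather than letting an interrupted call be treated as a fatal error; the sketch below shows that generic pattern and should be read as an assumption about the technique, not as the stack's actual change.

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Keep calling read() until it makes progress or fails for a real
 * reason: a signal that interrupts the call (EINTR) is retried rather
 * than being surfaced as an error the caller might mishandle.          */
static ssize_t read_no_intr(int fd, void *buf, size_t count)
{
    ssize_t n;

    do {
        n = read(fd, buf, count);
    } while (n < 0 && errno == EINTR);

    return n;
}

int main(void)
{
    char buf[16];

    /* Read from stdin just to exercise the helper; pipe something in. */
    ssize_t n = read_no_intr(STDIN_FILENO, buf, sizeof(buf));

    if (n < 0)
        printf("read failed: %s\n", strerror(errno));
    else
        printf("read %zd bytes\n", n);
    return 0;
}
```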
CVE-2017-10971 | https://www.cvedetails.com/cve/CVE-2017-10971/ | CWE-119 | https://cgit.freedesktop.org/xorg/xserver/commit/?id=215f894965df5fb0bb45b107d84524e700d2073c | 215f894965df5fb0bb45b107d84524e700d2073c | null | UpdateTouchesForGrab(DeviceIntPtr mouse)
{
int i;
if (!mouse->touch || mouse->deviceGrab.fromPassiveGrab)
return;
for (i = 0; i < mouse->touch->num_touches; i++) {
TouchPointInfoPtr ti = mouse->touch->touches + i;
TouchListener *listener = &ti->listeners[0];
GrabPtr grab = mouse->deviceGrab.grab;
if (ti->active &&
CLIENT_BITS(listener->listener) == grab->resource) {
listener->listener = grab->resource;
listener->level = grab->grabtype;
listener->state = LISTENER_IS_OWNER;
listener->window = grab->window;
if (grab->grabtype == CORE || grab->grabtype == XI ||
!xi2mask_isset(grab->xi2mask, mouse, XI_TouchBegin))
listener->type = LISTENER_POINTER_GRAB;
else
listener->type = LISTENER_GRAB;
if (listener->grab)
FreeGrab(listener->grab);
listener->grab = AllocGrab(grab);
}
}
}
| UpdateTouchesForGrab(DeviceIntPtr mouse)
{
int i;
if (!mouse->touch || mouse->deviceGrab.fromPassiveGrab)
return;
for (i = 0; i < mouse->touch->num_touches; i++) {
TouchPointInfoPtr ti = mouse->touch->touches + i;
TouchListener *listener = &ti->listeners[0];
GrabPtr grab = mouse->deviceGrab.grab;
if (ti->active &&
CLIENT_BITS(listener->listener) == grab->resource) {
listener->listener = grab->resource;
listener->level = grab->grabtype;
listener->state = LISTENER_IS_OWNER;
listener->window = grab->window;
if (grab->grabtype == CORE || grab->grabtype == XI ||
!xi2mask_isset(grab->xi2mask, mouse, XI_TouchBegin))
listener->type = LISTENER_POINTER_GRAB;
else
listener->type = LISTENER_GRAB;
if (listener->grab)
FreeGrab(listener->grab);
listener->grab = AllocGrab(grab);
}
}
}
| C | xserver | 0 |
CVE-2013-0910 | https://www.cvedetails.com/cve/CVE-2013-0910/ | CWE-287 | https://github.com/chromium/chromium/commit/ac8bd041b81e46e4e4fcd5021aaa5499703952e6 | ac8bd041b81e46e4e4fcd5021aaa5499703952e6 | Follow-on fixes and naming changes for https://codereview.chromium.org/12086077/
BUG=172573
Review URL: https://codereview.chromium.org/12177018
git-svn-id: svn://svn.chromium.org/chrome/trunk/src@180600 0039d316-1c4b-4281-b951-d872f2087c98 | virtual ResourceContext* GetResourceContext() {
return context_;
}
| virtual ResourceContext* GetResourceContext() {
return context_;
}
| C | Chrome | 0 |