<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: xsltCompilePatternInternal(const xmlChar *pattern, xmlDocPtr doc, xmlNodePtr node, xsltStylesheetPtr style, xsltTransformContextPtr runtime, int novar) { xsltParserContextPtr ctxt = NULL; xsltCompMatchPtr element, first = NULL, previous = NULL; int current, start, end, level, j; if (pattern == NULL) { xsltTransformError(NULL, NULL, node, "xsltCompilePattern : NULL pattern\n"); return(NULL); } ctxt = xsltNewParserContext(style, runtime); if (ctxt == NULL) return(NULL); ctxt->doc = doc; ctxt->elem = node; current = end = 0; while (pattern[current] != 0) { start = current; while (IS_BLANK_CH(pattern[current])) current++; end = current; level = 0; while ((pattern[end] != 0) && ((pattern[end] != '|') || (level != 0))) { if (pattern[end] == '[') level++; else if (pattern[end] == ']') level--; else if (pattern[end] == '\'') { end++; while ((pattern[end] != 0) && (pattern[end] != '\'')) end++; } else if (pattern[end] == '"') { end++; while ((pattern[end] != 0) && (pattern[end] != '"')) end++; } end++; } if (current == end) { xsltTransformError(NULL, NULL, node, "xsltCompilePattern : NULL pattern\n"); goto error; } element = xsltNewCompMatch(); if (element == NULL) { goto error; } if (first == NULL) first = element; else if (previous != NULL) previous->next = element; previous = element; ctxt->comp = element; ctxt->base = xmlStrndup(&pattern[start], end - start); if (ctxt->base == NULL) goto error; ctxt->cur = &(ctxt->base)[current - start]; element->pattern = ctxt->base; element->nsList = xmlGetNsList(doc, node); j = 0; if (element->nsList != NULL) { while (element->nsList[j] != NULL) j++; } element->nsNr = j; #ifdef WITH_XSLT_DEBUG_PATTERN xsltGenericDebug(xsltGenericDebugContext, "xsltCompilePattern : parsing '%s'\n", element->pattern); #endif /* Preset default priority to be zero. This may be changed by xsltCompileLocationPathPattern. */ element->priority = 0; xsltCompileLocationPathPattern(ctxt, novar); if (ctxt->error) { xsltTransformError(NULL, style, node, "xsltCompilePattern : failed to compile '%s'\n", element->pattern); if (style != NULL) style->errors++; goto error; } /* * Reverse for faster interpretation. 
*/ xsltReverseCompMatch(ctxt, element); /* * Set-up the priority */ if (element->priority == 0) { /* if not yet determined */ if (((element->steps[0].op == XSLT_OP_ELEM) || (element->steps[0].op == XSLT_OP_ATTR) || (element->steps[0].op == XSLT_OP_PI)) && (element->steps[0].value != NULL) && (element->steps[1].op == XSLT_OP_END)) { ; /* previously preset */ } else if ((element->steps[0].op == XSLT_OP_ATTR) && (element->steps[0].value2 != NULL) && (element->steps[1].op == XSLT_OP_END)) { element->priority = -0.25; } else if ((element->steps[0].op == XSLT_OP_NS) && (element->steps[0].value != NULL) && (element->steps[1].op == XSLT_OP_END)) { element->priority = -0.25; } else if ((element->steps[0].op == XSLT_OP_ATTR) && (element->steps[0].value == NULL) && (element->steps[0].value2 == NULL) && (element->steps[1].op == XSLT_OP_END)) { element->priority = -0.5; } else if (((element->steps[0].op == XSLT_OP_PI) || (element->steps[0].op == XSLT_OP_TEXT) || (element->steps[0].op == XSLT_OP_ALL) || (element->steps[0].op == XSLT_OP_NODE) || (element->steps[0].op == XSLT_OP_COMMENT)) && (element->steps[1].op == XSLT_OP_END)) { element->priority = -0.5; } else { element->priority = 0.5; } } #ifdef WITH_XSLT_DEBUG_PATTERN xsltGenericDebug(xsltGenericDebugContext, "xsltCompilePattern : parsed %s, default priority %f\n", element->pattern, element->priority); #endif if (pattern[end] == '|') end++; current = end; } if (end == 0) { xsltTransformError(NULL, style, node, "xsltCompilePattern : NULL pattern\n"); if (style != NULL) style->errors++; goto error; } xsltFreeParserContext(ctxt); return(first); error: if (ctxt != NULL) xsltFreeParserContext(ctxt); if (first != NULL) xsltFreeCompMatchList(first); return(NULL); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125'], 'message': 'Fix some case of pattern parsing errors We could accidentally hit an off by one string array access due to improper loop exit when parsing patterns'</s>
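For context on the CWE-125 above: the flaw is a one-past-the-end read. The quote-skipping inner loops can leave 'end' on the terminating NUL, after which the unconditional end++ steps past it and the outer loop guard reads out of bounds. A minimal, self-contained sketch of a bounds-safe version of that scanner (hypothetical helper, not the actual libxslt patch):

#include <stdio.h>

/* Hypothetical helper: scan one '|'-separated alternative of an XSLT
 * pattern without ever reading past the NUL. Returns the index just
 * past the alternative, or -1 on an unterminated string literal. */
static int scan_alternative(const char *pattern, int end)
{
    int level = 0;
    while (pattern[end] != 0 && (pattern[end] != '|' || level != 0)) {
        if (pattern[end] == '[')
            level++;
        else if (pattern[end] == ']')
            level--;
        else if (pattern[end] == '\'' || pattern[end] == '"') {
            char quote = pattern[end++];
            while (pattern[end] != 0 && pattern[end] != quote)
                end++;
            if (pattern[end] == 0)
                return -1;  /* unterminated literal: fail instead of running on */
        }
        end++;              /* safe: the loop guard re-checks pattern[end] */
    }
    return end;
}

int main(void)
{
    printf("%d\n", scan_alternative("a[@x='1'] | b", 0));  /* 10: stops at '|' */
    printf("%d\n", scan_alternative("a[@x='1", 0));         /* -1: unterminated */
    return 0;
}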
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: Database::Database() { QSqlDatabase db = QSqlDatabase::addDatabase(QLatin1String("QSQLITE")); QSettings qs; QStringList datapaths; int i; datapaths << g.qdBasePath.absolutePath(); datapaths << QDesktopServices::storageLocation(QDesktopServices::DataLocation); #if defined(Q_OS_UNIX) && ! defined(Q_OS_MAC) datapaths << QDir::homePath() + QLatin1String("/.config/Mumble"); #endif datapaths << QDir::homePath(); datapaths << QDir::currentPath(); datapaths << qApp->applicationDirPath(); datapaths << qs.value(QLatin1String("InstPath")).toString(); bool found = false; for (i = 0; (i < datapaths.size()) && ! found; i++) { if (!datapaths[i].isEmpty()) { QFile f(datapaths[i] + QLatin1String("/mumble.sqlite")); if (f.exists()) { db.setDatabaseName(f.fileName()); found = db.open(); } QFile f2(datapaths[i] + QLatin1String("/.mumble.sqlite")); if (f2.exists()) { db.setDatabaseName(f2.fileName()); found = db.open(); } } } if (! found) { for (i = 0; (i < datapaths.size()) && ! found; i++) { if (!datapaths[i].isEmpty()) { QDir::root().mkpath(datapaths[i]); #ifdef Q_OS_WIN QFile f(datapaths[i] + QLatin1String("/mumble.sqlite")); #else QFile f(datapaths[i] + QLatin1String("/.mumble.sqlite")); #endif db.setDatabaseName(f.fileName()); found = db.open(); } } } if (! found) { QMessageBox::critical(NULL, QLatin1String("Mumble"), tr("Mumble failed to initialize a database in any\nof the possible locations."), QMessageBox::Ok | QMessageBox::Default, QMessageBox::NoButton); qFatal("Database: Failed initialization"); } QFileInfo fi(db.databaseName()); if (! fi.isWritable()) { QMessageBox::critical(NULL, QLatin1String("Mumble"), tr("The database '%1' is read-only. Mumble cannot store server settings (i.e. 
SSL certificates) until you fix this problem.").arg(fi.filePath()), QMessageBox::Ok | QMessageBox::Default, QMessageBox::NoButton); qWarning("Database: Database is read-only"); } QSqlQuery query; query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `servers` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `name` TEXT, `hostname` TEXT, `port` INTEGER DEFAULT " MUMTEXT(DEFAULT_MUMBLE_PORT) ", `username` TEXT, `password` TEXT)")); query.exec(QLatin1String("ALTER TABLE `servers` ADD COLUMN `url` TEXT")); query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `comments` (`who` TEXT, `comment` BLOB, `seen` DATE)")); query.exec(QLatin1String("CREATE UNIQUE INDEX IF NOT EXISTS `comments_comment` ON `comments`(`who`, `comment`)")); query.exec(QLatin1String("CREATE INDEX IF NOT EXISTS `comments_seen` ON `comments`(`seen`)")); query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `blobs` (`hash` TEXT, `data` BLOB, `seen` DATE)")); query.exec(QLatin1String("CREATE UNIQUE INDEX IF NOT EXISTS `blobs_hash` ON `blobs`(`hash`)")); query.exec(QLatin1String("CREATE INDEX IF NOT EXISTS `blobs_seen` ON `blobs`(`seen`)")); query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `tokens` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `digest` BLOB, `token` TEXT)")); query.exec(QLatin1String("CREATE INDEX IF NOT EXISTS `tokens_host_port` ON `tokens`(`digest`)")); query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `shortcut` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `digest` BLOB, `shortcut` BLOB, `target` BLOB, `suppress` INTEGER)")); query.exec(QLatin1String("CREATE INDEX IF NOT EXISTS `shortcut_host_port` ON `shortcut`(`digest`)")); query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `udp` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `digest` BLOB)")); query.exec(QLatin1String("CREATE UNIQUE INDEX IF NOT EXISTS `udp_host_port` ON `udp`(`digest`)")); query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `cert` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `hostname` TEXT, `port` INTEGER, `digest` TEXT)")); query.exec(QLatin1String("CREATE UNIQUE INDEX IF NOT EXISTS `cert_host_port` ON `cert`(`hostname`,`port`)")); query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `friends` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `name` TEXT, `hash` TEXT)")); query.exec(QLatin1String("CREATE UNIQUE INDEX IF NOT EXISTS `friends_name` ON `friends`(`name`)")); query.exec(QLatin1String("CREATE UNIQUE INDEX IF NOT EXISTS `friends_hash` ON `friends`(`hash`)")); query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `muted` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `hash` TEXT)")); query.exec(QLatin1String("CREATE UNIQUE INDEX IF NOT EXISTS `muted_hash` ON `muted`(`hash`)")); query.exec(QLatin1String("CREATE TABLE IF NOT EXISTS `pingcache` (`id` INTEGER PRIMARY KEY AUTOINCREMENT, `hostname` TEXT, `port` INTEGER, `ping` INTEGER)")); query.exec(QLatin1String("CREATE UNIQUE INDEX IF NOT EXISTS `pingcache_host_port` ON `pingcache`(`hostname`,`port`)")); query.exec(QLatin1String("DELETE FROM `comments` WHERE `seen` < datetime('now', '-1 years')")); query.exec(QLatin1String("DELETE FROM `blobs` WHERE `seen` < datetime('now', '-1 months')")); query.exec(QLatin1String("VACUUM")); query.exec(QLatin1String("PRAGMA synchronous = OFF")); query.exec(QLatin1String("PRAGMA journal_mode = TRUNCATE")); query.exec(QLatin1String("SELECT sqlite_version()")); while (query.next()) qWarning() << "Database SQLite:" << query.value(0).toString(); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-310'], 'message': 
'Explicitly remove file permissions for settings and DB'</s>
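The CWE-310 message above is about filesystem permissions: the SQLite database stores server passwords, so it must not be readable by other local users. A minimal POSIX sketch of the same idea (the actual patch uses Qt's QFile::setPermissions; the path below is a placeholder):

#include <stdio.h>
#include <sys/stat.h>

/* Make the database file readable and writable by its owner only,
 * i.e. the effect of chmod 600. */
static int restrict_to_owner(const char *path)
{
    if (chmod(path, S_IRUSR | S_IWUSR) != 0) {
        perror("chmod");
        return -1;
    }
    return 0;
}

int main(void)
{
    /* "mumble.sqlite" is just a placeholder path for the sketch. */
    return restrict_to_owner("mumble.sqlite") == 0 ? 0 : 1;
}

Setting a restrictive umask before the file is first created is the stronger variant, since it closes the window in which the file would briefly exist with default permissions.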
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) { #ifdef CONFIG_X86_MCE /* notify userspace of pending MCEs */ if (thread_info_flags & _TIF_MCE_NOTIFY) mce_notify_process(); #endif /* CONFIG_X86_64 && CONFIG_X86_MCE */ /* deal with pending signal delivery */ if (thread_info_flags & _TIF_SIGPENDING) do_signal(regs); if (thread_info_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); if (current->replacement_session_keyring) key_replace_session_keyring(); } if (thread_info_flags & _TIF_USER_RETURN_NOTIFY) fire_user_return_notifiers(); #ifdef CONFIG_X86_32 clear_thread_flag(TIF_IRET); #endif /* CONFIG_X86_32 */ } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-400'], 'message': 'signal/x86: Delay calling signals in atomic On x86_64 we must disable preemption before we enable interrupts for stack faults, int3 and debugging, because the current task is using a per CPU debug stack defined by the IST. If we schedule out, another task can come in and use the same stack and cause the stack to be corrupted and crash the kernel on return. When CONFIG_PREEMPT_RT_FULL is enabled, spin_locks become mutexes, and one of these is the spin lock used in signal handling. Some of the debug code (int3) causes do_trap() to send a signal. This function calls a spin lock that has been converted to a mutex and has the possibility to sleep. If this happens, the above issues with the corrupted stack is possible. Instead of calling the signal right away, for PREEMPT_RT and x86_64, the signal information is stored on the stacks task_struct and TIF_NOTIFY_RESUME is set. Then on exit of the trap, the signal resume code will send the signal when preemption is enabled. [ rostedt: Switched from #ifdef CONFIG_PREEMPT_RT_FULL to ARCH_RT_DELAYS_SIGNAL_SEND and added comments to the code. ] Cc: stable-rt@vger.kernel.org Signed-off-by: Oleg Nesterov <oleg@redhat.com> Signed-off-by: Steven Rostedt <rostedt@goodmis.org> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>'</s>
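Per the message above, the trap handler parks the siginfo on the task and sets TIF_NOTIFY_RESUME; the resume path then sends it once preemption is legal again. A sketch of that resume-path flush (the ARCH_RT_DELAYS_SIGNAL_SEND guard comes from the message itself; the forced_info field name is an assumption and the fragment is kernel-style, not runnable userspace code):

#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
	/* Flush a signal that a trap handler parked on the task because
	 * sending it from atomic context could sleep on an RT kernel. */
	if (unlikely(current->forced_info.si_signo)) {
		force_sig_info(current->forced_info.si_signo,
			       &current->forced_info, current);
		current->forced_info.si_signo = 0;
	}
#endif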
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) { struct task_struct *tsk = current; int user_icebp = 0; unsigned long dr6; int si_code; get_debugreg(dr6, 6); /* Filter out all the reserved bits which are preset to 1 */ dr6 &= ~DR6_RESERVED; /* * If dr6 has no reason to give us about the origin of this trap, * then it's very likely the result of an icebp/int01 trap. * User wants a sigtrap for that. */ if (!dr6 && user_mode(regs)) user_icebp = 1; /* Catch kmemcheck conditions first of all! */ if ((dr6 & DR_STEP) && kmemcheck_trap(regs)) return; /* DR6 may or may not be cleared by the CPU */ set_debugreg(0, 6); /* * The processor cleared BTF, so don't mark that we need it set. */ clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP); /* Store the virtualized DR6 value */ tsk->thread.debugreg6 = dr6; if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code, SIGTRAP) == NOTIFY_STOP) return; /* It's safe to allow irq's after DR6 has been saved */ preempt_conditional_sti(regs); if (regs->flags & X86_VM_MASK) { handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1); preempt_conditional_cli(regs); return; } /* * Single-stepping through system calls: ignore any exceptions in * kernel space, but re-enable TF when returning to user mode. * * We already checked v86 mode above, so we can check for kernel mode * by just checking the CPL of CS. */ if ((dr6 & DR_STEP) && !user_mode(regs)) { tsk->thread.debugreg6 &= ~DR_STEP; set_tsk_thread_flag(tsk, TIF_SINGLESTEP); regs->flags &= ~X86_EFLAGS_TF; } si_code = get_si_code(tsk->thread.debugreg6); if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp) send_sigtrap(tsk, regs, error_code, si_code); preempt_conditional_cli(regs); return; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-400'], 'message': 'x86: Do not disable preemption in int3 on 32bit Preemption must be disabled before enabling interrupts in do_trap on x86_64 because the stack in use for int3 and debug is a per CPU stack set by th IST. But 32bit does not have an IST and the stack still belongs to the current task and there is no problem in scheduling out the task. Keep preemption enabled on X86_32 when enabling interrupts for do_trap(). The name of the function is changed from preempt_conditional_sti/cli() to conditional_sti/cli_ist(), to annotate that this function is used when the stack is on the IST. Cc: stable-rt@vger.kernel.org Signed-off-by: Steven Rostedt <rostedt@goodmis.org> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code) { #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) return; #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ #ifdef CONFIG_KPROBES if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) return; #else if (notify_die(DIE_TRAP, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) return; #endif preempt_conditional_sti(regs); do_trap(3, SIGTRAP, "int3", regs, error_code, NULL); preempt_conditional_cli(regs); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-400'], 'message': 'x86: Do not disable preemption in int3 on 32bit Preemption must be disabled before enabling interrupts in do_trap on x86_64 because the stack in use for int3 and debug is a per CPU stack set by th IST. But 32bit does not have an IST and the stack still belongs to the current task and there is no problem in scheduling out the task. Keep preemption enabled on X86_32 when enabling interrupts for do_trap(). The name of the function is changed from preempt_conditional_sti/cli() to conditional_sti/cli_ist(), to annotate that this function is used when the stack is on the IST. Cc: stable-rt@vger.kernel.org Signed-off-by: Steven Rostedt <rostedt@goodmis.org> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>'</s>
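The two entries above carry the same fix: x86_64 runs int3/debug on a per-CPU IST stack, so preemption must be disabled there before interrupts are enabled, while 32-bit has no IST and may stay preemptible. A sketch of the renamed helpers' shape as the message describes them (kernel-style fragment; the exact bodies are an assumption):

static void conditional_sti_ist(struct pt_regs *regs)
{
#ifdef CONFIG_X86_64
	/* 64-bit runs int3/debug on a per-CPU IST stack: scheduling away
	 * with interrupts on would let another task corrupt that stack. */
	preempt_disable();
#endif
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static void conditional_cli_ist(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
#ifdef CONFIG_X86_64
	preempt_enable_no_resched();
#endif
}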
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void Ogg::XiphComment::parse(const ByteVector &data) { // The first thing in the comment data is the vendor ID length, followed by a // UTF8 string with the vendor ID. int pos = 0; int vendorLength = data.mid(0, 4).toUInt(false); pos += 4; d->vendorID = String(data.mid(pos, vendorLength), String::UTF8); pos += vendorLength; // Next the number of fields in the comment vector. int commentFields = data.mid(pos, 4).toUInt(false); pos += 4; for(int i = 0; i < commentFields; i++) { // Each comment field is in the format "KEY=value" in a UTF8 string and has // 4 bytes before the text starts that gives the length. int commentLength = data.mid(pos, 4).toUInt(false); pos += 4; String comment = String(data.mid(pos, commentLength), String::UTF8); pos += commentLength; int commentSeparatorPosition = comment.find("="); String key = comment.substr(0, commentSeparatorPosition); String value = comment.substr(commentSeparatorPosition + 1); addField(key, value, false); } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Be more careful when parsing Vorbis Comments'</s>
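The weakness in the XiphComment parser above is that commentFields and commentLength come straight from the file and are used to slice data without checking them against the bytes remaining, so a crafted file forces reads past the buffer. A hedged sketch of the defensive pattern (hypothetical helper, not TagLib's actual fix; assumes a little-endian host for brevity):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

/* Read a little-endian u32 length prefix and advance *pos, refusing
 * to step outside [0, size). Returns 0 on success, -1 on truncation. */
static int read_field(const uint8_t *data, size_t size, size_t *pos,
                      const uint8_t **field, size_t *field_len)
{
    uint32_t len;
    if (size - *pos < 4)           /* the length prefix itself is missing */
        return -1;
    memcpy(&len, data + *pos, 4);  /* little-endian host assumed */
    *pos += 4;
    if (len > size - *pos)         /* declared length exceeds remaining bytes */
        return -1;
    *field = data + *pos;
    *field_len = len;
    *pos += len;
    return 0;
}

int main(void)
{
    const uint8_t buf[] = {3, 0, 0, 0, 'K', '=', 'v'};
    size_t pos = 0, n;
    const uint8_t *f;
    if (read_field(buf, sizeof buf, &pos, &f, &n) == 0)
        printf("%.*s\n", (int)n, (const char *)f);  /* prints "K=v" */
    return 0;
}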
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: log2vis_unicode (PyObject * unicode, FriBidiParType base_direction, int clean, int reordernsm) { PyObject *logical = NULL; /* input string encoded in utf-8 */ PyObject *visual = NULL; /* output string encoded in utf-8 */ PyObject *result = NULL; /* unicode output string */ int length = PyUnicode_GET_SIZE (unicode); logical = PyUnicode_AsUTF8String (unicode); if (logical == NULL) goto cleanup; visual = log2vis_utf8 (logical, length, base_direction, clean, reordernsm); if (visual == NULL) goto cleanup; result = PyUnicode_DecodeUTF8 (PyString_AS_STRING (visual), PyString_GET_SIZE (visual), "strict"); cleanup: Py_XDECREF (logical); Py_XDECREF (visual); return result; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119'], 'message': 'refactor pyfribidi.c module pyfribidi.c is now compiled as _pyfribidi. This module only handles unicode internally and doesn't use the fribidi_utf8_to_unicode function (which can't handle 4 byte utf-8 sequences). This fixes the buffer overflow in issue #2. The code is now also much simpler: pyfribidi.c is down from 280 to 130 lines of code. We now ship a pure python pyfribidi that handles the case when non-unicode strings are passed in. We now also adapt the size of the output string if clean=True is passed.'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: initpyfribidi (void) { PyObject *module; /* XXX What should be done if we fail here? */ module = Py_InitModule3 ("pyfribidi", PyfribidiMethods, _pyfribidi__doc__); PyModule_AddIntConstant (module, "RTL", (long) FRIBIDI_TYPE_RTL); PyModule_AddIntConstant (module, "LTR", (long) FRIBIDI_TYPE_LTR); PyModule_AddIntConstant (module, "ON", (long) FRIBIDI_TYPE_ON); PyModule_AddStringConstant (module, "__author__", "Yaacov Zamir and Nir Soffer"); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119'], 'message': 'refactor pyfribidi.c module pyfribidi.c is now compiled as _pyfribidi. This module only handles unicode internally and doesn't use the fribidi_utf8_to_unicode function (which can't handle 4 byte utf-8 sequences). This fixes the buffer overflow in issue #2. The code is now also much simpler: pyfribidi.c is down from 280 to 130 lines of code. We now ship a pure python pyfribidi that handles the case when non-unicode strings are passed in. We now also adapt the size of the output string if clean=True is passed.'</s>
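The overflow behind the two pyfribidi entries above came from converting UTF-8 with a helper that assumed at most 3-byte sequences, so 4-byte sequences (code points above U+FFFF) overran the output buffer. The generic guard is to size conversion buffers from a worst-case scan that accepts all legal sequence lengths; a small sketch (continuation bytes left unvalidated for brevity):

#include <stddef.h>
#include <stdint.h>

/* Count code points in a UTF-8 buffer, accepting 1- to 4-byte lead
 * bytes, so an output array of exactly that many UTF-32 units can be
 * allocated up front. Rejects invalid leads and truncated sequences. */
static long utf8_codepoints(const uint8_t *s, size_t n)
{
    size_t i = 0;
    long count = 0;
    while (i < n) {
        size_t len;
        if      (s[i] < 0x80)            len = 1;
        else if ((s[i] & 0xE0) == 0xC0)  len = 2;
        else if ((s[i] & 0xF0) == 0xE0)  len = 3;
        else if ((s[i] & 0xF8) == 0xF0)  len = 4;  /* the case the old helper missed */
        else return -1;                             /* invalid lead byte */
        if (len > n - i)
            return -1;                              /* truncated sequence */
        i += len;
        count++;
    }
    return count;
}

int main(void)
{
    const uint8_t emoji[] = {0xF0, 0x9F, 0x98, 0x80};  /* U+1F600, 4 bytes */
    return utf8_codepoints(emoji, sizeof emoji) == 1 ? 0 : 1;
}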
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: bgp_capability_as4 (struct peer *peer, struct capability_header *hdr) { as_t as4 = stream_getl (BGP_INPUT(peer)); if (BGP_DEBUG (as4, AS4)) zlog_debug ("%s [AS4] about to set cap PEER_CAP_AS4_RCV, got as4 %u", peer->host, as4); SET_FLAG (peer->cap, PEER_CAP_AS4_RCV); return as4; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119'], 'message': 'bgpd: Open option parse errors don't NOTIFY, resulting in abort & DoS * bgp_packet.c: (bgp_open_receive) Errors from bgp_open_option_parse are detected, and the code will stop processing the OPEN and return. However it does so without calling bgp_notify_send to send a NOTIFY - which means the peer FSM doesn't get stopped, and bgp_read will be called again later. Because it returns, it doesn't go through the code near the end of the function that removes the current message from the peer input streaam. Thus the next call to bgp_read will try to parse a half-parsed stream as if it were a new BGP message, leading to an assert later in the code when it tries to read stuff that isn't there. Add the required call to bgp_notify_send before returning. * bgp_open.c: (bgp_capability_as4) Be a bit stricter, check the length field corresponds to the only value it can be, which is the amount we're going to read off the stream. And make sure the capability flag gets set, so callers can know this capability was read, regardless. (peek_for_as4_capability) Let bgp_capability_as4 do the length check.'</s>
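The bgp_capability_as4 half of this fix is a missing length check before a fixed-size read: an AS4 capability body is exactly 4 octets, and anything else must be rejected before the 32-bit read consumes bytes that may not be there. A self-contained sketch of that guard (names are illustrative, not Quagga's):

#include <stdint.h>
#include <stddef.h>

#define CAPABILITY_AS4_LEN 4  /* an AS4 capability body is exactly 4 octets */

/* Validate the capability length before reading the 32-bit ASN.
 * Returns the ASN, or 0 with *err set when the length is malformed. */
static uint32_t parse_as4_capability(const uint8_t *body, size_t body_len,
                                     int *err)
{
    *err = 0;
    if (body_len != CAPABILITY_AS4_LEN) {
        *err = 1;                  /* reject instead of over-reading */
        return 0;
    }
    return ((uint32_t)body[0] << 24) | ((uint32_t)body[1] << 16) |
           ((uint32_t)body[2] << 8)  |  (uint32_t)body[3];
}

int main(void)
{
    const uint8_t good[] = {0x00, 0x01, 0x00, 0x00};  /* AS 65536 */
    int err;
    uint32_t as4 = parse_as4_capability(good, sizeof good, &err);
    return (!err && as4 == 65536) ? 0 : 1;
}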
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: bgp_open_receive (struct peer *peer, bgp_size_t size) { int ret; u_char version; u_char optlen; u_int16_t holdtime; u_int16_t send_holdtime; as_t remote_as; as_t as4 = 0; struct peer *realpeer; struct in_addr remote_id; int capability; u_int8_t notify_data_remote_as[2]; u_int8_t notify_data_remote_id[4]; realpeer = NULL; /* Parse open packet. */ version = stream_getc (peer->ibuf); memcpy (notify_data_remote_as, stream_pnt (peer->ibuf), 2); remote_as = stream_getw (peer->ibuf); holdtime = stream_getw (peer->ibuf); memcpy (notify_data_remote_id, stream_pnt (peer->ibuf), 4); remote_id.s_addr = stream_get_ipv4 (peer->ibuf); /* Receive OPEN message log */ if (BGP_DEBUG (normal, NORMAL)) zlog_debug ("%s rcv OPEN, version %d, remote-as (in open) %u," " holdtime %d, id %s", peer->host, version, remote_as, holdtime, inet_ntoa (remote_id)); /* BEGIN to read the capability here, but dont do it yet */ capability = 0; optlen = stream_getc (peer->ibuf); if (optlen != 0) { /* We need the as4 capability value *right now* because * if it is there, we have not got the remote_as yet, and without * that we do not know which peer is connecting to us now. */ as4 = peek_for_as4_capability (peer, optlen); } /* Just in case we have a silly peer who sends AS4 capability set to 0 */ if (CHECK_FLAG (peer->cap, PEER_CAP_AS4_RCV) && !as4) { zlog_err ("%s bad OPEN, got AS4 capability, but AS4 set to 0", peer->host); bgp_notify_send (peer, BGP_NOTIFY_OPEN_ERR, BGP_NOTIFY_OPEN_BAD_PEER_AS); return -1; } if (remote_as == BGP_AS_TRANS) { /* Take the AS4 from the capability. We must have received the * capability now! Otherwise we have a asn16 peer who uses * BGP_AS_TRANS, for some unknown reason. */ if (as4 == BGP_AS_TRANS) { zlog_err ("%s [AS4] NEW speaker using AS_TRANS for AS4, not allowed", peer->host); bgp_notify_send (peer, BGP_NOTIFY_OPEN_ERR, BGP_NOTIFY_OPEN_BAD_PEER_AS); return -1; } if (!as4 && BGP_DEBUG (as4, AS4)) zlog_debug ("%s [AS4] OPEN remote_as is AS_TRANS, but no AS4." " Odd, but proceeding.", peer->host); else if (as4 < BGP_AS_MAX && BGP_DEBUG (as4, AS4)) zlog_debug ("%s [AS4] OPEN remote_as is AS_TRANS, but AS4 (%u) fits " "in 2-bytes, very odd peer.", peer->host, as4); if (as4) remote_as = as4; } else { /* We may have a partner with AS4 who has an asno < BGP_AS_MAX */ /* If we have got the capability, peer->as4cap must match remote_as */ if (CHECK_FLAG (peer->cap, PEER_CAP_AS4_RCV) && as4 != remote_as) { /* raise error, log this, close session */ zlog_err ("%s bad OPEN, got AS4 capability, but remote_as %u" " mismatch with 16bit 'myasn' %u in open", peer->host, as4, remote_as); bgp_notify_send (peer, BGP_NOTIFY_OPEN_ERR, BGP_NOTIFY_OPEN_BAD_PEER_AS); return -1; } } /* Lookup peer from Open packet. */ if (CHECK_FLAG (peer->sflags, PEER_STATUS_ACCEPT_PEER)) { int as = 0; realpeer = peer_lookup_with_open (&peer->su, remote_as, &remote_id, &as); if (! realpeer) { /* Peer's source IP address is check in bgp_accept(), so this must be AS number mismatch or remote-id configuration mismatch. 
*/ if (as) { if (BGP_DEBUG (normal, NORMAL)) zlog_debug ("%s bad OPEN, wrong router identifier %s", peer->host, inet_ntoa (remote_id)); bgp_notify_send_with_data (peer, BGP_NOTIFY_OPEN_ERR, BGP_NOTIFY_OPEN_BAD_BGP_IDENT, notify_data_remote_id, 4); } else { if (BGP_DEBUG (normal, NORMAL)) zlog_debug ("%s bad OPEN, remote AS is %u, expected %u", peer->host, remote_as, peer->as); bgp_notify_send_with_data (peer, BGP_NOTIFY_OPEN_ERR, BGP_NOTIFY_OPEN_BAD_PEER_AS, notify_data_remote_as, 2); } return -1; } } /* When collision is detected and this peer is closed. Retrun immidiately. */ ret = bgp_collision_detect (peer, remote_id); if (ret < 0) return ret; /* Hack part. */ if (CHECK_FLAG (peer->sflags, PEER_STATUS_ACCEPT_PEER)) { if (realpeer->status == Established && CHECK_FLAG (realpeer->sflags, PEER_STATUS_NSF_MODE)) { realpeer->last_reset = PEER_DOWN_NSF_CLOSE_SESSION; SET_FLAG (realpeer->sflags, PEER_STATUS_NSF_WAIT); } else if (ret == 0 && realpeer->status != Active && realpeer->status != OpenSent && realpeer->status != OpenConfirm && realpeer->status != Connect) { /* XXX: This is an awful problem.. * * According to the RFC we should just let this connection (of the * accepted 'peer') continue on to Established if the other * connection (the 'realpeer' one) is in state Connect, and deal * with the more larval FSM as/when it gets far enough to receive * an Open. We don't do that though, we instead close the (more * developed) accepted connection. * * This means there's a race, which if hit, can loop: * * FSM for A FSM for B * realpeer accept-peer realpeer accept-peer * * Connect Connect * Active * OpenSent OpenSent * <arrive here, * Notify, delete> * Idle Active * OpenSent OpenSent * <arrive here, * Notify, delete> * Idle * <wait> <wait> * Connect Connect * * * If both sides are Quagga, they're almost certain to wait for * the same amount of time of course (which doesn't preclude other * implementations also waiting for same time). The race is * exacerbated by high-latency (in bgpd and/or the network). * * The reason we do this is because our FSM is tied to our peer * structure, which carries our configuration information, etc. * I.e. we can't let the accepted-peer FSM continue on as it is, * cause it's not associated with any actual peer configuration - * it's just a dummy. * * It's possible we could hack-fix this by just bgp_stop'ing the * realpeer and continueing on with the 'transfer FSM' below. * Ideally, we need to seperate FSMs from struct peer. * * Setting one side to passive avoids the race, as a workaround. */ if (BGP_DEBUG (events, EVENTS)) zlog_debug ("%s peer status is %s close connection", realpeer->host, LOOKUP (bgp_status_msg, realpeer->status)); bgp_notify_send (peer, BGP_NOTIFY_CEASE, BGP_NOTIFY_CEASE_CONNECT_REJECT); return -1; } if (BGP_DEBUG (events, EVENTS)) zlog_debug ("%s [Event] Transfer accept BGP peer to real (state %s)", peer->host, LOOKUP (bgp_status_msg, realpeer->status)); bgp_stop (realpeer); /* Transfer file descriptor. */ realpeer->fd = peer->fd; peer->fd = -1; /* Transfer input buffer. */ stream_free (realpeer->ibuf); realpeer->ibuf = peer->ibuf; realpeer->packet_size = peer->packet_size; peer->ibuf = NULL; /* Transfer status. */ realpeer->status = peer->status; bgp_stop (peer); /* peer pointer change. Open packet send to neighbor. */ peer = realpeer; bgp_open_send (peer); if (peer->fd < 0) { zlog_err ("bgp_open_receive peer's fd is negative value %d", peer->fd); return -1; } BGP_READ_ON (peer->t_read, bgp_read, peer->fd); } /* remote router-id check. 
*/ if (remote_id.s_addr == 0 || IPV4_CLASS_DE (ntohl (remote_id.s_addr)) || ntohl (peer->local_id.s_addr) == ntohl (remote_id.s_addr)) { if (BGP_DEBUG (normal, NORMAL)) zlog_debug ("%s bad OPEN, wrong router identifier %s", peer->host, inet_ntoa (remote_id)); bgp_notify_send_with_data (peer, BGP_NOTIFY_OPEN_ERR, BGP_NOTIFY_OPEN_BAD_BGP_IDENT, notify_data_remote_id, 4); return -1; } /* Set remote router-id */ peer->remote_id = remote_id; /* Peer BGP version check. */ if (version != BGP_VERSION_4) { u_int8_t maxver = BGP_VERSION_4; if (BGP_DEBUG (normal, NORMAL)) zlog_debug ("%s bad protocol version, remote requested %d, local request %d", peer->host, version, BGP_VERSION_4); bgp_notify_send_with_data (peer, BGP_NOTIFY_OPEN_ERR, BGP_NOTIFY_OPEN_UNSUP_VERSION, &maxver, 1); return -1; } /* Check neighbor as number. */ if (remote_as != peer->as) { if (BGP_DEBUG (normal, NORMAL)) zlog_debug ("%s bad OPEN, remote AS is %u, expected %u", peer->host, remote_as, peer->as); bgp_notify_send_with_data (peer, BGP_NOTIFY_OPEN_ERR, BGP_NOTIFY_OPEN_BAD_PEER_AS, notify_data_remote_as, 2); return -1; } /* From the rfc: Upon receipt of an OPEN message, a BGP speaker MUST calculate the value of the Hold Timer by using the smaller of its configured Hold Time and the Hold Time received in the OPEN message. The Hold Time MUST be either zero or at least three seconds. An implementation may reject connections on the basis of the Hold Time. */ if (holdtime < 3 && holdtime != 0) { bgp_notify_send (peer, BGP_NOTIFY_OPEN_ERR, BGP_NOTIFY_OPEN_UNACEP_HOLDTIME); return -1; } /* From the rfc: A reasonable maximum time between KEEPALIVE messages would be one third of the Hold Time interval. KEEPALIVE messages MUST NOT be sent more frequently than one per second. An implementation MAY adjust the rate at which it sends KEEPALIVE messages as a function of the Hold Time interval. */ if (CHECK_FLAG (peer->config, PEER_CONFIG_TIMER)) send_holdtime = peer->holdtime; else send_holdtime = peer->bgp->default_holdtime; if (holdtime < send_holdtime) peer->v_holdtime = holdtime; else peer->v_holdtime = send_holdtime; peer->v_keepalive = peer->v_holdtime / 3; /* Open option part parse. */ if (optlen != 0) { ret = bgp_open_option_parse (peer, optlen, &capability); if (ret < 0) return ret; } else { if (BGP_DEBUG (normal, NORMAL)) zlog_debug ("%s rcvd OPEN w/ OPTION parameter len: 0", peer->host); } /* Override capability. */ if (! capability || CHECK_FLAG (peer->flags, PEER_FLAG_OVERRIDE_CAPABILITY)) { peer->afc_nego[AFI_IP][SAFI_UNICAST] = peer->afc[AFI_IP][SAFI_UNICAST]; peer->afc_nego[AFI_IP][SAFI_MULTICAST] = peer->afc[AFI_IP][SAFI_MULTICAST]; peer->afc_nego[AFI_IP6][SAFI_UNICAST] = peer->afc[AFI_IP6][SAFI_UNICAST]; peer->afc_nego[AFI_IP6][SAFI_MULTICAST] = peer->afc[AFI_IP6][SAFI_MULTICAST]; } /* Get sockname. */ bgp_getsockname (peer); BGP_EVENT_ADD (peer, Receive_OPEN_message); peer->packet_size = 0; if (peer->ibuf) stream_reset (peer->ibuf); return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119'], 'message': 'bgpd: Open option parse errors don't NOTIFY, resulting in abort & DoS * bgp_packet.c: (bgp_open_receive) Errors from bgp_open_option_parse are detected, and the code will stop processing the OPEN and return. However it does so without calling bgp_notify_send to send a NOTIFY - which means the peer FSM doesn't get stopped, and bgp_read will be called again later. 
Because it returns, it doesn't go through the code near the end of the function that removes the current message from the peer input streaam. Thus the next call to bgp_read will try to parse a half-parsed stream as if it were a new BGP message, leading to an assert later in the code when it tries to read stuff that isn't there. Add the required call to bgp_notify_send before returning. * bgp_open.c: (bgp_capability_as4) Be a bit stricter, check the length field corresponds to the only value it can be, which is the amount we're going to read off the stream. And make sure the capability flag gets set, so callers can know this capability was read, regardless. (peek_for_as4_capability) Let bgp_capability_as4 do the length check.'</s>
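The DoS half of the same commit is framing desynchronization: bgp_open_receive returned on a parse error without consuming the rest of the OPEN, so the next bgp_read parsed a half-read message and tripped an assert. A generic, self-contained sketch of keeping a length-framed stream aligned even on parse errors (illustrative code, not Quagga's):

#include <stdint.h>
#include <stddef.h>

/* Each message is a 2-byte big-endian total length followed by a body.
 * On a body parse error the whole frame is still consumed, so the
 * cursor stays on a message boundary for the next call. */
static int next_message(const uint8_t *buf, size_t size, size_t *pos,
                        int (*parse)(const uint8_t *, size_t))
{
    size_t len;
    int rc;
    if (size - *pos < 2)
        return -2;                   /* need more data */
    len = ((size_t)buf[*pos] << 8) | buf[*pos + 1];
    if (len < 2 || len > size - *pos)
        return -2;                   /* malformed or incomplete frame */
    rc = parse(buf + *pos + 2, len - 2);
    *pos += len;                     /* consume even when rc < 0 */
    return rc;
}

static int parse_ok(const uint8_t *body, size_t n)
{
    return (n == 1 && body[0] == 0x42) ? 0 : -1;
}

int main(void)
{
    const uint8_t stream[] = {0x00, 0x03, 0x42, 0x00, 0x03, 0x42};
    size_t pos = 0;
    int a = next_message(stream, sizeof stream, &pos, parse_ok);
    int b = next_message(stream, sizeof stream, &pos, parse_ok);
    return (a == 0 && b == 0 && pos == 6) ? 0 : 1;
}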
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: _gnutls_proc_x509_server_certificate (gnutls_session_t session, uint8_t * data, size_t data_size) { int size, len, ret; uint8_t *p = data; cert_auth_info_t info; gnutls_certificate_credentials_t cred; ssize_t dsize = data_size; int i; gnutls_pcert_st *peer_certificate_list; size_t peer_certificate_list_size = 0, j, x; gnutls_datum_t tmp; cred = (gnutls_certificate_credentials_t) _gnutls_get_cred (session->key, GNUTLS_CRD_CERTIFICATE, NULL); if (cred == NULL) { gnutls_assert (); return GNUTLS_E_INSUFFICIENT_CREDENTIALS; } if ((ret = _gnutls_auth_info_set (session, GNUTLS_CRD_CERTIFICATE, sizeof (cert_auth_info_st), 1)) < 0) { gnutls_assert (); return ret; } info = _gnutls_get_auth_info (session); if (data == NULL || data_size == 0) { gnutls_assert (); /* no certificate was sent */ return GNUTLS_E_NO_CERTIFICATE_FOUND; } DECR_LEN (dsize, 3); size = _gnutls_read_uint24 (p); p += 3; /* some implementations send 0B 00 00 06 00 00 03 00 00 00 * instead of just 0B 00 00 03 00 00 00 as an empty certificate message. */ if (size == 0 || size == 3) { gnutls_assert (); /* no certificate was sent */ return GNUTLS_E_NO_CERTIFICATE_FOUND; } i = dsize; while (i > 0) { DECR_LEN (dsize, 3); len = _gnutls_read_uint24 (p); p += 3; DECR_LEN (dsize, len); peer_certificate_list_size++; p += len; i -= len + 3; } if (peer_certificate_list_size == 0) { gnutls_assert (); return GNUTLS_E_NO_CERTIFICATE_FOUND; } /* Ok we now allocate the memory to hold the * certificate list */ peer_certificate_list = gnutls_calloc (1, sizeof (gnutls_pcert_st) * (peer_certificate_list_size)); if (peer_certificate_list == NULL) { gnutls_assert (); return GNUTLS_E_MEMORY_ERROR; } p = data + 3; /* Now we start parsing the list (again). * We don't use DECR_LEN since the list has * been parsed before. */ for (j = 0; j < peer_certificate_list_size; j++) { len = _gnutls_read_uint24 (p); p += 3; tmp.size = len; tmp.data = p; ret = gnutls_pcert_import_x509_raw (&peer_certificate_list [j], &tmp, GNUTLS_X509_FMT_DER, 0); if (ret < 0) { gnutls_assert (); goto cleanup; } p += len; } if ((ret = _gnutls_copy_certificate_auth_info (info, peer_certificate_list, peer_certificate_list_size, 0, NULL)) < 0) { gnutls_assert (); goto cleanup; } if ((ret = _gnutls_check_key_usage (&peer_certificate_list[0], gnutls_kx_get (session))) < 0) { gnutls_assert (); goto cleanup; } ret = 0; cleanup: CLEAR_CERTS; gnutls_free (peer_certificate_list); return ret; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-399'], 'message': 'Deinitialize the correct number of certificates. Reported by Remi Gacogne.'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: int gnutls_pcert_import_x509 (gnutls_pcert_st* pcert, gnutls_x509_crt_t crt, unsigned int flags) { int ret; size_t sz; memset(pcert, 0, sizeof(*pcert)); pcert->type = GNUTLS_CRT_X509; pcert->cert.data = NULL; sz = 0; ret = gnutls_x509_crt_export(crt, GNUTLS_X509_FMT_DER, NULL, &sz); if (ret < 0 && ret != GNUTLS_E_SHORT_MEMORY_BUFFER) { ret = gnutls_assert_val(ret); goto cleanup; } pcert->cert.data = gnutls_malloc(sz); if (pcert->cert.data == NULL) { ret = gnutls_assert_val(GNUTLS_E_MEMORY_ERROR); goto cleanup; } ret = gnutls_x509_crt_export(crt, GNUTLS_X509_FMT_DER, pcert->cert.data, &sz); if (ret < 0) { ret = gnutls_assert_val(ret); goto cleanup; } pcert->cert.size = sz; ret = gnutls_pubkey_init(&pcert->pubkey); if (ret < 0) { ret = gnutls_assert_val(ret); goto cleanup; } ret = gnutls_pubkey_import_x509(pcert->pubkey, crt, 0); if (ret < 0) { gnutls_pubkey_deinit(pcert->pubkey); ret = gnutls_assert_val(ret); goto cleanup; } return 0; cleanup: gnutls_free(pcert->cert.data); return ret; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-399'], 'message': 'Deinitialize the correct number of certificates. Reported by Remi Gacogne.'</s>
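The CWE-399 in the two GnuTLS entries above is cleanup that deinitializes as many entries as were allocated rather than as many as were successfully imported, touching uninitialized slots when the import loop fails early. A compact, self-contained sketch of the correct unwind count (plain C, not the GnuTLS code):

#include <stdlib.h>
#include <stddef.h>

typedef struct { void *handle; } cert_t;

static int import_one(cert_t *c)  { c->handle = malloc(16); return c->handle ? 0 : -1; }
static void deinit_one(cert_t *c) { free(c->handle); }

/* Import n certificates; if entry j fails, unwind exactly the j
 * entries already imported, not all n, which would deinitialize
 * slots that were never set up. */
static int import_all(cert_t *list, size_t n)
{
    size_t j;
    for (j = 0; j < n; j++) {
        if (import_one(&list[j]) < 0) {
            while (j-- > 0)
                deinit_one(&list[j]);
            return -1;
        }
    }
    return 0;
}

int main(void)
{
    cert_t list[4];
    size_t j;
    if (import_all(list, 4) != 0)
        return 1;
    for (j = 0; j < 4; j++)   /* normal teardown after success */
        deinit_one(&list[j]);
    return 0;
}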
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: _gdata_service_build_session (void) { SoupSession *session = soup_session_sync_new (); #ifdef HAVE_GNOME soup_session_add_feature_by_type (session, SOUP_TYPE_GNOME_FEATURES_2_26); #endif /* HAVE_GNOME */ /* Log all libsoup traffic if debugging's turned on */ if (_gdata_service_get_log_level () > GDATA_LOG_MESSAGES) { SoupLoggerLogLevel level; SoupLogger *logger; switch (_gdata_service_get_log_level ()) { case GDATA_LOG_FULL: level = SOUP_LOGGER_LOG_BODY; break; case GDATA_LOG_HEADERS: level = SOUP_LOGGER_LOG_HEADERS; break; case GDATA_LOG_MESSAGES: case GDATA_LOG_NONE: default: g_assert_not_reached (); } logger = soup_logger_new (level, -1); soup_logger_set_printer (logger, (SoupLoggerPrinter) soup_log_printer, NULL, NULL); soup_session_add_feature (session, SOUP_SESSION_FEATURE (logger)); g_object_unref (logger); } return session; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'core: Validate SSL certificates for all connections This prevents MitM attacks which use spoofed SSL certificates. Closes: https://bugzilla.gnome.org/show_bug.cgi?id=671535'</s>
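The libgdata fix above makes the SoupSession verify TLS certificates instead of accepting any, which is what enables the MitM. A sketch under the assumption that the libsoup 2.x "ssl-strict" and "ssl-use-system-ca-file" session properties are available (the property names are the assumption here; verify against your libsoup headers):

#include <libsoup/soup.h>

/* Sketch only: build a session that actually verifies TLS
 * certificates against the system CA store. */
static SoupSession *build_verified_session(void)
{
    return soup_session_sync_new_with_options(
        "ssl-use-system-ca-file", TRUE,   /* trust the system CA bundle */
        "ssl-strict", TRUE,               /* reject invalid certificates */
        NULL);
}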
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: GnashImage::GnashImage(iterator data, size_t width, size_t height, ImageType type, ImageLocation location) : _type(type), _location(location), _width(width), _height(height), _data(data) { } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-189'], 'message': 'Fix crash in GnashImage.cpp'</s>
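CWE-189 in an image constructor like the one above typically means the width * height * depth arithmetic can overflow before the buffer is sized, so a crafted header yields a small allocation and a large write. A generic guard, sketched in plain C (not the Gnash patch itself):

#include <stdint.h>
#include <stdlib.h>

/* Allocate width*height*bpp bytes, failing cleanly if the product
 * would overflow size_t instead of wrapping to a tiny allocation. */
static void *alloc_image(size_t width, size_t height, size_t bpp)
{
    if (width == 0 || height == 0 || bpp == 0)
        return NULL;
    if (width > SIZE_MAX / height || width * height > SIZE_MAX / bpp)
        return NULL;                 /* product would overflow */
    return malloc(width * height * bpp);
}

int main(void)
{
    void *p = alloc_image(4096, 4096, 4);  /* 64 MiB: fine on most hosts */
    free(p);
    return alloc_image(SIZE_MAX, 2, 1) == NULL ? 0 : 1;  /* must refuse */
}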
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, struct mm_walk *walk) { pmd_t *pmd; unsigned long next; int err = 0; pmd = pmd_offset(pud, addr); do { again: next = pmd_addr_end(addr, end); if (pmd_none(*pmd)) { if (walk->pte_hole) err = walk->pte_hole(addr, next, walk); if (err) break; continue; } /* * This implies that each ->pmd_entry() handler * needs to know about pmd_trans_huge() pmds */ if (walk->pmd_entry) err = walk->pmd_entry(pmd, addr, next, walk); if (err) break; /* * Check this here so we only break down trans_huge * pages when we _need_ to */ if (!walk->pte_entry) continue; split_huge_page_pmd(walk->mm, pmd); if (pmd_none_or_clear_bad(pmd)) goto again; err = walk_pte_range(pmd, addr, next, walk); if (err) break; } while (pmd++, addr = next, addr != end); return err; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-264'], 'message': 'mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode In some cases it may happen that pmd_none_or_clear_bad() is called with the mmap_sem hold in read mode. In those cases the huge page faults can allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a false positive from pmd_bad() that will not like to see a pmd materializing as trans huge. It's not khugepaged causing the problem, khugepaged holds the mmap_sem in write mode (and all those sites must hold the mmap_sem in read mode to prevent pagetables to go away from under them, during code review it seems vm86 mode on 32bit kernels requires that too unless it's restricted to 1 thread per process or UP builds). The race is only with the huge pagefaults that can convert a pmd_none() into a pmd_trans_huge(). Effectively all these pmd_none_or_clear_bad() sites running with mmap_sem in read mode are somewhat speculative with the page faults, and the result is always undefined when they run simultaneously. This is probably why it wasn't common to run into this. For example if the madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page fault, the hugepage will not be zapped, if the page fault runs first it will be zapped. Altering pmd_bad() not to error out if it finds hugepmds won't be enough to fix this, because zap_pmd_range would then proceed to call zap_pte_range (which would be incorrect if the pmd become a pmd_trans_huge()). The simplest way to fix this is to read the pmd in the local stack (regardless of what we read, no need of actual CPU barriers, only compiler barrier needed), and be sure it is not changing under the code that computes its value. Even if the real pmd is changing under the value we hold on the stack, we don't care. If we actually end up in zap_pte_range it means the pmd was not none already and it was not huge, and it can't become huge from under us (khugepaged locking explained above). All we need is to enforce that there is no way anymore that in a code path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad can run into a hugepmd. The overhead of a barrier() is just a compiler tweak and should not be measurable (I only added it for THP builds). 
I don't exclude different compiler versions may have prevented the race too by caching the value of *pmd on the stack (that hasn't been verified, but it wouldn't be impossible considering pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines and there's no external function called in between pmd_trans_huge and pmd_none_or_clear_bad). if (pmd_trans_huge(*pmd)) { if (next-addr != HPAGE_PMD_SIZE) { VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem)); split_huge_page_pmd(vma->vm_mm, pmd); } else if (zap_huge_pmd(tlb, vma, pmd, addr)) continue; /* fall through */ } if (pmd_none_or_clear_bad(pmd)) Because this race condition could be exercised without special privileges this was reported in CVE-2012-1179. The race was identified and fully explained by Ulrich who debugged it. I'm quoting his accurate explanation below, for reference. ====== start quote ======= mapcount 0 page_mapcount 1 kernel BUG at mm/huge_memory.c:1384! At some point prior to the panic, a "bad pmd ..." message similar to the following is logged on the console: mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7). The "bad pmd ..." message is logged by pmd_clear_bad() before it clears the page's PMD table entry. 143 void pmd_clear_bad(pmd_t *pmd) 144 { -> 145 pmd_ERROR(*pmd); 146 pmd_clear(pmd); 147 } After the PMD table entry has been cleared, there is an inconsistency between the actual number of PMD table entries that are mapping the page and the page's map count (_mapcount field in struct page). When the page is subsequently reclaimed, __split_huge_page() detects this inconsistency. 1381 if (mapcount != page_mapcount(page)) 1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n", 1383 mapcount, page_mapcount(page)); -> 1384 BUG_ON(mapcount != page_mapcount(page)); The root cause of the problem is a race of two threads in a multithreaded process. Thread B incurs a page fault on a virtual address that has never been accessed (PMD entry is zero) while Thread A is executing an madvise() system call on a virtual address within the same 2 MB (huge page) range. virtual address space .---------------------. | | | | .-|---------------------| | | | | | |<-- B(fault) | | | 2 MB | |/////////////////////|-. huge < |/////////////////////| > A(range) page | |/////////////////////|-' | | | | | | '-|---------------------| | | | | '---------------------' - Thread A is executing an madvise(..., MADV_DONTNEED) system call on the virtual address range "A(range)" shown in the picture. sys_madvise // Acquire the semaphore in shared mode. down_read(&current->mm->mmap_sem) ... madvise_vma switch (behavior) case MADV_DONTNEED: madvise_dontneed zap_page_range unmap_vmas unmap_page_range zap_pud_range zap_pmd_range // // Assume that this huge page has never been accessed. // I.e. content of the PMD entry is zero (not mapped). // if (pmd_trans_huge(*pmd)) { // We don't get here due to the above assumption. } // // Assume that Thread B incurred a page fault and .---------> // sneaks in here as shown below. | // | if (pmd_none_or_clear_bad(pmd)) | { | if (unlikely(pmd_bad(*pmd))) | pmd_clear_bad | { | pmd_ERROR | // Log "bad pmd ..." message here. | pmd_clear | // Clear the page's PMD entry. | // Thread B incremented the map count | // in page_add_new_anon_rmap(), but | // now the page is no longer mapped | // by a PMD entry (-> inconsistency). | } | } | v - Thread B is handling a page fault on virtual address "B(fault)" shown in the picture. ... do_page_fault __do_page_fault // Acquire the semaphore in shared mode. 
down_read_trylock(&mm->mmap_sem) ... handle_mm_fault if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) // We get here due to the above assumption (PMD entry is zero). do_huge_pmd_anonymous_page alloc_hugepage_vma // Allocate a new transparent huge page here. ... __do_huge_pmd_anonymous_page ... spin_lock(&mm->page_table_lock) ... page_add_new_anon_rmap // Here we increment the page's map count (starts at -1). atomic_set(&page->_mapcount, 0) set_pmd_at // Here we set the page's PMD entry which will be cleared // when Thread A calls pmd_clear_bad(). ... spin_unlock(&mm->page_table_lock) The mmap_sem does not prevent the race because both threads are acquiring it in shared mode (down_read). Thread B holds the page_table_lock while the page's map count and PMD table entry are updated. However, Thread A does not synchronize on that lock. ====== end quote ======= [akpm@linux-foundation.org: checkpatch fixes] Reported-by: Ulrich Obergfell <uobergfe@redhat.com> Signed-off-by: Andrea Arcangeli <aarcange@redhat.com> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Cc: Mel Gorman <mgorman@suse.de> Cc: Hugh Dickins <hughd@google.com> Cc: Dave Jones <davej@redhat.com> Acked-by: Larry Woodman <lwoodman@redhat.com> Acked-by: Rik van Riel <riel@redhat.com> Cc: <stable@vger.kernel.org> [2.6.38+] Cc: Mark Salter <msalter@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>'</s>
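The commit message above describes the fix precisely: snapshot the pmd into a local with only a compiler barrier and test that stable copy, so a concurrent huge-page fault cannot change the value between the pmd_none/pmd_trans_huge/pmd_bad checks. A simplified sketch of the shape of the helper the fix introduced (kernel-style fragment; details elided, not runnable userspace code):

/* Read the pmd once; barrier() stops the compiler re-reading *pmd
 * between the checks, so a racing hugepage fault cannot change our
 * view mid-decision. Returns 1 when the caller must skip the range. */
static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
	pmd_t pmdval = *pmd;
	barrier();                /* compiler barrier only; no CPU fence needed */
	if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
		return 1;         /* none, or huge (possibly materialized racily) */
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}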
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static void mark_screen_rdonly(struct mm_struct *mm) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; spinlock_t *ptl; int i; pgd = pgd_offset(mm, 0xA0000); if (pgd_none_or_clear_bad(pgd)) goto out; pud = pud_offset(pgd, 0xA0000); if (pud_none_or_clear_bad(pud)) goto out; pmd = pmd_offset(pud, 0xA0000); split_huge_page_pmd(mm, pmd); if (pmd_none_or_clear_bad(pmd)) goto out; pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl); for (i = 0; i < 32; i++) { if (pte_present(*pte)) set_pte(pte, pte_wrprotect(*pte)); pte++; } pte_unmap_unlock(pte, ptl); out: flush_tlb(); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-264'], 'message': 'mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode In some cases it may happen that pmd_none_or_clear_bad() is called with the mmap_sem hold in read mode. In those cases the huge page faults can allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a false positive from pmd_bad() that will not like to see a pmd materializing as trans huge. It's not khugepaged causing the problem, khugepaged holds the mmap_sem in write mode (and all those sites must hold the mmap_sem in read mode to prevent pagetables to go away from under them, during code review it seems vm86 mode on 32bit kernels requires that too unless it's restricted to 1 thread per process or UP builds). The race is only with the huge pagefaults that can convert a pmd_none() into a pmd_trans_huge(). Effectively all these pmd_none_or_clear_bad() sites running with mmap_sem in read mode are somewhat speculative with the page faults, and the result is always undefined when they run simultaneously. This is probably why it wasn't common to run into this. For example if the madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page fault, the hugepage will not be zapped, if the page fault runs first it will be zapped. Altering pmd_bad() not to error out if it finds hugepmds won't be enough to fix this, because zap_pmd_range would then proceed to call zap_pte_range (which would be incorrect if the pmd become a pmd_trans_huge()). The simplest way to fix this is to read the pmd in the local stack (regardless of what we read, no need of actual CPU barriers, only compiler barrier needed), and be sure it is not changing under the code that computes its value. Even if the real pmd is changing under the value we hold on the stack, we don't care. If we actually end up in zap_pte_range it means the pmd was not none already and it was not huge, and it can't become huge from under us (khugepaged locking explained above). All we need is to enforce that there is no way anymore that in a code path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad can run into a hugepmd. The overhead of a barrier() is just a compiler tweak and should not be measurable (I only added it for THP builds). I don't exclude different compiler versions may have prevented the race too by caching the value of *pmd on the stack (that hasn't been verified, but it wouldn't be impossible considering pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines and there's no external function called in between pmd_trans_huge and pmd_none_or_clear_bad). 
if (pmd_trans_huge(*pmd)) { if (next-addr != HPAGE_PMD_SIZE) { VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem)); split_huge_page_pmd(vma->vm_mm, pmd); } else if (zap_huge_pmd(tlb, vma, pmd, addr)) continue; /* fall through */ } if (pmd_none_or_clear_bad(pmd)) Because this race condition could be exercised without special privileges this was reported in CVE-2012-1179. The race was identified and fully explained by Ulrich who debugged it. I'm quoting his accurate explanation below, for reference. ====== start quote ======= mapcount 0 page_mapcount 1 kernel BUG at mm/huge_memory.c:1384! At some point prior to the panic, a "bad pmd ..." message similar to the following is logged on the console: mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7). The "bad pmd ..." message is logged by pmd_clear_bad() before it clears the page's PMD table entry. 143 void pmd_clear_bad(pmd_t *pmd) 144 { -> 145 pmd_ERROR(*pmd); 146 pmd_clear(pmd); 147 } After the PMD table entry has been cleared, there is an inconsistency between the actual number of PMD table entries that are mapping the page and the page's map count (_mapcount field in struct page). When the page is subsequently reclaimed, __split_huge_page() detects this inconsistency. 1381 if (mapcount != page_mapcount(page)) 1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n", 1383 mapcount, page_mapcount(page)); -> 1384 BUG_ON(mapcount != page_mapcount(page)); The root cause of the problem is a race of two threads in a multithreaded process. Thread B incurs a page fault on a virtual address that has never been accessed (PMD entry is zero) while Thread A is executing an madvise() system call on a virtual address within the same 2 MB (huge page) range. virtual address space .---------------------. | | | | .-|---------------------| | | | | | |<-- B(fault) | | | 2 MB | |/////////////////////|-. huge < |/////////////////////| > A(range) page | |/////////////////////|-' | | | | | | '-|---------------------| | | | | '---------------------' - Thread A is executing an madvise(..., MADV_DONTNEED) system call on the virtual address range "A(range)" shown in the picture. sys_madvise // Acquire the semaphore in shared mode. down_read(&current->mm->mmap_sem) ... madvise_vma switch (behavior) case MADV_DONTNEED: madvise_dontneed zap_page_range unmap_vmas unmap_page_range zap_pud_range zap_pmd_range // // Assume that this huge page has never been accessed. // I.e. content of the PMD entry is zero (not mapped). // if (pmd_trans_huge(*pmd)) { // We don't get here due to the above assumption. } // // Assume that Thread B incurred a page fault and .---------> // sneaks in here as shown below. | // | if (pmd_none_or_clear_bad(pmd)) | { | if (unlikely(pmd_bad(*pmd))) | pmd_clear_bad | { | pmd_ERROR | // Log "bad pmd ..." message here. | pmd_clear | // Clear the page's PMD entry. | // Thread B incremented the map count | // in page_add_new_anon_rmap(), but | // now the page is no longer mapped | // by a PMD entry (-> inconsistency). | } | } | v - Thread B is handling a page fault on virtual address "B(fault)" shown in the picture. ... do_page_fault __do_page_fault // Acquire the semaphore in shared mode. down_read_trylock(&mm->mmap_sem) ... handle_mm_fault if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) // We get here due to the above assumption (PMD entry is zero). do_huge_pmd_anonymous_page alloc_hugepage_vma // Allocate a new transparent huge page here. ... __do_huge_pmd_anonymous_page ... spin_lock(&mm->page_table_lock) ... 
page_add_new_anon_rmap // Here we increment the page's map count (starts at -1). atomic_set(&page->_mapcount, 0) set_pmd_at // Here we set the page's PMD entry which will be cleared // when Thread A calls pmd_clear_bad(). ... spin_unlock(&mm->page_table_lock) The mmap_sem does not prevent the race because both threads are acquiring it in shared mode (down_read). Thread B holds the page_table_lock while the page's map count and PMD table entry are updated. However, Thread A does not synchronize on that lock. ====== end quote ======= [akpm@linux-foundation.org: checkpatch fixes] Reported-by: Ulrich Obergfell <uobergfe@redhat.com> Signed-off-by: Andrea Arcangeli <aarcange@redhat.com> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Cc: Mel Gorman <mgorman@suse.de> Cc: Hugh Dickins <hughd@google.com> Cc: Dave Jones <davej@redhat.com> Acked-by: Larry Woodman <lwoodman@redhat.com> Acked-by: Rik van Riel <riel@redhat.com> Cc: <stable@vger.kernel.org> [2.6.38+] Cc: Mark Salter <msalter@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, unsigned char *vec) { unsigned long next; pmd_t *pmd; pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, end); if (pmd_trans_huge(*pmd)) { if (mincore_huge_pmd(vma, pmd, addr, next, vec)) { vec += (next - addr) >> PAGE_SHIFT; continue; } /* fall through */ } if (pmd_none_or_clear_bad(pmd)) mincore_unmapped_range(vma, addr, next, vec); else mincore_pte_range(vma, pmd, addr, next, vec); vec += (next - addr) >> PAGE_SHIFT; } while (pmd++, addr = next, addr != end); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-264'], 'message': 'mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode In some cases it may happen that pmd_none_or_clear_bad() is called with the mmap_sem hold in read mode. In those cases the huge page faults can allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a false positive from pmd_bad() that will not like to see a pmd materializing as trans huge. It's not khugepaged causing the problem, khugepaged holds the mmap_sem in write mode (and all those sites must hold the mmap_sem in read mode to prevent pagetables to go away from under them, during code review it seems vm86 mode on 32bit kernels requires that too unless it's restricted to 1 thread per process or UP builds). The race is only with the huge pagefaults that can convert a pmd_none() into a pmd_trans_huge(). Effectively all these pmd_none_or_clear_bad() sites running with mmap_sem in read mode are somewhat speculative with the page faults, and the result is always undefined when they run simultaneously. This is probably why it wasn't common to run into this. For example if the madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page fault, the hugepage will not be zapped, if the page fault runs first it will be zapped. Altering pmd_bad() not to error out if it finds hugepmds won't be enough to fix this, because zap_pmd_range would then proceed to call zap_pte_range (which would be incorrect if the pmd become a pmd_trans_huge()). The simplest way to fix this is to read the pmd in the local stack (regardless of what we read, no need of actual CPU barriers, only compiler barrier needed), and be sure it is not changing under the code that computes its value. Even if the real pmd is changing under the value we hold on the stack, we don't care. If we actually end up in zap_pte_range it means the pmd was not none already and it was not huge, and it can't become huge from under us (khugepaged locking explained above). All we need is to enforce that there is no way anymore that in a code path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad can run into a hugepmd. The overhead of a barrier() is just a compiler tweak and should not be measurable (I only added it for THP builds). I don't exclude different compiler versions may have prevented the race too by caching the value of *pmd on the stack (that hasn't been verified, but it wouldn't be impossible considering pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines and there's no external function called in between pmd_trans_huge and pmd_none_or_clear_bad). 
if (pmd_trans_huge(*pmd)) { if (next-addr != HPAGE_PMD_SIZE) { VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem)); split_huge_page_pmd(vma->vm_mm, pmd); } else if (zap_huge_pmd(tlb, vma, pmd, addr)) continue; /* fall through */ } if (pmd_none_or_clear_bad(pmd)) Because this race condition could be exercised without special privileges this was reported in CVE-2012-1179. The race was identified and fully explained by Ulrich who debugged it. I'm quoting his accurate explanation below, for reference. ====== start quote ======= mapcount 0 page_mapcount 1 kernel BUG at mm/huge_memory.c:1384! At some point prior to the panic, a "bad pmd ..." message similar to the following is logged on the console: mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7). The "bad pmd ..." message is logged by pmd_clear_bad() before it clears the page's PMD table entry. 143 void pmd_clear_bad(pmd_t *pmd) 144 { -> 145 pmd_ERROR(*pmd); 146 pmd_clear(pmd); 147 } After the PMD table entry has been cleared, there is an inconsistency between the actual number of PMD table entries that are mapping the page and the page's map count (_mapcount field in struct page). When the page is subsequently reclaimed, __split_huge_page() detects this inconsistency. 1381 if (mapcount != page_mapcount(page)) 1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n", 1383 mapcount, page_mapcount(page)); -> 1384 BUG_ON(mapcount != page_mapcount(page)); The root cause of the problem is a race of two threads in a multithreaded process. Thread B incurs a page fault on a virtual address that has never been accessed (PMD entry is zero) while Thread A is executing an madvise() system call on a virtual address within the same 2 MB (huge page) range. virtual address space .---------------------. | | | | .-|---------------------| | | | | | |<-- B(fault) | | | 2 MB | |/////////////////////|-. huge < |/////////////////////| > A(range) page | |/////////////////////|-' | | | | | | '-|---------------------| | | | | '---------------------' - Thread A is executing an madvise(..., MADV_DONTNEED) system call on the virtual address range "A(range)" shown in the picture. sys_madvise // Acquire the semaphore in shared mode. down_read(&current->mm->mmap_sem) ... madvise_vma switch (behavior) case MADV_DONTNEED: madvise_dontneed zap_page_range unmap_vmas unmap_page_range zap_pud_range zap_pmd_range // // Assume that this huge page has never been accessed. // I.e. content of the PMD entry is zero (not mapped). // if (pmd_trans_huge(*pmd)) { // We don't get here due to the above assumption. } // // Assume that Thread B incurred a page fault and .---------> // sneaks in here as shown below. | // | if (pmd_none_or_clear_bad(pmd)) | { | if (unlikely(pmd_bad(*pmd))) | pmd_clear_bad | { | pmd_ERROR | // Log "bad pmd ..." message here. | pmd_clear | // Clear the page's PMD entry. | // Thread B incremented the map count | // in page_add_new_anon_rmap(), but | // now the page is no longer mapped | // by a PMD entry (-> inconsistency). | } | } | v - Thread B is handling a page fault on virtual address "B(fault)" shown in the picture. ... do_page_fault __do_page_fault // Acquire the semaphore in shared mode. down_read_trylock(&mm->mmap_sem) ... handle_mm_fault if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) // We get here due to the above assumption (PMD entry is zero). do_huge_pmd_anonymous_page alloc_hugepage_vma // Allocate a new transparent huge page here. ... __do_huge_pmd_anonymous_page ... spin_lock(&mm->page_table_lock) ... 
page_add_new_anon_rmap // Here we increment the page's map count (starts at -1). atomic_set(&page->_mapcount, 0) set_pmd_at // Here we set the page's PMD entry which will be cleared // when Thread A calls pmd_clear_bad(). ... spin_unlock(&mm->page_table_lock) The mmap_sem does not prevent the race because both threads are acquiring it in shared mode (down_read). Thread B holds the page_table_lock while the page's map count and PMD table entry are updated. However, Thread A does not synchronize on that lock. ====== end quote ======= [akpm@linux-foundation.org: checkpatch fixes] Reported-by: Ulrich Obergfell <uobergfe@redhat.com> Signed-off-by: Andrea Arcangeli <aarcange@redhat.com> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Cc: Mel Gorman <mgorman@suse.de> Cc: Hugh Dickins <hughd@google.com> Cc: Dave Jones <davej@redhat.com> Acked-by: Larry Woodman <lwoodman@redhat.com> Acked-by: Rik van Riel <riel@redhat.com> Cc: <stable@vger.kernel.org> [2.6.38+] Cc: Mark Salter <msalter@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>'</s>
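Editor's note: the fix these THP records share is described but never shown in their message. Reconstructed from that description, it is a helper that reads the pmd into a local variable and pins it with a compiler barrier before validating it. Treat this as a sketch: the helper name matches what mainline adopted, but details such as splitting-pmd handling are simplified.

static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
{
        /* Read *pmd once; the barrier keeps pmdval stable in a register
         * or on the stack even if a huge-page fault rewrites *pmd. */
        pmd_t pmdval = *pmd;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        barrier();      /* compiler-only barrier, per the message */
#endif
        if (pmd_none(pmdval))
                return 1;
        if (unlikely(pmd_bad(pmdval))) {
                if (!pmd_trans_huge(pmdval))
                        pmd_clear_bad(pmd);
                return 1;
        }
        return 0;
}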
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct vm_area_struct *vma = walk->private; pte_t *pte, ptent; spinlock_t *ptl; struct page *page; split_huge_page_pmd(walk->mm, pmd); pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); for (; addr != end; pte++, addr += PAGE_SIZE) { ptent = *pte; if (!pte_present(ptent)) continue; page = vm_normal_page(vma, addr, ptent); if (!page) continue; if (PageReserved(page)) continue; /* Clear accessed and referenced bits. */ ptep_test_and_clear_young(vma, addr, pte); ClearPageReferenced(page); } pte_unmap_unlock(pte - 1, ptl); cond_resched(); return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-264'], 'message': 'mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode In some cases it may happen that pmd_none_or_clear_bad() is called with the mmap_sem hold in read mode. In those cases the huge page faults can allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a false positive from pmd_bad() that will not like to see a pmd materializing as trans huge. It's not khugepaged causing the problem, khugepaged holds the mmap_sem in write mode (and all those sites must hold the mmap_sem in read mode to prevent pagetables to go away from under them, during code review it seems vm86 mode on 32bit kernels requires that too unless it's restricted to 1 thread per process or UP builds). The race is only with the huge pagefaults that can convert a pmd_none() into a pmd_trans_huge(). Effectively all these pmd_none_or_clear_bad() sites running with mmap_sem in read mode are somewhat speculative with the page faults, and the result is always undefined when they run simultaneously. This is probably why it wasn't common to run into this. For example if the madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page fault, the hugepage will not be zapped, if the page fault runs first it will be zapped. Altering pmd_bad() not to error out if it finds hugepmds won't be enough to fix this, because zap_pmd_range would then proceed to call zap_pte_range (which would be incorrect if the pmd become a pmd_trans_huge()). The simplest way to fix this is to read the pmd in the local stack (regardless of what we read, no need of actual CPU barriers, only compiler barrier needed), and be sure it is not changing under the code that computes its value. Even if the real pmd is changing under the value we hold on the stack, we don't care. If we actually end up in zap_pte_range it means the pmd was not none already and it was not huge, and it can't become huge from under us (khugepaged locking explained above). All we need is to enforce that there is no way anymore that in a code path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad can run into a hugepmd. The overhead of a barrier() is just a compiler tweak and should not be measurable (I only added it for THP builds). 
I don't exclude different compiler versions may have prevented the race too by caching the value of *pmd on the stack (that hasn't been verified, but it wouldn't be impossible considering pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines and there's no external function called in between pmd_trans_huge and pmd_none_or_clear_bad). if (pmd_trans_huge(*pmd)) { if (next-addr != HPAGE_PMD_SIZE) { VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem)); split_huge_page_pmd(vma->vm_mm, pmd); } else if (zap_huge_pmd(tlb, vma, pmd, addr)) continue; /* fall through */ } if (pmd_none_or_clear_bad(pmd)) Because this race condition could be exercised without special privileges this was reported in CVE-2012-1179. The race was identified and fully explained by Ulrich who debugged it. I'm quoting his accurate explanation below, for reference. ====== start quote ======= mapcount 0 page_mapcount 1 kernel BUG at mm/huge_memory.c:1384! At some point prior to the panic, a "bad pmd ..." message similar to the following is logged on the console: mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7). The "bad pmd ..." message is logged by pmd_clear_bad() before it clears the page's PMD table entry. 143 void pmd_clear_bad(pmd_t *pmd) 144 { -> 145 pmd_ERROR(*pmd); 146 pmd_clear(pmd); 147 } After the PMD table entry has been cleared, there is an inconsistency between the actual number of PMD table entries that are mapping the page and the page's map count (_mapcount field in struct page). When the page is subsequently reclaimed, __split_huge_page() detects this inconsistency. 1381 if (mapcount != page_mapcount(page)) 1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n", 1383 mapcount, page_mapcount(page)); -> 1384 BUG_ON(mapcount != page_mapcount(page)); The root cause of the problem is a race of two threads in a multithreaded process. Thread B incurs a page fault on a virtual address that has never been accessed (PMD entry is zero) while Thread A is executing an madvise() system call on a virtual address within the same 2 MB (huge page) range. virtual address space .---------------------. | | | | .-|---------------------| | | | | | |<-- B(fault) | | | 2 MB | |/////////////////////|-. huge < |/////////////////////| > A(range) page | |/////////////////////|-' | | | | | | '-|---------------------| | | | | '---------------------' - Thread A is executing an madvise(..., MADV_DONTNEED) system call on the virtual address range "A(range)" shown in the picture. sys_madvise // Acquire the semaphore in shared mode. down_read(&current->mm->mmap_sem) ... madvise_vma switch (behavior) case MADV_DONTNEED: madvise_dontneed zap_page_range unmap_vmas unmap_page_range zap_pud_range zap_pmd_range // // Assume that this huge page has never been accessed. // I.e. content of the PMD entry is zero (not mapped). // if (pmd_trans_huge(*pmd)) { // We don't get here due to the above assumption. } // // Assume that Thread B incurred a page fault and .---------> // sneaks in here as shown below. | // | if (pmd_none_or_clear_bad(pmd)) | { | if (unlikely(pmd_bad(*pmd))) | pmd_clear_bad | { | pmd_ERROR | // Log "bad pmd ..." message here. | pmd_clear | // Clear the page's PMD entry. | // Thread B incremented the map count | // in page_add_new_anon_rmap(), but | // now the page is no longer mapped | // by a PMD entry (-> inconsistency). | } | } | v - Thread B is handling a page fault on virtual address "B(fault)" shown in the picture. ... do_page_fault __do_page_fault // Acquire the semaphore in shared mode. 
down_read_trylock(&mm->mmap_sem) ... handle_mm_fault if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) // We get here due to the above assumption (PMD entry is zero). do_huge_pmd_anonymous_page alloc_hugepage_vma // Allocate a new transparent huge page here. ... __do_huge_pmd_anonymous_page ... spin_lock(&mm->page_table_lock) ... page_add_new_anon_rmap // Here we increment the page's map count (starts at -1). atomic_set(&page->_mapcount, 0) set_pmd_at // Here we set the page's PMD entry which will be cleared // when Thread A calls pmd_clear_bad(). ... spin_unlock(&mm->page_table_lock) The mmap_sem does not prevent the race because both threads are acquiring it in shared mode (down_read). Thread B holds the page_table_lock while the page's map count and PMD table entry are updated. However, Thread A does not synchronize on that lock. ====== end quote ======= [akpm@linux-foundation.org: checkpatch fixes] Reported-by: Ulrich Obergfell <uobergfe@redhat.com> Signed-off-by: Andrea Arcangeli <aarcange@redhat.com> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Cc: Mel Gorman <mgorman@suse.de> Cc: Hugh Dickins <hughd@google.com> Cc: Dave Jones <davej@redhat.com> Acked-by: Larry Woodman <lwoodman@redhat.com> Acked-by: Rik van Riel <riel@redhat.com> Cc: <stable@vger.kernel.org> [2.6.38+] Cc: Mark Salter <msalter@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct vm_area_struct *vma = walk->private; pte_t *pte; spinlock_t *ptl; split_huge_page_pmd(walk->mm, pmd); pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); for (; addr != end; pte++, addr += PAGE_SIZE) if (is_target_pte_for_mc(vma, addr, *pte, NULL)) mc.precharge++; /* increment precharge temporarily */ pte_unmap_unlock(pte - 1, ptl); cond_resched(); return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-264'], 'message': 'mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode In some cases it may happen that pmd_none_or_clear_bad() is called with the mmap_sem hold in read mode. In those cases the huge page faults can allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a false positive from pmd_bad() that will not like to see a pmd materializing as trans huge. It's not khugepaged causing the problem, khugepaged holds the mmap_sem in write mode (and all those sites must hold the mmap_sem in read mode to prevent pagetables to go away from under them, during code review it seems vm86 mode on 32bit kernels requires that too unless it's restricted to 1 thread per process or UP builds). The race is only with the huge pagefaults that can convert a pmd_none() into a pmd_trans_huge(). Effectively all these pmd_none_or_clear_bad() sites running with mmap_sem in read mode are somewhat speculative with the page faults, and the result is always undefined when they run simultaneously. This is probably why it wasn't common to run into this. For example if the madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page fault, the hugepage will not be zapped, if the page fault runs first it will be zapped. Altering pmd_bad() not to error out if it finds hugepmds won't be enough to fix this, because zap_pmd_range would then proceed to call zap_pte_range (which would be incorrect if the pmd become a pmd_trans_huge()). The simplest way to fix this is to read the pmd in the local stack (regardless of what we read, no need of actual CPU barriers, only compiler barrier needed), and be sure it is not changing under the code that computes its value. Even if the real pmd is changing under the value we hold on the stack, we don't care. If we actually end up in zap_pte_range it means the pmd was not none already and it was not huge, and it can't become huge from under us (khugepaged locking explained above). All we need is to enforce that there is no way anymore that in a code path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad can run into a hugepmd. The overhead of a barrier() is just a compiler tweak and should not be measurable (I only added it for THP builds). I don't exclude different compiler versions may have prevented the race too by caching the value of *pmd on the stack (that hasn't been verified, but it wouldn't be impossible considering pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines and there's no external function called in between pmd_trans_huge and pmd_none_or_clear_bad). 
if (pmd_trans_huge(*pmd)) { if (next-addr != HPAGE_PMD_SIZE) { VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem)); split_huge_page_pmd(vma->vm_mm, pmd); } else if (zap_huge_pmd(tlb, vma, pmd, addr)) continue; /* fall through */ } if (pmd_none_or_clear_bad(pmd)) Because this race condition could be exercised without special privileges this was reported in CVE-2012-1179. The race was identified and fully explained by Ulrich who debugged it. I'm quoting his accurate explanation below, for reference. ====== start quote ======= mapcount 0 page_mapcount 1 kernel BUG at mm/huge_memory.c:1384! At some point prior to the panic, a "bad pmd ..." message similar to the following is logged on the console: mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7). The "bad pmd ..." message is logged by pmd_clear_bad() before it clears the page's PMD table entry. 143 void pmd_clear_bad(pmd_t *pmd) 144 { -> 145 pmd_ERROR(*pmd); 146 pmd_clear(pmd); 147 } After the PMD table entry has been cleared, there is an inconsistency between the actual number of PMD table entries that are mapping the page and the page's map count (_mapcount field in struct page). When the page is subsequently reclaimed, __split_huge_page() detects this inconsistency. 1381 if (mapcount != page_mapcount(page)) 1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n", 1383 mapcount, page_mapcount(page)); -> 1384 BUG_ON(mapcount != page_mapcount(page)); The root cause of the problem is a race of two threads in a multithreaded process. Thread B incurs a page fault on a virtual address that has never been accessed (PMD entry is zero) while Thread A is executing an madvise() system call on a virtual address within the same 2 MB (huge page) range. virtual address space .---------------------. | | | | .-|---------------------| | | | | | |<-- B(fault) | | | 2 MB | |/////////////////////|-. huge < |/////////////////////| > A(range) page | |/////////////////////|-' | | | | | | '-|---------------------| | | | | '---------------------' - Thread A is executing an madvise(..., MADV_DONTNEED) system call on the virtual address range "A(range)" shown in the picture. sys_madvise // Acquire the semaphore in shared mode. down_read(&current->mm->mmap_sem) ... madvise_vma switch (behavior) case MADV_DONTNEED: madvise_dontneed zap_page_range unmap_vmas unmap_page_range zap_pud_range zap_pmd_range // // Assume that this huge page has never been accessed. // I.e. content of the PMD entry is zero (not mapped). // if (pmd_trans_huge(*pmd)) { // We don't get here due to the above assumption. } // // Assume that Thread B incurred a page fault and .---------> // sneaks in here as shown below. | // | if (pmd_none_or_clear_bad(pmd)) | { | if (unlikely(pmd_bad(*pmd))) | pmd_clear_bad | { | pmd_ERROR | // Log "bad pmd ..." message here. | pmd_clear | // Clear the page's PMD entry. | // Thread B incremented the map count | // in page_add_new_anon_rmap(), but | // now the page is no longer mapped | // by a PMD entry (-> inconsistency). | } | } | v - Thread B is handling a page fault on virtual address "B(fault)" shown in the picture. ... do_page_fault __do_page_fault // Acquire the semaphore in shared mode. down_read_trylock(&mm->mmap_sem) ... handle_mm_fault if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) // We get here due to the above assumption (PMD entry is zero). do_huge_pmd_anonymous_page alloc_hugepage_vma // Allocate a new transparent huge page here. ... __do_huge_pmd_anonymous_page ... spin_lock(&mm->page_table_lock) ... 
page_add_new_anon_rmap // Here we increment the page's map count (starts at -1). atomic_set(&page->_mapcount, 0) set_pmd_at // Here we set the page's PMD entry which will be cleared // when Thread A calls pmd_clear_bad(). ... spin_unlock(&mm->page_table_lock) The mmap_sem does not prevent the race because both threads are acquiring it in shared mode (down_read). Thread B holds the page_table_lock while the page's map count and PMD table entry are updated. However, Thread A does not synchronize on that lock. ====== end quote ======= [akpm@linux-foundation.org: checkpatch fixes] Reported-by: Ulrich Obergfell <uobergfe@redhat.com> Signed-off-by: Andrea Arcangeli <aarcange@redhat.com> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Cc: Mel Gorman <mgorman@suse.de> Cc: Hugh Dickins <hughd@google.com> Cc: Dave Jones <davej@redhat.com> Acked-by: Larry Woodman <lwoodman@redhat.com> Acked-by: Rik van Riel <riel@redhat.com> Cc: <stable@vger.kernel.org> [2.6.38+] Cc: Mark Salter <msalter@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int gather_pte_stats(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct numa_maps *md; spinlock_t *ptl; pte_t *orig_pte; pte_t *pte; md = walk->private; spin_lock(&walk->mm->page_table_lock); if (pmd_trans_huge(*pmd)) { if (pmd_trans_splitting(*pmd)) { spin_unlock(&walk->mm->page_table_lock); wait_split_huge_page(md->vma->anon_vma, pmd); } else { pte_t huge_pte = *(pte_t *)pmd; struct page *page; page = can_gather_numa_stats(huge_pte, md->vma, addr); if (page) gather_stats(page, md, pte_dirty(huge_pte), HPAGE_PMD_SIZE/PAGE_SIZE); spin_unlock(&walk->mm->page_table_lock); return 0; } } else { spin_unlock(&walk->mm->page_table_lock); } orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); do { struct page *page = can_gather_numa_stats(*pte, md->vma, addr); if (!page) continue; gather_stats(page, md, pte_dirty(*pte), 1); } while (pte++, addr += PAGE_SIZE, addr != end); pte_unmap_unlock(orig_pte, ptl); return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-264'], 'message': 'mm: thp: fix pmd_bad() triggering in code paths holding mmap_sem read mode In some cases it may happen that pmd_none_or_clear_bad() is called with the mmap_sem hold in read mode. In those cases the huge page faults can allocate hugepmds under pmd_none_or_clear_bad() and that can trigger a false positive from pmd_bad() that will not like to see a pmd materializing as trans huge. It's not khugepaged causing the problem, khugepaged holds the mmap_sem in write mode (and all those sites must hold the mmap_sem in read mode to prevent pagetables to go away from under them, during code review it seems vm86 mode on 32bit kernels requires that too unless it's restricted to 1 thread per process or UP builds). The race is only with the huge pagefaults that can convert a pmd_none() into a pmd_trans_huge(). Effectively all these pmd_none_or_clear_bad() sites running with mmap_sem in read mode are somewhat speculative with the page faults, and the result is always undefined when they run simultaneously. This is probably why it wasn't common to run into this. For example if the madvise(MADV_DONTNEED) runs zap_page_range() shortly before the page fault, the hugepage will not be zapped, if the page fault runs first it will be zapped. Altering pmd_bad() not to error out if it finds hugepmds won't be enough to fix this, because zap_pmd_range would then proceed to call zap_pte_range (which would be incorrect if the pmd become a pmd_trans_huge()). The simplest way to fix this is to read the pmd in the local stack (regardless of what we read, no need of actual CPU barriers, only compiler barrier needed), and be sure it is not changing under the code that computes its value. Even if the real pmd is changing under the value we hold on the stack, we don't care. If we actually end up in zap_pte_range it means the pmd was not none already and it was not huge, and it can't become huge from under us (khugepaged locking explained above). All we need is to enforce that there is no way anymore that in a code path like below, pmd_trans_huge can be false, but pmd_none_or_clear_bad can run into a hugepmd. 
The overhead of a barrier() is just a compiler tweak and should not be measurable (I only added it for THP builds). I don't exclude different compiler versions may have prevented the race too by caching the value of *pmd on the stack (that hasn't been verified, but it wouldn't be impossible considering pmd_none_or_clear_bad, pmd_bad, pmd_trans_huge, pmd_none are all inlines and there's no external function called in between pmd_trans_huge and pmd_none_or_clear_bad). if (pmd_trans_huge(*pmd)) { if (next-addr != HPAGE_PMD_SIZE) { VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem)); split_huge_page_pmd(vma->vm_mm, pmd); } else if (zap_huge_pmd(tlb, vma, pmd, addr)) continue; /* fall through */ } if (pmd_none_or_clear_bad(pmd)) Because this race condition could be exercised without special privileges this was reported in CVE-2012-1179. The race was identified and fully explained by Ulrich who debugged it. I'm quoting his accurate explanation below, for reference. ====== start quote ======= mapcount 0 page_mapcount 1 kernel BUG at mm/huge_memory.c:1384! At some point prior to the panic, a "bad pmd ..." message similar to the following is logged on the console: mm/memory.c:145: bad pmd ffff8800376e1f98(80000000314000e7). The "bad pmd ..." message is logged by pmd_clear_bad() before it clears the page's PMD table entry. 143 void pmd_clear_bad(pmd_t *pmd) 144 { -> 145 pmd_ERROR(*pmd); 146 pmd_clear(pmd); 147 } After the PMD table entry has been cleared, there is an inconsistency between the actual number of PMD table entries that are mapping the page and the page's map count (_mapcount field in struct page). When the page is subsequently reclaimed, __split_huge_page() detects this inconsistency. 1381 if (mapcount != page_mapcount(page)) 1382 printk(KERN_ERR "mapcount %d page_mapcount %d\n", 1383 mapcount, page_mapcount(page)); -> 1384 BUG_ON(mapcount != page_mapcount(page)); The root cause of the problem is a race of two threads in a multithreaded process. Thread B incurs a page fault on a virtual address that has never been accessed (PMD entry is zero) while Thread A is executing an madvise() system call on a virtual address within the same 2 MB (huge page) range. virtual address space .---------------------. | | | | .-|---------------------| | | | | | |<-- B(fault) | | | 2 MB | |/////////////////////|-. huge < |/////////////////////| > A(range) page | |/////////////////////|-' | | | | | | '-|---------------------| | | | | '---------------------' - Thread A is executing an madvise(..., MADV_DONTNEED) system call on the virtual address range "A(range)" shown in the picture. sys_madvise // Acquire the semaphore in shared mode. down_read(&current->mm->mmap_sem) ... madvise_vma switch (behavior) case MADV_DONTNEED: madvise_dontneed zap_page_range unmap_vmas unmap_page_range zap_pud_range zap_pmd_range // // Assume that this huge page has never been accessed. // I.e. content of the PMD entry is zero (not mapped). // if (pmd_trans_huge(*pmd)) { // We don't get here due to the above assumption. } // // Assume that Thread B incurred a page fault and .---------> // sneaks in here as shown below. | // | if (pmd_none_or_clear_bad(pmd)) | { | if (unlikely(pmd_bad(*pmd))) | pmd_clear_bad | { | pmd_ERROR | // Log "bad pmd ..." message here. | pmd_clear | // Clear the page's PMD entry. | // Thread B incremented the map count | // in page_add_new_anon_rmap(), but | // now the page is no longer mapped | // by a PMD entry (-> inconsistency). 
| } | } | v - Thread B is handling a page fault on virtual address "B(fault)" shown in the picture. ... do_page_fault __do_page_fault // Acquire the semaphore in shared mode. down_read_trylock(&mm->mmap_sem) ... handle_mm_fault if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) // We get here due to the above assumption (PMD entry is zero). do_huge_pmd_anonymous_page alloc_hugepage_vma // Allocate a new transparent huge page here. ... __do_huge_pmd_anonymous_page ... spin_lock(&mm->page_table_lock) ... page_add_new_anon_rmap // Here we increment the page's map count (starts at -1). atomic_set(&page->_mapcount, 0) set_pmd_at // Here we set the page's PMD entry which will be cleared // when Thread A calls pmd_clear_bad(). ... spin_unlock(&mm->page_table_lock) The mmap_sem does not prevent the race because both threads are acquiring it in shared mode (down_read). Thread B holds the page_table_lock while the page's map count and PMD table entry are updated. However, Thread A does not synchronize on that lock. ====== end quote ======= [akpm@linux-foundation.org: checkpatch fixes] Reported-by: Ulrich Obergfell <uobergfe@redhat.com> Signed-off-by: Andrea Arcangeli <aarcange@redhat.com> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Cc: Mel Gorman <mgorman@suse.de> Cc: Hugh Dickins <hughd@google.com> Cc: Dave Jones <davej@redhat.com> Acked-by: Larry Woodman <lwoodman@redhat.com> Acked-by: Rik van Riel <riel@redhat.com> Cc: <stable@vger.kernel.org> [2.6.38+] Cc: Mark Salter <msalter@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>'</s>
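Editor's note: on the caller side, the zap_pmd_range excerpt quoted in these messages implies a one-line substitution at every walker that holds mmap_sem in read mode. Using the mincore walker from the first of these records as the example, a hedged sketch (assuming the helper reconstructed above):

/* Walkers that can race with huge-page faults stop trusting a second
 * dereference of *pmd; the helper validates a single stable read. */
if (pmd_none_or_trans_huge_or_clear_bad(pmd))
        mincore_unmapped_range(vma, addr, next, vec);
else
        mincore_pte_range(vma, pmd, addr, next, vec);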
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: cdf_read_sector(const cdf_info_t *info, void *buf, size_t offs, size_t len, const cdf_header_t *h, cdf_secid_t id) { assert((size_t)CDF_SEC_SIZE(h) == len); return cdf_read(info, (off_t)CDF_SEC_POS(h, id), ((char *)buf) + offs, len); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119'], 'message': 'add more check found by cert's fuzzer.'</s>
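Editor's note: the message only says more checks were added for CERT's fuzzer findings; the obvious hole in the function above is that the sector id comes straight from the file. One plausible guard, an assumption rather than the author's exact patch (i_len is the input-length field cdf_info_t carries in file(1)):

/* Hypothetical: refuse sector positions that would read past the end
 * of the input before calling cdf_read(). */
if (id < 0 || (size_t)CDF_SEC_POS(h, id) + len > info->i_len) {
        errno = EFTYPE;
        return -1;
}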
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: cdf_dump_sat(const char *prefix, const cdf_sat_t *sat, size_t size) { size_t i, j, s = size / sizeof(cdf_secid_t); for (i = 0; i < sat->sat_len; i++) { (void)fprintf(stderr, "%s[%" SIZE_T_FORMAT "u]:\n%.6d: ", prefix, i, i * s); for (j = 0; j < s; j++) { (void)fprintf(stderr, "%5d, ", CDF_TOLE4(sat->sat_tab[s * i + j])); if ((j + 1) % 10 == 0) (void)fprintf(stderr, "\n%.6d: ", i * s + j + 1); } (void)fprintf(stderr, "\n"); } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119'], 'message': '- add float and double types - fix debug printf formats - fix short stream sizes - don't fail if we don't know about a type'</s>
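Editor's note: one of the message's bullet points, "fix debug printf formats", is visible in this very function: i and i * s are size_t but are printed with %.6d. The corrected calls, using the SIZE_T_FORMAT macro that later records in this set already use, would read:

/* size_t arguments need a size_t conversion specifier. */
(void)fprintf(stderr, "%s[%" SIZE_T_FORMAT "u]:\n%.6" SIZE_T_FORMAT "u: ",
    prefix, i, i * s);
(void)fprintf(stderr, "\n%.6" SIZE_T_FORMAT "u: ", i * s + j + 1);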
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: cdf_unpack_summary_info(const cdf_stream_t *sst, const cdf_header_t *h, cdf_summary_info_header_t *ssi, cdf_property_info_t **info, size_t *count) { size_t i, maxcount; const cdf_summary_info_header_t *si = CAST(const cdf_summary_info_header_t *, sst->sst_tab); const cdf_section_declaration_t *sd = CAST(const cdf_section_declaration_t *, (const void *) ((const char *)sst->sst_tab + CDF_SECTION_DECLARATION_OFFSET)); if (cdf_check_stream_offset(sst, h, si, sizeof(*si), __LINE__) == -1 || cdf_check_stream_offset(sst, h, sd, sizeof(*sd), __LINE__) == -1) return -1; ssi->si_byte_order = CDF_TOLE2(si->si_byte_order); ssi->si_os_version = CDF_TOLE2(si->si_os_version); ssi->si_os = CDF_TOLE2(si->si_os); ssi->si_class = si->si_class; cdf_swap_class(&ssi->si_class); ssi->si_count = CDF_TOLE2(si->si_count); *count = 0; maxcount = 0; *info = NULL; for (i = 0; i < CDF_TOLE4(si->si_count); i++) { if (i >= CDF_LOOP_LIMIT) { DPRINTF(("Unpack summary info loop limit")); errno = EFTYPE; return -1; } if (cdf_read_property_info(sst, h, CDF_TOLE4(sd->sd_offset), info, count, &maxcount) == -1) return -1; } return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119'], 'message': '- add float and double types - fix debug printf formats - fix short stream sizes - don't fail if we don't know about a type'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: cdf_dump_property_info(const cdf_property_info_t *info, size_t count) { cdf_timestamp_t tp; struct timespec ts; char buf[64]; size_t i, j; for (i = 0; i < count; i++) { cdf_print_property_name(buf, sizeof(buf), info[i].pi_id); (void)fprintf(stderr, "%" SIZE_T_FORMAT "u) %s: ", i, buf); switch (info[i].pi_type) { case CDF_NULL: break; case CDF_SIGNED16: (void)fprintf(stderr, "signed 16 [%hd]\n", info[i].pi_s16); break; case CDF_SIGNED32: (void)fprintf(stderr, "signed 32 [%d]\n", info[i].pi_s32); break; case CDF_UNSIGNED32: (void)fprintf(stderr, "unsigned 32 [%u]\n", info[i].pi_u32); break; case CDF_LENGTH32_STRING: (void)fprintf(stderr, "string %u [%.*s]\n", info[i].pi_str.s_len, info[i].pi_str.s_len, info[i].pi_str.s_buf); break; case CDF_LENGTH32_WSTRING: (void)fprintf(stderr, "string %u [", info[i].pi_str.s_len); for (j = 0; j < info[i].pi_str.s_len - 1; j++) (void)fputc(info[i].pi_str.s_buf[j << 1], stderr); (void)fprintf(stderr, "]\n"); break; case CDF_FILETIME: tp = info[i].pi_tp; if (tp < 1000000000000000LL) { cdf_print_elapsed_time(buf, sizeof(buf), tp); (void)fprintf(stderr, "timestamp %s\n", buf); } else { cdf_timestamp_to_timespec(&ts, tp); (void)fprintf(stderr, "timestamp %s", cdf_ctime(&ts.tv_sec)); } break; case CDF_CLIPBOARD: (void)fprintf(stderr, "CLIPBOARD %u\n", info[i].pi_u32); break; default: DPRINTF(("Don't know how to deal with %x\n", info[i].pi_type)); break; } } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119'], 'message': '- add float and double types - fix debug printf formats - fix short stream sizes - don't fail if we don't know about a type'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: cdf_read_long_sector_chain(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_secid_t sid, size_t len, cdf_stream_t *scn) { size_t ss = CDF_SEC_SIZE(h), i, j; ssize_t nr; scn->sst_len = cdf_count_chain(sat, sid, ss); scn->sst_dirlen = len; if (scn->sst_len == (size_t)-1) return -1; scn->sst_tab = calloc(scn->sst_len, ss); if (scn->sst_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read long sector chain loop limit")); errno = EFTYPE; goto out; } if (i >= scn->sst_len) { DPRINTF(("Out of bounds reading long sector chain " "%u > %u\n", i, scn->sst_len)); errno = EFTYPE; goto out; } if ((nr = cdf_read_sector(info, scn->sst_tab, i * ss, ss, h, sid)) != (ssize_t)ss) { if (i == scn->sst_len - 1 && nr > 0) { /* Last sector might be truncated */ return 0; } DPRINTF(("Reading long sector chain %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } return 0; out: free(scn->sst_tab); return -1; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119'], 'message': '- add float and double types - fix debug printf formats - fix short stream sizes - don't fail if we don't know about a type'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: cdf_read_ssat(const cdf_info_t *info, const cdf_header_t *h, const cdf_sat_t *sat, cdf_sat_t *ssat) { size_t i, j; size_t ss = CDF_SEC_SIZE(h); cdf_secid_t sid = h->h_secid_first_sector_in_short_sat; ssat->sat_len = cdf_count_chain(sat, sid, CDF_SEC_SIZE(h)); if (ssat->sat_len == (size_t)-1) return -1; ssat->sat_tab = CAST(cdf_secid_t *, calloc(ssat->sat_len, ss)); if (ssat->sat_tab == NULL) return -1; for (j = i = 0; sid >= 0; i++, j++) { if (j >= CDF_LOOP_LIMIT) { DPRINTF(("Read short sat sector loop limit")); errno = EFTYPE; goto out; } if (i >= ssat->sat_len) { DPRINTF(("Out of bounds reading short sector chain " "%u > %u\n", i, ssat->sat_len)); errno = EFTYPE; goto out; } if (cdf_read_sector(info, ssat->sat_tab, i * ss, ss, h, sid) != (ssize_t)ss) { DPRINTF(("Reading short sat sector %d", sid)); goto out; } sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]); } return 0; out: free(ssat->sat_tab); return -1; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119'], 'message': '- add float and double types - fix debug printf formats - fix short stream sizes - don't fail if we don't know about a type'</s>
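Editor's note: the CDF_LOOP_LIMIT guard that appears in this and the neighbouring records is the generic defence against crafted sector chains that loop back on themselves. A minimal standalone demonstration of the pattern (illustrative, not file(1) code):

#include <stdio.h>

#define LOOP_LIMIT 8

int main(void)
{
        int next[4] = {1, 2, 1, -1};   /* sectors 1 and 2 form a cycle */
        int sid = 0, steps = 0;
        while (sid >= 0) {
                if (steps++ >= LOOP_LIMIT) {
                        puts("cycle detected: abort");
                        return 1;      /* the parser's errno = EFTYPE path */
                }
                sid = next[sid];
        }
        puts("chain terminated normally");
        return 0;
}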
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h, uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount) { const cdf_section_header_t *shp; cdf_section_header_t sh; const uint8_t *p, *q, *e; int16_t s16; int32_t s32; uint32_t u32; int64_t s64; uint64_t u64; cdf_timestamp_t tp; size_t i, o, o4, nelements, j; cdf_property_info_t *inp; if (offs > UINT32_MAX / 4) { errno = EFTYPE; goto out; } shp = CAST(const cdf_section_header_t *, (const void *) ((const char *)sst->sst_tab + offs)); if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1) goto out; sh.sh_len = CDF_TOLE4(shp->sh_len); #define CDF_SHLEN_LIMIT (UINT32_MAX / 8) if (sh.sh_len > CDF_SHLEN_LIMIT) { errno = EFTYPE; goto out; } sh.sh_properties = CDF_TOLE4(shp->sh_properties); #define CDF_PROP_LIMIT (UINT32_MAX / (4 * sizeof(*inp))) if (sh.sh_properties > CDF_PROP_LIMIT) goto out; DPRINTF(("section len: %u properties %u\n", sh.sh_len, sh.sh_properties)); if (*maxcount) { if (*maxcount > CDF_PROP_LIMIT) goto out; *maxcount += sh.sh_properties; inp = CAST(cdf_property_info_t *, realloc(*info, *maxcount * sizeof(*inp))); } else { *maxcount = sh.sh_properties; inp = CAST(cdf_property_info_t *, malloc(*maxcount * sizeof(*inp))); } if (inp == NULL) goto out; *info = inp; inp += *count; *count += sh.sh_properties; p = CAST(const uint8_t *, (const void *) ((const char *)(const void *)sst->sst_tab + offs + sizeof(sh))); e = CAST(const uint8_t *, (const void *) (((const char *)(const void *)shp) + sh.sh_len)); if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1) goto out; for (i = 0; i < sh.sh_properties; i++) { q = (const uint8_t *)(const void *) ((const char *)(const void *)p + CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t); if (q > e) { DPRINTF(("Ran of the end %p > %p\n", q, e)); goto out; } inp[i].pi_id = CDF_GETUINT32(p, i << 1); inp[i].pi_type = CDF_GETUINT32(q, 0); DPRINTF(("%d) id=%x type=%x offs=%x,%d\n", i, inp[i].pi_id, inp[i].pi_type, q - p, CDF_GETUINT32(p, (i << 1) + 1))); if (inp[i].pi_type & CDF_VECTOR) { nelements = CDF_GETUINT32(q, 1); o = 2; } else { nelements = 1; o = 1; } o4 = o * sizeof(uint32_t); if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED)) goto unknown; switch (inp[i].pi_type & CDF_TYPEMASK) { case CDF_NULL: case CDF_EMPTY: break; case CDF_SIGNED16: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s16, &q[o4], sizeof(s16)); inp[i].pi_s16 = CDF_TOLE2(s16); break; case CDF_SIGNED32: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s32, &q[o4], sizeof(s32)); inp[i].pi_s32 = CDF_TOLE4((uint32_t)s32); break; case CDF_BOOL: case CDF_UNSIGNED32: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u32, &q[o4], sizeof(u32)); inp[i].pi_u32 = CDF_TOLE4(u32); break; case CDF_SIGNED64: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s64, &q[o4], sizeof(s64)); inp[i].pi_s64 = CDF_TOLE8((uint64_t)s64); break; case CDF_UNSIGNED64: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u64, &q[o4], sizeof(u64)); inp[i].pi_u64 = CDF_TOLE8((uint64_t)u64); break; case CDF_LENGTH32_STRING: case CDF_LENGTH32_WSTRING: if (nelements > 1) { size_t nelem = inp - *info; if (*maxcount > CDF_PROP_LIMIT || nelements > CDF_PROP_LIMIT) goto out; *maxcount += nelements; inp = 
CAST(cdf_property_info_t *, realloc(*info, *maxcount * sizeof(*inp))); if (inp == NULL) goto out; *info = inp; inp = *info + nelem; } DPRINTF(("nelements = %d\n", nelements)); for (j = 0; j < nelements; j++, i++) { uint32_t l = CDF_GETUINT32(q, o); inp[i].pi_str.s_len = l; inp[i].pi_str.s_buf = (const char *) (const void *)(&q[o4 + sizeof(l)]); DPRINTF(("l = %d, r = %d, s = %s\n", l, CDF_ROUND(l, sizeof(l)), inp[i].pi_str.s_buf)); l = 4 + (uint32_t)CDF_ROUND(l, sizeof(l)); o += l >> 2; if (q + o >= e) goto out; o4 = o * sizeof(uint32_t); } i--; break; case CDF_FILETIME: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&tp, &q[o4], sizeof(tp)); inp[i].pi_tp = CDF_TOLE8((uint64_t)tp); break; case CDF_CLIPBOARD: if (inp[i].pi_type & CDF_VECTOR) goto unknown; break; default: unknown: DPRINTF(("Don't know how to deal with %x\n", inp[i].pi_type)); goto out; } } return 0; out: free(*info); return -1; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119'], 'message': '- add float and double types - fix debug printf formats - fix short stream sizes - don't fail if we don't know about a type'</s>
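Editor's note: the CDF_PROP_LIMIT and CDF_SHLEN_LIMIT macros above implement a standard rule: clamp any count read from the file so that count * sizeof(element) cannot overflow before it reaches malloc or realloc. A standalone sketch of the same pattern:

#include <stdint.h>
#include <stdlib.h>

/* Overflow-safe array sizing, the pattern behind CDF_PROP_LIMIT. */
static void *alloc_array(size_t count, size_t size)
{
        if (size != 0 && count > SIZE_MAX / size)
                return NULL;           /* count * size would wrap */
        return malloc(count * size);
}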
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h, uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount) { const cdf_section_header_t *shp; cdf_section_header_t sh; const uint8_t *p, *q, *e; int16_t s16; int32_t s32; uint32_t u32; int64_t s64; uint64_t u64; cdf_timestamp_t tp; size_t i, o, o4, nelements, j; cdf_property_info_t *inp; if (offs > UINT32_MAX / 4) { errno = EFTYPE; goto out; } shp = CAST(const cdf_section_header_t *, (const void *) ((const char *)sst->sst_tab + offs)); if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1) goto out; sh.sh_len = CDF_TOLE4(shp->sh_len); #define CDF_SHLEN_LIMIT (UINT32_MAX / 8) if (sh.sh_len > CDF_SHLEN_LIMIT) { errno = EFTYPE; goto out; } sh.sh_properties = CDF_TOLE4(shp->sh_properties); #define CDF_PROP_LIMIT (UINT32_MAX / (4 * sizeof(*inp))) if (sh.sh_properties > CDF_PROP_LIMIT) goto out; DPRINTF(("section len: %u properties %u\n", sh.sh_len, sh.sh_properties)); if (*maxcount) { if (*maxcount > CDF_PROP_LIMIT) goto out; *maxcount += sh.sh_properties; inp = CAST(cdf_property_info_t *, realloc(*info, *maxcount * sizeof(*inp))); } else { *maxcount = sh.sh_properties; inp = CAST(cdf_property_info_t *, malloc(*maxcount * sizeof(*inp))); } if (inp == NULL) goto out; *info = inp; inp += *count; *count += sh.sh_properties; p = CAST(const uint8_t *, (const void *) ((const char *)(const void *)sst->sst_tab + offs + sizeof(sh))); e = CAST(const uint8_t *, (const void *) (((const char *)(const void *)shp) + sh.sh_len)); if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1) goto out; for (i = 0; i < sh.sh_properties; i++) { q = (const uint8_t *)(const void *) ((const char *)(const void *)p + CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t); if (q > e) { DPRINTF(("Ran of the end %p > %p\n", q, e)); goto out; } inp[i].pi_id = CDF_GETUINT32(p, i << 1); inp[i].pi_type = CDF_GETUINT32(q, 0); DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n", i, inp[i].pi_id, inp[i].pi_type, q - p, CDF_GETUINT32(p, (i << 1) + 1))); if (inp[i].pi_type & CDF_VECTOR) { nelements = CDF_GETUINT32(q, 1); o = 2; } else { nelements = 1; o = 1; } o4 = o * sizeof(uint32_t); if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED)) goto unknown; switch (inp[i].pi_type & CDF_TYPEMASK) { case CDF_NULL: case CDF_EMPTY: break; case CDF_SIGNED16: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s16, &q[o4], sizeof(s16)); inp[i].pi_s16 = CDF_TOLE2(s16); break; case CDF_SIGNED32: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s32, &q[o4], sizeof(s32)); inp[i].pi_s32 = CDF_TOLE4((uint32_t)s32); break; case CDF_BOOL: case CDF_UNSIGNED32: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u32, &q[o4], sizeof(u32)); inp[i].pi_u32 = CDF_TOLE4(u32); break; case CDF_SIGNED64: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&s64, &q[o4], sizeof(s64)); inp[i].pi_s64 = CDF_TOLE8((uint64_t)s64); break; case CDF_UNSIGNED64: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u64, &q[o4], sizeof(u64)); inp[i].pi_u64 = CDF_TOLE8((uint64_t)u64); break; case CDF_FLOAT: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u32, &q[o4], sizeof(u32)); u32 = CDF_TOLE4(u32); memcpy(&inp[i].pi_f, &u32, sizeof(inp[i].pi_f)); break; case CDF_DOUBLE: if 
(inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&u64, &q[o4], sizeof(u64)); u64 = CDF_TOLE8((uint64_t)u64); memcpy(&inp[i].pi_d, &u64, sizeof(inp[i].pi_d)); break; case CDF_LENGTH32_STRING: case CDF_LENGTH32_WSTRING: if (nelements > 1) { size_t nelem = inp - *info; if (*maxcount > CDF_PROP_LIMIT || nelements > CDF_PROP_LIMIT) goto out; *maxcount += nelements; inp = CAST(cdf_property_info_t *, realloc(*info, *maxcount * sizeof(*inp))); if (inp == NULL) goto out; *info = inp; inp = *info + nelem; } DPRINTF(("nelements = %" SIZE_T_FORMAT "u\n", nelements)); for (j = 0; j < nelements; j++, i++) { uint32_t l = CDF_GETUINT32(q, o); inp[i].pi_str.s_len = l; inp[i].pi_str.s_buf = (const char *) (const void *)(&q[o4 + sizeof(l)]); DPRINTF(("l = %d, r = %" SIZE_T_FORMAT "u, s = %s\n", l, CDF_ROUND(l, sizeof(l)), inp[i].pi_str.s_buf)); if (l & 1) l++; o += l >> 1; if (q + o >= e) goto out; o4 = o * sizeof(uint32_t); } i--; break; case CDF_FILETIME: if (inp[i].pi_type & CDF_VECTOR) goto unknown; (void)memcpy(&tp, &q[o4], sizeof(tp)); inp[i].pi_tp = CDF_TOLE8((uint64_t)tp); break; case CDF_CLIPBOARD: if (inp[i].pi_type & CDF_VECTOR) goto unknown; break; default: unknown: DPRINTF(("Don't know how to deal with %x\n", inp[i].pi_type)); break; } } return 0; out: free(*info); return -1; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119'], 'message': 'Fix bounds checks again.'</s>
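Editor's note: this record is the patched version of the previous one (float and double cases added, the unknown-type default now breaks instead of aborting the parse), and its message, "Fix bounds checks again", points at the q pointer computed from a file-supplied offset. Checking only "q > e" misses offsets small enough to wrap q below p; validating the offset in signed index space before forming the pointer, as sketched below, is one way such a check is typically tightened (an illustration, not the author's exact patch):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

int main(void)
{
        uint8_t section[16];
        size_t avail = sizeof(section);
        uint32_t off = 4;   /* attacker-controlled offset from the file */
        /* q would be section + off - 8; do the arithmetic as a signed
         * index instead of forming an out-of-range pointer. */
        ptrdiff_t qi = (ptrdiff_t)off - (ptrdiff_t)(2 * sizeof(uint32_t));
        if (qi < 0 || (size_t)qi > avail)
                puts("reject: q would fall outside [p, e]");
        else
                printf("ok: q = section + %td\n", qi);
        return 0;
}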
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: _gnutls_ciphertext2compressed (gnutls_session_t session, opaque * compress_data, int compress_size, gnutls_datum_t ciphertext, uint8_t type, record_parameters_st * params) { uint8_t MAC[MAX_HASH_SIZE]; uint16_t c_length; uint8_t pad; int length; uint16_t blocksize; int ret, i, pad_failed = 0; opaque preamble[PREAMBLE_SIZE]; int preamble_size; int ver = gnutls_protocol_get_version (session); int hash_size = _gnutls_hash_get_algo_len (params->mac_algorithm); blocksize = gnutls_cipher_get_block_size (params->cipher_algorithm); /* actual decryption (inplace) */ switch (_gnutls_cipher_is_block (params->cipher_algorithm)) { case CIPHER_STREAM: if ((ret = _gnutls_cipher_decrypt (&params->read.cipher_state, ciphertext.data, ciphertext.size)) < 0) { gnutls_assert (); return ret; } length = ciphertext.size - hash_size; break; case CIPHER_BLOCK: if ((ciphertext.size < blocksize) || (ciphertext.size % blocksize != 0)) { gnutls_assert (); return GNUTLS_E_DECRYPTION_FAILED; } if ((ret = _gnutls_cipher_decrypt (&params->read.cipher_state, ciphertext.data, ciphertext.size)) < 0) { gnutls_assert (); return ret; } /* ignore the IV in TLS 1.1. */ if (_gnutls_version_has_explicit_iv (session->security_parameters.version)) { ciphertext.size -= blocksize; ciphertext.data += blocksize; if (ciphertext.size == 0) { gnutls_assert (); return GNUTLS_E_DECRYPTION_FAILED; } } pad = ciphertext.data[ciphertext.size - 1] + 1; /* pad */ if ((int) pad > (int) ciphertext.size - hash_size) { gnutls_assert (); _gnutls_record_log ("REC[%p]: Short record length %d > %d - %d (under attack?)\n", session, pad, ciphertext.size, hash_size); /* We do not fail here. We check below for the * the pad_failed. If zero means success. */ pad_failed = GNUTLS_E_DECRYPTION_FAILED; } length = ciphertext.size - hash_size - pad; /* Check the pading bytes (TLS 1.x) */ if (_gnutls_version_has_variable_padding (ver) && pad_failed == 0) for (i = 2; i < pad; i++) { if (ciphertext.data[ciphertext.size - i] != ciphertext.data[ciphertext.size - 1]) pad_failed = GNUTLS_E_DECRYPTION_FAILED; } break; default: gnutls_assert (); return GNUTLS_E_INTERNAL_ERROR; } if (length < 0) length = 0; c_length = _gnutls_conv_uint16 ((uint16_t) length); /* Pass the type, version, length and compressed through * MAC. */ if (params->mac_algorithm != GNUTLS_MAC_NULL) { digest_hd_st td; ret = mac_init (&td, params->mac_algorithm, params->read.mac_secret.data, params->read.mac_secret.size, ver); if (ret < 0) { gnutls_assert (); return GNUTLS_E_INTERNAL_ERROR; } preamble_size = make_preamble (UINT64DATA (params->read.sequence_number), type, c_length, ver, preamble); mac_hash (&td, preamble, preamble_size, ver); if (length > 0) mac_hash (&td, ciphertext.data, length, ver); mac_deinit (&td, MAC, ver); } /* This one was introduced to avoid a timing attack against the TLS * 1.0 protocol. */ if (pad_failed != 0) { gnutls_assert (); return pad_failed; } /* HMAC was not the same. */ if (memcmp (MAC, &ciphertext.data[length], hash_size) != 0) { gnutls_assert (); return GNUTLS_E_DECRYPTION_FAILED; } /* copy the decrypted stuff to compress_data. 
*/ if (compress_size < length) { gnutls_assert (); return GNUTLS_E_DECOMPRESSION_FAILED; } memcpy (compress_data, ciphertext.data, length); return length; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-310'], 'message': 'better check decrypted data.'</s>
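Editor's note: the pad handling above is the delicate part of this record: pad is attacker-influenced, the explicit int casts guard against unsigned wraparound, and failure is recorded in pad_failed rather than returned immediately so the MAC is still computed (the timing countermeasure the code's comments mention). A standalone sketch of that shape, illustrative rather than the project's exact fix:

#include <stddef.h>

static size_t safe_plaintext_length(size_t ct_size, size_t hash_size,
                                    size_t pad, int *pad_failed)
{
        if (ct_size < hash_size) {       /* too short to hold a MAC */
                *pad_failed = 1;
                return 0;
        }
        if (pad > ct_size - hash_size) { /* padding claims too many bytes */
                *pad_failed = 1;         /* note it, keep processing */
                pad = 0;                 /* harmless substitute value */
        }
        return ct_size - hash_size - pad;
}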
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: compressed_to_ciphertext (gnutls_session_t session, uint8_t * cipher_data, int cipher_size, gnutls_datum_t *compressed, content_type_t type, record_parameters_st * params) { uint8_t * tag_ptr = NULL; uint8_t pad; int length, length_to_encrypt, ret; uint8_t preamble[MAX_PREAMBLE_SIZE]; int preamble_size; int tag_size = _gnutls_auth_cipher_tag_len (&params->write.cipher_state); int blocksize = gnutls_cipher_get_block_size (params->cipher_algorithm); unsigned block_algo = _gnutls_cipher_is_block (params->cipher_algorithm); uint8_t *data_ptr; int ver = gnutls_protocol_get_version (session); int explicit_iv = _gnutls_version_has_explicit_iv (session->security_parameters.version); int auth_cipher = _gnutls_auth_cipher_is_aead(&params->write.cipher_state); int random_pad; /* We don't use long padding if requested or if we are in DTLS. */ if (session->internals.priorities.no_padding == 0 && (!IS_DTLS(session))) random_pad = 1; else random_pad = 0; _gnutls_hard_log("ENC[%p]: cipher: %s, MAC: %s, Epoch: %u\n", session, gnutls_cipher_get_name(params->cipher_algorithm), gnutls_mac_get_name(params->mac_algorithm), (unsigned int)params->epoch); preamble_size = make_preamble (UINT64DATA (params->write.sequence_number), type, compressed->size, ver, preamble); /* Calculate the encrypted length (padding etc.) */ length_to_encrypt = length = calc_enc_length (session, compressed->size, tag_size, &pad, random_pad, block_algo, auth_cipher, blocksize); if (length < 0) { return gnutls_assert_val(length); } /* copy the encrypted data to cipher_data. */ if (cipher_size < length) { return gnutls_assert_val(GNUTLS_E_MEMORY_ERROR); } data_ptr = cipher_data; if (explicit_iv) { if (block_algo == CIPHER_BLOCK) { /* copy the random IV. */ ret = _gnutls_rnd (GNUTLS_RND_NONCE, data_ptr, blocksize); if (ret < 0) return gnutls_assert_val(ret); _gnutls_auth_cipher_setiv(&params->write.cipher_state, data_ptr, blocksize); data_ptr += blocksize; cipher_data += blocksize; length_to_encrypt -= blocksize; } else if (auth_cipher) { uint8_t nonce[blocksize]; /* Values in AEAD are pretty fixed in TLS 1.2 for 128-bit block */ if (params->write.IV.data == NULL || params->write.IV.size != AEAD_IMPLICIT_DATA_SIZE) return gnutls_assert_val(GNUTLS_E_INTERNAL_ERROR); /* Instead of generating a new nonce on every packet, we use the * write.sequence_number (It is a MAY on RFC 5288). */ memcpy(nonce, params->write.IV.data, params->write.IV.size); memcpy(&nonce[AEAD_IMPLICIT_DATA_SIZE], UINT64DATA(params->write.sequence_number), 8); _gnutls_auth_cipher_setiv(&params->write.cipher_state, nonce, AEAD_IMPLICIT_DATA_SIZE+AEAD_EXPLICIT_DATA_SIZE); /* copy the explicit part */ memcpy(data_ptr, &nonce[AEAD_IMPLICIT_DATA_SIZE], AEAD_EXPLICIT_DATA_SIZE); data_ptr += AEAD_EXPLICIT_DATA_SIZE; cipher_data += AEAD_EXPLICIT_DATA_SIZE; /* In AEAD ciphers we don't encrypt the tag */ length_to_encrypt -= AEAD_EXPLICIT_DATA_SIZE + tag_size; } } else { /* AEAD ciphers have an explicit IV. Shouldn't be used otherwise. 
*/ if (auth_cipher) return gnutls_assert_val(GNUTLS_E_INTERNAL_ERROR); } memcpy (data_ptr, compressed->data, compressed->size); data_ptr += compressed->size; if (tag_size > 0) { tag_ptr = data_ptr; data_ptr += tag_size; } if (block_algo == CIPHER_BLOCK && pad > 0) { memset (data_ptr, pad - 1, pad); } /* add the authenticate data */ ret = _gnutls_auth_cipher_add_auth(&params->write.cipher_state, preamble, preamble_size); if (ret < 0) return gnutls_assert_val(ret); /* Actual encryption (inplace). */ ret = _gnutls_auth_cipher_encrypt2_tag (&params->write.cipher_state, cipher_data, length_to_encrypt, cipher_data, cipher_size, tag_ptr, tag_size, compressed->size); if (ret < 0) return gnutls_assert_val(ret); return length; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-310'], 'message': 'changes in packet parsing.'</s>
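Editor's note: the nonce construction in the record above is the RFC 5288 layout for TLS 1.2 AES-GCM: a 4-byte implicit salt taken from the key block concatenated with an 8-byte explicit part, here the record sequence number, which is why only the explicit half is copied onto the wire. A sketch with the sizes the code's constants imply:

#include <string.h>
#include <stdint.h>

static void build_gcm_nonce(uint8_t nonce[12], const uint8_t salt[4],
                            const uint8_t seq_be[8])
{
        memcpy(nonce, salt, 4);        /* implicit part (write.IV) */
        memcpy(nonce + 4, seq_be, 8);  /* explicit part, sent on the wire */
}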
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm *kvm = filp->private_data; void __user *argp = (void __user *)arg; int r = -ENOTTY; /* * This union makes it completely explicit to gcc-3.x * that these two variables' stack usage should be * combined, not added together. */ union { struct kvm_pit_state ps; struct kvm_pit_state2 ps2; struct kvm_pit_config pit_config; } u; switch (ioctl) { case KVM_SET_TSS_ADDR: r = kvm_vm_ioctl_set_tss_addr(kvm, arg); if (r < 0) goto out; break; case KVM_SET_IDENTITY_MAP_ADDR: { u64 ident_addr; r = -EFAULT; if (copy_from_user(&ident_addr, argp, sizeof ident_addr)) goto out; r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr); if (r < 0) goto out; break; } case KVM_SET_NR_MMU_PAGES: r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg); if (r) goto out; break; case KVM_GET_NR_MMU_PAGES: r = kvm_vm_ioctl_get_nr_mmu_pages(kvm); break; case KVM_CREATE_IRQCHIP: { struct kvm_pic *vpic; mutex_lock(&kvm->lock); r = -EEXIST; if (kvm->arch.vpic) goto create_irqchip_unlock; r = -ENOMEM; vpic = kvm_create_pic(kvm); if (vpic) { r = kvm_ioapic_init(kvm); if (r) { mutex_lock(&kvm->slots_lock); kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_master); kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_slave); kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_eclr); mutex_unlock(&kvm->slots_lock); kfree(vpic); goto create_irqchip_unlock; } } else goto create_irqchip_unlock; smp_wmb(); kvm->arch.vpic = vpic; smp_wmb(); r = kvm_setup_default_irq_routing(kvm); if (r) { mutex_lock(&kvm->slots_lock); mutex_lock(&kvm->irq_lock); kvm_ioapic_destroy(kvm); kvm_destroy_pic(kvm); mutex_unlock(&kvm->irq_lock); mutex_unlock(&kvm->slots_lock); } create_irqchip_unlock: mutex_unlock(&kvm->lock); break; } case KVM_CREATE_PIT: u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY; goto create_pit; case KVM_CREATE_PIT2: r = -EFAULT; if (copy_from_user(&u.pit_config, argp, sizeof(struct kvm_pit_config))) goto out; create_pit: mutex_lock(&kvm->slots_lock); r = -EEXIST; if (kvm->arch.vpit) goto create_pit_unlock; r = -ENOMEM; kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); if (kvm->arch.vpit) r = 0; create_pit_unlock: mutex_unlock(&kvm->slots_lock); break; case KVM_IRQ_LINE_STATUS: case KVM_IRQ_LINE: { struct kvm_irq_level irq_event; r = -EFAULT; if (copy_from_user(&irq_event, argp, sizeof irq_event)) goto out; r = -ENXIO; if (irqchip_in_kernel(kvm)) { __s32 status; status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irq_event.irq, irq_event.level); if (ioctl == KVM_IRQ_LINE_STATUS) { r = -EFAULT; irq_event.status = status; if (copy_to_user(argp, &irq_event, sizeof irq_event)) goto out; } r = 0; } break; } case KVM_GET_IRQCHIP: { /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ struct kvm_irqchip *chip; chip = memdup_user(argp, sizeof(*chip)); if (IS_ERR(chip)) { r = PTR_ERR(chip); goto out; } r = -ENXIO; if (!irqchip_in_kernel(kvm)) goto get_irqchip_out; r = kvm_vm_ioctl_get_irqchip(kvm, chip); if (r) goto get_irqchip_out; r = -EFAULT; if (copy_to_user(argp, chip, sizeof *chip)) goto get_irqchip_out; r = 0; get_irqchip_out: kfree(chip); if (r) goto out; break; } case KVM_SET_IRQCHIP: { /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ struct kvm_irqchip *chip; chip = memdup_user(argp, sizeof(*chip)); if (IS_ERR(chip)) { r = 
PTR_ERR(chip); goto out; } r = -ENXIO; if (!irqchip_in_kernel(kvm)) goto set_irqchip_out; r = kvm_vm_ioctl_set_irqchip(kvm, chip); if (r) goto set_irqchip_out; r = 0; set_irqchip_out: kfree(chip); if (r) goto out; break; } case KVM_GET_PIT: { r = -EFAULT; if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state))) goto out; r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_get_pit(kvm, &u.ps); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state))) goto out; r = 0; break; } case KVM_SET_PIT: { r = -EFAULT; if (copy_from_user(&u.ps, argp, sizeof u.ps)) goto out; r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_set_pit(kvm, &u.ps); if (r) goto out; r = 0; break; } case KVM_GET_PIT2: { r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &u.ps2, sizeof(u.ps2))) goto out; r = 0; break; } case KVM_SET_PIT2: { r = -EFAULT; if (copy_from_user(&u.ps2, argp, sizeof(u.ps2))) goto out; r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); if (r) goto out; r = 0; break; } case KVM_REINJECT_CONTROL: { struct kvm_reinject_control control; r = -EFAULT; if (copy_from_user(&control, argp, sizeof(control))) goto out; r = kvm_vm_ioctl_reinject(kvm, &control); if (r) goto out; r = 0; break; } case KVM_XEN_HVM_CONFIG: { r = -EFAULT; if (copy_from_user(&kvm->arch.xen_hvm_config, argp, sizeof(struct kvm_xen_hvm_config))) goto out; r = -EINVAL; if (kvm->arch.xen_hvm_config.flags) goto out; r = 0; break; } case KVM_SET_CLOCK: { struct kvm_clock_data user_ns; u64 now_ns; s64 delta; r = -EFAULT; if (copy_from_user(&user_ns, argp, sizeof(user_ns))) goto out; r = -EINVAL; if (user_ns.flags) goto out; r = 0; local_irq_disable(); now_ns = get_kernel_ns(); delta = user_ns.clock - now_ns; local_irq_enable(); kvm->arch.kvmclock_offset = delta; break; } case KVM_GET_CLOCK: { struct kvm_clock_data user_ns; u64 now_ns; local_irq_disable(); now_ns = get_kernel_ns(); user_ns.clock = kvm->arch.kvmclock_offset + now_ns; local_irq_enable(); user_ns.flags = 0; memset(&user_ns.pad, 0, sizeof(user_ns.pad)); r = -EFAULT; if (copy_to_user(argp, &user_ns, sizeof(user_ns))) goto out; r = 0; break; } default: ; } out: return r; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-399'], 'message': 'KVM: Ensure all vcpus are consistent with in-kernel irqchip settings If some vcpus are created before KVM_CREATE_IRQCHIP, then irqchip_in_kernel() and vcpu->arch.apic will be inconsistent, leading to potential NULL pointer dereferences. Fix by: - ensuring that no vcpus are installed when KVM_CREATE_IRQCHIP is called - ensuring that a vcpu has an apic if it is installed after KVM_CREATE_IRQCHIP This is somewhat long winded because vcpu->arch.apic is created without kvm->lock held. Based on earlier patch by Michael Ellerman. Signed-off-by: Michael Ellerman <michael@ellerman.id.au> Signed-off-by: Avi Kivity <avi@redhat.com>'</s>
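The fix described above is an ordering invariant rather than a local patch: KVM_CREATE_IRQCHIP must fail once any vcpu exists, and a vcpu created afterwards must carry an apic, so irqchip_in_kernel() and vcpu->arch.apic can never disagree. A self-contained toy model of that invariant, assuming nothing about the kernel's actual structures (all names below are illustrative stand-ins):

#include <stdbool.h>
#include <stdio.h>

/* Toy model: an irqchip may only be created while no vcpus exist, so a
 * vcpu has an apic exactly when the irqchip predates it. Illustrative
 * stand-ins, not the kernel's types. */
struct vm   { bool irqchip; int online_vcpus; };
struct vcpu { bool apic; };

static int create_irqchip(struct vm *vm)
{
    if (vm->online_vcpus > 0)
        return -1;                 /* the fix: fail once vcpus exist */
    vm->irqchip = true;
    return 0;
}

static void create_vcpu(struct vm *vm, struct vcpu *v)
{
    v->apic = vm->irqchip;         /* apic iff irqchip existed first */
    vm->online_vcpus++;
}

int main(void)
{
    struct vm vm = { false, 0 };
    struct vcpu v;
    create_vcpu(&vm, &v);
    printf("irqchip after vcpu: %d\n", create_irqchip(&vm)); /* -1 */
    return 0;
}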
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: int __kvm_set_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, int user_alloc) { int r; gfn_t base_gfn; unsigned long npages; unsigned long i; struct kvm_memory_slot *memslot; struct kvm_memory_slot old, new; struct kvm_memslots *slots, *old_memslots; r = -EINVAL; /* General sanity checks */ if (mem->memory_size & (PAGE_SIZE - 1)) goto out; if (mem->guest_phys_addr & (PAGE_SIZE - 1)) goto out; /* We can read the guest memory with __xxx_user() later on. */ if (user_alloc && ((mem->userspace_addr & (PAGE_SIZE - 1)) || !access_ok(VERIFY_WRITE, (void __user *)(unsigned long)mem->userspace_addr, mem->memory_size))) goto out; if (mem->slot >= KVM_MEM_SLOTS_NUM) goto out; if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) goto out; memslot = id_to_memslot(kvm->memslots, mem->slot); base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; npages = mem->memory_size >> PAGE_SHIFT; r = -EINVAL; if (npages > KVM_MEM_MAX_NR_PAGES) goto out; if (!npages) mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES; new = old = *memslot; new.id = mem->slot; new.base_gfn = base_gfn; new.npages = npages; new.flags = mem->flags; /* Disallow changing a memory slot's size. */ r = -EINVAL; if (npages && old.npages && npages != old.npages) goto out_free; /* Check for overlaps */ r = -EEXIST; for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { struct kvm_memory_slot *s = &kvm->memslots->memslots[i]; if (s == memslot || !s->npages) continue; if (!((base_gfn + npages <= s->base_gfn) || (base_gfn >= s->base_gfn + s->npages))) goto out_free; } /* Free page dirty bitmap if unneeded */ if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES)) new.dirty_bitmap = NULL; r = -ENOMEM; /* Allocate if a slot is being created */ if (npages && !old.npages) { new.user_alloc = user_alloc; new.userspace_addr = mem->userspace_addr; #ifndef CONFIG_S390 new.rmap = vzalloc(npages * sizeof(*new.rmap)); if (!new.rmap) goto out_free; #endif /* not defined CONFIG_S390 */ if (kvm_arch_create_memslot(&new, npages)) goto out_free; } /* Allocate page dirty bitmap if needed */ if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { if (kvm_create_dirty_bitmap(&new) < 0) goto out_free; /* destroy any largepage mappings for dirty tracking */ } if (!npages) { struct kvm_memory_slot *slot; r = -ENOMEM; slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots), GFP_KERNEL); if (!slots) goto out_free; slot = id_to_memslot(slots, mem->slot); slot->flags |= KVM_MEMSLOT_INVALID; update_memslots(slots, NULL); old_memslots = kvm->memslots; rcu_assign_pointer(kvm->memslots, slots); synchronize_srcu_expedited(&kvm->srcu); /* From this point no new shadow pages pointing to a deleted * memslot will be created. 
* * validation of sp->gfn happens in: * - gfn_to_hva (kvm_read_guest, gfn_to_pfn) * - kvm_is_visible_gfn (mmu_check_roots) */ kvm_arch_flush_shadow(kvm); kfree(old_memslots); } r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc); if (r) goto out_free; /* map the pages in iommu page table */ if (npages) { r = kvm_iommu_map_pages(kvm, &new); if (r) goto out_free; } r = -ENOMEM; slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots), GFP_KERNEL); if (!slots) goto out_free; /* actual memory is freed via old in kvm_free_physmem_slot below */ if (!npages) { new.rmap = NULL; new.dirty_bitmap = NULL; memset(&new.arch, 0, sizeof(new.arch)); } update_memslots(slots, &new); old_memslots = kvm->memslots; rcu_assign_pointer(kvm->memslots, slots); synchronize_srcu_expedited(&kvm->srcu); kvm_arch_commit_memory_region(kvm, mem, old, user_alloc); /* * If the new memory slot is created, we need to clear all * mmio sptes. */ if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT) kvm_arch_flush_shadow(kvm); kvm_free_physmem_slot(&old, &new); kfree(old_memslots); return 0; out_free: kvm_free_physmem_slot(&new, &old); out: return r; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-264'], 'message': 'KVM: unmap pages from the iommu when slots are removed We've been adding new mappings, but not destroying old mappings. This can lead to a page leak as pages are pinned using get_user_pages, but only unpinned with put_page if they still exist in the memslots list on vm shutdown. A memslot that is destroyed while an iommu domain is enabled for the guest will therefore result in an elevated page reference count that is never cleared. Additionally, without this fix, the iommu is only programmed with the first translation for a gpa. This can result in peer-to-peer errors if a mapping is destroyed and replaced by a new mapping at the same gpa as the iommu will still be pointing to the original, pinned memory address. Signed-off-by: Alex Williamson <alex.williamson@redhat.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>'</s>
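The leak this answer describes is a pin/unpin imbalance: mapping a slot into the iommu pins its pages via get_user_pages, and before the fix the slot-removal path never unpinned them, leaving refcounts elevated and the iommu pointing at the stale translation. A toy refcount model of the removal-path unmap the commit adds (names are illustrative):

#include <stdio.h>

/* Toy model: iommu_map_slot() pins pages; before the fix, delete_slot()
 * never unpinned them, so the refcount stayed elevated forever. */
static int pinned_pages;

static void iommu_map_slot(void)   { pinned_pages++; }
static void iommu_unmap_slot(void) { pinned_pages--; }  /* call added by fix */

static void delete_slot(int with_fix)
{
    if (with_fix)
        iommu_unmap_slot();      /* kvm_iommu_unmap_pages() on removal */
    /* ...free the slot's bookkeeping... */
}

int main(void)
{
    iommu_map_slot();
    delete_slot(0);
    printf("without fix, leaked pins: %d\n", pinned_pages);  /* 1 */

    pinned_pages = 0;
    iommu_map_slot();
    delete_slot(1);
    printf("with fix, leaked pins:    %d\n", pinned_pages);  /* 0 */
    return 0;
}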
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: int kvm_iommu_unmap_guest(struct kvm *kvm) { struct iommu_domain *domain = kvm->arch.iommu_domain; /* check if iommu exists and in use */ if (!domain) return 0; kvm_iommu_unmap_memslots(kvm); iommu_domain_free(domain); return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-264'], 'message': 'KVM: lock slots_lock around device assignment As pointed out by Jason Baron, when assigning a device to a guest we first set the iommu domain pointer, which enables mapping and unmapping of memory slots to the iommu. This leaves a window where this path is enabled, but we haven't synchronized the iommu mappings to the existing memory slots. Thus a slot being removed at that point could send us down unexpected code paths removing non-existent pinnings and iommu mappings. Take the slots_lock around creating the iommu domain and initial mappings as well as around iommu teardown to avoid this race. Signed-off-by: Alex Williamson <alex.williamson@redhat.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>'</s>
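The race described here is between iommu domain setup/teardown and concurrent slot updates. The remedy is to take the same slots_lock in both paths, so a slot operation can never observe a half-initialized or half-torn-down domain. A compact pthread model of that discipline (the kernel code serializes on kvm->slots_lock; everything below is an illustrative stand-in):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy model of the locking discipline the fix imposes: domain teardown and
 * slot operations serialize on one lock, so a slot update can never run
 * against a half-torn-down domain. */
static pthread_mutex_t slots_lock = PTHREAD_MUTEX_INITIALIZER;
static int *iommu_domain;        /* stands in for kvm->arch.iommu_domain */

static void unmap_guest(void)
{
    pthread_mutex_lock(&slots_lock);
    if (iommu_domain) {
        free(iommu_domain);      /* unmap memslots + free, all under lock */
        iommu_domain = NULL;
    }
    pthread_mutex_unlock(&slots_lock);
}

static void remove_slot(void)
{
    pthread_mutex_lock(&slots_lock);
    if (iommu_domain)
        puts("unmapping slot from live domain");
    else
        puts("no domain, nothing to unmap");
    pthread_mutex_unlock(&slots_lock);
}

int main(void)
{
    iommu_domain = malloc(sizeof *iommu_domain);
    remove_slot();               /* sees a fully set-up domain */
    unmap_guest();
    remove_slot();               /* safely sees domain == NULL */
    return 0;
}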
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: _crypt_extended_r(const char *key, const char *setting, struct php_crypt_extended_data *data) { int i; uint32_t count, salt, l, r0, r1, keybuf[2]; u_char *p, *q; if (!data->initialized) des_init_local(data); /* * Copy the key, shifting each character up by one bit * and padding with zeros. */ q = (u_char *) keybuf; while (q - (u_char *) keybuf < sizeof(keybuf)) { if ((*q++ = *key << 1)) key++; } if (des_setkey((u_char *) keybuf, data)) return(NULL); if (*setting == _PASSWORD_EFMT1) { /* * "new"-style: * setting - underscore, 4 chars of count, 4 chars of salt * key - unlimited characters */ for (i = 1, count = 0; i < 5; i++) { int value = ascii_to_bin(setting[i]); if (ascii64[value] != setting[i]) return(NULL); count |= value << (i - 1) * 6; } if (!count) return(NULL); for (i = 5, salt = 0; i < 9; i++) { int value = ascii_to_bin(setting[i]); if (ascii64[value] != setting[i]) return(NULL); salt |= value << (i - 5) * 6; } while (*key) { /* * Encrypt the key with itself. */ if (des_cipher((u_char *) keybuf, (u_char *) keybuf, 0, 1, data)) return(NULL); /* * And XOR with the next 8 characters of the key. */ q = (u_char *) keybuf; while (q - (u_char *) keybuf < sizeof(keybuf) && *key) *q++ ^= *key++ << 1; if (des_setkey((u_char *) keybuf, data)) return(NULL); } memcpy(data->output, setting, 9); data->output[9] = '\0'; p = (u_char *) data->output + 9; } else { /* * "old"-style: * setting - 2 chars of salt * key - up to 8 characters */ count = 25; if (ascii_is_unsafe(setting[0]) || ascii_is_unsafe(setting[1])) return(NULL); salt = (ascii_to_bin(setting[1]) << 6) | ascii_to_bin(setting[0]); data->output[0] = setting[0]; data->output[1] = setting[1]; p = (u_char *) data->output + 2; } setup_salt(salt, data); /* * Do it. */ if (do_des(0, 0, &r0, &r1, count, data)) return(NULL); /* * Now encode the result... */ l = (r0 >> 8); *p++ = ascii64[(l >> 18) & 0x3f]; *p++ = ascii64[(l >> 12) & 0x3f]; *p++ = ascii64[(l >> 6) & 0x3f]; *p++ = ascii64[l & 0x3f]; l = (r0 << 16) | ((r1 >> 16) & 0xffff); *p++ = ascii64[(l >> 18) & 0x3f]; *p++ = ascii64[(l >> 12) & 0x3f]; *p++ = ascii64[(l >> 6) & 0x3f]; *p++ = ascii64[l & 0x3f]; l = r1 << 2; *p++ = ascii64[(l >> 12) & 0x3f]; *p++ = ascii64[(l >> 6) & 0x3f]; *p++ = ascii64[l & 0x3f]; *p = 0; return(data->output); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-310'], 'message': 'fix CVE-2012-2143'</s>
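The CVE-2012-2143 bug is visible in the first loop above: `if ((*q++ = *key << 1)) key++;` stops advancing `key` at the first byte whose shifted value truncates to zero in a u_char (any 0x80 byte), so passwords collide on their prefix before that byte. The upstream fix advances on `*key` itself. A standalone demonstration of both variants:

#include <stdio.h>
#include <string.h>

typedef unsigned char u_char;

/* broken: advances key only when the shifted byte is nonzero, so it
 * stalls at 0x80 bytes; fixed: advances whenever *key != '\0'. */
static void copy_key(const char *key, u_char out[8], int fixed)
{
    u_char *q = out;
    while (q - out < 8) {
        u_char shifted = (u_char)(((u_char)*key) << 1);
        if (fixed) {
            *q++ = shifted;
            if (*key != '\0')
                key++;
        } else if ((*q++ = shifted)) {
            key++;
        }
    }
}

int main(void)
{
    u_char a[8], b[8];
    copy_key("sec\x80ret", a, 0); copy_key("sec", b, 0);
    printf("broken collides: %s\n", memcmp(a, b, 8) ? "no" : "yes"); /* yes */
    copy_key("sec\x80ret", a, 1); copy_key("sec", b, 1);
    printf("fixed collides:  %s\n", memcmp(a, b, 8) ? "no" : "yes"); /* no */
    return 0;
}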
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir) { struct inode *inode = filp->f_path.dentry->d_inode; struct super_block *sb = inode->i_sb; int len, err; char strbuf[HFSPLUS_MAX_STRLEN + 1]; hfsplus_cat_entry entry; struct hfs_find_data fd; struct hfsplus_readdir_data *rd; u16 type; if (filp->f_pos >= inode->i_size) return 0; err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); if (err) return err; hfsplus_cat_build_key(sb, fd.search_key, inode->i_ino, NULL); err = hfs_brec_find(&fd); if (err) goto out; switch ((u32)filp->f_pos) { case 0: /* This is completely artificial... */ if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR)) goto out; filp->f_pos++; /* fall through */ case 1: hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) { printk(KERN_ERR "hfs: bad catalog folder thread\n"); err = -EIO; goto out; } if (fd.entrylength < HFSPLUS_MIN_THREAD_SZ) { printk(KERN_ERR "hfs: truncated catalog thread\n"); err = -EIO; goto out; } if (filldir(dirent, "..", 2, 1, be32_to_cpu(entry.thread.parentID), DT_DIR)) goto out; filp->f_pos++; /* fall through */ default: if (filp->f_pos >= inode->i_size) goto out; err = hfs_brec_goto(&fd, filp->f_pos - 1); if (err) goto out; } for (;;) { if (be32_to_cpu(fd.key->cat.parent) != inode->i_ino) { printk(KERN_ERR "hfs: walked past end of dir\n"); err = -EIO; goto out; } hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); type = be16_to_cpu(entry.type); len = HFSPLUS_MAX_STRLEN; err = hfsplus_uni2asc(sb, &fd.key->cat.name, strbuf, &len); if (err) goto out; if (type == HFSPLUS_FOLDER) { if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) { printk(KERN_ERR "hfs: small dir entry\n"); err = -EIO; goto out; } if (HFSPLUS_SB(sb)->hidden_dir && HFSPLUS_SB(sb)->hidden_dir->i_ino == be32_to_cpu(entry.folder.id)) goto next; if (filldir(dirent, strbuf, len, filp->f_pos, be32_to_cpu(entry.folder.id), DT_DIR)) break; } else if (type == HFSPLUS_FILE) { if (fd.entrylength < sizeof(struct hfsplus_cat_file)) { printk(KERN_ERR "hfs: small file entry\n"); err = -EIO; goto out; } if (filldir(dirent, strbuf, len, filp->f_pos, be32_to_cpu(entry.file.id), DT_REG)) break; } else { printk(KERN_ERR "hfs: bad catalog entry type\n"); err = -EIO; goto out; } next: filp->f_pos++; if (filp->f_pos >= inode->i_size) goto out; err = hfs_brec_goto(&fd, 1); if (err) goto out; } rd = filp->private_data; if (!rd) { rd = kmalloc(sizeof(struct hfsplus_readdir_data), GFP_KERNEL); if (!rd) { err = -ENOMEM; goto out; } filp->private_data = rd; rd->file = filp; list_add(&rd->list, &HFSPLUS_I(inode)->open_dir_list); } memcpy(&rd->key, fd.key, sizeof(struct hfsplus_cat_key)); out: hfs_find_exit(&fd); return err; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-264'], 'message': 'hfsplus: Fix potential buffer overflows Commit ec81aecb2966 ("hfs: fix a potential buffer overflow") fixed a few potential buffer overflows in the hfs filesystem. But as Timo Warns pointed out, these changes also need to be made on the hfsplus filesystem as well. 
Reported-by: Timo Warns <warns@pre-sense.de> Acked-by: WANG Cong <amwang@redhat.com> Cc: Alexey Khoroshilov <khoroshilov@ispras.ru> Cc: Miklos Szeredi <mszeredi@suse.cz> Cc: Sage Weil <sage@newdream.net> Cc: Eugene Teo <eteo@redhat.com> Cc: Roman Zippel <zippel@linux-m68k.org> Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Christoph Hellwig <hch@lst.de> Cc: Alexey Dobriyan <adobriyan@gmail.com> Cc: Dave Anderson <anderson@redhat.com> Cc: stable <stable@vger.kernel.org> Cc: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>'</s>
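The hfsplus counterpart of the hfs fix boils down to validating the on-disk catalog name length (attacker-controlled) against the fixed bound of strbuf[] before hfsplus_uni2asc converts into it. A sketch of that style of check; the exact field names and error path in the upstream diff may differ:

#include <stdio.h>

#define HFSPLUS_MAX_STRLEN 255  /* bound of strbuf[] in the code above */

/* Sketch: reject a corrupted on-disk name length before any conversion
 * writes into the fixed-size stack buffer. Illustrative, not verbatim. */
static int catalog_name_len_ok(unsigned int disk_len)
{
    if (disk_len > HFSPLUS_MAX_STRLEN) {
        fprintf(stderr, "hfs: catalog name length corrupted\n");
        return 0;                /* caller fails with -EIO */
    }
    return 1;
}

int main(void)
{
    printf("%d %d\n", catalog_name_len_ok(31), catalog_name_len_ok(4096));
    return 0;                    /* prints: 1 0 */
}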
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: xmlXPtrEvalXPtrPart(xmlXPathParserContextPtr ctxt, xmlChar *name) { xmlChar *buffer, *cur; int len; int level; if (name == NULL) name = xmlXPathParseName(ctxt); if (name == NULL) XP_ERROR(XPATH_EXPR_ERROR); if (CUR != '(') XP_ERROR(XPATH_EXPR_ERROR); NEXT; level = 1; len = xmlStrlen(ctxt->cur); len++; buffer = (xmlChar *) xmlMallocAtomic(len * sizeof (xmlChar)); if (buffer == NULL) { xmlXPtrErrMemory("allocating buffer"); return; } cur = buffer; while (CUR != 0) { if (CUR == ')') { level--; if (level == 0) { NEXT; break; } *cur++ = CUR; } else if (CUR == '(') { level++; *cur++ = CUR; } else if (CUR == '^') { NEXT; if ((CUR == ')') || (CUR == '(') || (CUR == '^')) { *cur++ = CUR; } else { *cur++ = '^'; *cur++ = CUR; } } else { *cur++ = CUR; } NEXT; } *cur = 0; if ((level != 0) && (CUR == 0)) { xmlFree(buffer); XP_ERROR(XPTR_SYNTAX_ERROR); } if (xmlStrEqual(name, (xmlChar *) "xpointer")) { const xmlChar *left = CUR_PTR; CUR_PTR = buffer; /* * To evaluate an xpointer scheme element (4.3) we need: * context initialized to the root * context position initalized to 1 * context size initialized to 1 */ ctxt->context->node = (xmlNodePtr)ctxt->context->doc; ctxt->context->proximityPosition = 1; ctxt->context->contextSize = 1; xmlXPathEvalExpr(ctxt); CUR_PTR=left; } else if (xmlStrEqual(name, (xmlChar *) "element")) { const xmlChar *left = CUR_PTR; xmlChar *name2; CUR_PTR = buffer; if (buffer[0] == '/') { xmlXPathRoot(ctxt); xmlXPtrEvalChildSeq(ctxt, NULL); } else { name2 = xmlXPathParseName(ctxt); if (name2 == NULL) { CUR_PTR = left; xmlFree(buffer); XP_ERROR(XPATH_EXPR_ERROR); } xmlXPtrEvalChildSeq(ctxt, name2); } CUR_PTR = left; #ifdef XPTR_XMLNS_SCHEME } else if (xmlStrEqual(name, (xmlChar *) "xmlns")) { const xmlChar *left = CUR_PTR; xmlChar *prefix; xmlChar *URI; xmlURIPtr value; CUR_PTR = buffer; prefix = xmlXPathParseNCName(ctxt); if (prefix == NULL) { xmlFree(buffer); xmlFree(name); XP_ERROR(XPTR_SYNTAX_ERROR); } SKIP_BLANKS; if (CUR != '=') { xmlFree(prefix); xmlFree(buffer); xmlFree(name); XP_ERROR(XPTR_SYNTAX_ERROR); } NEXT; SKIP_BLANKS; /* @@ check escaping in the XPointer WD */ value = xmlParseURI((const char *)ctxt->cur); if (value == NULL) { xmlFree(prefix); xmlFree(buffer); xmlFree(name); XP_ERROR(XPTR_SYNTAX_ERROR); } URI = xmlSaveUri(value); xmlFreeURI(value); if (URI == NULL) { xmlFree(prefix); xmlFree(buffer); xmlFree(name); XP_ERROR(XPATH_MEMORY_ERROR); } xmlXPathRegisterNs(ctxt->context, prefix, URI); CUR_PTR = left; xmlFree(URI); xmlFree(prefix); #endif /* XPTR_XMLNS_SCHEME */ } else { xmlXPtrErr(ctxt, XML_XPTR_UNKNOWN_SCHEME, "unsupported scheme '%s'\n", name); } xmlFree(buffer); xmlFree(name); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-189'], 'message': 'Fix an off by one pointer access getting out of the range of memory allocated for xpointer decoding'</s>
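The off-by-one lives in the escape loop above: a scheme string ending in '^' takes the branch that emits two bytes after NEXT has already advanced past the end, overrunning a buffer sized xmlStrlen(input)+1 by one. The general cure is to bound every write by the remaining capacity instead of trusting a length computed up front. A standalone sketch of that pattern, not the literal upstream diff:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Bound every write by remaining capacity instead of trusting that the
 * output can never exceed strlen(input)+1. Illustrative pattern only. */
static char *unescape(const char *in)
{
    size_t cap = strlen(in) + 1;
    size_t used = 0;
    char *out = malloc(cap);

    if (out == NULL)
        return NULL;
    while (*in != '\0') {
        if (used + 2 > cap) {        /* room for one byte + terminator */
            free(out);
            return NULL;
        }
        if (*in == '^' && in[1] != '\0')
            in++;                    /* "^X" unescapes to "X" */
        out[used++] = *in++;
    }
    out[used] = '\0';
    return out;
}

int main(void)
{
    char *s = unescape("a^(b^^c^");  /* trailing '^' handled safely */
    if (s) { puts(s); free(s); }
    return 0;
}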
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: int phar_parse_tarfile(php_stream* fp, char *fname, int fname_len, char *alias, int alias_len, phar_archive_data** pphar, int is_data, php_uint32 compression, char **error TSRMLS_DC) /* {{{ */ { char buf[512], *actual_alias = NULL, *p; phar_entry_info entry = {0}; size_t pos = 0, read, totalsize; tar_header *hdr; php_uint32 sum1, sum2, size, old; phar_archive_data *myphar, **actual; int last_was_longlink = 0; if (error) { *error = NULL; } php_stream_seek(fp, 0, SEEK_END); totalsize = php_stream_tell(fp); php_stream_seek(fp, 0, SEEK_SET); read = php_stream_read(fp, buf, sizeof(buf)); if (read != sizeof(buf)) { if (error) { spprintf(error, 4096, "phar error: \"%s\" is not a tar file or is truncated", fname); } php_stream_close(fp); return FAILURE; } hdr = (tar_header*)buf; old = (memcmp(hdr->magic, "ustar", sizeof("ustar")-1) != 0); myphar = (phar_archive_data *) pecalloc(1, sizeof(phar_archive_data), PHAR_G(persist)); myphar->is_persistent = PHAR_G(persist); /* estimate number of entries, can't be certain with tar files */ zend_hash_init(&myphar->manifest, 2 + (totalsize >> 12), zend_get_hash_value, destroy_phar_manifest_entry, (zend_bool)myphar->is_persistent); zend_hash_init(&myphar->mounted_dirs, 5, zend_get_hash_value, NULL, (zend_bool)myphar->is_persistent); zend_hash_init(&myphar->virtual_dirs, 4 + (totalsize >> 11), zend_get_hash_value, NULL, (zend_bool)myphar->is_persistent); myphar->is_tar = 1; /* remember whether this entire phar was compressed with gz/bzip2 */ myphar->flags = compression; entry.is_tar = 1; entry.is_crc_checked = 1; entry.phar = myphar; pos += sizeof(buf); do { phar_entry_info *newentry; pos = php_stream_tell(fp); hdr = (tar_header*) buf; sum1 = phar_tar_number(hdr->checksum, sizeof(hdr->checksum)); if (sum1 == 0 && phar_tar_checksum(buf, sizeof(buf)) == 0) { break; } memset(hdr->checksum, ' ', sizeof(hdr->checksum)); sum2 = phar_tar_checksum(buf, old?sizeof(old_tar_header):sizeof(tar_header)); size = entry.uncompressed_filesize = entry.compressed_filesize = phar_tar_number(hdr->size, sizeof(hdr->size)); if (((!old && hdr->prefix[0] == 0) || old) && strlen(hdr->name) == sizeof(".phar/signature.bin")-1 && !strncmp(hdr->name, ".phar/signature.bin", sizeof(".phar/signature.bin")-1)) { off_t curloc; if (size > 511) { if (error) { spprintf(error, 4096, "phar error: tar-based phar \"%s\" has signature that is larger than 511 bytes, cannot process", fname); } bail: php_stream_close(fp); phar_destroy_phar_data(myphar TSRMLS_CC); return FAILURE; } curloc = php_stream_tell(fp); read = php_stream_read(fp, buf, size); if (read != size) { if (error) { spprintf(error, 4096, "phar error: tar-based phar \"%s\" signature cannot be read", fname); } goto bail; } #ifdef WORDS_BIGENDIAN # define PHAR_GET_32(buffer) \ (((((unsigned char*)(buffer))[3]) << 24) \ | ((((unsigned char*)(buffer))[2]) << 16) \ | ((((unsigned char*)(buffer))[1]) << 8) \ | (((unsigned char*)(buffer))[0])) #else # define PHAR_GET_32(buffer) (php_uint32) *(buffer) #endif myphar->sig_flags = PHAR_GET_32(buf); if (FAILURE == phar_verify_signature(fp, php_stream_tell(fp) - size - 512, myphar->sig_flags, buf + 8, size - 8, fname, &myphar->signature, &myphar->sig_len, error TSRMLS_CC)) { if (error) { char *save = *error; spprintf(error, 4096, "phar error: tar-based phar \"%s\" signature cannot 
be verified: %s", fname, save); efree(save); } goto bail; } php_stream_seek(fp, curloc + 512, SEEK_SET); /* signature checked out, let's ensure this is the last file in the phar */ if (((hdr->typeflag == '\0') || (hdr->typeflag == TAR_FILE)) && size > 0) { /* this is not good enough - seek succeeds even on truncated tars */ php_stream_seek(fp, 512, SEEK_CUR); if ((uint)php_stream_tell(fp) > totalsize) { if (error) { spprintf(error, 4096, "phar error: \"%s\" is a corrupted tar file (truncated)", fname); } php_stream_close(fp); phar_destroy_phar_data(myphar TSRMLS_CC); return FAILURE; } } read = php_stream_read(fp, buf, sizeof(buf)); if (read != sizeof(buf)) { if (error) { spprintf(error, 4096, "phar error: \"%s\" is a corrupted tar file (truncated)", fname); } php_stream_close(fp); phar_destroy_phar_data(myphar TSRMLS_CC); return FAILURE; } hdr = (tar_header*) buf; sum1 = phar_tar_number(hdr->checksum, sizeof(hdr->checksum)); if (sum1 == 0 && phar_tar_checksum(buf, sizeof(buf)) == 0) { break; } if (error) { spprintf(error, 4096, "phar error: \"%s\" has entries after signature, invalid phar", fname); } goto bail; } if (!last_was_longlink && hdr->typeflag == 'L') { last_was_longlink = 1; /* support the ././@LongLink system for storing long filenames */ entry.filename_len = entry.uncompressed_filesize; entry.filename = pemalloc(entry.filename_len+1, myphar->is_persistent); read = php_stream_read(fp, entry.filename, entry.filename_len); if (read != entry.filename_len) { efree(entry.filename); if (error) { spprintf(error, 4096, "phar error: \"%s\" is a corrupted tar file (truncated)", fname); } php_stream_close(fp); phar_destroy_phar_data(myphar TSRMLS_CC); return FAILURE; } entry.filename[entry.filename_len] = '\0'; /* skip blank stuff */ size = ((size+511)&~511) - size; /* this is not good enough - seek succeeds even on truncated tars */ php_stream_seek(fp, size, SEEK_CUR); if ((uint)php_stream_tell(fp) > totalsize) { efree(entry.filename); if (error) { spprintf(error, 4096, "phar error: \"%s\" is a corrupted tar file (truncated)", fname); } php_stream_close(fp); phar_destroy_phar_data(myphar TSRMLS_CC); return FAILURE; } read = php_stream_read(fp, buf, sizeof(buf)); if (read != sizeof(buf)) { efree(entry.filename); if (error) { spprintf(error, 4096, "phar error: \"%s\" is a corrupted tar file (truncated)", fname); } php_stream_close(fp); phar_destroy_phar_data(myphar TSRMLS_CC); return FAILURE; } continue; } else if (!last_was_longlink && !old && hdr->prefix[0] != 0) { char name[256]; int i, j; for (i = 0; i < 155; i++) { name[i] = hdr->prefix[i]; if (name[i] == '\0') { break; } } name[i++] = '/'; for (j = 0; j < 100; j++) { name[i+j] = hdr->name[j]; if (name[i+j] == '\0') { break; } } entry.filename_len = i+j; if (name[entry.filename_len - 1] == '/') { /* some tar programs store directories with trailing slash */ entry.filename_len--; } entry.filename = pestrndup(name, entry.filename_len, myphar->is_persistent); } else if (!last_was_longlink) { int i; /* calculate strlen, which can be no longer than 100 */ for (i = 0; i < 100; i++) { if (hdr->name[i] == '\0') { break; } } entry.filename_len = i; entry.filename = pestrndup(hdr->name, i, myphar->is_persistent); if (entry.filename[entry.filename_len - 1] == '/') { /* some tar programs store directories with trailing slash */ entry.filename[entry.filename_len - 1] = '\0'; entry.filename_len--; } } last_was_longlink = 0; phar_add_virtual_dirs(myphar, entry.filename, entry.filename_len TSRMLS_CC); if (sum1 != sum2) { if (error) { spprintf(error, 
4096, "phar error: \"%s\" is a corrupted tar file (checksum mismatch of file \"%s\")", fname, entry.filename); } pefree(entry.filename, myphar->is_persistent); php_stream_close(fp); phar_destroy_phar_data(myphar TSRMLS_CC); return FAILURE; } entry.tar_type = ((old & (hdr->typeflag == '\0')) ? TAR_FILE : hdr->typeflag); entry.offset = entry.offset_abs = pos; /* header_offset unused in tar */ entry.fp_type = PHAR_FP; entry.flags = phar_tar_number(hdr->mode, sizeof(hdr->mode)) & PHAR_ENT_PERM_MASK; entry.timestamp = phar_tar_number(hdr->mtime, sizeof(hdr->mtime)); entry.is_persistent = myphar->is_persistent; #ifndef S_ISDIR #define S_ISDIR(mode) (((mode)&S_IFMT) == S_IFDIR) #endif if (old && entry.tar_type == TAR_FILE && S_ISDIR(entry.flags)) { entry.tar_type = TAR_DIR; } if (entry.tar_type == TAR_DIR) { entry.is_dir = 1; } else { entry.is_dir = 0; } entry.link = NULL; if (entry.tar_type == TAR_LINK) { if (!zend_hash_exists(&myphar->manifest, hdr->linkname, strlen(hdr->linkname))) { if (error) { spprintf(error, 4096, "phar error: \"%s\" is a corrupted tar file - hard link to non-existent file \"%s\"", fname, hdr->linkname); } pefree(entry.filename, entry.is_persistent); php_stream_close(fp); phar_destroy_phar_data(myphar TSRMLS_CC); return FAILURE; } entry.link = estrdup(hdr->linkname); } else if (entry.tar_type == TAR_SYMLINK) { entry.link = estrdup(hdr->linkname); } phar_set_inode(&entry TSRMLS_CC); zend_hash_add(&myphar->manifest, entry.filename, entry.filename_len, (void*)&entry, sizeof(phar_entry_info), (void **) &newentry); if (entry.is_persistent) { ++entry.manifest_pos; } if (entry.filename_len >= sizeof(".phar/.metadata")-1 && !memcmp(entry.filename, ".phar/.metadata", sizeof(".phar/.metadata")-1)) { if (FAILURE == phar_tar_process_metadata(newentry, fp TSRMLS_CC)) { if (error) { spprintf(error, 4096, "phar error: tar-based phar \"%s\" has invalid metadata in magic file \"%s\"", fname, entry.filename); } php_stream_close(fp); phar_destroy_phar_data(myphar TSRMLS_CC); return FAILURE; } } if (!actual_alias && entry.filename_len == sizeof(".phar/alias.txt")-1 && !strncmp(entry.filename, ".phar/alias.txt", sizeof(".phar/alias.txt")-1)) { /* found explicit alias */ if (size > 511) { if (error) { spprintf(error, 4096, "phar error: tar-based phar \"%s\" has alias that is larger than 511 bytes, cannot process", fname); } php_stream_close(fp); phar_destroy_phar_data(myphar TSRMLS_CC); return FAILURE; } read = php_stream_read(fp, buf, size); if (read == size) { buf[size] = '\0'; if (!phar_validate_alias(buf, size)) { if (size > 50) { buf[50] = '.'; buf[51] = '.'; buf[52] = '.'; buf[53] = '\0'; } if (error) { spprintf(error, 4096, "phar error: invalid alias \"%s\" in tar-based phar \"%s\"", buf, fname); } php_stream_close(fp); phar_destroy_phar_data(myphar TSRMLS_CC); return FAILURE; } actual_alias = pestrndup(buf, size, myphar->is_persistent); myphar->alias = actual_alias; myphar->alias_len = size; php_stream_seek(fp, pos, SEEK_SET); } else { if (error) { spprintf(error, 4096, "phar error: Unable to read alias from tar-based phar \"%s\"", fname); } php_stream_close(fp); phar_destroy_phar_data(myphar TSRMLS_CC); return FAILURE; } } size = (size+511)&~511; if (((hdr->typeflag == '\0') || (hdr->typeflag == TAR_FILE)) && size > 0) { /* this is not good enough - seek succeeds even on truncated tars */ php_stream_seek(fp, size, SEEK_CUR); if ((uint)php_stream_tell(fp) > totalsize) { if (error) { spprintf(error, 4096, "phar error: \"%s\" is a corrupted tar file (truncated)", fname); } 
php_stream_close(fp); phar_destroy_phar_data(myphar TSRMLS_CC); return FAILURE; } } read = php_stream_read(fp, buf, sizeof(buf)); if (read != sizeof(buf)) { if (error) { spprintf(error, 4096, "phar error: \"%s\" is a corrupted tar file (truncated)", fname); } php_stream_close(fp); phar_destroy_phar_data(myphar TSRMLS_CC); return FAILURE; } } while (read != 0); if (zend_hash_exists(&(myphar->manifest), ".phar/stub.php", sizeof(".phar/stub.php")-1)) { myphar->is_data = 0; } else { myphar->is_data = 1; } /* ensure signature set */ if (!myphar->is_data && PHAR_G(require_hash) && !myphar->signature) { php_stream_close(fp); phar_destroy_phar_data(myphar TSRMLS_CC); if (error) { spprintf(error, 0, "tar-based phar \"%s\" does not have a signature", fname); } return FAILURE; } myphar->fname = pestrndup(fname, fname_len, myphar->is_persistent); #ifdef PHP_WIN32 phar_unixify_path_separators(myphar->fname, fname_len); #endif myphar->fname_len = fname_len; myphar->fp = fp; p = strrchr(myphar->fname, '/'); if (p) { myphar->ext = memchr(p, '.', (myphar->fname + fname_len) - p); if (myphar->ext == p) { myphar->ext = memchr(p + 1, '.', (myphar->fname + fname_len) - p - 1); } if (myphar->ext) { myphar->ext_len = (myphar->fname + fname_len) - myphar->ext; } } phar_request_initialize(TSRMLS_C); if (SUCCESS != zend_hash_add(&(PHAR_GLOBALS->phar_fname_map), myphar->fname, fname_len, (void*)&myphar, sizeof(phar_archive_data*), (void **)&actual)) { if (error) { spprintf(error, 4096, "phar error: Unable to add tar-based phar \"%s\" to phar registry", fname); } php_stream_close(fp); phar_destroy_phar_data(myphar TSRMLS_CC); return FAILURE; } myphar = *actual; if (actual_alias) { phar_archive_data **fd_ptr; myphar->is_temporary_alias = 0; if (SUCCESS == zend_hash_find(&(PHAR_GLOBALS->phar_alias_map), actual_alias, myphar->alias_len, (void **)&fd_ptr)) { if (SUCCESS != phar_free_alias(*fd_ptr, actual_alias, myphar->alias_len TSRMLS_CC)) { if (error) { spprintf(error, 4096, "phar error: Unable to add tar-based phar \"%s\", alias is already in use", fname); } zend_hash_del(&(PHAR_GLOBALS->phar_fname_map), myphar->fname, fname_len); return FAILURE; } } zend_hash_add(&(PHAR_GLOBALS->phar_alias_map), actual_alias, myphar->alias_len, (void*)&myphar, sizeof(phar_archive_data*), NULL); } else { phar_archive_data **fd_ptr; if (alias_len) { if (SUCCESS == zend_hash_find(&(PHAR_GLOBALS->phar_alias_map), alias, alias_len, (void **)&fd_ptr)) { if (SUCCESS != phar_free_alias(*fd_ptr, alias, alias_len TSRMLS_CC)) { if (error) { spprintf(error, 4096, "phar error: Unable to add tar-based phar \"%s\", alias is already in use", fname); } zend_hash_del(&(PHAR_GLOBALS->phar_fname_map), myphar->fname, fname_len); return FAILURE; } } zend_hash_add(&(PHAR_GLOBALS->phar_alias_map), alias, alias_len, (void*)&myphar, sizeof(phar_archive_data*), NULL); myphar->alias = pestrndup(alias, alias_len, myphar->is_persistent); myphar->alias_len = alias_len; } else { myphar->alias = pestrndup(myphar->fname, fname_len, myphar->is_persistent); myphar->alias_len = fname_len; } myphar->is_temporary_alias = 1; } if (pphar) { *pphar = myphar; } return SUCCESS; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-189'], 'message': 'fix bug #61065'</s>
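One concrete integer hazard visible in the parsing above: entry.filename_len is derived from untrusted header bytes and then used as `name[entry.filename_len - 1]`, which indexes one byte before the buffer whenever the length is zero. Guarding derived lengths before any `len - 1` access is the shape of the hardening; a standalone sketch (not the literal PHP diff):

#include <stdio.h>

/* Sketch: a length computed from untrusted tar header bytes must be
 * checked before name[len - 1] is read (len == 0 would index one byte
 * before the buffer). Illustrative, not the upstream diff verbatim. */
static size_t strip_trailing_slash(const char *name, size_t len)
{
    if (len > 0 && name[len - 1] == '/')
        len--;                   /* trim only when non-empty */
    return len;
}

int main(void)
{
    printf("%zu %zu %zu\n",
           strip_trailing_slash("dir/", 4),   /* 3 */
           strip_trailing_slash("file", 4),   /* 4 */
           strip_trailing_slash("", 0));      /* 0, no underflow */
    return 0;
}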
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int phar_tar_process_metadata(phar_entry_info *entry, php_stream *fp TSRMLS_DC) /* {{{ */ { char *metadata; size_t save = php_stream_tell(fp), read; phar_entry_info *mentry; metadata = (char *) emalloc(entry->uncompressed_filesize + 1); read = php_stream_read(fp, metadata, entry->uncompressed_filesize); if (read != entry->uncompressed_filesize) { efree(metadata); php_stream_seek(fp, save, SEEK_SET); return FAILURE; } if (phar_parse_metadata(&metadata, &entry->metadata, entry->uncompressed_filesize TSRMLS_CC) == FAILURE) { /* if not valid serialized data, it is a regular string */ efree(metadata); php_stream_seek(fp, save, SEEK_SET); return FAILURE; } if (entry->filename_len == sizeof(".phar/.metadata.bin")-1 && !memcmp(entry->filename, ".phar/.metadata.bin", sizeof(".phar/.metadata.bin")-1)) { entry->phar->metadata = entry->metadata; entry->metadata = NULL; } else if (entry->filename_len >= sizeof(".phar/.metadata/") + sizeof("/.metadata.bin") - 1 && SUCCESS == zend_hash_find(&(entry->phar->manifest), entry->filename + sizeof(".phar/.metadata/") - 1, entry->filename_len - (sizeof("/.metadata.bin") - 1 + sizeof(".phar/.metadata/") - 1), (void *)&mentry)) { /* transfer this metadata to the entry it refers */ mentry->metadata = entry->metadata; entry->metadata = NULL; } efree(metadata); php_stream_seek(fp, save, SEEK_SET); return SUCCESS; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-189'], 'message': 'fix unchecked emalloc'</s>
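In the sample above, `emalloc(entry->uncompressed_filesize + 1)` takes a size straight from the archive; at SIZE_MAX the +1 wraps to zero and the following read scribbles past a tiny allocation. PHP's allocator API provides safe_emalloc(nmemb, size, offset) for overflow-checked arithmetic; the standalone helper below just restates the check it performs for this case:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Overflow-checked "n + 1 bytes", mirroring safe_emalloc(n, 1, 1):
 * refuse sizes where the arithmetic would wrap before reaching malloc. */
static void *alloc_plus_one(size_t n)
{
    if (n == SIZE_MAX)           /* n + 1 would wrap to 0 */
        return NULL;
    return malloc(n + 1);
}

int main(void)
{
    void *a = alloc_plus_one(16);
    void *b = alloc_plus_one(SIZE_MAX);
    printf("%s %s\n", a ? "ok" : "null", b ? "ok" : "null"); /* ok null */
    free(a);
    return 0;
}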
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: i915_gem_execbuffer2(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_execbuffer2 *args = data; struct drm_i915_gem_exec_object2 *exec2_list = NULL; int ret; if (args->buffer_count < 1) { DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count); return -EINVAL; } exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); if (exec2_list == NULL) exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); if (exec2_list == NULL) { DRM_DEBUG("Failed to allocate exec list for %d buffers\n", args->buffer_count); return -ENOMEM; } ret = copy_from_user(exec2_list, (struct drm_i915_relocation_entry __user *) (uintptr_t) args->buffers_ptr, sizeof(*exec2_list) * args->buffer_count); if (ret != 0) { DRM_DEBUG("copy %d exec entries failed %d\n", args->buffer_count, ret); drm_free_large(exec2_list); return -EFAULT; } ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); if (!ret) { /* Copy the new buffer offsets back to the user's exec list. */ ret = copy_to_user((struct drm_i915_relocation_entry __user *) (uintptr_t) args->buffers_ptr, exec2_list, sizeof(*exec2_list) * args->buffer_count); if (ret) { ret = -EFAULT; DRM_DEBUG("failed to copy %d exec entries " "back to user (%d)\n", args->buffer_count, ret); } } drm_free_large(exec2_list); return ret; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-189'], 'message': 'drm/i915: fix integer overflow in i915_gem_execbuffer2() On 32-bit systems, a large args->buffer_count from userspace via ioctl may overflow the allocation size, leading to out-of-bounds access. This vulnerability was introduced in commit 8408c282 ("drm/i915: First try a normal large kmalloc for the temporary exec buffers"). Signed-off-by: Xi Wang <xi.wang@gmail.com> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: stable@vger.kernel.org Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>'</s>
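On 32-bit, `sizeof(*exec2_list) * args->buffer_count` wraps for large counts, so kmalloc returns a short buffer that the subsequent copy_from_user overruns. The commit bounds buffer_count before any allocation with a check of the form `buffer_count < 1 || buffer_count > INT_MAX / sizeof(*exec2_list)`; a standalone restatement:

#include <limits.h>
#include <stddef.h>
#include <stdio.h>

/* sizeof(elem) * count wraps when count is huge; bounding count by
 * INT_MAX / sizeof(elem) first makes the later multiply provably safe. */
static int exec_count_ok(unsigned int count, size_t elem_size)
{
    return count >= 1 && count <= INT_MAX / elem_size;
}

int main(void)
{
    printf("%d\n", exec_count_ok(16, 64));          /* 1 */
    printf("%d\n", exec_count_ok(0x20000000u, 64)); /* 0: product wraps */
    return 0;
}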
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: GC_API void * GC_CALL GC_generic_malloc(size_t lb, int k) { void * result; DCL_LOCK_STATE; if (EXPECT(GC_have_errors, FALSE)) GC_print_all_errors(); GC_INVOKE_FINALIZERS(); if (SMALL_OBJ(lb)) { LOCK(); result = GC_generic_malloc_inner((word)lb, k); UNLOCK(); } else { size_t lg; size_t lb_rounded; word n_blocks; GC_bool init; lg = ROUNDED_UP_GRANULES(lb); lb_rounded = GRANULES_TO_BYTES(lg); n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded); init = GC_obj_kinds[k].ok_init; LOCK(); result = (ptr_t)GC_alloc_large(lb_rounded, k, 0); if (0 != result) { if (GC_debugging_started) { BZERO(result, n_blocks * HBLKSIZE); } else { # ifdef THREADS /* Clear any memory that might be used for GC descriptors */ /* before we release the lock. */ ((word *)result)[0] = 0; ((word *)result)[1] = 0; ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0; ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0; # endif } } GC_bytes_allocd += lb_rounded; UNLOCK(); if (init && !GC_debugging_started && 0 != result) { BZERO(result, n_blocks * HBLKSIZE); } } if (0 == result) { return((*GC_get_oom_fn())(lb)); } else { return(result); } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-189'], 'message': 'Fix allocation size overflows due to rounding. * malloc.c (GC_generic_malloc): Check if the allocation size is rounded to a smaller value. * mallocx.c (GC_generic_malloc_ignore_off_page): Likewise.'</s>
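Per the commit text, the bug is that rounding lb up to a granule multiple can wrap past SIZE_MAX, so lb_rounded silently becomes tiny and a small block is returned for a huge request. Detecting the wrap is one comparison after rounding; a sketch (the granule size here is a stand-in, not the collector's actual constant):

#include <stdint.h>
#include <stdio.h>

#define GRANULE_BYTES 16   /* stand-in for the collector's granule size */

/* Rounding up can wrap past SIZE_MAX; the fix detects this by checking
 * that the rounded size did not become smaller than the request. */
static size_t round_up_checked(size_t lb)
{
    size_t rounded = (lb + GRANULE_BYTES - 1) & ~(size_t)(GRANULE_BYTES - 1);
    if (rounded < lb)          /* wrapped: treat as allocation failure */
        return 0;
    return rounded;
}

int main(void)
{
    printf("%zu\n", round_up_checked(100));       /* 112 */
    printf("%zu\n", round_up_checked(SIZE_MAX));  /* 0: caller fails/OOM */
    return 0;
}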
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void * calloc(size_t n, size_t lb) { # if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */ /* libpthread allocated some memory that is only pointed to by */ /* mmapped thread stacks. Make sure it's not collectable. */ { static GC_bool lib_bounds_set = FALSE; ptr_t caller = (ptr_t)__builtin_return_address(0); /* This test does not need to ensure memory visibility, since */ /* the bounds will be set when/if we create another thread. */ if (!EXPECT(lib_bounds_set, TRUE)) { GC_init_lib_bounds(); lib_bounds_set = TRUE; } if (((word)caller >= (word)GC_libpthread_start && (word)caller < (word)GC_libpthread_end) || ((word)caller >= (word)GC_libld_start && (word)caller < (word)GC_libld_end)) return GC_malloc_uncollectable(n*lb); /* The two ranges are actually usually adjacent, so there may */ /* be a way to speed this up. */ } # endif return((void *)REDIRECT_MALLOC(n*lb)); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-189'], 'message': 'Fix calloc() overflow * malloc.c (calloc): Check multiplication overflow in calloc(), assuming REDIRECT_MALLOC.'</s>
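The guard this commit adds in front of REDIRECT_MALLOC(n*lb) rests on one identity: for lb != 0, the product n*lb exceeds SIZE_MAX exactly when n > SIZE_MAX / lb, so one division proves the multiply safe. A standalone restatement (the very next sample shows the check in place in calloc itself):

#include <stdint.h>
#include <stdio.h>

/* n*lb wraps exactly when n > SIZE_MAX / lb (for lb != 0), so checking
 * the division first makes the later multiplication safe. */
static int mul_would_wrap(size_t n, size_t lb)
{
    return lb != 0 && n > SIZE_MAX / lb;
}

int main(void)
{
    printf("%d\n", mul_would_wrap(1000, 1000));      /* 0 */
    printf("%d\n", mul_would_wrap(SIZE_MAX / 2, 3)); /* 1 */
    return 0;
}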
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void * calloc(size_t n, size_t lb) { if (lb && n > GC_SIZE_MAX / lb) return NULL; # if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */ /* libpthread allocated some memory that is only pointed to by */ /* mmapped thread stacks. Make sure it's not collectable. */ { static GC_bool lib_bounds_set = FALSE; ptr_t caller = (ptr_t)__builtin_return_address(0); /* This test does not need to ensure memory visibility, since */ /* the bounds will be set when/if we create another thread. */ if (!EXPECT(lib_bounds_set, TRUE)) { GC_init_lib_bounds(); lib_bounds_set = TRUE; } if (((word)caller >= (word)GC_libpthread_start && (word)caller < (word)GC_libpthread_end) || ((word)caller >= (word)GC_libld_start && (word)caller < (word)GC_libld_end)) return GC_malloc_uncollectable(n*lb); /* The two ranges are actually usually adjacent, so there may */ /* be a way to speed this up. */ } # endif return((void *)REDIRECT_MALLOC(n*lb)); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-189'], 'message': 'Speedup calloc size overflow check by preventing division if small values * malloc.c (GC_SQRT_SIZE_MAX): New macro. * malloc.c (calloc): Add fast initial size overflow check to avoid integer division for reasonably small values passed.'</s>
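The speedup works because two factors that each fit in half the bits of size_t cannot overflow when multiplied, so the division runs only when at least one operand is large. A sketch with the macro defined as the commit describes; treat the exact upstream definition of GC_SQRT_SIZE_MAX as an assumption:

#include <stdint.h>
#include <stdio.h>

/* Both operands <= GC_SQRT_SIZE_MAX means n*lb <= SIZE_MAX, so the cheap
 * OR-and-compare filter skips the division on typical small requests.
 * Macro shape assumed from the commit description, not copied verbatim. */
#define GC_SQRT_SIZE_MAX ((((size_t)1) << (sizeof(size_t) * 4)) - 1)

static int mul_would_wrap(size_t n, size_t lb)
{
    return (n | lb) > GC_SQRT_SIZE_MAX       /* fast path: both small */
           && lb != 0 && n > SIZE_MAX / lb;  /* exact check when needed */
}

int main(void)
{
    printf("%d %d\n", mul_would_wrap(100, 64), mul_would_wrap(SIZE_MAX, 2));
    return 0;                                /* prints: 0 1 */
}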
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static char *Sys_PIDFileName( void ) { return va( "%s/%s", Sys_TempPath( ), PID_FILENAME ); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-59'], 'message': 'CVE-2012-3345'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: const char *Sys_TempPath( void ) { static TCHAR path[ MAX_PATH ]; DWORD length; length = GetTempPath( sizeof( path ), path ); if( length > sizeof( path ) || length == 0 ) return Sys_DefaultHomePath( ); else return path; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-59'], 'message': 'CVE-2012-3345'</s>
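Both CWE-59 samples above come from the same CVE: the PID file is created under the shared temp directory (GetTempPath / Sys_TempPath), where another local user can plant a symlink named PID_FILENAME and have the game truncate an arbitrary file the player can write. The upstream direction is to keep the PID file under the per-user home path instead; a sketch using only functions already visible in these samples (the real post-fix function signature differs and is not reproduced here):

/* Sketch only: prefer the per-user home path over a shared, world-writable
 * temp directory so no other local user controls the PID file's name. */
static char *Sys_PIDFileName( void ) {
	return va( "%s/%s", Sys_DefaultHomePath( ), PID_FILENAME );
}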
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: vte_sequence_handler_multiple(VteTerminal *terminal, GValueArray *params, VteTerminalSequenceHandler handler) { vte_sequence_handler_multiple_limited(terminal, params, handler, G_MAXLONG); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119'], 'message': 'emulation: Limit integer arguments to 65535 To guard against malicious sequences containing excessively big numbers, limit all parsed numbers to 16 bit range. Doing this here in the parsing routine is a catch-all guard; this doesn't preclude enforcing more stringent limits in the handlers themselves. https://bugzilla.gnome.org/show_bug.cgi?id=676090'</s>
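The catch-all here is applied at parse time: every numeric argument of a control sequence is clamped to 16-bit range before any handler runs, so G_MAXLONG defaults like the one above can never be reached with attacker-chosen magnitudes. Restated as a standalone sketch:

#include <stdio.h>

#define VTE_PARAM_MAX 65535   /* limit named in the commit message */

/* Clamp every parsed escape-sequence parameter before handlers see it,
 * so a hostile sequence cannot request e.g. a 2^31-row scroll region. */
static long clamp_param(long value)
{
    return value > VTE_PARAM_MAX ? VTE_PARAM_MAX : value;
}

int main(void)
{
    printf("%ld %ld\n", clamp_param(24), clamp_param(2147483647L));
    return 0;             /* prints: 24 65535 */
}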
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: int copy_creds(struct task_struct *p, unsigned long clone_flags) { #ifdef CONFIG_KEYS struct thread_group_cred *tgcred; #endif struct cred *new; int ret; if ( #ifdef CONFIG_KEYS !p->cred->thread_keyring && #endif clone_flags & CLONE_THREAD ) { p->real_cred = get_cred(p->cred); get_cred(p->cred); alter_cred_subscribers(p->cred, 2); kdebug("share_creds(%p{%d,%d})", p->cred, atomic_read(&p->cred->usage), read_cred_subscribers(p->cred)); atomic_inc(&p->cred->user->processes); return 0; } new = prepare_creds(); if (!new) return -ENOMEM; if (clone_flags & CLONE_NEWUSER) { ret = create_user_ns(new); if (ret < 0) goto error_put; } /* cache user_ns in cred. Doesn't need a refcount because it will * stay pinned by cred->user */ new->user_ns = new->user->user_ns; #ifdef CONFIG_KEYS /* new threads get their own thread keyrings if their parent already * had one */ if (new->thread_keyring) { key_put(new->thread_keyring); new->thread_keyring = NULL; if (clone_flags & CLONE_THREAD) install_thread_keyring_to_cred(new); } /* we share the process and session keyrings between all the threads in * a process - this is slightly icky as we violate COW credentials a * bit */ if (!(clone_flags & CLONE_THREAD)) { tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL); if (!tgcred) { ret = -ENOMEM; goto error_put; } atomic_set(&tgcred->usage, 1); spin_lock_init(&tgcred->lock); tgcred->process_keyring = NULL; tgcred->session_keyring = key_get(new->tgcred->session_keyring); release_tgcred(new); new->tgcred = tgcred; } #endif atomic_inc(&new->user->processes); p->cred = p->real_cred = get_cred(new); alter_cred_subscribers(new, 2); validate_creds(new); return 0; error_put: put_cred(new); return ret; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119'], 'message': 'cred: copy_process() should clear child->replacement_session_keyring keyctl_session_to_parent(task) sets ->replacement_session_keyring, it should be processed and cleared by key_replace_session_keyring(). However, this task can fork before it notices TIF_NOTIFY_RESUME and the new child gets the bogus ->replacement_session_keyring copied by dup_task_struct(). This is obviously wrong and, if nothing else, this leads to put_cred(already_freed_cred). change copy_creds() to clear this member. If copy_process() fails before this point the wrong ->replacement_session_keyring doesn't matter, exit_creds() won't be called. Cc: <stable@vger.kernel.org> Signed-off-by: Oleg Nesterov <oleg@redhat.com> Acked-by: David Howells <dhowells@redhat.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>'</s>
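The bug pattern is a consumed-once pointer surviving a struct copy: dup_task_struct memcpys the parent, so a pending replacement_session_keyring set between keyctl_session_to_parent() and the parent's TIF_NOTIFY_RESUME handling gets freed twice, once per task. The fix clears the field on the copy path; a toy user-space model of that rule:

#include <stdio.h>
#include <stdlib.h>

/* Toy model: a one-shot "pending replacement" pointer is consumed and
 * freed by its owner; if a fork-style copy inherits it, the same object
 * is freed twice. The fix clears it in the copy path. */
struct task { void *pending_replacement; };

static void copy_task(struct task *child, const struct task *parent)
{
    *child = *parent;                    /* dup_task_struct-style copy */
    child->pending_replacement = NULL;   /* the fix: don't inherit it */
}

static void consume_pending(struct task *t)
{
    free(t->pending_replacement);        /* one-shot consumption */
    t->pending_replacement = NULL;
}

int main(void)
{
    struct task parent = { malloc(16) }, child;
    copy_task(&child, &parent);
    consume_pending(&parent);
    consume_pending(&child);             /* safe: child copy was cleared */
    puts("no double free");
    return 0;
}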
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: xmlParserEntityCheck(xmlParserCtxtPtr ctxt, unsigned long size, xmlEntityPtr ent) { unsigned long consumed = 0; if ((ctxt == NULL) || (ctxt->options & XML_PARSE_HUGE)) return (0); if (ctxt->lastError.code == XML_ERR_ENTITY_LOOP) return (1); if (size != 0) { /* * Do the check based on the replacement size of the entity */ if (size < XML_PARSER_BIG_ENTITY) return(0); /* * A limit on the amount of text data reasonably used */ if (ctxt->input != NULL) { consumed = ctxt->input->consumed + (ctxt->input->cur - ctxt->input->base); } consumed += ctxt->sizeentities; if ((size < XML_PARSER_NON_LINEAR * consumed) && (ctxt->nbentities * 3 < XML_PARSER_NON_LINEAR * consumed)) return (0); } else if (ent != NULL) { /* * use the number of parsed entities in the replacement */ size = ent->checked; /* * The amount of data parsed counting entities size only once */ if (ctxt->input != NULL) { consumed = ctxt->input->consumed + (ctxt->input->cur - ctxt->input->base); } consumed += ctxt->sizeentities; /* * Check the density of entities for the amount of data * knowing an entity reference will take at least 3 bytes */ if (size * 3 < consumed * XML_PARSER_NON_LINEAR) return (0); } else { /* * strange we got no data for checking just return */ return (0); } xmlFatalErr(ctxt, XML_ERR_ENTITY_LOOP, NULL); return (1); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-189'], 'message': 'Fix parser local buffers size problems'</s>
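The density heuristic above is easier to read with numbers: since an entity reference costs at least 3 bytes of real input, honest expansion keeps size*3 below consumed * XML_PARSER_NON_LINEAR, while a 1 MB expansion from 1 KB of consumed input fails it. Restated standalone; the constant's value is libxml2's default and is an assumption here:

#include <stdio.h>

#define XML_PARSER_NON_LINEAR 10  /* libxml2 default ratio (assumed) */

/* Accept entity expansion only while its total size stays roughly linear
 * in the input actually consumed (each reference costs >= 3 bytes). */
static int expansion_ok(unsigned long size, unsigned long consumed)
{
    return size * 3 < consumed * XML_PARSER_NON_LINEAR;
}

int main(void)
{
    printf("%d\n", expansion_ok(1000, 1024));    /* 1: linear-ish */
    printf("%d\n", expansion_ok(1000000, 1024)); /* 0: entity bomb */
    return 0;
}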
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: xmlParseAttValueComplex(xmlParserCtxtPtr ctxt, int *attlen, int normalize) { xmlChar limit = 0; xmlChar *buf = NULL; xmlChar *rep = NULL; int len = 0; int buf_size = 0; int c, l, in_space = 0; xmlChar *current = NULL; xmlEntityPtr ent; if (NXT(0) == '"') { ctxt->instate = XML_PARSER_ATTRIBUTE_VALUE; limit = '"'; NEXT; } else if (NXT(0) == '\'') { limit = '\''; ctxt->instate = XML_PARSER_ATTRIBUTE_VALUE; NEXT; } else { xmlFatalErr(ctxt, XML_ERR_ATTRIBUTE_NOT_STARTED, NULL); return(NULL); } /* * allocate a translation buffer. */ buf_size = XML_PARSER_BUFFER_SIZE; buf = (xmlChar *) xmlMallocAtomic(buf_size * sizeof(xmlChar)); if (buf == NULL) goto mem_error; /* * OK loop until we reach one of the ending char or a size limit. */ c = CUR_CHAR(l); while ((NXT(0) != limit) && /* checked */ (IS_CHAR(c)) && (c != '<')) { if (c == 0) break; if (c == '&') { in_space = 0; if (NXT(1) == '#') { int val = xmlParseCharRef(ctxt); if (val == '&') { if (ctxt->replaceEntities) { if (len > buf_size - 10) { growBuffer(buf, 10); } buf[len++] = '&'; } else { /* * The reparsing will be done in xmlStringGetNodeList() * called by the attribute() function in SAX.c */ if (len > buf_size - 10) { growBuffer(buf, 10); } buf[len++] = '&'; buf[len++] = '#'; buf[len++] = '3'; buf[len++] = '8'; buf[len++] = ';'; } } else if (val != 0) { if (len > buf_size - 10) { growBuffer(buf, 10); } len += xmlCopyChar(0, &buf[len], val); } } else { ent = xmlParseEntityRef(ctxt); ctxt->nbentities++; if (ent != NULL) ctxt->nbentities += ent->owner; if ((ent != NULL) && (ent->etype == XML_INTERNAL_PREDEFINED_ENTITY)) { if (len > buf_size - 10) { growBuffer(buf, 10); } if ((ctxt->replaceEntities == 0) && (ent->content[0] == '&')) { buf[len++] = '&'; buf[len++] = '#'; buf[len++] = '3'; buf[len++] = '8'; buf[len++] = ';'; } else { buf[len++] = ent->content[0]; } } else if ((ent != NULL) && (ctxt->replaceEntities != 0)) { if (ent->etype != XML_INTERNAL_PREDEFINED_ENTITY) { rep = xmlStringDecodeEntities(ctxt, ent->content, XML_SUBSTITUTE_REF, 0, 0, 0); if (rep != NULL) { current = rep; while (*current != 0) { /* non input consuming */ if ((*current == 0xD) || (*current == 0xA) || (*current == 0x9)) { buf[len++] = 0x20; current++; } else buf[len++] = *current++; if (len > buf_size - 10) { growBuffer(buf, 10); } } xmlFree(rep); rep = NULL; } } else { if (len > buf_size - 10) { growBuffer(buf, 10); } if (ent->content != NULL) buf[len++] = ent->content[0]; } } else if (ent != NULL) { int i = xmlStrlen(ent->name); const xmlChar *cur = ent->name; /* * This may look absurd but is needed to detect * entities problems */ if ((ent->etype != XML_INTERNAL_PREDEFINED_ENTITY) && (ent->content != NULL)) { rep = xmlStringDecodeEntities(ctxt, ent->content, XML_SUBSTITUTE_REF, 0, 0, 0); if (rep != NULL) { xmlFree(rep); rep = NULL; } } /* * Just output the reference */ buf[len++] = '&'; while (len > buf_size - i - 10) { growBuffer(buf, i + 10); } for (;i > 0;i--) buf[len++] = *cur++; buf[len++] = ';'; } } } else { if ((c == 0x20) || (c == 0xD) || (c == 0xA) || (c == 0x9)) { if ((len != 0) || (!normalize)) { if ((!normalize) || (!in_space)) { COPY_BUF(l,buf,len,0x20); while (len > buf_size - 10) { growBuffer(buf, 10); } } in_space = 1; } } else { in_space = 0; COPY_BUF(l,buf,len,c); if (len > buf_size - 10) { growBuffer(buf, 10); } } 
NEXTL(l); } GROW; c = CUR_CHAR(l); } if ((in_space) && (normalize)) { while (buf[len - 1] == 0x20) len--; } buf[len] = 0; if (RAW == '<') { xmlFatalErr(ctxt, XML_ERR_LT_IN_ATTRIBUTE, NULL); } else if (RAW != limit) { if ((c != 0) && (!IS_CHAR(c))) { xmlFatalErrMsg(ctxt, XML_ERR_INVALID_CHAR, "invalid character in attribute value\n"); } else { xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED, "AttValue: ' expected\n"); } } else NEXT; if (attlen != NULL) *attlen = len; return(buf); mem_error: xmlErrMemory(ctxt, NULL); if (buf != NULL) xmlFree(buf); if (rep != NULL) xmlFree(rep); return(NULL); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-189'], 'message': 'Fix parser local buffers size problems'</s>
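The recurring defect in this function is checking `len > buf_size - 10` around, rather than before, bursts of writes; the entity-name path alone emits i+2 bytes. The shape of the fix is grow-before-write with the burst size as the margin. An illustrative standalone helper (growBuffer itself is a libxml2-internal macro, not shown here):

#include <stdint.h>
#include <stdlib.h>

/* Grow-before-write: guarantee room for the next `needed` bytes plus a
 * terminator *before* emitting them, instead of testing a fixed "-10"
 * margin after several writes. Illustrative helper, not libxml2's macro. */
static int ensure_room(char **buf, size_t *cap, size_t len, size_t needed)
{
    size_t ncap;
    char *nbuf;

    if (needed >= SIZE_MAX - len)     /* request itself would overflow */
        return -1;
    if (len + needed + 1 <= *cap)
        return 0;
    ncap = *cap ? *cap : 16;
    while (ncap < len + needed + 1) {
        if (ncap > SIZE_MAX / 2)      /* doubling would overflow */
            return -1;
        ncap *= 2;
    }
    nbuf = realloc(*buf, ncap);
    if (nbuf == NULL)
        return -1;
    *buf = nbuf;
    *cap = ncap;
    return 0;
}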
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: xmlEncodeEntitiesReentrant(xmlDocPtr doc, const xmlChar *input) { const xmlChar *cur = input; xmlChar *buffer = NULL; xmlChar *out = NULL; int buffer_size = 0; int html = 0; if (input == NULL) return(NULL); if (doc != NULL) html = (doc->type == XML_HTML_DOCUMENT_NODE); /* * allocate an translation buffer. */ buffer_size = 1000; buffer = (xmlChar *) xmlMalloc(buffer_size * sizeof(xmlChar)); if (buffer == NULL) { xmlEntitiesErrMemory("xmlEncodeEntitiesReentrant: malloc failed"); return(NULL); } out = buffer; while (*cur != '\0') { if (out - buffer > buffer_size - 100) { int indx = out - buffer; growBufferReentrant(); out = &buffer[indx]; } /* * By default one have to encode at least '<', '>', '"' and '&' ! */ if (*cur == '<') { *out++ = '&'; *out++ = 'l'; *out++ = 't'; *out++ = ';'; } else if (*cur == '>') { *out++ = '&'; *out++ = 'g'; *out++ = 't'; *out++ = ';'; } else if (*cur == '&') { *out++ = '&'; *out++ = 'a'; *out++ = 'm'; *out++ = 'p'; *out++ = ';'; } else if (((*cur >= 0x20) && (*cur < 0x80)) || (*cur == '\n') || (*cur == '\t') || ((html) && (*cur == '\r'))) { /* * default case, just copy ! */ *out++ = *cur; } else if (*cur >= 0x80) { if (((doc != NULL) && (doc->encoding != NULL)) || (html)) { /* * Bjrn Reese <br@sseusa.com> provided the patch xmlChar xc; xc = (*cur & 0x3F) << 6; if (cur[1] != 0) { xc += *(++cur) & 0x3F; *out++ = xc; } else */ *out++ = *cur; } else { /* * We assume we have UTF-8 input. */ char buf[11], *ptr; int val = 0, l = 1; if (*cur < 0xC0) { xmlEntitiesErr(XML_CHECK_NOT_UTF8, "xmlEncodeEntitiesReentrant : input not UTF-8"); if (doc != NULL) doc->encoding = xmlStrdup(BAD_CAST "ISO-8859-1"); snprintf(buf, sizeof(buf), "&#%d;", *cur); buf[sizeof(buf) - 1] = 0; ptr = buf; while (*ptr != 0) *out++ = *ptr++; cur++; continue; } else if (*cur < 0xE0) { val = (cur[0]) & 0x1F; val <<= 6; val |= (cur[1]) & 0x3F; l = 2; } else if (*cur < 0xF0) { val = (cur[0]) & 0x0F; val <<= 6; val |= (cur[1]) & 0x3F; val <<= 6; val |= (cur[2]) & 0x3F; l = 3; } else if (*cur < 0xF8) { val = (cur[0]) & 0x07; val <<= 6; val |= (cur[1]) & 0x3F; val <<= 6; val |= (cur[2]) & 0x3F; val <<= 6; val |= (cur[3]) & 0x3F; l = 4; } if ((l == 1) || (!IS_CHAR(val))) { xmlEntitiesErr(XML_ERR_INVALID_CHAR, "xmlEncodeEntitiesReentrant : char out of range\n"); if (doc != NULL) doc->encoding = xmlStrdup(BAD_CAST "ISO-8859-1"); snprintf(buf, sizeof(buf), "&#%d;", *cur); buf[sizeof(buf) - 1] = 0; ptr = buf; while (*ptr != 0) *out++ = *ptr++; cur++; continue; } /* * We could do multiple things here. Just save as a char ref */ snprintf(buf, sizeof(buf), "&#x%X;", val); buf[sizeof(buf) - 1] = 0; ptr = buf; while (*ptr != 0) *out++ = *ptr++; cur += l; continue; } } else if (IS_BYTE_CHAR(*cur)) { char buf[11], *ptr; snprintf(buf, sizeof(buf), "&#%d;", *cur); buf[sizeof(buf) - 1] = 0; ptr = buf; while (*ptr != 0) *out++ = *ptr++; } cur++; } *out = 0; return(buffer); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-189'], 'message': 'Fix entities local buffers size problems'</s>
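Same commit family, same shape: each escape written here is a bounded burst (the longest, "&#x10FFFF;" plus terminator, is 11 bytes), so the capacity test must reserve that worst case before every emission, not once per loop pass with a fixed margin. A compact restatement of the fixed pattern:

#include <stdio.h>

#define ESCAPE_MAX 11   /* longest burst: "&#x10FFFF;" + NUL */

/* Reserve the worst-case escape length before each emission; `have` is
 * bytes already used, `cap` total capacity, with have <= cap assumed. */
static int room_for_escape(unsigned long have, unsigned long cap)
{
    return cap - have >= ESCAPE_MAX;   /* grow (or fail) when false */
}

int main(void)
{
    printf("%d %d\n", room_for_escape(0, 1000), room_for_escape(995, 1000));
    return 0;   /* prints: 1 0 */
}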
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int udf_load_logicalvol(struct super_block *sb, sector_t block, struct kernel_lb_addr *fileset) { struct logicalVolDesc *lvd; int i, j, offset; uint8_t type; struct udf_sb_info *sbi = UDF_SB(sb); struct genericPartitionMap *gpm; uint16_t ident; struct buffer_head *bh; unsigned int table_len; int ret = 0; bh = udf_read_tagged(sb, block, block, &ident); if (!bh) return 1; BUG_ON(ident != TAG_IDENT_LVD); lvd = (struct logicalVolDesc *)bh->b_data; table_len = le32_to_cpu(lvd->mapTableLength); if (sizeof(*lvd) + table_len > sb->s_blocksize) { udf_err(sb, "error loading logical volume descriptor: " "Partition table too long (%u > %lu)\n", table_len, sb->s_blocksize - sizeof(*lvd)); goto out_bh; } ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps)); if (ret) goto out_bh; for (i = 0, offset = 0; i < sbi->s_partitions && offset < table_len; i++, offset += gpm->partitionMapLength) { struct udf_part_map *map = &sbi->s_partmaps[i]; gpm = (struct genericPartitionMap *) &(lvd->partitionMaps[offset]); type = gpm->partitionMapType; if (type == 1) { struct genericPartitionMap1 *gpm1 = (struct genericPartitionMap1 *)gpm; map->s_partition_type = UDF_TYPE1_MAP15; map->s_volumeseqnum = le16_to_cpu(gpm1->volSeqNum); map->s_partition_num = le16_to_cpu(gpm1->partitionNum); map->s_partition_func = NULL; } else if (type == 2) { struct udfPartitionMap2 *upm2 = (struct udfPartitionMap2 *)gpm; if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL, strlen(UDF_ID_VIRTUAL))) { u16 suf = le16_to_cpu(((__le16 *)upm2->partIdent. identSuffix)[0]); if (suf < 0x0200) { map->s_partition_type = UDF_VIRTUAL_MAP15; map->s_partition_func = udf_get_pblock_virt15; } else { map->s_partition_type = UDF_VIRTUAL_MAP20; map->s_partition_func = udf_get_pblock_virt20; } } else if (!strncmp(upm2->partIdent.ident, UDF_ID_SPARABLE, strlen(UDF_ID_SPARABLE))) { uint32_t loc; struct sparingTable *st; struct sparablePartitionMap *spm = (struct sparablePartitionMap *)gpm; map->s_partition_type = UDF_SPARABLE_MAP15; map->s_type_specific.s_sparing.s_packet_len = le16_to_cpu(spm->packetLength); for (j = 0; j < spm->numSparingTables; j++) { struct buffer_head *bh2; loc = le32_to_cpu( spm->locSparingTable[j]); bh2 = udf_read_tagged(sb, loc, loc, &ident); map->s_type_specific.s_sparing. s_spar_map[j] = bh2; if (bh2 == NULL) continue; st = (struct sparingTable *)bh2->b_data; if (ident != 0 || strncmp( st->sparingIdent.ident, UDF_ID_SPARING, strlen(UDF_ID_SPARING))) { brelse(bh2); map->s_type_specific.s_sparing. 
s_spar_map[j] = NULL; } } map->s_partition_func = udf_get_pblock_spar15; } else if (!strncmp(upm2->partIdent.ident, UDF_ID_METADATA, strlen(UDF_ID_METADATA))) { struct udf_meta_data *mdata = &map->s_type_specific.s_metadata; struct metadataPartitionMap *mdm = (struct metadataPartitionMap *) &(lvd->partitionMaps[offset]); udf_debug("Parsing Logical vol part %d type %d id=%s\n", i, type, UDF_ID_METADATA); map->s_partition_type = UDF_METADATA_MAP25; map->s_partition_func = udf_get_pblock_meta25; mdata->s_meta_file_loc = le32_to_cpu(mdm->metadataFileLoc); mdata->s_mirror_file_loc = le32_to_cpu(mdm->metadataMirrorFileLoc); mdata->s_bitmap_file_loc = le32_to_cpu(mdm->metadataBitmapFileLoc); mdata->s_alloc_unit_size = le32_to_cpu(mdm->allocUnitSize); mdata->s_align_unit_size = le16_to_cpu(mdm->alignUnitSize); if (mdm->flags & 0x01) mdata->s_flags |= MF_DUPLICATE_MD; udf_debug("Metadata Ident suffix=0x%x\n", le16_to_cpu(*(__le16 *) mdm->partIdent.identSuffix)); udf_debug("Metadata part num=%d\n", le16_to_cpu(mdm->partitionNum)); udf_debug("Metadata part alloc unit size=%d\n", le32_to_cpu(mdm->allocUnitSize)); udf_debug("Metadata file loc=%d\n", le32_to_cpu(mdm->metadataFileLoc)); udf_debug("Mirror file loc=%d\n", le32_to_cpu(mdm->metadataMirrorFileLoc)); udf_debug("Bitmap file loc=%d\n", le32_to_cpu(mdm->metadataBitmapFileLoc)); udf_debug("Flags: %d %d\n", mdata->s_flags, mdm->flags); } else { udf_debug("Unknown ident: %s\n", upm2->partIdent.ident); continue; } map->s_volumeseqnum = le16_to_cpu(upm2->volSeqNum); map->s_partition_num = le16_to_cpu(upm2->partitionNum); } udf_debug("Partition (%d:%d) type %d on volume %d\n", i, map->s_partition_num, type, map->s_volumeseqnum); } if (fileset) { struct long_ad *la = (struct long_ad *)&(lvd->logicalVolContentsUse[0]); *fileset = lelb_to_cpu(la->extLocation); udf_debug("FileSet found in LogicalVolDesc at block=%d, partition=%d\n", fileset->logicalBlockNum, fileset->partitionReferenceNum); } if (lvd->integritySeqExt.extLength) udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt)); out_bh: brelse(bh); return ret; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119', 'CWE-787'], 'message': 'udf: Fortify loading of sparing table Add sanity checks when loading sparing table from disk to avoid accessing unallocated memory or writing to it. Signed-off-by: Jan Kara <jack@suse.cz>'</s>
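The commit message says the sparing-table loading was fortified with sanity checks. A hedged, self-contained sketch of that kind of validation, using invented names (spar_map_hdr and MAX_SPARING_TABLES are illustrative, not the real UDF structures):

#include <stdint.h>
#include <stddef.h>

struct spar_map_hdr {
    uint8_t map_len;    /* length of this partition map entry      */
    uint8_t n_tables;   /* number of sparing tables that follow it */
};

#define MAX_SPARING_TABLES 4

/* Bound every untrusted on-disk count before it indexes a fixed array or
 * advances an offset, so a crafted image cannot walk past the end of the
 * mapping table or of an s_spar_map[]-style array. */
static int spar_map_ok(const struct spar_map_hdr *h, size_t bytes_left)
{
    if (h->map_len == 0 || h->map_len > bytes_left)
        return 0;       /* entry would overrun the descriptor buffer  */
    if (h->n_tables == 0 || h->n_tables > MAX_SPARING_TABLES)
        return 0;       /* index would overrun the per-map table array */
    return 1;
}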
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: PKL_CommandLength(CMD_Request *r) { int type; type = ntohs(r->command); if (type < 0 || type >= N_REQUEST_TYPES) { return 0; } else { switch (type) { case REQ_NULL: return offsetof(CMD_Request, data); case REQ_ONLINE: return offsetof(CMD_Request, data.online.EOR); case REQ_OFFLINE: return offsetof(CMD_Request, data.offline.EOR); case REQ_BURST: return offsetof(CMD_Request, data.burst.EOR); case REQ_MODIFY_MINPOLL: return offsetof(CMD_Request, data.modify_minpoll.EOR); case REQ_MODIFY_MAXPOLL: return offsetof(CMD_Request, data.modify_maxpoll.EOR); case REQ_DUMP: return offsetof(CMD_Request, data.dump.EOR); case REQ_MODIFY_MAXDELAY: return offsetof(CMD_Request, data.modify_maxdelay.EOR); case REQ_MODIFY_MAXDELAYRATIO: return offsetof(CMD_Request, data.modify_maxdelayratio.EOR); case REQ_MODIFY_MAXDELAYDEVRATIO: return offsetof(CMD_Request, data.modify_maxdelaydevratio.EOR); case REQ_MODIFY_MAXUPDATESKEW: return offsetof(CMD_Request, data.modify_maxupdateskew.EOR); case REQ_LOGON : return offsetof(CMD_Request, data.logon.EOR); case REQ_SETTIME : return offsetof(CMD_Request, data.settime.EOR); case REQ_LOCAL : return offsetof(CMD_Request, data.local.EOR); case REQ_MANUAL : return offsetof(CMD_Request, data.manual.EOR); case REQ_N_SOURCES : return offsetof(CMD_Request, data.n_sources.EOR); case REQ_SOURCE_DATA : return offsetof(CMD_Request, data.source_data.EOR); case REQ_REKEY : return offsetof(CMD_Request, data.rekey.EOR); case REQ_ALLOW : return offsetof(CMD_Request, data.allow_deny.EOR); case REQ_ALLOWALL : return offsetof(CMD_Request, data.allow_deny.EOR); case REQ_DENY : return offsetof(CMD_Request, data.allow_deny.EOR); case REQ_DENYALL : return offsetof(CMD_Request, data.allow_deny.EOR); case REQ_CMDALLOW : return offsetof(CMD_Request, data.allow_deny.EOR); case REQ_CMDALLOWALL : return offsetof(CMD_Request, data.allow_deny.EOR); case REQ_CMDDENY : return offsetof(CMD_Request, data.allow_deny.EOR); case REQ_CMDDENYALL : return offsetof(CMD_Request, data.allow_deny.EOR); case REQ_ACCHECK : return offsetof(CMD_Request, data.ac_check.EOR); case REQ_CMDACCHECK : return offsetof(CMD_Request, data.ac_check.EOR); case REQ_ADD_SERVER : return offsetof(CMD_Request, data.ntp_source.EOR); case REQ_ADD_PEER : return offsetof(CMD_Request, data.ntp_source.EOR); case REQ_DEL_SOURCE : return offsetof(CMD_Request, data.del_source.EOR); case REQ_WRITERTC : return offsetof(CMD_Request, data.writertc.EOR); case REQ_DFREQ : return offsetof(CMD_Request, data.dfreq.EOR); case REQ_DOFFSET : return offsetof(CMD_Request, data.doffset.EOR); case REQ_TRACKING : return offsetof(CMD_Request, data.tracking.EOR); case REQ_SOURCESTATS : return offsetof(CMD_Request, data.sourcestats.EOR); case REQ_RTCREPORT : return offsetof(CMD_Request, data.rtcreport.EOR); case REQ_TRIMRTC : return offsetof(CMD_Request, data.trimrtc.EOR); case REQ_CYCLELOGS : return offsetof(CMD_Request, data.cyclelogs.EOR); case REQ_SUBNETS_ACCESSED : { unsigned long ns; ns = ntohl(r->data.subnets_accessed.n_subnets); return (offsetof(CMD_Request, data.subnets_accessed.subnets) + ns * sizeof(REQ_SubnetsAccessed_Subnet)); } case REQ_CLIENT_ACCESSES: { unsigned long nc; nc = ntohl(r->data.client_accesses.n_clients); return (offsetof(CMD_Request, data.client_accesses.client_ips) + nc * sizeof(unsigned long)); } case 
REQ_CLIENT_ACCESSES_BY_INDEX: return offsetof(CMD_Request, data.client_accesses_by_index.EOR); case REQ_MANUAL_LIST: return offsetof(CMD_Request, data.manual_list.EOR); case REQ_MANUAL_DELETE: return offsetof(CMD_Request, data.manual_delete.EOR); case REQ_MAKESTEP: return offsetof(CMD_Request, data.make_step.EOR); case REQ_ACTIVITY: return offsetof(CMD_Request, data.activity.EOR); case REQ_RESELECT: return offsetof(CMD_Request, data.reselect.EOR); case REQ_RESELECTDISTANCE: return offsetof(CMD_Request, data.reselect_distance.EOR); case REQ_MODIFY_MINSTRATUM: return offsetof(CMD_Request, data.modify_minstratum.EOR); case REQ_MODIFY_POLLTARGET: return offsetof(CMD_Request, data.modify_polltarget.EOR); default: /* If we fall through the switch, it most likely means we've forgotten to implement a new case */ assert(0); } } /* Catch-all case */ return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-189'], 'message': 'Fix buffer overflow when processing crafted command packets When the length of the REQ_SUBNETS_ACCESSED, REQ_CLIENT_ACCESSES command requests and the RPY_SUBNETS_ACCESSED, RPY_CLIENT_ACCESSES, RPY_CLIENT_ACCESSES_BY_INDEX, RPY_MANUAL_LIST command replies is calculated, the number of items stored in the packet is not validated. A crafted command request/reply can be used to crash the server/client. Only clients allowed by cmdallow (by default only localhost) can crash the server. With chrony versions 1.25 and 1.26 this bug has a smaller security impact as the server requires the clients to be authenticated in order to process the subnet and client accesses commands. In 1.27 and 1.28, however, the invalid calculated length is included also in the authentication check which may cause another crash.'</s>
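The overflow described in the reply comes from the REQ_SUBNETS_ACCESSED and REQ_CLIENT_ACCESSES arms, where ns and nc are read from the packet and multiplied into the length without validation. A small sketch of the missing clamp, with a hypothetical protocol limit:

#include <stdint.h>
#include <stddef.h>

#define MAX_ITEMS_PER_REQUEST 8   /* hypothetical upper bound */

/* Returning 0 lets the caller treat the packet as malformed instead of
 * trusting count * sizeof(item) to stay inside the received datagram. */
static size_t var_payload_len(uint32_t count, size_t fixed, size_t item)
{
    if (count > MAX_ITEMS_PER_REQUEST)
        return 0;
    return fixed + (size_t)count * item;
}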
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: read_from_cmd_socket(void *anything) { int status; int read_length; /* Length of packet read */ int expected_length; /* Expected length of packet without auth data */ unsigned long flags; CMD_Request rx_message; CMD_Reply tx_message, *prev_tx_message; int rx_message_length, tx_message_length; int sock_fd; union sockaddr_in46 where_from; socklen_t from_length; IPAddr remote_ip; unsigned short remote_port; int auth_length; int auth_ok; int utoken_ok, token_ok; int issue_token; int valid_ts; int authenticated; int localhost; int allowed; unsigned short rx_command; unsigned long rx_message_token; unsigned long tx_message_token; unsigned long rx_message_seq; unsigned long rx_attempt; struct timeval now; struct timeval cooked_now; flags = 0; rx_message_length = sizeof(rx_message); from_length = sizeof(where_from); sock_fd = (long)anything; status = recvfrom(sock_fd, (char *)&rx_message, rx_message_length, flags, &where_from.u, &from_length); if (status < 0) { LOG(LOGS_WARN, LOGF_CmdMon, "Error [%s] reading from control socket %d", strerror(errno), sock_fd); return; } read_length = status; expected_length = PKL_CommandLength(&rx_message); rx_command = ntohs(rx_message.command); LCL_ReadRawTime(&now); LCL_CookTime(&now, &cooked_now, NULL); tx_message.version = PROTO_VERSION_NUMBER; tx_message.pkt_type = PKT_TYPE_CMD_REPLY; tx_message.res1 = 0; tx_message.res2 = 0; tx_message.command = rx_message.command; tx_message.sequence = rx_message.sequence; tx_message.reply = htons(RPY_NULL); tx_message.number = htons(1); tx_message.total = htons(1); tx_message.pad1 = 0; tx_message.utoken = htonl(utoken); /* Set this to a default (invalid) value. This protects against the token field being set to an arbitrary value if we reject the message, e.g. due to the host failing the access check. 
*/ tx_message.token = htonl(0xffffffffUL); memset(&tx_message.auth, 0, sizeof(tx_message.auth)); switch (where_from.u.sa_family) { case AF_INET: remote_ip.family = IPADDR_INET4; remote_ip.addr.in4 = ntohl(where_from.in4.sin_addr.s_addr); remote_port = ntohs(where_from.in4.sin_port); localhost = (remote_ip.addr.in4 == 0x7f000001UL); break; #ifdef HAVE_IPV6 case AF_INET6: remote_ip.family = IPADDR_INET6; memcpy(&remote_ip.addr.in6, where_from.in6.sin6_addr.s6_addr, sizeof (remote_ip.addr.in6)); remote_port = ntohs(where_from.in6.sin6_port); /* Check for ::1 */ for (localhost = 0; localhost < 16; localhost++) if (remote_ip.addr.in6[localhost] != 0) break; localhost = (localhost == 15 && remote_ip.addr.in6[localhost] == 1); break; #endif default: assert(0); } allowed = ADF_IsAllowed(access_auth_table, &remote_ip) || localhost; if (read_length < offsetof(CMD_Request, data) || rx_message.pkt_type != PKT_TYPE_CMD_REQUEST || rx_message.res1 != 0 || rx_message.res2 != 0) { /* We don't know how to process anything like this */ if (allowed) CLG_LogCommandAccess(&remote_ip, CLG_CMD_BAD_PKT, cooked_now.tv_sec); return; } if (rx_message.version != PROTO_VERSION_NUMBER) { tx_message.status = htons(STT_NOHOSTACCESS); if (!LOG_RateLimited()) { LOG(LOGS_WARN, LOGF_CmdMon, "Read command packet with protocol version %d (expected %d) from %s:%hu", rx_message.version, PROTO_VERSION_NUMBER, UTI_IPToString(&remote_ip), remote_port); } if (allowed) CLG_LogCommandAccess(&remote_ip, CLG_CMD_BAD_PKT, cooked_now.tv_sec); if (rx_message.version >= PROTO_VERSION_MISMATCH_COMPAT) { tx_message.status = htons(STT_BADPKTVERSION); /* add empty MD5 auth so older clients will not drop the reply due to bad length */ memset(((char *)&tx_message) + PKL_ReplyLength(&tx_message), 0, 16); transmit_reply(&tx_message, &where_from, 16); } return; } if (rx_command >= N_REQUEST_TYPES) { if (!LOG_RateLimited()) { LOG(LOGS_WARN, LOGF_CmdMon, "Read command packet with invalid command %d from %s:%hu", rx_command, UTI_IPToString(&remote_ip), remote_port); } if (allowed) CLG_LogCommandAccess(&remote_ip, CLG_CMD_BAD_PKT, cooked_now.tv_sec); tx_message.status = htons(STT_INVALID); transmit_reply(&tx_message, &where_from, 0); return; } if (read_length < expected_length) { if (!LOG_RateLimited()) { LOG(LOGS_WARN, LOGF_CmdMon, "Read incorrectly sized command packet from %s:%hu", UTI_IPToString(&remote_ip), remote_port); } if (allowed) CLG_LogCommandAccess(&remote_ip, CLG_CMD_BAD_PKT, cooked_now.tv_sec); tx_message.status = htons(STT_BADPKTLENGTH); transmit_reply(&tx_message, &where_from, 0); return; } if (!allowed) { /* The client is not allowed access, so don't waste any more time on him. Note that localhost is always allowed access regardless of the defined access rules - otherwise, we could shut ourselves out completely! */ if (!LOG_RateLimited()) { LOG(LOGS_WARN, LOGF_CmdMon, "Command packet received from unauthorised host %s port %d", UTI_IPToString(&remote_ip), remote_port); } tx_message.status = htons(STT_NOHOSTACCESS); transmit_reply(&tx_message, &where_from, 0); return; } /* OK, we have a valid message. Now dispatch on message type and process it. */ /* Do authentication stuff and command tokens here. Well-behaved clients will set their utokens to 0 to save us wasting our time if the packet is unauthenticatable. */ if (rx_message.utoken != 0) { auth_ok = check_rx_packet_auth(&rx_message, read_length); } else { auth_ok = 0; } /* All this malarky is to protect the system against various forms of attack. 
Simple packet forgeries are blocked by requiring the packet to authenticate properly with MD5 or other crypto hash. (The assumption is that the command key is in a read-only keys file read by the daemon, and is known only to administrators.) Replay attacks are prevented by 2 fields in the packet. The 'token' field is where the client plays back to us a token that he was issued in an earlier reply. Each time we reply to a suitable packet, we issue a new token. The 'utoken' field is set to a new (hopefully increasing) value each time the daemon is run. This prevents packets from a previous incarnation being played back at us when the same point in the 'token' sequence comes up. (The token mechanism also prevents a non-idempotent command from being executed twice from the same client, if the client fails to receive our reply the first time and tries a resend.) The problem is how a client should get its first token. Our token handling only remembers a finite number of issued tokens (actually 32) - if a client replies with a (legitimate) token older than that, it will be treated as though a duplicate token has been supplied. If a simple token-request protocol were used, the whole thing would be vulnerable to a denial of service attack, where an attacker just replays valid token-request packets at us, causing us to keep issuing new tokens, invalidating all the ones we have given out to true clients already. To protect against this, the token-request (REQ_LOGON) packet includes a timestamp field. To issue a token, we require that this field is different from any we have processed before. To bound our storage, we require that the timestamp is within a certain period of our current time. For clients running on the same host this will be easily satisfied. */ utoken_ok = (ntohl(rx_message.utoken) == utoken); /* Avoid binning a valid user's token if we merely get a forged packet */ rx_message_token = ntohl(rx_message.token); rx_message_seq = ntohl(rx_message.sequence); rx_attempt = ntohs(rx_message.attempt); if (auth_ok && utoken_ok) { token_ok = check_token(rx_message_token); } else { token_ok = 0; } if (auth_ok && utoken_ok && !token_ok) { /* This might be a resent message, due to the client not getting our reply to the first attempt. See if we can find the message. 
*/ prev_tx_message = lookup_reply(rx_message_token, rx_message_seq, rx_attempt); if (prev_tx_message) { /* Just send this message again */ tx_message_length = PKL_ReplyLength(prev_tx_message); status = sendto(sock_fd, (void *) prev_tx_message, tx_message_length, 0, &where_from.u, from_length); if (status < 0 && !LOG_RateLimited()) { LOG(LOGS_WARN, LOGF_CmdMon, "Could not send response to %s:%hu", UTI_IPToString(&remote_ip), remote_port); } return; } /* Otherwise, just fall through into normal processing */ } if (auth_ok && utoken_ok && token_ok) { /* See whether we can discard the previous reply from storage */ token_acknowledged(rx_message_token, &now); } valid_ts = 0; if (auth_ok) { struct timeval ts; UTI_TimevalNetworkToHost(&rx_message.data.logon.ts, &ts); if ((utoken_ok && token_ok) || ((ntohl(rx_message.utoken) == SPECIAL_UTOKEN) && (rx_command == REQ_LOGON) && (valid_ts = ts_is_unique_and_not_stale(&ts, &now)))) issue_token = 1; else issue_token = 0; } else { issue_token = 0; } authenticated = auth_ok & utoken_ok & token_ok; if (authenticated) { CLG_LogCommandAccess(&remote_ip, CLG_CMD_AUTH, cooked_now.tv_sec); } else { CLG_LogCommandAccess(&remote_ip, CLG_CMD_NORMAL, cooked_now.tv_sec); } if (issue_token) { /* Only command clients where the user has apparently 'logged on' get a token to allow them to emit an authenticated command next time */ tx_message_token = get_token(); } else { tx_message_token = 0xffffffffUL; } tx_message.token = htonl(tx_message_token); if (rx_command >= N_REQUEST_TYPES) { /* This should be already handled */ assert(0); } else { allowed = 0; /* Check level of authority required to issue the command */ switch(permissions[rx_command]) { case PERMIT_AUTH: if (authenticated) { allowed = 1; } else { allowed = 0; } break; case PERMIT_LOCAL: if (authenticated || localhost) { allowed = 1; } else { allowed = 0; } break; case PERMIT_OPEN: allowed = 1; break; default: assert(0); } if (allowed) { switch(rx_command) { case REQ_NULL: handle_null(&rx_message, &tx_message); break; case REQ_ONLINE: handle_online(&rx_message, &tx_message); break; case REQ_OFFLINE: handle_offline(&rx_message, &tx_message); break; case REQ_BURST: handle_burst(&rx_message, &tx_message); break; case REQ_MODIFY_MINPOLL: handle_modify_minpoll(&rx_message, &tx_message); break; case REQ_MODIFY_MAXPOLL: handle_modify_maxpoll(&rx_message, &tx_message); break; case REQ_DUMP: SRC_DumpSources(); tx_message.status = htons(STT_SUCCESS); break; case REQ_MODIFY_MAXDELAY: handle_modify_maxdelay(&rx_message, &tx_message); break; case REQ_MODIFY_MAXDELAYRATIO: handle_modify_maxdelayratio(&rx_message, &tx_message); break; case REQ_MODIFY_MAXDELAYDEVRATIO: handle_modify_maxdelaydevratio(&rx_message, &tx_message); break; case REQ_MODIFY_MAXUPDATESKEW: handle_modify_maxupdateskew(&rx_message, &tx_message); break; case REQ_LOGON: /* If the log-on fails, record the reason why */ if (!issue_token && !LOG_RateLimited()) { LOG(LOGS_WARN, LOGF_CmdMon, "Bad command logon from %s port %d (auth_ok=%d valid_ts=%d)", UTI_IPToString(&remote_ip), remote_port, auth_ok, valid_ts); } if (issue_token == 1) { tx_message.status = htons(STT_SUCCESS); } else if (!auth_ok) { tx_message.status = htons(STT_UNAUTH); } else if (!valid_ts) { tx_message.status = htons(STT_INVALIDTS); } else { tx_message.status = htons(STT_FAILED); } break; case REQ_SETTIME: handle_settime(&rx_message, &tx_message); break; case REQ_LOCAL: handle_local(&rx_message, &tx_message); break; case REQ_MANUAL: handle_manual(&rx_message, &tx_message); break; case 
REQ_N_SOURCES: handle_n_sources(&rx_message, &tx_message); break; case REQ_SOURCE_DATA: handle_source_data(&rx_message, &tx_message); break; case REQ_REKEY: handle_rekey(&rx_message, &tx_message); break; case REQ_ALLOW: handle_allow(&rx_message, &tx_message); break; case REQ_ALLOWALL: handle_allowall(&rx_message, &tx_message); break; case REQ_DENY: handle_deny(&rx_message, &tx_message); break; case REQ_DENYALL: handle_denyall(&rx_message, &tx_message); break; case REQ_CMDALLOW: handle_cmdallow(&rx_message, &tx_message); break; case REQ_CMDALLOWALL: handle_cmdallowall(&rx_message, &tx_message); break; case REQ_CMDDENY: handle_cmddeny(&rx_message, &tx_message); break; case REQ_CMDDENYALL: handle_cmddenyall(&rx_message, &tx_message); break; case REQ_ACCHECK: handle_accheck(&rx_message, &tx_message); break; case REQ_CMDACCHECK: handle_cmdaccheck(&rx_message, &tx_message); break; case REQ_ADD_SERVER: handle_add_source(NTP_SERVER, &rx_message, &tx_message); break; case REQ_ADD_PEER: handle_add_source(NTP_PEER, &rx_message, &tx_message); break; case REQ_DEL_SOURCE: handle_del_source(&rx_message, &tx_message); break; case REQ_WRITERTC: handle_writertc(&rx_message, &tx_message); break; case REQ_DFREQ: handle_dfreq(&rx_message, &tx_message); break; case REQ_DOFFSET: handle_doffset(&rx_message, &tx_message); break; case REQ_TRACKING: handle_tracking(&rx_message, &tx_message); break; case REQ_SOURCESTATS: handle_sourcestats(&rx_message, &tx_message); break; case REQ_RTCREPORT: handle_rtcreport(&rx_message, &tx_message); break; case REQ_TRIMRTC: handle_trimrtc(&rx_message, &tx_message); break; case REQ_CYCLELOGS: handle_cyclelogs(&rx_message, &tx_message); break; case REQ_SUBNETS_ACCESSED: handle_subnets_accessed(&rx_message, &tx_message); break; case REQ_CLIENT_ACCESSES: handle_client_accesses(&rx_message, &tx_message); break; case REQ_CLIENT_ACCESSES_BY_INDEX: handle_client_accesses_by_index(&rx_message, &tx_message); break; case REQ_MANUAL_LIST: handle_manual_list(&rx_message, &tx_message); break; case REQ_MANUAL_DELETE: handle_manual_delete(&rx_message, &tx_message); break; case REQ_MAKESTEP: handle_make_step(&rx_message, &tx_message); break; case REQ_ACTIVITY: handle_activity(&rx_message, &tx_message); break; case REQ_RESELECTDISTANCE: handle_reselect_distance(&rx_message, &tx_message); break; case REQ_RESELECT: handle_reselect(&rx_message, &tx_message); break; case REQ_MODIFY_MINSTRATUM: handle_modify_minstratum(&rx_message, &tx_message); break; case REQ_MODIFY_POLLTARGET: handle_modify_polltarget(&rx_message, &tx_message); break; default: assert(0); break; } } else { tx_message.status = htons(STT_UNAUTH); } } if (auth_ok) { auth_length = generate_tx_packet_auth(&tx_message); } else { auth_length = 0; } if (token_ok) { save_reply(&tx_message, rx_message_token, tx_message_token, rx_message_seq, rx_attempt, &now); } /* Transmit the response */ { /* Include a simple way to lose one message in three to test resend */ static int do_it=1; if (do_it) { transmit_reply(&tx_message, &where_from, auth_length); } #if 0 do_it = ((do_it + 1) % 3); #endif } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-189'], 'message': 'Fix buffer overflow when processing crafted command packets When the length of the REQ_SUBNETS_ACCESSED, REQ_CLIENT_ACCESSES command requests and the RPY_SUBNETS_ACCESSED, RPY_CLIENT_ACCESSES, RPY_CLIENT_ACCESSES_BY_INDEX, RPY_MANUAL_LIST command replies is calculated, the number of items stored in the packet is not 
validated. A crafted command request/reply can be used to crash the server/client. Only clients allowed by cmdallow (by default only localhost) can crash the server. With chrony versions 1.25 and 1.26 this bug has a smaller security impact as the server requires the clients to be authenticated in order to process the subnet and client accesses commands. In 1.27 and 1.28, however, the invalid calculated length is included also in the authentication check which may cause another crash.'</s>
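On the receive side the same bug needs a matching guard: a length function that can return 0 for a bogus count is only useful if the dispatch loop rejects that value. A hedged sketch of the check this code path implies:

/* Treat a non-positive expected length as a malformed packet before any
 * reply-length computation reuses the attacker-supplied item counts. */
static int request_length_ok(int read_length, int expected_length)
{
    if (expected_length <= 0)
        return 0;                 /* unknown type or oversized count */
    return read_length >= expected_length;
}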
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: handle_client_accesses(CMD_Request *rx_message, CMD_Reply *tx_message) { CLG_Status result; RPT_ClientAccess_Report report; unsigned long nc; IPAddr ip; int i; struct timeval now; LCL_ReadCookedTime(&now, NULL); nc = ntohl(rx_message->data.client_accesses.n_clients); tx_message->status = htons(STT_SUCCESS); tx_message->reply = htons(RPY_CLIENT_ACCESSES); tx_message->data.client_accesses.n_clients = htonl(nc); printf("%d %d\n", (int)sizeof(RPY_ClientAccesses_Client), (int)offsetof(CMD_Reply, data.client_accesses.clients)); for (i=0; i<nc; i++) { UTI_IPNetworkToHost(&rx_message->data.client_accesses.client_ips[i], &ip); UTI_IPHostToNetwork(&ip, &tx_message->data.client_accesses.clients[i].ip); result = CLG_GetClientAccessReportByIP(&ip, &report, now.tv_sec); switch (result) { case CLG_SUCCESS: tx_message->data.client_accesses.clients[i].client_hits = htonl(report.client_hits); tx_message->data.client_accesses.clients[i].peer_hits = htonl(report.peer_hits); tx_message->data.client_accesses.clients[i].cmd_hits_auth = htonl(report.cmd_hits_auth); tx_message->data.client_accesses.clients[i].cmd_hits_normal = htonl(report.cmd_hits_normal); tx_message->data.client_accesses.clients[i].cmd_hits_bad = htonl(report.cmd_hits_bad); tx_message->data.client_accesses.clients[i].last_ntp_hit_ago = htonl(report.last_ntp_hit_ago); tx_message->data.client_accesses.clients[i].last_cmd_hit_ago = htonl(report.last_cmd_hit_ago); printf("%s %lu %lu %lu %lu %lu %lu %lu\n", UTI_IPToString(&ip), report.client_hits, report.peer_hits, report.cmd_hits_auth, report.cmd_hits_normal, report.cmd_hits_bad, report.last_ntp_hit_ago, report.last_cmd_hit_ago); break; case CLG_EMPTYSUBNET: /* Signal back to the client that this single client address was unknown */ ip.family = IPADDR_UNSPEC; UTI_IPHostToNetwork(&ip, &tx_message->data.client_accesses.clients[i].ip); break; case CLG_INACTIVE: tx_message->status = htons(STT_INACTIVE); return; default: assert(0); break; } } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-189'], 'message': 'Don't send uninitialized data in command replies The RPY_SUBNETS_ACCESSED and RPY_CLIENT_ACCESSES command replies can contain uninitalized data from stack when the client logging is disabled or a bad subnet is requested. These commands were never used by chronyc and they require the client to be authenticated since version 1.25.'</s>
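The leak of stack data happens because only some fields of the reply are assigned on the CLG_EMPTYSUBNET and CLG_INACTIVE paths. The standard remedy matches the commit message: define every byte before any early return. A minimal sketch with invented field names:

#include <stdint.h>
#include <string.h>

struct access_reply { uint32_t status; uint32_t hits; uint32_t last_hit_ago; };

/* Zero the reply up front so every path, including error exits that skip
 * the per-field assignments, transmits defined bytes rather than whatever
 * happened to be on the stack. */
static void fill_access_reply(struct access_reply *r, int have_data, uint32_t hits)
{
    memset(r, 0, sizeof(*r));
    if (!have_data)
        return;                   /* all-zero reply is still safe to send */
    r->status = 1;
    r->hits = hits;
}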
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static long madvise_remove(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end) { loff_t offset; int error; *prev = NULL; /* tell sys_madvise we drop mmap_sem */ if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB)) return -EINVAL; if (!vma->vm_file || !vma->vm_file->f_mapping || !vma->vm_file->f_mapping->host) { return -EINVAL; } if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)) return -EACCES; offset = (loff_t)(start - vma->vm_start) + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); /* filesystem's fallocate may need to take i_mutex */ up_read(&current->mm->mmap_sem); error = do_fallocate(vma->vm_file, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, offset, end - start); down_read(&current->mm->mmap_sem); return error; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-362'], 'message': 'mm: Hold a file reference in madvise_remove Otherwise the code races with munmap (causing a use-after-free of the vma) or with close (causing a use-after-free of the struct file). The bug was introduced by commit 90ed52ebe481 ("[PATCH] holepunch: fix mmap_sem i_mutex deadlock") Cc: Hugh Dickins <hugh@veritas.com> Cc: Miklos Szeredi <mszeredi@suse.cz> Cc: Badari Pulavarty <pbadari@us.ibm.com> Cc: Nick Piggin <npiggin@suse.de> Cc: stable@vger.kernel.org Signed-off-by: Andy Lutomirski <luto@amacapital.net> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>'</s>
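The race window is the up_read()/down_read() pair around do_fallocate(): once mmap_sem is dropped, munmap or close can free the vma or the struct file. The upstream fix pins the file with a reference first; a userspace analogue of that pattern (invented types, not kernel code):

#include <pthread.h>

struct file_like { int refcount; };

/* Take the reference while the lock is still held, drop the lock for the
 * slow operation, then release the reference. This mirrors get_file()
 * before up_read() and fput() afterwards. */
static void pinned_slow_op(struct file_like *f, pthread_mutex_t *lock,
                           void (*slow)(struct file_like *))
{
    /* caller holds *lock on entry */
    f->refcount++;                /* pin: object outlives the unlock     */
    pthread_mutex_unlock(lock);
    slow(f);                      /* fallocate-style work, f can't vanish */
    pthread_mutex_lock(lock);
    f->refcount--;                /* safe: the lock serializes the count */
}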
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: dns_resolve(edge_connection_t *exitconn) { or_circuit_t *oncirc = TO_OR_CIRCUIT(exitconn->on_circuit); int is_resolve, r; char *hostname = NULL; is_resolve = exitconn->_base.purpose == EXIT_PURPOSE_RESOLVE; r = dns_resolve_impl(exitconn, is_resolve, oncirc, &hostname); switch (r) { case 1: /* We got an answer without a lookup -- either the answer was * cached, or it was obvious (like an IP address). */ if (is_resolve) { /* Send the answer back right now, and detach. */ if (hostname) send_resolved_hostname_cell(exitconn, hostname); else send_resolved_cell(exitconn, RESOLVED_TYPE_IPV4); exitconn->on_circuit = NULL; } else { /* Add to the n_streams list; the calling function will send back a * connected cell. */ exitconn->next_stream = oncirc->n_streams; oncirc->n_streams = exitconn; } break; case 0: /* The request is pending: add the connection into the linked list of * resolving_streams on this circuit. */ exitconn->_base.state = EXIT_CONN_STATE_RESOLVING; exitconn->next_stream = oncirc->resolving_streams; oncirc->resolving_streams = exitconn; break; case -2: case -1: /* The request failed before it could start: cancel this connection, * and stop everybody waiting for the same connection. */ if (is_resolve) { send_resolved_cell(exitconn, (r == -1) ? RESOLVED_TYPE_ERROR : RESOLVED_TYPE_ERROR_TRANSIENT); } exitconn->on_circuit = NULL; dns_cancel_pending_resolve(exitconn->_base.address); if (!exitconn->_base.marked_for_close) { connection_free(TO_CONN(exitconn)); // XXX ... and we just leak exitconn otherwise? -RD // If it's marked for close, it's on closeable_connection_lst in // main.c. If it's on the closeable list, it will get freed from // main.c. -NM // "<armadev> If that's true, there are other bugs around, where we // don't check if it's marked, and will end up double-freeing." // On the other hand, I don't know of any actual bugs here, so this // shouldn't be holding up the rc. -RD } break; default: tor_assert(0); } tor_free(hostname); return r; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-399'], 'message': 'Avoid hard (impossible?)-to-trigger double-free in dns_resolve() Fixes 6480; fix on 0.2.0.1-alpha; based on pseudonymous patch.'</s>
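The comment thread inside the sample spells out the hazard: freeing a connection that is also on the closeable list frees it twice. One way to make the decision un-forgettable is to centralize it; a hedged sketch, not Tor's literal patch:

#include <stdlib.h>

struct conn { int marked_for_close; };

/* Free-or-defer in exactly one helper, so no call site can free an object
 * that the close machinery will free again later. */
static void conn_discard(struct conn **c)
{
    if (*c == NULL)
        return;
    if (!(*c)->marked_for_close)
        free(*c);                 /* not queued elsewhere: free now      */
    *c = NULL;                    /* caller's pointer is dead either way */
}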
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: smartlist_choose_by_bandwidth(smartlist_t *sl, bandwidth_weight_rule_t rule, int statuses) { unsigned int i; routerinfo_t *router; routerstatus_t *status=NULL; int32_t *bandwidths; int is_exit; int is_guard; uint64_t total_nonexit_bw = 0, total_exit_bw = 0, total_bw = 0; uint64_t total_nonguard_bw = 0, total_guard_bw = 0; uint64_t rand_bw, tmp; double exit_weight; double guard_weight; int n_unknown = 0; bitarray_t *exit_bits; bitarray_t *guard_bits; int me_idx = -1; // This function does not support WEIGHT_FOR_DIR // or WEIGHT_FOR_MID if (rule == WEIGHT_FOR_DIR || rule == WEIGHT_FOR_MID) { rule = NO_WEIGHTING; } /* Can't choose exit and guard at same time */ tor_assert(rule == NO_WEIGHTING || rule == WEIGHT_FOR_EXIT || rule == WEIGHT_FOR_GUARD); if (smartlist_len(sl) == 0) { log_info(LD_CIRC, "Empty routerlist passed in to old node selection for rule %s", bandwidth_weight_rule_to_string(rule)); return NULL; } /* First count the total bandwidth weight, and make a list * of each value. <0 means "unknown; no routerinfo." We use the * bits of negative values to remember whether the router was fast (-x)&1 * and whether it was an exit (-x)&2 or guard (-x)&4. Yes, it's a hack. */ bandwidths = tor_malloc(sizeof(int32_t)*smartlist_len(sl)); exit_bits = bitarray_init_zero(smartlist_len(sl)); guard_bits = bitarray_init_zero(smartlist_len(sl)); /* Iterate over all the routerinfo_t or routerstatus_t, and */ for (i = 0; i < (unsigned)smartlist_len(sl); ++i) { /* first, learn what bandwidth we think i has */ int is_known = 1; int32_t flags = 0; uint32_t this_bw = 0; if (statuses) { status = smartlist_get(sl, i); if (router_digest_is_me(status->identity_digest)) me_idx = i; router = router_get_by_digest(status->identity_digest); is_exit = status->is_exit; is_guard = status->is_possible_guard; if (status->has_bandwidth) { this_bw = kb_to_bytes(status->bandwidth); } else { /* guess */ /* XXX023 once consensuses always list bandwidths, we can take * this guessing business out. -RD */ is_known = 0; flags = status->is_fast ? 1 : 0; flags |= is_exit ? 2 : 0; flags |= is_guard ? 4 : 0; } } else { routerstatus_t *rs; router = smartlist_get(sl, i); rs = router_get_consensus_status_by_id( router->cache_info.identity_digest); if (router_digest_is_me(router->cache_info.identity_digest)) me_idx = i; is_exit = router->is_exit; is_guard = router->is_possible_guard; if (rs && rs->has_bandwidth) { this_bw = kb_to_bytes(rs->bandwidth); } else if (rs) { /* guess; don't trust the descriptor */ /* XXX023 once consensuses always list bandwidths, we can take * this guessing business out. -RD */ is_known = 0; flags = router->is_fast ? 1 : 0; flags |= is_exit ? 2 : 0; flags |= is_guard ? 4 : 0; } else /* bridge or other descriptor not in our consensus */ this_bw = bridge_get_advertised_bandwidth_bounded(router); } if (is_exit) bitarray_set(exit_bits, i); if (is_guard) bitarray_set(guard_bits, i); if (is_known) { bandwidths[i] = (int32_t) this_bw; // safe since MAX_BELIEVABLE<INT32_MAX // XXX this is no longer true! We don't always cap the bw anymore. 
Can // a consensus make us overflow?-sh tor_assert(bandwidths[i] >= 0); if (is_guard) total_guard_bw += this_bw; else total_nonguard_bw += this_bw; if (is_exit) total_exit_bw += this_bw; else total_nonexit_bw += this_bw; } else { ++n_unknown; bandwidths[i] = -flags; } } /* Now, fill in the unknown values. */ if (n_unknown) { int32_t avg_fast, avg_slow; if (total_exit_bw+total_nonexit_bw) { /* if there's some bandwidth, there's at least one known router, * so no worries about div by 0 here */ int n_known = smartlist_len(sl)-n_unknown; avg_fast = avg_slow = (int32_t) ((total_exit_bw+total_nonexit_bw)/((uint64_t) n_known)); } else { avg_fast = 40000; avg_slow = 20000; } for (i=0; i<(unsigned)smartlist_len(sl); ++i) { int32_t bw = bandwidths[i]; if (bw>=0) continue; is_exit = ((-bw)&2); is_guard = ((-bw)&4); bandwidths[i] = ((-bw)&1) ? avg_fast : avg_slow; if (is_exit) total_exit_bw += bandwidths[i]; else total_nonexit_bw += bandwidths[i]; if (is_guard) total_guard_bw += bandwidths[i]; else total_nonguard_bw += bandwidths[i]; } } /* If there's no bandwidth at all, pick at random. */ if (!(total_exit_bw+total_nonexit_bw)) { tor_free(bandwidths); tor_free(exit_bits); tor_free(guard_bits); return smartlist_choose(sl); } /* Figure out how to weight exits and guards */ { double all_bw = U64_TO_DBL(total_exit_bw+total_nonexit_bw); double exit_bw = U64_TO_DBL(total_exit_bw); double guard_bw = U64_TO_DBL(total_guard_bw); /* * For detailed derivation of this formula, see * http://archives.seul.org/or/dev/Jul-2007/msg00056.html */ if (rule == WEIGHT_FOR_EXIT || !total_exit_bw) exit_weight = 1.0; else exit_weight = 1.0 - all_bw/(3.0*exit_bw); if (rule == WEIGHT_FOR_GUARD || !total_guard_bw) guard_weight = 1.0; else guard_weight = 1.0 - all_bw/(3.0*guard_bw); if (exit_weight <= 0.0) exit_weight = 0.0; if (guard_weight <= 0.0) guard_weight = 0.0; total_bw = 0; sl_last_weighted_bw_of_me = 0; for (i=0; i < (unsigned)smartlist_len(sl); i++) { uint64_t bw; is_exit = bitarray_is_set(exit_bits, i); is_guard = bitarray_is_set(guard_bits, i); if (is_exit && is_guard) bw = ((uint64_t)(bandwidths[i] * exit_weight * guard_weight)); else if (is_guard) bw = ((uint64_t)(bandwidths[i] * guard_weight)); else if (is_exit) bw = ((uint64_t)(bandwidths[i] * exit_weight)); else bw = bandwidths[i]; total_bw += bw; if (i == (unsigned) me_idx) sl_last_weighted_bw_of_me = bw; } } /* XXXX023 this is a kludge to expose these values. */ sl_last_total_weighted_bw = total_bw; log_debug(LD_CIRC, "Total weighted bw = "U64_FORMAT ", exit bw = "U64_FORMAT ", nonexit bw = "U64_FORMAT", exit weight = %f " "(for exit == %d)" ", guard bw = "U64_FORMAT ", nonguard bw = "U64_FORMAT", guard weight = %f " "(for guard == %d)", U64_PRINTF_ARG(total_bw), U64_PRINTF_ARG(total_exit_bw), U64_PRINTF_ARG(total_nonexit_bw), exit_weight, (int)(rule == WEIGHT_FOR_EXIT), U64_PRINTF_ARG(total_guard_bw), U64_PRINTF_ARG(total_nonguard_bw), guard_weight, (int)(rule == WEIGHT_FOR_GUARD)); /* Almost done: choose a random value from the bandwidth weights. */ rand_bw = crypto_rand_uint64(total_bw); rand_bw++; /* crypto_rand_uint64() counts from 0, and we need to count * from 1 below. See bug 1203 for details. 
*/ /* Last, count through sl until we get to the element we picked */ tmp = 0; for (i=0; i < (unsigned)smartlist_len(sl); i++) { is_exit = bitarray_is_set(exit_bits, i); is_guard = bitarray_is_set(guard_bits, i); /* Weights can be 0 if not counting guards/exits */ if (is_exit && is_guard) tmp += ((uint64_t)(bandwidths[i] * exit_weight * guard_weight)); else if (is_guard) tmp += ((uint64_t)(bandwidths[i] * guard_weight)); else if (is_exit) tmp += ((uint64_t)(bandwidths[i] * exit_weight)); else tmp += bandwidths[i]; if (tmp >= rand_bw) break; } if (i == (unsigned)smartlist_len(sl)) { /* This was once possible due to round-off error, but shouldn't be able * to occur any longer. */ tor_fragile_assert(); --i; log_warn(LD_BUG, "Round-off error in computing bandwidth had an effect on " " which router we chose. Please tell the developers. " U64_FORMAT " " U64_FORMAT " " U64_FORMAT, U64_PRINTF_ARG(tmp), U64_PRINTF_ARG(rand_bw), U64_PRINTF_ARG(total_bw)); } tor_free(bandwidths); tor_free(exit_bits); tor_free(guard_bits); return smartlist_get(sl, i); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-399'], 'message': 'Mitigate a side-channel leak of which relays Tor chooses for a circuit Tor's and OpenSSL's current design guarantee that there are other leaks, but this one is likely to be more easily exploitable, and is easy to fix.'</s>
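The side channel is that the final counting loop stops as soon as tmp crosses rand_bw, so the time spent correlates with which relay was picked. A common mitigation, shown here only as a hedged sketch and not as Tor's literal patch, is to finish the full scan regardless of where the threshold is crossed:

#include <stdint.h>
#include <stddef.h>

/* Always scan the whole weight array; remember the first crossing instead
 * of breaking, so the loop's duration does not reveal the chosen index. */
static size_t pick_weighted(const uint64_t *w, size_t n, uint64_t rand_bw)
{
    uint64_t acc = 0;
    size_t chosen = 0;
    int found = 0;
    if (n == 0)
        return 0;                 /* callers must not pass an empty list */
    for (size_t i = 0; i < n; i++) {
        acc += w[i];
        if (!found && acc >= rand_bw) {
            chosen = i;
            found = 1;
        }
    }
    return found ? chosen : n - 1;  /* round-off fallback, as in the sample */
}

A fully constant-time version would also replace the found flag with masking, but removing the early exit is the part this CVE message is about.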
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: smartlist_choose_by_bandwidth_weights(smartlist_t *sl, bandwidth_weight_rule_t rule, int statuses) { int64_t weight_scale; int64_t rand_bw; double Wg = -1, Wm = -1, We = -1, Wd = -1; double Wgb = -1, Wmb = -1, Web = -1, Wdb = -1; double weighted_bw = 0; double *bandwidths; double tmp = 0; unsigned int i; int have_unknown = 0; /* true iff sl contains element not in consensus. */ /* Can't choose exit and guard at same time */ tor_assert(rule == NO_WEIGHTING || rule == WEIGHT_FOR_EXIT || rule == WEIGHT_FOR_GUARD || rule == WEIGHT_FOR_MID || rule == WEIGHT_FOR_DIR); if (smartlist_len(sl) == 0) { log_info(LD_CIRC, "Empty routerlist passed in to consensus weight node " "selection for rule %s", bandwidth_weight_rule_to_string(rule)); return NULL; } weight_scale = circuit_build_times_get_bw_scale(NULL); if (rule == WEIGHT_FOR_GUARD) { Wg = networkstatus_get_bw_weight(NULL, "Wgg", -1); Wm = networkstatus_get_bw_weight(NULL, "Wgm", -1); /* Bridges */ We = 0; Wd = networkstatus_get_bw_weight(NULL, "Wgd", -1); Wgb = networkstatus_get_bw_weight(NULL, "Wgb", -1); Wmb = networkstatus_get_bw_weight(NULL, "Wmb", -1); Web = networkstatus_get_bw_weight(NULL, "Web", -1); Wdb = networkstatus_get_bw_weight(NULL, "Wdb", -1); } else if (rule == WEIGHT_FOR_MID) { Wg = networkstatus_get_bw_weight(NULL, "Wmg", -1); Wm = networkstatus_get_bw_weight(NULL, "Wmm", -1); We = networkstatus_get_bw_weight(NULL, "Wme", -1); Wd = networkstatus_get_bw_weight(NULL, "Wmd", -1); Wgb = networkstatus_get_bw_weight(NULL, "Wgb", -1); Wmb = networkstatus_get_bw_weight(NULL, "Wmb", -1); Web = networkstatus_get_bw_weight(NULL, "Web", -1); Wdb = networkstatus_get_bw_weight(NULL, "Wdb", -1); } else if (rule == WEIGHT_FOR_EXIT) { // Guards CAN be exits if they have weird exit policies // They are d then I guess... We = networkstatus_get_bw_weight(NULL, "Wee", -1); Wm = networkstatus_get_bw_weight(NULL, "Wem", -1); /* Odd exit policies */ Wd = networkstatus_get_bw_weight(NULL, "Wed", -1); Wg = networkstatus_get_bw_weight(NULL, "Weg", -1); /* Odd exit policies */ Wgb = networkstatus_get_bw_weight(NULL, "Wgb", -1); Wmb = networkstatus_get_bw_weight(NULL, "Wmb", -1); Web = networkstatus_get_bw_weight(NULL, "Web", -1); Wdb = networkstatus_get_bw_weight(NULL, "Wdb", -1); } else if (rule == WEIGHT_FOR_DIR) { We = networkstatus_get_bw_weight(NULL, "Wbe", -1); Wm = networkstatus_get_bw_weight(NULL, "Wbm", -1); Wd = networkstatus_get_bw_weight(NULL, "Wbd", -1); Wg = networkstatus_get_bw_weight(NULL, "Wbg", -1); Wgb = Wmb = Web = Wdb = weight_scale; } else if (rule == NO_WEIGHTING) { Wg = Wm = We = Wd = weight_scale; Wgb = Wmb = Web = Wdb = weight_scale; } if (Wg < 0 || Wm < 0 || We < 0 || Wd < 0 || Wgb < 0 || Wmb < 0 || Wdb < 0 || Web < 0) { log_debug(LD_CIRC, "Got negative bandwidth weights. Defaulting to old selection" " algorithm."); return NULL; // Use old algorithm. } Wg /= weight_scale; Wm /= weight_scale; We /= weight_scale; Wd /= weight_scale; Wgb /= weight_scale; Wmb /= weight_scale; Web /= weight_scale; Wdb /= weight_scale; bandwidths = tor_malloc_zero(sizeof(double)*smartlist_len(sl)); // Cycle through smartlist and total the bandwidth. 
for (i = 0; i < (unsigned)smartlist_len(sl); ++i) { int is_exit = 0, is_guard = 0, is_dir = 0, this_bw = 0, is_me = 0; double weight = 1; if (statuses) { routerstatus_t *status = smartlist_get(sl, i); is_exit = status->is_exit && !status->is_bad_exit; is_guard = status->is_possible_guard; is_dir = (status->dir_port != 0); if (!status->has_bandwidth) { tor_free(bandwidths); /* This should never happen, unless all the authorites downgrade * to 0.2.0 or rogue routerstatuses get inserted into our consensus. */ log_warn(LD_BUG, "Consensus is not listing bandwidths. Defaulting back to " "old router selection algorithm."); return NULL; } this_bw = kb_to_bytes(status->bandwidth); if (router_digest_is_me(status->identity_digest)) is_me = 1; } else { routerstatus_t *rs; routerinfo_t *router = smartlist_get(sl, i); rs = router_get_consensus_status_by_id( router->cache_info.identity_digest); is_exit = router->is_exit && !router->is_bad_exit; is_guard = router->is_possible_guard; is_dir = (router->dir_port != 0); if (rs && rs->has_bandwidth) { this_bw = kb_to_bytes(rs->bandwidth); } else { /* bridge or other descriptor not in our consensus */ this_bw = bridge_get_advertised_bandwidth_bounded(router); have_unknown = 1; } if (router_digest_is_me(router->cache_info.identity_digest)) is_me = 1; } if (is_guard && is_exit) { weight = (is_dir ? Wdb*Wd : Wd); } else if (is_guard) { weight = (is_dir ? Wgb*Wg : Wg); } else if (is_exit) { weight = (is_dir ? Web*We : We); } else { // middle weight = (is_dir ? Wmb*Wm : Wm); } bandwidths[i] = weight*this_bw; weighted_bw += weight*this_bw; if (is_me) sl_last_weighted_bw_of_me = weight*this_bw; } /* XXXX023 this is a kludge to expose these values. */ sl_last_total_weighted_bw = weighted_bw; log_debug(LD_CIRC, "Choosing node for rule %s based on weights " "Wg=%f Wm=%f We=%f Wd=%f with total bw %f", bandwidth_weight_rule_to_string(rule), Wg, Wm, We, Wd, weighted_bw); /* If there is no bandwidth, choose at random */ if (DBL_TO_U64(weighted_bw) == 0) { /* Don't warn when using bridges/relays not in the consensus */ if (!have_unknown) log_warn(LD_CIRC, "Weighted bandwidth is %f in node selection for rule %s", weighted_bw, bandwidth_weight_rule_to_string(rule)); tor_free(bandwidths); return smartlist_choose(sl); } rand_bw = crypto_rand_uint64(DBL_TO_U64(weighted_bw)); rand_bw++; /* crypto_rand_uint64() counts from 0, and we need to count * from 1 below. See bug 1203 for details. */ /* Last, count through sl until we get to the element we picked */ tmp = 0.0; for (i=0; i < (unsigned)smartlist_len(sl); i++) { tmp += bandwidths[i]; if (tmp >= rand_bw) break; } if (i == (unsigned)smartlist_len(sl)) { /* This was once possible due to round-off error, but shouldn't be able * to occur any longer. */ tor_fragile_assert(); --i; log_warn(LD_BUG, "Round-off error in computing bandwidth had an effect on " " which router we chose. Please tell the developers. " "%f " U64_FORMAT " %f", tmp, U64_PRINTF_ARG(rand_bw), weighted_bw); } tor_free(bandwidths); return smartlist_get(sl, i); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-399'], 'message': 'Mitigate a side-channel leak of which relays Tor chooses for a circuit Tor's and OpenSSL's current design guarantee that there are other leaks, but this one is likely to be more easily exploitable, and is easy to fix.'</s>
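One detail worth keeping from both selection functions is the rand_bw++ adjustment: crypto_rand_uint64(total) yields a value in [0, total), while the cumulative comparison tmp >= rand_bw needs one in (0, total] (the bug 1203 referenced in the comments). As a tiny sketch, assuming a crypto_rand_uint64-like primitive:

#include <stdint.h>

/* Map a uniform draw from [0, total) onto (0, total] so that an element
 * with zero cumulative weight can never be selected. */
static uint64_t draw_in_upper_half_open(uint64_t (*rng)(uint64_t), uint64_t total)
{
    return rng(total) + 1;
}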
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static __inline__ int scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm) { memset(scm, 0, sizeof(*scm)); unix_get_peersec_dgram(sock, scm); if (msg->msg_controllen <= 0) return 0; return __scm_send(sock, msg, scm); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-287', 'CWE-284'], 'message': 'af_netlink: force credentials passing [CVE-2012-3520] Pablo Neira Ayuso discovered that avahi and potentially NetworkManager accept spoofed Netlink messages because of a kernel bug. The kernel passes all-zero SCM_CREDENTIALS ancillary data to the receiver if the sender did not provide such data, instead of not including any such data at all or including the correct data from the peer (as it is the case with AF_UNIX). This bug was introduced in commit 16e572626961 (af_unix: dont send SCM_CREDENTIALS by default) This patch forces passing credentials for netlink, as before the regression. Another fix would be to not add SCM_CREDENTIALS in netlink messages if not provided by the sender, but it might break some programs. With help from Florian Weimer & Petr Matousek This issue is designated as CVE-2012-3520 Signed-off-by: Eric Dumazet <edumazet@google.com> Cc: Petr Matousek <pmatouse@redhat.com> Cc: Florian Weimer <fweimer@redhat.com> Cc: Pablo Neira Ayuso <pablo@netfilter.org> Signed-off-by: David S. Miller <davem@davemloft.net>'</s>
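The kernel bug is that absent credentials become an all-zero struct, and uid 0 reads as root to the receiver. The fix forces real credentials onto the message; a userspace analogue of that idea (invented struct, not the kernel's scm_cookie):

#include <unistd.h>
#include <sys/types.h>

struct creds { pid_t pid; uid_t uid; gid_t gid; };

/* When the sender supplied nothing, record the sender's real identity
 * instead of leaving zeroes that a receiver could mistake for root. */
static void fill_default_creds(struct creds *c, int sender_supplied)
{
    if (sender_supplied)
        return;                   /* already validated elsewhere */
    c->pid = getpid();
    c->uid = getuid();
    c->gid = getgid();
}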
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock_iocb *siocb = kiocb_to_siocb(kiocb); struct sock *sk = sock->sk; struct net *net = sock_net(sk); struct unix_sock *u = unix_sk(sk); struct sockaddr_un *sunaddr = msg->msg_name; struct sock *other = NULL; int namelen = 0; /* fake GCC */ int err; unsigned int hash; struct sk_buff *skb; long timeo; struct scm_cookie tmp_scm; int max_level; int data_len = 0; if (NULL == siocb->scm) siocb->scm = &tmp_scm; wait_for_unix_gc(); err = scm_send(sock, msg, siocb->scm); if (err < 0) return err; err = -EOPNOTSUPP; if (msg->msg_flags&MSG_OOB) goto out; if (msg->msg_namelen) { err = unix_mkname(sunaddr, msg->msg_namelen, &hash); if (err < 0) goto out; namelen = err; } else { sunaddr = NULL; err = -ENOTCONN; other = unix_peer_get(sk); if (!other) goto out; } if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr && (err = unix_autobind(sock)) != 0) goto out; err = -EMSGSIZE; if (len > sk->sk_sndbuf - 32) goto out; if (len > SKB_MAX_ALLOC) data_len = min_t(size_t, len - SKB_MAX_ALLOC, MAX_SKB_FRAGS * PAGE_SIZE); skb = sock_alloc_send_pskb(sk, len - data_len, data_len, msg->msg_flags & MSG_DONTWAIT, &err); if (skb == NULL) goto out; err = unix_scm_to_skb(siocb->scm, skb, true); if (err < 0) goto out_free; max_level = err + 1; unix_get_secdata(siocb->scm, skb); skb_put(skb, len - data_len); skb->data_len = data_len; skb->len = len; err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len); if (err) goto out_free; timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); restart: if (!other) { err = -ECONNRESET; if (sunaddr == NULL) goto out_free; other = unix_find_other(net, sunaddr, namelen, sk->sk_type, hash, &err); if (other == NULL) goto out_free; } if (sk_filter(other, skb) < 0) { /* Toss the packet but do not return any error to the sender */ err = len; goto out_free; } unix_state_lock(other); err = -EPERM; if (!unix_may_send(sk, other)) goto out_unlock; if (sock_flag(other, SOCK_DEAD)) { /* * Check with 1003.1g - what should * datagram error */ unix_state_unlock(other); sock_put(other); err = 0; unix_state_lock(sk); if (unix_peer(sk) == other) { unix_peer(sk) = NULL; unix_state_unlock(sk); unix_dgram_disconnected(sk, other); sock_put(other); err = -ECONNREFUSED; } else { unix_state_unlock(sk); } other = NULL; if (err) goto out_free; goto restart; } err = -EPIPE; if (other->sk_shutdown & RCV_SHUTDOWN) goto out_unlock; if (sk->sk_type != SOCK_SEQPACKET) { err = security_unix_may_send(sk->sk_socket, other->sk_socket); if (err) goto out_unlock; } if (unix_peer(other) != sk && unix_recvq_full(other)) { if (!timeo) { err = -EAGAIN; goto out_unlock; } timeo = unix_wait_for_peer(other, timeo); err = sock_intr_errno(timeo); if (signal_pending(current)) goto out_free; goto restart; } if (sock_flag(other, SOCK_RCVTSTAMP)) __net_timestamp(skb); maybe_add_creds(skb, sock, other); skb_queue_tail(&other->sk_receive_queue, skb); if (max_level > unix_sk(other)->recursion_level) unix_sk(other)->recursion_level = max_level; unix_state_unlock(other); other->sk_data_ready(other, len); sock_put(other); scm_destroy(siocb->scm); return len; out_unlock: unix_state_unlock(other); out_free: kfree_skb(skb); out: if (other) sock_put(other); scm_destroy(siocb->scm); return err; } ; 
Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-287', 'CWE-284'], 'message': 'af_netlink: force credentials passing [CVE-2012-3520] Pablo Neira Ayuso discovered that avahi and potentially NetworkManager accept spoofed Netlink messages because of a kernel bug. The kernel passes all-zero SCM_CREDENTIALS ancillary data to the receiver if the sender did not provide such data, instead of not including any such data at all or including the correct data from the peer (as it is the case with AF_UNIX). This bug was introduced in commit 16e572626961 (af_unix: dont send SCM_CREDENTIALS by default) This patch forces passing credentials for netlink, as before the regression. Another fix would be to not add SCM_CREDENTIALS in netlink messages if not provided by the sender, but it might break some programs. With help from Florian Weimer & Petr Matousek This issue is designated as CVE-2012-3520 Signed-off-by: Eric Dumazet <edumazet@google.com> Cc: Petr Matousek <pmatouse@redhat.com> Cc: Florian Weimer <fweimer@redhat.com> Cc: Pablo Neira Ayuso <pablo@netfilter.org> Signed-off-by: David S. Miller <davem@davemloft.net>'</s>
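The same CVE also suggests a receiver-side defence: a correct kernel never reports pid 0 for a live peer, so an all-zero credentials record should be treated as absent rather than as root. A hedged sketch:

#include <sys/types.h>

struct peer_creds { pid_t pid; uid_t uid; gid_t gid; };

/* pid 0 never identifies a real peer process, so reject the record
 * outright instead of honouring uid 0 from it. */
static int peer_creds_present(const struct peer_creds *c)
{
    return c->pid != 0;
}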
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static void _out_result(conn_t out, nad_t nad) { int attr; jid_t from, to; char *rkey; int rkeylen; attr = nad_find_attr(nad, 0, -1, "from", NULL); if(attr < 0 || (from = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) { log_debug(ZONE, "missing or invalid from on db result packet"); nad_free(nad); return; } attr = nad_find_attr(nad, 0, -1, "to", NULL); if(attr < 0 || (to = jid_new(NAD_AVAL(nad, attr), NAD_AVAL_L(nad, attr))) == NULL) { log_debug(ZONE, "missing or invalid to on db result packet"); jid_free(from); nad_free(nad); return; } rkey = s2s_route_key(NULL, to->domain, from->domain); rkeylen = strlen(rkey); /* key is valid */ if(nad_find_attr(nad, 0, -1, "type", "valid") >= 0) { log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] outgoing route '%s' is now valid%s%s", out->fd->fd, out->ip, out->port, rkey, (out->s->flags & SX_SSL_WRAPPER) ? ", TLS negotiated" : "", out->s->compressed ? ", ZLIB compression enabled" : ""); xhash_put(out->states, pstrdup(xhash_pool(out->states), rkey), (void *) conn_VALID); /* !!! small leak here */ log_debug(ZONE, "%s valid, flushing queue", rkey); /* flush the queue */ out_flush_route_queue(out->s2s, rkey, rkeylen); free(rkey); jid_free(from); jid_free(to); nad_free(nad); return; } /* invalid */ log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] outgoing route '%s' is now invalid", out->fd->fd, out->ip, out->port, rkey); /* close connection */ log_write(out->s2s->log, LOG_NOTICE, "[%d] [%s, port=%d] closing connection", out->fd->fd, out->ip, out->port); /* report stream error */ sx_error(out->s, stream_err_INVALID_ID, "dialback negotiation failed"); /* close the stream */ sx_close(out->s); /* bounce queue */ out_bounce_route_queue(out->s2s, rkey, rkeylen, stanza_err_SERVICE_UNAVAILABLE); free(rkey); jid_free(from); jid_free(to); nad_free(nad); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Fixed possibility of Unsolicited Dialback Attacks'</s>
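The unsolicited-dialback problem is accepting a type='valid' result for a route we never asked to verify. The guard is a lookup before the state transition; lookup_state below is a hypothetical stand-in for the xhash query on out->states:

enum route_state { ROUTE_NONE = 0, ROUTE_INPROGRESS, ROUTE_VALID };

/* Only a result that answers a verification we initiated may move the
 * route to VALID; anything else is an unsolicited dialback attempt. */
static int result_expected(enum route_state (*lookup_state)(const char *),
                           const char *rkey)
{
    return lookup_state(rkey) == ROUTE_INPROGRESS;
}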
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int cbtls_verify(int ok, X509_STORE_CTX *ctx) { char subject[1024]; /* Used for the subject name */ char issuer[1024]; /* Used for the issuer name */ char common_name[1024]; char cn_str[1024]; char buf[64]; EAP_HANDLER *handler = NULL; X509 *client_cert; X509 *issuer_cert; SSL *ssl; int err, depth, lookup, loc; EAP_TLS_CONF *conf; int my_ok = ok; REQUEST *request; ASN1_INTEGER *sn = NULL; ASN1_TIME *asn_time = NULL; #ifdef HAVE_OPENSSL_OCSP_H X509_STORE *ocsp_store = NULL; #endif client_cert = X509_STORE_CTX_get_current_cert(ctx); err = X509_STORE_CTX_get_error(ctx); depth = X509_STORE_CTX_get_error_depth(ctx); lookup = depth; /* * Log client/issuing cert. If there's an error, log * issuing cert. */ if ((lookup > 1) && !my_ok) lookup = 1; /* * Retrieve the pointer to the SSL of the connection currently treated * and the application specific data stored into the SSL object. */ ssl = X509_STORE_CTX_get_ex_data(ctx, SSL_get_ex_data_X509_STORE_CTX_idx()); handler = (EAP_HANDLER *)SSL_get_ex_data(ssl, 0); request = handler->request; conf = (EAP_TLS_CONF *)SSL_get_ex_data(ssl, 1); #ifdef HAVE_OPENSSL_OCSP_H ocsp_store = (X509_STORE *)SSL_get_ex_data(ssl, 2); #endif /* * Get the Serial Number */ buf[0] = '\0'; sn = X509_get_serialNumber(client_cert); /* * For this next bit, we create the attributes *only* if * we're at the client or issuing certificate. */ if ((lookup <= 1) && sn && (sn->length < (sizeof(buf) / 2))) { char *p = buf; int i; for (i = 0; i < sn->length; i++) { sprintf(p, "%02x", (unsigned int)sn->data[i]); p += 2; } pairadd(&handler->certs, pairmake(cert_attr_names[EAPTLS_SERIAL][lookup], buf, T_OP_SET)); } /* * Get the Expiration Date */ buf[0] = '\0'; asn_time = X509_get_notAfter(client_cert); if ((lookup <= 1) && asn_time && (asn_time->length < MAX_STRING_LEN)) { memcpy(buf, (char*) asn_time->data, asn_time->length); buf[asn_time->length] = '\0'; pairadd(&handler->certs, pairmake(cert_attr_names[EAPTLS_EXPIRATION][lookup], buf, T_OP_SET)); } /* * Get the Subject & Issuer */ subject[0] = issuer[0] = '\0'; X509_NAME_oneline(X509_get_subject_name(client_cert), subject, sizeof(subject)); subject[sizeof(subject) - 1] = '\0'; if ((lookup <= 1) && subject[0] && (strlen(subject) < MAX_STRING_LEN)) { pairadd(&handler->certs, pairmake(cert_attr_names[EAPTLS_SUBJECT][lookup], subject, T_OP_SET)); } X509_NAME_oneline(X509_get_issuer_name(ctx->current_cert), issuer, sizeof(issuer)); issuer[sizeof(issuer) - 1] = '\0'; if ((lookup <= 1) && issuer[0] && (strlen(issuer) < MAX_STRING_LEN)) { pairadd(&handler->certs, pairmake(cert_attr_names[EAPTLS_ISSUER][lookup], issuer, T_OP_SET)); } /* * Get the Common Name, if there is a subject. 
*/ X509_NAME_get_text_by_NID(X509_get_subject_name(client_cert), NID_commonName, common_name, sizeof(common_name)); common_name[sizeof(common_name) - 1] = '\0'; if ((lookup <= 1) && common_name[0] && subject[0] && (strlen(common_name) < MAX_STRING_LEN)) { pairadd(&handler->certs, pairmake(cert_attr_names[EAPTLS_CN][lookup], common_name, T_OP_SET)); } #ifdef GEN_EMAIL /* * Get the RFC822 Subject Alternative Name */ loc = X509_get_ext_by_NID(client_cert, NID_subject_alt_name, 0); if (lookup <= 1 && loc >= 0) { X509_EXTENSION *ext = NULL; GENERAL_NAMES *names = NULL; int i; if ((ext = X509_get_ext(client_cert, loc)) && (names = X509V3_EXT_d2i(ext))) { for (i = 0; i < sk_GENERAL_NAME_num(names); i++) { GENERAL_NAME *name = sk_GENERAL_NAME_value(names, i); switch (name->type) { case GEN_EMAIL: if (ASN1_STRING_length(name->d.rfc822Name) >= MAX_STRING_LEN) break; pairadd(&handler->certs, pairmake(cert_attr_names[EAPTLS_SAN_EMAIL][lookup], ASN1_STRING_data(name->d.rfc822Name), T_OP_SET)); break; default: /* XXX TODO handle other SAN types */ break; } } } if (names != NULL) sk_GENERAL_NAME_free(names); } #endif /* GEN_EMAIL */ /* * If the CRL has expired, that might still be OK. */ if (!my_ok && (conf->allow_expired_crl) && (err == X509_V_ERR_CRL_HAS_EXPIRED)) { my_ok = 1; X509_STORE_CTX_set_error( ctx, 0 ); } if (!my_ok) { const char *p = X509_verify_cert_error_string(err); radlog(L_ERR,"--> verify error:num=%d:%s\n",err, p); radius_pairmake(request, &request->packet->vps, "Module-Failure-Message", p, T_OP_SET); return my_ok; } switch (ctx->error) { case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT: radlog(L_ERR, "issuer= %s\n", issuer); break; case X509_V_ERR_CERT_NOT_YET_VALID: case X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD: radlog(L_ERR, "notBefore="); #if 0 ASN1_TIME_print(bio_err, X509_get_notBefore(ctx->current_cert)); #endif break; case X509_V_ERR_CERT_HAS_EXPIRED: case X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD: radlog(L_ERR, "notAfter="); #if 0 ASN1_TIME_print(bio_err, X509_get_notAfter(ctx->current_cert)); #endif break; } /* * If we're at the actual client cert, apply additional * checks. */ if (depth == 0) { /* * If the conf tells us to, check cert issuer * against the specified value and fail * verification if they don't match. */ if (conf->check_cert_issuer && (strcmp(issuer, conf->check_cert_issuer) != 0)) { radlog(L_AUTH, "rlm_eap_tls: Certificate issuer (%s) does not match specified value (%s)!", issuer, conf->check_cert_issuer); my_ok = 0; } /* * If the conf tells us to, check the CN in the * cert against xlat'ed value, but only if the * previous checks passed. 
*/ if (my_ok && conf->check_cert_cn) { if (!radius_xlat(cn_str, sizeof(cn_str), conf->check_cert_cn, handler->request, NULL)) { radlog(L_ERR, "rlm_eap_tls (%s): xlat failed.", conf->check_cert_cn); /* if this fails, fail the verification */ my_ok = 0; } else { RDEBUG2("checking certificate CN (%s) with xlat'ed value (%s)", common_name, cn_str); if (strcmp(cn_str, common_name) != 0) { radlog(L_AUTH, "rlm_eap_tls: Certificate CN (%s) does not match specified value (%s)!", common_name, cn_str); my_ok = 0; } } } /* check_cert_cn */ #ifdef HAVE_OPENSSL_OCSP_H if (my_ok && conf->ocsp_enable){ RDEBUG2("--> Starting OCSP Request"); if(X509_STORE_CTX_get1_issuer(&issuer_cert, ctx, client_cert)!=1) { radlog(L_ERR, "Error: Couldn't get issuer_cert for %s", common_name); } my_ok = ocsp_check(ocsp_store, issuer_cert, client_cert, conf); } #endif while (conf->verify_client_cert_cmd) { char filename[256]; int fd; FILE *fp; snprintf(filename, sizeof(filename), "%s/%s.client.XXXXXXXX", conf->verify_tmp_dir, progname); fd = mkstemp(filename); if (fd < 0) { RDEBUG("Failed creating file in %s: %s", conf->verify_tmp_dir, strerror(errno)); break; } fp = fdopen(fd, "w"); if (!fp) { RDEBUG("Failed opening file %s: %s", filename, strerror(errno)); break; } if (!PEM_write_X509(fp, client_cert)) { fclose(fp); RDEBUG("Failed writing certificate to file"); goto do_unlink; } fclose(fp); if (!radius_pairmake(request, &request->packet->vps, "TLS-Client-Cert-Filename", filename, T_OP_SET)) { RDEBUG("Failed creating TLS-Client-Cert-Filename"); goto do_unlink; } RDEBUG("Verifying client certificate: %s", conf->verify_client_cert_cmd); if (radius_exec_program(conf->verify_client_cert_cmd, request, 1, NULL, 0, request->packet->vps, NULL, 1) != 0) { radlog(L_AUTH, "rlm_eap_tls: Certificate CN (%s) fails external verification!", common_name); my_ok = 0; } else { RDEBUG("Client certificate CN %s passed external validation", common_name); } do_unlink: unlink(filename); break; } } /* depth == 0 */ if (debug_flag > 0) { RDEBUG2("chain-depth=%d, ", depth); RDEBUG2("error=%d", err); RDEBUG2("--> User-Name = %s", handler->identity); RDEBUG2("--> BUF-Name = %s", common_name); RDEBUG2("--> subject = %s", subject); RDEBUG2("--> issuer = %s", issuer); RDEBUG2("--> verify return:%d", my_ok); } return my_ok; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119'], 'message': 'Fix CVE-2012-3547. Found by Timo Warns'</s>
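The overflow in the record above is CVE-2012-3547: the certificate's notAfter timestamp is memcpy()'d into the 64-byte stack buffer buf[] after being length-checked only against MAX_STRING_LEN, a protocol-level limit far larger than the buffer. A minimal sketch of that pattern under assumed names (copy_not_after and the constant value are illustrative, not FreeRADIUS code):

    #include <string.h>

    #define MAX_STRING_LEN 254   /* assumed protocol-level limit, far above 64 */

    /* CWE-119 pattern from the record: the length check uses the wrong
     * bound, so input longer than the destination buffer (but below the
     * protocol limit) is still copied onto the stack. */
    static void copy_not_after(const unsigned char *data, size_t len)
    {
        char buf[64];

        if (len < MAX_STRING_LEN) {     /* BUG: bound should be sizeof(buf) - 1 */
            memcpy(buf, data, len);     /* overflows the stack once len >= 64 */
            buf[len] = '\0';
        }
    }

A correct guard has to bound the copy by the destination size, not by an unrelated protocol constant.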
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: int ip_queue_xmit(struct sk_buff *skb) { struct sock *sk = skb->sk; struct inet_sock *inet = inet_sk(sk); struct ip_options *opt = inet->opt; struct rtable *rt; struct iphdr *iph; int res; /* Skip all of this if the packet is already routed, * f.e. by something like SCTP. */ rcu_read_lock(); rt = skb_rtable(skb); if (rt != NULL) goto packet_routed; /* Make sure we can route this packet. */ rt = (struct rtable *)__sk_dst_check(sk, 0); if (rt == NULL) { __be32 daddr; /* Use correct destination address if we have options. */ daddr = inet->inet_daddr; if(opt && opt->srr) daddr = opt->faddr; /* If this fails, retransmit mechanism of transport layer will * keep trying until route appears or the connection times * itself out. */ rt = ip_route_output_ports(sock_net(sk), sk, daddr, inet->inet_saddr, inet->inet_dport, inet->inet_sport, sk->sk_protocol, RT_CONN_FLAGS(sk), sk->sk_bound_dev_if); if (IS_ERR(rt)) goto no_route; sk_setup_caps(sk, &rt->dst); } skb_dst_set_noref(skb, &rt->dst); packet_routed: if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) goto no_route; /* OK, we know where to send it, allocate and build IP header. */ skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0)); skb_reset_network_header(skb); iph = ip_hdr(skb); *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff)); if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df) iph->frag_off = htons(IP_DF); else iph->frag_off = 0; iph->ttl = ip_select_ttl(inet, &rt->dst); iph->protocol = sk->sk_protocol; iph->saddr = rt->rt_src; iph->daddr = rt->rt_dst; /* Transport layer set skb->h.foo itself. */ if (opt && opt->optlen) { iph->ihl += opt->optlen >> 2; ip_options_build(skb, opt, inet->inet_daddr, rt, 0); } ip_select_ident_more(iph, &rt->dst, sk, (skb_shinfo(skb)->gso_segs ?: 1) - 1); skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; res = ip_local_out(skb); rcu_read_unlock(); return res; no_route: rcu_read_unlock(); IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); kfree_skb(skb); return -EHOSTUNREACH; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-362'], 'message': 'inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We cant insert an rcu_head in struct ip_options since its included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Cc: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static struct ip_options *ip_options_get_alloc(const int optlen) { return kzalloc(sizeof(struct ip_options) + ((optlen + 3) & ~3), GFP_KERNEL); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-362'], 'message': 'inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We cant insert an rcu_head in struct ip_options since its included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Cc: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>'</s>
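The only arithmetic in this short record is the allocation size: optlen is rounded up to the next multiple of four before being added to the struct size. The idiom in isolation, as a self-checking example (round_up4 is an illustrative name; IPv4 options cap at 40 bytes, which is why other records in this series allocate "+ 40"):

    #include <assert.h>

    static int round_up4(int n) { return (n + 3) & ~3; }

    int main(void)
    {
        assert(round_up4(0)  == 0);
        assert(round_up4(1)  == 4);
        assert(round_up4(37) == 40);   /* 40 bytes: the IPv4 options maximum */
        return 0;
    }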
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, struct ip_options *opt) { struct tcp_options_received tcp_opt; u8 *hash_location; struct inet_request_sock *ireq; struct tcp_request_sock *treq; struct tcp_sock *tp = tcp_sk(sk); const struct tcphdr *th = tcp_hdr(skb); __u32 cookie = ntohl(th->ack_seq) - 1; struct sock *ret = sk; struct request_sock *req; int mss; struct rtable *rt; __u8 rcv_wscale; bool ecn_ok; if (!sysctl_tcp_syncookies || !th->ack || th->rst) goto out; if (tcp_synq_no_recent_overflow(sk) || (mss = cookie_check(skb, cookie)) == 0) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED); goto out; } NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); /* check for timestamp cookie support */ memset(&tcp_opt, 0, sizeof(tcp_opt)); tcp_parse_options(skb, &tcp_opt, &hash_location, 0); if (!cookie_check_timestamp(&tcp_opt, &ecn_ok)) goto out; ret = NULL; req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */ if (!req) goto out; ireq = inet_rsk(req); treq = tcp_rsk(req); treq->rcv_isn = ntohl(th->seq) - 1; treq->snt_isn = cookie; req->mss = mss; ireq->loc_port = th->dest; ireq->rmt_port = th->source; ireq->loc_addr = ip_hdr(skb)->daddr; ireq->rmt_addr = ip_hdr(skb)->saddr; ireq->ecn_ok = ecn_ok; ireq->snd_wscale = tcp_opt.snd_wscale; ireq->sack_ok = tcp_opt.sack_ok; ireq->wscale_ok = tcp_opt.wscale_ok; ireq->tstamp_ok = tcp_opt.saw_tstamp; req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; /* We throwed the options of the initial SYN away, so we hope * the ACK carries the same options again (see RFC1122 4.2.3.8) */ if (opt && opt->optlen) { int opt_size = sizeof(struct ip_options) + opt->optlen; ireq->opt = kmalloc(opt_size, GFP_ATOMIC); if (ireq->opt != NULL && ip_options_echo(ireq->opt, skb)) { kfree(ireq->opt); ireq->opt = NULL; } } if (security_inet_conn_request(sk, skb, req)) { reqsk_free(req); goto out; } req->expires = 0UL; req->retrans = 0; /* * We need to lookup the route here to get at the correct * window size. We should better make sure that the window size * hasn't changed since we received the original syn, but I see * no easy way to do this. */ { struct flowi4 fl4; flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP, inet_sk_flowi_flags(sk), (opt && opt->srr) ? opt->faddr : ireq->rmt_addr, ireq->loc_addr, th->source, th->dest); security_req_classify_flow(req, flowi4_to_flowi(&fl4)); rt = ip_route_output_key(sock_net(sk), &fl4); if (IS_ERR(rt)) { reqsk_free(req); goto out; } } /* Try to redo what tcp_v4_send_synack did. */ req->window_clamp = tp->window_clamp ? 
:dst_metric(&rt->dst, RTAX_WINDOW); tcp_select_initial_window(tcp_full_space(sk), req->mss, &req->rcv_wnd, &req->window_clamp, ireq->wscale_ok, &rcv_wscale, dst_metric(&rt->dst, RTAX_INITRWND)); ireq->rcv_wscale = rcv_wscale; ret = get_cookie_sock(sk, skb, req, &rt->dst); out: return ret; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-362'], 'message': 'inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We cant insert an rcu_head in struct ip_options since its included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Cc: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: int inet_sk_rebuild_header(struct sock *sk) { struct inet_sock *inet = inet_sk(sk); struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0); __be32 daddr; int err; /* Route is OK, nothing to do. */ if (rt) return 0; /* Reroute. */ daddr = inet->inet_daddr; if (inet->opt && inet->opt->srr) daddr = inet->opt->faddr; rt = ip_route_output_ports(sock_net(sk), sk, daddr, inet->inet_saddr, inet->inet_dport, inet->inet_sport, sk->sk_protocol, RT_CONN_FLAGS(sk), sk->sk_bound_dev_if); if (!IS_ERR(rt)) { err = 0; sk_setup_caps(sk, &rt->dst); } else { err = PTR_ERR(rt); /* Routing failed... */ sk->sk_route_caps = 0; /* * Other protocols have to map its equivalent state to TCP_SYN_SENT. * DCCP maps its DCCP_REQUESTING state to TCP_SYN_SENT. -acme */ if (!sysctl_ip_dynaddr || sk->sk_state != TCP_SYN_SENT || (sk->sk_userlocks & SOCK_BINDADDR_LOCK) || (err = inet_sk_reselect_saddr(sk)) != 0) sk->sk_err_soft = -err; } return err; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-362'], 'message': 'inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We cant insert an rcu_head in struct ip_options since its included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Cc: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int ip_setup_cork(struct sock *sk, struct inet_cork *cork, struct ipcm_cookie *ipc, struct rtable **rtp) { struct inet_sock *inet = inet_sk(sk); struct ip_options *opt; struct rtable *rt; /* * setup for corking. */ opt = ipc->opt; if (opt) { if (cork->opt == NULL) { cork->opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation); if (unlikely(cork->opt == NULL)) return -ENOBUFS; } memcpy(cork->opt, opt, sizeof(struct ip_options) + opt->optlen); cork->flags |= IPCORK_OPT; cork->addr = ipc->addr; } rt = *rtp; if (unlikely(!rt)) return -EFAULT; /* * We steal reference to this route, caller should not release it */ *rtp = NULL; cork->fragsize = inet->pmtudisc == IP_PMTUDISC_PROBE ? rt->dst.dev->mtu : dst_mtu(rt->dst.path); cork->dst = &rt->dst; cork->length = 0; cork->tx_flags = ipc->tx_flags; cork->page = NULL; cork->off = 0; return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-362'], 'message': 'inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We cant insert an rcu_head in struct ip_options since its included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Cc: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>'</s>
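This record is the one the shared commit message singles out: ip_make_skb() reaches ip_setup_cork(), which copies sizeof(struct ip_options) + opt->optlen bytes out of an options block that a concurrent setsockopt()-style writer can replace and kfree() at any moment. A user-space analogue of the unsafe shape (all names hypothetical, kernel types stubbed out):

    #include <stdlib.h>
    #include <string.h>

    struct opts { int optlen; unsigned char data[40]; };

    struct opts *shared_opt;                 /* analogue of inet->opt */

    /* Reader: analogue of ip_setup_cork() caching and copying the options.
     * Nothing stops the writer below from freeing 'o' between the load and
     * the memcpy, or between reading optlen and finishing the copy. */
    void cork_copy(struct opts *dst)
    {
        struct opts *o = shared_opt;         /* unsynchronized load */
        if (o)
            memcpy(dst, o, sizeof(*o));      /* may read freed memory */
    }

    /* Writer: analogue of a setsockopt() path replacing the options. */
    void set_opts(struct opts *newo)
    {
        struct opts *old = shared_opt;
        shared_opt = newo;                   /* no grace period ... */
        free(old);                           /* ... so this races with readers */
    }

The fix the message describes introduces ip_options_rcu — ip_options itself cannot carry an rcu_head because it is embedded in skb->cb[] — so readers dereference the options under RCU and writers defer the free until a grace period has elapsed.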
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst) { struct inet6_request_sock *treq; struct ipv6_pinfo *newnp, *np = inet6_sk(sk); struct tcp6_sock *newtcp6sk; struct inet_sock *newinet; struct tcp_sock *newtp; struct sock *newsk; struct ipv6_txoptions *opt; #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *key; #endif if (skb->protocol == htons(ETH_P_IP)) { /* * v6 mapped */ newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst); if (newsk == NULL) return NULL; newtcp6sk = (struct tcp6_sock *)newsk; inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; newinet = inet_sk(newsk); newnp = inet6_sk(newsk); newtp = tcp_sk(newsk); memcpy(newnp, np, sizeof(struct ipv6_pinfo)); ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr); ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr); ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr); inet_csk(newsk)->icsk_af_ops = &ipv6_mapped; newsk->sk_backlog_rcv = tcp_v4_do_rcv; #ifdef CONFIG_TCP_MD5SIG newtp->af_specific = &tcp_sock_ipv6_mapped_specific; #endif newnp->pktoptions = NULL; newnp->opt = NULL; newnp->mcast_oif = inet6_iif(skb); newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; /* * No need to charge this sock to the relevant IPv6 refcnt debug socks count * here, tcp_create_openreq_child now does this for us, see the comment in * that function for the gory details. -acme */ /* It is tricky place. Until this moment IPv4 tcp worked with IPv6 icsk.icsk_af_ops. Sync it now. */ tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie); return newsk; } treq = inet6_rsk(req); opt = np->opt; if (sk_acceptq_is_full(sk)) goto out_overflow; if (!dst) { dst = inet6_csk_route_req(sk, req); if (!dst) goto out; } newsk = tcp_create_openreq_child(sk, req, skb); if (newsk == NULL) goto out_nonewsk; /* * No need to charge this sock to the relevant IPv6 refcnt debug socks * count here, tcp_create_openreq_child now does this for us, see the * comment in that function for the gory details. -acme */ newsk->sk_gso_type = SKB_GSO_TCPV6; __ip6_dst_store(newsk, dst, NULL, NULL); newtcp6sk = (struct tcp6_sock *)newsk; inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; newtp = tcp_sk(newsk); newinet = inet_sk(newsk); newnp = inet6_sk(newsk); memcpy(newnp, np, sizeof(struct ipv6_pinfo)); ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr); ipv6_addr_copy(&newnp->saddr, &treq->loc_addr); ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr); newsk->sk_bound_dev_if = treq->iif; /* Now IPv6 options... First: no IPv4 options. */ newinet->opt = NULL; newnp->ipv6_fl_list = NULL; /* Clone RX bits */ newnp->rxopt.all = np->rxopt.all; /* Clone pktoptions received with SYN */ newnp->pktoptions = NULL; if (treq->pktopts != NULL) { newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC); kfree_skb(treq->pktopts); treq->pktopts = NULL; if (newnp->pktoptions) skb_set_owner_r(newnp->pktoptions, newsk); } newnp->opt = NULL; newnp->mcast_oif = inet6_iif(skb); newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; /* Clone native IPv6 options from listening socket (if any) Yes, keeping reference count would be much more clever, but we make one more one thing there: reattach optmem to newsk. 
*/ if (opt) { newnp->opt = ipv6_dup_options(newsk, opt); if (opt != np->opt) sock_kfree_s(sk, opt, opt->tot_len); } inet_csk(newsk)->icsk_ext_hdr_len = 0; if (newnp->opt) inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen + newnp->opt->opt_flen); tcp_mtup_init(newsk); tcp_sync_mss(newsk, dst_mtu(dst)); newtp->advmss = dst_metric_advmss(dst); tcp_initialize_rcv_mss(newsk); newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6; newinet->inet_rcv_saddr = LOOPBACK4_IPV6; #ifdef CONFIG_TCP_MD5SIG /* Copy over the MD5 key from the original socket */ if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) { /* We're using one, so create a matching key * on the newsk structure. If we fail to get * memory, then we end up not copying the key * across. Shucks. */ char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC); if (newkey != NULL) tcp_v6_md5_do_add(newsk, &newnp->daddr, newkey, key->keylen); } #endif if (__inet_inherit_port(sk, newsk) < 0) { sock_put(newsk); goto out; } __inet6_hash(newsk, NULL); return newsk; out_overflow: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); out_nonewsk: if (opt && opt != np->opt) sock_kfree_s(sk, opt, opt->tot_len); dst_release(dst); out: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); return NULL; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-362'], 'message': 'inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We cant insert an rcu_head in struct ip_options since its included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Cc: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr; struct inet_sock *inet = inet_sk(sk); struct dccp_sock *dp = dccp_sk(sk); __be16 orig_sport, orig_dport; __be32 daddr, nexthop; struct flowi4 fl4; struct rtable *rt; int err; dp->dccps_role = DCCP_ROLE_CLIENT; if (addr_len < sizeof(struct sockaddr_in)) return -EINVAL; if (usin->sin_family != AF_INET) return -EAFNOSUPPORT; nexthop = daddr = usin->sin_addr.s_addr; if (inet->opt != NULL && inet->opt->srr) { if (daddr == 0) return -EINVAL; nexthop = inet->opt->faddr; } orig_sport = inet->inet_sport; orig_dport = usin->sin_port; rt = ip_route_connect(&fl4, nexthop, inet->inet_saddr, RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, IPPROTO_DCCP, orig_sport, orig_dport, sk, true); if (IS_ERR(rt)) return PTR_ERR(rt); if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { ip_rt_put(rt); return -ENETUNREACH; } if (inet->opt == NULL || !inet->opt->srr) daddr = rt->rt_dst; if (inet->inet_saddr == 0) inet->inet_saddr = rt->rt_src; inet->inet_rcv_saddr = inet->inet_saddr; inet->inet_dport = usin->sin_port; inet->inet_daddr = daddr; inet_csk(sk)->icsk_ext_hdr_len = 0; if (inet->opt != NULL) inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen; /* * Socket identity is still unknown (sport may be zero). * However we set state to DCCP_REQUESTING and not releasing socket * lock select source port, enter ourselves into the hash tables and * complete initialization after this. */ dccp_set_state(sk, DCCP_REQUESTING); err = inet_hash_connect(&dccp_death_row, sk); if (err != 0) goto failure; rt = ip_route_newports(&fl4, rt, orig_sport, orig_dport, inet->inet_sport, inet->inet_dport, sk); if (IS_ERR(rt)) { rt = NULL; goto failure; } /* OK, now commit destination to socket. */ sk_setup_caps(sk, &rt->dst); dp->dccps_iss = secure_dccp_sequence_number(inet->inet_saddr, inet->inet_daddr, inet->inet_sport, inet->inet_dport); inet->inet_id = dp->dccps_iss ^ jiffies; err = dccp_connect(sk); rt = NULL; if (err != 0) goto failure; out: return err; failure: /* * This unhashes the socket and releases the local port, if necessary. */ dccp_set_state(sk, DCCP_CLOSED); ip_rt_put(rt); sk->sk_route_caps = 0; inet->inet_dport = 0; goto out; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-362'], 'message': 'inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We cant insert an rcu_head in struct ip_options since its included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Cc: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, __be32 saddr, __be32 daddr, struct ip_options *opt) { struct inet_sock *inet = inet_sk(sk); struct rtable *rt = skb_rtable(skb); struct iphdr *iph; /* Build the IP header. */ skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0)); skb_reset_network_header(skb); iph = ip_hdr(skb); iph->version = 4; iph->ihl = 5; iph->tos = inet->tos; if (ip_dont_fragment(sk, &rt->dst)) iph->frag_off = htons(IP_DF); else iph->frag_off = 0; iph->ttl = ip_select_ttl(inet, &rt->dst); iph->daddr = rt->rt_dst; iph->saddr = rt->rt_src; iph->protocol = sk->sk_protocol; ip_select_ident(iph, &rt->dst, sk); if (opt && opt->optlen) { iph->ihl += opt->optlen>>2; ip_options_build(skb, opt, daddr, rt, 0); } skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; /* Send it out. */ return ip_local_out(skb); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-362'], 'message': 'inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We cant insert an rcu_head in struct ip_options since its included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Cc: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static struct ip_options *tcp_v4_save_options(struct sock *sk, struct sk_buff *skb) { struct ip_options *opt = &(IPCB(skb)->opt); struct ip_options *dopt = NULL; if (opt && opt->optlen) { int opt_size = optlength(opt); dopt = kmalloc(opt_size, GFP_ATOMIC); if (dopt) { if (ip_options_echo(dopt, skb)) { kfree(dopt); dopt = NULL; } } } return dopt; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-362'], 'message': 'inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We cant insert an rcu_head in struct ip_options since its included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Cc: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int ip_options_get_finish(struct net *net, struct ip_options **optp, struct ip_options *opt, int optlen) { while (optlen & 3) opt->__data[optlen++] = IPOPT_END; opt->optlen = optlen; if (optlen && ip_options_compile(net, opt, NULL)) { kfree(opt); return -EINVAL; } kfree(*optp); *optp = opt; return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-362'], 'message': 'inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We cant insert an rcu_head in struct ip_options since its included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Cc: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb) { struct ip_options *sopt; unsigned char *sptr, *dptr; int soffset, doffset; int optlen; __be32 daddr; memset(dopt, 0, sizeof(struct ip_options)); sopt = &(IPCB(skb)->opt); if (sopt->optlen == 0) { dopt->optlen = 0; return 0; } sptr = skb_network_header(skb); dptr = dopt->__data; daddr = skb_rtable(skb)->rt_spec_dst; if (sopt->rr) { optlen = sptr[sopt->rr+1]; soffset = sptr[sopt->rr+2]; dopt->rr = dopt->optlen + sizeof(struct iphdr); memcpy(dptr, sptr+sopt->rr, optlen); if (sopt->rr_needaddr && soffset <= optlen) { if (soffset + 3 > optlen) return -EINVAL; dptr[2] = soffset + 4; dopt->rr_needaddr = 1; } dptr += optlen; dopt->optlen += optlen; } if (sopt->ts) { optlen = sptr[sopt->ts+1]; soffset = sptr[sopt->ts+2]; dopt->ts = dopt->optlen + sizeof(struct iphdr); memcpy(dptr, sptr+sopt->ts, optlen); if (soffset <= optlen) { if (sopt->ts_needaddr) { if (soffset + 3 > optlen) return -EINVAL; dopt->ts_needaddr = 1; soffset += 4; } if (sopt->ts_needtime) { if (soffset + 3 > optlen) return -EINVAL; if ((dptr[3]&0xF) != IPOPT_TS_PRESPEC) { dopt->ts_needtime = 1; soffset += 4; } else { dopt->ts_needtime = 0; if (soffset + 7 <= optlen) { __be32 addr; memcpy(&addr, dptr+soffset-1, 4); if (inet_addr_type(dev_net(skb_dst(skb)->dev), addr) != RTN_UNICAST) { dopt->ts_needtime = 1; soffset += 8; } } } } dptr[2] = soffset; } dptr += optlen; dopt->optlen += optlen; } if (sopt->srr) { unsigned char * start = sptr+sopt->srr; __be32 faddr; optlen = start[1]; soffset = start[2]; doffset = 0; if (soffset > optlen) soffset = optlen + 1; soffset -= 4; if (soffset > 3) { memcpy(&faddr, &start[soffset-1], 4); for (soffset-=4, doffset=4; soffset > 3; soffset-=4, doffset+=4) memcpy(&dptr[doffset-1], &start[soffset-1], 4); /* * RFC1812 requires to fix illegal source routes. */ if (memcmp(&ip_hdr(skb)->saddr, &start[soffset + 3], 4) == 0) doffset -= 4; } if (doffset > 3) { memcpy(&start[doffset-1], &daddr, 4); dopt->faddr = faddr; dptr[0] = start[0]; dptr[1] = doffset+3; dptr[2] = 4; dptr += doffset+3; dopt->srr = dopt->optlen + sizeof(struct iphdr); dopt->optlen += doffset+3; dopt->is_strictroute = sopt->is_strictroute; } } if (sopt->cipso) { optlen = sptr[sopt->cipso+1]; dopt->cipso = dopt->optlen+sizeof(struct iphdr); memcpy(dptr, sptr+sopt->cipso, optlen); dptr += optlen; dopt->optlen += optlen; } while (dopt->optlen & 3) { *dptr++ = IPOPT_END; dopt->optlen++; } return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-362'], 'message': 'inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We cant insert an rcu_head in struct ip_options since its included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. 
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Cc: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg, unsigned int len) { struct inet_sock *inet = inet_sk(sk); struct { struct ip_options opt; char data[40]; } replyopts; struct ipcm_cookie ipc; __be32 daddr; struct rtable *rt = skb_rtable(skb); if (ip_options_echo(&replyopts.opt, skb)) return; daddr = ipc.addr = rt->rt_src; ipc.opt = NULL; ipc.tx_flags = 0; if (replyopts.opt.optlen) { ipc.opt = &replyopts.opt; if (ipc.opt->srr) daddr = replyopts.opt.faddr; } { struct flowi4 fl4; flowi4_init_output(&fl4, arg->bound_dev_if, 0, RT_TOS(ip_hdr(skb)->tos), RT_SCOPE_UNIVERSE, sk->sk_protocol, ip_reply_arg_flowi_flags(arg), daddr, rt->rt_spec_dst, tcp_hdr(skb)->source, tcp_hdr(skb)->dest); security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); rt = ip_route_output_key(sock_net(sk), &fl4); if (IS_ERR(rt)) return; } /* And let IP do all the hard work. This chunk is not reenterable, hence spinlock. Note that it uses the fact, that this function is called with locally disabled BH and that sk cannot be already spinlocked. */ bh_lock_sock(sk); inet->tos = ip_hdr(skb)->tos; sk->sk_priority = skb->priority; sk->sk_protocol = ip_hdr(skb)->protocol; sk->sk_bound_dev_if = arg->bound_dev_if; ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0, &ipc, &rt, MSG_DONTWAIT); if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) { if (arg->csumoffset >= 0) *((__sum16 *)skb_transport_header(skb) + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum)); skb->ip_summed = CHECKSUM_NONE; ip_push_pending_frames(sk); } bh_unlock_sock(sk); ip_rt_put(rt); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-362'], 'message': 'inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We cant insert an rcu_head in struct ip_options since its included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Cc: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: int cipso_v4_sock_setattr(struct sock *sk, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options *opt = NULL; struct inet_sock *sk_inet; struct inet_connection_sock *sk_conn; /* In the case of sock_create_lite(), the sock->sk field is not * defined yet but it is not a problem as the only users of these * "lite" PF_INET sockets are functions which do an accept() call * afterwards so we will label the socket as part of the accept(). */ if (sk == NULL) return 0; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto socket_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. */ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } memcpy(opt->__data, buf, buf_len); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; sk_inet = inet_sk(sk); if (sk_inet->is_icsk) { sk_conn = inet_csk(sk); if (sk_inet->opt) sk_conn->icsk_ext_hdr_len -= sk_inet->opt->optlen; sk_conn->icsk_ext_hdr_len += opt->optlen; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } opt = xchg(&sk_inet->opt, opt); kfree(opt); return 0; socket_setattr_failure: kfree(buf); kfree(opt); return ret_val; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-362'], 'message': 'inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We cant insert an rcu_head in struct ip_options since its included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Cc: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>'</s>
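This record shows the writer half of the same race: opt = xchg(&sk_inet->opt, opt); kfree(opt); publishes the new options and immediately frees the old block while a reader such as ip_setup_cork() above may still be mid-copy. The mainline fix uses RCU, as the commit message says; purely as an illustration, a mutex-based user-space analogue of a safe swap looks like this (hypothetical code, not the kernel patch):

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    struct opts { int optlen; unsigned char data[40]; };

    static struct opts *shared_opt;
    static pthread_mutex_t opt_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Safe reader: copy out under the lock and never retain the shared
     * pointer past the critical section. */
    void get_opts(struct opts *dst)
    {
        pthread_mutex_lock(&opt_lock);
        if (shared_opt)
            memcpy(dst, shared_opt, sizeof(*dst));
        else
            memset(dst, 0, sizeof(*dst));
        pthread_mutex_unlock(&opt_lock);
    }

    /* Safe writer: swap under the lock; since no reader keeps the old
     * pointer outside its own critical section, freeing it afterwards
     * cannot race. */
    void set_opts(struct opts *newo)
    {
        struct opts *old;

        pthread_mutex_lock(&opt_lock);
        old = shared_opt;
        shared_opt = newo;
        pthread_mutex_unlock(&opt_lock);
        free(old);
    }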
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void cipso_v4_req_delattr(struct request_sock *req) { struct ip_options *opt; struct inet_request_sock *req_inet; req_inet = inet_rsk(req); opt = req_inet->opt; if (opt == NULL || opt->cipso == 0) return; cipso_v4_delopt(&req_inet->opt); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-362'], 'message': 'inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We cant insert an rcu_head in struct ip_options since its included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Cc: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static struct rtable *icmp_route_lookup(struct net *net, struct sk_buff *skb_in, const struct iphdr *iph, __be32 saddr, u8 tos, int type, int code, struct icmp_bxm *param) { struct flowi4 fl4 = { .daddr = (param->replyopts.srr ? param->replyopts.faddr : iph->saddr), .saddr = saddr, .flowi4_tos = RT_TOS(tos), .flowi4_proto = IPPROTO_ICMP, .fl4_icmp_type = type, .fl4_icmp_code = code, }; struct rtable *rt, *rt2; int err; security_skb_classify_flow(skb_in, flowi4_to_flowi(&fl4)); rt = __ip_route_output_key(net, &fl4); if (IS_ERR(rt)) return rt; /* No need to clone since we're just using its address. */ rt2 = rt; if (!fl4.saddr) fl4.saddr = rt->rt_src; rt = (struct rtable *) xfrm_lookup(net, &rt->dst, flowi4_to_flowi(&fl4), NULL, 0); if (!IS_ERR(rt)) { if (rt != rt2) return rt; } else if (PTR_ERR(rt) == -EPERM) { rt = NULL; } else return rt; err = xfrm_decode_session_reverse(skb_in, flowi4_to_flowi(&fl4), AF_INET); if (err) goto relookup_failed; if (inet_addr_type(net, fl4.saddr) == RTN_LOCAL) { rt2 = __ip_route_output_key(net, &fl4); if (IS_ERR(rt2)) err = PTR_ERR(rt2); } else { struct flowi4 fl4_2 = {}; unsigned long orefdst; fl4_2.daddr = fl4.saddr; rt2 = ip_route_output_key(net, &fl4_2); if (IS_ERR(rt2)) { err = PTR_ERR(rt2); goto relookup_failed; } /* Ugh! */ orefdst = skb_in->_skb_refdst; /* save old refdst */ err = ip_route_input(skb_in, fl4.daddr, fl4.saddr, RT_TOS(tos), rt2->dst.dev); dst_release(&rt2->dst); rt2 = skb_rtable(skb_in); skb_in->_skb_refdst = orefdst; /* restore old refdst */ } if (err) goto relookup_failed; rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst, flowi4_to_flowi(&fl4), NULL, XFRM_LOOKUP_ICMP); if (!IS_ERR(rt2)) { dst_release(&rt->dst); rt = rt2; } else if (PTR_ERR(rt2) == -EPERM) { if (rt) dst_release(&rt->dst); return rt2; } else { err = PTR_ERR(rt2); goto relookup_failed; } return rt; relookup_failed: if (rt) return rt; return ERR_PTR(err); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-362'], 'message': 'inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We cant insert an rcu_head in struct ip_options since its included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Cc: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) { struct iphdr *iph; int room; struct icmp_bxm icmp_param; struct rtable *rt = skb_rtable(skb_in); struct ipcm_cookie ipc; __be32 saddr; u8 tos; struct net *net; struct sock *sk; if (!rt) goto out; net = dev_net(rt->dst.dev); /* * Find the original header. It is expected to be valid, of course. * Check this, icmp_send is called from the most obscure devices * sometimes. */ iph = ip_hdr(skb_in); if ((u8 *)iph < skb_in->head || (skb_in->network_header + sizeof(*iph)) > skb_in->tail) goto out; /* * No replies to physical multicast/broadcast */ if (skb_in->pkt_type != PACKET_HOST) goto out; /* * Now check at the protocol level */ if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) goto out; /* * Only reply to fragment 0. We byte re-order the constant * mask for efficiency. */ if (iph->frag_off & htons(IP_OFFSET)) goto out; /* * If we send an ICMP error to an ICMP error a mess would result.. */ if (icmp_pointers[type].error) { /* * We are an error, check if we are replying to an * ICMP error */ if (iph->protocol == IPPROTO_ICMP) { u8 _inner_type, *itp; itp = skb_header_pointer(skb_in, skb_network_header(skb_in) + (iph->ihl << 2) + offsetof(struct icmphdr, type) - skb_in->data, sizeof(_inner_type), &_inner_type); if (itp == NULL) goto out; /* * Assume any unknown ICMP type is an error. This * isn't specified by the RFC, but think about it.. */ if (*itp > NR_ICMP_TYPES || icmp_pointers[*itp].error) goto out; } } sk = icmp_xmit_lock(net); if (sk == NULL) return; /* * Construct source address and options. */ saddr = iph->daddr; if (!(rt->rt_flags & RTCF_LOCAL)) { struct net_device *dev = NULL; rcu_read_lock(); if (rt_is_input_route(rt) && net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr) dev = dev_get_by_index_rcu(net, rt->rt_iif); if (dev) saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK); else saddr = 0; rcu_read_unlock(); } tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) | IPTOS_PREC_INTERNETCONTROL) : iph->tos; if (ip_options_echo(&icmp_param.replyopts, skb_in)) goto out_unlock; /* * Prepare data for ICMP header. */ icmp_param.data.icmph.type = type; icmp_param.data.icmph.code = code; icmp_param.data.icmph.un.gateway = info; icmp_param.data.icmph.checksum = 0; icmp_param.skb = skb_in; icmp_param.offset = skb_network_offset(skb_in); inet_sk(sk)->tos = tos; ipc.addr = iph->saddr; ipc.opt = &icmp_param.replyopts; ipc.tx_flags = 0; rt = icmp_route_lookup(net, skb_in, iph, saddr, tos, type, code, &icmp_param); if (IS_ERR(rt)) goto out_unlock; if (!icmpv4_xrlim_allow(net, rt, type, code)) goto ende; /* RFC says return as much as we can without exceeding 576 bytes. 
*/ room = dst_mtu(&rt->dst); if (room > 576) room = 576; room -= sizeof(struct iphdr) + icmp_param.replyopts.optlen; room -= sizeof(struct icmphdr); icmp_param.data_len = skb_in->len - icmp_param.offset; if (icmp_param.data_len > room) icmp_param.data_len = room; icmp_param.head_len = sizeof(struct icmphdr); icmp_push_reply(&icmp_param, &ipc, &rt); ende: ip_rt_put(rt); out_unlock: icmp_xmit_unlock(sk); out:; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-362'], 'message': 'inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We cant insert an rcu_head in struct ip_options since its included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Cc: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>'</s>
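The tail of icmp_send() applies the 576-byte limit its own comment cites: the whole ICMP error, headers included, must fit in 576 bytes, so the quoted payload shrinks as IP options grow. A worked example of that budget with assumed sizes (20-byte IPv4 header, 8-byte ICMP header, no options):

    #include <stdio.h>

    int main(void)
    {
        int room = 1500;                /* dst_mtu() on a typical Ethernet path */
        if (room > 576)
            room = 576;                 /* cap at the 576-byte limit */
        int optlen = 0;                 /* replyopts.optlen: no IP options here */
        room -= 20 + optlen;            /* sizeof(struct iphdr) + options */
        room -= 8;                      /* sizeof(struct icmphdr) */
        printf("quoted payload budget: %d bytes\n", room);   /* prints 548 */
        return 0;
    }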
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static struct sock *dccp_v6_request_recv_sock(struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst) { struct inet6_request_sock *ireq6 = inet6_rsk(req); struct ipv6_pinfo *newnp, *np = inet6_sk(sk); struct inet_sock *newinet; struct dccp6_sock *newdp6; struct sock *newsk; struct ipv6_txoptions *opt; if (skb->protocol == htons(ETH_P_IP)) { /* * v6 mapped */ newsk = dccp_v4_request_recv_sock(sk, skb, req, dst); if (newsk == NULL) return NULL; newdp6 = (struct dccp6_sock *)newsk; newinet = inet_sk(newsk); newinet->pinet6 = &newdp6->inet6; newnp = inet6_sk(newsk); memcpy(newnp, np, sizeof(struct ipv6_pinfo)); ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr); ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr); ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr); inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped; newsk->sk_backlog_rcv = dccp_v4_do_rcv; newnp->pktoptions = NULL; newnp->opt = NULL; newnp->mcast_oif = inet6_iif(skb); newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; /* * No need to charge this sock to the relevant IPv6 refcnt debug socks count * here, dccp_create_openreq_child now does this for us, see the comment in * that function for the gory details. -acme */ /* It is tricky place. Until this moment IPv4 tcp worked with IPv6 icsk.icsk_af_ops. Sync it now. */ dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie); return newsk; } opt = np->opt; if (sk_acceptq_is_full(sk)) goto out_overflow; if (dst == NULL) { struct in6_addr *final_p, final; struct flowi6 fl6; memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_DCCP; ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr); final_p = fl6_update_dst(&fl6, opt, &final); ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr); fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.fl6_dport = inet_rsk(req)->rmt_port; fl6.fl6_sport = inet_rsk(req)->loc_port; security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false); if (IS_ERR(dst)) goto out; } newsk = dccp_create_openreq_child(sk, req, skb); if (newsk == NULL) goto out_nonewsk; /* * No need to charge this sock to the relevant IPv6 refcnt debug socks * count here, dccp_create_openreq_child now does this for us, see the * comment in that function for the gory details. -acme */ __ip6_dst_store(newsk, dst, NULL, NULL); newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM | NETIF_F_TSO); newdp6 = (struct dccp6_sock *)newsk; newinet = inet_sk(newsk); newinet->pinet6 = &newdp6->inet6; newnp = inet6_sk(newsk); memcpy(newnp, np, sizeof(struct ipv6_pinfo)); ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr); ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr); ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr); newsk->sk_bound_dev_if = ireq6->iif; /* Now IPv6 options... First: no IPv4 options. 
*/ newinet->opt = NULL; /* Clone RX bits */ newnp->rxopt.all = np->rxopt.all; /* Clone pktoptions received with SYN */ newnp->pktoptions = NULL; if (ireq6->pktopts != NULL) { newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC); kfree_skb(ireq6->pktopts); ireq6->pktopts = NULL; if (newnp->pktoptions) skb_set_owner_r(newnp->pktoptions, newsk); } newnp->opt = NULL; newnp->mcast_oif = inet6_iif(skb); newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; /* * Clone native IPv6 options from listening socket (if any) * * Yes, keeping reference count would be much more clever, but we make * one more one thing there: reattach optmem to newsk. */ if (opt != NULL) { newnp->opt = ipv6_dup_options(newsk, opt); if (opt != np->opt) sock_kfree_s(sk, opt, opt->tot_len); } inet_csk(newsk)->icsk_ext_hdr_len = 0; if (newnp->opt != NULL) inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen + newnp->opt->opt_flen); dccp_sync_mss(newsk, dst_mtu(dst)); newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6; newinet->inet_rcv_saddr = LOOPBACK4_IPV6; if (__inet_inherit_port(sk, newsk) < 0) { sock_put(newsk); goto out; } __inet6_hash(newsk, NULL); return newsk; out_overflow: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); out_nonewsk: dst_release(dst); out: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); if (opt != NULL && opt != np->opt) sock_kfree_s(sk, opt, opt->tot_len); return NULL; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-362'], 'message': 'inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We cant insert an rcu_head in struct ip_options since its included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Cc: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void inet_sock_destruct(struct sock *sk) { struct inet_sock *inet = inet_sk(sk); __skb_queue_purge(&sk->sk_receive_queue); __skb_queue_purge(&sk->sk_error_queue); sk_mem_reclaim(sk); if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) { pr_err("Attempt to release TCP socket in state %d %p\n", sk->sk_state, sk); return; } if (!sock_flag(sk, SOCK_DEAD)) { pr_err("Attempt to release alive inet socket %p\n", sk); return; } WARN_ON(atomic_read(&sk->sk_rmem_alloc)); WARN_ON(atomic_read(&sk->sk_wmem_alloc)); WARN_ON(sk->sk_wmem_queued); WARN_ON(sk->sk_forward_alloc); kfree(inet->opt); dst_release(rcu_dereference_check(sk->sk_dst_cache, 1)); sk_refcnt_debug_dec(sk); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-362'], 'message': 'inet: add RCU protection to inet->opt We lack proper synchronization to manipulate inet->opt ip_options Problem is ip_make_skb() calls ip_setup_cork() and ip_setup_cork() possibly makes a copy of ipc->opt (struct ip_options), without any protection against another thread manipulating inet->opt. Another thread can change inet->opt pointer and free old one under us. Use RCU to protect inet->opt (changed to inet->inet_opt). Instead of handling atomic refcounts, just copy ip_options when necessary, to avoid cache line dirtying. We cant insert an rcu_head in struct ip_options since its included in skb->cb[], so this patch is large because I had to introduce a new ip_options_rcu structure. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Cc: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: process (GeglOperation *operation, GeglBuffer *output, const GeglRectangle *result, gint level) { GeglChantO *o = GEGL_CHANT_PROPERTIES (operation); FILE *fp; pnm_struct img; GeglRectangle rect = {0,0,0,0}; gboolean ret = FALSE; fp = (!strcmp (o->path, "-") ? stdin : fopen (o->path,"rb")); if (!fp) return FALSE; if (!ppm_load_read_header (fp, &img)) goto out; rect.height = img.height; rect.width = img.width; /* Allocating Array Size */ img.data = (guchar*) g_malloc (img.numsamples * img.bpc); switch (img.bpc) { case 1: gegl_buffer_get (output, &rect, 1.0, babl_format ("R'G'B' u8"), img.data, GEGL_AUTO_ROWSTRIDE, GEGL_ABYSS_NONE); break; case 2: gegl_buffer_get (output, &rect, 1.0, babl_format ("R'G'B' u16"), img.data, GEGL_AUTO_ROWSTRIDE, GEGL_ABYSS_NONE); break; default: g_warning ("%s: Programmer stupidity error", G_STRLOC); } ppm_load_read_image (fp, &img); switch (img.bpc) { case 1: gegl_buffer_set (output, &rect, 0, babl_format ("R'G'B' u8"), img.data, GEGL_AUTO_ROWSTRIDE); break; case 2: gegl_buffer_set (output, &rect, 0, babl_format ("R'G'B' u16"), img.data, GEGL_AUTO_ROWSTRIDE); break; default: g_warning ("%s: Programmer stupidity error", G_STRLOC); } g_free (img.data); ret = TRUE; out: if (stdin != fp) fclose (fp); return ret; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-189'], 'message': 'ppm-load: CVE-2012-4433: don't overflow memory allocation Carefully selected width/height values could cause the size of a later allocation to overflow, resulting in a buffer much too small to store the data which would then written beyond its end.'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: ppm_load_read_header(FILE *fp, pnm_struct *img) { /* PPM Headers Variable Declaration */ gchar *ptr; //gchar *retval; gchar header[MAX_CHARS_IN_ROW]; gint maxval; /* Check the PPM file Type P2 or P5 */ fgets (header,MAX_CHARS_IN_ROW,fp); if (header[0] != ASCII_P || (header[1] != PIXMAP_ASCII && header[1] != PIXMAP_RAW)) { g_warning ("Image is not a portable pixmap"); return FALSE; } img->type = header[1]; /* Check the Comments */ fgets (header,MAX_CHARS_IN_ROW,fp); while(header[0] == '#') { fgets (header,MAX_CHARS_IN_ROW,fp); } /* Get Width and Height */ img->width = strtol (header,&ptr,0); img->height = atoi (ptr); fgets (header,MAX_CHARS_IN_ROW,fp); maxval = strtol (header,&ptr,0); if ((maxval != 255) && (maxval != 65535)) { g_warning ("Image is not an 8-bit or 16-bit portable pixmap"); return FALSE; } switch (maxval) { case 255: img->bpc = sizeof (guchar); break; case 65535: img->bpc = sizeof (gushort); break; default: g_warning ("%s: Programmer stupidity error", G_STRLOC); } /* Later on, img->numsamples is multiplied with img->bpc to allocate * memory. Ensure it doesn't overflow. */ if (!img->width || !img->height || G_MAXSIZE / img->width / img->height / CHANNEL_COUNT < img->bpc) { g_warning ("Illegal width/height: %ld/%ld", img->width, img->height); return FALSE; } img->numsamples = img->width * img->height * CHANNEL_COUNT; return TRUE; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-189'], 'message': 'ppm-load: CVE-2012-4433: add plausibility checks for header fields Refuse values that are non-decimal, negative or overflow the target type.'</s>
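The plausibility check in ppm_load_read_header divides rather than multiplies, so the comparison itself cannot wrap. The same guard extracted into a standalone helper (the function name is mine, not GEGL's):

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

/* Allocate w*h pixels of 'channels' samples of 'bpc' bytes each, or
 * return NULL if any intermediate product would overflow size_t. */
static void *alloc_image(size_t w, size_t h, size_t channels, size_t bpc)
{
    if (w == 0 || h == 0 || channels == 0 || bpc == 0)
        return NULL;
    /* Equivalent to w*h*channels*bpc > SIZE_MAX, without wrapping. */
    if (SIZE_MAX / w / h / channels < bpc)
        return NULL;
    return malloc(w * h * channels * bpc);
}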
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: xsltAttrTemplateProcess(xsltTransformContextPtr ctxt, xmlNodePtr target, xmlAttrPtr attr) { const xmlChar *value; xmlAttrPtr ret; if ((ctxt == NULL) || (attr == NULL) || (target == NULL) || (target->type != XML_ELEMENT_NODE)) return(NULL); if (attr->type != XML_ATTRIBUTE_NODE) return(NULL); /* * Skip all XSLT attributes. */ #ifdef XSLT_REFACTORED if (attr->psvi == xsltXSLTAttrMarker) return(NULL); #else if ((attr->ns != NULL) && xmlStrEqual(attr->ns->href, XSLT_NAMESPACE)) return(NULL); #endif /* * Get the value. */ if (attr->children != NULL) { if ((attr->children->type != XML_TEXT_NODE) || (attr->children->next != NULL)) { xsltTransformError(ctxt, NULL, attr->parent, "Internal error: The children of an attribute node of a " "literal result element are not in the expected form.\n"); return(NULL); } value = attr->children->content; if (value == NULL) value = xmlDictLookup(ctxt->dict, BAD_CAST "", 0); } else value = xmlDictLookup(ctxt->dict, BAD_CAST "", 0); /* * Overwrite duplicates. */ ret = target->properties; while (ret != NULL) { if (((attr->ns != NULL) == (ret->ns != NULL)) && xmlStrEqual(ret->name, attr->name) && ((attr->ns == NULL) || xmlStrEqual(ret->ns->href, attr->ns->href))) { break; } ret = ret->next; } if (ret != NULL) { /* free the existing value */ xmlFreeNodeList(ret->children); ret->children = ret->last = NULL; /* * Adjust ns-prefix if needed. */ if ((ret->ns != NULL) && (! xmlStrEqual(ret->ns->prefix, attr->ns->prefix))) { ret->ns = xsltGetNamespace(ctxt, attr->parent, attr->ns, target); } } else { /* create a new attribute */ if (attr->ns != NULL) ret = xmlNewNsProp(target, xsltGetNamespace(ctxt, attr->parent, attr->ns, target), attr->name, NULL); else ret = xmlNewNsProp(target, NULL, attr->name, NULL); } /* * Set the value. */ if (ret != NULL) { xmlNodePtr text; text = xmlNewText(NULL); if (text != NULL) { ret->last = ret->children = text; text->parent = (xmlNodePtr) ret; text->doc = ret->doc; if (attr->psvi != NULL) { /* * Evaluate the Attribute Value Template. */ xmlChar *val; val = xsltEvalAVT(ctxt, attr->psvi, attr->parent); if (val == NULL) { /* * TODO: Damn, we need an easy mechanism to report * qualified names! */ if (attr->ns) { xsltTransformError(ctxt, NULL, attr->parent, "Internal error: Failed to evaluate the AVT " "of attribute '{%s}%s'.\n", attr->ns->href, attr->name); } else { xsltTransformError(ctxt, NULL, attr->parent, "Internal error: Failed to evaluate the AVT " "of attribute '%s'.\n", attr->name); } text->content = xmlStrdup(BAD_CAST ""); } else { text->content = val; } } else if ((ctxt->internalized) && (target != NULL) && (target->doc != NULL) && (target->doc->dict == ctxt->dict)) { text->content = (xmlChar *) value; } else { text->content = xmlStrdup(value); } } } else { if (attr->ns) { xsltTransformError(ctxt, NULL, attr->parent, "Internal error: Failed to create attribute '{%s}%s'.\n", attr->ns->href, attr->name); } else { xsltTransformError(ctxt, NULL, attr->parent, "Internal error: Failed to create attribute '%s'.\n", attr->name); } } return(ret); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-399'], 'message': 'Fix a dictionary string usage Raised in chromium, but also affecting xsltproc Also updated AUTHORS to list Chris and other contributors'</s>
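The hazard in the record above is aliasing a string into the tree when that string may not be owned by the context's dictionary. A short sketch of the ownership test; xmlDictOwns() and xmlStrdup() are real libxml2 calls, the wrapper itself is illustrative:

#include <libxml/dict.h>
#include <libxml/xmlstring.h>

/* Alias 'value' only when the dictionary really owns it; otherwise take
 * a private copy so the node cannot end up pointing at freed memory. */
static xmlChar *intern_or_copy(xmlDictPtr dict, const xmlChar *value)
{
    if ((dict != NULL) && (xmlDictOwns(dict, value) == 1))
        return (xmlChar *) value;   /* dict-owned: safe to share */
    return xmlStrdup(value);        /* foreign string: copy it */
}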
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int load_script(struct linux_binprm *bprm) { const char *i_arg, *i_name; char *cp; struct file *file; char interp[BINPRM_BUF_SIZE]; int retval; if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!')) return -ENOEXEC; /* * This section does the #! interpretation. * Sorta complicated, but hopefully it will work. -TYT */ allow_write_access(bprm->file); fput(bprm->file); bprm->file = NULL; bprm->buf[BINPRM_BUF_SIZE - 1] = '\0'; if ((cp = strchr(bprm->buf, '\n')) == NULL) cp = bprm->buf+BINPRM_BUF_SIZE-1; *cp = '\0'; while (cp > bprm->buf) { cp--; if ((*cp == ' ') || (*cp == '\t')) *cp = '\0'; else break; } for (cp = bprm->buf+2; (*cp == ' ') || (*cp == '\t'); cp++); if (*cp == '\0') return -ENOEXEC; /* No interpreter name found */ i_name = cp; i_arg = NULL; for ( ; *cp && (*cp != ' ') && (*cp != '\t'); cp++) /* nothing */ ; while ((*cp == ' ') || (*cp == '\t')) *cp++ = '\0'; if (*cp) i_arg = cp; strcpy (interp, i_name); /* * OK, we've parsed out the interpreter name and * (optional) argument. * Splice in (1) the interpreter's name for argv[0] * (2) (optional) argument to interpreter * (3) filename of shell script (replace argv[0]) * * This is done in reverse order, because of how the * user environment and arguments are stored. */ retval = remove_arg_zero(bprm); if (retval) return retval; retval = copy_strings_kernel(1, &bprm->interp, bprm); if (retval < 0) return retval; bprm->argc++; if (i_arg) { retval = copy_strings_kernel(1, &i_arg, bprm); if (retval < 0) return retval; bprm->argc++; } retval = copy_strings_kernel(1, &i_name, bprm); if (retval) return retval; bprm->argc++; bprm->interp = interp; /* * OK, now restart the process with the interpreter's dentry. */ file = open_exec(interp); if (IS_ERR(file)) return PTR_ERR(file); bprm->file = file; retval = prepare_binprm(bprm); if (retval < 0) return retval; return search_binary_handler(bprm); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-200'], 'message': 'exec: do not leave bprm->interp on stack If a series of scripts are executed, each triggering module loading via unprintable bytes in the script header, kernel stack contents can leak into the command line. Normally execution of binfmt_script and binfmt_misc happens recursively. However, when modules are enabled, and unprintable bytes exist in the bprm->buf, execution will restart after attempting to load matching binfmt modules. Unfortunately, the logic in binfmt_script and binfmt_misc does not expect to get restarted. They leave bprm->interp pointing to their local stack. This means on restart bprm->interp is left pointing into unused stack memory which can then be copied into the userspace argv areas. After additional study, it seems that both recursion and restart remains the desirable way to handle exec with scripts, misc, and modules. As such, we need to protect the changes to interp. This changes the logic to require allocation for any changes to the bprm->interp. To avoid adding a new kmalloc to every exec, the default value is left as-is. Only when passing through binfmt_script or binfmt_misc does an allocation take place. For a proof of concept, see DoTest.sh from: http://www.halfdog.net/Security/2012/LinuxKernelBinfmtScriptStackDataDisclosure/ Signed-off-by: Kees Cook <keescook@chromium.org> Cc: halfdog <me@halfdog.net> Cc: P J P <ppandit@redhat.com> Cc: Alexander Viro <viro@zeniv.linux.org.uk> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>'</s>
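The crux of that commit is that bprm->interp must never point into a stack frame that is gone by the time the restarted exec reads it. A userspace-compilable sketch of the allocate-on-change shape (the mainline helper is called bprm_change_interp(); the struct, flag, and return codes here are simplified):

#include <stdlib.h>
#include <string.h>

#define INTERP_ALLOCATED 0x1u

struct binprm_sketch {
    char *interp;            /* must reference heap or static storage */
    unsigned int flags;
};

/* Replace bprm->interp with a heap copy instead of a pointer into the
 * caller's stack buffer; kstrdup() plays this role in the kernel fix. */
static int change_interp(struct binprm_sketch *bprm, const char *interp)
{
    char *copy = strdup(interp);

    if (copy == NULL)
        return -1;               /* -ENOMEM in the kernel */
    if (bprm->flags & INTERP_ALLOCATED)
        free(bprm->interp);      /* drop a previous heap copy, never the default */
    bprm->interp = copy;
    bprm->flags |= INTERP_ALLOCATED;
    return 0;
}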
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void free_bprm(struct linux_binprm *bprm) { free_arg_pages(bprm); if (bprm->cred) { mutex_unlock(&current->signal->cred_guard_mutex); abort_creds(bprm->cred); } kfree(bprm); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-200'], 'message': 'exec: do not leave bprm->interp on stack If a series of scripts are executed, each triggering module loading via unprintable bytes in the script header, kernel stack contents can leak into the command line. Normally execution of binfmt_script and binfmt_misc happens recursively. However, when modules are enabled, and unprintable bytes exist in the bprm->buf, execution will restart after attempting to load matching binfmt modules. Unfortunately, the logic in binfmt_script and binfmt_misc does not expect to get restarted. They leave bprm->interp pointing to their local stack. This means on restart bprm->interp is left pointing into unused stack memory which can then be copied into the userspace argv areas. After additional study, it seems that both recursion and restart remains the desirable way to handle exec with scripts, misc, and modules. As such, we need to protect the changes to interp. This changes the logic to require allocation for any changes to the bprm->interp. To avoid adding a new kmalloc to every exec, the default value is left as-is. Only when passing through binfmt_script or binfmt_misc does an allocation take place. For a proof of concept, see DoTest.sh from: http://www.halfdog.net/Security/2012/LinuxKernelBinfmtScriptStackDataDisclosure/ Signed-off-by: Kees Cook <keescook@chromium.org> Cc: halfdog <me@halfdog.net> Cc: P J P <ppandit@redhat.com> Cc: Alexander Viro <viro@zeniv.linux.org.uk> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: int search_binary_handler(struct linux_binprm *bprm) { unsigned int depth = bprm->recursion_depth; int try,retval; struct linux_binfmt *fmt; pid_t old_pid, old_vpid; retval = security_bprm_check(bprm); if (retval) return retval; retval = audit_bprm(bprm); if (retval) return retval; /* Need to fetch pid before load_binary changes it */ old_pid = current->pid; rcu_read_lock(); old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent)); rcu_read_unlock(); retval = -ENOENT; for (try=0; try<2; try++) { read_lock(&binfmt_lock); list_for_each_entry(fmt, &formats, lh) { int (*fn)(struct linux_binprm *) = fmt->load_binary; if (!fn) continue; if (!try_module_get(fmt->module)) continue; read_unlock(&binfmt_lock); retval = fn(bprm); /* * Restore the depth counter to its starting value * in this call, so we don't have to rely on every * load_binary function to restore it on return. */ bprm->recursion_depth = depth; if (retval >= 0) { if (depth == 0) { trace_sched_process_exec(current, old_pid, bprm); ptrace_event(PTRACE_EVENT_EXEC, old_vpid); } put_binfmt(fmt); allow_write_access(bprm->file); if (bprm->file) fput(bprm->file); bprm->file = NULL; current->did_exec = 1; proc_exec_connector(current); return retval; } read_lock(&binfmt_lock); put_binfmt(fmt); if (retval != -ENOEXEC || bprm->mm == NULL) break; if (!bprm->file) { read_unlock(&binfmt_lock); return retval; } } read_unlock(&binfmt_lock); #ifdef CONFIG_MODULES if (retval != -ENOEXEC || bprm->mm == NULL) { break; } else { #define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e)) if (printable(bprm->buf[0]) && printable(bprm->buf[1]) && printable(bprm->buf[2]) && printable(bprm->buf[3])) break; /* -ENOEXEC */ if (try) break; /* -ENOEXEC */ request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2])); } #else break; #endif } return retval; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-200'], 'message': 'exec: use -ELOOP for max recursion depth To avoid an explosion of request_module calls on a chain of abusive scripts, fail maximum recursion with -ELOOP instead of -ENOEXEC. As soon as maximum recursion depth is hit, the error will fail all the way back up the chain, aborting immediately. This also has the side-effect of stopping the user's shell from attempting to reexecute the top-level file as a shell script. As seen in the dash source: if (cmd != path_bshell && errno == ENOEXEC) { *argv-- = cmd; *argv = cmd = path_bshell; goto repeat; } The above logic was designed for running scripts automatically that lacked the "#!" header, not to re-try failed recursion. On a legitimate -ENOEXEC, things continue to behave as the shell expects. Additionally, when tracking recursion, the binfmt handlers should not be involved. The recursion being tracked is the depth of calls through search_binary_handler(), so that function should be exclusively responsible for tracking the depth. Signed-off-by: Kees Cook <keescook@chromium.org> Cc: halfdog <me@halfdog.net> Cc: P J P <ppandit@redhat.com> Cc: Alexander Viro <viro@zeniv.linux.org.uk> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>'</s>
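Condensed, the -ELOOP change has two parts: the dispatcher alone owns the depth counter, and hitting the limit is a hard error rather than -ENOEXEC. A sketch with illustrative names and limit:

#include <errno.h>

#define MAX_INTERP_RECURSION 4   /* illustrative; the kernel uses its own cap */

struct exec_ctx {
    int recursion_depth;
};

/* Handlers never touch the counter; they just call back in, and the
 * dispatcher restores the depth itself.  -ELOOP (unlike -ENOEXEC) stops
 * the caller's shell from re-running the file as a shell script. */
static int run_handlers(struct exec_ctx *ctx)
{
    int rc, saved = ctx->recursion_depth;

    if (ctx->recursion_depth > MAX_INTERP_RECURSION)
        return -ELOOP;
    ctx->recursion_depth++;
    rc = 0;  /* ... invoke the matching handler here; it may recurse ... */
    ctx->recursion_depth = saved;
    return rc;
}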
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int load_script(struct linux_binprm *bprm) { const char *i_arg, *i_name; char *cp; struct file *file; char interp[BINPRM_BUF_SIZE]; int retval; if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!') || (bprm->recursion_depth > BINPRM_MAX_RECURSION)) return -ENOEXEC; /* * This section does the #! interpretation. * Sorta complicated, but hopefully it will work. -TYT */ bprm->recursion_depth++; allow_write_access(bprm->file); fput(bprm->file); bprm->file = NULL; bprm->buf[BINPRM_BUF_SIZE - 1] = '\0'; if ((cp = strchr(bprm->buf, '\n')) == NULL) cp = bprm->buf+BINPRM_BUF_SIZE-1; *cp = '\0'; while (cp > bprm->buf) { cp--; if ((*cp == ' ') || (*cp == '\t')) *cp = '\0'; else break; } for (cp = bprm->buf+2; (*cp == ' ') || (*cp == '\t'); cp++); if (*cp == '\0') return -ENOEXEC; /* No interpreter name found */ i_name = cp; i_arg = NULL; for ( ; *cp && (*cp != ' ') && (*cp != '\t'); cp++) /* nothing */ ; while ((*cp == ' ') || (*cp == '\t')) *cp++ = '\0'; if (*cp) i_arg = cp; strcpy (interp, i_name); /* * OK, we've parsed out the interpreter name and * (optional) argument. * Splice in (1) the interpreter's name for argv[0] * (2) (optional) argument to interpreter * (3) filename of shell script (replace argv[0]) * * This is done in reverse order, because of how the * user environment and arguments are stored. */ retval = remove_arg_zero(bprm); if (retval) return retval; retval = copy_strings_kernel(1, &bprm->interp, bprm); if (retval < 0) return retval; bprm->argc++; if (i_arg) { retval = copy_strings_kernel(1, &i_arg, bprm); if (retval < 0) return retval; bprm->argc++; } retval = copy_strings_kernel(1, &i_name, bprm); if (retval) return retval; bprm->argc++; bprm->interp = interp; /* * OK, now restart the process with the interpreter's dentry. */ file = open_exec(interp); if (IS_ERR(file)) return PTR_ERR(file); bprm->file = file; retval = prepare_binprm(bprm); if (retval < 0) return retval; return search_binary_handler(bprm); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-200'], 'message': 'exec: use -ELOOP for max recursion depth To avoid an explosion of request_module calls on a chain of abusive scripts, fail maximum recursion with -ELOOP instead of -ENOEXEC. As soon as maximum recursion depth is hit, the error will fail all the way back up the chain, aborting immediately. This also has the side-effect of stopping the user's shell from attempting to reexecute the top-level file as a shell script. As seen in the dash source: if (cmd != path_bshell && errno == ENOEXEC) { *argv-- = cmd; *argv = cmd = path_bshell; goto repeat; } The above logic was designed for running scripts automatically that lacked the "#!" header, not to re-try failed recursion. On a legitimate -ENOEXEC, things continue to behave as the shell expects. Additionally, when tracking recursion, the binfmt handlers should not be involved. The recursion being tracked is the depth of calls through search_binary_handler(), so that function should be exclusively responsible for tracking the depth. Signed-off-by: Kees Cook <keescook@chromium.org> Cc: halfdog <me@halfdog.net> Cc: P J P <ppandit@redhat.com> Cc: Alexander Viro <viro@zeniv.linux.org.uk> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int ext4_split_unwritten_extents(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, struct ext4_ext_path *path, int flags) { ext4_lblk_t eof_block; ext4_lblk_t ee_block; struct ext4_extent *ex; unsigned int ee_len; int split_flag = 0, depth; ext_debug("ext4_split_unwritten_extents: inode %lu, logical" "block %llu, max_blocks %u\n", inode->i_ino, (unsigned long long)map->m_lblk, map->m_len); eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits; if (eof_block < map->m_lblk + map->m_len) eof_block = map->m_lblk + map->m_len; /* * It is safe to convert extent to initialized via explicit * zeroout only if extent is fully insde i_size or new_size. */ depth = ext_depth(inode); ex = path[depth].p_ext; ee_block = le32_to_cpu(ex->ee_block); ee_len = ext4_ext_get_actual_len(ex); split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; split_flag |= EXT4_EXT_MARK_UNINIT2; flags |= EXT4_GET_BLOCKS_PRE_IO; return ext4_split_extent(handle, inode, path, map, split_flag, flags); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-362'], 'message': 'ext4: race-condition protection for ext4_convert_unwritten_extents_endio We assumed that at the time we call ext4_convert_unwritten_extents_endio() extent in question is fully inside [map.m_lblk, map->m_len] because it was already split during submission. But this may not be true due to a race between writeback vs fallocate. If extent in question is larger than requested we will split it again. Special precautions should being done if zeroout required because [map.m_lblk, map->m_len] already contains valid data. Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org> Signed-off-by: "Theodore Ts'o" <tytso@mit.edu> Cc: stable@vger.kernel.org'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, struct ext4_map_blocks *map, struct ext4_ext_path *path, int flags, unsigned int allocated, ext4_fsblk_t newblock) { int ret = 0; int err = 0; ext4_io_end_t *io = ext4_inode_aio(inode); ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical " "block %llu, max_blocks %u, flags %x, allocated %u\n", inode->i_ino, (unsigned long long)map->m_lblk, map->m_len, flags, allocated); ext4_ext_show_leaf(inode, path); trace_ext4_ext_handle_uninitialized_extents(inode, map, allocated, newblock); /* get_block() before submit the IO, split the extent */ if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { ret = ext4_split_unwritten_extents(handle, inode, map, path, flags); if (ret <= 0) goto out; /* * Flag the inode(non aio case) or end_io struct (aio case) * that this IO needs to conversion to written when IO is * completed */ if (io) ext4_set_io_unwritten_flag(inode, io); else ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); if (ext4_should_dioread_nolock(inode)) map->m_flags |= EXT4_MAP_UNINIT; goto out; } /* IO end_io complete, convert the filled extent to written */ if ((flags & EXT4_GET_BLOCKS_CONVERT)) { ret = ext4_convert_unwritten_extents_endio(handle, inode, path); if (ret >= 0) { ext4_update_inode_fsync_trans(handle, inode, 1); err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len); } else err = ret; goto out2; } /* buffered IO case */ /* * repeat fallocate creation request * we already have an unwritten extent */ if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) goto map_out; /* buffered READ or buffered write_begin() lookup */ if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { /* * We have blocks reserved already. We * return allocated blocks so that delalloc * won't do block reservation for us. But * the buffer head will be unmapped so that * a read from the block returns 0s. */ map->m_flags |= EXT4_MAP_UNWRITTEN; goto out1; } /* buffered write, writepage time, convert*/ ret = ext4_ext_convert_to_initialized(handle, inode, map, path); if (ret >= 0) ext4_update_inode_fsync_trans(handle, inode, 1); out: if (ret <= 0) { err = ret; goto out2; } else allocated = ret; map->m_flags |= EXT4_MAP_NEW; /* * if we allocated more blocks than requested * we need to make sure we unmap the extra block * allocated. The actual needed block will get * unmapped later when we find the buffer_head marked * new. */ if (allocated > map->m_len) { unmap_underlying_metadata_blocks(inode->i_sb->s_bdev, newblock + map->m_len, allocated - map->m_len); allocated = map->m_len; } /* * If we have done fallocate with the offset that is already * delayed allocated, we would have block reservation * and quota reservation done in the delayed write path. * But fallocate would have already updated quota and block * count for this offset. So cancel these reservation */ if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { unsigned int reserved_clusters; reserved_clusters = get_reserved_cluster_alloc(inode, map->m_lblk, map->m_len); if (reserved_clusters) ext4_da_update_reserve_space(inode, reserved_clusters, 0); } map_out: map->m_flags |= EXT4_MAP_MAPPED; if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) { err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len); if (err < 0) goto out2; } out1: if (allocated > map->m_len) allocated = map->m_len; ext4_ext_show_leaf(inode, path); map->m_pblk = newblock; map->m_len = allocated; out2: if (path) { ext4_ext_drop_refs(path); kfree(path); } return err ? err : allocated; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-362'], 'message': 'ext4: race-condition protection for ext4_convert_unwritten_extents_endio We assumed that at the time we call ext4_convert_unwritten_extents_endio() extent in question is fully inside [map.m_lblk, map->m_len] because it was already split during submission. But this may not be true due to a race between writeback vs fallocate. If extent in question is larger than requested we will split it again. Special precautions should being done if zeroout required because [map.m_lblk, map->m_len] already contains valid data. Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org> Signed-off-by: "Theodore Ts'o" <tytso@mit.edu> Cc: stable@vger.kernel.org'</s>
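The defensive pattern behind both ext4 records: at endio time, re-derive the extent bounds instead of trusting that the submission-time split still holds, and split again if the extent found is wider than the completed I/O. A deliberately abstract sketch, not ext4's actual types or logic:

struct extent_sketch {
    unsigned long start, len;
};

/* Returns nonzero when the extent located at conversion time is wider
 * than the range the I/O actually covered, i.e. it must be re-split
 * before being marked written. */
static int needs_resplit(const struct extent_sketch *found,
                         unsigned long io_start, unsigned long io_len)
{
    return (found->start < io_start) ||
           (found->start + found->len > io_start + io_len);
}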
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: irc_color_decode (const char *string, int keep_colors) { unsigned char *out, *ptr_string; int out_length, length, out_pos; char str_fg[3], str_bg[3], str_color[128], str_key[128]; const char *remapped_color; int fg, bg, bold, reverse, italic, underline, rc; out_length = (strlen (string) * 2) + 1; out = malloc (out_length); if (!out) return NULL; bold = 0; reverse = 0; italic = 0; underline = 0; ptr_string = (unsigned char *)string; out[0] = '\0'; while (ptr_string && ptr_string[0]) { switch (ptr_string[0]) { case IRC_COLOR_BOLD_CHAR: if (keep_colors) strcat ((char *)out, weechat_color((bold) ? "-bold" : "bold")); bold ^= 1; ptr_string++; break; case IRC_COLOR_RESET_CHAR: if (keep_colors) strcat ((char *)out, weechat_color("reset")); bold = 0; reverse = 0; italic = 0; underline = 0; ptr_string++; break; case IRC_COLOR_FIXED_CHAR: ptr_string++; break; case IRC_COLOR_REVERSE_CHAR: case IRC_COLOR_REVERSE2_CHAR: if (keep_colors) strcat ((char *)out, weechat_color((reverse) ? "-reverse" : "reverse")); reverse ^= 1; ptr_string++; break; case IRC_COLOR_ITALIC_CHAR: if (keep_colors) strcat ((char *)out, weechat_color((italic) ? "-italic" : "italic")); italic ^= 1; ptr_string++; break; case IRC_COLOR_UNDERLINE_CHAR: if (keep_colors) strcat ((char *)out, weechat_color((underline) ? "-underline" : "underline")); underline ^= 1; ptr_string++; break; case IRC_COLOR_COLOR_CHAR: ptr_string++; str_fg[0] = '\0'; str_bg[0] = '\0'; if (isdigit (ptr_string[0])) { str_fg[0] = ptr_string[0]; str_fg[1] = '\0'; ptr_string++; if (isdigit (ptr_string[0])) { str_fg[1] = ptr_string[0]; str_fg[2] = '\0'; ptr_string++; } } if ((ptr_string[0] == ',') && (isdigit (ptr_string[1]))) { ptr_string++; str_bg[0] = ptr_string[0]; str_bg[1] = '\0'; ptr_string++; if (isdigit (ptr_string[0])) { str_bg[1] = ptr_string[0]; str_bg[2] = '\0'; ptr_string++; } } if (keep_colors) { if (str_fg[0] || str_bg[0]) { fg = -1; bg = -1; if (str_fg[0]) { rc = sscanf (str_fg, "%d", &fg); if ((rc != EOF) && (rc >= 1)) { fg %= IRC_NUM_COLORS; } } if (str_bg[0]) { rc = sscanf (str_bg, "%d", &bg); if ((rc != EOF) && (rc >= 1)) { bg %= IRC_NUM_COLORS; } } /* search "fg,bg" in hashtable of remapped colors */ snprintf (str_key, sizeof (str_key), "%d,%d", fg, bg); remapped_color = weechat_hashtable_get ( irc_config_hashtable_color_mirc_remap, str_key); if (remapped_color) { snprintf (str_color, sizeof (str_color), "|%s", remapped_color); } else { snprintf (str_color, sizeof (str_color), "|%s%s%s", (fg >= 0) ? irc_color_to_weechat[fg] : "", (bg >= 0) ? "," : "", (bg >= 0) ? irc_color_to_weechat[bg] : ""); } strcat ((char *)out, weechat_color(str_color)); } else strcat ((char *)out, weechat_color("resetcolor")); } break; default: length = weechat_utf8_char_size ((char *)ptr_string); if (length == 0) length = 1; out_pos = strlen ((char *)out); memcpy (out + out_pos, ptr_string, length); out[out_pos + length] = '\0'; ptr_string += length; break; } } return (char *)out; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119'], 'message': 'irc: fix crash when decoding IRC colors in strings (bug #37704)'</s>
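The crash above comes from strcat() pushing decoded color escapes into a buffer sized at only twice the input length. A bounds-checked append that drops an attribute rather than overflowing; the helper is mine, not WeeChat's:

#include <stddef.h>
#include <string.h>

/* Append 'src' to out[0..out_size) at offset 'len'; on overflow, leave
 * the buffer untouched and return the old length. */
static size_t append_checked(char *out, size_t out_size, size_t len,
                             const char *src)
{
    size_t n = strlen(src);

    if (len + n + 1 > out_size)
        return len;                  /* skip the escape instead of overflowing */
    memcpy(out + len, src, n + 1);   /* n+1 copies the terminating NUL */
    return len + n;
}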
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int unix_getpw(UNUSED void *instance, REQUEST *request, VALUE_PAIR **vp_list) { const char *name; const char *encrypted_pass; #ifdef HAVE_GETSPNAM struct spwd *spwd = NULL; #endif #ifdef OSFC2 struct pr_passwd *pr_pw; #else struct passwd *pwd; #endif #ifdef HAVE_GETUSERSHELL char *shell; #endif VALUE_PAIR *vp; /* * We can only authenticate user requests which HAVE * a User-Name attribute. */ if (!request->username) { return RLM_MODULE_NOOP; } name = (char *)request->username->vp_strvalue; encrypted_pass = NULL; #ifdef OSFC2 if ((pr_pw = getprpwnam(name)) == NULL) return RLM_MODULE_NOTFOUND; encrypted_pass = pr_pw->ufld.fd_encrypt; /* * Check if account is locked. */ if (pr_pw->uflg.fg_lock!=1) { radlog(L_AUTH, "rlm_unix: [%s]: account locked", name); return RLM_MODULE_USERLOCK; } #else /* OSFC2 */ if ((pwd = getpwnam(name)) == NULL) { return RLM_MODULE_NOTFOUND; } encrypted_pass = pwd->pw_passwd; #endif /* OSFC2 */ #ifdef HAVE_GETSPNAM /* * See if there is a shadow password. * * Only query the _system_ shadow file if the encrypted * password from the passwd file is < 10 characters (i.e. * a valid password would never crypt() to it). This will * prevents users from using NULL password fields as things * stand right now. */ if ((encrypted_pass == NULL) || (strlen(encrypted_pass) < 10)) { if ((spwd = getspnam(name)) == NULL) { return RLM_MODULE_NOTFOUND; } encrypted_pass = spwd->sp_pwdp; } #endif /* HAVE_GETSPNAM */ /* * These require 'pwd != NULL', which isn't true on OSFC2 */ #ifndef OSFC2 #ifdef DENY_SHELL /* * Users with a particular shell are denied access */ if (strcmp(pwd->pw_shell, DENY_SHELL) == 0) { radlog_request(L_AUTH, 0, request, "rlm_unix: [%s]: invalid shell", name); return RLM_MODULE_REJECT; } #endif #ifdef HAVE_GETUSERSHELL /* * Check /etc/shells for a valid shell. If that file * contains /RADIUSD/ANY/SHELL then any shell will do. */ while ((shell = getusershell()) != NULL) { if (strcmp(shell, pwd->pw_shell) == 0 || strcmp(shell, "/RADIUSD/ANY/SHELL") == 0) { break; } } endusershell(); if (shell == NULL) { radlog_request(L_AUTH, 0, request, "[%s]: invalid shell [%s]", name, pwd->pw_shell); return RLM_MODULE_REJECT; } #endif #endif /* OSFC2 */ #if defined(HAVE_GETSPNAM) && !defined(M_UNIX) /* * Check if password has expired. */ if (spwd && spwd->sp_expire > 0 && (request->timestamp / 86400) > spwd->sp_expire) { radlog_request(L_AUTH, 0, request, "[%s]: password has expired", name); return RLM_MODULE_REJECT; } #endif #if defined(__FreeBSD__) || defined(bsdi) || defined(_PWF_EXPIRE) /* * Check if password has expired. */ if ((pwd->pw_expire > 0) && (request->timestamp > pwd->pw_expire)) { radlog_request(L_AUTH, 0, request, "[%s]: password has expired", name); return RLM_MODULE_REJECT; } #endif /* * We might have a passwordless account. * * FIXME: Maybe add Auth-Type := Accept? */ if (encrypted_pass[0] == 0) return RLM_MODULE_NOOP; vp = pairmake("Crypt-Password", encrypted_pass, T_OP_SET); if (!vp) return RLM_MODULE_FAIL; pairmove(vp_list, &vp); pairfree(&vp); /* might not be NULL; */ return RLM_MODULE_UPDATED; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-522', 'CWE-255'], 'message': 'Check for account and password expiration'</s>
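The expiry tests in the code above reduce to one comparison each; shadow expiry (sp_expire) is counted in days since the epoch. A standalone restatement of that check:

#include <time.h>

/* Mirrors the shadow check in unix_getpw(): a positive sp_expire that is
 * earlier than "today" (request timestamp / 86400) means reject. */
static int account_expired(long sp_expire, time_t request_time)
{
    return (sp_expire > 0) && ((request_time / 86400) > sp_expire);
}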
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: load_xwd_f2_d24_b32 (const gchar *filename, FILE *ifp, L_XWDFILEHEADER *xwdhdr, L_XWDCOLOR *xwdcolmap) { register guchar *dest, lsbyte_first; gint width, height, linepad, i, j, c0, c1, c2, c3; gint tile_height, scan_lines; L_CARD32 pixelval; gint red, green, blue, ncols; gint maxred, maxgreen, maxblue; gulong redmask, greenmask, bluemask; guint redshift, greenshift, blueshift; guchar redmap[256], greenmap[256], bluemap[256]; guchar *data; PIXEL_MAP pixel_map; gint err = 0; gint32 layer_ID, image_ID; GimpPixelRgn pixel_rgn; GimpDrawable *drawable; #ifdef XWD_DEBUG printf ("load_xwd_f2_d24_b32 (%s)\n", filename); #endif width = xwdhdr->l_pixmap_width; height = xwdhdr->l_pixmap_height; image_ID = create_new_image (filename, width, height, GIMP_RGB, &layer_ID, &drawable, &pixel_rgn); tile_height = gimp_tile_height (); data = g_malloc (tile_height * width * 3); redmask = xwdhdr->l_red_mask; greenmask = xwdhdr->l_green_mask; bluemask = xwdhdr->l_blue_mask; if (redmask == 0) redmask = 0xff0000; if (greenmask == 0) greenmask = 0x00ff00; if (bluemask == 0) bluemask = 0x0000ff; /* How to shift RGB to be right aligned ? */ /* (We rely on the the mask bits are grouped and not mixed) */ redshift = greenshift = blueshift = 0; while (((1 << redshift) & redmask) == 0) redshift++; while (((1 << greenshift) & greenmask) == 0) greenshift++; while (((1 << blueshift) & bluemask) == 0) blueshift++; /* The bits_per_rgb may not be correct. Use redmask instead */ maxred = 0; while (redmask >> (redshift + maxred)) maxred++; maxred = (1 << maxred) - 1; maxgreen = 0; while (greenmask >> (greenshift + maxgreen)) maxgreen++; maxgreen = (1 << maxgreen) - 1; maxblue = 0; while (bluemask >> (blueshift + maxblue)) maxblue++; maxblue = (1 << maxblue) - 1; /* Set map-arrays for red, green, blue */ for (red = 0; red <= maxred; red++) redmap[red] = (red * 255) / maxred; for (green = 0; green <= maxgreen; green++) greenmap[green] = (green * 255) / maxgreen; for (blue = 0; blue <= maxblue; blue++) bluemap[blue] = (blue * 255) / maxblue; ncols = xwdhdr->l_colormap_entries; if (xwdhdr->l_ncolors < ncols) ncols = xwdhdr->l_ncolors; set_pixelmap (ncols, xwdcolmap, &pixel_map); /* What do we have to consume after a line has finished ? */ linepad = xwdhdr->l_bytes_per_line - (xwdhdr->l_pixmap_width*xwdhdr->l_bits_per_pixel)/8; if (linepad < 0) linepad = 0; lsbyte_first = (xwdhdr->l_byte_order == 0); dest = data; scan_lines = 0; if (xwdhdr->l_bits_per_pixel == 32) { for (i = 0; i < height; i++) { for (j = 0; j < width; j++) { c0 = getc (ifp); c1 = getc (ifp); c2 = getc (ifp); c3 = getc (ifp); if (c3 < 0) { err = 1; break; } if (lsbyte_first) pixelval = c0 | (c1 << 8) | (c2 << 16) | (c3 << 24); else pixelval = (c0 << 24) | (c1 << 16) | (c2 << 8) | c3; if (get_pixelmap (pixelval, &pixel_map, dest, dest+1, dest+2)) { dest += 3; } else { *(dest++) = redmap[(pixelval & redmask) >> redshift]; *(dest++) = greenmap[(pixelval & greenmask) >> greenshift]; *(dest++) = bluemap[(pixelval & bluemask) >> blueshift]; } } scan_lines++; if (err) break; for (j = 0; j < linepad; j++) getc (ifp); if ((i % 20) == 0) gimp_progress_update ((gdouble) (i + 1) / (gdouble) height); if ((scan_lines == tile_height) || ((i+1) == height)) { gimp_pixel_rgn_set_rect (&pixel_rgn, data, 0, i-scan_lines+1, width, scan_lines); scan_lines = 0; dest = data; } } } else /* 24 bits per pixel */ { for (i = 0; i < height; i++) { for (j = 0; j < width; j++) { c0 = getc (ifp); c1 = getc (ifp); c2 = getc (ifp); if (c2 < 0) { err = 1; break; } if (lsbyte_first) pixelval = c0 | (c1 << 8) | (c2 << 16); else pixelval = (c0 << 16) | (c1 << 8) | c2; if (get_pixelmap (pixelval, &pixel_map, dest, dest+1, dest+2)) { dest += 3; } else { *(dest++) = redmap[(pixelval & redmask) >> redshift]; *(dest++) = greenmap[(pixelval & greenmask) >> greenshift]; *(dest++) = bluemap[(pixelval & bluemask) >> blueshift]; } } scan_lines++; if (err) break; for (j = 0; j < linepad; j++) getc (ifp); if ((i % 20) == 0) gimp_progress_update ((gdouble) (i + 1) / (gdouble) height); if ((scan_lines == tile_height) || ((i+1) == height)) { gimp_pixel_rgn_set_rect (&pixel_rgn, data, 0, i-scan_lines+1, width, scan_lines); scan_lines = 0; dest = data; } } } g_free (data); if (err) g_message (_("EOF encountered on reading")); gimp_drawable_flush (drawable); return err ? -1 : image_ID; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-787'], 'message': 'Bug 687392 - Memory corruption vulnerability when reading XWD files Applied and enhanced patch from andres which makes file-xwd detect this kind of file corruption and abort loading with an error message.'</s>
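A loader can only be as safe as its header validation; sizes derived from untrusted fields must be vetted before any buffer they size is written. A hedged sketch of the kind of plausibility check involved (this is not the actual GIMP patch; types and thresholds are illustrative):

#include <limits.h>

struct xwd_hdr_sketch {
    unsigned long width, height, bits_per_pixel, bytes_per_line;
};

/* Reject headers whose fields are inconsistent instead of letting the
 * decode loops index past the buffers sized from them. */
static int xwd_header_plausible(const struct xwd_hdr_sketch *h)
{
    if (h->width == 0 || h->height == 0)
        return 0;
    if (h->bits_per_pixel != 24 && h->bits_per_pixel != 32)
        return 0;
    /* Guard the multiplication, then require a full row per line. */
    if (h->width > ULONG_MAX / h->bits_per_pixel)
        return 0;
    if (h->bytes_per_line < h->width * h->bits_per_pixel / 8)
        return 0;
    return 1;
}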
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: xmlParseAttValueComplex(xmlParserCtxtPtr ctxt, int *attlen, int normalize) { xmlChar limit = 0; xmlChar *buf = NULL; xmlChar *rep = NULL; size_t len = 0; size_t buf_size = 0; int c, l, in_space = 0; xmlChar *current = NULL; xmlEntityPtr ent; if (NXT(0) == '"') { ctxt->instate = XML_PARSER_ATTRIBUTE_VALUE; limit = '"'; NEXT; } else if (NXT(0) == '\'') { limit = '\''; ctxt->instate = XML_PARSER_ATTRIBUTE_VALUE; NEXT; } else { xmlFatalErr(ctxt, XML_ERR_ATTRIBUTE_NOT_STARTED, NULL); return(NULL); } /* * allocate a translation buffer. */ buf_size = XML_PARSER_BUFFER_SIZE; buf = (xmlChar *) xmlMallocAtomic(buf_size); if (buf == NULL) goto mem_error; /* * OK loop until we reach one of the ending char or a size limit. */ c = CUR_CHAR(l); while (((NXT(0) != limit) && /* checked */ (IS_CHAR(c)) && (c != '<')) && (ctxt->instate != XML_PARSER_EOF)) { /* * Impose a reasonable limit on attribute size, unless XML_PARSE_HUGE * special option is given */ if ((len > XML_MAX_TEXT_LENGTH) && ((ctxt->options & XML_PARSE_HUGE) == 0)) { xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED, "AttValue lenght too long\n"); goto mem_error; } if (c == 0) break; if (c == '&') { in_space = 0; if (NXT(1) == '#') { int val = xmlParseCharRef(ctxt); if (val == '&') { if (ctxt->replaceEntities) { if (len + 10 > buf_size) { growBuffer(buf, 10); } buf[len++] = '&'; } else { /* * The reparsing will be done in xmlStringGetNodeList() * called by the attribute() function in SAX.c */ if (len + 10 > buf_size) { growBuffer(buf, 10); } buf[len++] = '&'; buf[len++] = '#'; buf[len++] = '3'; buf[len++] = '8'; buf[len++] = ';'; } } else if (val != 0) { if (len + 10 > buf_size) { growBuffer(buf, 10); } len += xmlCopyChar(0, &buf[len], val); } } else { ent = xmlParseEntityRef(ctxt); ctxt->nbentities++; if (ent != NULL) ctxt->nbentities += ent->owner; if ((ent != NULL) && (ent->etype == XML_INTERNAL_PREDEFINED_ENTITY)) { if (len + 10 > buf_size) { growBuffer(buf, 10); } if ((ctxt->replaceEntities == 0) && (ent->content[0] == '&')) { buf[len++] = '&'; buf[len++] = '#'; buf[len++] = '3'; buf[len++] = '8'; buf[len++] = ';'; } else { buf[len++] = ent->content[0]; } } else if ((ent != NULL) && (ctxt->replaceEntities != 0)) { if (ent->etype != XML_INTERNAL_PREDEFINED_ENTITY) { rep = xmlStringDecodeEntities(ctxt, ent->content, XML_SUBSTITUTE_REF, 0, 0, 0); if (rep != NULL) { current = rep; while (*current != 0) { /* non input consuming */ if ((*current == 0xD) || (*current == 0xA) || (*current == 0x9)) { buf[len++] = 0x20; current++; } else buf[len++] = *current++; if (len + 10 > buf_size) { growBuffer(buf, 10); } } xmlFree(rep); rep = NULL; } } else { if (len + 10 > buf_size) { growBuffer(buf, 10); } if (ent->content != NULL) buf[len++] = ent->content[0]; } } else if (ent != NULL) { int i = xmlStrlen(ent->name); const xmlChar *cur = ent->name; /* * This may look absurd but is needed to detect * entities problems */ if ((ent->etype != XML_INTERNAL_PREDEFINED_ENTITY) && (ent->content != NULL)) { rep = xmlStringDecodeEntities(ctxt, ent->content, XML_SUBSTITUTE_REF, 0, 0, 0); if (rep != NULL) { xmlFree(rep); rep = NULL; } } /* * Just output the reference */ buf[len++] = '&'; while (len + i + 10 > buf_size) { growBuffer(buf, i + 10); } for (;i > 0;i--) buf[len++] = *cur++; buf[len++] = ';'; } } } else { if ((c == 0x20) || (c == 0xD) || (c == 0xA) || (c == 0x9)) { if ((len != 0) || (!normalize)) { if ((!normalize) || (!in_space)) { COPY_BUF(l,buf,len,0x20); while (len + 10 > buf_size) { growBuffer(buf, 10); } } in_space = 1; } } else { in_space = 0; COPY_BUF(l,buf,len,c); if (len + 10 > buf_size) { growBuffer(buf, 10); } } NEXTL(l); } GROW; c = CUR_CHAR(l); } if (ctxt->instate == XML_PARSER_EOF) goto error; if ((in_space) && (normalize)) { while (buf[len - 1] == 0x20) len--; } buf[len] = 0; if (RAW == '<') { xmlFatalErr(ctxt, XML_ERR_LT_IN_ATTRIBUTE, NULL); } else if (RAW != limit) { if ((c != 0) && (!IS_CHAR(c))) { xmlFatalErrMsg(ctxt, XML_ERR_INVALID_CHAR, "invalid character in attribute value\n"); } else { xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED, "AttValue: ' expected\n"); } } else NEXT; /* * There we potentially risk an overflow, don't allow attribute value of * lenght more than INT_MAX it is a very reasonnable assumption ! */ if (len >= INT_MAX) { xmlFatalErrMsg(ctxt, XML_ERR_ATTRIBUTE_NOT_FINISHED, "AttValue lenght too long\n"); goto mem_error; } if (attlen != NULL) *attlen = (int) len; return(buf); mem_error: xmlErrMemory(ctxt, NULL); error: if (buf != NULL) xmlFree(buf); if (rep != NULL) xmlFree(rep); return(NULL); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119'], 'message': 'Fix potential out of bound access'</s>
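The out-of-bound writes in that parser come from paths that append a few bytes first and call growBuffer() afterwards. The safe ordering, extracted into a tiny helper (names are mine, not libxml2's):

#include <stdint.h>
#include <stdlib.h>

/* Guarantee at least 'need' free bytes BEFORE the caller writes. */
static int ensure_room(char **buf, size_t *cap, size_t len, size_t need)
{
    char *tmp;
    size_t newcap;

    if (len + need <= *cap)
        return 0;
    if (*cap > (SIZE_MAX - need) / 2)
        return -1;               /* the growth arithmetic itself must not wrap */
    newcap = *cap * 2 + need;
    tmp = realloc(*buf, newcap);
    if (tmp == NULL)
        return -1;
    *buf = tmp;
    *cap = newcap;
    return 0;
}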
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void dd_sanitize_mode_and_owner(struct dump_dir *dd) { /* Don't sanitize if we aren't run under root: * we assume that during file creation (by whatever means, * even by "hostname >file" in abrt_event.conf) * normal umask-based mode setting takes care of correct mode, * and uid:gid is, of course, set to user's uid and gid. * * For root operating on /var/spool/abrt/USERS_PROBLEM, this isn't true: * "hostname >file", for example, would create file OWNED BY ROOT! * This routine resets mode and uid:gid for all such files. */ if (dd->dd_uid == (uid_t)-1) return; if (!dd->locked) error_msg_and_die("dump_dir is not opened"); /* bug */ DIR *d = opendir(dd->dd_dirname); if (!d) return; struct dirent *dent; while ((dent = readdir(d)) != NULL) { if (dent->d_name[0] == '.') /* ".lock", ".", ".."? skip */ continue; char *full_path = concat_path_file(dd->dd_dirname, dent->d_name); struct stat statbuf; if (lstat(full_path, &statbuf) == 0 && S_ISREG(statbuf.st_mode)) { if ((statbuf.st_mode & 0777) != dd->mode) chmod(full_path, dd->mode); if (statbuf.st_uid != dd->dd_uid || statbuf.st_gid != dd->dd_gid) { if (chown(full_path, dd->dd_uid, dd->dd_gid) != 0) { perror_msg("Can't change '%s' ownership to %lu:%lu", full_path, (long)dd->dd_uid, (long)dd->dd_gid); } } } free(full_path); } closedir(d); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-264'], 'message': 'never follow symlinks rhbz#887866 - use lchown instead chown and O_NOFOLLOW where we use open'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static bool not_valid_time_file(const char *filename) { /* Open input file, and parse it. */ int fd = open(filename, O_RDONLY); if (fd < 0) { VERB2 perror_msg("Can't open '%s'", filename); return true; } /* ~ maximal number of digits for positive time stamp string*/ char time_buf[sizeof(time_t) * 3 + 1]; ssize_t rdsz = read(fd, time_buf, sizeof(time_buf)); /* Just reading, so no need to check the returned value. */ close(fd); if (rdsz == -1) { VERB2 perror_msg("Can't read from '%s'", filename); return true; } /* approximate maximal number of digits in timestamp is sizeof(time_t)*3 */ /* buffer has this size + 1 byte for trailing '\0' */ /* if whole size of buffer was read then file is bigger */ /* than string representing maximal time stamp */ if (rdsz == sizeof(time_buf)) { VERB2 log("File '%s' is too long to be valid unix " "time stamp (max size %zdB)", filename, sizeof(time_buf)); return true; } /* Our tools don't put trailing newline into time file, * but we allow such format too: */ if (rdsz > 0 && time_buf[rdsz - 1] == '\n') rdsz--; time_buf[rdsz] = '\0'; /* the value should fit to timestamp number range because of file size */ /* condition above, hence check if the value string consists only from digits */ if (!isdigit_str(time_buf)) { VERB2 log("File '%s' doesn't contain valid unix " "time stamp ('%s')", filename, time_buf); return true; } return false; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-264'], 'message': 'never follow symlinks rhbz#887866 - use lchown instead chown and O_NOFOLLOW where we use open'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static char *load_text_file(const char *path, unsigned flags) { FILE *fp = fopen(path, "r"); if (!fp) { if (!(flags & DD_FAIL_QUIETLY_ENOENT)) perror_msg("Can't open file '%s'", path); return (flags & DD_LOAD_TEXT_RETURN_NULL_ON_FAILURE ? NULL : xstrdup("")); } struct strbuf *buf_content = strbuf_new(); int oneline = 0; int ch; while ((ch = fgetc(fp)) != EOF) { //TODO? \r -> \n? //TODO? strip trailing spaces/tabs? if (ch == '\n') oneline = (oneline << 1) | 1; if (ch == '\0') ch = ' '; if (isspace(ch) || ch >= ' ') /* used !iscntrl, but it failed on unicode */ strbuf_append_char(buf_content, ch); } fclose(fp); char last = oneline != 0 ? buf_content->buf[buf_content->len - 1] : 0; if (last == '\n') { /* If file contains exactly one '\n' and it is at the end, remove it. * This enables users to use simple "echo blah >file" in order to create * short string items in dump dirs. */ if (oneline == 1) buf_content->buf[--buf_content->len] = '\0'; } else /* last != '\n' */ { /* Last line is unterminated, fix it */ /* Cases: */ /* oneline=0: "qwe" - DONT fix this! */ /* oneline=1: "qwe\nrty" - two lines in fact */ /* oneline>1: "qwe\nrty\uio" */ if (oneline >= 1) strbuf_append_char(buf_content, '\n'); } return strbuf_free_nobuf(buf_content); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-264'], 'message': 'never follow symlinks rhbz#887866 - use lchown instead chown and O_NOFOLLOW where we use open'</s>
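All three abrt records above share one fix: never operate on a path a user may have swapped for a symlink. O_NOFOLLOW makes open() fail with ELOOP on a symlink, and once a descriptor is held, fchmod()/fchown() act on the object actually opened; lchown() is the path-based variant that refuses to dereference. A compilable sketch (the helper name is mine):

#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

/* Sanitize one dump-dir entry without ever following a planted symlink. */
static int sanitize_entry(const char *path, uid_t uid, gid_t gid, mode_t mode)
{
    int fd = open(path, O_RDONLY | O_NOFOLLOW);

    if (fd < 0)
        return -1;               /* ELOOP here means someone planted a link */
    if (fchmod(fd, mode) != 0 || fchown(fd, uid, gid) != 0) {
        close(fd);
        return -1;
    }
    return close(fd);
}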
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: e1000_receive(NetClientState *nc, const uint8_t *buf, size_t size) { E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque; struct e1000_rx_desc desc; dma_addr_t base; unsigned int n, rdt; uint32_t rdh_start; uint16_t vlan_special = 0; uint8_t vlan_status = 0, vlan_offset = 0; uint8_t min_buf[MIN_BUF_SIZE]; size_t desc_offset; size_t desc_size; size_t total_size; if (!(s->mac_reg[RCTL] & E1000_RCTL_EN)) return -1; /* Pad to minimum Ethernet frame length */ if (size < sizeof(min_buf)) { memcpy(min_buf, buf, size); memset(&min_buf[size], 0, sizeof(min_buf) - size); buf = min_buf; size = sizeof(min_buf); } /* Discard oversized packets if !LPE and !SBP. */ if (size > MAXIMUM_ETHERNET_VLAN_SIZE && !(s->mac_reg[RCTL] & E1000_RCTL_LPE) && !(s->mac_reg[RCTL] & E1000_RCTL_SBP)) { return size; } if (!receive_filter(s, buf, size)) return size; if (vlan_enabled(s) && is_vlan_packet(s, buf)) { vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(buf + 14))); memmove((uint8_t *)buf + 4, buf, 12); vlan_status = E1000_RXD_STAT_VP; vlan_offset = 4; size -= 4; } rdh_start = s->mac_reg[RDH]; desc_offset = 0; total_size = size + fcs_len(s); if (!e1000_has_rxbufs(s, total_size)) { set_ics(s, 0, E1000_ICS_RXO); return -1; } do { desc_size = total_size - desc_offset; if (desc_size > s->rxbuf_size) { desc_size = s->rxbuf_size; } base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH]; pci_dma_read(&s->dev, base, &desc, sizeof(desc)); desc.special = vlan_special; desc.status |= (vlan_status | E1000_RXD_STAT_DD); if (desc.buffer_addr) { if (desc_offset < size) { size_t copy_size = size - desc_offset; if (copy_size > s->rxbuf_size) { copy_size = s->rxbuf_size; } pci_dma_write(&s->dev, le64_to_cpu(desc.buffer_addr), buf + desc_offset + vlan_offset, copy_size); } desc_offset += desc_size; desc.length = cpu_to_le16(desc_size); if (desc_offset >= total_size) { desc.status |= E1000_RXD_STAT_EOP | E1000_RXD_STAT_IXSM; } else { /* Guest zeroing out status is not a hardware requirement. Clear EOP in case guest didn't do it. */ desc.status &= ~E1000_RXD_STAT_EOP; } } else { // as per intel docs; skip descriptors with null buf addr DBGOUT(RX, "Null RX descriptor!!\n"); } pci_dma_write(&s->dev, base, &desc, sizeof(desc)); if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN]) s->mac_reg[RDH] = 0; /* see comment in start_xmit; same here */ if (s->mac_reg[RDH] == rdh_start) { DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n", rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]); set_ics(s, 0, E1000_ICS_RXO); return -1; } } while (desc_offset < total_size); s->mac_reg[GPRC]++; s->mac_reg[TPR]++; /* TOR - Total Octets Received: * This register includes bytes received in a packet from the <Destination * Address> field through the <CRC> field, inclusively. */ n = s->mac_reg[TORL] + size + /* Always include FCS length. */ 4; if (n < s->mac_reg[TORL]) s->mac_reg[TORH]++; s->mac_reg[TORL] = n; n = E1000_ICS_RXT0; if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH]) rdt += s->mac_reg[RDLEN] / sizeof(desc); if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >> s->rxbuf_min_shift) n |= E1000_ICS_RXDMT0; set_ics(s, 0, n); return size; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-120'], 'message': 'e1000: Discard oversized packets based on SBP|LPE Discard packets longer than 16384 when !SBP to match the hardware behavior. Signed-off-by: Michael Contreras <michael@inetric.com> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>'</s>
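The guard from that commit message, restated: with SBP (store bad packets) everything passes; otherwise frames above the VLAN maximum need LPE, and nothing above the 16384-byte hardware ceiling is ever accepted. Constants per the commit; the helper name is mine:

#include <stddef.h>

#define ETH_VLAN_FRAME_MAX 1522    /* MAXIMUM_ETHERNET_VLAN_SIZE */
#define ETH_HW_FRAME_MAX   16384   /* what the real e1000 will ever take */

static int should_discard(size_t size, int lpe, int sbp)
{
    if (sbp)
        return 0;
    if (size > ETH_VLAN_FRAME_MAX && !lpe)
        return 1;
    return size > ETH_HW_FRAME_MAX;
}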
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: transit_state_consume_1char (struct dfa *d, int s, unsigned char const **pp, int *match_lens, int *mbclen, position_set *pps) { int i, j; int s1, s2; int* work_mbls; status_transit_state rs = TRANSIT_STATE_DONE; /* Calculate the length of the (single/multi byte) character to which p points. */ *mbclen = (mblen_buf[*pp - buf_begin] == 0)? 1 : mblen_buf[*pp - buf_begin]; /* Calculate the state which can be reached from the state `s' by consuming `*mbclen' single bytes from the buffer. */ s1 = s; for (i = 0; i < *mbclen; i++) { s2 = s1; rs = transit_state_singlebyte(d, s2, (*pp)++, &s1); } /* Copy the positions contained by `s1' to the set `pps'. */ copy(&(d->states[s1].elems), pps); /* Check (inputed)match_lens, and initialize if it is NULL. */ if (match_lens == NULL && d->states[s].mbps.nelem != 0) work_mbls = check_matching_with_multibyte_ops(d, s, *pp - buf_begin); else work_mbls = match_lens; /* Add all of the positions which can be reached from `s' by consuming a single character. */ for (i = 0; i < d->states[s].mbps.nelem ; i++) { if (work_mbls[i] == *mbclen) for (j = 0; j < d->follows[d->states[s].mbps.elems[i].index].nelem; j++) insert(d->follows[d->states[s].mbps.elems[i].index].elems[j], pps); } if (match_lens == NULL && work_mbls != NULL) free(work_mbls); return rs; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-189'], 'message': 'grep: fix some core dumps with long lines etc. These problems mostly occur because the code attempts to stuff sizes into int or into unsigned int; this doesn't work on most 64-bit hosts and the errors can lead to core dumps. * NEWS: Document this. * src/dfa.c (token): Typedef to ptrdiff_t, since the enum's range could be as small as -128 .. 127 on practical hosts. (position.index): Now size_t, not unsigned int. (leaf_set.elems): Now size_t *, not unsigned int *. (dfa_state.hash, struct mb_char_classes.nchars, .nch_classes) (.nranges, .nequivs, .ncoll_elems, struct dfa.cindex, .calloc, .tindex) (.talloc, .depth, .nleaves, .nregexps, .nmultibyte_prop, .nmbcsets): (.mbcsets_alloc): Now size_t, not int. (dfa_state.first_end): Now token, not int. (state_num): New type. (struct mb_char_classes.cset): Now ptrdiff_t, not int. (struct dfa.utf8_anychar_classes): Now token[5], not int[5]. (struct dfa.sindex, .salloc, .tralloc): Now state_num, not int. (struct dfa.trans, .realtrans, .fails): Now state_num **, not int **. (struct dfa.newlines): Now state_num *, not int *. (prtok): Don't assume 'token' is no wider than int. (lexleft, parens, depth): Now size_t, not int. (charclass_index, nsubtoks) (parse_bracket_exp, addtok, copytoks, closure, insert, merge, delete) (state_index, epsclosure, state_separate_contexts) (dfaanalyze, dfastate, build_state, realloc_trans_if_necessary) (transit_state_singlebyte, match_anychar, match_mb_charset) (check_matching_with_multibyte_ops, transit_state_consume_1char) (transit_state, dfaexec, free_mbdata, dfaoptimize, dfafree) (freelist, enlist, addlists, inboth, dfamust): Don't assume indexes fit in 'int'. (lex): Avoid overflow in string-to-{hi,lo} conversions. (dfaanalyze): Redo indexing so that it works with size_t values, which cannot go negative. * src/dfa.h (dfaexec): Count argument is now size_t *, not int *. (dfastate): State numbers are now ptrdiff_t, not int. * src/dfasearch.c: Include "intprops.h", for TYPE_MAXIMUM. (kwset_exact_matches): Now size_t, not int. (EGexecute): Don't assume indexes fit in 'int'. Check for overflow before converting a ptrdiff_t to a regoff_t, as regoff_t is narrower than ptrdiff_t in 64-bit glibc (contra POSIX). Check for memory exhaustion in re_search rather than treating it merely as failure to match; use xalloc_die () to report any error. * src/kwset.c (struct trie.accepting): Now size_t, not unsigned int. (struct kwset.words): Now ptrdiff_t, not int. * src/kwset.h (struct kwsmatch.index): Now size_t, not int.'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: dfafree (struct dfa *d) { int i; struct dfamust *dm, *ndm; free(d->charclasses); free(d->tokens); if (d->mb_cur_max > 1) free_mbdata(d); for (i = 0; i < d->sindex; ++i) { free(d->states[i].elems.elems); if (MBS_SUPPORT) free(d->states[i].mbps.elems); } free(d->states); for (i = 0; i < d->tindex; ++i) free(d->follows[i].elems); free(d->follows); for (i = 0; i < d->tralloc; ++i) { free(d->trans[i]); free(d->fails[i]); } free(d->realtrans); free(d->fails); free(d->newlines); free(d->success); for (dm = d->musts; dm; dm = ndm) { ndm = dm->next; free(dm->must); free(dm); } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-189'], 'message': 'grep: fix some core dumps with long lines etc. These problems mostly occur because the code attempts to stuff sizes into int or into unsigned int; this doesn't work on most 64-bit hosts and the errors can lead to core dumps. * NEWS: Document this. * src/dfa.c (token): Typedef to ptrdiff_t, since the enum's range could be as small as -128 .. 127 on practical hosts. (position.index): Now size_t, not unsigned int. (leaf_set.elems): Now size_t *, not unsigned int *. (dfa_state.hash, struct mb_char_classes.nchars, .nch_classes) (.nranges, .nequivs, .ncoll_elems, struct dfa.cindex, .calloc, .tindex) (.talloc, .depth, .nleaves, .nregexps, .nmultibyte_prop, .nmbcsets): (.mbcsets_alloc): Now size_t, not int. (dfa_state.first_end): Now token, not int. (state_num): New type. (struct mb_char_classes.cset): Now ptrdiff_t, not int. (struct dfa.utf8_anychar_classes): Now token[5], not int[5]. (struct dfa.sindex, .salloc, .tralloc): Now state_num, not int. (struct dfa.trans, .realtrans, .fails): Now state_num **, not int **. (struct dfa.newlines): Now state_num *, not int *. (prtok): Don't assume 'token' is no wider than int. (lexleft, parens, depth): Now size_t, not int. (charclass_index, nsubtoks) (parse_bracket_exp, addtok, copytoks, closure, insert, merge, delete) (state_index, epsclosure, state_separate_contexts) (dfaanalyze, dfastate, build_state, realloc_trans_if_necessary) (transit_state_singlebyte, match_anychar, match_mb_charset) (check_matching_with_multibyte_ops, transit_state_consume_1char) (transit_state, dfaexec, free_mbdata, dfaoptimize, dfafree) (freelist, enlist, addlists, inboth, dfamust): Don't assume indexes fit in 'int'. (lex): Avoid overflow in string-to-{hi,lo} conversions. (dfaanalyze): Redo indexing so that it works with size_t values, which cannot go negative. * src/dfa.h (dfaexec): Count argument is now size_t *, not int *. (dfastate): State numbers are now ptrdiff_t, not int. * src/dfasearch.c: Include "intprops.h", for TYPE_MAXIMUM. (kwset_exact_matches): Now size_t, not int. (EGexecute): Don't assume indexes fit in 'int'. Check for overflow before converting a ptrdiff_t to a regoff_t, as regoff_t is narrower than ptrdiff_t in 64-bit glibc (contra POSIX). Check for memory exhaustion in re_search rather than treating it merely as failure to match; use xalloc_die () to report any error. * src/kwset.c (struct trie.accepting): Now size_t, not unsigned int. (struct kwset.words): Now ptrdiff_t, not int. * src/kwset.h (struct kwsmatch.index): Now size_t, not int.'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code:

prtok (token t)
{
  char const *s;

  if (t < 0)
    fprintf(stderr, "END");
  else if (t < NOTCHAR)
    fprintf(stderr, "%c", t);
  else
    {
      switch (t)
        {
        case EMPTY: s = "EMPTY"; break;
        case BACKREF: s = "BACKREF"; break;
        case BEGLINE: s = "BEGLINE"; break;
        case ENDLINE: s = "ENDLINE"; break;
        case BEGWORD: s = "BEGWORD"; break;
        case ENDWORD: s = "ENDWORD"; break;
        case LIMWORD: s = "LIMWORD"; break;
        case NOTLIMWORD: s = "NOTLIMWORD"; break;
        case QMARK: s = "QMARK"; break;
        case STAR: s = "STAR"; break;
        case PLUS: s = "PLUS"; break;
        case CAT: s = "CAT"; break;
        case OR: s = "OR"; break;
        case LPAREN: s = "LPAREN"; break;
        case RPAREN: s = "RPAREN"; break;
        case ANYCHAR: s = "ANYCHAR"; break;
        case MBCSET: s = "MBCSET"; break;
        default: s = "CSET"; break;
        }
      fprintf(stderr, "%s", s);
    }
}

; Sample JSON format conforming to the expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-189'], 'message': 'grep: fix some core dumps with long lines etc. These problems mostly occur because the code attempts to stuff sizes into int or into unsigned int; this doesn't work on most 64-bit hosts and the errors can lead to core dumps. * NEWS: Document this. * src/dfa.c (token): Typedef to ptrdiff_t, since the enum's range could be as small as -128 .. 127 on practical hosts. (position.index): Now size_t, not unsigned int. (leaf_set.elems): Now size_t *, not unsigned int *. (dfa_state.hash, struct mb_char_classes.nchars, .nch_classes) (.nranges, .nequivs, .ncoll_elems, struct dfa.cindex, .calloc, .tindex) (.talloc, .depth, .nleaves, .nregexps, .nmultibyte_prop, .nmbcsets): (.mbcsets_alloc): Now size_t, not int. (dfa_state.first_end): Now token, not int. (state_num): New type. (struct mb_char_classes.cset): Now ptrdiff_t, not int. (struct dfa.utf8_anychar_classes): Now token[5], not int[5]. (struct dfa.sindex, .salloc, .tralloc): Now state_num, not int. (struct dfa.trans, .realtrans, .fails): Now state_num **, not int **. (struct dfa.newlines): Now state_num *, not int *. (prtok): Don't assume 'token' is no wider than int. (lexleft, parens, depth): Now size_t, not int. (charclass_index, nsubtoks) (parse_bracket_exp, addtok, copytoks, closure, insert, merge, delete) (state_index, epsclosure, state_separate_contexts) (dfaanalyze, dfastate, build_state, realloc_trans_if_necessary) (transit_state_singlebyte, match_anychar, match_mb_charset) (check_matching_with_multibyte_ops, transit_state_consume_1char) (transit_state, dfaexec, free_mbdata, dfaoptimize, dfafree) (freelist, enlist, addlists, inboth, dfamust): Don't assume indexes fit in 'int'. (lex): Avoid overflow in string-to-{hi,lo} conversions. (dfaanalyze): Redo indexing so that it works with size_t values, which cannot go negative. * src/dfa.h (dfaexec): Count argument is now size_t *, not int *. (dfastate): State numbers are now ptrdiff_t, not int. * src/dfasearch.c: Include "intprops.h", for TYPE_MAXIMUM. (kwset_exact_matches): Now size_t, not int. (EGexecute): Don't assume indexes fit in 'int'. Check for overflow before converting a ptrdiff_t to a regoff_t, as regoff_t is narrower than ptrdiff_t in 64-bit glibc (contra POSIX). Check for memory exhaustion in re_search rather than treating it merely as failure to match; use xalloc_die () to report any error. * src/kwset.c (struct trie.accepting): Now size_t, not unsigned int. (struct kwset.words): Now ptrdiff_t, not int. * src/kwset.h (struct kwsmatch.index): Now size_t, not int.'</s>
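For prtok specifically, the message's "Don't assume 'token' is no wider than int" points at fprintf(stderr, "%c", t): %c consumes an int through the default argument promotions, so once token becomes ptrdiff_t the call passes a wider value than the format reads. A sketch of the safe form (print_token_char is an illustrative name, not from the patch):

    #include <stddef.h>
    #include <stdio.h>

    typedef ptrdiff_t token;   /* the post-fix definition from the message */

    static void
    print_token_char (token t)
    {
      /* fprintf (stderr, "%c", t) reads an int from the argument list;
         passing a wider ptrdiff_t is a format/argument mismatch.  Cast
         the value (already known to be < NOTCHAR, i.e. a byte) first. */
      fprintf (stderr, "%c", (int) t);
    }

    int
    main (void)
    {
      print_token_char ((token) 'A');   /* prints: A */
      return 0;
    }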
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code:

freelist (char **cpp)
{
  int i;

  if (cpp == NULL)
    return;
  for (i = 0; cpp[i] != NULL; ++i)
    {
      free(cpp[i]);
      cpp[i] = NULL;
    }
}

; Sample JSON format conforming to the expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-189'], 'message': 'grep: fix some core dumps with long lines etc. These problems mostly occur because the code attempts to stuff sizes into int or into unsigned int; this doesn't work on most 64-bit hosts and the errors can lead to core dumps. * NEWS: Document this. * src/dfa.c (token): Typedef to ptrdiff_t, since the enum's range could be as small as -128 .. 127 on practical hosts. (position.index): Now size_t, not unsigned int. (leaf_set.elems): Now size_t *, not unsigned int *. (dfa_state.hash, struct mb_char_classes.nchars, .nch_classes) (.nranges, .nequivs, .ncoll_elems, struct dfa.cindex, .calloc, .tindex) (.talloc, .depth, .nleaves, .nregexps, .nmultibyte_prop, .nmbcsets): (.mbcsets_alloc): Now size_t, not int. (dfa_state.first_end): Now token, not int. (state_num): New type. (struct mb_char_classes.cset): Now ptrdiff_t, not int. (struct dfa.utf8_anychar_classes): Now token[5], not int[5]. (struct dfa.sindex, .salloc, .tralloc): Now state_num, not int. (struct dfa.trans, .realtrans, .fails): Now state_num **, not int **. (struct dfa.newlines): Now state_num *, not int *. (prtok): Don't assume 'token' is no wider than int. (lexleft, parens, depth): Now size_t, not int. (charclass_index, nsubtoks) (parse_bracket_exp, addtok, copytoks, closure, insert, merge, delete) (state_index, epsclosure, state_separate_contexts) (dfaanalyze, dfastate, build_state, realloc_trans_if_necessary) (transit_state_singlebyte, match_anychar, match_mb_charset) (check_matching_with_multibyte_ops, transit_state_consume_1char) (transit_state, dfaexec, free_mbdata, dfaoptimize, dfafree) (freelist, enlist, addlists, inboth, dfamust): Don't assume indexes fit in 'int'. (lex): Avoid overflow in string-to-{hi,lo} conversions. (dfaanalyze): Redo indexing so that it works with size_t values, which cannot go negative. * src/dfa.h (dfaexec): Count argument is now size_t *, not int *. (dfastate): State numbers are now ptrdiff_t, not int. * src/dfasearch.c: Include "intprops.h", for TYPE_MAXIMUM. (kwset_exact_matches): Now size_t, not int. (EGexecute): Don't assume indexes fit in 'int'. Check for overflow before converting a ptrdiff_t to a regoff_t, as regoff_t is narrower than ptrdiff_t in 64-bit glibc (contra POSIX). Check for memory exhaustion in re_search rather than treating it merely as failure to match; use xalloc_die () to report any error. * src/kwset.c (struct trie.accepting): Now size_t, not unsigned int. (struct kwset.words): Now ptrdiff_t, not int. * src/kwset.h (struct kwsmatch.index): Now size_t, not int.'</s>
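freelist's int index is the same pattern: a NULL-terminated vector with more than INT_MAX entries overflows i before the terminator is reached. The actual change widens the index per the message ("Don't assume indexes fit in 'int'"); the sketch below shows an index-free alternative with a hypothetical name:

    #include <stdlib.h>

    /* Free a NULL-terminated vector of strings without an integer
       index, so no element count can overflow the loop variable. */
    static void
    freelist_sketch (char **cpp)
    {
      if (cpp == NULL)
        return;
      for (char **p = cpp; *p != NULL; ++p)
        {
          free (*p);
          *p = NULL;
        }
    }

    int
    main (void)
    {
      char **v = malloc (3 * sizeof *v);
      v[0] = malloc (1);
      v[1] = malloc (1);
      v[2] = NULL;
      freelist_sketch (v);
      free (v);
      return 0;
    }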