target
int64
0
1
func
stringlengths
0
484k
idx
int64
1
378k
0
/* True when this query block has at least one window function registered. */
bool have_window_funcs() const
{
  return window_funcs.elements != 0;
}
377,130
0
/* True when the statement references at least one stored routine. */
bool uses_stored_routines() const
{
  return sroutines_list.elements != 0;
}
377,132
0
/*
  Begin parsing CREATE [AGGREGATE] FUNCTION for a stored function:
  first validate the generic CREATE options, then build a fresh,
  non-recursive sp_head for the named function with the function
  handler and the requested aggregate type.
  Returns true on error (options rejected or sp_head creation failed).
*/
bool LEX::stmt_create_stored_function_start(const DDL_options_st &options, enum_sp_aggregate_type agg_type, const sp_name *spname) { if (stmt_create_function_start(options) || unlikely(!make_sp_head_no_recursive(thd, spname, &sp_handler_function, agg_type))) return true; return false; }
377,134
0
/* Copy all stored-routine characteristics from another instance. */
void set(const st_sp_chistics &other) { *this= other; }
377,136
0
/*
  Mark the derived table of this unit as materializable-in-one-row
  ("filled by me") when it is known to be constant.  Skipped under
  EXPLAIN, for merged derived tables, for set-operation units, and for
  joins using two-phase optimization.  When the derived result is
  non-empty, the expected record count is bumped by one.
  NOTE(review): join may be NULL here (e.g. DELETE ... RETURNING), hence
  the guarded join-> dereferences.
*/
void SELECT_LEX::mark_const_derived(bool empty) { TABLE_LIST *derived= master_unit()->derived; /* join == NULL in DELETE ... RETURNING */ if (!(join && join->thd->lex->describe) && derived) { if (!empty) increase_derived_records(1); if (!master_unit()->is_unit_op() && !derived->is_merged_derived() && !(join && join->with_two_phase_optimization)) derived->fill_me= TRUE; } }
377,137
0
/*
  Finalize the DECLARE section of a BEGIN .. EXCEPTION .. END block by
  emitting a forward jump instruction (target patched later) that skips
  over the executable section into the EXCEPTION handlers.
  Returns true on error from add_instr_jump.
*/
bool LEX::sp_block_with_exceptions_finalize_declarations(THD *thd) { /* [ DECLARE declarations ] BEGIN executable_section [ EXCEPTION exceptions ] END We are now at the "BEGIN" keyword. We have collected all declarations, including DECLARE HANDLER directives. But there will be possibly more handlers in the EXCEPTION section. Generate a forward jump from the end of the DECLARE section to the beginning of the EXCEPTION section, over the executable section. */ return sphead->add_instr_jump(thd, spcont); }
377,138
0
/*
  Create the implicit loop variable for FOR rec IN cursor LOOP:
  adds a variable to the current parse context, types it as a
  reference to the cursor's ROWTYPE (by cursor offset), gives it a
  NULL default, and emits the "open cursor for FOR loop" instruction.
  Returns the new sp_variable, or NULL on failure.
  NOTE(review): the return type token appears to have been lost when
  this line was extracted (the body returns an sp_variable pointer) —
  confirm against the original source.
*/
LEX::sp_add_for_loop_cursor_variable(THD *thd, const LEX_CSTRING *name, const sp_pcursor *pcursor, uint coffset, sp_assignment_lex *param_lex, Item_args *parameters) { sp_variable *spvar= spcont->add_variable(thd, name); if (!spvar) return NULL; spcont->declare_var_boundary(1); sphead->fill_spvar_definition(thd, &spvar->field_def, &spvar->name); if (unlikely(!(spvar->default_value= new (thd->mem_root) Item_null(thd)))) return NULL; spvar->field_def.set_cursor_rowtype_ref(coffset); if (unlikely(sphead->add_for_loop_open_cursor(thd, spcont, spvar, pcursor, coffset, param_lex, parameters))) return NULL; spcont->declare_var_boundary(0); return spvar; }
377,139
0
/*
  True when the statement was marked as a binlog row injection,
  i.e. the ROW_INJECTION type bit (stored above the unsafe-reason
  bits) is set in binlog_stmt_flags.
*/
inline bool is_stmt_row_injection() const { return binlog_stmt_flags & (1U << (BINLOG_STMT_UNSAFE_COUNT + BINLOG_STMT_TYPE_ROW_INJECTION)); }
377,140
0
/*
  Reset per-statement state so an autocommit statement can be replayed
  after a wsrep conflict: clear errors, close tables, bump the retry
  counter, re-copy the query, reset the parser over the original buffer,
  then end and restart the PSI statement, DTRACE probe and profiling
  bracketing so the retried execution is instrumented as a new
  statement.  Finally verify no wsrep transaction is active, clear the
  wsrep client-state error and assign a fresh query id.
  NOTE(review): the end-then-begin ordering of the PSI / DTRACE /
  profiling sections is deliberate — do not reorder.
*/
static void wsrep_prepare_for_autocommit_retry(THD* thd, char* rawbuf, uint length, Parser_state* parser_state) { thd->clear_error(); close_thread_tables(thd); thd->wsrep_retry_counter++; // grow wsrep_copy_query(thd); thd->set_time(); parser_state->reset(rawbuf, length); /* PSI end */ MYSQL_END_STATEMENT(thd->m_statement_psi, thd->get_stmt_da()); thd->m_statement_psi= NULL; thd->m_digest= NULL; /* DTRACE end */ if (MYSQL_QUERY_DONE_ENABLED()) { MYSQL_QUERY_DONE(thd->is_error()); } /* SHOW PROFILE end */ #if defined(ENABLED_PROFILING) thd->profiling.finish_current_query(); #endif /* SHOW PROFILE begin */ #if defined(ENABLED_PROFILING) thd->profiling.start_new_query("continuing"); thd->profiling.set_query_source(rawbuf, length); #endif /* DTRACE begin */ MYSQL_QUERY_START(rawbuf, thd->thread_id, thd->get_db(), &thd->security_ctx->priv_user[0], (char *) thd->security_ctx->host_or_ip); /* Performance Schema Interface instrumentation, begin */ thd->m_statement_psi= MYSQL_REFINE_STATEMENT(thd->m_statement_psi, com_statement_info[thd->get_command()].m_key); MYSQL_SET_STATEMENT_TEXT(thd->m_statement_psi, thd->query(), thd->query_length()); DBUG_ASSERT(thd->wsrep_trx().active() == false); thd->wsrep_cs().reset_error(); thd->set_query_id(next_query_id()); }
377,141
0
/*
  Validate a host name: enforce the maximum byte length and reject any
  embedded '@' (which would be ambiguous in user@host notation).
  Returns TRUE on error (an error has been reported), FALSE otherwise.
*/
bool check_host_name(LEX_CSTRING *str)
{
  if (check_string_byte_length(str, ER_HOSTNAME, HOSTNAME_LENGTH))
    return TRUE;

  const char *end= str->str + str->length;
  for (const char *p= str->str; p != end; p++)
  {
    if (*p == '@')
    {
      my_printf_error(ER_UNKNOWN_ERROR,
                      "Malformed hostname (illegal symbol: '%c')", MYF(0),
                      *p);
      return TRUE;
    }
  }
  return FALSE;
}
377,143
0
/*
  Push the last character back into the lexer input; the copy-of-query
  pointer follows only while echoing is enabled.
*/
void yyUnget()
{
  --m_ptr;
  if (m_echo)
    --m_cpp_ptr;
}
377,144
0
/* Handle an inbound PPTP Echo-Reply: log it when verbose and clear the
 * outstanding-echo counter so the keepalive timer starts over. */
static int pptp_echo_rply(struct pptp_conn_t *conn)
{
	struct pptp_echo_rply *rply = (struct pptp_echo_rply *)conn->in_buf;

	if (conf_verbose)
		log_ppp_debug("recv [PPTP Echo-Reply <Identifier %x>]\n",
			      rply->identifier);

	conn->echo_sent = 0;

	return 0;
}
377,145
0
/* Build and queue a Stop-Ctrl-Conn-Reply with the given reason/error
 * codes (single-byte fields, hence hton8).  Returns post_msg's result:
 * 0 on success (possibly queued), -1 on fatal error. */
static int send_pptp_stop_ctrl_conn_rply(struct pptp_conn_t *conn, int reason, int err_code) { struct pptp_stop_ctrl_conn msg = { .header = PPTP_HEADER_CTRL(PPTP_STOP_CTRL_CONN_RPLY), .reason_result = hton8(reason), .error_code = hton8(err_code), }; if (conf_verbose) log_ppp_info2("send [PPTP Stop-Ctrl-Conn-Reply <Result %i> <Error %i>]\n", msg.reason_result, msg.error_code); return post_msg(conn, &msg, sizeof(msg)); }
377,146
0
static int post_msg(struct pptp_conn_t *conn, void *buf, int size) { int n; if (conn->out_size) { log_error("pptp: buffer is not empty\n"); return -1; } again: n=write(conn->hnd.fd, buf, size); if (n < 0) { if (errno == EINTR) goto again; else if (errno == EAGAIN) n = 0; else { if (errno != EPIPE) { if (conf_verbose) log_ppp_info2("pptp: write: %s\n", strerror(errno)); } return -1; } } if ( n<size ) { memcpy(conn->out_buf, (uint8_t *)buf + n, size - n); triton_md_enable_handler(&conn->hnd, MD_MODE_WRITE); } return 0; }
377,147
0
/* Triton context-switch hook: select the session's network namespace
 * when one is attached to the context, otherwise the default one. */
static void pptp_ctx_switch(struct triton_context_t *ctx, void *arg)
{
	struct ap_session *ses = arg;

	net = ses ? ses->net : def_net;

	log_switch(ctx, arg);
}
377,148
0
/* Tear down a control connection: stop I/O and timers, terminate the
 * PPP session if one is running (adjusting the active/starting
 * counters), fire EV_CTRL_FINISHED, unregister the context and free
 * every per-connection allocation.  The state checks keep the counter
 * bookkeeping consistent with pptp_close()/ppp_finished(). */
static void disconnect(struct pptp_conn_t *conn) { log_ppp_debug("pptp: disconnect\n"); triton_md_unregister_handler(&conn->hnd, 1); if (conn->timeout_timer.tpd) triton_timer_del(&conn->timeout_timer); if (conn->echo_timer.tpd) triton_timer_del(&conn->echo_timer); if (conn->state == STATE_PPP) { __sync_sub_and_fetch(&stat_active, 1); conn->state = STATE_CLOSE; ap_session_terminate(&conn->ppp.ses, TERM_LOST_CARRIER, 1); } else if (conn->state != STATE_CLOSE) __sync_sub_and_fetch(&stat_starting, 1); triton_event_fire(EV_CTRL_FINISHED, &conn->ppp.ses); log_ppp_info1("disconnected\n"); triton_context_unregister(&conn->ctx); if (conn->ppp.ses.chan_name) _free(conn->ppp.ses.chan_name); _free(conn->in_buf); _free(conn->out_buf); _free(conn->ctrl.calling_station_id); _free(conn->ctrl.called_station_id); mempool_free(conn); }
377,149
0
/* Shut down the listening server: drop the accept handler (closing its
 * fd) and unregister the server context. */
static void pptp_serv_close(struct triton_context_t *ctx)
{
	struct pptp_serv_t *serv_p = container_of(ctx, typeof(*serv_p), ctx);

	triton_md_unregister_handler(&serv_p->hnd, 1);
	triton_context_unregister(ctx);
}
377,150
0
/* Build and queue a Stop-Ctrl-Conn-Request carrying the given reason
 * code.  Returns post_msg's result. */
static int send_pptp_stop_ctrl_conn_rqst(struct pptp_conn_t *conn, int reason) { struct pptp_stop_ctrl_conn msg = { .header = PPTP_HEADER_CTRL(PPTP_STOP_CTRL_CONN_RQST), .reason_result = hton8(reason), }; if (conf_verbose) log_ppp_info2("send [PPTP Stop-Ctrl-Conn-Request <Reason %i>]\n", reason); return post_msg(conn, &msg, sizeof(msg)); }
377,151
0
/* Administrative close of a control connection.  If a PPP session is
 * up, terminate it and notify the peer with Call-Disconnect-Notify;
 * otherwise ask the peer to stop the control connection.  If the
 * notification cannot be sent, disconnect immediately via a deferred
 * context call; otherwise (re)arm the timeout timer so the connection
 * is reaped if the peer never answers. */
static void pptp_close(struct triton_context_t *ctx) { struct pptp_conn_t *conn = container_of(ctx, typeof(*conn), ctx); if (conn->state == STATE_PPP) { __sync_sub_and_fetch(&stat_active, 1); conn->state = STATE_CLOSE; ap_session_terminate(&conn->ppp.ses, TERM_ADMIN_RESET, 1); if (send_pptp_call_disconnect_notify(conn, 3)) { triton_context_call(&conn->ctx, (void (*)(void*))disconnect, conn); return; } } else { if (send_pptp_stop_ctrl_conn_rqst(conn, 0)) { triton_context_call(&conn->ctx, (void (*)(void*))disconnect, conn); return; } } if (conn->timeout_timer.tpd) triton_timer_mod(&conn->timeout_timer, 0); else triton_timer_add(ctx, &conn->timeout_timer, 0); }
377,152
0
static void pptp_init(void) { struct sockaddr_in addr; char *opt; int fd; fd = socket(AF_PPPOX, SOCK_STREAM, PX_PROTO_PPTP); if (fd >= 0) close(fd); else if (system("modprobe -q pptp")) log_warn("failed to load pptp kernel module\n"); serv.hnd.fd = socket(PF_INET, SOCK_STREAM, 0); if (serv.hnd.fd < 0) { log_emerg("pptp: failed to create server socket: %s\n", strerror(errno)); return; } fcntl(serv.hnd.fd, F_SETFD, fcntl(serv.hnd.fd, F_GETFD) | FD_CLOEXEC); addr.sin_family = AF_INET; opt = conf_get_opt("pptp", "bind"); if (opt) addr.sin_addr.s_addr = inet_addr(opt); else addr.sin_addr.s_addr = htonl(INADDR_ANY); opt = conf_get_opt("pptp", "port"); if (opt && atoi(opt) > 0) addr.sin_port = htons(atoi(opt)); else addr.sin_port = htons(PPTP_PORT); setsockopt(serv.hnd.fd, SOL_SOCKET, SO_REUSEADDR, &serv.hnd.fd, 4); if (bind (serv.hnd.fd, (struct sockaddr *) &addr, sizeof (addr)) < 0) { log_emerg("pptp: failed to bind socket: %s\n", strerror(errno)); close(serv.hnd.fd); return; } if (listen (serv.hnd.fd, 100) < 0) { log_emerg("pptp: failed to listen socket: %s\n", strerror(errno)); close(serv.hnd.fd); return; } if (fcntl(serv.hnd.fd, F_SETFL, O_NONBLOCK)) { log_emerg("pptp: failed to set nonblocking mode: %s\n", strerror(errno)); close(serv.hnd.fd); return; } conn_pool = mempool_create(sizeof(struct pptp_conn_t)); load_config(); triton_context_register(&serv.ctx, NULL); triton_md_register_handler(&serv.ctx, &serv.hnd); triton_md_enable_handler(&serv.hnd, MD_MODE_READ); triton_context_wakeup(&serv.ctx); cli_register_simple_cmd2(show_stat_exec, NULL, 2, "show", "stat"); triton_event_register_handler(EV_CONFIG_RELOAD, (triton_event_func)load_config); }
377,153
0
/* Build and queue a Start-Ctrl-Conn-Reply advertising our version,
 * capabilities, hostname and vendor strings (both NUL-padded to the
 * fixed field width).  result_code/error_code are single-byte fields
 * and are not byte-swapped.  Returns post_msg's result. */
static int send_pptp_start_ctrl_conn_rply(struct pptp_conn_t *conn, int res_code, int err_code) { struct pptp_start_ctrl_conn msg = { .header = PPTP_HEADER_CTRL(PPTP_START_CTRL_CONN_RPLY), .version = htons(PPTP_VERSION), .result_code = res_code, .error_code = err_code, .framing_cap = htonl(PPTP_FRAME_ANY), .bearer_cap = htonl(PPTP_BEARER_ANY), .max_channels = htons(1), .firmware_rev = htons(PPTP_FIRMWARE_VERSION), }; memset(msg.hostname, 0, sizeof(msg.hostname)); strcpy((char*)msg.hostname, PPTP_HOSTNAME); memset(msg.vendor, 0, sizeof(msg.vendor)); strcpy((char*)msg.vendor, PPTP_VENDOR); if (conf_verbose) log_ppp_info2("send [PPTP Start-Ctrl-Conn-Reply <Version %i> <Result %i> <Error %i> <Framing %x> <Bearer %x> <Max-Chan %i>]\n", msg.version, msg.result_code, msg.error_code, ntohl(msg.framing_cap), ntohl(msg.bearer_cap), ntohs(msg.max_channels)); return post_msg(conn, &msg, sizeof(msg)); }
377,154
0
static int pptp_stop_ctrl_conn_rply(struct pptp_conn_t *conn) { struct pptp_stop_ctrl_conn *msg = (struct pptp_stop_ctrl_conn*)conn->in_buf; if (conf_verbose) log_ppp_info2("recv [PPTP Stop-Ctrl-Conn-Reply <Result %i> <Error %i>]\n", msg->reason_result, msg->error_code); return -1; }
377,155
0
/* Handle an Outgoing-Call-Request: in any state other than ESTB the
 * call is refused.  Otherwise a kernel PPPoX/PPTP socket is created,
 * bound to our side (call_id 0 lets the kernel pick one) and connected
 * to the peer's call id/address; on success an Outgoing-Call-Reply is
 * sent, the PPP session is established, counters move from "starting"
 * to "active", the setup timeout is cancelled and the echo timer is
 * armed.  Returns 0 to keep the connection, -1 to drop it.
 * NOTE(review): dst_addr call_id applies htons() to msg->call_id which
 * arrives in network byte order — confirm the kernel expects it this
 * way before changing. */
static int pptp_out_call_rqst(struct pptp_conn_t *conn) { struct pptp_out_call_rqst *msg = (struct pptp_out_call_rqst *)conn->in_buf; struct sockaddr_pppox src_addr, dst_addr; struct sockaddr_in addr; socklen_t addrlen; int pptp_sock; if (conf_verbose) log_ppp_info2("recv [PPTP Outgoing-Call-Request <Call-ID %x> <Call-Serial %x> <Min-BPS %i> <Max-BPS %i> <Bearer %x> <Framing %x> <Window-Size %i> <Delay %i>]\n", ntohs(msg->call_id), ntohs(msg->call_sernum), ntohl(msg->bps_min), ntohl(msg->bps_max), ntohl(msg->bearer), ntohl(msg->framing), ntohs(msg->recv_size), ntohs(msg->delay)); if (conn->state != STATE_ESTB) { log_ppp_warn("unexpected PPTP_OUT_CALL_RQST\n"); if (send_pptp_out_call_rply(conn, msg, 0, PPTP_CALL_RES_GE, PPTP_GE_NOCONN)) return -1; return 0; } memset(&src_addr, 0, sizeof(src_addr)); src_addr.sa_family = AF_PPPOX; src_addr.sa_protocol = PX_PROTO_PPTP; src_addr.sa_addr.pptp.call_id = 0; addrlen = sizeof(addr); getsockname(conn->hnd.fd, (struct sockaddr*)&addr, &addrlen); src_addr.sa_addr.pptp.sin_addr = addr.sin_addr; memset(&dst_addr, 0, sizeof(dst_addr)); dst_addr.sa_family = AF_PPPOX; dst_addr.sa_protocol = PX_PROTO_PPTP; dst_addr.sa_addr.pptp.call_id = htons(msg->call_id); addrlen = sizeof(addr); getpeername(conn->hnd.fd, (struct sockaddr*)&addr, &addrlen); dst_addr.sa_addr.pptp.sin_addr = addr.sin_addr; pptp_sock = socket(AF_PPPOX, SOCK_STREAM, PX_PROTO_PPTP); if (pptp_sock < 0) { log_ppp_error("failed to create PPTP socket (%s)\n", strerror(errno)); return -1; } fcntl(pptp_sock, F_SETFD, fcntl(pptp_sock, F_GETFD) | FD_CLOEXEC); if (bind(pptp_sock, (struct sockaddr*)&src_addr, sizeof(src_addr))) { log_ppp_error("failed to bind PPTP socket (%s)\n", strerror(errno)); close(pptp_sock); return -1; } addrlen = sizeof(src_addr); getsockname(pptp_sock, (struct sockaddr*)&src_addr, &addrlen); if (connect(pptp_sock, (struct sockaddr*)&dst_addr, sizeof(dst_addr))) { log_ppp_error("failed to connect PPTP socket (%s)\n", strerror(errno)); close(pptp_sock); 
return -1; } if (send_pptp_out_call_rply(conn, msg, src_addr.sa_addr.pptp.call_id, PPTP_CALL_RES_OK, 0)) return -1; conn->call_id = src_addr.sa_addr.pptp.call_id; conn->peer_call_id = msg->call_id; conn->ppp.fd = pptp_sock; conn->ppp.ses.chan_name = _strdup(inet_ntoa(dst_addr.sa_addr.pptp.sin_addr)); triton_event_fire(EV_CTRL_STARTED, &conn->ppp.ses); if (establish_ppp(&conn->ppp)) { close(pptp_sock); //if (send_pptp_stop_ctrl_conn_rqst(conn, 0, 0)) conn->state = STATE_FIN; return -1; } conn->state = STATE_PPP; __sync_sub_and_fetch(&stat_starting, 1); __sync_add_and_fetch(&stat_active, 1); if (conn->timeout_timer.tpd) triton_timer_del(&conn->timeout_timer); if (conf_echo_interval) { conn->echo_timer.period = conf_echo_interval * 1000; triton_timer_add(&conn->ctx, &conn->echo_timer, 0); } return 0; }
377,156
0
/* Read handler for the control socket: accumulate bytes into in_buf,
 * validate the PPTP magic and advertised length, and once a complete
 * control message is buffered, check its length against the expected
 * size for its ctrl_type, dispatch it via process_packet() and shift
 * any trailing bytes of the next message to the front of the buffer.
 * Returns 0 when the socket would block, or 1 after disconnecting on
 * EOF, I/O error or protocol violation. */
static int pptp_read(struct triton_md_handler_t *h) { struct pptp_conn_t *conn=container_of(h,typeof(*conn),hnd); struct pptp_header *hdr=(struct pptp_header *)conn->in_buf; int n; while(1) { n = read(h->fd, conn->in_buf + conn->in_size, PPTP_CTRL_SIZE_MAX - conn->in_size); if (n < 0) { if (errno == EINTR) continue; if (errno == EAGAIN) return 0; log_ppp_error("pptp: read: %s\n",strerror(errno)); goto drop; } if (n == 0) { if (conf_verbose) log_ppp_info2("pptp: disconnect by peer\n"); goto drop; } conn->in_size += n; if (conn->in_size >= sizeof(*hdr)) { if (hdr->magic != htonl(PPTP_MAGIC)) { log_ppp_error("pptp: invalid magic\n"); goto drop; } if (ntohs(hdr->length) >= PPTP_CTRL_SIZE_MAX) { log_ppp_error("pptp: message is too long\n"); goto drop; } if (ntohs(hdr->length) > conn->in_size) continue; if (ntohs(hdr->length) <= conn->in_size) { if (ntohs(hdr->length) != PPTP_CTRL_SIZE(ntohs(hdr->ctrl_type))) { log_ppp_error("pptp: invalid message length\n"); goto drop; } if (process_packet(conn)) goto drop; conn->in_size -= ntohs(hdr->length); if (conn->in_size) memmove(conn->in_buf, conn->in_buf + ntohs(hdr->length), conn->in_size); } } } drop: disconnect(conn); return 1; }
377,158
0
/* Build and queue an Outgoing-Call-Reply: our call_id plus fields
 * echoed from the request (peer call id, speed, window size), with the
 * given result/error codes.  Returns post_msg's result. */
static int send_pptp_out_call_rply(struct pptp_conn_t *conn, struct pptp_out_call_rqst *rqst, int call_id, int res_code, int err_code) { struct pptp_out_call_rply msg = { .header = PPTP_HEADER_CTRL(PPTP_OUT_CALL_RPLY), .call_id = htons(call_id), .call_id_peer = rqst->call_id, .result_code = res_code, .error_code = err_code, .cause_code = 0, .speed = rqst->bps_max, .recv_size = rqst->recv_size, .delay = 0, .channel = 0, }; if (conf_verbose) log_ppp_info2("send [PPTP Outgoing-Call-Reply <Call-ID %x> <Peer-Call-ID %x> <Result %i> <Error %i> <Cause %i> <Speed %i> <Window-Size %i> <Delay %i> <Channel %x>]\n", ntohs(msg.call_id), ntohs(msg.call_id_peer), msg.result_code, msg.error_code, ntohs(msg.cause_code), ntohl(msg.speed), ntohs(msg.recv_size), ntohs(msg.delay), ntohl(msg.channel)); return post_msg(conn, &msg, sizeof(msg)); }
377,160
0
/* CLI "show stat" handler: print the module's starting/active session
 * counters to the requesting client. */
static int show_stat_exec(const char *cmd, char * const *fields, int fields_cnt, void *client) { cli_send(client, "pptp:\r\n"); cli_sendv(client," starting: %u\r\n", stat_starting); cli_sendv(client," active: %u\r\n", stat_active); return CLI_CMD_OK; }
377,161
0
/* Handle a Call-Clear-Request from the peer: stop the echo timer,
 * terminate the PPP session if one is running (moving it to
 * STATE_CLOSE and decrementing the active counter), then answer with a
 * Call-Disconnect-Notify. */
static int pptp_call_clear_rqst(struct pptp_conn_t *conn) { struct pptp_call_clear_rqst *rqst = (struct pptp_call_clear_rqst *)conn->in_buf; if (conf_verbose) log_ppp_info2("recv [PPTP Call-Clear-Request <Call-ID %x>]\n", ntohs(rqst->call_id)); if (conn->echo_timer.tpd) triton_timer_del(&conn->echo_timer); if (conn->state == STATE_PPP) { __sync_sub_and_fetch(&stat_active, 1); conn->state = STATE_CLOSE; ap_session_terminate(&conn->ppp.ses, TERM_USER_REQUEST, 1); } return send_pptp_call_disconnect_notify(conn, 4); }
377,162
0
/* Callback fired when the PPP session ends: notify the peer with
 * Call-Disconnect-Notify and Stop-Ctrl-Conn-Request; if either send
 * fails, disconnect via a deferred context call, otherwise (re)arm the
 * timeout timer so the control connection is eventually reaped. */
static void ppp_finished(struct ap_session *ses) { struct ppp_t *ppp = container_of(ses, typeof(*ppp), ses); struct pptp_conn_t *conn = container_of(ppp, typeof(*conn), ppp); if (conn->state != STATE_CLOSE) { log_ppp_debug("pptp: ppp finished\n"); conn->state = STATE_CLOSE; __sync_sub_and_fetch(&stat_active, 1); if (send_pptp_call_disconnect_notify(conn, 3)) triton_context_call(&conn->ctx, (void (*)(void*))disconnect, conn); else if (send_pptp_stop_ctrl_conn_rqst(conn, 0)) triton_context_call(&conn->ctx, (void (*)(void*))disconnect, conn); else { if (conn->timeout_timer.tpd) triton_timer_mod(&conn->timeout_timer, 0); else triton_timer_add(&conn->ctx, &conn->timeout_timer, 0); } } }
377,163
0
static int pptp_start_ctrl_conn_rqst(struct pptp_conn_t *conn) { struct pptp_start_ctrl_conn *msg = (struct pptp_start_ctrl_conn *)conn->in_buf; if (conf_verbose) log_ppp_info2("recv [PPTP Start-Ctrl-Conn-Request <Version %i> <Framing %x> <Bearer %x> <Max-Chan %i>]\n", msg->version, ntohl(msg->framing_cap), ntohl(msg->bearer_cap), ntohs(msg->max_channels)); if (conn->state != STATE_IDLE) { log_ppp_warn("unexpected PPTP_START_CTRL_CONN_RQST\n"); if (send_pptp_start_ctrl_conn_rply(conn, PPTP_CONN_RES_EXISTS, 0)) return -1; return 0; } if (msg->version != htons(PPTP_VERSION)) { log_ppp_warn("PPTP version mismatch: expecting %x, received %" PRIu32 "\n", PPTP_VERSION, msg->version); if (send_pptp_start_ctrl_conn_rply(conn, PPTP_CONN_RES_PROTOCOL, 0)) return -1; return 0; } /*if (!(ntohl(msg->framing_cap) & PPTP_FRAME_SYNC)) { log_ppp_warn("connection does not supports sync mode\n"); if (send_pptp_start_ctrl_conn_rply(conn, PPTP_CONN_RES_GE, 0)) return -1; return 0; }*/ if (send_pptp_start_ctrl_conn_rply(conn, PPTP_CONN_RES_SUCCESS, 0)) return -1; if (conn->timeout_timer.tpd) triton_timer_mod(&conn->timeout_timer, 0); conn->state = STATE_ESTB; return 0; }
377,164
0
static void pptp_timeout(struct triton_timer_t *t) { struct pptp_conn_t *conn = container_of(t, typeof(*conn), timeout_timer); disconnect(conn); }
377,165
0
/* Callback fired when the PPP session comes up; only logs the event. */
static void ppp_started(struct ap_session *ses) { log_ppp_debug("pptp: ppp started\n"); }
377,166
0
/* (Re)load [pptp] configuration: timeout, echo interval/failure,
 * verbosity, max MTU, MPPE policy, pool names and interface rename,
 * then warn about iprange module status.  Called at init and on
 * EV_CONFIG_RELOAD; options absent from the config keep their current
 * values (except ppp-max-mtu and mppe, which reset to defaults). */
static void load_config(void) { char *opt; opt = conf_get_opt("pptp", "timeout"); if (opt && atoi(opt) > 0) conf_timeout = atoi(opt); opt = conf_get_opt("pptp", "echo-interval"); if (opt && atoi(opt) >= 0) conf_echo_interval = atoi(opt); opt = conf_get_opt("pptp", "echo-failure"); if (opt && atoi(opt) >= 0) conf_echo_failure = atoi(opt); opt = conf_get_opt("pptp", "verbose"); if (opt && atoi(opt) >= 0) conf_verbose = atoi(opt) > 0; opt = conf_get_opt("pptp", "ppp-max-mtu"); if (opt && atoi(opt) > 0) conf_ppp_max_mtu = atoi(opt); else conf_ppp_max_mtu = PPTP_MAX_MTU; conf_mppe = MPPE_UNSET; opt = conf_get_opt("pptp", "mppe"); if (opt) { if (strcmp(opt, "deny") == 0) conf_mppe = MPPE_DENY; else if (strcmp(opt, "allow") == 0) conf_mppe = MPPE_ALLOW; else if (strcmp(opt, "prefer") == 0) conf_mppe = MPPE_PREFER; else if (strcmp(opt, "require") == 0) conf_mppe = MPPE_REQUIRE; } conf_ip_pool = conf_get_opt("pptp", "ip-pool"); conf_ipv6_pool = conf_get_opt("pptp", "ipv6-pool"); conf_dpv6_pool = conf_get_opt("pptp", "ipv6-pool-delegate"); conf_ifname = conf_get_opt("pptp", "ifname"); switch (iprange_check_activation()) { case IPRANGE_DISABLED: log_warn("pptp: iprange module disabled, improper IP configuration of PPP interfaces may cause kernel soft lockup\n"); break; case IPRANGE_NO_RANGE: log_warn("pptp: no IP address range defined in section [%s], incoming PPTP connections will be rejected\n", IPRANGE_CONF_SECTION); break; default: /* Makes compiler happy */ break; } }
377,167
0
static int pptp_connect(struct triton_md_handler_t *h) { struct sockaddr_in addr; socklen_t size = sizeof(addr); int sock; struct pptp_conn_t *conn; while(1) { sock = accept(h->fd, (struct sockaddr *)&addr, &size); if (sock < 0) { if (errno == EAGAIN) return 0; log_error("pptp: accept failed: %s\n", strerror(errno)); continue; } if (ap_shutdown) { close(sock); continue; } if (conf_max_starting && ap_session_stat.starting >= conf_max_starting) { close(sock); continue; } if (conf_max_sessions && ap_session_stat.active + ap_session_stat.starting >= conf_max_sessions) { close(sock); continue; } if (triton_module_loaded("connlimit") && connlimit_check(cl_key_from_ipv4(addr.sin_addr.s_addr))) { close(sock); continue; } log_info2("pptp: new connection from %s\n", inet_ntoa(addr.sin_addr)); if (iprange_client_check(addr.sin_addr.s_addr)) { log_warn("pptp: IP is out of client-ip-range, droping connection...\n"); close(sock); continue; } if (fcntl(sock, F_SETFL, O_NONBLOCK)) { log_error("pptp: failed to set nonblocking mode: %s, closing connection...\n", strerror(errno)); close(sock); continue; } conn = mempool_alloc(conn_pool); memset(conn, 0, sizeof(*conn)); conn->hnd.fd = sock; conn->hnd.read = pptp_read; conn->hnd.write = pptp_write; conn->ctx.close = pptp_close; conn->ctx.before_switch = pptp_ctx_switch; conn->in_buf = _malloc(PPTP_CTRL_SIZE_MAX); conn->out_buf = _malloc(PPTP_CTRL_SIZE_MAX); conn->timeout_timer.expire = pptp_timeout; conn->timeout_timer.period = conf_timeout * 1000; conn->echo_timer.expire = pptp_send_echo; conn->ctrl.ctx = &conn->ctx; conn->ctrl.started = ppp_started; conn->ctrl.finished = ppp_finished; conn->ctrl.terminate = ppp_terminate; conn->ctrl.max_mtu = conf_ppp_max_mtu; conn->ctrl.type = CTRL_TYPE_PPTP; conn->ctrl.ppp = 1; conn->ctrl.name = "pptp"; conn->ctrl.ifname = ""; conn->ctrl.mppe = conf_mppe; conn->ctrl.calling_station_id = _malloc(17); conn->ctrl.called_station_id = _malloc(17); u_inet_ntoa(addr.sin_addr.s_addr, 
conn->ctrl.calling_station_id); getsockname(sock, &addr, &size); u_inet_ntoa(addr.sin_addr.s_addr, conn->ctrl.called_station_id); ppp_init(&conn->ppp); conn->ppp.ses.ctrl = &conn->ctrl; if (conf_ip_pool) conn->ppp.ses.ipv4_pool_name = _strdup(conf_ip_pool); if (conf_ipv6_pool) conn->ppp.ses.ipv6_pool_name = _strdup(conf_ipv6_pool); if (conf_dpv6_pool) conn->ppp.ses.dpv6_pool_name = _strdup(conf_dpv6_pool); if (conf_ifname) conn->ppp.ses.ifname_rename = _strdup(conf_ifname); triton_context_register(&conn->ctx, &conn->ppp.ses); triton_md_register_handler(&conn->ctx, &conn->hnd); triton_md_enable_handler(&conn->hnd,MD_MODE_READ); triton_timer_add(&conn->ctx, &conn->timeout_timer, 0); triton_context_wakeup(&conn->ctx); triton_event_fire(EV_CTRL_STARTING, &conn->ppp.ses); __sync_add_and_fetch(&stat_starting, 1); } return 0; }
377,169
0
/* Export pointers to the module's session counters so external code
 * can read the starting/active totals. */
void __export pptp_get_stat(unsigned int **starting, unsigned int **active)
{
	*starting = &stat_starting;
	*active = &stat_active;
}
377,170
0
/* Handle a peer Echo-Request: answer immediately with an Echo-Reply
 * echoing the identifier, and re-arm our own echo timer since the peer
 * is evidently alive.  Returns post_msg's result. */
static int pptp_echo_rqst(struct pptp_conn_t *conn) { struct pptp_echo_rqst *in_msg = (struct pptp_echo_rqst *)conn->in_buf; struct pptp_echo_rply out_msg = { .header = PPTP_HEADER_CTRL(PPTP_ECHO_RPLY), .identifier = in_msg->identifier, .result_code = 1, }; if (conf_verbose) { log_ppp_debug("recv [PPTP Echo-Request <Identifier %x>]\n", in_msg->identifier); log_ppp_debug("send [PPTP Echo-Reply <Identifier %x>]\n", out_msg.identifier); } if (conn->echo_timer.tpd) triton_timer_mod(&conn->echo_timer, 0); return post_msg(conn, &out_msg, sizeof(out_msg)); }
377,171
0
/* Echo timer callback: after conf_echo_failure consecutive unanswered
 * echoes, drop the connection; otherwise send a new Echo-Request with
 * a random identifier (echo_sent is cleared by pptp_echo_rply()).
 * NOTE(review): load_config() accepts echo-failure = 0, in which case
 * ++echo_sent only matches after wrap-around — confirm 0 is meant to
 * disable the failure check. */
static void pptp_send_echo(struct triton_timer_t *t) { struct pptp_conn_t *conn = container_of(t, typeof(*conn), echo_timer); struct pptp_echo_rqst msg = { .header = PPTP_HEADER_CTRL(PPTP_ECHO_RQST), }; if (++conn->echo_sent == conf_echo_failure) { log_ppp_warn("pptp: no echo reply\n"); disconnect(conn); return; } msg.identifier = random(); if (conf_verbose) log_ppp_debug("send [PPTP Echo-Request <Identifier %x>]\n", msg.identifier); if (post_msg(conn, &msg, sizeof(msg))) disconnect(conn); }
377,172
0
/* Build and queue a Call-Disconnect-Notify for the peer's call id with
 * the given result code.  Returns post_msg's result. */
static int send_pptp_call_disconnect_notify(struct pptp_conn_t *conn, int result) { struct pptp_call_clear_ntfy msg = { .header = PPTP_HEADER_CTRL(PPTP_CALL_CLEAR_NTFY), .call_id = htons(conn->peer_call_id), .result_code = result, .error_code = 0, .cause_code = 0, }; if (conf_verbose) log_ppp_info2("send [PPTP Call-Disconnect-Notify <Call-ID %x> <Result %i> <Error %i> <Cause %i>]\n", ntohs(msg.call_id), msg.result_code, msg.error_code, msg.cause_code); return post_msg(conn, &msg, sizeof(msg)); }
377,173
0
/*
 * Dual 1024-bit modular exponentiation in 2^52-radix redundant
 * representation using AVX-512 Almost Montgomery Multiplication
 * (ossl_rsaz_amm52x20_x2_256), fixed 5-bit windows.
 *
 * Flow: build the 32-entry table of base powers in the Montgomery
 * domain, expand both exponents by one guard word, then walk the
 * exponent bits top-down: extract the window index (stitching bits
 * across 64-bit word boundaries), fetch the multiplier via the
 * constant-time ossl_extract_multiplier_2x20_win5, square 5 times and
 * multiply.  Finally one AMM by 1 converts out of the Montgomery
 * domain (which also bounds the result below m — see the comment in
 * the body), and the exponent copies are cleansed.
 *
 * NOTE(review): the two independent exponentiations are interleaved in
 * the [2][20] buffers; red_table indexing and the trailing 0/1
 * arguments to the extract routine select the lane.  Left byte-for-byte
 * unchanged: constant-time table access and operation order are
 * security-relevant here.
 */
static void RSAZ_exp52x20_x2_256(BN_ULONG *out, /* [2][20] */ const BN_ULONG *base, /* [2][20] */ const BN_ULONG *exp[2], /* 2x16 */ const BN_ULONG *m, /* [2][20] */ const BN_ULONG *rr, /* [2][20] */ const BN_ULONG k0[2]) { # define BITSIZE_MODULUS (1024) # define EXP_WIN_SIZE (5) # define EXP_WIN_MASK ((1U << EXP_WIN_SIZE) - 1) /* * Number of digits (64-bit words) in redundant representation to handle * modulus bits */ # define RED_DIGITS (20) # define EXP_DIGITS (16) # define DAMM ossl_rsaz_amm52x20_x2_256 /* * Squaring is done using multiplication now. That can be a subject of * optimization in future. */ # define DAMS(r,a,m,k0) \ ossl_rsaz_amm52x20_x2_256((r),(a),(a),(m),(k0)) /* Allocate stack for red(undant) result Y and multiplier X */ ALIGN64 BN_ULONG red_Y[2][RED_DIGITS]; ALIGN64 BN_ULONG red_X[2][RED_DIGITS]; /* Allocate expanded exponent */ ALIGN64 BN_ULONG expz[2][EXP_DIGITS + 1]; /* Pre-computed table of base powers */ ALIGN64 BN_ULONG red_table[1U << EXP_WIN_SIZE][2][RED_DIGITS]; int idx; memset(red_Y, 0, sizeof(red_Y)); memset(red_table, 0, sizeof(red_table)); memset(red_X, 0, sizeof(red_X)); /* * Compute table of powers base^i, i = 0, ..., (2^EXP_WIN_SIZE) - 1 * table[0] = mont(x^0) = mont(1) * table[1] = mont(x^1) = mont(x) */ red_X[0][0] = 1; red_X[1][0] = 1; DAMM(red_table[0][0], (const BN_ULONG*)red_X, rr, m, k0); DAMM(red_table[1][0], base, rr, m, k0); for (idx = 1; idx < (int)((1U << EXP_WIN_SIZE) / 2); idx++) { DAMS(red_table[2 * idx + 0][0], red_table[1 * idx][0], m, k0); DAMM(red_table[2 * idx + 1][0], red_table[2 * idx][0], red_table[1][0], m, k0); } /* Copy and expand exponents */ memcpy(expz[0], exp[0], EXP_DIGITS * sizeof(BN_ULONG)); expz[0][EXP_DIGITS] = 0; memcpy(expz[1], exp[1], EXP_DIGITS * sizeof(BN_ULONG)); expz[1][EXP_DIGITS] = 0; /* Exponentiation */ { const int rem = BITSIZE_MODULUS % EXP_WIN_SIZE; BN_ULONG table_idx_mask = EXP_WIN_MASK; int exp_bit_no = BITSIZE_MODULUS - rem; int exp_chunk_no = exp_bit_no / 64; int 
exp_chunk_shift = exp_bit_no % 64; BN_ULONG red_table_idx_0, red_table_idx_1; /* * If rem == 0, then * exp_bit_no = modulus_bitsize - exp_win_size * However, this isn't possible because rem is { 1024, 1536, 2048 } % 5 * which is { 4, 1, 3 } respectively. * * If this assertion ever fails the fix above is easy. */ OPENSSL_assert(rem != 0); /* Process 1-st exp window - just init result */ red_table_idx_0 = expz[0][exp_chunk_no]; red_table_idx_1 = expz[1][exp_chunk_no]; /* * The function operates with fixed moduli sizes divisible by 64, * thus table index here is always in supported range [0, EXP_WIN_SIZE). */ red_table_idx_0 >>= exp_chunk_shift; red_table_idx_1 >>= exp_chunk_shift; ossl_extract_multiplier_2x20_win5(red_Y[0], (const BN_ULONG*)red_table, (int)red_table_idx_0, 0); ossl_extract_multiplier_2x20_win5(red_Y[1], (const BN_ULONG*)red_table, (int)red_table_idx_1, 1); /* Process other exp windows */ for (exp_bit_no -= EXP_WIN_SIZE; exp_bit_no >= 0; exp_bit_no -= EXP_WIN_SIZE) { /* Extract pre-computed multiplier from the table */ { BN_ULONG T; exp_chunk_no = exp_bit_no / 64; exp_chunk_shift = exp_bit_no % 64; { red_table_idx_0 = expz[0][exp_chunk_no]; T = expz[0][exp_chunk_no + 1]; red_table_idx_0 >>= exp_chunk_shift; /* * Get additional bits from then next quadword * when 64-bit boundaries are crossed. */ if (exp_chunk_shift > 64 - EXP_WIN_SIZE) { T <<= (64 - exp_chunk_shift); red_table_idx_0 ^= T; } red_table_idx_0 &= table_idx_mask; ossl_extract_multiplier_2x20_win5(red_X[0], (const BN_ULONG*)red_table, (int)red_table_idx_0, 0); } { red_table_idx_1 = expz[1][exp_chunk_no]; T = expz[1][exp_chunk_no + 1]; red_table_idx_1 >>= exp_chunk_shift; /* * Get additional bits from then next quadword * when 64-bit boundaries are crossed. 
*/ if (exp_chunk_shift > 64 - EXP_WIN_SIZE) { T <<= (64 - exp_chunk_shift); red_table_idx_1 ^= T; } red_table_idx_1 &= table_idx_mask; ossl_extract_multiplier_2x20_win5(red_X[1], (const BN_ULONG*)red_table, (int)red_table_idx_1, 1); } } /* Series of squaring */ DAMS((BN_ULONG*)red_Y, (const BN_ULONG*)red_Y, m, k0); DAMS((BN_ULONG*)red_Y, (const BN_ULONG*)red_Y, m, k0); DAMS((BN_ULONG*)red_Y, (const BN_ULONG*)red_Y, m, k0); DAMS((BN_ULONG*)red_Y, (const BN_ULONG*)red_Y, m, k0); DAMS((BN_ULONG*)red_Y, (const BN_ULONG*)red_Y, m, k0); DAMM((BN_ULONG*)red_Y, (const BN_ULONG*)red_Y, (const BN_ULONG*)red_X, m, k0); } } /* * * NB: After the last AMM of exponentiation in Montgomery domain, the result * may be 1025-bit, but the conversion out of Montgomery domain performs an * AMM(x,1) which guarantees that the final result is less than |m|, so no * conditional subtraction is needed here. See "Efficient Software * Implementations of Modular Exponentiation" (by Shay Gueron) paper for details. */ /* Convert result back in regular 2^52 domain */ memset(red_X, 0, sizeof(red_X)); red_X[0][0] = 1; red_X[1][0] = 1; DAMM(out, (const BN_ULONG*)red_Y, (const BN_ULONG*)red_X, m, k0); /* Clear exponents */ OPENSSL_cleanse(expz, sizeof(expz)); OPENSSL_cleanse(red_Y, sizeof(red_Y)); # undef DAMS # undef DAMM # undef EXP_DIGITS # undef RED_DIGITS # undef EXP_WIN_MASK # undef EXP_WIN_SIZE # undef BITSIZE_MODULUS }
377,174
0
/*
 * Dual modular exponentiation using AVX-512 IFMA: computes
 * res1 = base1 ^ exp1 mod m1 and res2 = base2 ^ exp2 mod m2 in parallel.
 * rr1/rr2 are the Montgomery converters (R^2 mod m), k0_1/k0_2 the
 * Montgomery factors (-1/m mod 2^64). Returns 1 on success, 0 on failure.
 * Only factor_size == 1024 is accepted (see the switch below).
 */
int ossl_rsaz_mod_exp_avx512_x2(BN_ULONG *res1,
                                const BN_ULONG *base1,
                                const BN_ULONG *exp1,
                                const BN_ULONG *m1,
                                const BN_ULONG *rr1,
                                BN_ULONG k0_1,
                                BN_ULONG *res2,
                                const BN_ULONG *base2,
                                const BN_ULONG *exp2,
                                const BN_ULONG *m2,
                                const BN_ULONG *rr2,
                                BN_ULONG k0_2,
                                int factor_size)
{
    int ret = 0;

    /*
     * Number of word-size (BN_ULONG) digits to store exponent in redundant
     * representation.
     */
    int exp_digits = number_of_digits(factor_size + 2, DIGIT_SIZE);
    /* Bit position of the RR -> RR' correction coefficient (see below). */
    int coeff_pow = 4 * (DIGIT_SIZE * exp_digits - factor_size);
    BN_ULONG *base1_red, *m1_red, *rr1_red;
    BN_ULONG *base2_red, *m2_red, *rr2_red;
    BN_ULONG *coeff_red;
    BN_ULONG *storage = NULL;
    BN_ULONG *storage_aligned = NULL;
    BN_ULONG storage_len_bytes = 7 * exp_digits * sizeof(BN_ULONG);

    /* AMM = Almost Montgomery Multiplication */
    AMM52 amm = NULL;
    /* Dual (2-exps in parallel) exponentiation */
    EXP52_x2 exp_x2 = NULL;

    const BN_ULONG *exp[2] = {0};
    BN_ULONG k0[2] = {0};

    /* Only 1024-bit factor size is supported now */
    switch (factor_size) {
    case 1024:
        amm = ossl_rsaz_amm52x20_x1_256;
        exp_x2 = RSAZ_exp52x20_x2_256;
        break;
    default:
        goto err;
    }

    /* Over-allocate by 64 bytes so the working area can be 64-byte aligned. */
    storage = (BN_ULONG *)OPENSSL_malloc(storage_len_bytes + 64);
    if (storage == NULL)
        goto err;
    storage_aligned = (BN_ULONG *)ALIGN_OF(storage, 64);

    /*
     * Memory layout for red(undant) representations.
     * NOTE: the dual-exponentiation kernel relies on the "1" buffers being
     * immediately followed by the matching "2" buffers.
     */
    base1_red = storage_aligned;
    base2_red = storage_aligned + 1 * exp_digits;
    m1_red = storage_aligned + 2 * exp_digits;
    m2_red = storage_aligned + 3 * exp_digits;
    rr1_red = storage_aligned + 4 * exp_digits;
    rr2_red = storage_aligned + 5 * exp_digits;
    coeff_red = storage_aligned + 6 * exp_digits;

    /* Convert base_i, m_i, rr_i, from regular to 52-bit radix */
    to_words52(base1_red, exp_digits, base1, factor_size);
    to_words52(base2_red, exp_digits, base2, factor_size);
    to_words52(m1_red, exp_digits, m1, factor_size);
    to_words52(m2_red, exp_digits, m2, factor_size);
    to_words52(rr1_red, exp_digits, rr1, factor_size);
    to_words52(rr2_red, exp_digits, rr2, factor_size);

    /*
     * Compute target domain Montgomery converters RR' for each modulus
     * based on precomputed original domain's RR.
     *
     * RR -> RR' transformation steps:
     *  (1) coeff = 2^k
     *  (2) t = AMM(RR,RR) = RR^2 / R' mod m
     *  (3) RR' = AMM(t, coeff) = RR^2 * 2^k / R'^2 mod m
     * where
     *  k = 4 * (52 * digits52 - modlen)
     *  R  = 2^(64 * ceil(modlen/64)) mod m
     *  RR = R^2 mod M
     *  R' = 2^(52 * ceil(modlen/52)) mod m
     *
     *  modlen = 1024: k = 64, RR = 2^2048 mod m, RR' = 2^2080 mod m
     */
    memset(coeff_red, 0, exp_digits * sizeof(BN_ULONG));
    /* (1) in reduced domain representation */
    set_bit(coeff_red, 64 * (int)(coeff_pow / 52) + coeff_pow % 52);

    amm(rr1_red, rr1_red, rr1_red, m1_red, k0_1);     /* (2) for m1 */
    amm(rr1_red, rr1_red, coeff_red, m1_red, k0_1);   /* (3) for m1 */

    amm(rr2_red, rr2_red, rr2_red, m2_red, k0_2);     /* (2) for m2 */
    amm(rr2_red, rr2_red, coeff_red, m2_red, k0_2);   /* (3) for m2 */

    exp[0] = exp1;
    exp[1] = exp2;

    k0[0] = k0_1;
    k0[1] = k0_2;

    /*
     * One call computes both exponentiations: the kernel consumes the
     * adjacent "1"/"2" buffer pairs laid out above and writes both results
     * into rr1_red/rr2_red.
     */
    exp_x2(rr1_red, base1_red, exp, m1_red, rr1_red, k0);

    /* Convert rr_i back to regular radix */
    from_words52(res1, factor_size, rr1_red);
    from_words52(res2, factor_size, rr2_red);

    /* bn_reduce_once_in_place expects number of BN_ULONG, not bit size */
    factor_size /= sizeof(BN_ULONG) * 8;

    /* Final conditional reduction so that res_i < m_i. */
    bn_reduce_once_in_place(res1, /*carry=*/0, m1, storage, factor_size);
    bn_reduce_once_in_place(res2, /*carry=*/0, m2, storage, factor_size);

    ret = 1;
err:
    if (storage != NULL) {
        /* Clear the redundant-form copies of secret material before free. */
        OPENSSL_cleanse(storage, storage_len_bytes);
        OPENSSL_free(storage);
    }
    return ret;
}
377,175
0
/*
 * Convert a number from the redundant 52-bit-per-word representation back
 * to the regular packed 64-bit representation.
 * |in| holds 52 significant bits per 64-bit word; |out| receives
 * out_bitsize bits densely packed. Two 52-bit digits are merged into
 * 13 output bytes per iteration (6 + 7 byte strides).
 * NOTE(review): writes through (uint64_t *) at byte offsets rely on
 * unaligned little-endian stores — presumably x86-only code; confirm.
 */
static void from_words52(BN_ULONG *out, int out_bitsize, const BN_ULONG *in)
{
    int i;
    int out_len = BITS2WORD64_SIZE(out_bitsize);

    assert(out != NULL);
    assert(in != NULL);

    /* Zero the destination so partial tail writes compose correctly. */
    for (i = 0; i < out_len; i++)
        out[i] = 0;

    {
        uint8_t *out_str = (uint8_t *)out;

        /* Pack pairs of 52-bit digits into 104 bits (13 bytes) of output. */
        for (; out_bitsize >= (2 * DIGIT_SIZE);
               out_bitsize -= (2 * DIGIT_SIZE), in += 2) {
            (*(uint64_t *)out_str) = in[0];
            out_str += 6;
            /* XOR merges the second digit over the 4 bits shared with in[0]. */
            (*(uint64_t *)out_str) ^= in[1] << 4;
            out_str += 7;
        }

        /* Tail: one or two remaining (partial) digits. */
        if (out_bitsize > DIGIT_SIZE) {
            put_digit52(out_str, 7, in[0]);
            out_str += 6;
            out_bitsize -= DIGIT_SIZE;
            put_digit52(out_str, BITS2WORD8_SIZE(out_bitsize),
                        (in[1] << 4 | in[0] >> 48));
        } else if (out_bitsize) {
            put_digit52(out_str, BITS2WORD8_SIZE(out_bitsize), in[0]);
        }
    }
}
377,176
0
/*
 * Assemble up to |in_len| little-endian bytes of |in| into one integer.
 * Used by the radix-conversion helpers to read partial digits.
 */
static ossl_inline uint64_t get_digit52(const uint8_t *in, int in_len)
{
    uint64_t value = 0;
    int i;

    assert(in != NULL);

    /* Highest-index byte is most significant. */
    for (i = in_len - 1; i >= 0; i--)
        value = (value << 8) + (uint64_t)in[i];
    return value;
}
377,177
0
/*
 * Emit the low |strLen| bytes of |digit| into |pStr| in little-endian
 * order. Counterpart of get_digit52().
 */
static ossl_inline void put_digit52(uint8_t *pStr, int strLen, uint64_t digit)
{
    int i;

    assert(pStr != NULL);

    for (i = 0; i < strLen; i++) {
        pStr[i] = (uint8_t)(digit & 0xFF);
        digit >>= 8;
    }
}
377,178
0
/*
 * Convert a packed big number (|in|, in_bitsize bits) into the redundant
 * representation with 52 significant bits per 64-bit output word.
 * Inverse of from_words52(). Two 52-bit digits are extracted from every
 * 13 input bytes (6 + 7 byte strides); unused tail words are zeroed.
 * NOTE(review): reads through (uint64_t *) at byte offsets rely on
 * unaligned little-endian loads — presumably x86-only code; confirm.
 */
static void to_words52(BN_ULONG *out, int out_len, const BN_ULONG *in,
                       int in_bitsize)
{
    uint8_t *in_str = NULL;

    assert(out != NULL);
    assert(in != NULL);
    /* Check destination buffer capacity */
    assert(out_len >= number_of_digits(in_bitsize, DIGIT_SIZE));

    in_str = (uint8_t *)in;

    /* Extract pairs of 52-bit digits from 104 bits (13 bytes) of input. */
    for (; in_bitsize >= (2 * DIGIT_SIZE);
           in_bitsize -= (2 * DIGIT_SIZE), out += 2) {
        out[0] = (*(uint64_t *)in_str) & DIGIT_MASK;
        in_str += 6;
        out[1] = ((*(uint64_t *)in_str) >> 4) & DIGIT_MASK;
        in_str += 7;
        out_len -= 2;
    }

    /* Tail: one or two remaining (partial) digits. */
    if (in_bitsize > DIGIT_SIZE) {
        uint64_t digit = get_digit52(in_str, 7);

        out[0] = digit & DIGIT_MASK;
        in_str += 6;
        in_bitsize -= DIGIT_SIZE;
        digit = get_digit52(in_str, BITS2WORD8_SIZE(in_bitsize));
        out[1] = digit >> 4;
        out += 2;
        out_len -= 2;
    } else if (in_bitsize > 0) {
        out[0] = get_digit52(in_str, BITS2WORD8_SIZE(in_bitsize));
        out++;
        out_len--;
    }

    /* Zero-fill the remaining capacity of the destination. */
    while (out_len > 0) {
        *out = 0;
        out_len--;
        out++;
    }
}
377,179
0
// Block until the profiled event completes, then return its measured GPU
// execution time in microseconds (the OpenCL counters are in nanoseconds).
unsigned int getKernelExecutionTimeMicros(cl_event & e) {
	cl_ulong tBegin = 0;
	cl_ulong tFinish = 0;

	clWaitForEvents(1, &e);
	clGetEventProfilingInfo(e, CL_PROFILING_COMMAND_START, sizeof(tBegin), &tBegin, NULL);
	clGetEventProfilingInfo(e, CL_PROFILING_COMMAND_END, sizeof(tFinish), &tFinish, NULL);

	return (tFinish - tBegin) / 1000;
}
377,182
0
// Register an OpenCL device with this dispatcher. The Device wraps queues,
// kernels and buffers for one GPU and is driven later by run()/dispatch().
// NOTE(review): the Device is heap-allocated and never deleted here or in
// ~Dispatcher() — it lives for the process lifetime; confirm intentional.
void Dispatcher::addDevice(cl_device_id clDeviceId, const size_t worksizeLocal, const size_t index) {
	Device * pDevice = new Device(*this, m_clContext, m_clProgram, clDeviceId, worksizeLocal, m_size, index, m_mode);
	m_vDevices.push_back(pDevice);
}
377,183
0
// Enqueue a kernel on a specific device using its preferred local work size.
// If the implementation rejects that local size, fall back permanently to
// letting the driver choose (worksizeLocal = 0) and retry once.
void Dispatcher::enqueueKernelDevice(Device & d, cl_kernel & clKernel, size_t worksizeGlobal, cl_event * pEvent = NULL) {
	try {
		enqueueKernel(d.m_clQueue, clKernel, worksizeGlobal, d.m_worksizeLocal, pEvent);
	} catch ( OpenCLException & e ) {
		// If local work size is invalid, abandon it and let implementation decide
		if ((e.m_res == CL_INVALID_WORK_GROUP_SIZE || e.m_res == CL_INVALID_WORK_ITEM_SIZE) && d.m_worksizeLocal != 0) {
			std::cout << std::endl << "warning: local work size abandoned on GPU" << d.m_index << std::endl;
			d.m_worksizeLocal = 0;
			enqueueKernel(d.m_clQueue, clKernel, worksizeGlobal, d.m_worksizeLocal, pEvent);
		}
		else {
			// Any other error is fatal for this dispatch.
			throw;
		}
	}
}
377,184
0
// Print "OK" on CL_SUCCESS, otherwise the error code.
// Returns true when the call failed (so callers can bail out).
bool printResult(const cl_int err) {
	const bool failed = (err != CL_SUCCESS);
	if (failed) {
		std::cout << toString(err) << std::endl;
	} else {
		std::cout << "OK" << std::endl;
	}
	return failed;
}
377,185
0
// Intentionally empty: the wrapped OpenCL handles and buffers are managed
// by their own members / the process lifetime.
// NOTE(review): m_clQueue and the cl_kernel handles are never released
// explicitly — confirm this is acceptable for this tool's lifetime model.
Dispatcher::Device::~Device() {

}
377,186
0
// Generic helper for the clGet*Info family: fetch a fixed-size value of
// type T (e.g. cl_uint, cl_ulong) for |param|/|param2| via |function|.
// NOTE(review): the return status is ignored; if the query fails, t is
// returned uninitialized — callers treat these queries as infallible.
T clGetWrapper(U function, V param, W param2) {
	T t;
	function(param, param2, sizeof(t), &t, NULL);
	return t;
}
377,187
0
// Main entry point after device setup: initializes all devices, then starts
// one dispatch chain per device and blocks until a device signals
// m_eventFinished (set when the quit condition is reached).
void Dispatcher::run() {
	m_eventFinished = clCreateUserEvent(m_clContext, NULL);
	timeStart = std::chrono::steady_clock::now();

	init();

	const auto timeInitialization = std::chrono::duration_cast<std::chrono::seconds>(std::chrono::steady_clock::now() - timeStart).count();
	std::cout << "Initialization time: " << timeInitialization << " seconds" << std::endl;

	m_quit = false;
	m_countRunning = m_vDevices.size();

	std::cout << "Running..." << std::endl;
	std::cout << "  Always verify that a private key generated by this program corresponds to the" << std::endl;
	std::cout << "  public key printed by importing it to a wallet of your choice. This program" << std::endl;
	std::cout << "  like any software might contain bugs and it does by design cut corners to" << std::endl;
	std::cout << "  improve overall performance." << std::endl;
	std::cout << std::endl;

	// Kick off the asynchronous dispatch loop on every device; progress is
	// driven by OpenCL event callbacks from here on.
	for (auto it = m_vDevices.begin(); it != m_vDevices.end(); ++it) {
		dispatch(*(*it));
	}

	// Block until some device completes the user event, then release it.
	clWaitForEvents(1, &m_eventFinished);
	clReleaseEvent(m_eventFinished);
	m_eventFinished = NULL;
}
377,188
0
// Construct a dispatcher over a compiled program/context.
//   worksizeMax     - upper bound on a single NDRange enqueue
//   inverseSize     - batch size of the modular-inverse kernel
//   inverseMultiple - number of such batches (m_size = size * multiple)
//   clScoreQuit     - score at which the search stops (0 = never)
Dispatcher::Dispatcher(cl_context & clContext, cl_program & clProgram, const Mode mode, const size_t worksizeMax, const size_t inverseSize, const size_t inverseMultiple, const cl_uchar clScoreQuit)
	: m_clContext(clContext),
	m_clProgram(clProgram),
	m_mode(mode),
	m_worksizeMax(worksizeMax),
	m_inverseSize(inverseSize),
	m_size(inverseSize*inverseMultiple),
	m_clScoreMax(mode.score),
	m_clScoreQuit(clScoreQuit),
	m_eventFinished(NULL),
	m_countPrint(0) {

}
377,189
0
// Format a hashrate with a metric prefix, e.g. 1234567.0 -> "1.235 MH/s".
// S holds the prefixes: ' ' (none), K, M, G, T.
std::string Dispatcher::formatSpeed(double f) {
	const std::string S = " KMGT";

	unsigned int index = 0;
	// Stop at the last available prefix so S[index] is always a real
	// character; the original bound (index < S.size()) allowed index to
	// reach S.size() and read the string's terminating NUL, printing no
	// unit at all for speeds beyond the 'T' range.
	while (f > 1000.0f && index < S.size() - 1) {
		f /= 1000.0f;
		++index;
	}

	std::ostringstream ss;
	ss << std::fixed << std::setprecision(3) << (double)f << " " << S[index] << "H/s";
	return ss.str();
}
377,191
0
// Generic helper for string-valued clGet*Info queries: first ask for the
// required length, then fetch the data into a temporary buffer.
// NOTE(review): |len| includes the terminating NUL, which the std::string
// constructor strips; query failures are not checked — confirm callers
// only use this on known-good parameters.
std::string clGetWrapperString(U function, V param, W param2) {
	size_t len;
	function(param, param2, 0, NULL, &len);

	char * const szString = new char[len];
	function(param, param2, len, szString, NULL);

	std::string r(szString);
	delete[] szString;
	return r;
}
377,193
0
// Print the aggregated hashrate once every full round of device callbacks:
// m_countPrint is incremented per call and the line is emitted (and the
// counter reset) only after every device has reported at least once.
// Fix: removed the local counter `i`, which was incremented but never read.
void Dispatcher::printSpeed() {
	++m_countPrint;
	if( m_countPrint > m_vDevices.size() ) {
		std::string strGPUs;
		double speedTotal = 0;
		for (auto & e : m_vDevices) {
			const auto curSpeed = e->m_speed.getSpeed();
			speedTotal += curSpeed;
			strGPUs += " GPU" + toString(e->m_index) + ": " + formatSpeed(curSpeed);
		}

		// VT100: clear the current line and rewrite it in place.
		const std::string strVT100ClearLine = "\33[2K\r";
		std::cerr << strVT100ClearLine << "Total: " << formatSpeed(speedTotal) << " -" << strGPUs << '\r' << std::flush;
		m_countPrint = 0;
	}
}
377,194
0
// Slurp an entire file (binary-safe) into a string. A file that cannot be
// opened yields an empty string; no error is reported.
std::string readFile(const char * const szFilename)
{
	std::ifstream in(szFilename, std::ios::in | std::ios::binary);
	return std::string(std::istreambuf_iterator<char>(in),
	                   std::istreambuf_iterator<char>());
}
377,195
0
// Build the on-disk name for a device's precompiled kernel binary. The name
// encodes both the kernel configuration (inverse size) and a device-unique
// id so cached binaries are never reused across devices or configurations.
std::string getDeviceCacheFilename(cl_device_id & d, const size_t & inverseSize)
{
	std::string strName = "cache-opencl.";
	strName += toString(inverseSize);
	strName += ".";
	strName += toString(getUniqueDeviceIdentifier(d));
	return strName;
}
377,196
0
// Derive a stable per-device identifier from the PCI location, used to key
// the kernel binary cache.
unsigned int getUniqueDeviceIdentifier(const cl_device_id & deviceId) {
#if defined(CL_DEVICE_TOPOLOGY_AMD)
	// Prefer AMD's PCIe topology extension when the headers expose it.
	auto topology = clGetWrapper<cl_device_topology_amd>(clGetDeviceInfo, deviceId, CL_DEVICE_TOPOLOGY_AMD);

	if (topology.raw.type == CL_DEVICE_TOPOLOGY_TYPE_PCIE_AMD) {
		return (topology.pcie.bus << 16) + (topology.pcie.device << 8) + topology.pcie.function;
	}
#endif
	// Fall back to NVIDIA's PCI bus/slot queries.
	// NOTE(review): on devices supporting neither vendor extension these
	// queries fail and leave the values unspecified — confirm acceptable.
	cl_int bus_id = clGetWrapper<cl_int>(clGetDeviceInfo, deviceId, CL_DEVICE_PCI_BUS_ID_NV);
	cl_int slot_id = clGetWrapper<cl_int>(clGetDeviceInfo, deviceId, CL_DEVICE_PCI_SLOT_ID_NV);
	return (bus_id << 16) + slot_id;
}
377,197
0
// Enqueue one full iteration of the search pipeline on a device:
// read back the previous results, then run inverse -> iterate ->
// (optional transform) -> score. Completion of the result read triggers
// staticCallback, which re-enters dispatch() for the next round.
void Dispatcher::dispatch(Device & d) {
	cl_event event;
	// Non-blocking read of the score results from the previous round.
	d.m_memResult.read(false, &event);

#ifdef PROFANITY_DEBUG
	cl_event eventInverse;
	cl_event eventIterate;

	enqueueKernelDevice(d, d.m_kernelInverse, m_size / m_inverseSize, &eventInverse);
	enqueueKernelDevice(d, d.m_kernelIterate, m_size, &eventIterate);
#else
	enqueueKernelDevice(d, d.m_kernelInverse, m_size / m_inverseSize);
	enqueueKernelDevice(d, d.m_kernelIterate, m_size);
#endif

	// The transform kernel is only present for some modes (e.g. contract).
	if (d.m_kernelTransform) {
		enqueueKernelDevice(d, d.m_kernelTransform, m_size);
	}

	enqueueKernelDevice(d, d.m_kernelScore, m_size);
	clFlush(d.m_clQueue);

#ifdef PROFANITY_DEBUG
	// We're actually not allowed to call clFinish here because this function is ultimately asynchronously called by OpenCL.
	// However, this happens to work on my computer and it's not really intended for release, just something to aid me in
	// optimizations.
	clFinish(d.m_clQueue);
	std::cout << "Timing: profanity_inverse = " << getKernelExecutionTimeMicros(eventInverse) << "us, profanity_iterate = " << getKernelExecutionTimeMicros(eventIterate) << "us" << std::endl;
#endif

	// Chain the next round off the result-read completion.
	const auto res = clSetEventCallback(event, CL_COMPLETE, staticCallback, &d);
	OpenCLException::throwIfError("failed to set custom callback", res);
}
377,198
0
// Wrap an OpenCL status code in a runtime_error whose message embeds the
// numeric code, e.g. "kernel queueing failed (res = -54)".
Dispatcher::OpenCLException::OpenCLException(const std::string s, const cl_int res) :
	std::runtime_error( s + " (res = " + toString(res) + ")"),
	m_res(res)
{

}
377,199
0
// Generic helper for array-valued clGet*Info queries: ask for the payload
// size in bytes, then fetch the elements. Query failures are not checked.
std::vector<T> clGetWrapperVector(U function, V param, W param2) {
	size_t bytes;
	function(param, param2, 0, NULL, &bytes);

	const size_t count = bytes / sizeof(T);
	std::vector<T> result;
	if (count > 0) {
		result.resize(count);
		function(param, param2, count * sizeof(T), result.data(), NULL);
	}
	return result;
}
377,200
0
// Continue asynchronous per-device initialization: run the init kernel in
// slices (at most 5% of m_size, bounded by m_worksizeMax) and chain the
// next slice via an event callback until the whole range is covered.
void Dispatcher::initContinue(Device & d) {
	size_t sizeLeft = m_size - d.m_sizeInitialized;
	const size_t sizeInitLimit = m_size / 20;

	// Print progress
	// NOTE(review): progress counters are read here before taking m_mutex;
	// a slightly stale percentage is presumably acceptable — confirm.
	const size_t percentDone = m_sizeInitDone * 100 / m_sizeInitTotal;
	std::cout << " " << percentDone << "%\r" << std::flush;

	if (sizeLeft) {
		cl_event event;
		const size_t sizeRun = std::min(sizeInitLimit, std::min(sizeLeft, m_worksizeMax));
		const auto resEnqueue = clEnqueueNDRangeKernel(d.m_clQueue, d.m_kernelInit, 1, &d.m_sizeInitialized, &sizeRun, NULL, 0, NULL, &event);
		OpenCLException::throwIfError("kernel queueing failed during initilization", resEnqueue);

		// See: https://www.khronos.org/registry/OpenCL/sdk/1.2/docs/man/xhtml/clSetEventCallback.html
		// If an application needs to wait for completion of a routine from the above list in a callback, please use the non-blocking form of the function, and
		// assign a completion callback to it to do the remainder of your work. Note that when a callback (or other code) enqueues commands to a command-queue,
		// the commands are not required to begin execution until the queue is flushed. In standard usage, blocking enqueue calls serve this role by implicitly
		// flushing the queue. Since blocking calls are not permitted in callbacks, those callbacks that enqueue commands on a command queue should either call
		// clFlush on the queue before returning or arrange for clFlush to be called later on another thread.
		clFlush(d.m_clQueue);

		// Update shared progress counters under the dispatcher lock.
		std::lock_guard<std::mutex> lock(m_mutex);
		d.m_sizeInitialized += sizeRun;
		m_sizeInitDone += sizeRun;

		const auto resCallback = clSetEventCallback(event, CL_COMPLETE, staticCallback, &d);
		OpenCLException::throwIfError("failed to set custom callback during initialization", resCallback);
	} else {
		// Printing one whole string at once helps in avoiding garbled output when executed in parallell
		const std::string strOutput = " GPU" + toString(d.m_index) + " initialized";
		std::cout << strOutput << std::endl;
		// Signal this device's init-complete user event.
		clSetUserEventStatus(d.m_eventFinished, CL_COMPLETE);
	}
}
377,201
0
// Enumerate every OpenCL device of the requested type (GPUs by default)
// across all available platforms, in platform order.
std::vector<cl_device_id> getAllDevices(cl_device_type deviceType = CL_DEVICE_TYPE_GPU)
{
	// Discover the platforms first.
	cl_uint platformIdCount = 0;
	clGetPlatformIDs(0, NULL, &platformIdCount);
	std::vector<cl_platform_id> platformIds(platformIdCount);
	clGetPlatformIDs(platformIdCount, platformIds.data(), NULL);

	// Collect matching devices from each platform.
	std::vector<cl_device_id> vDevices;
	for (const auto & platformId : platformIds) {
		cl_uint countDevice = 0;
		clGetDeviceIDs(platformId, deviceType, 0, NULL, &countDevice);

		std::vector<cl_device_id> deviceIds(countDevice);
		clGetDeviceIDs(platformId, deviceType, countDevice, deviceIds.data(), &countDevice);

		vDevices.insert(vDevices.end(), deviceIds.begin(), deviceIds.end());
	}

	return vDevices;
}
377,202
0
// Print a found match: elapsed time, score, the reconstructed 256-bit
// private key (seed + round + foundId as a 4-limb little-endian add) and
// the resulting address/contract hash.
static void printResult(cl_ulong4 seed, cl_ulong round, result r, cl_uchar score, const std::chrono::time_point<std::chrono::steady_clock> & timeStart, const Mode & mode) {
	// Time delta
	const auto seconds = std::chrono::duration_cast<std::chrono::seconds>(std::chrono::steady_clock::now() - timeStart).count();

	// Format private key: 256-bit addition seed + round (+ foundId into the
	// top limb), propagating carries limb by limb.
	cl_ulong carry = 0;
	cl_ulong4 seedRes;

	seedRes.s[0] = seed.s[0] + round; carry = seedRes.s[0] < round;
	// FIX: a carry out of limb i occurs iff the sum wrapped, i.e. the result
	// is smaller than the carry added in. The previous test (!seedRes.s[i])
	// also raised a carry whenever the limb happened to be zero WITHOUT an
	// overflow (e.g. seed limb 0, carry 0), corrupting the printed key.
	seedRes.s[1] = seed.s[1] + carry; carry = seedRes.s[1] < carry;
	seedRes.s[2] = seed.s[2] + carry; carry = seedRes.s[2] < carry;
	seedRes.s[3] = seed.s[3] + carry + r.foundId;

	std::ostringstream ss;
	ss << std::hex << std::setfill('0');
	ss << std::setw(16) << seedRes.s[3] << std::setw(16) << seedRes.s[2] << std::setw(16) << seedRes.s[1] << std::setw(16) << seedRes.s[0];
	const std::string strPrivate = ss.str();

	// Format public key
	const std::string strPublic = toHex(r.foundHash, 20);

	// Print
	const std::string strVT100ClearLine = "\33[2K\r";
	std::cout << strVT100ClearLine << "  Time: " << std::setw(5) << seconds << "s Score: " << std::setw(2) << (int) score << " Private: 0x" << strPrivate << ' ';

	std::cout << mode.transformName();

	std::cout << ": 0x" << strPublic << std::endl;
}
377,203
0
// Print "OK" when the returned OpenCL handle is valid, otherwise the error
// code. Returns true on failure (NULL handle) so callers can bail out.
template <typename T> bool printResult(const T & t, const cl_int & err) {
	const bool failed = (t == NULL);
	if (failed) {
		std::cout << toString(err) << std::endl;
	} else {
		std::cout << "OK" << std::endl;
	}
	return failed;
}
377,207
0
// Trampoline registered with clSetEventCallback: recover the Device from
// user_data, forward to the owning Dispatcher, and release the event that
// was retained for this callback.
void CL_CALLBACK Dispatcher::staticCallback(cl_event event, cl_int event_command_exec_status, void * user_data) {
	Device * const pDevice = static_cast<Device *>(user_data);
	pDevice->m_parent.onEvent(event, event_command_exec_status, *pDevice);
	clReleaseEvent(event);
}
377,208
0
// Program entry point: parse the command line, pick the search Mode,
// enumerate GPUs, build (or load a cached) OpenCL program, then hand the
// devices to a Dispatcher and run the search until the quit condition.
int main(int argc, char * * argv) {
	// THIS LINE WILL LEAD TO A COMPILE ERROR. THIS TOOL SHOULD NOT BE USED, SEE README.
	try {
		ArgParser argp(argc, argv);
		bool bHelp = false;
		bool bModeBenchmark = false;
		bool bModeZeros = false;
		bool bModeLetters = false;
		bool bModeNumbers = false;
		std::string strModeLeading;
		std::string strModeMatching;
		bool bModeLeadingRange = false;
		bool bModeRange = false;
		bool bModeMirror = false;
		bool bModeDoubles = false;
		int rangeMin = 0;
		int rangeMax = 0;
		std::vector<size_t> vDeviceSkipIndex;
		size_t worksizeLocal = 64;
		size_t worksizeMax = 0; // Will be automatically determined later if not overriden by user
		bool bNoCache = false;
		size_t inverseSize = 255;
		size_t inverseMultiple = 16384;
		bool bMineContract = false;

		// Register every supported switch with the parser.
		argp.addSwitch('h', "help", bHelp);
		argp.addSwitch('0', "benchmark", bModeBenchmark);
		argp.addSwitch('1', "zeros", bModeZeros);
		argp.addSwitch('2', "letters", bModeLetters);
		argp.addSwitch('3', "numbers", bModeNumbers);
		argp.addSwitch('4', "leading", strModeLeading);
		argp.addSwitch('5', "matching", strModeMatching);
		argp.addSwitch('6', "leading-range", bModeLeadingRange);
		argp.addSwitch('7', "range", bModeRange);
		argp.addSwitch('8', "mirror", bModeMirror);
		argp.addSwitch('9', "leading-doubles", bModeDoubles);
		argp.addSwitch('m', "min", rangeMin);
		argp.addSwitch('M', "max", rangeMax);
		argp.addMultiSwitch('s', "skip", vDeviceSkipIndex);
		argp.addSwitch('w', "work", worksizeLocal);
		argp.addSwitch('W', "work-max", worksizeMax);
		argp.addSwitch('n', "no-cache", bNoCache);
		argp.addSwitch('i', "inverse-size", inverseSize);
		argp.addSwitch('I', "inverse-multiple", inverseMultiple);
		argp.addSwitch('c', "contract", bMineContract);

		if (!argp.parse()) {
			std::cout << "error: bad arguments, try again :<" << std::endl;
			return 1;
		}

		if (bHelp) {
			std::cout << g_strHelp << std::endl;
			return 0;
		}

		// Select the scoring mode; the first matching flag wins, no flag
		// at all prints the help text.
		Mode mode = Mode::benchmark();
		if (bModeBenchmark) {
			mode = Mode::benchmark();
		} else if (bModeZeros) {
			mode = Mode::zeros();
		} else if (bModeLetters) {
			mode = Mode::letters();
		} else if (bModeNumbers) {
			mode = Mode::numbers();
		} else if (!strModeLeading.empty()) {
			mode = Mode::leading(strModeLeading.front());
		} else if (!strModeMatching.empty()) {
			mode = Mode::matching(strModeMatching);
		} else if (bModeLeadingRange) {
			mode = Mode::leadingRange(rangeMin, rangeMax);
		} else if (bModeRange) {
			mode = Mode::range(rangeMin, rangeMax);
		} else if(bModeMirror) {
			mode = Mode::mirror();
		} else if (bModeDoubles) {
			mode = Mode::doubles();
		} else {
			std::cout << g_strHelp << std::endl;
			return 0;
		}

		std::cout << "Mode: " << mode.name << std::endl;

		if (bMineContract) {
			mode.target = CONTRACT;
		} else {
			mode.target = ADDRESS;
		}
		std::cout << "Target: " << mode.transformName() << std:: endl;

		std::vector<cl_device_id> vFoundDevices = getAllDevices();
		std::vector<cl_device_id> vDevices;
		std::map<cl_device_id, size_t> mDeviceIndex;

		std::vector<std::string> vDeviceBinary;
		std::vector<size_t> vDeviceBinarySize;
		cl_int errorCode;
		bool bUsedCache = false;

		std::cout << "Devices:" << std::endl;
		for (size_t i = 0; i < vFoundDevices.size(); ++i) {
			// Ignore devices in skip index
			if (std::find(vDeviceSkipIndex.begin(), vDeviceSkipIndex.end(), i) != vDeviceSkipIndex.end()) {
				continue;
			}

			cl_device_id & deviceId = vFoundDevices[i];

			const auto strName = clGetWrapperString(clGetDeviceInfo, deviceId, CL_DEVICE_NAME);
			const auto computeUnits = clGetWrapper<cl_uint>(clGetDeviceInfo, deviceId, CL_DEVICE_MAX_COMPUTE_UNITS);
			const auto globalMemSize = clGetWrapper<cl_ulong>(clGetDeviceInfo, deviceId, CL_DEVICE_GLOBAL_MEM_SIZE);
			bool precompiled = false;

			// Check if there's a prebuilt binary for this device and load it
			if(!bNoCache) {
				std::ifstream fileIn(getDeviceCacheFilename(deviceId, inverseSize), std::ios::binary);
				if (fileIn.is_open()) {
					vDeviceBinary.push_back(std::string((std::istreambuf_iterator<char>(fileIn)), std::istreambuf_iterator<char>()));
					vDeviceBinarySize.push_back(vDeviceBinary.back().size());
					precompiled = true;
				}
			}

			std::cout << " GPU" << i << ": " << strName << ", " << globalMemSize << " bytes available, " << computeUnits << " compute units (precompiled = " << (precompiled ? "yes" : "no") << ")" << std::endl;
			vDevices.push_back(vFoundDevices[i]);
			mDeviceIndex[vFoundDevices[i]] = i;
		}

		if (vDevices.empty()) {
			return 1;
		}

		std::cout << std::endl;
		std::cout << "Initializing OpenCL..." << std::endl;
		std::cout << " Creating context..." << std::flush;
		auto clContext = clCreateContext( NULL, vDevices.size(), vDevices.data(), NULL, NULL, &errorCode);
		if (printResult(clContext, errorCode)) {
			return 1;
		}

		cl_program clProgram;
		if (vDeviceBinary.size() == vDevices.size()) {
			// Create program from binaries
			bUsedCache = true;

			std::cout << " Loading kernel from binary..." << std::flush;
			const unsigned char * * pKernels = new const unsigned char *[vDevices.size()];
			for (size_t i = 0; i < vDeviceBinary.size(); ++i) {
				pKernels[i] = reinterpret_cast<const unsigned char *>(vDeviceBinary[i].data());
			}

			cl_int * pStatus = new cl_int[vDevices.size()];

			clProgram = clCreateProgramWithBinary(clContext, vDevices.size(), vDevices.data(), vDeviceBinarySize.data(), pKernels, pStatus, &errorCode);
			if(printResult(clProgram, errorCode)) {
				return 1;
			}
		} else {
			// Create a program from the kernel source
			std::cout << " Compiling kernel..." << std::flush;
			const std::string strKeccak = readFile("keccak.cl");
			const std::string strVanity = readFile("profanity.cl");
			const char * szKernels[] = { strKeccak.c_str(), strVanity.c_str() };

			clProgram = clCreateProgramWithSource(clContext, sizeof(szKernels) / sizeof(char *), szKernels, NULL, &errorCode);
			if (printResult(clProgram, errorCode)) {
				return 1;
			}
		}

		// Build the program
		std::cout << " Building program..." << std::flush;
		const std::string strBuildOptions = "-D PROFANITY_INVERSE_SIZE=" + toString(inverseSize) + " -D PROFANITY_MAX_SCORE=" + toString(PROFANITY_MAX_SCORE);
		if (printResult(clBuildProgram(clProgram, vDevices.size(), vDevices.data(), strBuildOptions.c_str(), NULL, NULL))) {
#ifdef PROFANITY_DEBUG
			std::cout << std::endl;
			std::cout << "build log:" << std::endl;

			size_t sizeLog;
			clGetProgramBuildInfo(clProgram, vDevices[0], CL_PROGRAM_BUILD_LOG, 0, NULL, &sizeLog);
			char * const szLog = new char[sizeLog];
			clGetProgramBuildInfo(clProgram, vDevices[0], CL_PROGRAM_BUILD_LOG, sizeLog, szLog, NULL);

			std::cout << szLog << std::endl;
			delete[] szLog;
#endif
			return 1;
		}

		// Save binary to improve future start times
		if( !bUsedCache && !bNoCache ) {
			std::cout << " Saving program..." << std::flush;
			auto binaries = getBinaries(clProgram);
			for (size_t i = 0; i < binaries.size(); ++i) {
				std::ofstream fileOut(getDeviceCacheFilename(vDevices[i], inverseSize), std::ios::binary);
				fileOut.write(binaries[i].data(), binaries[i].size());
			}
			std::cout << "OK" << std::endl;
		}

		std::cout << std::endl;

		// Hand everything to the dispatcher and run the search.
		Dispatcher d(clContext, clProgram, mode, worksizeMax == 0 ? inverseSize * inverseMultiple : worksizeMax, inverseSize, inverseMultiple, 0);
		for (auto & i : vDevices) {
			d.addDevice(i, worksizeLocal, mDeviceIndex[i]);
		}
		d.run();
		clReleaseContext(clContext);
		return 0;
	} catch (std::runtime_error & e) {
		std::cout << "std::runtime_error - " << e.what() << std::endl;
	} catch (...) {
		std::cout << "unknown exception occured" << std::endl;
	}

	return 1;
}
377,209
0
// Per-GPU state: command queue, the five search kernels, and the device
// buffers they operate on. |size| is the dispatcher's total work size;
// |mode| selects the optional transform kernel and the scoring kernel.
Dispatcher::Device::Device(Dispatcher & parent, cl_context & clContext, cl_program & clProgram, cl_device_id clDeviceId, const size_t worksizeLocal, const size_t size, const size_t index, const Mode & mode) :
	m_parent(parent),
	m_index(index),
	m_clDeviceId(clDeviceId),
	m_worksizeLocal(worksizeLocal),
	m_clScoreMax(0),
	m_clQueue(createQueue(clContext, clDeviceId) ),
	m_kernelInit( createKernel(clProgram, "profanity_init") ),
	m_kernelInverse(createKernel(clProgram, "profanity_inverse")),
	m_kernelIterate(createKernel(clProgram, "profanity_iterate")),
	// Some modes have no transform step; then the handle stays NULL.
	m_kernelTransform( mode.transformKernel() == "" ? NULL : createKernel(clProgram, mode.transformKernel())),
	m_kernelScore(createKernel(clProgram, mode.kernel)),
	m_memPrecomp(clContext, m_clQueue, CL_MEM_READ_ONLY | CL_MEM_HOST_WRITE_ONLY, sizeof(g_precomp), g_precomp),
	m_memPointsDeltaX(clContext, m_clQueue, CL_MEM_READ_WRITE | CL_MEM_HOST_NO_ACCESS, size, true),
	m_memInversedNegativeDoubleGy(clContext, m_clQueue, CL_MEM_READ_WRITE | CL_MEM_HOST_NO_ACCESS, size, true),
	m_memPrevLambda(clContext, m_clQueue, CL_MEM_READ_WRITE | CL_MEM_HOST_NO_ACCESS, size, true),
	m_memResult(clContext, m_clQueue, CL_MEM_READ_WRITE | CL_MEM_HOST_READ_ONLY, PROFANITY_MAX_SCORE + 1),
	m_memData1(clContext, m_clQueue, CL_MEM_READ_ONLY | CL_MEM_HOST_WRITE_ONLY, 20),
	m_memData2(clContext, m_clQueue, CL_MEM_READ_ONLY | CL_MEM_HOST_WRITE_ONLY, 20),
	m_clSeed(createSeed()),
	m_round(0),
	m_speed(PROFANITY_SPEEDSAMPLES),
	m_sizeInitialized(0),
	m_eventFinished(NULL)
{

}
377,210
0
// Intentionally empty.
// NOTE(review): the Device objects allocated in addDevice() are not freed
// here, so they leak at shutdown — confirm intentional for this tool.
Dispatcher::~Dispatcher() {

}
377,211
0
// Enqueue a 1-D kernel over worksizeGlobal items, splitting the range into
// chunks of at most m_worksizeMax per clEnqueueNDRangeKernel call.
// worksizeLocal == 0 lets the implementation pick the local size. Only the
// last chunk's completion event is reported via pEvent.
void Dispatcher::enqueueKernel(cl_command_queue & clQueue, cl_kernel & clKernel, size_t worksizeGlobal, const size_t worksizeLocal, cl_event * pEvent = NULL) {
	const size_t worksizeMax = m_worksizeMax;
	size_t worksizeOffset = 0;
	while (worksizeGlobal) {
		const size_t worksizeRun = std::min(worksizeGlobal, worksizeMax);
		const size_t * const pWorksizeLocal = (worksizeLocal == 0 ? NULL : &worksizeLocal);

		const auto res = clEnqueueNDRangeKernel(clQueue, clKernel, 1, &worksizeOffset, &worksizeRun, pWorksizeLocal, 0, NULL, pEvent);
		OpenCLException::throwIfError("kernel queueing failed", res);

		worksizeGlobal -= worksizeRun;
		worksizeOffset += worksizeRun;
	}
}
377,212
0
// Render |len| bytes of |s| as a lowercase hexadecimal string, e.g.
// {0x00, 0x1f} -> "001fab". Replaces the previous per-nibble substr()
// concatenation (two temporary strings per byte) with direct character
// appends into a pre-reserved buffer.
static std::string toHex(const uint8_t * const s, const size_t len) {
	static const char hexDigits[] = "0123456789abcdef";
	std::string r;
	r.reserve(len * 2);
	for (size_t i = 0; i < len; ++i) {
		r.push_back(hexDigits[s[i] >> 4]);
		r.push_back(hexDigits[s[i] & 0x0f]);
	}
	return r;
}
377,213
0
// Create the named kernel from the compiled program; clCreateKernel returns
// NULL on failure, which we surface as a runtime_error naming the kernel.
cl_kernel Dispatcher::Device::createKernel(cl_program & clProgram, const std::string s) {
	cl_kernel hKernel = clCreateKernel(clProgram, s.c_str(), NULL);
	if (hKernel == NULL) {
		throw std::runtime_error("failed to create kernel \"" + s + "\"");
	}
	return hKernel;
}
377,214
0
// Convert a non-successful OpenCL status code into an OpenCLException
// carrying |s| as context; a CL_SUCCESS result is a no-op.
// (The doubled OpenCLException:: qualification is legal via the
// injected-class-name and preserved to keep the definition matching.)
void Dispatcher::OpenCLException::OpenCLException::throwIfError(const std::string s, const cl_int res) {
	if (res == CL_SUCCESS) {
		return;
	}
	throw OpenCLException(s, res);
}
377,215
0
/*
 * Build the 8-bit -> 16-bit gamma-correction lookup table used when
 * expanding GIF palette entries into the TIFF colormap: gamtab[i] =
 * IMAX * (i/255)^gam, rounded to nearest. (Implicit int return, K&R era.)
 */
makegamtab(float gam)
{
    int i;

    for (i = 0; i < 256; i++)
	gamtab[i] = (unsigned short) (IMAX*pow(i/255.0,gam)+0.5);
}
377,216
0
/*
 * Decode the LZW-compressed GIF raster data from |infile| into the global
 * |raster| buffer (width*height bytes). Reads the initial code size, seeds
 * the code tables, then consumes sub-blocks, extracting variable-width
 * codes and handing each to process(). Returns 1 on success, 0 on a
 * decode error (bad code size or process() failure).
 */
readraster(void)
{
	unsigned char *fill = raster;
	unsigned char buf[255];
	register int bits=0;
	register unsigned long datum=0;
	register unsigned char *ch;
	register int count, code;
	int status = 1;

	datasize = getc(infile);
	/* Reject absurd code sizes; valid GIF initial code size is <= 12. */
	if (datasize > 12)
		return 0;
	clear = 1 << datasize;
	eoi = clear + 1;
	avail = clear + 2;
	oldcode = -1;
	codesize = datasize + 1;
	codemask = (1 << codesize) - 1;
	/* Seed the dictionary with the single-byte root codes. */
	for (code = 0; code < clear; code++) {
	    prefix[code] = 0;
	    suffix[code] = code;
	}
	stackp = stack;
	/* Each sub-block is <count> bytes; a zero count terminates the data. */
	for (count = getc(infile); count > 0; count = getc(infile)) {
	    fread(buf,1,count,infile);
	    for (ch=buf; count-- > 0; ch++) {
		/* Accumulate bits little-endian and peel off whole codes. */
		datum += (unsigned long) *ch << bits;
		bits += 8;
		while (bits >= codesize) {
		    code = datum & codemask;
		    datum >>= codesize;
		    bits -= codesize;
		    if (code == eoi) {               /* This kludge put in */
			goto exitloop;               /* because some GIF files*/
		    }                                /* aren't standard */
		    if (!process(code, &fill)) {
			status = 0;
			goto exitloop;
		    }
		}
	    }
	    if (fill >= raster + width*height) {
		fprintf(stderr, "raster full before eoi code\n");
		break;
	    }
	}
exitloop:
	if (fill != raster + width*height) {
	    fprintf(stderr, "warning: wrong rastersize: %ld bytes\n",
					      (long) (fill-raster));
	    fprintf(stderr, " instead of %ld bytes\n",
					      (long) width*height);
	}
	return status;
}
377,218
0
/*
 * Expand the 8-bit GIF palette through the gamma lookup table into the
 * 16-bit red/green/blue arrays later written as the TIFF colormap.
 */
initcolors(unsigned char colormap[COLSIZE][3], int ncolors)
{
    register int i;

    for (i = 0; i < ncolors; i++) {
	red[i]   = gamtab[colormap[i][0]];
	green[i] = gamtab[colormap[i][1]];
	blue[i]  = gamtab[colormap[i][2]];
    }
}
377,220
0
/*
 * Parse the -c compression argument ("none", "packbits", "lzw[:pred]",
 * "zip[:pred]") into the global compression / predictor settings.
 * Returns 1 on success, 0 for an unrecognized scheme.
 */
processCompressOptions(char* opt)
{
	char* cp;

	if (streq(opt, "none")) {
		compression = COMPRESSION_NONE;
	} else if (streq(opt, "packbits")) {
		compression = COMPRESSION_PACKBITS;
	} else if (strneq(opt, "lzw", 3)) {
		/* An optional ":N" suffix selects the predictor. */
		cp = strchr(opt, ':');
		if (cp)
			predictor = atoi(cp+1);
		compression = COMPRESSION_LZW;
	} else if (strneq(opt, "zip", 3)) {
		cp = strchr(opt, ':');
		if (cp)
			predictor = atoi(cp+1);
		compression = COMPRESSION_DEFLATE;
	} else {
		return (0);
	}
	return (1);
}
377,221
0
/*
 * Drive the GIF -> TIFF conversion: verify the signature, read the screen
 * descriptor, then walk the block stream until the ';' trailer. Image
 * blocks (',') are converted (the first opens the TIFF for writing,
 * subsequent ones append); extension blocks ('!') are skipped.
 * Returns 0 on success, -1 on error.
 */
convert(void)
{
	int ch;
	char* mode = "w";

	if (!checksignature())
		return (-1);
	readscreen();
	while ((ch = getc(infile)) != ';' && ch != EOF) {
	    switch (ch) {
		case '\0':
			break;	/* this kludge for non-standard files */
		case ',':
			if (!readgifimage(mode))
				return (-1);
			mode = "a";	/* subsequent images append */
			break;
		case '!':
			readextension();
			break;
		default:
			fprintf(stderr, "illegal GIF block type\n");
			return (-1);
	    }
	}
	return (0);
}
377,222
0
/*
 * De-interlace the decoded GIF raster (if needed) and write it out as a
 * palette TIFF. Interlaced GIFs store rows in four passes (offsets
 * 0/4/2/1 with steps 8/8/4/2); DRAWSEGMENT copies each pass into place.
 * Exits the process if the output file cannot be opened.
 */
rasterize(int interleaved, char* mode)
{
	register unsigned long row;
	unsigned char *newras;
	unsigned char *ras;
	TIFF *tif;
	tstrip_t strip;
	tsize_t stripsize;

	if ((newras = (unsigned char*) _TIFFmalloc(width*height+EXTRAFUDGE)) == NULL) {
		fprintf(stderr, "not enough memory for image\n");
		return;
	}
#define	DRAWSEGMENT(offset, step) {			\
	for (row = offset; row < height; row += step) {	\
		_TIFFmemcpy(newras + row*width, ras, width);\
		ras += width; }				\
	}
	ras = raster;
	if (interleaved) {
		DRAWSEGMENT(0, 8);
		DRAWSEGMENT(4, 8);
		DRAWSEGMENT(2, 4);
		DRAWSEGMENT(1, 2);
	} else
		DRAWSEGMENT(0, 1);
#undef DRAWSEGMENT

	tif = TIFFOpen(imagename, mode);
	if (!tif) {
		TIFFError(imagename,"Can not open output image");
		exit(-1);
	}
	TIFFSetField(tif, TIFFTAG_IMAGEWIDTH, (uint32) width);
	TIFFSetField(tif, TIFFTAG_IMAGELENGTH, (uint32) height);
	TIFFSetField(tif, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_PALETTE);
	TIFFSetField(tif, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG);
	TIFFSetField(tif, TIFFTAG_SAMPLESPERPIXEL, 1);
	TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 8);
	TIFFSetField(tif, TIFFTAG_ROWSPERSTRIP,
	    rowsperstrip = TIFFDefaultStripSize(tif, rowsperstrip));
	TIFFSetField(tif, TIFFTAG_COMPRESSION, compression);
	switch (compression) {
	case COMPRESSION_LZW:
	case COMPRESSION_DEFLATE:
		if (predictor != 0)
			TIFFSetField(tif, TIFFTAG_PREDICTOR, predictor);
		break;
	}
	TIFFSetField(tif, TIFFTAG_COLORMAP, red, green, blue);
	TIFFSetField(tif, TIFFTAG_ORIENTATION, ORIENTATION_TOPLEFT);
	strip = 0;
	stripsize = TIFFStripSize(tif);
	/* Write strip by strip; the last strip may be shorter. */
	for (row=0; row<height; row += rowsperstrip) {
		if (rowsperstrip > height-row) {
			rowsperstrip = height-row;
			stripsize = TIFFVStripSize(tif, rowsperstrip);
		}
		if (TIFFWriteEncodedStrip(tif, strip, newras+row*width, stripsize) < 0)
			break;
		strip++;
	}
	TIFFClose(tif);

	_TIFFfree(newras);
}
377,224
0
/*
 * gif2tiff entry point: parse -c (compression) / -r (rows per strip),
 * expect exactly two positional arguments (input GIF, output TIFF),
 * prompt before overwriting an existing output file, then convert.
 */
main(int argc, char* argv[])
{
	extern int optind;
	extern char *optarg;
	int c, status;

	while ((c = getopt(argc, argv, "c:r:")) != -1)
		switch (c) {
		case 'c':		/* compression scheme */
			if (!processCompressOptions(optarg))
				usage();
			break;
		case 'r':		/* rows/strip */
			rowsperstrip = atoi(optarg);
			break;
		case '?':
			usage();
			/*NOTREACHED*/
		}
	if (argc - optind != 2)
		usage();
	makegamtab(GIFGAMMA);
	filename = argv[optind];
	imagename = argv[optind+1];
	/* Refuse to clobber an existing output file without confirmation. */
	if ((infile = fopen(imagename, "rb")) != NULL) {
		int c;
		fclose(infile);
		printf("overwrite %s? ", imagename); fflush(stdout);
		c = getc(stdin);
		if (c != 'y' && c != 'Y')
			return (1);
	}
	if ((infile = fopen(filename, "rb")) == NULL) {
		perror(filename);
		return (1);
	}
	status = convert();
	fclose(infile);
	return (status);
}
377,225
0
/*
 * Validate the 6-byte "GIF87a" signature at the start of |infile|.
 * Returns 1 when valid, 0 otherwise.
 * Fix: the fread() return value was previously ignored, so a short read
 * (truncated file) left buf partly uninitialized before the strncmp
 * comparisons — undefined behavior. A short read is now treated as
 * "not a GIF file".
 */
checksignature(void)
{
	char buf[6];

	if (fread(buf, 1, 6, infile) != 6) {
	    fprintf(stderr, "file is not a GIF file\n");
	    return 0;
	}
	if (strncmp(buf,"GIF",3)) {
	    fprintf(stderr, "file is not a GIF file\n");
	    return 0;
	}
	if (strncmp(&buf[3],"87a",3)) {
	    fprintf(stderr, "unknown GIF version number\n");
	    return 0;
	}
	return 1;
}
377,226
0
readgifimage(char* mode) { unsigned char buf[9]; int local, interleaved; unsigned char localmap[256][3]; int localbits; int status; if (fread(buf, 1, 9, infile) == 0) { perror(filename); return (0); } width = buf[4] + (buf[5] << 8); height = buf[6] + (buf[7] << 8); local = buf[8] & 0x80; interleaved = buf[8] & 0x40; if (local == 0 && global == 0) { fprintf(stderr, "no colormap present for image\n"); return (0); } if ((raster = (unsigned char*) _TIFFmalloc(width*height+EXTRAFUDGE)) == NULL) { fprintf(stderr, "not enough memory for image\n"); return (0); } if (local) { localbits = (buf[8] & 0x7) + 1; fprintf(stderr, " local colors: %d\n", 1<<localbits); fread(localmap, 3, ((size_t)1)<<localbits, infile); initcolors(localmap, 1<<localbits); } else if (global) { initcolors(globalmap, 1<<globalbits); } if ((status = readraster())) rasterize(interleaved, mode); _TIFFfree(raster); return status; }
377,227
0
// Mark this statement as a "row injection" in the binlog flags, i.e. rows
// not produced by executing the statement text itself (e.g. replicated or
// replayed row events), which affects binary-logging format decisions.
inline void set_stmt_row_injection() {
	DBUG_ENTER("set_stmt_row_injection");
	binlog_stmt_flags|= (1U << BINLOG_STMT_TYPE_ROW_INJECTION);
	DBUG_VOID_RETURN;
}
377,228
0
bool vers_history_generating() const { switch (sql_command) { case SQLCOM_DELETE: return !vers_conditions.delete_history; case SQLCOM_UPDATE: case SQLCOM_UPDATE_MULTI: case SQLCOM_DELETE_MULTI: case SQLCOM_REPLACE: case SQLCOM_REPLACE_SELECT: return true; case SQLCOM_INSERT: case SQLCOM_INSERT_SELECT: return duplicates == DUP_UPDATE; case SQLCOM_LOAD: return duplicates == DUP_REPLACE; default: return false; } }
377,229
0
/*
  Re-attach a parsed SELECT's unit under builtin_select for statements
  that are parsed without pushing a select on the stack (second-type
  statements).  Only acts when the select stack is empty, the select has
  no outer select, and builtin_select has no inner unit yet.
*/
void LEX::relink_hack(st_select_lex *select_lex)
{
  if (select_stack_top)               // Statements of the first type
    return;
  if (select_lex->outer_select() || builtin_select.first_inner_unit())
    return;

  st_select_lex_unit *unit= select_lex->master_unit();
  builtin_select.register_unit(unit, &builtin_select.context);
  builtin_select.add_statistics(unit);
}
377,230
0
/*
  Scan a delimited (quoted) identifier, e.g. `ident`, handling multi-byte
  characters and a doubled quote character used as an escape inside the
  identifier.

  @param thd         current thread (for character set)
  @param str  [out]  receives the identifier text without the quotes
  @param quote_char  the delimiter character that opened the identifier

  @return IDENT_QUOTED on success; on end-of-query (or a raw 0x00) inside
          the identifier the quote character itself is returned so the
          parser fails with a syntax error.
*/
int Lex_input_stream::scan_ident_delimited(THD *thd,
                                           Lex_ident_cli_st *str,
                                           uchar quote_char)
{
  CHARSET_INFO *const cs= thd->charset();
  uchar c;
  DBUG_ASSERT(m_ptr == m_tok_start + 1);

  for ( ; ; )
  {
    if (!(c= yyGet()))
    {
      /*
        End-of-query or straight 0x00 inside a delimited identifier.
        Return the quote character, to have the parser fail on syntax error.
      */
      m_ptr= (char *) m_tok_start + 1;
      if (m_echo)
        m_cpp_ptr= (char *) m_cpp_tok_start + 1;
      return quote_char;
    }
    int var_length= my_charlen(cs, get_ptr() - 1, get_end_of_query());
    if (var_length == 1)
    {
      if (c == quote_char)
      {
        /* An unescaped closing quote ends the identifier; a doubled
           quote is an escaped quote character inside it. */
        if (yyPeek() != quote_char)
          break;
        c= yyGet();
        continue;
      }
    }
    else if (var_length > 1)
    {
      /* Skip the continuation bytes of a multi-byte character. */
      skip_binary(var_length - 1);
    }
  }
  str->set_ident_quoted(m_tok_start + 1, yyLength() - 1, true, quote_char);
  yyUnget();                            // ptr points now after last token char

  m_cpp_text_start= m_cpp_tok_start + 1;
  m_cpp_text_end= m_cpp_text_start + str->length;

  if (c == quote_char)
    yySkip();                           // Skip end `
  next_state= MY_LEX_START;
  body_utf8_append(m_cpp_text_start);
  // QQQ: shouldn't it add unescaped version ????
  body_utf8_append_ident(thd, str, m_cpp_text_end);
  return IDENT_QUOTED;
}
377,231
0
/*
  Initialize the local LEX object `lex` so that name resolution works for
  exactly one table: `table` is registered via add_table_to_list() in the
  first SELECT's Name_resolution_context, and the context is restricted
  to that table list only.  Used e.g. when parsing the partition string
  stored in an frm file.

  Returns TRUE on out-of-memory, FALSE on success.
  Side effects: thd->lex is switched to `lex`; table->map is set to 1.
*/
init_lex_with_single_table(THD *thd, TABLE *table, LEX *lex) { TABLE_LIST *table_list; Table_ident *table_ident; SELECT_LEX *select_lex= lex->first_select_lex(); Name_resolution_context *context= &select_lex->context; /* We will call the parser to create a part_info struct based on the partition string stored in the frm file. We will use a local lex object for this purpose. However we also need to set the Name_resolution_object for this lex object. We do this by using add_table_to_list where we add the table that we're working with to the Name_resolution_context. */ thd->lex= lex; lex_start(thd); context->init(); if (unlikely((!(table_ident= new Table_ident(thd, &table->s->db, &table->s->table_name, TRUE)))) || (unlikely(!(table_list= select_lex->add_table_to_list(thd, table_ident, NULL, 0))))) return TRUE; context->resolve_in_table_list_only(table_list); lex->use_only_table_context= TRUE; select_lex->cur_pos_in_select_list= UNDEF_POS; table->map= 1; //To ensure correct calculation of const item table_list->table= table; table_list->cacheable_table= false; lex->create_last_non_select_table= table_list; return FALSE; }
377,232
0
/*
  Return the SELECT_LEX enclosing the SELECT currently being parsed, or
  NULL when the current SELECT sits directly on the outer barrier of the
  parser's select stack.
*/
SELECT_LEX *parser_current_outer_select()
{
  if (select_stack_top - 1 == select_stack_outer_barrier)
    return 0;                       // no enclosing SELECT inside the barrier
  return select_stack[select_stack_top - 2];
}
377,233
0
/*
  Ensure a stored-program head exists for an anonymous compound statement
  (BEGIN ... END outside CREATE PROCEDURE/FUNCTION).  If one is already
  being parsed nothing is done.  Returns true on out-of-memory.
*/
bool LEX::maybe_start_compound_statement(THD *thd)
{
  if (sphead)
    return false;                     // already inside a stored program

  if (!make_sp_head(thd, NULL, &sp_handler_procedure, DEFAULT_AGGREGATE))
    return true;

  /* Anonymous blocks are never SUID; body starts at the current token. */
  sphead->set_suid(SP_IS_NOT_SUID);
  sphead->set_body_start(thd, thd->m_parser_state->m_lip.get_cpp_tok_start());
  return false;
}
377,235
0
/*
 * Report whether a complete application-data record is already sitting
 * in the read buffer.  Only the record type and length are examined
 * here; version and other header fields are sanity-checked later.
 * Returns 1 if a full application-data record is buffered, 0 otherwise.
 */
static int ssl3_record_app_data_waiting(SSL *s)
{
    SSL3_BUFFER *rbuf = RECORD_LAYER_get_rbuf(&s->rlayer);
    unsigned char *hdr = SSL3_BUFFER_get_buf(rbuf);
    int avail, rec_len;

    if (hdr == NULL)
        return 0;

    avail = SSL3_BUFFER_get_left(rbuf);
    if (avail < SSL3_RT_HEADER_LENGTH)
        return 0;

    hdr += SSL3_BUFFER_get_offset(rbuf);

    if (*hdr != SSL3_RT_APPLICATION_DATA)
        return 0;

    /* Skip type and version to reach the 2-byte length field. */
    hdr += 3;
    n2s(hdr, rec_len);

    if (avail < SSL3_RT_HEADER_LENGTH + rec_len)
        return 0;

    return 1;
}
377,237
0
/*
 * Parse a colon-separated list of curve names in |str| and install the
 * resulting NID list via tls1_set_curves().  When |pext| is NULL the
 * list is only validated.  Returns 1 on success, 0 on parse failure.
 */
int tls1_set_curves_list(unsigned char **pext, size_t *pextlen,
                         const char *str)
{
    nid_cb_st cb_state;

    cb_state.nidcnt = 0;
    if (CONF_parse_list(str, ':', 1, nid_cb, &cb_state) == 0)
        return 0;

    /* Validation-only mode: nothing to install. */
    if (pext == NULL)
        return 1;

    return tls1_set_curves(pext, pextlen, cb_state.nid_arr, cb_state.nidcnt);
}
377,238
0
/*
 * Zero |num_recs| records in |r| while preserving each record's
 * decompression buffer pointer (comp), which is owned across clears.
 */
void SSL3_RECORD_clear(SSL3_RECORD *r, unsigned int num_recs)
{
    unsigned int idx;

    for (idx = 0; idx < num_recs; idx++) {
        /* Keep the allocated comp buffer across the wipe. */
        unsigned char *saved_comp = r[idx].comp;

        memset(&r[idx], 0, sizeof(r[idx]));
        r[idx].comp = saved_comp;
    }
}
377,239
0
/*
 * ssl3_cbc_copy_mac copies the MAC of a CBC-decrypted record |rec| into
 * |out| in constant time.  The MAC's position within the record depends
 * on the (secret) padding length, so instead of branching on the offset
 * the loop scans every candidate byte, accumulating the MAC into
 * rotated_mac with bitwise masks, and then rotates it into place.
 * |md_size| is the digest length; rec->orig_len must be >= md_size
 * (asserted).  Do not "simplify" the masks or the full-range scan:
 * they are what defeat Lucky 13 style timing attacks.
 */
void ssl3_cbc_copy_mac(unsigned char *out,
                       const SSL3_RECORD *rec, unsigned md_size)
{
#if defined(CBC_MAC_ROTATE_IN_PLACE)
    unsigned char rotated_mac_buf[64 + EVP_MAX_MD_SIZE];
    unsigned char *rotated_mac;
#else
    unsigned char rotated_mac[EVP_MAX_MD_SIZE];
#endif
    /*
     * mac_end is the index of |rec->data| just after the end of the MAC.
     */
    unsigned mac_end = rec->length;
    unsigned mac_start = mac_end - md_size;
    unsigned in_mac;
    /*
     * scan_start contains the number of bytes that we can ignore because the
     * MAC's position can only vary by 255 bytes.
     */
    unsigned scan_start = 0;
    unsigned i, j;
    unsigned rotate_offset;

    OPENSSL_assert(rec->orig_len >= md_size);
    OPENSSL_assert(md_size <= EVP_MAX_MD_SIZE);

#if defined(CBC_MAC_ROTATE_IN_PLACE)
    /* 64-byte align the scratch buffer to keep cache behaviour uniform. */
    rotated_mac = rotated_mac_buf + ((0 - (size_t)rotated_mac_buf) & 63);
#endif

    /* This information is public so it's safe to branch based on it. */
    if (rec->orig_len > md_size + 255 + 1)
        scan_start = rec->orig_len - (md_size + 255 + 1);

    in_mac = 0;
    rotate_offset = 0;
    memset(rotated_mac, 0, md_size);
    for (i = scan_start, j = 0; i < rec->orig_len; i++) {
        unsigned mac_started = constant_time_eq(i, mac_start);
        unsigned mac_ended = constant_time_lt(i, mac_end);
        unsigned char b = rec->data[i];
        in_mac |= mac_started;
        in_mac &= mac_ended;
        rotate_offset |= j & mac_started;
        rotated_mac[j++] |= b & in_mac;
        j &= constant_time_lt(j, md_size);
    }

    /* Now rotate the MAC */
#if defined(CBC_MAC_ROTATE_IN_PLACE)
    j = 0;
    for (i = 0; i < md_size; i++) {
        /* in case cache-line is 32 bytes, touch second line */
        ((volatile unsigned char *)rotated_mac)[rotate_offset ^ 32];
        out[j++] = rotated_mac[rotate_offset++];
        rotate_offset &= constant_time_lt(rotate_offset, md_size);
    }
#else
    memset(out, 0, md_size);
    rotate_offset = md_size - rotate_offset;
    rotate_offset &= constant_time_lt(rotate_offset, md_size);
    for (i = 0; i < md_size; i++) {
        for (j = 0; j < md_size; j++)
            out[j] |= rotated_mac[i] & constant_time_eq_8(j, rotate_offset);
        rotate_offset++;
        rotate_offset &= constant_time_lt(rotate_offset, md_size);
    }
#endif
}
377,240
0
/*
 * tls1_cbc_remove_padding strips the CBC padding (and, when the version
 * uses an explicit IV, the leading IV block) from |rec| in constant
 * time.  The padding scan deliberately inspects the maximum possible
 * amount of padding (up to 256 bytes) so that its running time does not
 * leak the real padding length (Lucky 13 countermeasure).
 *
 * Returns 1 when the padding is valid, -1 when it is invalid (callers
 * must continue in constant time), and 0 only for publicly invalid
 * records where fatal failure is safe.
 */
int tls1_cbc_remove_padding(const SSL *s, SSL3_RECORD *rec, unsigned block_size, unsigned mac_size) { unsigned padding_length, good, to_check, i; const unsigned overhead = 1 /* padding length byte */ + mac_size; /* Check if version requires explicit IV */ if (SSL_USE_EXPLICIT_IV(s)) { /* * These lengths are all public so we can test them in non-constant * time. */ if (overhead + block_size > rec->length) return 0; /* We can now safely skip explicit IV */ rec->data += block_size; rec->input += block_size; rec->length -= block_size; rec->orig_len -= block_size; } else if (overhead > rec->length) return 0; padding_length = rec->data[rec->length - 1]; if (EVP_CIPHER_flags(EVP_CIPHER_CTX_cipher(s->enc_read_ctx)) & EVP_CIPH_FLAG_AEAD_CIPHER) { /* padding is already verified */ rec->length -= padding_length + 1; return 1; } good = constant_time_ge(rec->length, overhead + padding_length); /* * The padding consists of a length byte at the end of the record and * then that many bytes of padding, all with the same value as the length * byte. Thus, with the length byte included, there are i+1 bytes of * padding. We can't check just |padding_length+1| bytes because that * leaks decrypted information. Therefore we always have to check the * maximum amount of padding possible. (Again, the length of the record * is public information so we can use it.) */ to_check = 256; /* maximum amount of padding, inc length byte. */ if (to_check > rec->length) to_check = rec->length; for (i = 0; i < to_check; i++) { unsigned char mask = constant_time_ge_8(padding_length, i); unsigned char b = rec->data[rec->length - 1 - i]; /* * The final |padding_length+1| bytes should all have the value * |padding_length|. Therefore the XOR should be zero. */ good &= ~(mask & (padding_length ^ b)); } /* * If any of the final |padding_length+1| bytes had the wrong value, one * or more of the lower eight bits of |good| will be cleared. 
*/ good = constant_time_eq(0xff, good & 0xff); rec->length -= good & (padding_length + 1); return constant_time_select_int(good, 1, -1); }
377,244
0
/*
 * Derive the session master secret from the premaster secret |p| of
 * length |len| into s->session->master_key.  When the session has the
 * extended-master-secret flag set, the PRF is keyed over the
 * handshake-transcript hash (RFC 7627); otherwise over client_random
 * and server_random (RFC 5246).  Returns SSL3_MASTER_SECRET_SIZE on
 * success, or -1 if digesting the cached handshake records fails.
 * NOTE(review): the tls1_PRF() return values are not checked here —
 * confirm upstream that failure is impossible at this point.
 */
int tls1_generate_master_secret(SSL *s, unsigned char *out,
                                unsigned char *p, int len)
{
    if (s->session->flags & SSL_SESS_FLAG_EXTMS) {
        unsigned char hash[EVP_MAX_MD_SIZE * 2];
        int hashlen;
        /*
         * Digest cached records keeping record buffer (if present): this wont
         * affect client auth because we're freezing the buffer at the same
         * point (after client key exchange and before certificate verify)
         */
        if (!ssl3_digest_cached_records(s, 1))
            return -1;
        hashlen = ssl_handshake_hash(s, hash, sizeof(hash));
#ifdef SSL_DEBUG
        fprintf(stderr, "Handshake hashes:\n");
        BIO_dump_fp(stderr, (char *)hash, hashlen);
#endif
        tls1_PRF(s,
                 TLS_MD_EXTENDED_MASTER_SECRET_CONST,
                 TLS_MD_EXTENDED_MASTER_SECRET_CONST_SIZE,
                 hash, hashlen,
                 NULL, 0,
                 NULL, 0,
                 NULL, 0, p, len, s->session->master_key,
                 SSL3_MASTER_SECRET_SIZE);
        OPENSSL_cleanse(hash, hashlen);
    } else {
        tls1_PRF(s,
                 TLS_MD_MASTER_SECRET_CONST,
                 TLS_MD_MASTER_SECRET_CONST_SIZE,
                 s->s3->client_random, SSL3_RANDOM_SIZE,
                 NULL, 0,
                 s->s3->server_random, SSL3_RANDOM_SIZE,
                 NULL, 0, p, len, s->session->master_key,
                 SSL3_MASTER_SECRET_SIZE);
    }
#ifdef SSL_DEBUG
    fprintf(stderr, "Premaster Secret:\n");
    BIO_dump_fp(stderr, (char *)p, len);
    fprintf(stderr, "Client Random:\n");
    BIO_dump_fp(stderr, (char *)s->s3->client_random, SSL3_RANDOM_SIZE);
    fprintf(stderr, "Server Random:\n");
    BIO_dump_fp(stderr, (char *)s->s3->server_random, SSL3_RANDOM_SIZE);
    fprintf(stderr, "Master Secret:\n");
    BIO_dump_fp(stderr, (char *)s->session->master_key,
                SSL3_MASTER_SECRET_SIZE);
#endif

#ifdef OPENSSL_SSL_TRACE_CRYPTO
    if (s->msg_callback) {
        s->msg_callback(2, s->version, TLS1_RT_CRYPTO_PREMASTER,
                        p, len, s, s->msg_callback_arg);
        s->msg_callback(2, s->version, TLS1_RT_CRYPTO_CLIENT_RANDOM,
                        s->s3->client_random, SSL3_RANDOM_SIZE,
                        s, s->msg_callback_arg);
        s->msg_callback(2, s->version, TLS1_RT_CRYPTO_SERVER_RANDOM,
                        s->s3->server_random, SSL3_RANDOM_SIZE,
                        s, s->msg_callback_arg);
        s->msg_callback(2, s->version, TLS1_RT_CRYPTO_MASTER,
                        s->session->master_key,
                        SSL3_MASTER_SECRET_SIZE, s, s->msg_callback_arg);
    }
#endif
    return (SSL3_MASTER_SECRET_SIZE);
}
377,245
0
/*
 * Parse every TLS extension in a ServerHello from |pkt|.  Known types
 * (renegotiation_info, server_name, EC point formats, session ticket,
 * status_request, SCT, NPN, ALPN, heartbeat, SRTP, encrypt-then-MAC,
 * extended master secret) are validated and recorded on |s|; anything
 * else falls through to custom_ext_parse().  On error, 0 is returned
 * with *al set to the alert to send.  The trailing ri_check block
 * enforces the secure-renegotiation policy, and on session resumption
 * the extended-master-secret flag must match the original session.
 * Returns 1 on success.
 */
static int ssl_scan_serverhello_tlsext(SSL *s, PACKET *pkt, int *al)
{
    unsigned int length, type, size;
    int tlsext_servername = 0;
    int renegotiate_seen = 0;

#ifndef OPENSSL_NO_NEXTPROTONEG
    s->s3->next_proto_neg_seen = 0;
#endif
    s->tlsext_ticket_expected = 0;

    OPENSSL_free(s->s3->alpn_selected);
    s->s3->alpn_selected = NULL;
#ifndef OPENSSL_NO_HEARTBEATS
    s->tlsext_heartbeat &= ~(SSL_DTLSEXT_HB_ENABLED |
                             SSL_DTLSEXT_HB_DONT_SEND_REQUESTS);
#endif

    s->tlsext_use_etm = 0;

    s->s3->flags &= ~TLS1_FLAGS_RECEIVED_EXTMS;

    if (!PACKET_get_net_2(pkt, &length))
        goto ri_check;

    if (PACKET_remaining(pkt) != length) {
        *al = SSL_AD_DECODE_ERROR;
        return 0;
    }

    if (!tls1_check_duplicate_extensions(pkt)) {
        *al = SSL_AD_DECODE_ERROR;
        return 0;
    }

    while (PACKET_get_net_2(pkt, &type) && PACKET_get_net_2(pkt, &size)) {
        const unsigned char *data;
        PACKET spkt;

        if (!PACKET_get_sub_packet(pkt, &spkt, size)
            || !PACKET_peek_bytes(&spkt, &data, size))
            goto ri_check;

        if (s->tlsext_debug_cb)
            s->tlsext_debug_cb(s, 1, type, data, size, s->tlsext_debug_arg);

        if (type == TLSEXT_TYPE_renegotiate) {
            if (!ssl_parse_serverhello_renegotiate_ext(s, &spkt, al))
                return 0;
            renegotiate_seen = 1;
        } else if (s->version == SSL3_VERSION) {
            /* SSLv3 ignores all other extensions. */
        } else if (type == TLSEXT_TYPE_server_name) {
            /* Must be empty and only present if we asked for it. */
            if (s->tlsext_hostname == NULL || size > 0) {
                *al = TLS1_AD_UNRECOGNIZED_NAME;
                return 0;
            }
            tlsext_servername = 1;
        }
#ifndef OPENSSL_NO_EC
        else if (type == TLSEXT_TYPE_ec_point_formats) {
            unsigned int ecpointformatlist_length;
            if (!PACKET_get_1(&spkt, &ecpointformatlist_length)
                || ecpointformatlist_length != size - 1) {
                *al = TLS1_AD_DECODE_ERROR;
                return 0;
            }
            if (!s->hit) {
                s->session->tlsext_ecpointformatlist_length = 0;
                OPENSSL_free(s->session->tlsext_ecpointformatlist);
                if ((s->session->tlsext_ecpointformatlist =
                     OPENSSL_malloc(ecpointformatlist_length)) == NULL) {
                    *al = TLS1_AD_INTERNAL_ERROR;
                    return 0;
                }
                s->session->tlsext_ecpointformatlist_length =
                    ecpointformatlist_length;
                if (!PACKET_copy_bytes(&spkt,
                                       s->session->tlsext_ecpointformatlist,
                                       ecpointformatlist_length)) {
                    *al = TLS1_AD_DECODE_ERROR;
                    return 0;
                }
            }
        }
#endif                          /* OPENSSL_NO_EC */
        else if (type == TLSEXT_TYPE_session_ticket) {
            if (s->tls_session_ticket_ext_cb &&
                !s->tls_session_ticket_ext_cb(s, data, size,
                                              s->tls_session_ticket_ext_cb_arg))
            {
                *al = TLS1_AD_INTERNAL_ERROR;
                return 0;
            }
            if (!tls_use_ticket(s) || (size > 0)) {
                *al = TLS1_AD_UNSUPPORTED_EXTENSION;
                return 0;
            }
            s->tlsext_ticket_expected = 1;
        } else if (type == TLSEXT_TYPE_status_request) {
            /*
             * MUST be empty and only sent if we've requested a status
             * request message.
             */
            if ((s->tlsext_status_type == -1) || (size > 0)) {
                *al = TLS1_AD_UNSUPPORTED_EXTENSION;
                return 0;
            }
            /* Set flag to expect CertificateStatus message */
            s->tlsext_status_expected = 1;
        }
#ifndef OPENSSL_NO_CT
        /*
         * Only take it if we asked for it - i.e if there is no CT validation
         * callback set, then a custom extension MAY be processing it, so we
         * need to let control continue to flow to that.
         */
        else if (type == TLSEXT_TYPE_signed_certificate_timestamp &&
                 s->ct_validation_callback != NULL) {
            /* Simply copy it off for later processing */
            if (s->tlsext_scts != NULL) {
                OPENSSL_free(s->tlsext_scts);
                s->tlsext_scts = NULL;
            }
            s->tlsext_scts_len = size;
            if (size > 0) {
                s->tlsext_scts = OPENSSL_malloc(size);
                if (s->tlsext_scts == NULL) {
                    *al = TLS1_AD_INTERNAL_ERROR;
                    return 0;
                }
                memcpy(s->tlsext_scts, data, size);
            }
        }
#endif
#ifndef OPENSSL_NO_NEXTPROTONEG
        else if (type == TLSEXT_TYPE_next_proto_neg &&
                 s->s3->tmp.finish_md_len == 0) {
            unsigned char *selected;
            unsigned char selected_len;
            /* We must have requested it. */
            if (s->ctx->next_proto_select_cb == NULL) {
                *al = TLS1_AD_UNSUPPORTED_EXTENSION;
                return 0;
            }
            /* The data must be valid */
            if (!ssl_next_proto_validate(&spkt)) {
                *al = TLS1_AD_DECODE_ERROR;
                return 0;
            }
            if (s->ctx->next_proto_select_cb(s, &selected, &selected_len,
                                             data, size,
                                             s->ctx->next_proto_select_cb_arg)
                != SSL_TLSEXT_ERR_OK) {
                *al = TLS1_AD_INTERNAL_ERROR;
                return 0;
            }
            /*
             * Could be non-NULL if server has sent multiple NPN extensions in
             * a single Serverhello
             */
            OPENSSL_free(s->next_proto_negotiated);
            s->next_proto_negotiated = OPENSSL_malloc(selected_len);
            if (s->next_proto_negotiated == NULL) {
                *al = TLS1_AD_INTERNAL_ERROR;
                return 0;
            }
            memcpy(s->next_proto_negotiated, selected, selected_len);
            s->next_proto_negotiated_len = selected_len;
            s->s3->next_proto_neg_seen = 1;
        }
#endif
        else if (type == TLSEXT_TYPE_application_layer_protocol_negotiation) {
            unsigned len;
            /* We must have requested it. */
            if (!s->s3->alpn_sent) {
                *al = TLS1_AD_UNSUPPORTED_EXTENSION;
                return 0;
            }
            /*-
             * The extension data consists of:
             *   uint16 list_length
             *   uint8 proto_length;
             *   uint8 proto[proto_length];
             */
            if (!PACKET_get_net_2(&spkt, &len)
                || PACKET_remaining(&spkt) != len
                || !PACKET_get_1(&spkt, &len)
                || PACKET_remaining(&spkt) != len) {
                *al = TLS1_AD_DECODE_ERROR;
                return 0;
            }
            OPENSSL_free(s->s3->alpn_selected);
            s->s3->alpn_selected = OPENSSL_malloc(len);
            if (s->s3->alpn_selected == NULL) {
                *al = TLS1_AD_INTERNAL_ERROR;
                return 0;
            }
            if (!PACKET_copy_bytes(&spkt, s->s3->alpn_selected, len)) {
                *al = TLS1_AD_DECODE_ERROR;
                return 0;
            }
            s->s3->alpn_selected_len = len;
        }
#ifndef OPENSSL_NO_HEARTBEATS
        else if (SSL_IS_DTLS(s) && type == TLSEXT_TYPE_heartbeat) {
            unsigned int hbtype;

            if (!PACKET_get_1(&spkt, &hbtype)) {
                *al = SSL_AD_DECODE_ERROR;
                return 0;
            }
            switch (hbtype) {
            case 0x01:         /* Server allows us to send HB requests */
                s->tlsext_heartbeat |= SSL_DTLSEXT_HB_ENABLED;
                break;
            case 0x02:         /* Server doesn't accept HB requests */
                s->tlsext_heartbeat |= SSL_DTLSEXT_HB_ENABLED;
                s->tlsext_heartbeat |= SSL_DTLSEXT_HB_DONT_SEND_REQUESTS;
                break;
            default:
                *al = SSL_AD_ILLEGAL_PARAMETER;
                return 0;
            }
        }
#endif
#ifndef OPENSSL_NO_SRTP
        else if (SSL_IS_DTLS(s) && type == TLSEXT_TYPE_use_srtp) {
            if (ssl_parse_serverhello_use_srtp_ext(s, &spkt, al))
                return 0;
        }
#endif
        else if (type == TLSEXT_TYPE_encrypt_then_mac) {
            /* Ignore if inappropriate ciphersuite */
            if (s->s3->tmp.new_cipher->algorithm_mac != SSL_AEAD
                && s->s3->tmp.new_cipher->algorithm_enc != SSL_RC4)
                s->tlsext_use_etm = 1;
        } else if (type == TLSEXT_TYPE_extended_master_secret) {
            s->s3->flags |= TLS1_FLAGS_RECEIVED_EXTMS;
            if (!s->hit)
                s->session->flags |= SSL_SESS_FLAG_EXTMS;
        }
        /*
         * If this extension type was not otherwise handled, but matches a
         * custom_cli_ext_record, then send it to the c callback
         */
        else if (custom_ext_parse(s, 0, type, data, size, al) <= 0)
            return 0;
    }

    if (PACKET_remaining(pkt) != 0) {
        *al = SSL_AD_DECODE_ERROR;
        return 0;
    }

    if (!s->hit && tlsext_servername == 1) {
        if (s->tlsext_hostname) {
            if (s->session->tlsext_hostname == NULL) {
                s->session->tlsext_hostname =
                    OPENSSL_strdup(s->tlsext_hostname);
                if (!s->session->tlsext_hostname) {
                    *al = SSL_AD_UNRECOGNIZED_NAME;
                    return 0;
                }
            } else {
                *al = SSL_AD_DECODE_ERROR;
                return 0;
            }
        }
    }

 ri_check:

    /*
     * Determine if we need to see RI. Strictly speaking if we want to avoid
     * an attack we should *always* see RI even on initial server hello
     * because the client doesn't see any renegotiation during an attack.
     * However this would mean we could not connect to any server which
     * doesn't support RI so for the immediate future tolerate RI absence
     */
    if (!renegotiate_seen && !(s->options & SSL_OP_LEGACY_SERVER_CONNECT)
        && !(s->options & SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION)) {
        *al = SSL_AD_HANDSHAKE_FAILURE;
        SSLerr(SSL_F_SSL_SCAN_SERVERHELLO_TLSEXT,
               SSL_R_UNSAFE_LEGACY_RENEGOTIATION_DISABLED);
        return 0;
    }

    if (s->hit) {
        /*
         * Check extended master secret extension is consistent with
         * original session.
         */
        if (!(s->s3->flags & TLS1_FLAGS_RECEIVED_EXTMS) !=
            !(s->session->flags & SSL_SESS_FLAG_EXTMS)) {
            *al = SSL_AD_HANDSHAKE_FAILURE;
            SSLerr(SSL_F_SSL_SCAN_SERVERHELLO_TLSEXT,
                   SSL_R_INCONSISTENT_EXTMS);
            return 0;
        }
    }

    return 1;
}
377,246
0
/*
 * Compute the TLS/DTLS record MAC for |rec| into |md|, for sending
 * (send != 0) or receiving (send == 0).  A 13-byte pseudo-header
 * (sequence number or DTLS epoch+sequence, record type, version,
 * length) is MACed ahead of the record payload, using either the
 * long-lived stream MAC context or a per-record copy.  When reading a
 * CBC record without encrypt-then-MAC, the constant-time
 * ssl3_cbc_digest_record() path is used to avoid a Lucky 13 style
 * timing oracle.  For TLS (not DTLS) the read/write sequence number is
 * incremented here.  Returns the MAC size, or -1 on error.
 */
int tls1_mac(SSL *ssl, SSL3_RECORD *rec, unsigned char *md, int send)
{
    unsigned char *seq;
    EVP_MD_CTX *hash;
    size_t md_size;
    int i;
    EVP_MD_CTX *hmac = NULL, *mac_ctx;
    unsigned char header[13];
    int stream_mac = (send ? (ssl->mac_flags & SSL_MAC_FLAG_WRITE_MAC_STREAM)
                      : (ssl->mac_flags & SSL_MAC_FLAG_READ_MAC_STREAM));
    int t;

    if (send) {
        seq = RECORD_LAYER_get_write_sequence(&ssl->rlayer);
        hash = ssl->write_hash;
    } else {
        seq = RECORD_LAYER_get_read_sequence(&ssl->rlayer);
        hash = ssl->read_hash;
    }

    t = EVP_MD_CTX_size(hash);
    OPENSSL_assert(t >= 0);
    md_size = t;

    /* I should fix this up TLS TLS TLS TLS TLS XXXXXXXX */
    if (stream_mac) {
        mac_ctx = hash;
    } else {
        hmac = EVP_MD_CTX_new();
        if (hmac == NULL || !EVP_MD_CTX_copy(hmac, hash))
            return -1;
        mac_ctx = hmac;
    }

    if (SSL_IS_DTLS(ssl)) {
        unsigned char dtlsseq[8], *p = dtlsseq;

        s2n(send ? DTLS_RECORD_LAYER_get_w_epoch(&ssl->rlayer) :
            DTLS_RECORD_LAYER_get_r_epoch(&ssl->rlayer), p);
        memcpy(p, &seq[2], 6);

        memcpy(header, dtlsseq, 8);
    } else
        memcpy(header, seq, 8);

    header[8] = rec->type;
    header[9] = (unsigned char)(ssl->version >> 8);
    header[10] = (unsigned char)(ssl->version);
    header[11] = (rec->length) >> 8;
    header[12] = (rec->length) & 0xff;

    if (!send && !SSL_READ_ETM(ssl) &&
        EVP_CIPHER_CTX_mode(ssl->enc_read_ctx) == EVP_CIPH_CBC_MODE &&
        ssl3_cbc_record_digest_supported(mac_ctx)) {
        /*
         * This is a CBC-encrypted record. We must avoid leaking any
         * timing-side channel information about how many blocks of data we
         * are hashing because that gives an attacker a timing-oracle.
         */
        /* Final param == not SSLv3 */
        if (ssl3_cbc_digest_record(mac_ctx,
                                   md, &md_size,
                                   header, rec->input,
                                   rec->length + md_size, rec->orig_len,
                                   ssl->s3->read_mac_secret,
                                   ssl->s3->read_mac_secret_size, 0) <= 0) {
            EVP_MD_CTX_free(hmac);
            return -1;
        }
    } else {
        if (EVP_DigestSignUpdate(mac_ctx, header, sizeof(header)) <= 0
            || EVP_DigestSignUpdate(mac_ctx, rec->input, rec->length) <= 0
            || EVP_DigestSignFinal(mac_ctx, md, &md_size) <= 0) {
            EVP_MD_CTX_free(hmac);
            return -1;
        }
        if (!send && !SSL_READ_ETM(ssl) && FIPS_mode())
            if (!tls_fips_digest_extra(ssl->enc_read_ctx,
                                       mac_ctx, rec->input,
                                       rec->length, rec->orig_len)) {
                EVP_MD_CTX_free(hmac);
                return -1;
            }
    }

    EVP_MD_CTX_free(hmac);

#ifdef SSL_DEBUG
    fprintf(stderr, "seq=");
    {
        int z;
        for (z = 0; z < 8; z++)
            fprintf(stderr, "%02X ", seq[z]);
        fprintf(stderr, "\n");
    }
    fprintf(stderr, "rec=");
    {
        unsigned int z;
        for (z = 0; z < rec->length; z++)
            fprintf(stderr, "%02X ", rec->data[z]);
        fprintf(stderr, "\n");
    }
#endif

    if (!SSL_IS_DTLS(ssl)) {
        for (i = 7; i >= 0; i--) {
            ++seq[i];
            if (seq[i] != 0)
                break;
        }
    }
#ifdef SSL_DEBUG
    {
        unsigned int z;
        for (z = 0; z < md_size; z++)
            fprintf(stderr, "%02X ", md[z]);
        fprintf(stderr, "\n");
    }
#endif
    return (md_size);
}
377,247
0
/*
 * Reset TLS-specific connection state.  Delegates the bulk of the work
 * to ssl3_clear(); with an "any version" method the version is pinned
 * to the highest supported value until negotiation settles it.
 */
void tls1_clear(SSL *s)
{
    ssl3_clear(s);
    s->version = (s->method->version == TLS_ANY_VERSION)
                 ? TLS_MAX_VERSION
                 : s->method->version;
}
377,248
0
/*
 * Late ClientHello extension processing, run after the certificate and
 * cipher have been chosen (both can influence the answer): asks the
 * application's status_request (OCSP) callback whether a
 * CertificateStatus message should be sent, updating
 * s->tlsext_status_expected accordingly, then finalizes ALPN handling.
 * Returns 1 on success; on failure returns 0 with *al set to the alert
 * to send.
 */
int ssl_check_clienthello_tlsext_late(SSL *s, int *al) { s->tlsext_status_expected = 0; /* * If status request then ask callback what to do. Note: this must be * called after servername callbacks in case the certificate has changed, * and must be called after the cipher has been chosen because this may * influence which certificate is sent */ if ((s->tlsext_status_type != -1) && s->ctx && s->ctx->tlsext_status_cb) { int ret; CERT_PKEY *certpkey; certpkey = ssl_get_server_send_pkey(s); /* If no certificate can't return certificate status */ if (certpkey != NULL) { /* * Set current certificate to one we will use so SSL_get_certificate * et al can pick it up. */ s->cert->key = certpkey; ret = s->ctx->tlsext_status_cb(s, s->ctx->tlsext_status_arg); switch (ret) { /* We don't want to send a status request response */ case SSL_TLSEXT_ERR_NOACK: s->tlsext_status_expected = 0; break; /* status request response should be sent */ case SSL_TLSEXT_ERR_OK: if (s->tlsext_ocsp_resp) s->tlsext_status_expected = 1; break; /* something bad happened */ case SSL_TLSEXT_ERR_ALERT_FATAL: default: *al = SSL_AD_INTERNAL_ERROR; return 0; } } } if (!tls1_alpn_handle_client_hello_late(s, al)) { return 0; } return 1; }
377,249
0
/*
 * Decompress record |rr| in place using the connection's expand context,
 * lazily allocating the per-record decompression buffer.  A no-op
 * (always successful) when compression support is compiled out.
 * Returns 1 on success, 0 on allocation or decompression failure.
 */
int ssl3_do_uncompress(SSL *ssl, SSL3_RECORD *rr)
{
#ifndef OPENSSL_NO_COMP
    int expanded;

    /* Allocate the decompression buffer on first use. */
    if (rr->comp == NULL)
        rr->comp = (unsigned char *)
            OPENSSL_malloc(SSL3_RT_MAX_ENCRYPTED_LENGTH);
    if (rr->comp == NULL)
        return 0;

    expanded = COMP_expand_block(ssl->expand, rr->comp,
                                 SSL3_RT_MAX_PLAIN_LENGTH,
                                 rr->data, (int)rr->length);
    if (expanded < 0)
        return 0;

    rr->length = expanded;
    rr->data = rr->comp;
#endif
    return 1;
}
377,251
0
/*
 * Encrypt (send != 0) or decrypt (send == 0) |n_recs| records in |recs|,
 * supporting cipher pipelining.  On the encrypt side this generates the
 * explicit IV for TLS >= 1.1 CBC ciphers and appends CBC padding; for
 * AEAD ciphers it builds the per-record additional-data block.  On the
 * decrypt side it strips AEAD explicit nonces and removes CBC padding
 * via the constant-time tls1_cbc_remove_padding(), folding any
 * MAC/padding failure into the return value without data-dependent
 * branches.  Returns 1 on success, -1 on fatal error, and 0 for
 * publicly invalid records.  Do not reorder the decrypt-side checks:
 * the constant-time result combination is deliberate (Lucky 13).
 */
int tls1_enc(SSL *s, SSL3_RECORD *recs, unsigned int n_recs, int send) { EVP_CIPHER_CTX *ds; size_t reclen[SSL_MAX_PIPELINES]; unsigned char buf[SSL_MAX_PIPELINES][EVP_AEAD_TLS1_AAD_LEN]; int bs, i, j, k, pad = 0, ret, mac_size = 0; const EVP_CIPHER *enc; unsigned int ctr; if (send) { if (EVP_MD_CTX_md(s->write_hash)) { int n = EVP_MD_CTX_size(s->write_hash); OPENSSL_assert(n >= 0); } ds = s->enc_write_ctx; if (s->enc_write_ctx == NULL) enc = NULL; else { int ivlen; enc = EVP_CIPHER_CTX_cipher(s->enc_write_ctx); /* For TLSv1.1 and later explicit IV */ if (SSL_USE_EXPLICIT_IV(s) && EVP_CIPHER_mode(enc) == EVP_CIPH_CBC_MODE) ivlen = EVP_CIPHER_iv_length(enc); else ivlen = 0; if (ivlen > 1) { for (ctr = 0; ctr < n_recs; ctr++) { if (recs[ctr].data != recs[ctr].input) { /* * we can't write into the input stream: Can this ever * happen?? (steve) */ SSLerr(SSL_F_TLS1_ENC, ERR_R_INTERNAL_ERROR); return -1; } else if (RAND_bytes(recs[ctr].input, ivlen) <= 0) { SSLerr(SSL_F_TLS1_ENC, ERR_R_INTERNAL_ERROR); return -1; } } } } } else { if (EVP_MD_CTX_md(s->read_hash)) { int n = EVP_MD_CTX_size(s->read_hash); OPENSSL_assert(n >= 0); } ds = s->enc_read_ctx; if (s->enc_read_ctx == NULL) enc = NULL; else enc = EVP_CIPHER_CTX_cipher(s->enc_read_ctx); } if ((s->session == NULL) || (ds == NULL) || (enc == NULL)) { for (ctr = 0; ctr < n_recs; ctr++) { memmove(recs[ctr].data, recs[ctr].input, recs[ctr].length); recs[ctr].input = recs[ctr].data; } ret = 1; } else { bs = EVP_CIPHER_block_size(EVP_CIPHER_CTX_cipher(ds)); if (n_recs > 1) { if (!(EVP_CIPHER_flags(EVP_CIPHER_CTX_cipher(ds)) & EVP_CIPH_FLAG_PIPELINE)) { /* * We shouldn't have been called with pipeline data if the * cipher doesn't support pipelining */ SSLerr(SSL_F_TLS1_ENC, SSL_R_PIPELINE_FAILURE); return -1; } } for (ctr = 0; ctr < n_recs; ctr++) { reclen[ctr] = recs[ctr].length; if (EVP_CIPHER_flags(EVP_CIPHER_CTX_cipher(ds)) & EVP_CIPH_FLAG_AEAD_CIPHER) { unsigned char *seq; seq = send ? 
RECORD_LAYER_get_write_sequence(&s->rlayer) : RECORD_LAYER_get_read_sequence(&s->rlayer); if (SSL_IS_DTLS(s)) { /* DTLS does not support pipelining */ unsigned char dtlsseq[9], *p = dtlsseq; s2n(send ? DTLS_RECORD_LAYER_get_w_epoch(&s->rlayer) : DTLS_RECORD_LAYER_get_r_epoch(&s->rlayer), p); memcpy(p, &seq[2], 6); memcpy(buf[ctr], dtlsseq, 8); } else { memcpy(buf[ctr], seq, 8); for (i = 7; i >= 0; i--) { /* increment */ ++seq[i]; if (seq[i] != 0) break; } } buf[ctr][8] = recs[ctr].type; buf[ctr][9] = (unsigned char)(s->version >> 8); buf[ctr][10] = (unsigned char)(s->version); buf[ctr][11] = recs[ctr].length >> 8; buf[ctr][12] = recs[ctr].length & 0xff; pad = EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_AEAD_TLS1_AAD, EVP_AEAD_TLS1_AAD_LEN, buf[ctr]); if (pad <= 0) return -1; if (send) { reclen[ctr] += pad; recs[ctr].length += pad; } } else if ((bs != 1) && send) { i = bs - ((int)reclen[ctr] % bs); /* Add weird padding of upto 256 bytes */ /* we need to add 'i' padding bytes of value j */ j = i - 1; for (k = (int)reclen[ctr]; k < (int)(reclen[ctr] + i); k++) recs[ctr].input[k] = j; reclen[ctr] += i; recs[ctr].length += i; } if (!send) { if (reclen[ctr] == 0 || reclen[ctr] % bs != 0) return 0; } } if (n_recs > 1) { unsigned char *data[SSL_MAX_PIPELINES]; /* Set the output buffers */ for (ctr = 0; ctr < n_recs; ctr++) { data[ctr] = recs[ctr].data; } if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_OUTPUT_BUFS, n_recs, data) <= 0) { SSLerr(SSL_F_TLS1_ENC, SSL_R_PIPELINE_FAILURE); } /* Set the input buffers */ for (ctr = 0; ctr < n_recs; ctr++) { data[ctr] = recs[ctr].input; } if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_INPUT_BUFS, n_recs, data) <= 0 || EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_INPUT_LENS, n_recs, reclen) <= 0) { SSLerr(SSL_F_TLS1_ENC, SSL_R_PIPELINE_FAILURE); return -1; } } i = EVP_Cipher(ds, recs[0].data, recs[0].input, reclen[0]); if ((EVP_CIPHER_flags(EVP_CIPHER_CTX_cipher(ds)) & EVP_CIPH_FLAG_CUSTOM_CIPHER) ? 
(i < 0) : (i == 0)) return -1; /* AEAD can fail to verify MAC */ if (send == 0) { if (EVP_CIPHER_mode(enc) == EVP_CIPH_GCM_MODE) { for (ctr = 0; ctr < n_recs; ctr++) { recs[ctr].data += EVP_GCM_TLS_EXPLICIT_IV_LEN; recs[ctr].input += EVP_GCM_TLS_EXPLICIT_IV_LEN; recs[ctr].length -= EVP_GCM_TLS_EXPLICIT_IV_LEN; } } else if (EVP_CIPHER_mode(enc) == EVP_CIPH_CCM_MODE) { for (ctr = 0; ctr < n_recs; ctr++) { recs[ctr].data += EVP_CCM_TLS_EXPLICIT_IV_LEN; recs[ctr].input += EVP_CCM_TLS_EXPLICIT_IV_LEN; recs[ctr].length -= EVP_CCM_TLS_EXPLICIT_IV_LEN; } } } ret = 1; if (!SSL_READ_ETM(s) && EVP_MD_CTX_md(s->read_hash) != NULL) mac_size = EVP_MD_CTX_size(s->read_hash); if ((bs != 1) && !send) { int tmpret; for (ctr = 0; ctr < n_recs; ctr++) { tmpret = tls1_cbc_remove_padding(s, &recs[ctr], bs, mac_size); /* * If tmpret == 0 then this means publicly invalid so we can * short circuit things here. Otherwise we must respect constant * time behaviour. */ if (tmpret == 0) return 0; ret = constant_time_select_int(constant_time_eq_int(tmpret, 1), ret, -1); } } if (pad && !send) { for (ctr = 0; ctr < n_recs; ctr++) { recs[ctr].length -= pad; } } } return ret; }
377,253