Columns:
target (int64, values 0 to 1)
func (string, lengths 0 to 484k)
idx (int64, values 1 to 378k)
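Each record below lists its three fields in the column order above: target, then func, then idx. As a minimal sketch only (the struct and field names are illustrative assumptions, not part of the dataset), one record could be represented in C as:

/* Illustrative only: one row of the table above, assuming just the three columns shown. */
struct sample {
    int         target; /* label column, 0 or 1 */
    const char *func;   /* function source text, up to ~484k characters */
    long long   idx;    /* row index, 1 to ~378k */
};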
target: 0
OM_uint32 kg_sync_ccache_name ( krb5_context context , OM_uint32 * minor_status ) { OM_uint32 err = 0 ; if ( ! err ) { err = krb5_cc_set_default_name ( context , ( char * ) k5_getspecific ( K5_KEY_GSS_KRB5_CCACHE_NAME ) ) ; } * minor_status = err ; return ( * minor_status == 0 ) ? GSS_S_COMPLETE : GSS_S_FAILURE ; }
idx: 1
target: 0
void KPasswordDlg::keyPressed( QKeyEvent *e ) { static bool waitForAuthentication = false; if (!waitForAuthentication) { switch ( e->key() ) { case Key_Backspace: { int len = password.length(); if ( len ) { password.truncate( len - 1 ); if( stars ) showStars(); } } break; case Key_Return: timer.stop(); waitForAuthentication = true; if ( tryPassword() ) emit passOk(); else { label->setText( glocale->translate("Failed") ); password = ""; timerMode = 1; timer.start( 1500, TRUE ); } waitForAuthentication = false; break; case Key_Escape: emit passCancel(); break; default: if ( password.length() < MAX_PASSWORD_LENGTH ) { password += (char)e->ascii(); if( stars ) showStars(); timer.changeInterval( 10000 ); } } } }
idx: 2
target: 0
static int transcode(AVFormatContext **output_files, int nb_output_files, InputFile *input_files, int nb_input_files, StreamMap *stream_maps, int nb_stream_maps) { int ret = 0, i, j, k, n, nb_ostreams = 0, step; AVFormatContext *is, *os; AVCodecContext *codec, *icodec; OutputStream *ost, **ost_table = NULL; InputStream *ist; char error[1024]; int key; int want_sdp = 1; uint8_t no_packet[MAX_FILES]={0}; int no_packet_count=0; int nb_frame_threshold[AVMEDIA_TYPE_NB]={0}; int nb_streams[AVMEDIA_TYPE_NB]={0}; if (rate_emu) for (i = 0; i < nb_input_streams; i++) input_streams[i].start = av_gettime(); /* output stream init */ nb_ostreams = 0; for(i=0;i<nb_output_files;i++) { os = output_files[i]; if (!os->nb_streams && !(os->oformat->flags & AVFMT_NOSTREAMS)) { av_dump_format(output_files[i], i, output_files[i]->filename, 1); fprintf(stderr, "Output file #%d does not contain any stream\n", i); ret = AVERROR(EINVAL); goto fail; } nb_ostreams += os->nb_streams; } if (nb_stream_maps > 0 && nb_stream_maps != nb_ostreams) { fprintf(stderr, "Number of stream maps must match number of output streams\n"); ret = AVERROR(EINVAL); goto fail; } /* Sanity check the mapping args -- do the input files & streams exist? */ for(i=0;i<nb_stream_maps;i++) { int fi = stream_maps[i].file_index; int si = stream_maps[i].stream_index; if (fi < 0 || fi > nb_input_files - 1 || si < 0 || si > input_files[fi].ctx->nb_streams - 1) { fprintf(stderr,"Could not find input stream #%d.%d\n", fi, si); ret = AVERROR(EINVAL); goto fail; } fi = stream_maps[i].sync_file_index; si = stream_maps[i].sync_stream_index; if (fi < 0 || fi > nb_input_files - 1 || si < 0 || si > input_files[fi].ctx->nb_streams - 1) { fprintf(stderr,"Could not find sync stream #%d.%d\n", fi, si); ret = AVERROR(EINVAL); goto fail; } } ost_table = av_mallocz(sizeof(OutputStream *) * nb_ostreams); if (!ost_table) goto fail; for(k=0;k<nb_output_files;k++) { os = output_files[k]; for(i=0;i<os->nb_streams;i++,n++) { nb_streams[os->streams[i]->codec->codec_type]++; } } for(step=1<<30; step; step>>=1){ int found_streams[AVMEDIA_TYPE_NB]={0}; for(j=0; j<AVMEDIA_TYPE_NB; j++) nb_frame_threshold[j] += step; for(j=0; j<nb_input_streams; j++) { int skip=0; ist = &input_streams[j]; if(opt_programid){ int pi,si; AVFormatContext *f= input_files[ ist->file_index ].ctx; skip=1; for(pi=0; pi<f->nb_programs; pi++){ AVProgram *p= f->programs[pi]; if(p->id == opt_programid) for(si=0; si<p->nb_stream_indexes; si++){ if(f->streams[ p->stream_index[si] ] == ist->st) skip=0; } } } if (ist->discard && ist->st->discard != AVDISCARD_ALL && !skip && nb_frame_threshold[ist->st->codec->codec_type] <= ist->st->codec_info_nb_frames){ found_streams[ist->st->codec->codec_type]++; } } for(j=0; j<AVMEDIA_TYPE_NB; j++) if(found_streams[j] < nb_streams[j]) nb_frame_threshold[j] -= step; } n = 0; for(k=0;k<nb_output_files;k++) { os = output_files[k]; for(i=0;i<os->nb_streams;i++,n++) { int found; ost = ost_table[n] = output_streams_for_file[k][i]; if (nb_stream_maps > 0) { ost->source_index = input_files[stream_maps[n].file_index].ist_index + stream_maps[n].stream_index; /* Sanity check that the stream types match */ if (input_streams[ost->source_index].st->codec->codec_type != ost->st->codec->codec_type) { int i= ost->file_index; av_dump_format(output_files[i], i, output_files[i]->filename, 1); fprintf(stderr, "Codec type mismatch for mapping #%d.%d -> #%d.%d\n", stream_maps[n].file_index, stream_maps[n].stream_index, ost->file_index, ost->index); ffmpeg_exit(1); } } else { /* get corresponding input 
stream index : we select the first one with the right type */ found = 0; for (j = 0; j < nb_input_streams; j++) { int skip=0; ist = &input_streams[j]; if(opt_programid){ int pi,si; AVFormatContext *f = input_files[ist->file_index].ctx; skip=1; for(pi=0; pi<f->nb_programs; pi++){ AVProgram *p= f->programs[pi]; if(p->id == opt_programid) for(si=0; si<p->nb_stream_indexes; si++){ if(f->streams[ p->stream_index[si] ] == ist->st) skip=0; } } } if (ist->discard && ist->st->discard != AVDISCARD_ALL && !skip && ist->st->codec->codec_type == ost->st->codec->codec_type && nb_frame_threshold[ist->st->codec->codec_type] <= ist->st->codec_info_nb_frames) { ost->source_index = j; found = 1; break; } } if (!found) { if(! opt_programid) { /* try again and reuse existing stream */ for (j = 0; j < nb_input_streams; j++) { ist = &input_streams[j]; if ( ist->st->codec->codec_type == ost->st->codec->codec_type && ist->st->discard != AVDISCARD_ALL) { ost->source_index = j; found = 1; } } } if (!found) { int i= ost->file_index; av_dump_format(output_files[i], i, output_files[i]->filename, 1); fprintf(stderr, "Could not find input stream matching output stream #%d.%d\n", ost->file_index, ost->index); ffmpeg_exit(1); } } } ist = &input_streams[ost->source_index]; ist->discard = 0; ost->sync_ist = (nb_stream_maps > 0) ? &input_streams[input_files[stream_maps[n].sync_file_index].ist_index + stream_maps[n].sync_stream_index] : ist; } } /* for each output stream, we compute the right encoding parameters */ for(i=0;i<nb_ostreams;i++) { ost = ost_table[i]; os = output_files[ost->file_index]; ist = &input_streams[ost->source_index]; codec = ost->st->codec; icodec = ist->st->codec; if (metadata_streams_autocopy) av_dict_copy(&ost->st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE); ost->st->disposition = ist->st->disposition; codec->bits_per_raw_sample= icodec->bits_per_raw_sample; codec->chroma_sample_location = icodec->chroma_sample_location; if (ost->st->stream_copy) { uint64_t extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE; if (extra_size > INT_MAX) goto fail; /* if stream_copy is selected, no need to decode or encode */ codec->codec_id = icodec->codec_id; codec->codec_type = icodec->codec_type; if(!codec->codec_tag){ if( !os->oformat->codec_tag || av_codec_get_id (os->oformat->codec_tag, icodec->codec_tag) == codec->codec_id || av_codec_get_tag(os->oformat->codec_tag, icodec->codec_id) <= 0) codec->codec_tag = icodec->codec_tag; } codec->bit_rate = icodec->bit_rate; codec->rc_max_rate = icodec->rc_max_rate; codec->rc_buffer_size = icodec->rc_buffer_size; codec->extradata= av_mallocz(extra_size); if (!codec->extradata) goto fail; memcpy(codec->extradata, icodec->extradata, icodec->extradata_size); codec->extradata_size= icodec->extradata_size; if(!copy_tb && av_q2d(icodec->time_base)*icodec->ticks_per_frame > av_q2d(ist->st->time_base) && av_q2d(ist->st->time_base) < 1.0/500){ codec->time_base = icodec->time_base; codec->time_base.num *= icodec->ticks_per_frame; av_reduce(&codec->time_base.num, &codec->time_base.den, codec->time_base.num, codec->time_base.den, INT_MAX); }else codec->time_base = ist->st->time_base; switch(codec->codec_type) { case AVMEDIA_TYPE_AUDIO: if(audio_volume != 256) { fprintf(stderr,"-acodec copy and -vol are incompatible (frames are not decoded)\n"); ffmpeg_exit(1); } codec->channel_layout = icodec->channel_layout; codec->sample_rate = icodec->sample_rate; codec->channels = icodec->channels; codec->frame_size = icodec->frame_size; codec->audio_service_type = 
icodec->audio_service_type; codec->block_align= icodec->block_align; if(codec->block_align == 1 && codec->codec_id == CODEC_ID_MP3) codec->block_align= 0; if(codec->codec_id == CODEC_ID_AC3) codec->block_align= 0; break; case AVMEDIA_TYPE_VIDEO: codec->pix_fmt = icodec->pix_fmt; codec->width = icodec->width; codec->height = icodec->height; codec->has_b_frames = icodec->has_b_frames; if (!codec->sample_aspect_ratio.num) { codec->sample_aspect_ratio = ost->st->sample_aspect_ratio = ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio : ist->st->codec->sample_aspect_ratio.num ? ist->st->codec->sample_aspect_ratio : (AVRational){0, 1}; } break; case AVMEDIA_TYPE_SUBTITLE: codec->width = icodec->width; codec->height = icodec->height; break; case AVMEDIA_TYPE_DATA: break; default: abort(); } } else { if (!ost->enc) ost->enc = avcodec_find_encoder(ost->st->codec->codec_id); switch(codec->codec_type) { case AVMEDIA_TYPE_AUDIO: ost->fifo= av_fifo_alloc(1024); if(!ost->fifo) goto fail; ost->reformat_pair = MAKE_SFMT_PAIR(AV_SAMPLE_FMT_NONE,AV_SAMPLE_FMT_NONE); if (!codec->sample_rate) { codec->sample_rate = icodec->sample_rate; if (icodec->lowres) codec->sample_rate >>= icodec->lowres; } choose_sample_rate(ost->st, ost->enc); codec->time_base = (AVRational){1, codec->sample_rate}; if (codec->sample_fmt == AV_SAMPLE_FMT_NONE) codec->sample_fmt = icodec->sample_fmt; choose_sample_fmt(ost->st, ost->enc); if (!codec->channels) { codec->channels = icodec->channels; codec->channel_layout = icodec->channel_layout; } if (av_get_channel_layout_nb_channels(codec->channel_layout) != codec->channels) codec->channel_layout = 0; ost->audio_resample = codec->sample_rate != icodec->sample_rate || audio_sync_method > 1; icodec->request_channels = codec->channels; ist->decoding_needed = 1; ost->encoding_needed = 1; ost->resample_sample_fmt = icodec->sample_fmt; ost->resample_sample_rate = icodec->sample_rate; ost->resample_channels = icodec->channels; break; case AVMEDIA_TYPE_VIDEO: if (codec->pix_fmt == PIX_FMT_NONE) codec->pix_fmt = icodec->pix_fmt; choose_pixel_fmt(ost->st, ost->enc); if (ost->st->codec->pix_fmt == PIX_FMT_NONE) { fprintf(stderr, "Video pixel format is unknown, stream cannot be encoded\n"); ffmpeg_exit(1); } ost->video_resample = codec->width != icodec->width || codec->height != icodec->height || codec->pix_fmt != icodec->pix_fmt; if (ost->video_resample) { codec->bits_per_raw_sample= frame_bits_per_raw_sample; } if (!codec->width || !codec->height) { codec->width = icodec->width; codec->height = icodec->height; } ost->resample_height = icodec->height; ost->resample_width = icodec->width; ost->resample_pix_fmt= icodec->pix_fmt; ost->encoding_needed = 1; ist->decoding_needed = 1; if (!ost->frame_rate.num) ost->frame_rate = ist->st->r_frame_rate.num ? 
ist->st->r_frame_rate : (AVRational){25,1}; if (ost->enc && ost->enc->supported_framerates && !force_fps) { int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates); ost->frame_rate = ost->enc->supported_framerates[idx]; } codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num}; if( av_q2d(codec->time_base) < 0.001 && video_sync_method && (video_sync_method==1 || (video_sync_method<0 && !(os->oformat->flags & AVFMT_VARIABLE_FPS)))){ av_log(os, AV_LOG_WARNING, "Frame rate very high for a muxer not effciciently supporting it.\n" "Please consider specifiying a lower framerate, a different muxer or -vsync 2\n"); } #if CONFIG_AVFILTER if (configure_video_filters(ist, ost)) { fprintf(stderr, "Error opening filters!\n"); exit(1); } #endif break; case AVMEDIA_TYPE_SUBTITLE: ost->encoding_needed = 1; ist->decoding_needed = 1; break; default: abort(); break; } /* two pass mode */ if (ost->encoding_needed && codec->codec_id != CODEC_ID_H264 && (codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) { char logfilename[1024]; FILE *f; snprintf(logfilename, sizeof(logfilename), "%s-%d.log", pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX, i); if (codec->flags & CODEC_FLAG_PASS1) { f = fopen(logfilename, "wb"); if (!f) { fprintf(stderr, "Cannot write log file '%s' for pass-1 encoding: %s\n", logfilename, strerror(errno)); ffmpeg_exit(1); } ost->logfile = f; } else { char *logbuffer; size_t logbuffer_size; if (read_file(logfilename, &logbuffer, &logbuffer_size) < 0) { fprintf(stderr, "Error reading log file '%s' for pass-2 encoding\n", logfilename); ffmpeg_exit(1); } codec->stats_in = logbuffer; } } } if(codec->codec_type == AVMEDIA_TYPE_VIDEO){ /* maximum video buffer size is 6-bytes per pixel, plus DPX header size */ int size= codec->width * codec->height; bit_buffer_size= FFMAX(bit_buffer_size, 6*size + 1664); } } if (!bit_buffer) bit_buffer = av_malloc(bit_buffer_size); if (!bit_buffer) { fprintf(stderr, "Cannot allocate %d bytes output buffer\n", bit_buffer_size); ret = AVERROR(ENOMEM); goto fail; } /* open each encoder */ for(i=0;i<nb_ostreams;i++) { ost = ost_table[i]; if (ost->encoding_needed) { AVCodec *codec = ost->enc; AVCodecContext *dec = input_streams[ost->source_index].st->codec; if (!codec) { snprintf(error, sizeof(error), "Encoder (codec id %d) not found for output stream #%d.%d", ost->st->codec->codec_id, ost->file_index, ost->index); ret = AVERROR(EINVAL); goto dump_format; } if (dec->subtitle_header) { ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size); if (!ost->st->codec->subtitle_header) { ret = AVERROR(ENOMEM); goto dump_format; } memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size); ost->st->codec->subtitle_header_size = dec->subtitle_header_size; } if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) { snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d.%d - maybe incorrect parameters such as bit_rate, rate, width or height", ost->file_index, ost->index); ret = AVERROR(EINVAL); goto dump_format; } assert_codec_experimental(ost->st->codec, 1); assert_avoptions(ost->opts); if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000) av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low." 
"It takes bits/s as argument, not kbits/s\n"); extra_size += ost->st->codec->extradata_size; } } /* open each decoder */ for (i = 0; i < nb_input_streams; i++) { ist = &input_streams[i]; if (ist->decoding_needed) { AVCodec *codec = ist->dec; if (!codec) codec = avcodec_find_decoder(ist->st->codec->codec_id); if (!codec) { snprintf(error, sizeof(error), "Decoder (codec id %d) not found for input stream #%d.%d", ist->st->codec->codec_id, ist->file_index, ist->st->index); ret = AVERROR(EINVAL); goto dump_format; } if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) { snprintf(error, sizeof(error), "Error while opening decoder for input stream #%d.%d", ist->file_index, ist->st->index); ret = AVERROR(EINVAL); goto dump_format; } assert_codec_experimental(ist->st->codec, 0); assert_avoptions(ost->opts); //if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) // ist->st->codec->flags |= CODEC_FLAG_REPEAT_FIELD; } } /* init pts */ for (i = 0; i < nb_input_streams; i++) { AVStream *st; ist = &input_streams[i]; st= ist->st; ist->pts = st->avg_frame_rate.num ? - st->codec->has_b_frames*AV_TIME_BASE / av_q2d(st->avg_frame_rate) : 0; ist->next_pts = AV_NOPTS_VALUE; ist->is_start = 1; } /* set meta data information from input file if required */ for (i=0;i<nb_meta_data_maps;i++) { AVFormatContext *files[2]; AVDictionary **meta[2]; int j; #define METADATA_CHECK_INDEX(index, nb_elems, desc)\ if ((index) < 0 || (index) >= (nb_elems)) {\ snprintf(error, sizeof(error), "Invalid %s index %d while processing metadata maps\n",\ (desc), (index));\ ret = AVERROR(EINVAL);\ goto dump_format;\ } int out_file_index = meta_data_maps[i][0].file; int in_file_index = meta_data_maps[i][1].file; if (in_file_index < 0 || out_file_index < 0) continue; METADATA_CHECK_INDEX(out_file_index, nb_output_files, "output file") METADATA_CHECK_INDEX(in_file_index, nb_input_files, "input file") files[0] = output_files[out_file_index]; files[1] = input_files[in_file_index].ctx; for (j = 0; j < 2; j++) { MetadataMap *map = &meta_data_maps[i][j]; switch (map->type) { case 'g': meta[j] = &files[j]->metadata; break; case 's': METADATA_CHECK_INDEX(map->index, files[j]->nb_streams, "stream") meta[j] = &files[j]->streams[map->index]->metadata; break; case 'c': METADATA_CHECK_INDEX(map->index, files[j]->nb_chapters, "chapter") meta[j] = &files[j]->chapters[map->index]->metadata; break; case 'p': METADATA_CHECK_INDEX(map->index, files[j]->nb_programs, "program") meta[j] = &files[j]->programs[map->index]->metadata; break; } } av_dict_copy(meta[0], *meta[1], AV_DICT_DONT_OVERWRITE); } /* copy global metadata by default */ if (metadata_global_autocopy) { for (i = 0; i < nb_output_files; i++) av_dict_copy(&output_files[i]->metadata, input_files[0].ctx->metadata, AV_DICT_DONT_OVERWRITE); } /* copy chapters according to chapter maps */ for (i = 0; i < nb_chapter_maps; i++) { int infile = chapter_maps[i].in_file; int outfile = chapter_maps[i].out_file; if (infile < 0 || outfile < 0) continue; if (infile >= nb_input_files) { snprintf(error, sizeof(error), "Invalid input file index %d in chapter mapping.\n", infile); ret = AVERROR(EINVAL); goto dump_format; } if (outfile >= nb_output_files) { snprintf(error, sizeof(error), "Invalid output file index %d in chapter mapping.\n",outfile); ret = AVERROR(EINVAL); goto dump_format; } copy_chapters(infile, outfile); } /* copy chapters from the first input file that has them*/ if (!nb_chapter_maps) for (i = 0; i < nb_input_files; i++) { if (!input_files[i].ctx->nb_chapters) continue; for (j = 0; j < 
nb_output_files; j++) if ((ret = copy_chapters(i, j)) < 0) goto dump_format; break; } /* open files and write file headers */ for(i=0;i<nb_output_files;i++) { os = output_files[i]; if (avformat_write_header(os, &output_opts[i]) < 0) { snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?)", i); ret = AVERROR(EINVAL); goto dump_format; } assert_avoptions(output_opts[i]); if (strcmp(output_files[i]->oformat->name, "rtp")) { want_sdp = 0; } } dump_format: /* dump the file output parameters - cannot be done before in case of stream copy */ for(i=0;i<nb_output_files;i++) { av_dump_format(output_files[i], i, output_files[i]->filename, 1); } /* dump the stream mapping */ if (verbose >= 0) { fprintf(stderr, "Stream mapping:\n"); for(i=0;i<nb_ostreams;i++) { ost = ost_table[i]; fprintf(stderr, " Stream #%d.%d -> #%d.%d", input_streams[ost->source_index].file_index, input_streams[ost->source_index].st->index, ost->file_index, ost->index); if (ost->sync_ist != &input_streams[ost->source_index]) fprintf(stderr, " [sync #%d.%d]", ost->sync_ist->file_index, ost->sync_ist->st->index); fprintf(stderr, "\n"); } } if (ret) { fprintf(stderr, "%s\n", error); goto fail; } if (want_sdp) { print_sdp(output_files, nb_output_files); } if (!using_stdin) { if(verbose >= 0) fprintf(stderr, "Press [q] to stop, [?] for help\n"); avio_set_interrupt_cb(decode_interrupt_cb); } term_init(); timer_start = av_gettime(); for(; received_sigterm == 0;) { int file_index, ist_index; AVPacket pkt; double ipts_min; double opts_min; redo: ipts_min= 1e100; opts_min= 1e100; /* if 'q' pressed, exits */ if (!using_stdin) { if (q_pressed) break; /* read_key() returns 0 on EOF */ key = read_key(); if (key == 'q') break; if (key == '+') verbose++; if (key == '-') verbose--; if (key == 's') qp_hist ^= 1; if (key == 'h'){ if (do_hex_dump){ do_hex_dump = do_pkt_dump = 0; } else if(do_pkt_dump){ do_hex_dump = 1; } else do_pkt_dump = 1; av_log_set_level(AV_LOG_DEBUG); } if (key == 'd' || key == 'D'){ int debug=0; if(key == 'D') { debug = input_streams[0].st->codec->debug<<1; if(!debug) debug = 1; while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash debug += debug; }else scanf("%d", &debug); for(i=0;i<nb_input_streams;i++) { input_streams[i].st->codec->debug = debug; } for(i=0;i<nb_ostreams;i++) { ost = ost_table[i]; ost->st->codec->debug = debug; } if(debug) av_log_set_level(AV_LOG_DEBUG); fprintf(stderr,"debug=%d\n", debug); } if (key == '?'){ fprintf(stderr, "key function\n" "? 
show this help\n" "+ increase verbosity\n" "- decrease verbosity\n" "D cycle through available debug modes\n" "h dump packets/hex press to cycle through the 3 states\n" "q quit\n" "s Show QP histogram\n" ); } } /* select the stream that we must read now by looking at the smallest output pts */ file_index = -1; for(i=0;i<nb_ostreams;i++) { double ipts, opts; ost = ost_table[i]; os = output_files[ost->file_index]; ist = &input_streams[ost->source_index]; if(ist->is_past_recording_time || no_packet[ist->file_index]) continue; opts = ost->st->pts.val * av_q2d(ost->st->time_base); ipts = (double)ist->pts; if (!input_files[ist->file_index].eof_reached){ if(ipts < ipts_min) { ipts_min = ipts; if(input_sync ) file_index = ist->file_index; } if(opts < opts_min) { opts_min = opts; if(!input_sync) file_index = ist->file_index; } } if(ost->frame_number >= max_frames[ost->st->codec->codec_type]){ file_index= -1; break; } } /* if none, if is finished */ if (file_index < 0) { if(no_packet_count){ no_packet_count=0; memset(no_packet, 0, sizeof(no_packet)); usleep(10000); continue; } break; } /* finish if limit size exhausted */ if (limit_filesize != 0 && limit_filesize <= avio_tell(output_files[0]->pb)) break; /* read a frame from it and output it in the fifo */ is = input_files[file_index].ctx; ret= av_read_frame(is, &pkt); if(ret == AVERROR(EAGAIN)){ no_packet[file_index]=1; no_packet_count++; continue; } if (ret < 0) { input_files[file_index].eof_reached = 1; if (opt_shortest) break; else continue; } no_packet_count=0; memset(no_packet, 0, sizeof(no_packet)); if (do_pkt_dump) { av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump, is->streams[pkt.stream_index]); } /* the following test is needed in case new streams appear dynamically in stream : we ignore them */ if (pkt.stream_index >= input_files[file_index].ctx->nb_streams) goto discard_packet; ist_index = input_files[file_index].ist_index + pkt.stream_index; ist = &input_streams[ist_index]; if (ist->discard) goto discard_packet; if (pkt.dts != AV_NOPTS_VALUE) pkt.dts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base); if (pkt.pts != AV_NOPTS_VALUE) pkt.pts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base); if (ist->ts_scale) { if(pkt.pts != AV_NOPTS_VALUE) pkt.pts *= ist->ts_scale; if(pkt.dts != AV_NOPTS_VALUE) pkt.dts *= ist->ts_scale; } // fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n", ist->next_pts, pkt.dts, input_files[ist->file_index].ts_offset, ist->st->codec->codec_type); if (pkt.dts != AV_NOPTS_VALUE && ist->next_pts != AV_NOPTS_VALUE && (is->iformat->flags & AVFMT_TS_DISCONT)) { int64_t pkt_dts= av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q); int64_t delta= pkt_dts - ist->next_pts; if((FFABS(delta) > 1LL*dts_delta_threshold*AV_TIME_BASE || pkt_dts+1<ist->pts)&& !copy_ts){ input_files[ist->file_index].ts_offset -= delta; if (verbose > 2) fprintf(stderr, "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n", delta, input_files[ist->file_index].ts_offset); pkt.dts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); if(pkt.pts != AV_NOPTS_VALUE) pkt.pts-= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base); } } /* finish if recording time exhausted */ if (recording_time != INT64_MAX && (pkt.pts != AV_NOPTS_VALUE ? 
av_compare_ts(pkt.pts, ist->st->time_base, recording_time + start_time, (AVRational){1, 1000000}) : av_compare_ts(ist->pts, AV_TIME_BASE_Q, recording_time + start_time, (AVRational){1, 1000000}) )>= 0) { ist->is_past_recording_time = 1; goto discard_packet; } //fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size); if (output_packet(ist, ist_index, ost_table, nb_ostreams, &pkt) < 0) { if (verbose >= 0) fprintf(stderr, "Error while decoding stream #%d.%d\n", ist->file_index, ist->st->index); if (exit_on_error) ffmpeg_exit(1); av_free_packet(&pkt); goto redo; } discard_packet: av_free_packet(&pkt); /* dump report by using the output first video and audio streams */ print_report(output_files, ost_table, nb_ostreams, 0); } /* at the end of stream, we must flush the decoder buffers */ for (i = 0; i < nb_input_streams; i++) { ist = &input_streams[i]; if (ist->decoding_needed) { output_packet(ist, i, ost_table, nb_ostreams, NULL); } } term_exit(); /* write the trailer if needed and close file */ for(i=0;i<nb_output_files;i++) { os = output_files[i]; av_write_trailer(os); } /* dump report by using the first video and audio streams */ print_report(output_files, ost_table, nb_ostreams, 1); /* close each encoder */ for(i=0;i<nb_ostreams;i++) { ost = ost_table[i]; if (ost->encoding_needed) { av_freep(&ost->st->codec->stats_in); avcodec_close(ost->st->codec); } #if CONFIG_AVFILTER avfilter_graph_free(&ost->graph); #endif } /* close each decoder */ for (i = 0; i < nb_input_streams; i++) { ist = &input_streams[i]; if (ist->decoding_needed) { avcodec_close(ist->st->codec); } } /* finished ! */ ret = 0; fail: av_freep(&bit_buffer); if (ost_table) { for(i=0;i<nb_ostreams;i++) { ost = ost_table[i]; if (ost) { if (ost->st->stream_copy) av_freep(&ost->st->codec->extradata); if (ost->logfile) { fclose(ost->logfile); ost->logfile = NULL; } av_fifo_free(ost->fifo); /* works even if fifo is not initialized but set to zero */ av_freep(&ost->st->codec->subtitle_header); av_free(ost->resample_frame.data[0]); av_free(ost->forced_kf_pts); if (ost->video_resample) sws_freeContext(ost->img_resample_ctx); if (ost->resample) audio_resample_close(ost->resample); if (ost->reformat_ctx) av_audio_convert_free(ost->reformat_ctx); av_dict_free(&ost->opts); av_free(ost); } } av_free(ost_table); } return ret; }
idx: 4
target: 0
static void write_bootloader ( CPUMIPSState * env , uint8_t * base , int64_t kernel_entry ) { uint32_t * p ; p = ( uint32_t * ) base ; stl_raw ( p ++ , 0x0bf00160 ) ; stl_raw ( p ++ , 0x00000000 ) ; stl_raw ( base + 0x500 , 0xbfc00580 ) ; stl_raw ( base + 0x504 , 0xbfc0083c ) ; stl_raw ( base + 0x520 , 0xbfc00580 ) ; stl_raw ( base + 0x52c , 0xbfc00800 ) ; stl_raw ( base + 0x534 , 0xbfc00808 ) ; stl_raw ( base + 0x538 , 0xbfc00800 ) ; stl_raw ( base + 0x53c , 0xbfc00800 ) ; stl_raw ( base + 0x540 , 0xbfc00800 ) ; stl_raw ( base + 0x544 , 0xbfc00800 ) ; stl_raw ( base + 0x548 , 0xbfc00800 ) ; stl_raw ( base + 0x54c , 0xbfc00800 ) ; stl_raw ( base + 0x550 , 0xbfc00800 ) ; stl_raw ( base + 0x554 , 0xbfc00800 ) ; p = ( uint32_t * ) ( base + 0x580 ) ; stl_raw ( p ++ , 0x24040002 ) ; stl_raw ( p ++ , 0x3c1d0000 | ( ( ( ENVP_ADDR - 64 ) >> 16 ) & 0xffff ) ) ; stl_raw ( p ++ , 0x37bd0000 | ( ( ENVP_ADDR - 64 ) & 0xffff ) ) ; stl_raw ( p ++ , 0x3c050000 | ( ( ENVP_ADDR >> 16 ) & 0xffff ) ) ; stl_raw ( p ++ , 0x34a50000 | ( ENVP_ADDR & 0xffff ) ) ; stl_raw ( p ++ , 0x3c060000 | ( ( ( ENVP_ADDR + 8 ) >> 16 ) & 0xffff ) ) ; stl_raw ( p ++ , 0x34c60000 | ( ( ENVP_ADDR + 8 ) & 0xffff ) ) ; stl_raw ( p ++ , 0x3c070000 | ( loaderparams . ram_size >> 16 ) ) ; stl_raw ( p ++ , 0x34e70000 | ( loaderparams . ram_size & 0xffff ) ) ; stl_raw ( p ++ , 0x3c09b400 ) ; # ifdef TARGET_WORDS_BIGENDIAN stl_raw ( p ++ , 0x3c08df00 ) ; # else stl_raw ( p ++ , 0x340800df ) ; # endif stl_raw ( p ++ , 0xad280068 ) ; stl_raw ( p ++ , 0x3c09bbe0 ) ; # ifdef TARGET_WORDS_BIGENDIAN stl_raw ( p ++ , 0x3c08c000 ) ; # else stl_raw ( p ++ , 0x340800c0 ) ; # endif stl_raw ( p ++ , 0xad280048 ) ; # ifdef TARGET_WORDS_BIGENDIAN stl_raw ( p ++ , 0x3c084000 ) ; # else stl_raw ( p ++ , 0x34080040 ) ; # endif stl_raw ( p ++ , 0xad280050 ) ; # ifdef TARGET_WORDS_BIGENDIAN stl_raw ( p ++ , 0x3c088000 ) ; # else stl_raw ( p ++ , 0x34080080 ) ; # endif stl_raw ( p ++ , 0xad280058 ) ; # ifdef TARGET_WORDS_BIGENDIAN stl_raw ( p ++ , 0x3c083f00 ) ; # else stl_raw ( p ++ , 0x3408003f ) ; # endif stl_raw ( p ++ , 0xad280060 ) ; # ifdef TARGET_WORDS_BIGENDIAN stl_raw ( p ++ , 0x3c08c100 ) ; # else stl_raw ( p ++ , 0x340800c1 ) ; # endif stl_raw ( p ++ , 0xad280080 ) ; # ifdef TARGET_WORDS_BIGENDIAN stl_raw ( p ++ , 0x3c085e00 ) ; # else stl_raw ( p ++ , 0x3408005e ) ; # endif stl_raw ( p ++ , 0xad280088 ) ; stl_raw ( p ++ , 0x3c1f0000 | ( ( kernel_entry >> 16 ) & 0xffff ) ) ; stl_raw ( p ++ , 0x37ff0000 | ( kernel_entry & 0xffff ) ) ; stl_raw ( p ++ , 0x03e00008 ) ; stl_raw ( p ++ , 0x00000000 ) ; p = ( uint32_t * ) ( base + 0x800 ) ; stl_raw ( p ++ , 0x03e00008 ) ; stl_raw ( p ++ , 0x24020000 ) ; stl_raw ( p ++ , 0x03e06821 ) ; stl_raw ( p ++ , 0x00805821 ) ; stl_raw ( p ++ , 0x00a05021 ) ; stl_raw ( p ++ , 0x91440000 ) ; stl_raw ( p ++ , 0x254a0001 ) ; stl_raw ( p ++ , 0x10800005 ) ; stl_raw ( p ++ , 0x00000000 ) ; stl_raw ( p ++ , 0x0ff0021c ) ; stl_raw ( p ++ , 0x00000000 ) ; stl_raw ( p ++ , 0x08000205 ) ; stl_raw ( p ++ , 0x00000000 ) ; stl_raw ( p ++ , 0x01a00008 ) ; stl_raw ( p ++ , 0x01602021 ) ; stl_raw ( p ++ , 0x03e06821 ) ; stl_raw ( p ++ , 0x00805821 ) ; stl_raw ( p ++ , 0x00a05021 ) ; stl_raw ( p ++ , 0x00c06021 ) ; stl_raw ( p ++ , 0x91440000 ) ; stl_raw ( p ++ , 0x0ff0021c ) ; stl_raw ( p ++ , 0x00000000 ) ; stl_raw ( p ++ , 0x254a0001 ) ; stl_raw ( p ++ , 0x258cffff ) ; stl_raw ( p ++ , 0x1580fffa ) ; stl_raw ( p ++ , 0x00000000 ) ; stl_raw ( p ++ , 0x01a00008 ) ; stl_raw ( p ++ , 0x01602021 ) ; stl_raw ( p ++ , 0x3c08b800 ) ; 
stl_raw ( p ++ , 0x350803f8 ) ; stl_raw ( p ++ , 0x91090005 ) ; stl_raw ( p ++ , 0x00000000 ) ; stl_raw ( p ++ , 0x31290040 ) ; stl_raw ( p ++ , 0x1120fffc ) ; stl_raw ( p ++ , 0x00000000 ) ; stl_raw ( p ++ , 0x03e00008 ) ; stl_raw ( p ++ , 0xa1040000 ) ; }
idx: 5
target: 1
static char *make_filename_safe(const char *filename TSRMLS_DC) { if (*filename && strncmp(filename, ":memory:", sizeof(":memory:")-1)) { char *fullpath = expand_filepath(filename, NULL TSRMLS_CC); if (!fullpath) { return NULL; } if (PG(safe_mode) && (!php_checkuid(fullpath, NULL, CHECKUID_CHECK_FILE_AND_DIR))) { efree(fullpath); return NULL; } if (php_check_open_basedir(fullpath TSRMLS_CC)) { efree(fullpath); return NULL; } return fullpath; } return estrdup(filename); }
idx: 6
target: 0
armv6pmu_handle_irq(int irq_num, void *dev) { unsigned long pmcr = armv6_pmcr_read(); struct perf_sample_data data; struct cpu_hw_events *cpuc; struct pt_regs *regs; int idx; if (!armv6_pmcr_has_overflowed(pmcr)) return IRQ_NONE; regs = get_irq_regs(); /* * The interrupts are cleared by writing the overflow flags back to * the control register. All of the other bits don't have any effect * if they are rewritten, so write the whole value back. */ armv6_pmcr_write(pmcr); perf_sample_data_init(&data, 0); cpuc = &__get_cpu_var(cpu_hw_events); for (idx = 0; idx <= armpmu->num_events; ++idx) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc; if (!test_bit(idx, cpuc->active_mask)) continue; /* * We have a single interrupt for all counters. Check that * each counter has overflowed before we process it. */ if (!armv6_pmcr_counter_has_overflowed(pmcr, idx)) continue; hwc = &event->hw; armpmu_event_update(event, hwc, idx, 1); data.period = event->hw.last_period; if (!armpmu_event_set_period(event, hwc, idx)) continue; if (perf_event_overflow(event, &data, regs)) armpmu->disable(hwc, idx); } /* * Handle the pending perf events. * * Note: this call *must* be run with interrupts disabled. For * platforms that can have the PMU interrupts raised as an NMI, this * will not work. */ irq_work_run(); return IRQ_HANDLED; }
idx: 7
target: 1
_dl_dst_count (const char *name, int is_path) { size_t cnt = 0; do { size_t len = 1; /* $ORIGIN is not expanded for SUID/GUID programs. */ if ((((!__libc_enable_secure && strncmp (&name[1], "ORIGIN", 6) == 0 && (len = 7) != 0) || (strncmp (&name[1], "PLATFORM", 8) == 0 && (len = 9) != 0)) && (name[len] == '\0' || name[len] == '/' || (is_path && name[len] == ':'))) || (name[1] == '{' && ((!__libc_enable_secure && strncmp (&name[2], "ORIGIN}", 7) == 0 && (len = 9) != 0) || (strncmp (&name[2], "PLATFORM}", 9) == 0 && (len = 11) != 0)))) ++cnt; name = strchr (name + len, '$'); } while (name != NULL); return cnt; }
idx: 8
target: 1
_dl_dst_substitute (struct link_map *l, const char *name, char *result, int is_path) { char *last_elem, *wp; /* Now fill the result path. While copying over the string we keep track of the start of the last path element. When we come accross a DST we copy over the value or (if the value is not available) leave the entire path element out. */ last_elem = wp = result; do { if (*name == '$') { const char *repl; size_t len; if ((((strncmp (&name[1], "ORIGIN", 6) == 0 && (len = 7) != 0) || (strncmp (&name[1], "PLATFORM", 8) == 0 && (len = 9) != 0)) && (name[len] == '\0' || name[len] == '/' || (is_path && name[len] == ':'))) || (name[1] == '{' && ((strncmp (&name[2], "ORIGIN}", 7) == 0 && (len = 9) != 0) || (strncmp (&name[2], "PLATFORM}", 9) == 0 && (len = 11) != 0)))) { repl = ((len == 7 || name[2] == 'O') ? (__libc_enable_secure ? NULL : l->l_origin) : _dl_platform); if (repl != NULL && repl != (const char *) -1) { wp = __stpcpy (wp, repl); name += len; } else { /* We cannot use this path element, the value of the replacement is unknown. */ wp = last_elem; name += len; while (*name != '\0' && (!is_path || *name != ':')) ++name; } } else /* No DST we recognize. */ *wp++ = *name++; } else if (is_path && *name == ':') { *wp++ = *name++; last_elem = wp; } else *wp++ = *name++; } while (*name != '\0'); *wp = '\0'; return result; }
idx: 9
target: 1
unpack_Z_stream(int fd_in, int fd_out) { IF_DESKTOP(long long total_written = 0;) IF_DESKTOP(long long) int retval = -1; unsigned char *stackp; long code; int finchar; long oldcode; long incode; int inbits; int posbits; int outpos; int insize; int bitmask; long free_ent; long maxcode; long maxmaxcode; int n_bits; int rsize = 0; unsigned char *inbuf; /* were eating insane amounts of stack - */ unsigned char *outbuf; /* bad for some embedded targets */ unsigned char *htab; unsigned short *codetab; /* Hmm, these were statics - why?! */ /* user settable max # bits/code */ int maxbits; /* = BITS; */ /* block compress mode -C compatible with 2.0 */ int block_mode; /* = BLOCK_MODE; */ inbuf = xzalloc(IBUFSIZ + 64); outbuf = xzalloc(OBUFSIZ + 2048); htab = xzalloc(HSIZE); /* wsn't zeroed out before, maybe can xmalloc? */ codetab = xzalloc(HSIZE * sizeof(codetab[0])); insize = 0; /* xread isn't good here, we have to return - caller may want * to do some cleanup (e.g. delete incomplete unpacked file etc) */ if (full_read(fd_in, inbuf, 1) != 1) { bb_error_msg("short read"); goto err; } maxbits = inbuf[0] & BIT_MASK; block_mode = inbuf[0] & BLOCK_MODE; maxmaxcode = MAXCODE(maxbits); if (maxbits > BITS) { bb_error_msg("compressed with %d bits, can only handle " BITS_STR" bits", maxbits); goto err; } n_bits = INIT_BITS; maxcode = MAXCODE(INIT_BITS) - 1; bitmask = (1 << INIT_BITS) - 1; oldcode = -1; finchar = 0; outpos = 0; posbits = 0 << 3; free_ent = ((block_mode) ? FIRST : 256); /* As above, initialize the first 256 entries in the table. */ /*clear_tab_prefixof(); - done by xzalloc */ for (code = 255; code >= 0; --code) { tab_suffixof(code) = (unsigned char) code; } do { resetbuf: { int i; int e; int o; o = posbits >> 3; e = insize - o; for (i = 0; i < e; ++i) inbuf[i] = inbuf[i + o]; insize = e; posbits = 0; } if (insize < (int) (IBUFSIZ + 64) - IBUFSIZ) { rsize = safe_read(fd_in, inbuf + insize, IBUFSIZ); //error check?? insize += rsize; } inbits = ((rsize > 0) ? (insize - insize % n_bits) << 3 : (insize << 3) - (n_bits - 1)); while (inbits > posbits) { if (free_ent > maxcode) { posbits = ((posbits - 1) + ((n_bits << 3) - (posbits - 1 + (n_bits << 3)) % (n_bits << 3))); ++n_bits; if (n_bits == maxbits) { maxcode = maxmaxcode; } else { maxcode = MAXCODE(n_bits) - 1; } bitmask = (1 << n_bits) - 1; goto resetbuf; } { unsigned char *p = &inbuf[posbits >> 3]; code = ((((long) (p[0])) | ((long) (p[1]) << 8) | ((long) (p[2]) << 16)) >> (posbits & 0x7)) & bitmask; } posbits += n_bits; if (oldcode == -1) { oldcode = code; finchar = (int) oldcode; outbuf[outpos++] = (unsigned char) finchar; continue; } if (code == CLEAR && block_mode) { clear_tab_prefixof(); free_ent = FIRST - 1; posbits = ((posbits - 1) + ((n_bits << 3) - (posbits - 1 + (n_bits << 3)) % (n_bits << 3))); n_bits = INIT_BITS; maxcode = MAXCODE(INIT_BITS) - 1; bitmask = (1 << INIT_BITS) - 1; goto resetbuf; } incode = code; stackp = de_stack; /* Special case for KwKwK string. 
*/ if (code >= free_ent) { if (code > free_ent) { unsigned char *p; posbits -= n_bits; p = &inbuf[posbits >> 3]; bb_error_msg ("insize:%d posbits:%d inbuf:%02X %02X %02X %02X %02X (%d)", insize, posbits, p[-1], p[0], p[1], p[2], p[3], (posbits & 07)); bb_error_msg("corrupted data"); goto err; } *--stackp = (unsigned char) finchar; code = oldcode; } /* Generate output characters in reverse order */ while ((long) code >= (long) 256) { *--stackp = tab_suffixof(code); code = tab_prefixof(code); } finchar = tab_suffixof(code); *--stackp = (unsigned char) finchar; /* And put them out in forward order */ { int i; i = de_stack - stackp; if (outpos + i >= OBUFSIZ) { do { if (i > OBUFSIZ - outpos) { i = OBUFSIZ - outpos; } if (i > 0) { memcpy(outbuf + outpos, stackp, i); outpos += i; } if (outpos >= OBUFSIZ) { full_write(fd_out, outbuf, outpos); //error check?? IF_DESKTOP(total_written += outpos;) outpos = 0; } stackp += i; i = de_stack - stackp; } while (i > 0); } else { memcpy(outbuf + outpos, stackp, i); outpos += i; } } /* Generate the new entry. */ code = free_ent; if (code < maxmaxcode) { tab_prefixof(code) = (unsigned short) oldcode; tab_suffixof(code) = (unsigned char) finchar; free_ent = code + 1; } /* Remember previous code. */ oldcode = incode; } } while (rsize > 0); if (outpos > 0) { full_write(fd_out, outbuf, outpos); //error check?? IF_DESKTOP(total_written += outpos;) } retval = IF_DESKTOP(total_written) + 0; err: free(inbuf); free(outbuf); free(htab); free(codetab); return retval; }
idx: 10
target: 1
armv6pmu_handle_irq(int irq_num, void *dev) { unsigned long pmcr = armv6_pmcr_read(); struct perf_sample_data data; struct cpu_hw_events *cpuc; struct pt_regs *regs; int idx; if (!armv6_pmcr_has_overflowed(pmcr)) return IRQ_NONE; regs = get_irq_regs(); /* * The interrupts are cleared by writing the overflow flags back to * the control register. All of the other bits don't have any effect * if they are rewritten, so write the whole value back. */ armv6_pmcr_write(pmcr); perf_sample_data_init(&data, 0); cpuc = &__get_cpu_var(cpu_hw_events); for (idx = 0; idx <= armpmu->num_events; ++idx) { struct perf_event *event = cpuc->events[idx]; struct hw_perf_event *hwc; if (!test_bit(idx, cpuc->active_mask)) continue; /* * We have a single interrupt for all counters. Check that * each counter has overflowed before we process it. */ if (!armv6_pmcr_counter_has_overflowed(pmcr, idx)) continue; hwc = &event->hw; armpmu_event_update(event, hwc, idx, 1); data.period = event->hw.last_period; if (!armpmu_event_set_period(event, hwc, idx)) continue; if (perf_event_overflow(event, 0, &data, regs)) armpmu->disable(hwc, idx); } /* * Handle the pending perf events. * * Note: this call *must* be run with interrupts disabled. For * platforms that can have the PMU interrupts raised as an NMI, this * will not work. */ irq_work_run(); return IRQ_HANDLED; }
idx: 11
target: 0
static bool search_header ( struct message_search_context * ctx , const struct message_header_line * hdr ) { static const unsigned char crlf [ 2 ] = { '\r' , '\n' } ; return str_find_more ( ctx -> str_find_ctx , ( const unsigned char * ) hdr -> name , hdr -> name_len ) || str_find_more ( ctx -> str_find_ctx , hdr -> middle , hdr -> middle_len ) || str_find_more ( ctx -> str_find_ctx , hdr -> full_value , hdr -> full_value_len ) || ( ! hdr -> no_newline && str_find_more ( ctx -> str_find_ctx , crlf , 2 ) ) ; }
idx: 12
target: 0
static void v4l2_free_buffer(void *opaque, uint8_t *unused) { V4L2Buffer* avbuf = opaque; V4L2m2mContext *s = buf_to_m2mctx(avbuf); if (atomic_fetch_sub(&avbuf->context_refcount, 1) == 1) { atomic_fetch_sub_explicit(&s->refcount, 1, memory_order_acq_rel); if (s->reinit) { if (!atomic_load(&s->refcount)) sem_post(&s->refsync); } else if (avbuf->context->streamon) ff_v4l2_buffer_enqueue(avbuf); av_buffer_unref(&avbuf->context_ref); } }
idx: 13
target: 0
static int qemuAgentIOProcessLine ( qemuAgentPtr mon , const char * line , qemuAgentMessagePtr msg ) { virJSONValuePtr obj = NULL ; int ret = - 1 ; VIR_DEBUG ( "Line [%s]" , line ) ; if ( ! ( obj = virJSONValueFromString ( line ) ) ) { if ( msg && msg -> sync && msg -> first ) { VIR_DEBUG ( "Received garbage on sync" ) ; msg -> finished = 1 ; return 0 ; } goto cleanup ; } if ( obj -> type != VIR_JSON_TYPE_OBJECT ) { virReportError ( VIR_ERR_INTERNAL_ERROR , _ ( "Parsed JSON reply '%s' isn't an object" ) , line ) ; goto cleanup ; } if ( virJSONValueObjectHasKey ( obj , "QMP" ) == 1 ) { ret = 0 ; } else if ( virJSONValueObjectHasKey ( obj , "event" ) == 1 ) { ret = qemuAgentIOProcessEvent ( mon , obj ) ; } else if ( virJSONValueObjectHasKey ( obj , "error" ) == 1 || virJSONValueObjectHasKey ( obj , "return" ) == 1 ) { if ( msg ) { if ( msg -> sync ) { unsigned long long id ; if ( virJSONValueObjectGetNumberUlong ( obj , "return" , & id ) < 0 ) { VIR_DEBUG ( "Ignoring delayed reply on sync" ) ; ret = 0 ; goto cleanup ; } VIR_DEBUG ( "Guest returned ID: %llu" , id ) ; if ( msg -> id != id ) { VIR_DEBUG ( "Guest agent returned ID: %llu instead of %llu" , id , msg -> id ) ; ret = 0 ; goto cleanup ; } } msg -> rxObject = obj ; msg -> finished = 1 ; obj = NULL ; } else { VIR_DEBUG ( "Ignoring delayed reply" ) ; } ret = 0 ; } else { virReportError ( VIR_ERR_INTERNAL_ERROR , _ ( "Unknown JSON reply '%s'" ) , line ) ; } cleanup : virJSONValueFree ( obj ) ; return ret ; }
idx: 14
target: 1
static void cirrus_do_copy(CirrusVGAState *s, int dst, int src, int w, int h) { int sx, sy; int dx, dy; int width, height; int depth; int notify = 0; depth = s->get_bpp((VGAState *)s) / 8; s->get_resolution((VGAState *)s, &width, &height); /* extra x, y */ sx = (src % (width * depth)) / depth; sy = (src / (width * depth)); dx = (dst % (width *depth)) / depth; dy = (dst / (width * depth)); /* normalize width */ w /= depth; /* if we're doing a backward copy, we have to adjust our x/y to be the upper left corner (instead of the lower right corner) */ if (s->cirrus_blt_dstpitch < 0) { sx -= (s->cirrus_blt_width / depth) - 1; dx -= (s->cirrus_blt_width / depth) - 1; sy -= s->cirrus_blt_height - 1; dy -= s->cirrus_blt_height - 1; } /* are we in the visible portion of memory? */ if (sx >= 0 && sy >= 0 && dx >= 0 && dy >= 0 && (sx + w) <= width && (sy + h) <= height && (dx + w) <= width && (dy + h) <= height) { notify = 1; } /* make to sure only copy if it's a plain copy ROP */ if (*s->cirrus_rop != cirrus_bitblt_rop_fwd_src && *s->cirrus_rop != cirrus_bitblt_rop_bkwd_src) notify = 0; /* we have to flush all pending changes so that the copy is generated at the appropriate moment in time */ if (notify) vga_hw_update(); (*s->cirrus_rop) (s, s->vram_ptr + s->cirrus_blt_dstaddr, s->vram_ptr + s->cirrus_blt_srcaddr, s->cirrus_blt_dstpitch, s->cirrus_blt_srcpitch, s->cirrus_blt_width, s->cirrus_blt_height); if (notify) s->ds->dpy_copy(s->ds, sx, sy, dx, dy, s->cirrus_blt_width / depth, s->cirrus_blt_height); /* we don't have to notify the display that this portion has changed since dpy_copy implies this */ if (!notify) cirrus_invalidate_region(s, s->cirrus_blt_dstaddr, s->cirrus_blt_dstpitch, s->cirrus_blt_width, s->cirrus_blt_height); }
idx: 15
target: 0
int av_opencl_buffer_write(cl_mem dst_cl_buf, uint8_t *src_buf, size_t buf_size) { cl_int status; void *mapped = clEnqueueMapBuffer(gpu_env.command_queue, dst_cl_buf, CL_TRUE,CL_MAP_WRITE, 0, sizeof(uint8_t) * buf_size, 0, NULL, NULL, &status); if (status != CL_SUCCESS) { av_log(&openclutils, AV_LOG_ERROR, "Could not map OpenCL buffer: %s\n", opencl_errstr(status)); return AVERROR_EXTERNAL; } memcpy(mapped, src_buf, buf_size); status = clEnqueueUnmapMemObject(gpu_env.command_queue, dst_cl_buf, mapped, 0, NULL, NULL); if (status != CL_SUCCESS) { av_log(&openclutils, AV_LOG_ERROR, "Could not unmap OpenCL buffer: %s\n", opencl_errstr(status)); return AVERROR_EXTERNAL; } return 0; }
idx: 16
target: 0
static int intel_pmu_handle_irq(struct pt_regs *regs) { struct perf_sample_data data; struct cpu_hw_events *cpuc; int bit, loops; u64 status; int handled; perf_sample_data_init(&data, 0); cpuc = &__get_cpu_var(cpu_hw_events); /* * Some chipsets need to unmask the LVTPC in a particular spot * inside the nmi handler. As a result, the unmasking was pushed * into all the nmi handlers. * * This handler doesn't seem to have any issues with the unmasking * so it was left at the top. */ apic_write(APIC_LVTPC, APIC_DM_NMI); intel_pmu_disable_all(); handled = intel_pmu_drain_bts_buffer(); status = intel_pmu_get_status(); if (!status) { intel_pmu_enable_all(0); return handled; } loops = 0; again: intel_pmu_ack_status(status); if (++loops > 100) { WARN_ONCE(1, "perfevents: irq loop stuck!\n"); perf_event_print_debug(); intel_pmu_reset(); goto done; } inc_irq_stat(apic_perf_irqs); intel_pmu_lbr_read(); /* * PEBS overflow sets bit 62 in the global status register */ if (__test_and_clear_bit(62, (unsigned long *)&status)) { handled++; x86_pmu.drain_pebs(regs); } for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { struct perf_event *event = cpuc->events[bit]; handled++; if (!test_bit(bit, cpuc->active_mask)) continue; if (!intel_pmu_save_and_restart(event)) continue; data.period = event->hw.last_period; if (perf_event_overflow(event, &data, regs)) x86_pmu_stop(event, 0); } /* * Repeat if there is more work to be done: */ status = intel_pmu_get_status(); if (status) goto again; done: intel_pmu_enable_all(0); return handled; }
idx: 17
target: 1
expand_dynamic_string_token (struct link_map *l, const char *s) { /* We make two runs over the string. First we determine how large the resulting string is and then we copy it over. Since this is now frequently executed operation we are looking here not for performance but rather for code size. */ size_t cnt; size_t total; char *result; /* Determine the nubmer of DST elements. */ cnt = DL_DST_COUNT (s, 1); /* If we do not have to replace anything simply copy the string. */ if (cnt == 0) return local_strdup (s); /* Determine the length of the substituted string. */ total = DL_DST_REQUIRED (l, s, strlen (s), cnt); /* Allocate the necessary memory. */ result = (char *) malloc (total + 1); if (result == NULL) return NULL; return DL_DST_SUBSTITUTE (l, s, result, 1); }
idx: 19
target: 1
static int r3d_read_rdvo(AVFormatContext *s, Atom *atom) { R3DContext *r3d = s->priv_data; AVStream *st = s->streams[0]; int i; r3d->video_offsets_count = (atom->size - 8) / 4; r3d->video_offsets = av_malloc(atom->size); if (!r3d->video_offsets) return AVERROR(ENOMEM); for (i = 0; i < r3d->video_offsets_count; i++) { r3d->video_offsets[i] = avio_rb32(s->pb); if (!r3d->video_offsets[i]) { r3d->video_offsets_count = i; break; } av_dlog(s, "video offset %d: %#x\n", i, r3d->video_offsets[i]); } if (st->r_frame_rate.num) st->duration = av_rescale_q(r3d->video_offsets_count, (AVRational){st->r_frame_rate.den, st->r_frame_rate.num}, st->time_base); av_dlog(s, "duration %"PRId64"\n", st->duration); return 0; }
idx: 20
target: 0
static char * default_opaque_literal_tag ( tvbuff_t * tvb , guint32 offset , const char * token _U_ , guint8 codepage _U_ , guint32 * length ) { guint32 data_len = tvb_get_guintvar ( tvb , offset , length ) ; char * str = wmem_strdup_printf ( wmem_packet_scope ( ) , "(%d bytes of opaque data)" , data_len ) ; * length += data_len ; return str ; }
idx: 22
target: 0
static int em_syscall(struct x86_emulate_ctxt *ctxt) { struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data; u16 cs_sel, ss_sel; u64 efer = 0; /* syscall is not available in real mode */ if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86) return emulate_ud(ctxt); if (!(em_syscall_is_enabled(ctxt))) return emulate_ud(ctxt); ops->get_msr(ctxt, MSR_EFER, &efer); setup_syscalls_segments(ctxt, &cs, &ss); if (!(efer & EFER_SCE)) return emulate_ud(ctxt); ops->get_msr(ctxt, MSR_STAR, &msr_data); msr_data >>= 32; cs_sel = (u16)(msr_data & 0xfffc); ss_sel = (u16)(msr_data + 8); if (efer & EFER_LMA) { cs.d = 0; cs.l = 1; } ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ctxt->regs[VCPU_REGS_RCX] = ctxt->_eip; if (efer & EFER_LMA) { #ifdef CONFIG_X86_64 ctxt->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF; ops->get_msr(ctxt, ctxt->mode == X86EMUL_MODE_PROT64 ? MSR_LSTAR : MSR_CSTAR, &msr_data); ctxt->_eip = msr_data; ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data); ctxt->eflags &= ~(msr_data | EFLG_RF); #endif } else { /* legacy mode */ ops->get_msr(ctxt, MSR_STAR, &msr_data); ctxt->_eip = (u32)msr_data; ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF); } return X86EMUL_CONTINUE; }
idx: 23
target: 0
static void auth_server_send_new_request ( struct auth_server_connection * conn , struct auth_client_request * request , const struct auth_request_info * info ) { string_t * str ; str = t_str_new ( 512 ) ; str_printfa ( str , "AUTH\t%u\t" , request -> id ) ; str_append_tabescaped ( str , info -> mech ) ; str_append ( str , "\tservice=" ) ; str_append_tabescaped ( str , info -> service ) ; if ( ( info -> flags & AUTH_REQUEST_FLAG_SUPPORT_FINAL_RESP ) != 0 ) str_append ( str , "\tfinal-resp-ok" ) ; if ( ( info -> flags & AUTH_REQUEST_FLAG_SECURED ) != 0 ) { str_append ( str , "\tsecured" ) ; if ( ( info -> flags & AUTH_REQUEST_FLAG_TRANSPORT_SECURITY_TLS ) != 0 ) str_append ( str , "=tls" ) ; } else { i_assert ( ( info -> flags & AUTH_REQUEST_FLAG_TRANSPORT_SECURITY_TLS ) == 0 ) ; } if ( ( info -> flags & AUTH_REQUEST_FLAG_NO_PENALTY ) != 0 ) str_append ( str , "\tno-penalty" ) ; if ( ( info -> flags & AUTH_REQUEST_FLAG_VALID_CLIENT_CERT ) != 0 ) str_append ( str , "\tvalid-client-cert" ) ; if ( ( info -> flags & AUTH_REQUEST_FLAG_DEBUG ) != 0 ) str_append ( str , "\tdebug" ) ; if ( info -> session_id != NULL ) { str_append ( str , "\tsession=" ) ; str_append_tabescaped ( str , info -> session_id ) ; } if ( info -> cert_username != NULL ) { str_append ( str , "\tcert_username=" ) ; str_append_tabescaped ( str , info -> cert_username ) ; } if ( info -> local_ip . family != 0 ) str_printfa ( str , "\tlip=%s" , net_ip2addr ( & info -> local_ip ) ) ; if ( info -> remote_ip . family != 0 ) str_printfa ( str , "\trip=%s" , net_ip2addr ( & info -> remote_ip ) ) ; if ( info -> local_port != 0 ) str_printfa ( str , "\tlport=%u" , info -> local_port ) ; if ( info -> remote_port != 0 ) str_printfa ( str , "\trport=%u" , info -> remote_port ) ; if ( info -> real_local_ip . family != 0 && ! net_ip_compare ( & info -> real_local_ip , & info -> local_ip ) ) { str_printfa ( str , "\treal_lip=%s" , net_ip2addr ( & info -> real_local_ip ) ) ; } if ( info -> real_remote_ip . family != 0 && ! 
net_ip_compare ( & info -> real_remote_ip , & info -> remote_ip ) ) { str_printfa ( str , "\treal_rip=%s" , net_ip2addr ( & info -> real_remote_ip ) ) ; } if ( info -> real_local_port != 0 && info -> real_local_port != info -> local_port ) str_printfa ( str , "\treal_lport=%u" , info -> real_local_port ) ; if ( info -> real_remote_port != 0 && info -> real_remote_port != info -> remote_port ) str_printfa ( str , "\treal_rport=%u" , info -> real_remote_port ) ; if ( info -> local_name != NULL && * info -> local_name != '\0' ) { str_append ( str , "\tlocal_name=" ) ; str_append_tabescaped ( str , info -> local_name ) ; } if ( info -> ssl_cipher_bits != 0 && info -> ssl_cipher != NULL ) { str_append ( str , "\tssl_cipher=" ) ; str_append_tabescaped ( str , info -> ssl_cipher ) ; str_printfa ( str , "\tssl_cipher_bits=%u" , info -> ssl_cipher_bits ) ; if ( info -> ssl_pfs != NULL ) { str_append ( str , "\tssl_pfs=" ) ; str_append_tabescaped ( str , info -> ssl_pfs ) ; } } if ( info -> ssl_protocol != NULL ) { str_append ( str , "\tssl_protocol=" ) ; str_append_tabescaped ( str , info -> ssl_protocol ) ; } if ( info -> client_id != NULL && * info -> client_id != '\0' ) { str_append ( str , "\tclient_id=" ) ; str_append_tabescaped ( str , info -> client_id ) ; } if ( info -> forward_fields != NULL && * info -> forward_fields != '\0' ) { str_append ( str , "\tforward_fields=" ) ; str_append_tabescaped ( str , info -> forward_fields ) ; } if ( info -> initial_resp_base64 != NULL ) { str_append ( str , "\tresp=" ) ; str_append_tabescaped ( str , info -> initial_resp_base64 ) ; } str_append_c ( str , '\n' ) ; if ( o_stream_send ( conn -> output , str_data ( str ) , str_len ( str ) ) < 0 ) i_error ( "Error sending request to auth server: %m" ) ; }
idx: 24
target: 1
static int cirrus_bitblt_videotovideo_copy(CirrusVGAState * s) { if (s->ds->dpy_copy) { cirrus_do_copy(s, s->cirrus_blt_dstaddr - s->start_addr, s->cirrus_blt_srcaddr - s->start_addr, s->cirrus_blt_width, s->cirrus_blt_height); } else { (*s->cirrus_rop) (s, s->vram_ptr + s->cirrus_blt_dstaddr, s->vram_ptr + s->cirrus_blt_srcaddr, s->cirrus_blt_dstpitch, s->cirrus_blt_srcpitch, s->cirrus_blt_width, s->cirrus_blt_height); cirrus_invalidate_region(s, s->cirrus_blt_dstaddr, s->cirrus_blt_dstpitch, s->cirrus_blt_width, s->cirrus_blt_height); } return 1; }
idx: 25
target: 1
static int dds_decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { DDSContext *ctx = avctx->priv_data; GetByteContext *gbc = &ctx->gbc; AVFrame *frame = data; int mipmap; int ret; ff_texturedsp_init(&ctx->texdsp); bytestream2_init(gbc, avpkt->data, avpkt->size); if (bytestream2_get_bytes_left(gbc) < 128) { av_log(avctx, AV_LOG_ERROR, "Frame is too small (%d).\n", bytestream2_get_bytes_left(gbc)); return AVERROR_INVALIDDATA; } if (bytestream2_get_le32(gbc) != MKTAG('D', 'D', 'S', ' ') || bytestream2_get_le32(gbc) != 124) { // header size av_log(avctx, AV_LOG_ERROR, "Invalid DDS header.\n"); return AVERROR_INVALIDDATA; } bytestream2_skip(gbc, 4); // flags avctx->height = bytestream2_get_le32(gbc); avctx->width = bytestream2_get_le32(gbc); ret = av_image_check_size(avctx->width, avctx->height, 0, avctx); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "Invalid image size %dx%d.\n", avctx->width, avctx->height); return ret; } /* Since codec is based on 4x4 blocks, size is aligned to 4. */ avctx->coded_width = FFALIGN(avctx->width, TEXTURE_BLOCK_W); avctx->coded_height = FFALIGN(avctx->height, TEXTURE_BLOCK_H); bytestream2_skip(gbc, 4); // pitch bytestream2_skip(gbc, 4); // depth mipmap = bytestream2_get_le32(gbc); if (mipmap != 0) av_log(avctx, AV_LOG_VERBOSE, "Found %d mipmaps (ignored).\n", mipmap); /* Extract pixel format information, considering additional elements * in reserved1 and reserved2. */ ret = parse_pixel_format(avctx); if (ret < 0) return ret; ret = ff_get_buffer(avctx, frame, 0); if (ret < 0) return ret; if (ctx->compressed) { int size = (avctx->coded_height / TEXTURE_BLOCK_H) * (avctx->coded_width / TEXTURE_BLOCK_W) * ctx->tex_ratio; ctx->slice_count = av_clip(avctx->thread_count, 1, avctx->coded_height / TEXTURE_BLOCK_H); if (bytestream2_get_bytes_left(gbc) < size) { av_log(avctx, AV_LOG_ERROR, "Compressed Buffer is too small (%d < %d).\n", bytestream2_get_bytes_left(gbc), size); return AVERROR_INVALIDDATA; } /* Use the decompress function on the texture, one block per thread. */ ctx->tex_data = gbc->buffer; avctx->execute2(avctx, decompress_texture_thread, frame, NULL, ctx->slice_count); } else if (!ctx->paletted && ctx->bpp == 4 && avctx->pix_fmt == AV_PIX_FMT_PAL8) { uint8_t *dst = frame->data[0]; int x, y, i; /* Use the first 64 bytes as palette, then copy the rest. */ bytestream2_get_buffer(gbc, frame->data[1], 16 * 4); for (i = 0; i < 16; i++) { AV_WN32(frame->data[1] + i*4, (frame->data[1][2+i*4]<<0)+ (frame->data[1][1+i*4]<<8)+ (frame->data[1][0+i*4]<<16)+ (frame->data[1][3+i*4]<<24) ); } frame->palette_has_changed = 1; if (bytestream2_get_bytes_left(gbc) < frame->height * frame->width / 2) { av_log(avctx, AV_LOG_ERROR, "Buffer is too small (%d < %d).\n", bytestream2_get_bytes_left(gbc), frame->height * frame->width / 2); return AVERROR_INVALIDDATA; } for (y = 0; y < frame->height; y++) { for (x = 0; x < frame->width; x += 2) { uint8_t val = bytestream2_get_byte(gbc); dst[x ] = val & 0xF; dst[x + 1] = val >> 4; } dst += frame->linesize[0]; } } else { int linesize = av_image_get_linesize(avctx->pix_fmt, frame->width, 0); if (ctx->paletted) { int i; /* Use the first 1024 bytes as palette, then copy the rest. 
*/ bytestream2_get_buffer(gbc, frame->data[1], 256 * 4); for (i = 0; i < 256; i++) AV_WN32(frame->data[1] + i*4, (frame->data[1][2+i*4]<<0)+ (frame->data[1][1+i*4]<<8)+ (frame->data[1][0+i*4]<<16)+ (frame->data[1][3+i*4]<<24) ); frame->palette_has_changed = 1; } if (bytestream2_get_bytes_left(gbc) < frame->height * linesize) { av_log(avctx, AV_LOG_ERROR, "Buffer is too small (%d < %d).\n", bytestream2_get_bytes_left(gbc), frame->height * linesize); return AVERROR_INVALIDDATA; } av_image_copy_plane(frame->data[0], frame->linesize[0], gbc->buffer, linesize, linesize, frame->height); } /* Run any post processing here if needed. */ if (ctx->postproc != DDS_NONE) run_postproc(avctx, frame); /* Frame is ready to be output. */ frame->pict_type = AV_PICTURE_TYPE_I; frame->key_frame = 1; *got_frame = 1; return avpkt->size; }
26
1
static void cirrus_mem_writeb_mode4and5_8bpp(CirrusVGAState * s, unsigned mode, unsigned offset, uint32_t mem_value) { int x; unsigned val = mem_value; uint8_t *dst; dst = s->vram_ptr + offset; for (x = 0; x < 8; x++) { if (val & 0x80) { *dst = s->cirrus_shadow_gr1; } else if (mode == 5) { *dst = s->cirrus_shadow_gr0; } val <<= 1; dst++; } cpu_physical_memory_set_dirty(s->vram_offset + offset); cpu_physical_memory_set_dirty(s->vram_offset + offset + 7); }
27
1
vsyslog(pri, fmt, ap) int pri; register const char *fmt; va_list ap; { struct tm now_tm; time_t now; int fd; FILE *f; char *buf = 0; size_t bufsize = 0; size_t prioff, msgoff; struct sigaction action, oldaction; struct sigaction *oldaction_ptr = NULL; int sigpipe; int saved_errno = errno; #define INTERNALLOG LOG_ERR|LOG_CONS|LOG_PERROR|LOG_PID /* Check for invalid bits. */ if (pri & ~(LOG_PRIMASK|LOG_FACMASK)) { syslog(INTERNALLOG, "syslog: unknown facility/priority: %x", pri); pri &= LOG_PRIMASK|LOG_FACMASK; } /* Check priority against setlogmask values. */ if ((LOG_MASK (LOG_PRI (pri)) & LogMask) == 0) return; /* Set default facility if none specified. */ if ((pri & LOG_FACMASK) == 0) pri |= LogFacility; /* Build the message in a memory-buffer stream. */ f = open_memstream (&buf, &bufsize); prioff = fprintf (f, "<%d>", pri); (void) time (&now); #ifdef USE_IN_LIBIO f->_IO_write_ptr += strftime (f->_IO_write_ptr, f->_IO_write_end - f->_IO_write_ptr, "%h %e %T ", __localtime_r (&now, &now_tm)); #else f->__bufp += strftime (f->__bufp, f->__put_limit - f->__bufp, "%h %e %T ", __localtime_r (&now, &now_tm)); #endif msgoff = ftell (f); if (LogTag == NULL) LogTag = __progname; if (LogTag != NULL) fputs_unlocked (LogTag, f); if (LogStat & LOG_PID) fprintf (f, "[%d]", __getpid ()); if (LogTag != NULL) putc_unlocked (':', f), putc_unlocked (' ', f); /* Restore errno for %m format. */ __set_errno (saved_errno); /* We have the header. Print the user's format into the buffer. */ vfprintf (f, fmt, ap); /* Close the memory stream; this will finalize the data into a malloc'd buffer in BUF. */ fclose (f); /* Output to stderr if requested. */ if (LogStat & LOG_PERROR) { struct iovec iov[2]; register struct iovec *v = iov; v->iov_base = buf + msgoff; v->iov_len = bufsize - msgoff; ++v; v->iov_base = (char *) "\n"; v->iov_len = 1; (void)__writev(STDERR_FILENO, iov, 2); } /* Prepare for multiple users. We have to take care: open and write are cancellation points. */ __libc_cleanup_region_start ((void (*) (void *)) cancel_handler, &oldaction_ptr); __libc_lock_lock (syslog_lock); /* Prepare for a broken connection. */ memset (&action, 0, sizeof (action)); action.sa_handler = sigpipe_handler; sigemptyset (&action.sa_mask); sigpipe = __sigaction (SIGPIPE, &action, &oldaction); if (sigpipe == 0) oldaction_ptr = &oldaction; /* Get connected, output the message to the local logger. */ if (!connected) openlog_internal(LogTag, LogStat | LOG_NDELAY, 0); /* If we have a SOCK_STREAM connection, also send ASCII NUL as a record terminator. */ if (LogType == SOCK_STREAM) ++bufsize; if (!connected || __send(LogFile, buf, bufsize, 0) < 0) { closelog_internal (); /* attempt re-open next time */ /* * Output the message to the console; don't worry about blocking, * if console blocks everything will. Make sure the error reported * is the one from the syslogd failure. */ if (LogStat & LOG_CONS && (fd = __open(_PATH_CONSOLE, O_WRONLY|O_NOCTTY, 0)) >= 0) { dprintf (fd, "%s\r\n", buf + msgoff); (void)__close(fd); } } if (sigpipe == 0) __sigaction (SIGPIPE, &oldaction, (struct sigaction *) NULL); /* End of critical section. */ __libc_cleanup_region_end (0); __libc_lock_unlock (syslog_lock); free (buf); }
30
1
static void check_lowpass_line(int depth){ LOCAL_ALIGNED_32(uint8_t, src, [SRC_SIZE]); LOCAL_ALIGNED_32(uint8_t, dst_ref, [WIDTH_PADDED]); LOCAL_ALIGNED_32(uint8_t, dst_new, [WIDTH_PADDED]); int w = WIDTH; int mref = WIDTH_PADDED * -1; int pref = WIDTH_PADDED; int i, depth_byte; InterlaceContext s; declare_func(void, uint8_t *dstp, ptrdiff_t linesize, const uint8_t *srcp, ptrdiff_t mref, ptrdiff_t pref, int clip_max); s.lowpass = 1; s.lowpass = VLPF_LIN; depth_byte = depth >> 3; w /= depth_byte; memset(src, 0, SRC_SIZE); memset(dst_ref, 0, WIDTH_PADDED); memset(dst_new, 0, WIDTH_PADDED); randomize_buffers(src, SRC_SIZE); ff_interlace_init(&s, depth); if (check_func(s.lowpass_line, "lowpass_line_%d", depth)) { for (i = 0; i < 32; i++) { /* simulate crop */ call_ref(dst_ref, w, src + WIDTH_PADDED, mref - i*depth_byte, pref, 0); call_new(dst_new, w, src + WIDTH_PADDED, mref - i*depth_byte, pref, 0); if (memcmp(dst_ref, dst_new, WIDTH - i)) fail(); } bench_new(dst_new, w, src + WIDTH_PADDED, mref, pref, 0); } }
31
1
static int cirrus_bitblt_common_patterncopy(CirrusVGAState * s, const uint8_t * src) { uint8_t *dst; dst = s->vram_ptr + s->cirrus_blt_dstaddr; (*s->cirrus_rop) (s, dst, src, s->cirrus_blt_dstpitch, 0, s->cirrus_blt_width, s->cirrus_blt_height); cirrus_invalidate_region(s, s->cirrus_blt_dstaddr, s->cirrus_blt_dstpitch, s->cirrus_blt_width, s->cirrus_blt_height); return 1; }
32
0
int kvm_arch_process_async_events(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
        assert(env->mcg_cap);
        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
        kvm_cpu_synchronize_state(cs);
        if (env->exception_injected == EXCP08_DBLE) {
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            cs->exit_request = 1;
            return 0;
        }
        env->exception_injected = EXCP12_MCHK;
        env->has_error_code = 0;
        cs->halted = 0;
        if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
            env->mp_state = KVM_MP_STATE_RUNNABLE;
        }
    }
    if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
        !(env->hflags & HF_SMM_MASK)) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_init(cpu);
    }
    if (kvm_irqchip_in_kernel()) {
        return 0;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
        cs->halted = 0;
    }
    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
        kvm_cpu_synchronize_state(cs);
        do_cpu_sipi(cpu);
    }
    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
        cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
        kvm_cpu_synchronize_state(cs);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }
    return cs->halted;
}
33
1
static void test_init(TestData *d) { QPCIBus *bus; QTestState *qs; char *s; s = g_strdup_printf("-machine q35 %s %s", d->noreboot ? "" : "-global ICH9-LPC.noreboot=false", !d->args ? "" : d->args); qs = qtest_start(s); qtest_irq_intercept_in(qs, "ioapic"); g_free(s); bus = qpci_init_pc(NULL); d->dev = qpci_device_find(bus, QPCI_DEVFN(0x1f, 0x00)); g_assert(d->dev != NULL); qpci_device_enable(d->dev); /* set ACPI PM I/O space base address */ qpci_config_writel(d->dev, ICH9_LPC_PMBASE, PM_IO_BASE_ADDR | 0x1); /* enable ACPI I/O */ qpci_config_writeb(d->dev, ICH9_LPC_ACPI_CTRL, 0x80); /* set Root Complex BAR */ qpci_config_writel(d->dev, ICH9_LPC_RCBA, RCBA_BASE_ADDR | 0x1); d->tco_io_base = qpci_legacy_iomap(d->dev, PM_IO_BASE_ADDR + 0x60); }
34
1
static void cirrus_invalidate_region(CirrusVGAState * s, int off_begin, int off_pitch, int bytesperline, int lines) { int y; int off_cur; int off_cur_end; for (y = 0; y < lines; y++) { off_cur = off_begin; off_cur_end = off_cur + bytesperline; off_cur &= TARGET_PAGE_MASK; while (off_cur < off_cur_end) { cpu_physical_memory_set_dirty(s->vram_offset + off_cur); off_cur += TARGET_PAGE_SIZE; } off_begin += off_pitch; } }
35
1
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) { int rc = X86EMUL_CONTINUE; int mode = ctxt->mode; int def_op_bytes, def_ad_bytes, goffset, simd_prefix; bool op_prefix = false; bool has_seg_override = false; struct opcode opcode; ctxt->memop.type = OP_NONE; ctxt->memopp = NULL; ctxt->_eip = ctxt->eip; ctxt->fetch.ptr = ctxt->fetch.data; ctxt->fetch.end = ctxt->fetch.data + insn_len; ctxt->opcode_len = 1; if (insn_len > 0) memcpy(ctxt->fetch.data, insn, insn_len); else { rc = __do_insn_fetch_bytes(ctxt, 1); if (rc != X86EMUL_CONTINUE) return rc; } switch (mode) { case X86EMUL_MODE_REAL: case X86EMUL_MODE_VM86: case X86EMUL_MODE_PROT16: def_op_bytes = def_ad_bytes = 2; break; case X86EMUL_MODE_PROT32: def_op_bytes = def_ad_bytes = 4; break; #ifdef CONFIG_X86_64 case X86EMUL_MODE_PROT64: def_op_bytes = 4; def_ad_bytes = 8; break; #endif default: return EMULATION_FAILED; } ctxt->op_bytes = def_op_bytes; ctxt->ad_bytes = def_ad_bytes; /* Legacy prefixes. */ for (;;) { switch (ctxt->b = insn_fetch(u8, ctxt)) { case 0x66: /* operand-size override */ op_prefix = true; /* switch between 2/4 bytes */ ctxt->op_bytes = def_op_bytes ^ 6; break; case 0x67: /* address-size override */ if (mode == X86EMUL_MODE_PROT64) /* switch between 4/8 bytes */ ctxt->ad_bytes = def_ad_bytes ^ 12; else /* switch between 2/4 bytes */ ctxt->ad_bytes = def_ad_bytes ^ 6; break; case 0x26: /* ES override */ case 0x2e: /* CS override */ case 0x36: /* SS override */ case 0x3e: /* DS override */ has_seg_override = true; ctxt->seg_override = (ctxt->b >> 3) & 3; break; case 0x64: /* FS override */ case 0x65: /* GS override */ has_seg_override = true; ctxt->seg_override = ctxt->b & 7; break; case 0x40 ... 0x4f: /* REX */ if (mode != X86EMUL_MODE_PROT64) goto done_prefixes; ctxt->rex_prefix = ctxt->b; continue; case 0xf0: /* LOCK */ ctxt->lock_prefix = 1; break; case 0xf2: /* REPNE/REPNZ */ case 0xf3: /* REP/REPE/REPZ */ ctxt->rep_prefix = ctxt->b; break; default: goto done_prefixes; } /* Any legacy prefix after a REX prefix nullifies its effect. */ ctxt->rex_prefix = 0; } done_prefixes: /* REX prefix. */ if (ctxt->rex_prefix & 8) ctxt->op_bytes = 8; /* REX.W */ /* Opcode byte(s). */ opcode = opcode_table[ctxt->b]; /* Two-byte opcode? */ if (ctxt->b == 0x0f) { ctxt->opcode_len = 2; ctxt->b = insn_fetch(u8, ctxt); opcode = twobyte_table[ctxt->b]; /* 0F_38 opcode map */ if (ctxt->b == 0x38) { ctxt->opcode_len = 3; ctxt->b = insn_fetch(u8, ctxt); opcode = opcode_map_0f_38[ctxt->b]; } } ctxt->d = opcode.flags; if (ctxt->d & ModRM) ctxt->modrm = insn_fetch(u8, ctxt); /* vex-prefix instructions are not implemented */ if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) && (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) { ctxt->d = NotImpl; } while (ctxt->d & GroupMask) { switch (ctxt->d & GroupMask) { case Group: goffset = (ctxt->modrm >> 3) & 7; opcode = opcode.u.group[goffset]; break; case GroupDual: goffset = (ctxt->modrm >> 3) & 7; if ((ctxt->modrm >> 6) == 3) opcode = opcode.u.gdual->mod3[goffset]; else opcode = opcode.u.gdual->mod012[goffset]; break; case RMExt: goffset = ctxt->modrm & 7; opcode = opcode.u.group[goffset]; break; case Prefix: if (ctxt->rep_prefix && op_prefix) return EMULATION_FAILED; simd_prefix = op_prefix ? 
0x66 : ctxt->rep_prefix; switch (simd_prefix) { case 0x00: opcode = opcode.u.gprefix->pfx_no; break; case 0x66: opcode = opcode.u.gprefix->pfx_66; break; case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break; case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break; } break; case Escape: if (ctxt->modrm > 0xbf) opcode = opcode.u.esc->high[ctxt->modrm - 0xc0]; else opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7]; break; case InstrDual: if ((ctxt->modrm >> 6) == 3) opcode = opcode.u.idual->mod3; else opcode = opcode.u.idual->mod012; break; case ModeDual: if (ctxt->mode == X86EMUL_MODE_PROT64) opcode = opcode.u.mdual->mode64; else opcode = opcode.u.mdual->mode32; break; default: return EMULATION_FAILED; } ctxt->d &= ~(u64)GroupMask; ctxt->d |= opcode.flags; } /* Unrecognised? */ if (ctxt->d == 0) return EMULATION_FAILED; ctxt->execute = opcode.u.execute; if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD))) return EMULATION_FAILED; if (unlikely(ctxt->d & (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch| No16))) { /* * These are copied unconditionally here, and checked unconditionally * in x86_emulate_insn. */ ctxt->check_perm = opcode.check_perm; ctxt->intercept = opcode.intercept; if (ctxt->d & NotImpl) return EMULATION_FAILED; if (mode == X86EMUL_MODE_PROT64) { if (ctxt->op_bytes == 4 && (ctxt->d & Stack)) ctxt->op_bytes = 8; else if (ctxt->d & NearBranch) ctxt->op_bytes = 8; } if (ctxt->d & Op3264) { if (mode == X86EMUL_MODE_PROT64) ctxt->op_bytes = 8; else ctxt->op_bytes = 4; } if ((ctxt->d & No16) && ctxt->op_bytes == 2) ctxt->op_bytes = 4; if (ctxt->d & Sse) ctxt->op_bytes = 16; else if (ctxt->d & Mmx) ctxt->op_bytes = 8; } /* ModRM and SIB bytes. */ if (ctxt->d & ModRM) { rc = decode_modrm(ctxt, &ctxt->memop); if (!has_seg_override) { has_seg_override = true; ctxt->seg_override = ctxt->modrm_seg; } } else if (ctxt->d & MemAbs) rc = decode_abs(ctxt, &ctxt->memop); if (rc != X86EMUL_CONTINUE) goto done; if (!has_seg_override) ctxt->seg_override = VCPU_SREG_DS; ctxt->memop.addr.mem.seg = ctxt->seg_override; /* * Decode and fetch the source operand: register, memory * or immediate. */ rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask); if (rc != X86EMUL_CONTINUE) goto done; /* * Decode and fetch the second source operand: register, memory * or immediate. */ rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask); if (rc != X86EMUL_CONTINUE) goto done; /* Decode and fetch the destination operand: register or memory. */ rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask); if (ctxt->rip_relative) ctxt->memopp->addr.mem.ea = address_mask(ctxt, ctxt->memopp->addr.mem.ea + ctxt->_eip); done: return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK; }
36
0
static void test_simplesignal(void)
{
    struct event ev;
    struct itimerval itv;

    setup_test("Simple signal: ");
    signal_set(&ev, SIGALRM, signal_cb, &ev);
    signal_add(&ev, NULL);
    signal_del(&ev);
    signal_add(&ev, NULL);

    memset(&itv, 0, sizeof(itv));
    itv.it_value.tv_sec = 1;
    if (setitimer(ITIMER_REAL, &itv, NULL) == -1)
        goto skip_simplesignal;

    event_dispatch();

skip_simplesignal:
    if (signal_del(&ev) == -1)
        test_ok = 0;
    cleanup_test();
}
37
0
int TSHttpTxnIsWebsocket(TSHttpTxn txnp)
{
    sdk_assert(sdk_sanity_check_txn(txnp) == TS_SUCCESS);

    HttpSM *sm = (HttpSM *)txnp;
    return sm->t_state.is_websocket;
}
39
1
init_syntax_once () { register int c; static int done; if (done) return; bzero (re_syntax_table, sizeof re_syntax_table); for (c = 'a'; c <= 'z'; c++) re_syntax_table[c] = Sword; for (c = 'A'; c <= 'Z'; c++) re_syntax_table[c] = Sword; for (c = '0'; c <= '9'; c++) re_syntax_table[c] = Sword; re_syntax_table['_'] = Sword; done = 1; }
40
1
static void nbd_refresh_filename(BlockDriverState *bs, QDict *options) { BDRVNBDState *s = bs->opaque; QDict *opts = qdict_new(); QObject *saddr_qdict; Visitor *ov; const char *host = NULL, *port = NULL, *path = NULL; if (s->saddr->type == SOCKET_ADDRESS_KIND_INET) { const InetSocketAddress *inet = s->saddr->u.inet.data; if (!inet->has_ipv4 && !inet->has_ipv6 && !inet->has_to) { host = inet->host; port = inet->port; } } else if (s->saddr->type == SOCKET_ADDRESS_KIND_UNIX) { path = s->saddr->u.q_unix.data->path; } qdict_put(opts, "driver", qstring_from_str("nbd")); if (path && s->export) { snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nbd+unix:///%s?socket=%s", s->export, path); } else if (path && !s->export) { snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nbd+unix://?socket=%s", path); } else if (host && s->export) { snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nbd://%s:%s/%s", host, port, s->export); } else if (host && !s->export) { snprintf(bs->exact_filename, sizeof(bs->exact_filename), "nbd://%s:%s", host, port); } ov = qobject_output_visitor_new(&saddr_qdict); visit_type_SocketAddress(ov, NULL, &s->saddr, &error_abort); visit_complete(ov, &saddr_qdict); assert(qobject_type(saddr_qdict) == QTYPE_QDICT); qdict_put_obj(opts, "server", saddr_qdict); if (s->export) { qdict_put(opts, "export", qstring_from_str(s->export)); } if (s->tlscredsid) { qdict_put(opts, "tls-creds", qstring_from_str(s->tlscredsid)); } qdict_flatten(opts); bs->full_open_options = opts; }
41
1
static int em_jmp_far(struct x86_emulate_ctxt *ctxt) { int rc; unsigned short sel, old_sel; struct desc_struct old_desc, new_desc; const struct x86_emulate_ops *ops = ctxt->ops; u8 cpl = ctxt->ops->cpl(ctxt); /* Assignment of RIP may only fail in 64-bit mode */ if (ctxt->mode == X86EMUL_MODE_PROT64) ops->get_segment(ctxt, &old_sel, &old_desc, NULL, VCPU_SREG_CS); memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, X86_TRANSFER_CALL_JMP, &new_desc); if (rc != X86EMUL_CONTINUE) return rc; rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); if (rc != X86EMUL_CONTINUE) { WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); /* assigning eip failed; restore the old cs */ ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS); return rc; } return rc; }
43
1
int net_init_tap(QemuOpts *opts, const char *name, VLANState *vlan) { const char *ifname; ifname = qemu_opt_get(opts, "ifname"); if (!ifname) { error_report("tap: no interface name"); return -1; } if (tap_win32_init(vlan, "tap", name, ifname) == -1) { return -1; } return 0; }
44
1
static int cirrus_bitblt_solidfill(CirrusVGAState *s, int blt_rop) { cirrus_fill_t rop_func; rop_func = cirrus_fill[rop_to_index[blt_rop]][s->cirrus_blt_pixelwidth - 1]; rop_func(s, s->vram_ptr + s->cirrus_blt_dstaddr, s->cirrus_blt_dstpitch, s->cirrus_blt_width, s->cirrus_blt_height); cirrus_invalidate_region(s, s->cirrus_blt_dstaddr, s->cirrus_blt_dstpitch, s->cirrus_blt_width, s->cirrus_blt_height); cirrus_bitblt_reset(s); return 1; }
45
0
static inline void decode2x2(GetBitContext *gb, uint8_t *dst, int linesize)
{
    int i, j, v[2];

    switch (get_bits(gb, 2)) {
    case 1:
        v[0] = get_bits(gb, 8);
        for (j = 0; j < 2; j++)
            memset(dst + j * linesize, v[0], 2);
        break;
    case 2:
        v[0] = get_bits(gb, 8);
        v[1] = get_bits(gb, 8);
        for (j = 0; j < 2; j++)
            for (i = 0; i < 2; i++)
                dst[j * linesize + i] = v[get_bits1(gb)];
        break;
    case 3:
        for (j = 0; j < 2; j++)
            for (i = 0; i < 2; i++)
                dst[j * linesize + i] = get_bits(gb, 8);
    }
}
47
1
static void cirrus_bitblt_cputovideo_next(CirrusVGAState * s) { int copy_count; uint8_t *end_ptr; if (s->cirrus_srccounter > 0) { if (s->cirrus_blt_mode & CIRRUS_BLTMODE_PATTERNCOPY) { cirrus_bitblt_common_patterncopy(s, s->cirrus_bltbuf); the_end: s->cirrus_srccounter = 0; cirrus_bitblt_reset(s); } else { /* at least one scan line */ do { (*s->cirrus_rop)(s, s->vram_ptr + s->cirrus_blt_dstaddr, s->cirrus_bltbuf, 0, 0, s->cirrus_blt_width, 1); cirrus_invalidate_region(s, s->cirrus_blt_dstaddr, 0, s->cirrus_blt_width, 1); s->cirrus_blt_dstaddr += s->cirrus_blt_dstpitch; s->cirrus_srccounter -= s->cirrus_blt_srcpitch; if (s->cirrus_srccounter <= 0) goto the_end; /* more bytes than needed can be transfered because of word alignment, so we keep them for the next line */ /* XXX: keep alignment to speed up transfer */ end_ptr = s->cirrus_bltbuf + s->cirrus_blt_srcpitch; copy_count = s->cirrus_srcptr_end - end_ptr; memmove(s->cirrus_bltbuf, end_ptr, copy_count); s->cirrus_srcptr = s->cirrus_bltbuf + copy_count; s->cirrus_srcptr_end = s->cirrus_bltbuf + s->cirrus_blt_srcpitch; } while (s->cirrus_srcptr >= s->cirrus_srcptr_end); } } }
48
1
getlogin_r (name, name_len) char *name; size_t name_len; { char tty_pathname[2 + 2 * NAME_MAX]; char *real_tty_path = tty_pathname; int result = 0; struct utmp *ut, line, buffer; { int d = __open ("/dev/tty", 0); if (d < 0) return errno; result = __ttyname_r (d, real_tty_path, sizeof (tty_pathname)); (void) __close (d); if (result != 0) { __set_errno (result); return result; } } real_tty_path += 5; /* Remove "/dev/". */ __setutent (); strncpy (line.ut_line, real_tty_path, sizeof line.ut_line); if (__getutline_r (&line, &buffer, &ut) < 0) { if (errno == ESRCH) /* The caller expects ENOENT if nothing is found. */ result = ENOENT; else result = errno; } else { size_t needed = strlen (ut->ut_line) + 1; if (needed < name_len) { __set_errno (ERANGE); result = ERANGE; } else { memcpy (name, ut->ut_line, needed); result = 0; } } __endutent (); return result; }
50
1
static void cirrus_mem_writeb_mode4and5_16bpp(CirrusVGAState * s, unsigned mode, unsigned offset, uint32_t mem_value) { int x; unsigned val = mem_value; uint8_t *dst; dst = s->vram_ptr + offset; for (x = 0; x < 8; x++) { if (val & 0x80) { *dst = s->cirrus_shadow_gr1; *(dst + 1) = s->gr[0x11]; } else if (mode == 5) { *dst = s->cirrus_shadow_gr0; *(dst + 1) = s->gr[0x10]; } val <<= 1; dst += 2; } cpu_physical_memory_set_dirty(s->vram_offset + offset); cpu_physical_memory_set_dirty(s->vram_offset + offset + 15); }
51
1
static void vp6_parse_coeff_huffman(VP56Context *s) { VP56Model *model = s->modelp; uint8_t *permute = s->scantable.permutated; VLC *vlc_coeff; int coeff, sign, coeff_idx; int b, cg, idx; int pt = 0; /* plane type (0 for Y, 1 for U or V) */ for (b=0; b<6; b++) { int ct = 0; /* code type */ if (b > 3) pt = 1; vlc_coeff = &s->dccv_vlc[pt]; for (coeff_idx=0; coeff_idx<64; ) { int run = 1; if (coeff_idx<2 && s->nb_null[coeff_idx][pt]) { s->nb_null[coeff_idx][pt]--; if (coeff_idx) break; } else { if (get_bits_count(&s->gb) >= s->gb.size_in_bits) return; coeff = get_vlc2(&s->gb, vlc_coeff->table, 9, 3); if (coeff == 0) { if (coeff_idx) { int pt = (coeff_idx >= 6); run += get_vlc2(&s->gb, s->runv_vlc[pt].table, 9, 3); if (run >= 9) run += get_bits(&s->gb, 6); } else s->nb_null[0][pt] = vp6_get_nb_null(s); ct = 0; } else if (coeff == 11) { /* end of block */ if (coeff_idx == 1) /* first AC coeff ? */ s->nb_null[1][pt] = vp6_get_nb_null(s); break; } else { int coeff2 = vp56_coeff_bias[coeff]; if (coeff > 4) coeff2 += get_bits(&s->gb, coeff <= 9 ? coeff - 4 : 11); ct = 1 + (coeff2 > 1); sign = get_bits1(&s->gb); coeff2 = (coeff2 ^ -sign) + sign; if (coeff_idx) coeff2 *= s->dequant_ac; idx = model->coeff_index_to_pos[coeff_idx]; s->block_coeff[b][permute[idx]] = coeff2; } } coeff_idx+=run; cg = FFMIN(vp6_coeff_groups[coeff_idx], 3); vlc_coeff = &s->ract_vlc[pt][ct][cg]; } } }
52
0
int PEM_write_bio_ ## name ( BIO * bp , type * x ) ; # define DECLARE_PEM_write_bio_const ( name , type ) int PEM_write_bio_ ## name ( BIO * bp , const type * x ) ; # define DECLARE_PEM_write_cb_bio ( name , type ) int PEM_write_bio_ ## name ( BIO * bp , type * x , const EVP_CIPHER * enc , unsigned char * kstr , int klen , pem_password_cb * cb , void * u ) ; # define DECLARE_PEM_write ( name , type ) DECLARE_PEM_write_bio ( name , type ) DECLARE_PEM_write_fp ( name , type ) # define DECLARE_PEM_write_const ( name , type ) DECLARE_PEM_write_bio_const ( name , type ) DECLARE_PEM_write_fp_const ( name , type ) # define DECLARE_PEM_write_cb ( name , type ) DECLARE_PEM_write_cb_bio ( name , type ) DECLARE_PEM_write_cb_fp ( name , type ) # define DECLARE_PEM_read ( name , type ) DECLARE_PEM_read_bio ( name , type ) DECLARE_PEM_read_fp ( name , type ) # define DECLARE_PEM_rw ( name , type ) DECLARE_PEM_read ( name , type ) DECLARE_PEM_write ( name , type ) # define DECLARE_PEM_rw_const ( name , type ) DECLARE_PEM_read ( name , type ) DECLARE_PEM_write_const ( name , type ) # define DECLARE_PEM_rw_cb ( name , type ) DECLARE_PEM_read ( name , type ) DECLARE_PEM_write_cb ( name , type ) typedef int pem_password_cb ( char * buf , int size , int rwflag , void * userdata ) ; int PEM_get_EVP_CIPHER_INFO ( char * header , EVP_CIPHER_INFO * cipher ) ; int PEM_do_header ( EVP_CIPHER_INFO * cipher , unsigned char * data , long * len , pem_password_cb * callback , void * u ) ; int PEM_read_bio ( BIO * bp , char * * name , char * * header , unsigned char * * data , long * len ) ; # define PEM_FLAG_SECURE 0x1 # define PEM_FLAG_EAY_COMPATIBLE 0x2 # define PEM_FLAG_ONLY_B64 0x4 int PEM_read_bio_ex ( BIO * bp , char * * name , char * * header , unsigned char * * data , long * len , unsigned int flags ) ; int PEM_bytes_read_bio_secmem ( unsigned char * * pdata , long * plen , char * * pnm , const char * name , BIO * bp , pem_password_cb * cb , void * u ) ; int PEM_write_bio ( BIO * bp , const char * name , const char * hdr , const unsigned char * data , long len ) ; int PEM_bytes_read_bio ( unsigned char * * pdata , long * plen , char * * pnm , const char * name , BIO * bp , pem_password_cb * cb , void * u ) ; void * PEM_ASN1_read_bio ( d2i_of_void * d2i , const char * name , BIO * bp , void * * x , pem_password_cb * cb , void * u ) ; int PEM_ASN1_write_bio ( i2d_of_void * i2d , const char * name , BIO * bp , void * x , const EVP_CIPHER * enc , unsigned char * kstr , int klen , pem_password_cb * cb , void * u ) ; STACK_OF ( X509_INFO ) * PEM_X509_INFO_read_bio ( BIO * bp , STACK_OF ( X509_INFO ) * sk , pem_password_cb * cb , void * u ) ; int PEM_X509_INFO_write_bio ( BIO * bp , X509_INFO * xi , EVP_CIPHER * enc , unsigned char * kstr , int klen , pem_password_cb * cd , void * u ) ; # ifndef OPENSSL_NO_STDIO int PEM_read ( FILE * fp , char * * name , char * * header , unsigned char * * data , long * len ) ; int PEM_write ( FILE * fp , const char * name , const char * hdr , const unsigned char * data , long len ) ; void * PEM_ASN1_read ( d2i_of_void * d2i , const char * name , FILE * fp , void * * x , pem_password_cb * cb , void * u ) ; int PEM_ASN1_write ( i2d_of_void * i2d , const char * name , FILE * fp , void * x , const EVP_CIPHER * enc , unsigned char * kstr , int klen , pem_password_cb * callback , void * u ) ; STACK_OF ( X509_INFO ) * PEM_X509_INFO_read ( FILE * fp , STACK_OF ( X509_INFO ) * sk , pem_password_cb * cb , void * u ) ; # endif int PEM_SignInit ( EVP_MD_CTX * ctx , EVP_MD * type ) ; int 
PEM_SignUpdate ( EVP_MD_CTX * ctx , unsigned char * d , unsigned int cnt ) ; int PEM_SignFinal ( EVP_MD_CTX * ctx , unsigned char * sigret , unsigned int * siglen , EVP_PKEY * pkey ) ; int PEM_def_callback ( char * buf , int num , int rwflag , void * userdata ) ; void PEM_proc_type ( char * buf , int type ) ; void PEM_dek_info ( char * buf , const char * type , int len , char * str ) ; # include < openssl / symhacks . h > DECLARE_PEM_rw ( X509 , X509 ) DECLARE_PEM_rw ( X509_AUX , X509 ) DECLARE_PEM_rw ( X509_REQ , X509_REQ ) DECLARE_PEM_write ( X509_REQ_NEW , X509_REQ ) DECLARE_PEM_rw ( X509_CRL , X509_CRL ) DECLARE_PEM_rw ( PKCS7 , PKCS7 ) DECLARE_PEM_rw ( NETSCAPE_CERT_SEQUENCE , NETSCAPE_CERT_SEQUENCE ) DECLARE_PEM_rw ( PKCS8 , X509_SIG ) DECLARE_PEM_rw ( PKCS8_PRIV_KEY_INFO , PKCS8_PRIV_KEY_INFO ) # ifndef OPENSSL_NO_RSA DECLARE_PEM_rw_cb ( RSAPrivateKey , RSA ) DECLARE_PEM_rw_const ( RSAPublicKey , RSA ) DECLARE_PEM_rw ( RSA_PUBKEY , RSA ) # endif # ifndef OPENSSL_NO_DSA DECLARE_PEM_rw_cb ( DSAPrivateKey , DSA ) DECLARE_PEM_rw ( DSA_PUBKEY , DSA ) DECLARE_PEM_rw_const ( DSAparams , DSA ) # endif # ifndef OPENSSL_NO_EC DECLARE_PEM_rw_const ( ECPKParameters , EC_GROUP ) DECLARE_PEM_rw_cb ( ECPrivateKey , EC_KEY ) DECLARE_PEM_rw ( EC_PUBKEY , EC_KEY ) # endif # ifndef OPENSSL_NO_DH DECLARE_PEM_rw_const ( DHparams , DH ) DECLARE_PEM_write_const ( DHxparams , DH ) # endif DECLARE_PEM_rw_cb ( PrivateKey , EVP_PKEY ) DECLARE_PEM_rw ( PUBKEY , EVP_PKEY )
54
1
static int em_fxrstor(struct x86_emulate_ctxt *ctxt) { struct fxregs_state fx_state; int rc; rc = check_fxsr(ctxt); if (rc != X86EMUL_CONTINUE) return rc; rc = segmented_read(ctxt, ctxt->memop.addr.mem, &fx_state, 512); if (rc != X86EMUL_CONTINUE) return rc; if (fx_state.mxcsr >> 16) return emulate_gp(ctxt, 0); ctxt->ops->get_fpu(ctxt); if (ctxt->mode < X86EMUL_MODE_PROT64) rc = fxrstor_fixup(ctxt, &fx_state); if (rc == X86EMUL_CONTINUE) rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state)); ctxt->ops->put_fpu(ctxt); return rc; }
55
1
check_1_6_dummy(kadm5_principal_ent_t entry, long mask, int n_ks_tuple, krb5_key_salt_tuple *ks_tuple, char **passptr) { int i; char *password = *passptr; /* Old-style randkey operations disallowed tickets to start. */ if (!(mask & KADM5_ATTRIBUTES) || !(entry->attributes & KRB5_KDB_DISALLOW_ALL_TIX)) return; /* The 1.6 dummy password was the octets 1..255. */ for (i = 0; (unsigned char) password[i] == i + 1; i++); if (password[i] != '\0' || i != 255) return; /* This will make the caller use a random password instead. */ *passptr = NULL; }
56
1
static int cirrus_bitblt_videotovideo_copy(CirrusVGAState * s) { if (s->ds->dpy_copy) { cirrus_do_copy(s, s->cirrus_blt_dstaddr - s->start_addr, s->cirrus_blt_srcaddr - s->start_addr, s->cirrus_blt_width, s->cirrus_blt_height); } else { if (BLTUNSAFE(s)) return 0; (*s->cirrus_rop) (s, s->vram_ptr + (s->cirrus_blt_dstaddr & s->cirrus_addr_mask), s->vram_ptr + (s->cirrus_blt_srcaddr & s->cirrus_addr_mask), s->cirrus_blt_dstpitch, s->cirrus_blt_srcpitch, s->cirrus_blt_width, s->cirrus_blt_height); cirrus_invalidate_region(s, s->cirrus_blt_dstaddr, s->cirrus_blt_dstpitch, s->cirrus_blt_width, s->cirrus_blt_height); } return 1; }
57
0
static void test_list_fields()
{
    MYSQL_RES *result;
    int rc;
    myheader("test_list_fields");

    rc = mysql_query(mysql, "drop table if exists t1");
    myquery(rc);

    rc = mysql_query(mysql, "create table t1(c1 int primary key auto_increment, c2 char(10) default 'mysql')");
    myquery(rc);

    result = mysql_list_fields(mysql, "t1", NULL);
    mytest(result);

    rc = my_process_result_set(result);
    DIE_UNLESS(rc == 0);

    verify_prepare_field(result, 0, "c1", "c1", MYSQL_TYPE_LONG,
                         "t1", "t1", current_db, 11, "0");
    verify_prepare_field(result, 1, "c2", "c2", MYSQL_TYPE_STRING,
                         "t1", "t1", current_db, 10, "mysql");

    mysql_free_result(result);
    myquery(mysql_query(mysql, "drop table t1"));
}
58
1
static int vncws_start_tls_handshake(VncState *vs) { int ret = gnutls_handshake(vs->tls.session); if (ret < 0) { if (!gnutls_error_is_fatal(ret)) { VNC_DEBUG("Handshake interrupted (blocking)\n"); if (!gnutls_record_get_direction(vs->tls.session)) { qemu_set_fd_handler(vs->csock, vncws_tls_handshake_io, NULL, vs); } else { qemu_set_fd_handler(vs->csock, NULL, vncws_tls_handshake_io, vs); } return 0; } VNC_DEBUG("Handshake failed %s\n", gnutls_strerror(ret)); vnc_client_error(vs); return -1; } if (vs->vd->tls.x509verify) { if (vnc_tls_validate_certificate(vs) < 0) { VNC_DEBUG("Client verification failed\n"); vnc_client_error(vs); return -1; } else { VNC_DEBUG("Client verification passed\n"); } } VNC_DEBUG("Handshake done, switching to TLS data mode\n"); qemu_set_fd_handler(vs->csock, vncws_handshake_read, NULL, vs); return 0; }
59
0
static int matroska_probe(AVProbeData *p)
{
    uint64_t total = 0;
    int len_mask = 0x80, size = 1, n = 1, i;

    if (AV_RB32(p->buf) != EBML_ID_HEADER)
        return 0;

    total = p->buf[4];
    while (size <= 8 && !(total & len_mask)) {
        size++;
        len_mask >>= 1;
    }
    if (size > 8)
        return 0;
    total &= (len_mask - 1);
    while (n < size)
        total = (total << 8) | p->buf[4 + n++];

    if (p->buf_size < 4 + size + total)
        return 0;

    for (i = 0; i < FF_ARRAY_ELEMS(matroska_doctypes); i++) {
        int probelen = strlen(matroska_doctypes[i]);
        if (total < probelen)
            continue;
        for (n = 4 + size; n <= 4 + size + total - probelen; n++)
            if (!memcmp(p->buf + n, matroska_doctypes[i], probelen))
                return AVPROBE_SCORE_MAX;
    }

    return AVPROBE_SCORE_EXTENSION;
}
60
0
check_1_6_dummy(kadm5_principal_ent_t entry, long mask, int n_ks_tuple, krb5_key_salt_tuple *ks_tuple, char **passptr) { int i; char *password = *passptr; /* Old-style randkey operations disallowed tickets to start. */ if (password == NULL || !(mask & KADM5_ATTRIBUTES) || !(entry->attributes & KRB5_KDB_DISALLOW_ALL_TIX)) return; /* The 1.6 dummy password was the octets 1..255. */ for (i = 0; (unsigned char) password[i] == i + 1; i++); if (password[i] != '\0' || i != 255) return; /* This will make the caller use a random password instead. */ *passptr = NULL; }
62
1
asmlinkage long compat_sys_mount(char __user * dev_name, char __user * dir_name, char __user * type, unsigned long flags, void __user * data) { unsigned long type_page; unsigned long data_page; unsigned long dev_page; char *dir_page; int retval; retval = copy_mount_options (type, &type_page); if (retval < 0) goto out; dir_page = getname(dir_name); retval = PTR_ERR(dir_page); if (IS_ERR(dir_page)) goto out1; retval = copy_mount_options (dev_name, &dev_page); if (retval < 0) goto out2; retval = copy_mount_options (data, &data_page); if (retval < 0) goto out3; retval = -EINVAL; if (type_page) { if (!strcmp((char *)type_page, SMBFS_NAME)) { do_smb_super_data_conv((void *)data_page); } else if (!strcmp((char *)type_page, NCPFS_NAME)) { do_ncp_super_data_conv((void *)data_page); } else if (!strcmp((char *)type_page, NFS4_NAME)) { if (do_nfs4_super_data_conv((void *) data_page)) goto out4; } } lock_kernel(); retval = do_mount((char*)dev_page, dir_page, (char*)type_page, flags, (void*)data_page); unlock_kernel(); out4: free_page(data_page); out3: free_page(dev_page); out2: putname(dir_page); out1: free_page(type_page); out: return retval; }
63
0
process_chpw_request(krb5_context context, void *server_handle, char *realm, krb5_keytab keytab, const krb5_fulladdr *local_faddr, const krb5_fulladdr *remote_faddr, krb5_data *req, krb5_data *rep) { krb5_error_code ret; char *ptr; unsigned int plen, vno; krb5_data ap_req, ap_rep = empty_data(); krb5_data cipher = empty_data(), clear = empty_data(); krb5_auth_context auth_context = NULL; krb5_principal changepw = NULL; krb5_principal client, target = NULL; krb5_ticket *ticket = NULL; krb5_replay_data replay; krb5_error krberror; int numresult; char strresult[1024]; char *clientstr = NULL, *targetstr = NULL; const char *errmsg = NULL; size_t clen; char *cdots; struct sockaddr_storage ss; socklen_t salen; char addrbuf[100]; krb5_address *addr = remote_faddr->address; *rep = empty_data(); if (req->length < 4) { /* either this, or the server is printing bad messages, or the caller passed in garbage */ ret = KRB5KRB_AP_ERR_MODIFIED; numresult = KRB5_KPASSWD_MALFORMED; strlcpy(strresult, "Request was truncated", sizeof(strresult)); goto bailout; } ptr = req->data; /* verify length */ plen = (*ptr++ & 0xff); plen = (plen<<8) | (*ptr++ & 0xff); if (plen != req->length) { ret = KRB5KRB_AP_ERR_MODIFIED; numresult = KRB5_KPASSWD_MALFORMED; strlcpy(strresult, "Request length was inconsistent", sizeof(strresult)); goto bailout; } /* verify version number */ vno = (*ptr++ & 0xff) ; vno = (vno<<8) | (*ptr++ & 0xff); if (vno != 1 && vno != RFC3244_VERSION) { ret = KRB5KDC_ERR_BAD_PVNO; numresult = KRB5_KPASSWD_BAD_VERSION; snprintf(strresult, sizeof(strresult), "Request contained unknown protocol version number %d", vno); goto bailout; } /* read, check ap-req length */ ap_req.length = (*ptr++ & 0xff); ap_req.length = (ap_req.length<<8) | (*ptr++ & 0xff); if (ptr + ap_req.length >= req->data + req->length) { ret = KRB5KRB_AP_ERR_MODIFIED; numresult = KRB5_KPASSWD_MALFORMED; strlcpy(strresult, "Request was truncated in AP-REQ", sizeof(strresult)); goto bailout; } /* verify ap_req */ ap_req.data = ptr; ptr += ap_req.length; ret = krb5_auth_con_init(context, &auth_context); if (ret) { numresult = KRB5_KPASSWD_HARDERROR; strlcpy(strresult, "Failed initializing auth context", sizeof(strresult)); goto chpwfail; } ret = krb5_auth_con_setflags(context, auth_context, KRB5_AUTH_CONTEXT_DO_SEQUENCE); if (ret) { numresult = KRB5_KPASSWD_HARDERROR; strlcpy(strresult, "Failed initializing auth context", sizeof(strresult)); goto chpwfail; } ret = krb5_build_principal(context, &changepw, strlen(realm), realm, "kadmin", "changepw", NULL); if (ret) { numresult = KRB5_KPASSWD_HARDERROR; strlcpy(strresult, "Failed building kadmin/changepw principal", sizeof(strresult)); goto chpwfail; } ret = krb5_rd_req(context, &auth_context, &ap_req, changepw, keytab, NULL, &ticket); if (ret) { numresult = KRB5_KPASSWD_AUTHERROR; strlcpy(strresult, "Failed reading application request", sizeof(strresult)); goto chpwfail; } /* construct the ap-rep */ ret = krb5_mk_rep(context, auth_context, &ap_rep); if (ret) { numresult = KRB5_KPASSWD_AUTHERROR; strlcpy(strresult, "Failed replying to application request", sizeof(strresult)); goto chpwfail; } /* decrypt the ChangePasswdData */ cipher.length = (req->data + req->length) - ptr; cipher.data = ptr; /* * Don't set a remote address in auth_context before calling krb5_rd_priv, * so that we can work against clients behind a NAT. Reflection attacks * aren't a concern since we use sequence numbers and since our requests * don't look anything like our responses. 
Also don't set a local address, * since we don't know what interface the request was received on. */ ret = krb5_rd_priv(context, auth_context, &cipher, &clear, &replay); if (ret) { numresult = KRB5_KPASSWD_HARDERROR; strlcpy(strresult, "Failed decrypting request", sizeof(strresult)); goto chpwfail; } client = ticket->enc_part2->client; /* decode ChangePasswdData for setpw requests */ if (vno == RFC3244_VERSION) { krb5_data *clear_data; ret = decode_krb5_setpw_req(&clear, &clear_data, &target); if (ret != 0) { numresult = KRB5_KPASSWD_MALFORMED; strlcpy(strresult, "Failed decoding ChangePasswdData", sizeof(strresult)); goto chpwfail; } zapfree(clear.data, clear.length); clear = *clear_data; free(clear_data); if (target != NULL) { ret = krb5_unparse_name(context, target, &targetstr); if (ret != 0) { numresult = KRB5_KPASSWD_HARDERROR; strlcpy(strresult, "Failed unparsing target name for log", sizeof(strresult)); goto chpwfail; } } } ret = krb5_unparse_name(context, client, &clientstr); if (ret) { numresult = KRB5_KPASSWD_HARDERROR; strlcpy(strresult, "Failed unparsing client name for log", sizeof(strresult)); goto chpwfail; } /* for cpw, verify that this is an AS_REQ ticket */ if (vno == 1 && (ticket->enc_part2->flags & TKT_FLG_INITIAL) == 0) { numresult = KRB5_KPASSWD_INITIAL_FLAG_NEEDED; strlcpy(strresult, "Ticket must be derived from a password", sizeof(strresult)); goto chpwfail; } /* change the password */ ptr = k5memdup0(clear.data, clear.length, &ret); ret = schpw_util_wrapper(server_handle, client, target, (ticket->enc_part2->flags & TKT_FLG_INITIAL) != 0, ptr, NULL, strresult, sizeof(strresult)); if (ret) errmsg = krb5_get_error_message(context, ret); /* zap the password */ zapfree(clear.data, clear.length); zapfree(ptr, clear.length); clear = empty_data(); clen = strlen(clientstr); trunc_name(&clen, &cdots); switch (addr->addrtype) { case ADDRTYPE_INET: { struct sockaddr_in *sin = ss2sin(&ss); sin->sin_family = AF_INET; memcpy(&sin->sin_addr, addr->contents, addr->length); sin->sin_port = htons(remote_faddr->port); salen = sizeof(*sin); break; } case ADDRTYPE_INET6: { struct sockaddr_in6 *sin6 = ss2sin6(&ss); sin6->sin6_family = AF_INET6; memcpy(&sin6->sin6_addr, addr->contents, addr->length); sin6->sin6_port = htons(remote_faddr->port); salen = sizeof(*sin6); break; } default: { struct sockaddr *sa = ss2sa(&ss); sa->sa_family = AF_UNSPEC; salen = sizeof(*sa); break; } } if (getnameinfo(ss2sa(&ss), salen, addrbuf, sizeof(addrbuf), NULL, 0, NI_NUMERICHOST | NI_NUMERICSERV) != 0) strlcpy(addrbuf, "<unprintable>", sizeof(addrbuf)); if (vno == RFC3244_VERSION) { size_t tlen; char *tdots; const char *targetp; if (target == NULL) { tlen = clen; tdots = cdots; targetp = targetstr; } else { tlen = strlen(targetstr); trunc_name(&tlen, &tdots); targetp = clientstr; } krb5_klog_syslog(LOG_NOTICE, _("setpw request from %s by %.*s%s for " "%.*s%s: %s"), addrbuf, (int) clen, clientstr, cdots, (int) tlen, targetp, tdots, errmsg ? errmsg : "success"); } else { krb5_klog_syslog(LOG_NOTICE, _("chpw request from %s for %.*s%s: %s"), addrbuf, (int) clen, clientstr, cdots, errmsg ? 
errmsg : "success"); } switch (ret) { case KADM5_AUTH_CHANGEPW: numresult = KRB5_KPASSWD_ACCESSDENIED; break; case KADM5_PASS_Q_TOOSHORT: case KADM5_PASS_REUSE: case KADM5_PASS_Q_CLASS: case KADM5_PASS_Q_DICT: case KADM5_PASS_Q_GENERIC: case KADM5_PASS_TOOSOON: numresult = KRB5_KPASSWD_SOFTERROR; break; case 0: numresult = KRB5_KPASSWD_SUCCESS; strlcpy(strresult, "", sizeof(strresult)); break; default: numresult = KRB5_KPASSWD_HARDERROR; break; } chpwfail: clear.length = 2 + strlen(strresult); clear.data = (char *) malloc(clear.length); ptr = clear.data; *ptr++ = (numresult>>8) & 0xff; *ptr++ = numresult & 0xff; memcpy(ptr, strresult, strlen(strresult)); cipher = empty_data(); if (ap_rep.length) { ret = krb5_auth_con_setaddrs(context, auth_context, local_faddr->address, NULL); if (ret) { numresult = KRB5_KPASSWD_HARDERROR; strlcpy(strresult, "Failed storing client and server internet addresses", sizeof(strresult)); } else { ret = krb5_mk_priv(context, auth_context, &clear, &cipher, &replay); if (ret) { numresult = KRB5_KPASSWD_HARDERROR; strlcpy(strresult, "Failed encrypting reply", sizeof(strresult)); } } } /* if no KRB-PRIV was constructed, then we need a KRB-ERROR. if this fails, just bail. there's nothing else we can do. */ if (cipher.length == 0) { /* clear out ap_rep now, so that it won't be inserted in the reply */ if (ap_rep.length) { free(ap_rep.data); ap_rep = empty_data(); } krberror.ctime = 0; krberror.cusec = 0; krberror.susec = 0; ret = krb5_timeofday(context, &krberror.stime); if (ret) goto bailout; /* this is really icky. but it's what all the other callers to mk_error do. */ krberror.error = ret; krberror.error -= ERROR_TABLE_BASE_krb5; if (krberror.error < 0 || krberror.error > 128) krberror.error = KRB_ERR_GENERIC; krberror.client = NULL; ret = krb5_build_principal(context, &krberror.server, strlen(realm), realm, "kadmin", "changepw", NULL); if (ret) goto bailout; krberror.text.length = 0; krberror.e_data = clear; ret = krb5_mk_error(context, &krberror, &cipher); krb5_free_principal(context, krberror.server); if (ret) goto bailout; } /* construct the reply */ ret = alloc_data(rep, 6 + ap_rep.length + cipher.length); if (ret) goto bailout; ptr = rep->data; /* length */ *ptr++ = (rep->length>>8) & 0xff; *ptr++ = rep->length & 0xff; /* version == 0x0001 big-endian */ *ptr++ = 0; *ptr++ = 1; /* ap_rep length, big-endian */ *ptr++ = (ap_rep.length>>8) & 0xff; *ptr++ = ap_rep.length & 0xff; /* ap-rep data */ if (ap_rep.length) { memcpy(ptr, ap_rep.data, ap_rep.length); ptr += ap_rep.length; } /* krb-priv or krb-error */ memcpy(ptr, cipher.data, cipher.length); bailout: krb5_auth_con_free(context, auth_context); krb5_free_principal(context, changepw); krb5_free_ticket(context, ticket); free(ap_rep.data); free(clear.data); free(cipher.data); krb5_free_principal(context, target); krb5_free_unparsed_name(context, targetstr); krb5_free_unparsed_name(context, clientstr); krb5_free_error_message(context, errmsg); return ret; }
64
1
unsigned short atalk_checksum(struct ddpehdr *ddp, int len) { unsigned long sum = 0; /* Assume unsigned long is >16 bits */ unsigned char *data = (unsigned char *)ddp; len -= 4; /* skip header 4 bytes */ data += 4; /* This ought to be unwrapped neatly. I'll trust gcc for now */ while (len--) { sum += *data; sum <<= 1; if (sum & 0x10000) { sum++; sum &= 0xFFFF; } data++; } /* Use 0xFFFF for 0. 0 itself means none */ return sum ? htons((unsigned short)sum) : 0xFFFF; }
66
1
static int em_fxsave(struct x86_emulate_ctxt *ctxt) { struct fxregs_state fx_state; size_t size; int rc; rc = check_fxsr(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ctxt->ops->get_fpu(ctxt); rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state)); ctxt->ops->put_fpu(ctxt); if (rc != X86EMUL_CONTINUE) return rc; if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR) size = offsetof(struct fxregs_state, xmm_space[8 * 16/4]); else size = offsetof(struct fxregs_state, xmm_space[0]); return segmented_write(ctxt, ctxt->memop.addr.mem, &fx_state, size); }
67
1
static int qcow2_create(const char *filename, QemuOpts *opts, Error **errp) { char *backing_file = NULL; char *backing_fmt = NULL; char *buf = NULL; uint64_t size = 0; int flags = 0; size_t cluster_size = DEFAULT_CLUSTER_SIZE; PreallocMode prealloc; int version; uint64_t refcount_bits; int refcount_order; const char *encryptfmt = NULL; Error *local_err = NULL; int ret; /* Read out options */ size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), BDRV_SECTOR_SIZE); backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE); backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT); encryptfmt = qemu_opt_get_del(opts, BLOCK_OPT_ENCRYPT_FORMAT); if (encryptfmt) { if (qemu_opt_get_del(opts, BLOCK_OPT_ENCRYPT)) { error_setg(errp, "Options " BLOCK_OPT_ENCRYPT " and " BLOCK_OPT_ENCRYPT_FORMAT " are mutually exclusive"); ret = -EINVAL; goto finish; } } else if (qemu_opt_get_bool_del(opts, BLOCK_OPT_ENCRYPT, false)) { encryptfmt = "aes"; } cluster_size = qcow2_opt_get_cluster_size_del(opts, &local_err); if (local_err) { error_propagate(errp, local_err); ret = -EINVAL; goto finish; } buf = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); prealloc = qapi_enum_parse(PreallocMode_lookup, buf, PREALLOC_MODE__MAX, PREALLOC_MODE_OFF, &local_err); if (local_err) { error_propagate(errp, local_err); ret = -EINVAL; goto finish; } version = qcow2_opt_get_version_del(opts, &local_err); if (local_err) { error_propagate(errp, local_err); ret = -EINVAL; goto finish; } if (qemu_opt_get_bool_del(opts, BLOCK_OPT_LAZY_REFCOUNTS, false)) { flags |= BLOCK_FLAG_LAZY_REFCOUNTS; } if (backing_file && prealloc != PREALLOC_MODE_OFF) { error_setg(errp, "Backing file and preallocation cannot be used at " "the same time"); ret = -EINVAL; goto finish; } if (version < 3 && (flags & BLOCK_FLAG_LAZY_REFCOUNTS)) { error_setg(errp, "Lazy refcounts only supported with compatibility " "level 1.1 and above (use compat=1.1 or greater)"); ret = -EINVAL; goto finish; } refcount_bits = qcow2_opt_get_refcount_bits_del(opts, version, &local_err); if (local_err) { error_propagate(errp, local_err); ret = -EINVAL; goto finish; } refcount_order = ctz32(refcount_bits); ret = qcow2_create2(filename, size, backing_file, backing_fmt, flags, cluster_size, prealloc, opts, version, refcount_order, encryptfmt, &local_err); error_propagate(errp, local_err); finish: g_free(backing_file); g_free(backing_fmt); g_free(buf); return ret; }
68
1
static void quantize_mantissas(AC3EncodeContext *s) { int blk, ch; for (blk = 0; blk < AC3_MAX_BLOCKS; blk++) { AC3Block *block = &s->blocks[blk]; s->mant1_cnt = s->mant2_cnt = s->mant4_cnt = 0; s->qmant1_ptr = s->qmant2_ptr = s->qmant4_ptr = NULL; for (ch = 0; ch < s->channels; ch++) { quantize_mantissas_blk_ch(s, block->fixed_coef[ch], block->exp_shift[ch], block->exp[ch], block->bap[ch], block->qmant[ch], s->nb_coefs[ch]); } } }
69
1
static int ltalk_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) { /* Expand any short form frames */ if (skb->mac.raw[2] == 1) { struct ddpehdr *ddp; /* Find our address */ struct atalk_addr *ap = atalk_find_dev_addr(dev); if (!ap || skb->len < sizeof(struct ddpshdr)) goto freeit; /* * The push leaves us with a ddephdr not an shdr, and * handily the port bytes in the right place preset. */ skb_push(skb, sizeof(*ddp) - 4); /* FIXME: use skb->cb to be able to use shared skbs */ ddp = (struct ddpehdr *)skb->data; /* Now fill in the long header */ /* * These two first. The mac overlays the new source/dest * network information so we MUST copy these before * we write the network numbers ! */ ddp->deh_dnode = skb->mac.raw[0]; /* From physical header */ ddp->deh_snode = skb->mac.raw[1]; /* From physical header */ ddp->deh_dnet = ap->s_net; /* Network number */ ddp->deh_snet = ap->s_net; ddp->deh_sum = 0; /* No checksum */ /* * Not sure about this bit... */ ddp->deh_len = skb->len; ddp->deh_hops = DDP_MAXHOPS; /* Non routable, so force a drop if we slip up later */ /* Mend the byte order */ *((__u16 *)ddp) = htons(*((__u16 *)ddp)); } skb->h.raw = skb->data; return atalk_rcv(skb, dev, pt); freeit: kfree_skb(skb); return 0; }
71
1
process_chpw_request(krb5_context context, void *server_handle, char *realm, krb5_keytab keytab, const krb5_fulladdr *local_faddr, const krb5_fulladdr *remote_faddr, krb5_data *req, krb5_data *rep) { krb5_error_code ret; char *ptr; unsigned int plen, vno; krb5_data ap_req, ap_rep = empty_data(); krb5_data cipher = empty_data(), clear = empty_data(); krb5_auth_context auth_context = NULL; krb5_principal changepw = NULL; krb5_principal client, target = NULL; krb5_ticket *ticket = NULL; krb5_replay_data replay; krb5_error krberror; int numresult; char strresult[1024]; char *clientstr = NULL, *targetstr = NULL; const char *errmsg = NULL; size_t clen; char *cdots; struct sockaddr_storage ss; socklen_t salen; char addrbuf[100]; krb5_address *addr = remote_faddr->address; *rep = empty_data(); if (req->length < 4) { /* either this, or the server is printing bad messages, or the caller passed in garbage */ ret = KRB5KRB_AP_ERR_MODIFIED; numresult = KRB5_KPASSWD_MALFORMED; strlcpy(strresult, "Request was truncated", sizeof(strresult)); goto chpwfail; } ptr = req->data; /* verify length */ plen = (*ptr++ & 0xff); plen = (plen<<8) | (*ptr++ & 0xff); if (plen != req->length) { ret = KRB5KRB_AP_ERR_MODIFIED; numresult = KRB5_KPASSWD_MALFORMED; strlcpy(strresult, "Request length was inconsistent", sizeof(strresult)); goto chpwfail; } /* verify version number */ vno = (*ptr++ & 0xff) ; vno = (vno<<8) | (*ptr++ & 0xff); if (vno != 1 && vno != RFC3244_VERSION) { ret = KRB5KDC_ERR_BAD_PVNO; numresult = KRB5_KPASSWD_BAD_VERSION; snprintf(strresult, sizeof(strresult), "Request contained unknown protocol version number %d", vno); goto chpwfail; } /* read, check ap-req length */ ap_req.length = (*ptr++ & 0xff); ap_req.length = (ap_req.length<<8) | (*ptr++ & 0xff); if (ptr + ap_req.length >= req->data + req->length) { ret = KRB5KRB_AP_ERR_MODIFIED; numresult = KRB5_KPASSWD_MALFORMED; strlcpy(strresult, "Request was truncated in AP-REQ", sizeof(strresult)); goto chpwfail; } /* verify ap_req */ ap_req.data = ptr; ptr += ap_req.length; ret = krb5_auth_con_init(context, &auth_context); if (ret) { numresult = KRB5_KPASSWD_HARDERROR; strlcpy(strresult, "Failed initializing auth context", sizeof(strresult)); goto chpwfail; } ret = krb5_auth_con_setflags(context, auth_context, KRB5_AUTH_CONTEXT_DO_SEQUENCE); if (ret) { numresult = KRB5_KPASSWD_HARDERROR; strlcpy(strresult, "Failed initializing auth context", sizeof(strresult)); goto chpwfail; } ret = krb5_build_principal(context, &changepw, strlen(realm), realm, "kadmin", "changepw", NULL); if (ret) { numresult = KRB5_KPASSWD_HARDERROR; strlcpy(strresult, "Failed building kadmin/changepw principal", sizeof(strresult)); goto chpwfail; } ret = krb5_rd_req(context, &auth_context, &ap_req, changepw, keytab, NULL, &ticket); if (ret) { numresult = KRB5_KPASSWD_AUTHERROR; strlcpy(strresult, "Failed reading application request", sizeof(strresult)); goto chpwfail; } /* construct the ap-rep */ ret = krb5_mk_rep(context, auth_context, &ap_rep); if (ret) { numresult = KRB5_KPASSWD_AUTHERROR; strlcpy(strresult, "Failed replying to application request", sizeof(strresult)); goto chpwfail; } /* decrypt the ChangePasswdData */ cipher.length = (req->data + req->length) - ptr; cipher.data = ptr; /* * Don't set a remote address in auth_context before calling krb5_rd_priv, * so that we can work against clients behind a NAT. Reflection attacks * aren't a concern since we use sequence numbers and since our requests * don't look anything like our responses. 
Also don't set a local address, * since we don't know what interface the request was received on. */ ret = krb5_rd_priv(context, auth_context, &cipher, &clear, &replay); if (ret) { numresult = KRB5_KPASSWD_HARDERROR; strlcpy(strresult, "Failed decrypting request", sizeof(strresult)); goto chpwfail; } client = ticket->enc_part2->client; /* decode ChangePasswdData for setpw requests */ if (vno == RFC3244_VERSION) { krb5_data *clear_data; ret = decode_krb5_setpw_req(&clear, &clear_data, &target); if (ret != 0) { numresult = KRB5_KPASSWD_MALFORMED; strlcpy(strresult, "Failed decoding ChangePasswdData", sizeof(strresult)); goto chpwfail; } zapfree(clear.data, clear.length); clear = *clear_data; free(clear_data); if (target != NULL) { ret = krb5_unparse_name(context, target, &targetstr); if (ret != 0) { numresult = KRB5_KPASSWD_HARDERROR; strlcpy(strresult, "Failed unparsing target name for log", sizeof(strresult)); goto chpwfail; } } } ret = krb5_unparse_name(context, client, &clientstr); if (ret) { numresult = KRB5_KPASSWD_HARDERROR; strlcpy(strresult, "Failed unparsing client name for log", sizeof(strresult)); goto chpwfail; } /* for cpw, verify that this is an AS_REQ ticket */ if (vno == 1 && (ticket->enc_part2->flags & TKT_FLG_INITIAL) == 0) { numresult = KRB5_KPASSWD_INITIAL_FLAG_NEEDED; strlcpy(strresult, "Ticket must be derived from a password", sizeof(strresult)); goto chpwfail; } /* change the password */ ptr = k5memdup0(clear.data, clear.length, &ret); ret = schpw_util_wrapper(server_handle, client, target, (ticket->enc_part2->flags & TKT_FLG_INITIAL) != 0, ptr, NULL, strresult, sizeof(strresult)); if (ret) errmsg = krb5_get_error_message(context, ret); /* zap the password */ zapfree(clear.data, clear.length); zapfree(ptr, clear.length); clear = empty_data(); clen = strlen(clientstr); trunc_name(&clen, &cdots); switch (addr->addrtype) { case ADDRTYPE_INET: { struct sockaddr_in *sin = ss2sin(&ss); sin->sin_family = AF_INET; memcpy(&sin->sin_addr, addr->contents, addr->length); sin->sin_port = htons(remote_faddr->port); salen = sizeof(*sin); break; } case ADDRTYPE_INET6: { struct sockaddr_in6 *sin6 = ss2sin6(&ss); sin6->sin6_family = AF_INET6; memcpy(&sin6->sin6_addr, addr->contents, addr->length); sin6->sin6_port = htons(remote_faddr->port); salen = sizeof(*sin6); break; } default: { struct sockaddr *sa = ss2sa(&ss); sa->sa_family = AF_UNSPEC; salen = sizeof(*sa); break; } } if (getnameinfo(ss2sa(&ss), salen, addrbuf, sizeof(addrbuf), NULL, 0, NI_NUMERICHOST | NI_NUMERICSERV) != 0) strlcpy(addrbuf, "<unprintable>", sizeof(addrbuf)); if (vno == RFC3244_VERSION) { size_t tlen; char *tdots; const char *targetp; if (target == NULL) { tlen = clen; tdots = cdots; targetp = targetstr; } else { tlen = strlen(targetstr); trunc_name(&tlen, &tdots); targetp = clientstr; } krb5_klog_syslog(LOG_NOTICE, _("setpw request from %s by %.*s%s for " "%.*s%s: %s"), addrbuf, (int) clen, clientstr, cdots, (int) tlen, targetp, tdots, errmsg ? errmsg : "success"); } else { krb5_klog_syslog(LOG_NOTICE, _("chpw request from %s for %.*s%s: %s"), addrbuf, (int) clen, clientstr, cdots, errmsg ? 
errmsg : "success"); } switch (ret) { case KADM5_AUTH_CHANGEPW: numresult = KRB5_KPASSWD_ACCESSDENIED; break; case KADM5_PASS_Q_TOOSHORT: case KADM5_PASS_REUSE: case KADM5_PASS_Q_CLASS: case KADM5_PASS_Q_DICT: case KADM5_PASS_Q_GENERIC: case KADM5_PASS_TOOSOON: numresult = KRB5_KPASSWD_SOFTERROR; break; case 0: numresult = KRB5_KPASSWD_SUCCESS; strlcpy(strresult, "", sizeof(strresult)); break; default: numresult = KRB5_KPASSWD_HARDERROR; break; } chpwfail: clear.length = 2 + strlen(strresult); clear.data = (char *) malloc(clear.length); ptr = clear.data; *ptr++ = (numresult>>8) & 0xff; *ptr++ = numresult & 0xff; memcpy(ptr, strresult, strlen(strresult)); cipher = empty_data(); if (ap_rep.length) { ret = krb5_auth_con_setaddrs(context, auth_context, local_faddr->address, NULL); if (ret) { numresult = KRB5_KPASSWD_HARDERROR; strlcpy(strresult, "Failed storing client and server internet addresses", sizeof(strresult)); } else { ret = krb5_mk_priv(context, auth_context, &clear, &cipher, &replay); if (ret) { numresult = KRB5_KPASSWD_HARDERROR; strlcpy(strresult, "Failed encrypting reply", sizeof(strresult)); } } } /* if no KRB-PRIV was constructed, then we need a KRB-ERROR. if this fails, just bail. there's nothing else we can do. */ if (cipher.length == 0) { /* clear out ap_rep now, so that it won't be inserted in the reply */ if (ap_rep.length) { free(ap_rep.data); ap_rep = empty_data(); } krberror.ctime = 0; krberror.cusec = 0; krberror.susec = 0; ret = krb5_timeofday(context, &krberror.stime); if (ret) goto bailout; /* this is really icky. but it's what all the other callers to mk_error do. */ krberror.error = ret; krberror.error -= ERROR_TABLE_BASE_krb5; if (krberror.error < 0 || krberror.error > 128) krberror.error = KRB_ERR_GENERIC; krberror.client = NULL; ret = krb5_build_principal(context, &krberror.server, strlen(realm), realm, "kadmin", "changepw", NULL); if (ret) goto bailout; krberror.text.length = 0; krberror.e_data = clear; ret = krb5_mk_error(context, &krberror, &cipher); krb5_free_principal(context, krberror.server); if (ret) goto bailout; } /* construct the reply */ ret = alloc_data(rep, 6 + ap_rep.length + cipher.length); if (ret) goto bailout; ptr = rep->data; /* length */ *ptr++ = (rep->length>>8) & 0xff; *ptr++ = rep->length & 0xff; /* version == 0x0001 big-endian */ *ptr++ = 0; *ptr++ = 1; /* ap_rep length, big-endian */ *ptr++ = (ap_rep.length>>8) & 0xff; *ptr++ = ap_rep.length & 0xff; /* ap-rep data */ if (ap_rep.length) { memcpy(ptr, ap_rep.data, ap_rep.length); ptr += ap_rep.length; } /* krb-priv or krb-error */ memcpy(ptr, cipher.data, cipher.length); bailout: krb5_auth_con_free(context, auth_context); krb5_free_principal(context, changepw); krb5_free_ticket(context, ticket); free(ap_rep.data); free(clear.data); free(cipher.data); krb5_free_principal(context, target); krb5_free_unparsed_name(context, targetstr); krb5_free_unparsed_name(context, clientstr); krb5_free_error_message(context, errmsg); return ret; }
72
1
static int atalk_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) { struct ddpehdr *ddp = ddp_hdr(skb); struct sock *sock; struct atalk_iface *atif; struct sockaddr_at tosat; int origlen; struct ddpebits ddphv; /* Size check */ if (skb->len < sizeof(*ddp)) goto freeit; /* * Fix up the length field [Ok this is horrible but otherwise * I end up with unions of bit fields and messy bit field order * compiler/endian dependencies..] * * FIXME: This is a write to a shared object. Granted it * happens to be safe BUT.. (Its safe as user space will not * run until we put it back) */ *((__u16 *)&ddphv) = ntohs(*((__u16 *)ddp)); /* Trim buffer in case of stray trailing data */ origlen = skb->len; skb_trim(skb, min_t(unsigned int, skb->len, ddphv.deh_len)); /* * Size check to see if ddp->deh_len was crap * (Otherwise we'll detonate most spectacularly * in the middle of recvmsg()). */ if (skb->len < sizeof(*ddp)) goto freeit; /* * Any checksums. Note we don't do htons() on this == is assumed to be * valid for net byte orders all over the networking code... */ if (ddp->deh_sum && atalk_checksum(ddp, ddphv.deh_len) != ddp->deh_sum) /* Not a valid AppleTalk frame - dustbin time */ goto freeit; /* Check the packet is aimed at us */ if (!ddp->deh_dnet) /* Net 0 is 'this network' */ atif = atalk_find_anynet(ddp->deh_dnode, dev); else atif = atalk_find_interface(ddp->deh_dnet, ddp->deh_dnode); /* Not ours, so we route the packet via the correct AppleTalk iface */ if (!atif) { atalk_route_packet(skb, dev, ddp, &ddphv, origlen); goto out; } /* if IP over DDP is not selected this code will be optimized out */ if (is_ip_over_ddp(skb)) return handle_ip_over_ddp(skb); /* * Which socket - atalk_search_socket() looks for a *full match* * of the <net, node, port> tuple. */ tosat.sat_addr.s_net = ddp->deh_dnet; tosat.sat_addr.s_node = ddp->deh_dnode; tosat.sat_port = ddp->deh_dport; sock = atalk_search_socket(&tosat, atif); if (!sock) /* But not one of our sockets */ goto freeit; /* Queue packet (standard) */ skb->sk = sock; if (sock_queue_rcv_skb(sock, skb) < 0) goto freeit; out: return 0; freeit: kfree_skb(skb); goto out; }
73
1
init_ctx_reselect(OM_uint32 *minor_status, spnego_gss_ctx_id_t sc, OM_uint32 acc_negState, gss_OID supportedMech, gss_buffer_t *responseToken, gss_buffer_t *mechListMIC, OM_uint32 *negState, send_token_flag *tokflag) { OM_uint32 tmpmin; size_t i; generic_gss_release_oid(&tmpmin, &sc->internal_mech); gss_delete_sec_context(&tmpmin, &sc->ctx_handle, GSS_C_NO_BUFFER); /* Find supportedMech in sc->mech_set. */ for (i = 0; i < sc->mech_set->count; i++) { if (g_OID_equal(supportedMech, &sc->mech_set->elements[i])) break; } if (i == sc->mech_set->count) return GSS_S_DEFECTIVE_TOKEN; sc->internal_mech = &sc->mech_set->elements[i]; /* * Windows 2003 and earlier don't correctly send a * negState of request-mic when counter-proposing a * mechanism. They probably don't handle mechListMICs * properly either. */ if (acc_negState != REQUEST_MIC) return GSS_S_DEFECTIVE_TOKEN; sc->mech_complete = 0; sc->mic_reqd = 1; *negState = REQUEST_MIC; *tokflag = CONT_TOKEN_SEND; return GSS_S_CONTINUE_NEEDED; }
74
0
static void pdo_stmt_iter_move_forwards ( zend_object_iterator * iter TSRMLS_DC ) { struct php_pdo_iterator * I = ( struct php_pdo_iterator * ) iter -> data ; if ( I -> fetch_ahead ) { zval_ptr_dtor ( & I -> fetch_ahead ) ; I -> fetch_ahead = NULL ; } MAKE_STD_ZVAL ( I -> fetch_ahead ) ; if ( ! do_fetch ( I -> stmt , TRUE , I -> fetch_ahead , PDO_FETCH_USE_DEFAULT , PDO_FETCH_ORI_NEXT , 0 , 0 TSRMLS_CC ) ) { pdo_stmt_t * stmt = I -> stmt ; PDO_HANDLE_STMT_ERR ( ) ; I -> key = ( ulong ) - 1 ; FREE_ZVAL ( I -> fetch_ahead ) ; I -> fetch_ahead = NULL ; return ; } I -> key ++ ; }
75
1
static void dma_blk_cb(void *opaque, int ret) { DMAAIOCB *dbs = (DMAAIOCB *)opaque; dma_addr_t cur_addr, cur_len; void *mem; trace_dma_blk_cb(dbs, ret); dbs->acb = NULL; dbs->sector_num += dbs->iov.size / 512; if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) { dma_complete(dbs, ret); return; } dma_blk_unmap(dbs); while (dbs->sg_cur_index < dbs->sg->nsg) { cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte; cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte; mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir); if (!mem) break; qemu_iovec_add(&dbs->iov, mem, cur_len); dbs->sg_cur_byte += cur_len; if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) { dbs->sg_cur_byte = 0; ++dbs->sg_cur_index; } } if (dbs->iov.size == 0) { trace_dma_map_wait(dbs); cpu_register_map_client(dbs, continue_after_map_failure); return; } if (dbs->iov.size & ~BDRV_SECTOR_MASK) { qemu_iovec_discard_back(&dbs->iov, dbs->iov.size & ~BDRV_SECTOR_MASK); } dbs->acb = dbs->io_func(dbs->blk, dbs->sector_num, &dbs->iov, dbs->iov.size / 512, dma_blk_cb, dbs); assert(dbs->acb); }
76
1
static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, int len) { struct sock *sk = sock->sk; struct atalk_sock *at = at_sk(sk); struct sockaddr_at *usat = (struct sockaddr_at *)msg->msg_name; int flags = msg->msg_flags; int loopback = 0; struct sockaddr_at local_satalk, gsat; struct sk_buff *skb; struct net_device *dev; struct ddpehdr *ddp; int size; struct atalk_route *rt; int err; if (flags & ~MSG_DONTWAIT) return -EINVAL; if (len > DDP_MAXSZ) return -EMSGSIZE; if (usat) { if (sk->sk_zapped) if (atalk_autobind(sk) < 0) return -EBUSY; if (msg->msg_namelen < sizeof(*usat) || usat->sat_family != AF_APPLETALK) return -EINVAL; /* netatalk doesn't implement this check */ if (usat->sat_addr.s_node == ATADDR_BCAST && !sock_flag(sk, SOCK_BROADCAST)) { printk(KERN_INFO "SO_BROADCAST: Fix your netatalk as " "it will break before 2.2\n"); #if 0 return -EPERM; #endif } } else { if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; usat = &local_satalk; usat->sat_family = AF_APPLETALK; usat->sat_port = at->dest_port; usat->sat_addr.s_node = at->dest_node; usat->sat_addr.s_net = at->dest_net; } /* Build a packet */ SOCK_DEBUG(sk, "SK %p: Got address.\n", sk); /* For headers */ size = sizeof(struct ddpehdr) + len + ddp_dl->header_length; if (usat->sat_addr.s_net || usat->sat_addr.s_node == ATADDR_ANYNODE) { rt = atrtr_find(&usat->sat_addr); if (!rt) return -ENETUNREACH; dev = rt->dev; } else { struct atalk_addr at_hint; at_hint.s_node = 0; at_hint.s_net = at->src_net; rt = atrtr_find(&at_hint); if (!rt) return -ENETUNREACH; dev = rt->dev; } SOCK_DEBUG(sk, "SK %p: Size needed %d, device %s\n", sk, size, dev->name); size += dev->hard_header_len; skb = sock_alloc_send_skb(sk, size, (flags & MSG_DONTWAIT), &err); if (!skb) return err; skb->sk = sk; skb_reserve(skb, ddp_dl->header_length); skb_reserve(skb, dev->hard_header_len); skb->dev = dev; SOCK_DEBUG(sk, "SK %p: Begin build.\n", sk); ddp = (struct ddpehdr *)skb_put(skb, sizeof(struct ddpehdr)); ddp->deh_pad = 0; ddp->deh_hops = 0; ddp->deh_len = len + sizeof(*ddp); /* * Fix up the length field [Ok this is horrible but otherwise * I end up with unions of bit fields and messy bit field order * compiler/endian dependencies.. 
*/ *((__u16 *)ddp) = ntohs(*((__u16 *)ddp)); ddp->deh_dnet = usat->sat_addr.s_net; ddp->deh_snet = at->src_net; ddp->deh_dnode = usat->sat_addr.s_node; ddp->deh_snode = at->src_node; ddp->deh_dport = usat->sat_port; ddp->deh_sport = at->src_port; SOCK_DEBUG(sk, "SK %p: Copy user data (%d bytes).\n", sk, len); err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); if (err) { kfree_skb(skb); return -EFAULT; } if (sk->sk_no_check == 1) ddp->deh_sum = 0; else ddp->deh_sum = atalk_checksum(ddp, len + sizeof(*ddp)); /* * Loopback broadcast packets to non gateway targets (ie routes * to group we are in) */ if (ddp->deh_dnode == ATADDR_BCAST && !(rt->flags & RTF_GATEWAY) && !(dev->flags & IFF_LOOPBACK)) { struct sk_buff *skb2 = skb_copy(skb, GFP_KERNEL); if (skb2) { loopback = 1; SOCK_DEBUG(sk, "SK %p: send out(copy).\n", sk); if (aarp_send_ddp(dev, skb2, &usat->sat_addr, NULL) == -1) kfree_skb(skb2); /* else queued/sent above in the aarp queue */ } } if (dev->flags & IFF_LOOPBACK || loopback) { SOCK_DEBUG(sk, "SK %p: Loop back.\n", sk); /* loop back */ skb_orphan(skb); ddp_dl->request(ddp_dl, skb, dev->dev_addr); } else { SOCK_DEBUG(sk, "SK %p: send out.\n", sk); if (rt->flags & RTF_GATEWAY) { gsat.sat_addr = rt->gateway; usat = &gsat; } if (aarp_send_ddp(dev, skb, &usat->sat_addr, NULL) == -1) kfree_skb(skb); /* else queued/sent above in the aarp queue */ } SOCK_DEBUG(sk, "SK %p: Done write (%d).\n", sk, len); return len; }
78
1
long do_sigreturn(CPUPPCState *env) { struct target_sigcontext *sc = NULL; struct target_mcontext *sr = NULL; target_ulong sr_addr = 0, sc_addr; sigset_t blocked; target_sigset_t set; sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE; if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) goto sigsegv; #if defined(TARGET_PPC64) set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32); #else __get_user(set.sig[0], &sc->oldmask); __get_user(set.sig[1], &sc->_unused[3]); #endif target_to_host_sigset_internal(&blocked, &set); set_sigmask(&blocked); __get_user(sr_addr, &sc->regs); if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1)) goto sigsegv; restore_user_regs(env, sr, 1); unlock_user_struct(sr, sr_addr, 1); unlock_user_struct(sc, sc_addr, 1); return -TARGET_QEMU_ESIGRETURN; sigsegv: unlock_user_struct(sr, sr_addr, 1); unlock_user_struct(sc, sc_addr, 1); force_sig(TARGET_SIGSEGV); return 0; }
79
1
static void voutf ( struct GlobalConfig * config , const char * prefix , const char * fmt , va_list ap ) { size_t width = ( 79 - strlen ( prefix ) ) ; if ( ! config -> mute ) { size_t len ; char * ptr ; char * print_buffer ; print_buffer = curlx_mvaprintf ( fmt , ap ) ; if ( ! print_buffer ) return ; len = strlen ( print_buffer ) ; ptr = print_buffer ; while ( len > 0 ) { fputs ( prefix , config -> errors ) ; if ( len > width ) { size_t cut = width - 1 ; while ( ! ISSPACE ( ptr [ cut ] ) && cut ) { cut -- ; } if ( 0 == cut ) cut = width - 1 ; ( void ) fwrite ( ptr , cut + 1 , 1 , config -> errors ) ; fputs ( "\n" , config -> errors ) ; ptr += cut + 1 ; len -= cut ; } else { fputs ( ptr , config -> errors ) ; len = 0 ; } } curl_free ( print_buffer ) ; } }
80
1
acc_ctx_cont(OM_uint32 *minstat, gss_buffer_t buf, gss_ctx_id_t *ctx, gss_buffer_t *responseToken, gss_buffer_t *mechListMIC, OM_uint32 *negState, send_token_flag *return_token) { OM_uint32 ret, tmpmin; gss_OID supportedMech; spnego_gss_ctx_id_t sc; unsigned int len; unsigned char *ptr, *bufstart; sc = (spnego_gss_ctx_id_t)*ctx; ret = GSS_S_DEFECTIVE_TOKEN; *negState = REJECT; *minstat = 0; supportedMech = GSS_C_NO_OID; *return_token = ERROR_TOKEN_SEND; *responseToken = *mechListMIC = GSS_C_NO_BUFFER; ptr = bufstart = buf->value; #define REMAIN (buf->length - (ptr - bufstart)) if (REMAIN > INT_MAX) return GSS_S_DEFECTIVE_TOKEN; /* * Attempt to work with old Sun SPNEGO. */ if (*ptr == HEADER_ID) { ret = g_verify_token_header(gss_mech_spnego, &len, &ptr, 0, REMAIN); if (ret) { *minstat = ret; return GSS_S_DEFECTIVE_TOKEN; } } if (*ptr != (CONTEXT | 0x01)) { return GSS_S_DEFECTIVE_TOKEN; } ret = get_negTokenResp(minstat, ptr, REMAIN, negState, &supportedMech, responseToken, mechListMIC); if (ret != GSS_S_COMPLETE) goto cleanup; if (*responseToken == GSS_C_NO_BUFFER && *mechListMIC == GSS_C_NO_BUFFER) { ret = GSS_S_DEFECTIVE_TOKEN; goto cleanup; } if (supportedMech != GSS_C_NO_OID) { ret = GSS_S_DEFECTIVE_TOKEN; goto cleanup; } sc->firstpass = 0; *negState = ACCEPT_INCOMPLETE; *return_token = CONT_TOKEN_SEND; cleanup: if (supportedMech != GSS_C_NO_OID) { generic_gss_release_oid(&tmpmin, &supportedMech); } return ret; #undef REMAIN }
81
0
static inline PixelTrait GetPixelGreenTraits ( const Image * restrict image ) { return ( image -> channel_map [ GreenPixelChannel ] . traits ) ; }
82
1
static int fat_ioctl_filldir(void *__buf, const char *name, int name_len, loff_t offset, u64 ino, unsigned int d_type) { struct fat_ioctl_filldir_callback *buf = __buf; struct dirent __user *d1 = buf->dirent; struct dirent __user *d2 = d1 + 1; if (buf->result) return -EINVAL; buf->result++; if (name != NULL) { /* dirent has only short name */ if (name_len >= sizeof(d1->d_name)) name_len = sizeof(d1->d_name) - 1; if (put_user(0, d2->d_name) || put_user(0, &d2->d_reclen) || copy_to_user(d1->d_name, name, name_len) || put_user(0, d1->d_name + name_len) || put_user(name_len, &d1->d_reclen)) goto efault; } else { /* dirent has short and long name */ const char *longname = buf->longname; int long_len = buf->long_len; const char *shortname = buf->shortname; int short_len = buf->short_len; if (long_len >= sizeof(d1->d_name)) long_len = sizeof(d1->d_name) - 1; if (short_len >= sizeof(d1->d_name)) short_len = sizeof(d1->d_name) - 1; if (copy_to_user(d2->d_name, longname, long_len) || put_user(0, d2->d_name + long_len) || put_user(long_len, &d2->d_reclen) || put_user(ino, &d2->d_ino) || put_user(offset, &d2->d_off) || copy_to_user(d1->d_name, shortname, short_len) || put_user(0, d1->d_name + short_len) || put_user(short_len, &d1->d_reclen)) goto efault; } return 0; efault: buf->result = -EFAULT; return -EFAULT; }
83
1
static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size) { VirtIOGPU *g = opaque; struct virtio_gpu_simple_resource *res; struct virtio_gpu_scanout *scanout; uint32_t resource_id, pformat; int i; g->hostmem = 0; resource_id = qemu_get_be32(f); while (resource_id != 0) { res = g_new0(struct virtio_gpu_simple_resource, 1); res->resource_id = resource_id; res->width = qemu_get_be32(f); res->height = qemu_get_be32(f); res->format = qemu_get_be32(f); res->iov_cnt = qemu_get_be32(f); /* allocate */ pformat = get_pixman_format(res->format); if (!pformat) { return -EINVAL; res->image = pixman_image_create_bits(pformat, res->width, res->height, NULL, 0); if (!res->image) { return -EINVAL; res->hostmem = PIXMAN_FORMAT_BPP(pformat) * res->width * res->height; res->addrs = g_new(uint64_t, res->iov_cnt); res->iov = g_new(struct iovec, res->iov_cnt); /* read data */ for (i = 0; i < res->iov_cnt; i++) { res->addrs[i] = qemu_get_be64(f); res->iov[i].iov_len = qemu_get_be32(f); qemu_get_buffer(f, (void *)pixman_image_get_data(res->image), pixman_image_get_stride(res->image) * res->height); /* restore mapping */ for (i = 0; i < res->iov_cnt; i++) { hwaddr len = res->iov[i].iov_len; res->iov[i].iov_base = cpu_physical_memory_map(res->addrs[i], &len, 1); if (!res->iov[i].iov_base || len != res->iov[i].iov_len) { return -EINVAL; QTAILQ_INSERT_HEAD(&g->reslist, res, next); g->hostmem += res->hostmem; resource_id = qemu_get_be32(f); /* load & apply scanout state */ vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1); for (i = 0; i < g->conf.max_outputs; i++) { scanout = &g->scanout[i]; if (!scanout->resource_id) { continue; res = virtio_gpu_find_resource(g, scanout->resource_id); if (!res) { return -EINVAL; scanout->ds = qemu_create_displaysurface_pixman(res->image); if (!scanout->ds) { return -EINVAL; dpy_gfx_replace_surface(scanout->con, scanout->ds); dpy_gfx_update(scanout->con, 0, 0, scanout->width, scanout->height); update_cursor(g, &scanout->cursor); res->scanout_bitmask |= (1 << i); return 0;
84
1
static av_cold int iv_alloc_frames(Indeo3DecodeContext *s) { int luma_width = (s->width + 3) & ~3, luma_height = (s->height + 3) & ~3, chroma_width = ((luma_width >> 2) + 3) & ~3, chroma_height = ((luma_height >> 2) + 3) & ~3, luma_pixels = luma_width * luma_height, chroma_pixels = chroma_width * chroma_height, i; unsigned int bufsize = luma_pixels * 2 + luma_width * 3 + (chroma_pixels + chroma_width) * 4; if(!(s->buf = av_malloc(bufsize))) return AVERROR(ENOMEM); s->iv_frame[0].y_w = s->iv_frame[1].y_w = luma_width; s->iv_frame[0].y_h = s->iv_frame[1].y_h = luma_height; s->iv_frame[0].uv_w = s->iv_frame[1].uv_w = chroma_width; s->iv_frame[0].uv_h = s->iv_frame[1].uv_h = chroma_height; s->iv_frame[0].Ybuf = s->buf + luma_width; i = luma_pixels + luma_width * 2; s->iv_frame[1].Ybuf = s->buf + i; i += (luma_pixels + luma_width); s->iv_frame[0].Ubuf = s->buf + i; i += (chroma_pixels + chroma_width); s->iv_frame[1].Ubuf = s->buf + i; i += (chroma_pixels + chroma_width); s->iv_frame[0].Vbuf = s->buf + i; i += (chroma_pixels + chroma_width); s->iv_frame[1].Vbuf = s->buf + i; for(i = 1; i <= luma_width; i++) s->iv_frame[0].Ybuf[-i] = s->iv_frame[1].Ybuf[-i] = s->iv_frame[0].Ubuf[-i] = 0x80; for(i = 1; i <= chroma_width; i++) { s->iv_frame[1].Ubuf[-i] = 0x80; s->iv_frame[0].Vbuf[-i] = 0x80; s->iv_frame[1].Vbuf[-i] = 0x80; s->iv_frame[1].Vbuf[chroma_pixels+i-1] = 0x80; } return 0; }
85
0
static void e1000e_set_fcrth ( E1000ECore * core , int index , uint32_t val ) { core -> mac [ FCRTH ] = val & 0xFFF8 ; }
86
1
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, int seg, u8 cpl, enum x86_transfer_type transfer, struct desc_struct *desc) { struct desc_struct seg_desc, old_desc; u8 dpl, rpl; unsigned err_vec = GP_VECTOR; u32 err_code = 0; bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */ ulong desc_addr; int ret; u16 dummy; u32 base3 = 0; memset(&seg_desc, 0, sizeof seg_desc); if (ctxt->mode == X86EMUL_MODE_REAL) { /* set real mode segment descriptor (keep limit etc. for * unreal mode) */ ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg); set_desc_base(&seg_desc, selector << 4); goto load; } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) { /* VM86 needs a clean new segment descriptor */ set_desc_base(&seg_desc, selector << 4); set_desc_limit(&seg_desc, 0xffff); seg_desc.type = 3; seg_desc.p = 1; seg_desc.s = 1; seg_desc.dpl = 3; goto load; } rpl = selector & 3; /* NULL selector is not valid for TR, CS and SS (except for long mode) */ if ((seg == VCPU_SREG_CS || (seg == VCPU_SREG_SS && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)) || seg == VCPU_SREG_TR) && null_selector) goto exception; /* TR should be in GDT only */ if (seg == VCPU_SREG_TR && (selector & (1 << 2))) goto exception; if (null_selector) /* for NULL selector skip all following checks */ goto load; ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr); if (ret != X86EMUL_CONTINUE) return ret; err_code = selector & 0xfffc; err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR : GP_VECTOR; /* can't load system descriptor into segment selector */ if (seg <= VCPU_SREG_GS && !seg_desc.s) { if (transfer == X86_TRANSFER_CALL_JMP) return X86EMUL_UNHANDLEABLE; goto exception; } if (!seg_desc.p) { err_vec = (seg == VCPU_SREG_SS) ? 
SS_VECTOR : NP_VECTOR; goto exception; } dpl = seg_desc.dpl; switch (seg) { case VCPU_SREG_SS: /* * segment is not a writable data segment or segment * selector's RPL != CPL or segment selector's RPL != CPL */ if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl) goto exception; break; case VCPU_SREG_CS: if (!(seg_desc.type & 8)) goto exception; if (seg_desc.type & 4) { /* conforming */ if (dpl > cpl) goto exception; } else { /* nonconforming */ if (rpl > cpl || dpl != cpl) goto exception; } /* in long-mode d/b must be clear if l is set */ if (seg_desc.d && seg_desc.l) { u64 efer = 0; ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if (efer & EFER_LMA) goto exception; } /* CS(RPL) <- CPL */ selector = (selector & 0xfffc) | cpl; break; case VCPU_SREG_TR: if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9)) goto exception; old_desc = seg_desc; seg_desc.type |= 2; /* busy */ ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc, sizeof(seg_desc), &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; break; case VCPU_SREG_LDTR: if (seg_desc.s || seg_desc.type != 2) goto exception; break; default: /* DS, ES, FS, or GS */ /* * segment is not a data or readable code segment or * ((segment is a data or nonconforming code segment) * and (both RPL and CPL > DPL)) */ if ((seg_desc.type & 0xa) == 0x8 || (((seg_desc.type & 0xc) != 0xc) && (rpl > dpl && cpl > dpl))) goto exception; break; } if (seg_desc.s) { /* mark segment as accessed */ if (!(seg_desc.type & 1)) { seg_desc.type |= 1; ret = write_segment_descriptor(ctxt, selector, &seg_desc); if (ret != X86EMUL_CONTINUE) return ret; } } else if (ctxt->mode == X86EMUL_MODE_PROT64) { ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3, sizeof(base3), &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; if (is_noncanonical_address(get_desc_base(&seg_desc) | ((u64)base3 << 32))) return emulate_gp(ctxt, 0); } load: ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg); if (desc) *desc = seg_desc; return X86EMUL_CONTINUE; exception: return emulate_exception(ctxt, err_vec, err_code, true); }
87
1
static int fat_dir_ioctl(struct inode * inode, struct file * filp, unsigned int cmd, unsigned long arg) { struct fat_ioctl_filldir_callback buf; struct dirent __user *d1; int ret, short_only, both; switch (cmd) { case VFAT_IOCTL_READDIR_SHORT: short_only = 1; both = 0; break; case VFAT_IOCTL_READDIR_BOTH: short_only = 0; both = 1; break; default: return fat_generic_ioctl(inode, filp, cmd, arg); } d1 = (struct dirent __user *)arg; if (!access_ok(VERIFY_WRITE, d1, sizeof(struct dirent[2]))) return -EFAULT; /* * Yes, we don't need this put_user() absolutely. However old * code didn't return the right value. So, app use this value, * in order to check whether it is EOF. */ if (put_user(0, &d1->d_reclen)) return -EFAULT; buf.dirent = d1; buf.result = 0; mutex_lock(&inode->i_mutex); ret = -ENOENT; if (!IS_DEADDIR(inode)) { ret = __fat_readdir(inode, filp, &buf, fat_ioctl_filldir, short_only, both); } mutex_unlock(&inode->i_mutex); if (ret >= 0) ret = buf.result; return ret; }
88
0
static int mov_write_minf_tag(AVIOContext *pb, MOVMuxContext *mov, MOVTrack *track) { int64_t pos = avio_tell(pb); int ret; avio_wb32(pb, 0); /* size */ ffio_wfourcc(pb, "minf"); if (track->enc->codec_type == AVMEDIA_TYPE_VIDEO) mov_write_vmhd_tag(pb); else if (track->enc->codec_type == AVMEDIA_TYPE_AUDIO) mov_write_smhd_tag(pb); else if (track->enc->codec_type == AVMEDIA_TYPE_SUBTITLE) { if (track->tag == MKTAG('t','e','x','t') || is_clcp_track(track)) { mov_write_gmhd_tag(pb, track); } else { mov_write_nmhd_tag(pb); } } else if (track->tag == MKTAG('r','t','p',' ')) { mov_write_hmhd_tag(pb); } else if (track->tag == MKTAG('t','m','c','d')) { mov_write_gmhd_tag(pb, track); } if (track->mode == MODE_MOV) /* FIXME: Why do it for MODE_MOV only ? */ mov_write_hdlr_tag(pb, NULL); mov_write_dinf_tag(pb); if ((ret = mov_write_stbl_tag(pb, mov, track)) < 0) return ret; return update_size(pb, pos); }
89
1
static long fat_compat_dir_ioctl(struct file *file, unsigned cmd, unsigned long arg) { struct compat_dirent __user *p = compat_ptr(arg); int ret; mm_segment_t oldfs = get_fs(); struct dirent d[2]; switch (cmd) { case VFAT_IOCTL_READDIR_BOTH32: cmd = VFAT_IOCTL_READDIR_BOTH; break; case VFAT_IOCTL_READDIR_SHORT32: cmd = VFAT_IOCTL_READDIR_SHORT; break; default: return -ENOIOCTLCMD; } set_fs(KERNEL_DS); lock_kernel(); ret = fat_dir_ioctl(file->f_path.dentry->d_inode, file, cmd, (unsigned long) &d); unlock_kernel(); set_fs(oldfs); if (ret >= 0) { ret |= fat_compat_put_dirent32(&d[0], p); ret |= fat_compat_put_dirent32(&d[1], p + 1); } return ret; }
90
0
double get_variable_numdistinct ( VariableStatData * vardata , bool * isdefault ) { double stadistinct ; double stanullfrac = 0.0 ; double ntuples ; * isdefault = false ; if ( HeapTupleIsValid ( vardata -> statsTuple ) ) { Form_pg_statistic stats ; stats = ( Form_pg_statistic ) GETSTRUCT ( vardata -> statsTuple ) ; stadistinct = stats -> stadistinct ; stanullfrac = stats -> stanullfrac ; } else if ( vardata -> vartype == BOOLOID ) { stadistinct = 2.0 ; } else { if ( vardata -> var && IsA ( vardata -> var , Var ) ) { switch ( ( ( Var * ) vardata -> var ) -> varattno ) { case ObjectIdAttributeNumber : case SelfItemPointerAttributeNumber : stadistinct = - 1.0 ; break ; case TableOidAttributeNumber : stadistinct = 1.0 ; break ; default : stadistinct = 0.0 ; break ; } } else stadistinct = 0.0 ; } if ( vardata -> isunique ) stadistinct = - 1.0 * ( 1.0 - stanullfrac ) ; if ( stadistinct > 0.0 ) return clamp_row_est ( stadistinct ) ; if ( vardata -> rel == NULL ) { * isdefault = true ; return DEFAULT_NUM_DISTINCT ; } ntuples = vardata -> rel -> tuples ; if ( ntuples <= 0.0 ) { * isdefault = true ; return DEFAULT_NUM_DISTINCT ; } if ( stadistinct < 0.0 ) return clamp_row_est ( - stadistinct * ntuples ) ; if ( ntuples < DEFAULT_NUM_DISTINCT ) return clamp_row_est ( ntuples ) ; * isdefault = true ; return DEFAULT_NUM_DISTINCT ; }
91
1
kadm5_randkey_principal_3(void *server_handle, krb5_principal principal, krb5_boolean keepold, int n_ks_tuple, krb5_key_salt_tuple *ks_tuple, krb5_keyblock **keyblocks, int *n_keys) { krb5_db_entry *kdb; osa_princ_ent_rec adb; krb5_int32 now; kadm5_policy_ent_rec pol; int ret, last_pwd; krb5_boolean have_pol = FALSE; kadm5_server_handle_t handle = server_handle; krb5_keyblock *act_mkey; krb5_kvno act_kvno; int new_n_ks_tuple = 0; krb5_key_salt_tuple *new_ks_tuple = NULL; if (keyblocks) *keyblocks = NULL; CHECK_HANDLE(server_handle); krb5_clear_error_message(handle->context); if (principal == NULL) return EINVAL; if ((ret = kdb_get_entry(handle, principal, &kdb, &adb))) return(ret); ret = apply_keysalt_policy(handle, adb.policy, n_ks_tuple, ks_tuple, &new_n_ks_tuple, &new_ks_tuple); if (ret) goto done; if (krb5_principal_compare(handle->context, principal, hist_princ)) { /* If changing the history entry, the new entry must have exactly one * key. */ if (keepold) return KADM5_PROTECT_PRINCIPAL; new_n_ks_tuple = 1; } ret = kdb_get_active_mkey(handle, &act_kvno, &act_mkey); if (ret) goto done; ret = krb5_dbe_crk(handle->context, act_mkey, new_ks_tuple, new_n_ks_tuple, keepold, kdb); if (ret) goto done; ret = krb5_dbe_update_mkvno(handle->context, kdb, act_kvno); if (ret) goto done; kdb->attributes &= ~KRB5_KDB_REQUIRES_PWCHANGE; ret = krb5_timeofday(handle->context, &now); if (ret) goto done; if ((adb.aux_attributes & KADM5_POLICY)) { ret = get_policy(handle, adb.policy, &pol, &have_pol); if (ret) goto done; } if (have_pol) { ret = krb5_dbe_lookup_last_pwd_change(handle->context, kdb, &last_pwd); if (ret) goto done; #if 0 /* * The spec says this check is overridden if the caller has * modify privilege. The admin server therefore makes this * check itself (in chpass_principal_wrapper, misc.c). A * local caller implicitly has all authorization bits. */ if((now - last_pwd) < pol.pw_min_life && !(kdb->attributes & KRB5_KDB_REQUIRES_PWCHANGE)) { ret = KADM5_PASS_TOOSOON; goto done; } #endif if (pol.pw_max_life) kdb->pw_expiration = now + pol.pw_max_life; else kdb->pw_expiration = 0; } else { kdb->pw_expiration = 0; } ret = krb5_dbe_update_last_pwd_change(handle->context, kdb, now); if (ret) goto done; /* unlock principal on this KDC */ kdb->fail_auth_count = 0; if (keyblocks) { ret = decrypt_key_data(handle->context, kdb->n_key_data, kdb->key_data, keyblocks, n_keys); if (ret) goto done; } /* key data changed, let the database provider know */ kdb->mask = KADM5_KEY_DATA | KADM5_FAIL_AUTH_COUNT; /* | KADM5_RANDKEY_USED */; ret = k5_kadm5_hook_chpass(handle->context, handle->hook_handles, KADM5_HOOK_STAGE_PRECOMMIT, principal, keepold, new_n_ks_tuple, new_ks_tuple, NULL); if (ret) goto done; if ((ret = kdb_put_entry(handle, kdb, &adb))) goto done; (void) k5_kadm5_hook_chpass(handle->context, handle->hook_handles, KADM5_HOOK_STAGE_POSTCOMMIT, principal, keepold, new_n_ks_tuple, new_ks_tuple, NULL); ret = KADM5_OK; done: free(new_ks_tuple); kdb_free_entry(handle, kdb, &adb); if (have_pol) kadm5_free_policy_ent(handle->lhandle, &pol); return ret; }
93
1
static long fat_compat_put_dirent32(struct dirent *d, struct compat_dirent __user *d32) { if (!access_ok(VERIFY_WRITE, d32, sizeof(struct compat_dirent))) return -EFAULT; __put_user(d->d_ino, &d32->d_ino); __put_user(d->d_off, &d32->d_off); __put_user(d->d_reclen, &d32->d_reclen); if (__copy_to_user(d32->d_name, d->d_name, d->d_reclen)) return -EFAULT; return 0; }
94
0
int av_packet_ref(AVPacket *dst, AVPacket *src) { int ret; ret = av_packet_copy_props(dst, src); if (ret < 0) return ret; if (!src->buf) { ret = packet_alloc(&dst->buf, src->size); if (ret < 0) goto fail; memcpy(dst->buf->data, src->data, src->size); } else dst->buf = av_buffer_ref(src->buf); dst->size = src->size; dst->data = dst->buf->data; return 0; fail: av_packet_free_side_data(dst); return ret; }
95
1
krb5_ldap_get_password_policy_from_dn(krb5_context context, char *pol_name, char *pol_dn, osa_policy_ent_t *policy) { krb5_error_code st=0, tempst=0; LDAP *ld=NULL; LDAPMessage *result=NULL,*ent=NULL; kdb5_dal_handle *dal_handle=NULL; krb5_ldap_context *ldap_context=NULL; krb5_ldap_server_handle *ldap_server_handle=NULL; /* Clear the global error string */ krb5_clear_error_message(context); /* validate the input parameters */ if (pol_dn == NULL) return EINVAL; *policy = NULL; SETUP_CONTEXT(); GET_HANDLE(); *(policy) = (osa_policy_ent_t) malloc(sizeof(osa_policy_ent_rec)); if (*policy == NULL) { st = ENOMEM; goto cleanup; } memset(*policy, 0, sizeof(osa_policy_ent_rec)); LDAP_SEARCH(pol_dn, LDAP_SCOPE_BASE, "(objectclass=krbPwdPolicy)", password_policy_attributes); ent=ldap_first_entry(ld, result); if (ent != NULL) { if ((st = populate_policy(context, ld, ent, pol_name, *policy)) != 0) goto cleanup; } cleanup: ldap_msgfree(result); if (st != 0) { if (*policy != NULL) { krb5_ldap_free_password_policy(context, *policy); *policy = NULL; } } krb5_ldap_put_handle_to_pool(ldap_context, ldap_server_handle); return st; }
96
0
static LaunchLocation * find_launch_location_for_file ( GList * list , NautilusFile * file ) { LaunchLocation * location ; GList * l ; for ( l = list ; l != NULL ; l = l -> next ) { location = l -> data ; if ( location -> file == file ) { return location ; } } return NULL ; }
97
1
static int vfat_ioctl32(unsigned fd, unsigned cmd, unsigned long arg) { struct compat_dirent __user *p = compat_ptr(arg); int ret; mm_segment_t oldfs = get_fs(); struct dirent d[2]; switch(cmd) { case VFAT_IOCTL_READDIR_BOTH32: cmd = VFAT_IOCTL_READDIR_BOTH; break; case VFAT_IOCTL_READDIR_SHORT32: cmd = VFAT_IOCTL_READDIR_SHORT; break; } set_fs(KERNEL_DS); ret = sys_ioctl(fd,cmd,(unsigned long)&d); set_fs(oldfs); if (ret >= 0) { ret |= put_dirent32(&d[0], p); ret |= put_dirent32(&d[1], p + 1); } return ret; }
98
0
av_cold void ff_float_dsp_init_ppc(AVFloatDSPContext *fdsp, int bit_exact) { #if HAVE_ALTIVEC if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC)) return; fdsp->vector_fmul = ff_vector_fmul_altivec; fdsp->vector_fmul_add = ff_vector_fmul_add_altivec; fdsp->vector_fmul_reverse = ff_vector_fmul_reverse_altivec; if (!bit_exact) { fdsp->vector_fmul_window = ff_vector_fmul_window_altivec; } #endif }
99
0
static int config_props(AVFilterLink *inlink) { AVFilterContext *ctx = inlink->dst; LutContext *lut = ctx->priv; const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[inlink->format]; int min[4], max[4]; int val, comp, ret; lut->hsub = desc->log2_chroma_w; lut->vsub = desc->log2_chroma_h; lut->var_values[VAR_W] = inlink->w; lut->var_values[VAR_H] = inlink->h; switch (inlink->format) { case PIX_FMT_YUV410P: case PIX_FMT_YUV411P: case PIX_FMT_YUV420P: case PIX_FMT_YUV422P: case PIX_FMT_YUV440P: case PIX_FMT_YUV444P: case PIX_FMT_YUVA420P: min[Y] = min[U] = min[V] = 16; max[Y] = 235; max[U] = max[V] = 240; min[A] = 0; max[A] = 255; break; default: min[0] = min[1] = min[2] = min[3] = 0; max[0] = max[1] = max[2] = max[3] = 255; } lut->is_yuv = lut->is_rgb = 0; if (ff_fmt_is_in(inlink->format, yuv_pix_fmts)) lut->is_yuv = 1; else if (ff_fmt_is_in(inlink->format, rgb_pix_fmts)) lut->is_rgb = 1; if (lut->is_rgb) { switch (inlink->format) { case PIX_FMT_ARGB: lut->rgba_map[A] = 0; lut->rgba_map[R] = 1; lut->rgba_map[G] = 2; lut->rgba_map[B] = 3; break; case PIX_FMT_ABGR: lut->rgba_map[A] = 0; lut->rgba_map[B] = 1; lut->rgba_map[G] = 2; lut->rgba_map[R] = 3; break; case PIX_FMT_RGBA: case PIX_FMT_RGB24: lut->rgba_map[R] = 0; lut->rgba_map[G] = 1; lut->rgba_map[B] = 2; lut->rgba_map[A] = 3; break; case PIX_FMT_BGRA: case PIX_FMT_BGR24: lut->rgba_map[B] = 0; lut->rgba_map[G] = 1; lut->rgba_map[R] = 2; lut->rgba_map[A] = 3; break; } lut->step = av_get_bits_per_pixel(desc) >> 3; } for (comp = 0; comp < desc->nb_components; comp++) { double res; /* create the parsed expression */ ret = av_expr_parse(&lut->comp_expr[comp], lut->comp_expr_str[comp], var_names, funcs1_names, funcs1, NULL, NULL, 0, ctx); if (ret < 0) { av_log(ctx, AV_LOG_ERROR, "Error when parsing the expression '%s' for the component %d.\n", lut->comp_expr_str[comp], comp); return AVERROR(EINVAL); } /* compute the lut */ lut->var_values[VAR_MAXVAL] = max[comp]; lut->var_values[VAR_MINVAL] = min[comp]; for (val = 0; val < 256; val++) { lut->var_values[VAR_VAL] = val; lut->var_values[VAR_CLIPVAL] = av_clip(val, min[comp], max[comp]); lut->var_values[VAR_NEGVAL] = av_clip(min[comp] + max[comp] - lut->var_values[VAR_VAL], min[comp], max[comp]); res = av_expr_eval(lut->comp_expr[comp], lut->var_values, lut); if (isnan(res)) { av_log(ctx, AV_LOG_ERROR, "Error when evaluating the expression '%s' for the value %d for the component #%d.\n", lut->comp_expr_str[comp], val, comp); return AVERROR(EINVAL); } lut->lut[comp][val] = av_clip((int)res, min[comp], max[comp]); av_log(ctx, AV_LOG_DEBUG, "val[%d][%d] = %d\n", comp, val, lut->lut[comp][val]); } } return 0; }
100
0
static av_cold int rpza_decode_init(AVCodecContext *avctx) { RpzaContext *s = avctx->priv_data; s->avctx = avctx; avctx->pix_fmt = AV_PIX_FMT_RGB555; s->frame.data[0] = NULL; return 0; }
103
0
void proto_register_zbee_zcl_part ( void ) { guint8 i , j ; static hf_register_info hf [ ] = { { & hf_zbee_zcl_part_attr_id , { "Attribute" , "zbee_zcl_general.part.attr_id" , FT_UINT16 , BASE_HEX , VALS ( zbee_zcl_part_attr_names ) , 0x0 , NULL , HFILL } } , { & hf_zbee_zcl_part_srv_tx_cmd_id , { "Command" , "zbee_zcl_general.part.cmd.srv_tx.id" , FT_UINT8 , BASE_HEX , VALS ( zbee_zcl_part_srv_tx_cmd_names ) , 0x0 , NULL , HFILL } } , { & hf_zbee_zcl_part_srv_rx_cmd_id , { "Command" , "zbee_zcl_general.part.cmd.srv_rx.id" , FT_UINT8 , BASE_HEX , VALS ( zbee_zcl_part_srv_rx_cmd_names ) , 0x0 , NULL , HFILL } } , { & hf_zbee_zcl_part_opt , { "Fragmentation Options" , "zbee_zcl_general.part.opt" , FT_UINT8 , BASE_HEX , NULL , 0x0 , NULL , HFILL } } , { & hf_zbee_zcl_part_opt_first_block , { "First Block" , "zbee_zcl_general.part.opt.first_block" , FT_UINT8 , BASE_HEX , NULL , ZBEE_ZCL_PART_OPT_1_BLOCK , NULL , HFILL } } , { & hf_zbee_zcl_part_opt_indic_len , { "Indicator length" , "zbee_zcl_general.part.opt.indic_len" , FT_UINT8 , BASE_DEC , VALS ( zbee_zcl_part_id_length_names ) , ZBEE_ZCL_PART_OPT_INDIC_LEN , NULL , HFILL } } , { & hf_zbee_zcl_part_opt_res , { "Reserved" , "zbee_zcl_general.part.opt.res" , FT_UINT8 , BASE_HEX , NULL , ZBEE_ZCL_PART_OPT_RESERVED , NULL , HFILL } } , { & hf_zbee_zcl_part_first_frame_id , { "First Frame ID" , "zbee_zcl_general.part.first_frame_id" , FT_UINT16 , BASE_DEC , NULL , 0x00 , NULL , HFILL } } , { & hf_zbee_zcl_part_part_indicator , { "Partition Indicator" , "zbee_zcl_general.part.part_indicator" , FT_UINT16 , BASE_DEC , NULL , 0x00 , NULL , HFILL } } , { & hf_zbee_zcl_part_part_frame_len , { "Partition Frame Length" , "zbee_zcl_general.part.part_frame_length" , FT_UINT8 , BASE_DEC , NULL , 0x00 , NULL , HFILL } } , { & hf_zbee_zcl_part_part_frame , { "Partition Frame" , "zbee_zcl_general.part.part_frame" , FT_BYTES , SEP_COLON , NULL , 0x00 , NULL , HFILL } } , { & hf_zbee_zcl_part_partitioned_cluster_id , { "Partitioned Cluster ID" , "zbee_zcl_general.part.part_cluster_id" , FT_UINT16 , BASE_HEX , VALS ( zbee_aps_cid_names ) , 0x00 , NULL , HFILL } } , { & hf_zbee_zcl_part_ack_opt , { "Ack Options" , "zbee_zcl_general.ack_opt.part" , FT_UINT8 , BASE_HEX , NULL , 0x0 , NULL , HFILL } } , { & hf_zbee_zcl_part_ack_opt_nack_id_len , { "Nack Id Length" , "zbee_zcl_general.ack_opt.part.nack_id.len" , FT_UINT8 , BASE_HEX , VALS ( zbee_zcl_part_id_length_names ) , ZBEE_ZCL_PART_ACK_OPT_NACK_LEN , NULL , HFILL } } , { & hf_zbee_zcl_part_ack_opt_res , { "Reserved" , "zbee_zcl_general.part.ack_opt.reserved" , FT_UINT8 , BASE_HEX , NULL , ZBEE_ZCL_PART_ACK_OPT_RESERVED , NULL , HFILL } } , { & hf_zbee_zcl_part_nack_id , { "Nack Id" , "zbee_zcl_general.part.nack_id" , FT_UINT16 , BASE_DEC , NULL , 0x00 , NULL , HFILL } } } ; gint * ett [ ZBEE_ZCL_PART_NUM_ETT ] ; ett [ 0 ] = & ett_zbee_zcl_part ; ett [ 1 ] = & ett_zbee_zcl_part_fragm_options ; ett [ 2 ] = & ett_zbee_zcl_part_ack_opts ; for ( i = 0 , j = ZBEE_ZCL_PART_NUM_GENERIC_ETT ; i < ZBEE_ZCL_PART_NUM_NACK_ID_ETT ; i ++ , j ++ ) { ett_zbee_zcl_part_nack_id_list [ i ] = - 1 ; ett [ j ] = & ett_zbee_zcl_part_nack_id_list [ i ] ; } for ( i = 0 ; i < ZBEE_ZCL_PART_NUM_ATTRS_ID_ETT ; i ++ , j ++ ) { ett_zbee_zcl_part_attrs_id_list [ i ] = - 1 ; ett [ j ] = & ett_zbee_zcl_part_attrs_id_list [ i ] ; } proto_zbee_zcl_part = proto_register_protocol ( "ZigBee ZCL Partition" , "ZCL Partition" , ZBEE_PROTOABBREV_ZCL_PART ) ; proto_register_field_array ( proto_zbee_zcl_part , hf , array_length ( hf ) ) ; 
proto_register_subtree_array ( ett , array_length ( ett ) ) ; register_dissector ( ZBEE_PROTOABBREV_ZCL_PART , dissect_zbee_zcl_part , proto_zbee_zcl_part ) ; }
104
1
static inline void assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst) { switch (ctxt->op_bytes) { case 2: ctxt->_eip = (u16)dst; break; case 4: ctxt->_eip = (u32)dst; break; case 8: ctxt->_eip = dst; break; default: WARN(1, "unsupported eip assignment size\n"); } }
105
1
krb5_encode_krbsecretkey(krb5_key_data *key_data_in, int n_key_data, krb5_kvno mkvno) { struct berval **ret = NULL; int currkvno; int num_versions = 1; int i, j, last; krb5_error_code err = 0; krb5_key_data *key_data; if (n_key_data <= 0) return NULL; /* Make a shallow copy of the key data so we can alter it. */ key_data = k5calloc(n_key_data, sizeof(*key_data), &err); if (key_data_in == NULL) goto cleanup; memcpy(key_data, key_data_in, n_key_data * sizeof(*key_data)); /* Unpatched krb5 1.11 and 1.12 cannot decode KrbKey sequences with no salt * field. For compatibility, always encode a salt field. */ for (i = 0; i < n_key_data; i++) { if (key_data[i].key_data_ver == 1) { key_data[i].key_data_ver = 2; key_data[i].key_data_type[1] = KRB5_KDB_SALTTYPE_NORMAL; key_data[i].key_data_length[1] = 0; key_data[i].key_data_contents[1] = NULL; } } /* Find the number of key versions */ for (i = 0; i < n_key_data - 1; i++) if (key_data[i].key_data_kvno != key_data[i + 1].key_data_kvno) num_versions++; ret = (struct berval **) calloc (num_versions + 1, sizeof (struct berval *)); if (ret == NULL) { err = ENOMEM; goto cleanup; } for (i = 0, last = 0, j = 0, currkvno = key_data[0].key_data_kvno; i < n_key_data; i++) { krb5_data *code; if (i == n_key_data - 1 || key_data[i + 1].key_data_kvno != currkvno) { ret[j] = k5alloc(sizeof(struct berval), &err); if (ret[j] == NULL) goto cleanup; err = asn1_encode_sequence_of_keys(key_data + last, (krb5_int16)i - last + 1, mkvno, &code); if (err) goto cleanup; /*CHECK_NULL(ret[j]); */ ret[j]->bv_len = code->length; ret[j]->bv_val = code->data; free(code); j++; last = i + 1; if (i < n_key_data - 1) currkvno = key_data[i + 1].key_data_kvno; } } ret[num_versions] = NULL; cleanup: free(key_data); if (err != 0) { if (ret != NULL) { for (i = 0; i <= num_versions; i++) if (ret[i] != NULL) free (ret[i]); free (ret); ret = NULL; } } return ret; }
106
1
static unsigned long convert_eip_to_linear(struct task_struct *child, struct pt_regs *regs) { unsigned long addr, seg; addr = regs->eip; seg = regs->xcs & 0xffff; if (regs->eflags & VM_MASK) { addr = (addr & 0xffff) + (seg << 4); return addr; } /* * We'll assume that the code segments in the GDT * are all zero-based. That is largely true: the * TLS segments are used for data, and the PNPBIOS * and APM bios ones we just ignore here. */ if (seg & LDT_SEGMENT) { u32 *desc; unsigned long base; down(&child->mm->context.sem); desc = child->mm->context.ldt + (seg & ~7); base = (desc[0] >> 16) | ((desc[1] & 0xff) << 16) | (desc[1] & 0xff000000); /* 16-bit code segment? */ if (!((desc[1] >> 22) & 1)) addr &= 0xffff; addr += base; up(&child->mm->context.sem); } return addr; }
107
1
static int huffman_decode(MPADecodeContext *s, GranuleDef *g, int16_t *exponents, int end_pos2) { int s_index; int i; int last_pos, bits_left; VLC *vlc; int end_pos = FFMIN(end_pos2, s->gb.size_in_bits); /* low frequencies (called big values) */ s_index = 0; for (i = 0; i < 3; i++) { int j, k, l, linbits; j = g->region_size[i]; if (j == 0) continue; /* select vlc table */ k = g->table_select[i]; l = mpa_huff_data[k][0]; linbits = mpa_huff_data[k][1]; vlc = &huff_vlc[l]; if (!l) { memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid) * 2 * j); s_index += 2 * j; continue; } /* read huffcode and compute each couple */ for (; j > 0; j--) { int exponent, x, y; int v; int pos = get_bits_count(&s->gb); if (pos >= end_pos){ switch_buffer(s, &pos, &end_pos, &end_pos2); if (pos >= end_pos) break; } y = get_vlc2(&s->gb, vlc->table, 7, 3); if (!y) { g->sb_hybrid[s_index ] = g->sb_hybrid[s_index+1] = 0; s_index += 2; continue; } exponent= exponents[s_index]; ff_dlog(s->avctx, "region=%d n=%d x=%d y=%d exp=%d\n", i, g->region_size[i] - j, x, y, exponent); if (y & 16) { x = y >> 5; y = y & 0x0f; if (x < 15) { READ_FLIP_SIGN(g->sb_hybrid + s_index, RENAME(expval_table)[exponent] + x) } else { x += get_bitsz(&s->gb, linbits); v = l3_unscale(x, exponent); if (get_bits1(&s->gb)) v = -v; g->sb_hybrid[s_index] = v; } if (y < 15) { READ_FLIP_SIGN(g->sb_hybrid + s_index + 1, RENAME(expval_table)[exponent] + y) } else { y += get_bitsz(&s->gb, linbits); v = l3_unscale(y, exponent); if (get_bits1(&s->gb)) v = -v; g->sb_hybrid[s_index+1] = v; } } else { x = y >> 5; y = y & 0x0f; x += y; if (x < 15) { READ_FLIP_SIGN(g->sb_hybrid + s_index + !!y, RENAME(expval_table)[exponent] + x) } else { x += get_bitsz(&s->gb, linbits); v = l3_unscale(x, exponent); if (get_bits1(&s->gb)) v = -v; g->sb_hybrid[s_index+!!y] = v; } g->sb_hybrid[s_index + !y] = 0; } s_index += 2; } } /* high frequencies */ vlc = &huff_quad_vlc[g->count1table_select]; last_pos = 0; while (s_index <= 572) { int pos, code; pos = get_bits_count(&s->gb); if (pos >= end_pos) { if (pos > end_pos2 && last_pos) { /* some encoders generate an incorrect size for this part. We must go back into the data */ s_index -= 4; skip_bits_long(&s->gb, last_pos - pos); av_log(s->avctx, AV_LOG_INFO, "overread, skip %d enddists: %d %d\n", last_pos - pos, end_pos-pos, end_pos2-pos); if(s->err_recognition & AV_EF_BITSTREAM) s_index=0; break; } switch_buffer(s, &pos, &end_pos, &end_pos2); if (pos >= end_pos) break; } last_pos = pos; code = get_vlc2(&s->gb, vlc->table, vlc->bits, 1); ff_dlog(s->avctx, "t=%d code=%d\n", g->count1table_select, code); g->sb_hybrid[s_index+0] = g->sb_hybrid[s_index+1] = g->sb_hybrid[s_index+2] = g->sb_hybrid[s_index+3] = 0; while (code) { static const int idxtab[16] = { 3,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0 }; int v; int pos = s_index + idxtab[code]; code ^= 8 >> idxtab[code]; READ_FLIP_SIGN(g->sb_hybrid + pos, RENAME(exp_table)+exponents[pos]) } s_index += 4; } /* skip extension bits */ bits_left = end_pos2 - get_bits_count(&s->gb); if (bits_left < 0 && (s->err_recognition & AV_EF_BUFFER)) { av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left); s_index=0; } else if (bits_left > 0 && (s->err_recognition & AV_EF_BUFFER)) { av_log(s->avctx, AV_LOG_ERROR, "bits_left=%d\n", bits_left); s_index = 0; } memset(&g->sb_hybrid[s_index], 0, sizeof(*g->sb_hybrid) * (576 - s_index)); skip_bits_long(&s->gb, bits_left); i = get_bits_count(&s->gb); switch_buffer(s, &i, &end_pos, &end_pos2); return 0; }
108
0
static void test_bug17667 ( ) { int rc ; MYSQL_STMT * stmt ; enum query_type { QT_NORMAL , QT_PREPARED } ; struct buffer_and_length { enum query_type qt ; const char * buffer ; const uint length ; } statements [ ] = { { QT_NORMAL , "drop table if exists bug17667" , 29 } , { QT_NORMAL , "create table bug17667 (c varchar(20))" , 37 } , { QT_NORMAL , "insert into bug17667 (c) values ('regular') /* NUL=\0 with comment */" , 68 } , { QT_PREPARED , "insert into bug17667 (c) values ('prepared') /* NUL=\0 with comment */" , 69 , } , { QT_NORMAL , "insert into bug17667 (c) values ('NUL=\0 in value')" , 50 } , { QT_NORMAL , "insert into bug17667 (c) values ('5 NULs=\0\0\0\0\0')" , 48 } , { QT_PREPARED , "insert into bug17667 (c) values ('6 NULs=\0\0\0\0\0\0')" , 50 } , { QT_NORMAL , "/* NUL=\0 with comment */ insert into bug17667 (c) values ('encore')" , 67 } , { QT_NORMAL , "drop table bug17667" , 19 } , { QT_NORMAL , NULL , 0 } } ; struct buffer_and_length * statement_cursor ; FILE * log_file ; char * master_log_filename ; myheader ( "test_bug17667" ) ; master_log_filename = ( char * ) malloc ( strlen ( opt_vardir ) + strlen ( "/log/master.log" ) + 1 ) ; strxmov ( master_log_filename , opt_vardir , "/log/master.log" , NullS ) ; if ( ! opt_silent ) printf ( "Opening '%s'\n" , master_log_filename ) ; log_file = my_fopen ( master_log_filename , ( int ) ( O_RDONLY | O_BINARY ) , MYF ( 0 ) ) ; free ( master_log_filename ) ; if ( log_file == NULL ) { if ( ! opt_silent ) { printf ( "Could not find the log file, VARDIR/log/master.log, so " "test_bug17667 is not run.\n" "Run test from the mysql-test/mysql-test-run* program to set up " "correct environment for this test.\n\n" ) ; } return ; } enable_query_logs ( 1 ) ; for ( statement_cursor = statements ; statement_cursor -> buffer != NULL ; statement_cursor ++ ) { if ( statement_cursor -> qt == QT_NORMAL ) { rc = mysql_real_query ( mysql , statement_cursor -> buffer , statement_cursor -> length ) ; myquery ( rc ) ; } else if ( statement_cursor -> qt == QT_PREPARED ) { stmt = mysql_stmt_init ( mysql ) ; rc = mysql_stmt_prepare ( stmt , statement_cursor -> buffer , statement_cursor -> length ) ; check_execute ( stmt , rc ) ; rc = mysql_stmt_execute ( stmt ) ; check_execute ( stmt , rc ) ; mysql_stmt_close ( stmt ) ; } else { DIE_UNLESS ( 0 == 1 ) ; } } rc = mysql_query ( mysql , "flush logs" ) ; myquery ( rc ) ; for ( statement_cursor = statements ; statement_cursor -> buffer != NULL ; statement_cursor ++ ) { int expected_hits = 1 , hits = 0 ; char line_buffer [ MAX_TEST_QUERY_LENGTH * 2 ] ; if ( statement_cursor -> qt == QT_PREPARED ) expected_hits ++ ; do { do { memset ( line_buffer , '/' , MAX_TEST_QUERY_LENGTH * 2 ) ; if ( fgets ( line_buffer , MAX_TEST_QUERY_LENGTH * 2 , log_file ) == NULL ) { if ( feof ( log_file ) ) DIE ( "Found EOF before all statements where found" ) ; fprintf ( stderr , "Got error %d while reading from file\n" , ferror ( log_file ) ) ; DIE ( "Read error" ) ; } } while ( my_memmem ( line_buffer , MAX_TEST_QUERY_LENGTH * 2 , statement_cursor -> buffer , statement_cursor -> length ) == NULL ) ; hits ++ ; } while ( hits < expected_hits ) ; if ( ! opt_silent ) printf ( "Found statement starting with \"%s\"\n" , statement_cursor -> buffer ) ; } restore_query_logs ( ) ; if ( ! opt_silent ) printf ( "success. All queries found intact in the log.\n" ) ; my_fclose ( log_file , MYF ( 0 ) ) ; }
109
1
static int em_call(struct x86_emulate_ctxt *ctxt) { long rel = ctxt->src.val; ctxt->src.val = (unsigned long)ctxt->_eip; jmp_rel(ctxt, rel); return em_push(ctxt); }
110