id (int32, 0-27.3k) | func (string, 26-142k chars) | target (bool, 2 classes) | project (string, 2 classes) | commit_id (string, 40 chars) |
---|---|---|---|---|
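Each record below is a single pipe-separated line in the column order given in the header: func holds a raw C function, target flags whether the function was labelled defective, and commit_id names the referenced commit. As a minimal sketch of how such a line could be split (the helper name `split_record`, the " | " separator, and the one-line-per-record layout are assumptions, not a documented loader API):

```c
#include <string.h>

/* Minimal illustrative sketch -- not part of the dataset tooling.  It splits
 * one record of the form "<id> | <func> | <target> | <project> | <commit_id> |".
 * Because the func column is raw C source and may itself contain '|', the
 * last three fields are located from the right-hand end of the line rather
 * than tokenized left to right. */
static int split_record(char *line, char **id, char **func,
                        char **target, char **project, char **commit_id)
{
    size_t len = strlen(line);
    char *sep[3];
    int n = 0;

    /* strip a trailing newline and the closing " |", if present */
    while (len && (line[len - 1] == '\n' || line[len - 1] == ' ' ||
                   line[len - 1] == '|'))
        line[--len] = '\0';
    if (!len)
        return -1;

    /* walk backwards to find the separators before commit_id, project, target */
    for (char *q = line + len - 1; q > line && n < 3; q--)
        if (*q == '|')
            sep[n++] = q;
    if (n < 3)
        return -1;

    *commit_id = sep[0] + 1;
    *project   = sep[1] + 1;
    *target    = sep[2] + 1;
    sep[0][0] = sep[1][0] = sep[2][0] = '\0';

    /* the first '|' from the left separates id from func */
    char *first = strchr(line, '|');
    if (!first)
        return -1;
    *first = '\0';
    *id    = line;
    *func  = first + 1;
    return 0; /* fields still carry their surrounding spaces */
}
```

A caller would pass a writable copy of one row and read the five fields back through the out-pointers; trimming the surrounding spaces of each field is left out for brevity.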
26,635 | static int decode_cell(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
Plane *plane, Cell *cell, const uint8_t *data_ptr,
const uint8_t *last_ptr)
{
int x, mv_x, mv_y, mode, vq_index, prim_indx, second_indx;
int zoom_fac;
int offset, error = 0, swap_quads[2];
uint8_t code, *block, *ref_block = 0;
const vqEntry *delta[2];
const uint8_t *data_start = data_ptr;
/* get coding mode and VQ table index from the VQ descriptor byte */
code = *data_ptr++;
mode = code >> 4;
vq_index = code & 0xF;
/* setup output and reference pointers */
offset = (cell->ypos << 2) * plane->pitch + (cell->xpos << 2);
block = plane->pixels[ctx->buf_sel] + offset;
if (!cell->mv_ptr) {
/* use previous line as reference for INTRA cells */
ref_block = block - plane->pitch;
} else if (mode >= 10) {
/* for mode 10 and 11 INTER first copy the predicted cell into the current one */
/* so we don't need to do data copying for each RLE code later */
copy_cell(ctx, plane, cell);
} else {
/* set the pointer to the reference pixels for modes 0-4 INTER */
mv_y = cell->mv_ptr[0];
mv_x = cell->mv_ptr[1];
offset += mv_y * plane->pitch + mv_x;
ref_block = plane->pixels[ctx->buf_sel ^ 1] + offset;
}
/* select VQ tables as follows: */
/* modes 0 and 3 use only the primary table for all lines in a block */
/* while modes 1 and 4 switch between primary and secondary tables on alternate lines */
if (mode == 1 || mode == 4) {
code = ctx->alt_quant[vq_index];
prim_indx = (code >> 4) + ctx->cb_offset;
second_indx = (code & 0xF) + ctx->cb_offset;
} else {
vq_index += ctx->cb_offset;
prim_indx = second_indx = vq_index;
}
if (prim_indx >= 24 || second_indx >= 24) {
av_log(avctx, AV_LOG_ERROR, "Invalid VQ table indexes! Primary: %d, secondary: %d!\n",
prim_indx, second_indx);
return AVERROR_INVALIDDATA;
}
delta[0] = &vq_tab[second_indx];
delta[1] = &vq_tab[prim_indx];
swap_quads[0] = second_indx >= 16;
swap_quads[1] = prim_indx >= 16;
/* requantize the prediction if VQ index of this cell differs from VQ index */
/* of the predicted cell in order to avoid overflows. */
if (vq_index >= 8 && ref_block) {
for (x = 0; x < cell->width << 2; x++)
ref_block[x] = requant_tab[vq_index & 7][ref_block[x]];
}
error = IV3_NOERR;
switch (mode) {
case 0: /*------------------ MODES 0 & 1 (4x4 block processing) --------------------*/
case 1:
case 3: /*------------------ MODES 3 & 4 (4x8 block processing) --------------------*/
case 4:
if (mode >= 3 && cell->mv_ptr) {
av_log(avctx, AV_LOG_ERROR, "Attempt to apply Mode 3/4 to an INTER cell!\n");
zoom_fac = mode >= 3;
error = decode_cell_data(cell, block, ref_block, plane->pitch, 0, zoom_fac,
mode, delta, swap_quads, &data_ptr, last_ptr);
break;
case 10: /*-------------------- MODE 10 (8x8 block processing) ---------------------*/
case 11: /*----------------- MODE 11 (4x8 INTER block processing) ------------------*/
if (mode == 10 && !cell->mv_ptr) { /* MODE 10 INTRA processing */
error = decode_cell_data(cell, block, ref_block, plane->pitch, 1, 1,
mode, delta, swap_quads, &data_ptr, last_ptr);
} else { /* mode 10 and 11 INTER processing */
if (mode == 11 && !cell->mv_ptr) {
av_log(avctx, AV_LOG_ERROR, "Attempt to use Mode 11 for an INTRA cell!\n");
zoom_fac = mode == 10;
error = decode_cell_data(cell, block, ref_block, plane->pitch,
zoom_fac, 1, mode, delta, swap_quads,
&data_ptr, last_ptr);
}
break;
default:
av_log(avctx, AV_LOG_ERROR, "Unsupported coding mode: %d\n", mode);
}//switch mode
switch (error) {
case IV3_BAD_RLE:
av_log(avctx, AV_LOG_ERROR, "Mode %d: RLE code %X is not allowed at the current line\n",
mode, data_ptr[-1]);
return AVERROR_INVALIDDATA;
case IV3_BAD_DATA:
av_log(avctx, AV_LOG_ERROR, "Mode %d: invalid VQ data\n", mode);
return AVERROR_INVALIDDATA;
case IV3_BAD_COUNTER:
av_log(avctx, AV_LOG_ERROR, "Mode %d: RLE-FB invalid counter: %d\n", mode, code);
return AVERROR_INVALIDDATA;
case IV3_UNSUPPORTED:
av_log(avctx, AV_LOG_ERROR, "Mode %d: unsupported RLE code: %X\n", mode, data_ptr[-1]);
return AVERROR_INVALIDDATA;
case IV3_OUT_OF_DATA:
av_log(avctx, AV_LOG_ERROR, "Mode %d: attempt to read past end of buffer\n", mode);
return AVERROR_INVALIDDATA;
}
return data_ptr - data_start; /* report number of bytes consumed from the input buffer */
}
| true | FFmpeg | 56ffa3fefb22605ac6507efa046ebddc38301521 |
26,636 | void ff_riff_write_info_tag(AVIOContext *pb, const char *tag, const char *str)
{
int len = strlen(str);
if (len > 0) {
len++;
ffio_wfourcc(pb, tag);
avio_wl32(pb, len);
avio_put_str(pb, str);
if (len & 1)
avio_w8(pb, 0);
}
}
| true | FFmpeg | c3671e1d5760d79c083e7565d951f4628c06cf41 |
26,637 | static int mjpegb_decode_frame(AVCodecContext *avctx,
void *data, int *got_frame,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
MJpegDecodeContext *s = avctx->priv_data;
const uint8_t *buf_end, *buf_ptr;
GetBitContext hgb; /* for the header */
uint32_t dqt_offs, dht_offs, sof_offs, sos_offs, second_field_offs;
uint32_t field_size, sod_offs;
int ret;
buf_ptr = buf;
buf_end = buf + buf_size;
s->got_picture = 0;
read_header:
/* reset on every SOI */
s->restart_interval = 0;
s->restart_count = 0;
s->mjpb_skiptosod = 0;
if (buf_end - buf_ptr >= 1 << 28)
return AVERROR_INVALIDDATA;
init_get_bits(&hgb, buf_ptr, /*buf_size*/(buf_end - buf_ptr)*8);
skip_bits(&hgb, 32); /* reserved zeros */
if (get_bits_long(&hgb, 32) != MKBETAG('m','j','p','g'))
{
av_log(avctx, AV_LOG_WARNING, "not mjpeg-b (bad fourcc)\n");
return AVERROR_INVALIDDATA;
}
field_size = get_bits_long(&hgb, 32); /* field size */
av_log(avctx, AV_LOG_DEBUG, "field size: 0x%x\n", field_size);
skip_bits(&hgb, 32); /* padded field size */
second_field_offs = read_offs(avctx, &hgb, buf_end - buf_ptr, "second_field_offs is %d and size is %d\n");
av_log(avctx, AV_LOG_DEBUG, "second field offs: 0x%x\n", second_field_offs);
dqt_offs = read_offs(avctx, &hgb, buf_end - buf_ptr, "dqt is %d and size is %d\n");
av_log(avctx, AV_LOG_DEBUG, "dqt offs: 0x%x\n", dqt_offs);
if (dqt_offs)
{
init_get_bits(&s->gb, buf_ptr+dqt_offs, (buf_end - (buf_ptr+dqt_offs))*8);
s->start_code = DQT;
if (ff_mjpeg_decode_dqt(s) < 0 &&
(avctx->err_recognition & AV_EF_EXPLODE))
return AVERROR_INVALIDDATA;
}
dht_offs = read_offs(avctx, &hgb, buf_end - buf_ptr, "dht is %d and size is %d\n");
av_log(avctx, AV_LOG_DEBUG, "dht offs: 0x%x\n", dht_offs);
if (dht_offs)
{
init_get_bits(&s->gb, buf_ptr+dht_offs, (buf_end - (buf_ptr+dht_offs))*8);
s->start_code = DHT;
ff_mjpeg_decode_dht(s);
}
sof_offs = read_offs(avctx, &hgb, buf_end - buf_ptr, "sof is %d and size is %d\n");
av_log(avctx, AV_LOG_DEBUG, "sof offs: 0x%x\n", sof_offs);
if (sof_offs)
{
init_get_bits(&s->gb, buf_ptr+sof_offs, (buf_end - (buf_ptr+sof_offs))*8);
s->start_code = SOF0;
if (ff_mjpeg_decode_sof(s) < 0)
return -1;
}
sos_offs = read_offs(avctx, &hgb, buf_end - buf_ptr, "sos is %d and size is %d\n");
av_log(avctx, AV_LOG_DEBUG, "sos offs: 0x%x\n", sos_offs);
sod_offs = read_offs(avctx, &hgb, buf_end - buf_ptr, "sof is %d and size is %d\n");
av_log(avctx, AV_LOG_DEBUG, "sod offs: 0x%x\n", sod_offs);
if (sos_offs)
{
init_get_bits(&s->gb, buf_ptr + sos_offs,
8 * FFMIN(field_size, buf_end - buf_ptr - sos_offs));
s->mjpb_skiptosod = (sod_offs - sos_offs - show_bits(&s->gb, 16));
s->start_code = SOS;
if (ff_mjpeg_decode_sos(s, NULL, NULL) < 0 &&
(avctx->err_recognition & AV_EF_EXPLODE))
return AVERROR_INVALIDDATA;
}
if (s->interlaced) {
s->bottom_field ^= 1;
/* if not bottom field, do not output image yet */
if (s->bottom_field != s->interlace_polarity && second_field_offs)
{
buf_ptr = buf + second_field_offs;
goto read_header;
}
}
//XXX FIXME factorize, this looks very similar to the EOI code
if(!s->got_picture) {
av_log(avctx, AV_LOG_WARNING, "no picture\n");
return buf_size;
}
if ((ret = av_frame_ref(data, s->picture_ptr)) < 0)
return ret;
*got_frame = 1;
if (!s->lossless && avctx->debug & FF_DEBUG_QP) {
av_log(avctx, AV_LOG_DEBUG, "QP: %d\n",
FFMAX3(s->qscale[0], s->qscale[1], s->qscale[2]));
}
return buf_size;
}
| true | FFmpeg | 2884688bd51a808ccda3c0e13367619cd79e0579 |
26,638 | void helper_wrpil(CPUSPARCState *env, target_ulong new_pil)
{
#if !defined(CONFIG_USER_ONLY)
trace_win_helper_wrpil(env->psrpil, (uint32_t)new_pil);
env->psrpil = new_pil;
if (cpu_interrupts_enabled(env)) {
cpu_check_irqs(env);
}
#endif
}
| true | qemu | 5ee5993001cf32addb86a92e2ae8cb090fbc1462 |
26,639 | const char *qdict_get_try_str(const QDict *qdict, const char *key)
{
QObject *obj;
obj = qdict_get(qdict, key);
if (!obj || qobject_type(obj) != QTYPE_QSTRING)
return NULL;
return qstring_get_str(qobject_to_qstring(obj));
}
| true | qemu | 7f0278435df1fa845b3bd9556942f89296d4246b |
26,640 | static void tricore_testboard_init(MachineState *machine, int board_id)
{
TriCoreCPU *cpu;
CPUTriCoreState *env;
MemoryRegion *sysmem = get_system_memory();
MemoryRegion *ext_cram = g_new(MemoryRegion, 1);
MemoryRegion *ext_dram = g_new(MemoryRegion, 1);
MemoryRegion *int_cram = g_new(MemoryRegion, 1);
MemoryRegion *int_dram = g_new(MemoryRegion, 1);
MemoryRegion *pcp_data = g_new(MemoryRegion, 1);
MemoryRegion *pcp_text = g_new(MemoryRegion, 1);
if (!machine->cpu_model) {
machine->cpu_model = "tc1796";
}
cpu = cpu_tricore_init(machine->cpu_model);
if (!cpu) {
error_report("Unable to find CPU definition");
exit(1);
}
env = &cpu->env;
memory_region_init_ram(ext_cram, NULL, "powerlink_ext_c.ram", 2*1024*1024, &error_abort);
vmstate_register_ram_global(ext_cram);
memory_region_init_ram(ext_dram, NULL, "powerlink_ext_d.ram", 4*1024*1024, &error_abort);
vmstate_register_ram_global(ext_dram);
memory_region_init_ram(int_cram, NULL, "powerlink_int_c.ram", 48*1024, &error_abort);
vmstate_register_ram_global(int_cram);
memory_region_init_ram(int_dram, NULL, "powerlink_int_d.ram", 48*1024, &error_abort);
vmstate_register_ram_global(int_dram);
memory_region_init_ram(pcp_data, NULL, "powerlink_pcp_data.ram", 16*1024, &error_abort);
vmstate_register_ram_global(pcp_data);
memory_region_init_ram(pcp_text, NULL, "powerlink_pcp_text.ram", 32*1024, &error_abort);
vmstate_register_ram_global(pcp_text);
memory_region_add_subregion(sysmem, 0x80000000, ext_cram);
memory_region_add_subregion(sysmem, 0xa1000000, ext_dram);
memory_region_add_subregion(sysmem, 0xd4000000, int_cram);
memory_region_add_subregion(sysmem, 0xd0000000, int_dram);
memory_region_add_subregion(sysmem, 0xf0050000, pcp_data);
memory_region_add_subregion(sysmem, 0xf0060000, pcp_text);
tricoretb_binfo.ram_size = machine->ram_size;
tricoretb_binfo.kernel_filename = machine->kernel_filename;
if (machine->kernel_filename) {
tricore_load_kernel(env);
}
}
| true | qemu | f8ed85ac992c48814d916d5df4d44f9a971c5de4 |
26,641 | static void ahci_write_fis_sdb(AHCIState *s, int port, uint32_t finished)
{
AHCIPortRegs *pr = &s->dev[port].port_regs;
IDEState *ide_state;
uint8_t *sdb_fis;
if (!s->dev[port].res_fis ||
!(pr->cmd & PORT_CMD_FIS_RX)) {
return;
}
sdb_fis = &s->dev[port].res_fis[RES_FIS_SDBFIS];
ide_state = &s->dev[port].port.ifs[0];
/* clear memory */
*(uint32_t*)sdb_fis = 0;
/* write values */
sdb_fis[0] = ide_state->error;
sdb_fis[2] = ide_state->status & 0x77;
s->dev[port].finished |= finished;
*(uint32_t*)(sdb_fis + 4) = cpu_to_le32(s->dev[port].finished);
ahci_trigger_irq(s, &s->dev[port], PORT_IRQ_SDB_FIS);
}
| true | qemu | fac7aa7fc2ebc26803b0a7b44b010f47ce3e1dd8 |
26,643 | int v9fs_set_xattr(FsContext *ctx, const char *path, const char *name,
void *value, size_t size, int flags)
{
XattrOperations *xops = get_xattr_operations(ctx->xops, name);
if (xops) {
return xops->setxattr(ctx, path, name, value, size, flags);
}
errno = -EOPNOTSUPP;
return -1;
}
| true | qemu | 8af00205445eb901f17ca5b632d976065187538e |
26,644 | void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
{
struct kvm_irq_routing_entry e;
assert(pin < s->gsi_count);
e.gsi = irq;
e.type = KVM_IRQ_ROUTING_IRQCHIP;
e.flags = 0;
e.u.irqchip.irqchip = irqchip;
e.u.irqchip.pin = pin;
kvm_add_routing_entry(s, &e);
}
| true | qemu | 0fbc20740342713f282b118b4a446c4c43df3f4a |
26,645 | static inline void gen_op_evsrwu(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
TCGv_i32 t0;
int l1, l2;
l1 = gen_new_label();
l2 = gen_new_label();
t0 = tcg_temp_local_new_i32();
/* No error here: 6 bits are used */
tcg_gen_andi_i32(t0, arg2, 0x3F);
tcg_gen_brcondi_i32(TCG_COND_GE, t0, 32, l1);
tcg_gen_shr_i32(ret, arg1, t0);
tcg_gen_br(l2);
gen_set_label(l1);
tcg_gen_movi_i32(ret, 0);
gen_set_label(l2);
tcg_temp_free_i32(t0);
}
| false | qemu | 42a268c241183877192c376d03bd9b6d527407c7 |
26,648 | static int hqx_decode_frame(AVCodecContext *avctx, void *data,
int *got_picture_ptr, AVPacket *avpkt)
{
HQXContext *ctx = avctx->priv_data;
uint8_t *src = avpkt->data;
uint32_t info_tag;
int data_start;
int i, ret;
if (avpkt->size < 4 + 4) {
av_log(avctx, AV_LOG_ERROR, "Frame is too small %d.\n", avpkt->size);
return AVERROR_INVALIDDATA;
}
info_tag = AV_RL32(src);
if (info_tag == MKTAG('I', 'N', 'F', 'O')) {
int info_offset = AV_RL32(src + 4);
if (info_offset > UINT32_MAX - 8 || info_offset + 8 > avpkt->size) {
av_log(avctx, AV_LOG_ERROR,
"Invalid INFO header offset: 0x%08"PRIX32" is too large.\n",
info_offset);
return AVERROR_INVALIDDATA;
}
ff_canopus_parse_info_tag(avctx, src + 8, info_offset);
info_offset += 8;
src += info_offset;
}
data_start = src - avpkt->data;
ctx->data_size = avpkt->size - data_start;
ctx->src = src;
ctx->pic = data;
if (ctx->data_size < HQX_HEADER_SIZE) {
av_log(avctx, AV_LOG_ERROR, "Frame too small.\n");
return AVERROR_INVALIDDATA;
}
if (src[0] != 'H' || src[1] != 'Q') {
av_log(avctx, AV_LOG_ERROR, "Not an HQX frame.\n");
return AVERROR_INVALIDDATA;
}
ctx->interlaced = !(src[2] & 0x80);
ctx->format = src[2] & 7;
ctx->dcb = (src[3] & 3) + 8;
ctx->width = AV_RB16(src + 4);
ctx->height = AV_RB16(src + 6);
for (i = 0; i < 17; i++)
ctx->slice_off[i] = AV_RB24(src + 8 + i * 3);
if (ctx->dcb == 8) {
av_log(avctx, AV_LOG_ERROR, "Invalid DC precision %d.\n", ctx->dcb);
return AVERROR_INVALIDDATA;
}
ret = av_image_check_size(ctx->width, ctx->height, 0, avctx);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Invalid stored dimenstions %dx%d.\n",
ctx->width, ctx->height);
return AVERROR_INVALIDDATA;
}
avctx->coded_width = FFALIGN(ctx->width, 16);
avctx->coded_height = FFALIGN(ctx->height, 16);
avctx->width = ctx->width;
avctx->height = ctx->height;
avctx->bits_per_raw_sample = 10;
switch (ctx->format) {
case HQX_422:
avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
ctx->decode_func = hqx_decode_422;
break;
case HQX_444:
avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
ctx->decode_func = hqx_decode_444;
break;
case HQX_422A:
avctx->pix_fmt = AV_PIX_FMT_YUVA422P16;
ctx->decode_func = hqx_decode_422a;
break;
case HQX_444A:
avctx->pix_fmt = AV_PIX_FMT_YUVA444P16;
ctx->decode_func = hqx_decode_444a;
break;
default:
av_log(avctx, AV_LOG_ERROR, "Invalid format: %d.\n", ctx->format);
return AVERROR_INVALIDDATA;
}
ret = ff_get_buffer(avctx, ctx->pic, 0);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Could not allocate buffer.\n");
return ret;
}
avctx->execute2(avctx, decode_slice_thread, NULL, NULL, 16);
ctx->pic->key_frame = 1;
ctx->pic->pict_type = AV_PICTURE_TYPE_I;
*got_picture_ptr = 1;
return avpkt->size;
}
| false | FFmpeg | 9fd2bf09dbc630484d9e88a1d27f7e8508b70a2c |
26,649 | START_TEST(qlist_append_test)
{
QInt *qi;
QList *qlist;
QListEntry *entry;
qi = qint_from_int(42);
qlist = qlist_new();
qlist_append(qlist, qi);
entry = QTAILQ_FIRST(&qlist->head);
fail_unless(entry != NULL);
fail_unless(entry->value == QOBJECT(qi));
// destroy doesn't exist yet
QDECREF(qi);
g_free(entry);
g_free(qlist);
}
| false | qemu | 91479dd0b5bd3b087b92ddd7bc3f2c54982cfe17 |
26,650 | static int parse_chr(DeviceState *dev, Property *prop, const char *str)
{
CharDriverState **ptr = qdev_get_prop_ptr(dev, prop);
*ptr = qemu_chr_find(str);
if (*ptr == NULL)
return -ENOENT;
return 0;
}
| false | qemu | 2d6c1ef40f3678ab47a4d14fb5dadaa486bfcda6 |
26,651 | static unsigned virtio_pci_get_features(void *opaque)
{
unsigned ret = 0;
ret |= (1 << VIRTIO_F_NOTIFY_ON_EMPTY);
ret |= (1 << VIRTIO_RING_F_INDIRECT_DESC);
ret |= (1 << VIRTIO_F_BAD_FEATURE);
return ret;
}
| false | qemu | 8172539d21a03e982aa7f139ddc1607dc1422045 |
26,652 | target_ulong HELPER(mfspr)(CPUOpenRISCState *env,
target_ulong rd, target_ulong ra, uint32_t offset)
{
#ifndef CONFIG_USER_ONLY
int spr = (ra | offset);
int idx;
OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
switch (spr) {
case TO_SPR(0, 0): /* VR */
return env->vr & SPR_VR;
case TO_SPR(0, 1): /* UPR */
return env->upr; /* TT, DM, IM, UP present */
case TO_SPR(0, 2): /* CPUCFGR */
return env->cpucfgr;
case TO_SPR(0, 3): /* DMMUCFGR */
return env->dmmucfgr; /* 1Way, 64 entries */
case TO_SPR(0, 4): /* IMMUCFGR */
return env->immucfgr;
case TO_SPR(0, 16): /* NPC */
return env->npc;
case TO_SPR(0, 17): /* SR */
return env->sr;
case TO_SPR(0, 18): /* PPC */
return env->ppc;
case TO_SPR(0, 32): /* EPCR */
return env->epcr;
case TO_SPR(0, 48): /* EEAR */
return env->eear;
case TO_SPR(0, 64): /* ESR */
return env->esr;
case TO_SPR(1, 512) ... TO_SPR(1, 639): /* DTLBW0MR 0-127 */
idx = spr - TO_SPR(1, 512);
return env->tlb->dtlb[0][idx].mr;
case TO_SPR(1, 640) ... TO_SPR(1, 767): /* DTLBW0TR 0-127 */
idx = spr - TO_SPR(1, 640);
return env->tlb->dtlb[0][idx].tr;
case TO_SPR(1, 768) ... TO_SPR(1, 895): /* DTLBW1MR 0-127 */
case TO_SPR(1, 896) ... TO_SPR(1, 1023): /* DTLBW1TR 0-127 */
case TO_SPR(1, 1024) ... TO_SPR(1, 1151): /* DTLBW2MR 0-127 */
case TO_SPR(1, 1152) ... TO_SPR(1, 1279): /* DTLBW2TR 0-127 */
case TO_SPR(1, 1280) ... TO_SPR(1, 1407): /* DTLBW3MR 0-127 */
case TO_SPR(1, 1408) ... TO_SPR(1, 1535): /* DTLBW3TR 0-127 */
break;
case TO_SPR(2, 512) ... TO_SPR(2, 639): /* ITLBW0MR 0-127 */
idx = spr - TO_SPR(2, 512);
return env->tlb->itlb[0][idx].mr;
case TO_SPR(2, 640) ... TO_SPR(2, 767): /* ITLBW0TR 0-127 */
idx = spr - TO_SPR(2, 640);
return env->tlb->itlb[0][idx].tr;
case TO_SPR(2, 768) ... TO_SPR(2, 895): /* ITLBW1MR 0-127 */
case TO_SPR(2, 896) ... TO_SPR(2, 1023): /* ITLBW1TR 0-127 */
case TO_SPR(2, 1024) ... TO_SPR(2, 1151): /* ITLBW2MR 0-127 */
case TO_SPR(2, 1152) ... TO_SPR(2, 1279): /* ITLBW2TR 0-127 */
case TO_SPR(2, 1280) ... TO_SPR(2, 1407): /* ITLBW3MR 0-127 */
case TO_SPR(2, 1408) ... TO_SPR(2, 1535): /* ITLBW3TR 0-127 */
break;
case TO_SPR(9, 0): /* PICMR */
return env->picmr;
case TO_SPR(9, 2): /* PICSR */
return env->picsr;
case TO_SPR(10, 0): /* TTMR */
return env->ttmr;
case TO_SPR(10, 1): /* TTCR */
cpu_openrisc_count_update(cpu);
return env->ttcr;
default:
break;
}
#endif
/*If we later need to add tracepoints (or debug printfs) for the return
value, it may be useful to structure the code like this:
target_ulong ret = 0;
switch() {
case x:
ret = y;
break;
case z:
ret = 42;
break;
...
}
later something like trace_spr_read(ret);
return ret;*/
/* for rd is passed in, if rd unchanged, just keep it back. */
return rd;
}
| false | qemu | 93147a180c10b97bf9575a87e01c9a1c93e6c9ce |
26,654 | ssize_t virtio_pdu_vunmarshal(V9fsPDU *pdu, size_t offset,
const char *fmt, va_list ap)
{
V9fsState *s = pdu->s;
V9fsVirtioState *v = container_of(s, V9fsVirtioState, state);
VirtQueueElement *elem = &v->elems[pdu->idx];
return v9fs_iov_vunmarshal(elem->out_sg, elem->out_num, offset, 1, fmt, ap);
}
| false | qemu | 51b19ebe4320f3dcd93cea71235c1219318ddfd2 |
26,655 | void do_td (int flags)
{
if (!likely(!(((int64_t)T0 < (int64_t)T1 && (flags & 0x10)) ||
((int64_t)T0 > (int64_t)T1 && (flags & 0x08)) ||
((int64_t)T0 == (int64_t)T1 && (flags & 0x04)) ||
((uint64_t)T0 < (uint64_t)T1 && (flags & 0x02)) ||
((uint64_t)T0 > (uint64_t)T1 && (flags & 0x01)))))
do_raise_exception_err(EXCP_PROGRAM, EXCP_TRAP);
}
| false | qemu | e1833e1f96456fd8fc17463246fe0b2050e68efb |
26,656 | static ssize_t block_crypto_read_func(QCryptoBlock *block,
void *opaque,
size_t offset,
uint8_t *buf,
size_t buflen,
Error **errp)
{
BlockDriverState *bs = opaque;
ssize_t ret;
ret = bdrv_pread(bs->file, offset, buf, buflen);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read encryption header");
return ret;
}
return ret;
}
| false | qemu | e4a3507e86a1ef1453d603031bca27d5ac4cff3c |
26,657 | vnc_display_setup_auth(VncDisplay *vd,
bool password,
bool sasl,
bool websocket,
Error **errp)
{
/*
* We have a choice of 3 authentication options
*
* 1. none
* 2. vnc
* 3. sasl
*
* The channel can be run in 2 modes
*
* 1. clear
* 2. tls
*
* And TLS can use 2 types of credentials
*
* 1. anon
* 2. x509
*
* We thus have 9 possible logical combinations
*
* 1. clear + none
* 2. clear + vnc
* 3. clear + sasl
* 4. tls + anon + none
* 5. tls + anon + vnc
* 6. tls + anon + sasl
* 7. tls + x509 + none
* 8. tls + x509 + vnc
* 9. tls + x509 + sasl
*
* These need to be mapped into the VNC auth schemes
* in an appropriate manner. In regular VNC, all the
* TLS options get mapped into VNC_AUTH_VENCRYPT
* sub-auth types.
*
* In websockets, the https:// protocol already provides
* TLS support, so there is no need to make use of the
* VeNCrypt extension. Furthermore, websockets browser
* clients could not use VeNCrypt even if they wanted to,
* as they cannot control when the TLS handshake takes
* place. Thus there is no option but to rely on https://,
* meaning combinations 4->6 and 7->9 will be mapped to
* VNC auth schemes in the same way as combos 1->3.
*
* Regardless of fact that we have a different mapping to
* VNC auth mechs for plain VNC vs websockets VNC, the end
* result has the same security characteristics.
*/
if (password) {
if (vd->tlscreds) {
vd->auth = VNC_AUTH_VENCRYPT;
if (object_dynamic_cast(OBJECT(vd->tlscreds),
TYPE_QCRYPTO_TLS_CREDS_X509)) {
VNC_DEBUG("Initializing VNC server with x509 password auth\n");
vd->subauth = VNC_AUTH_VENCRYPT_X509VNC;
} else if (object_dynamic_cast(OBJECT(vd->tlscreds),
TYPE_QCRYPTO_TLS_CREDS_ANON)) {
VNC_DEBUG("Initializing VNC server with TLS password auth\n");
vd->subauth = VNC_AUTH_VENCRYPT_TLSVNC;
} else {
error_setg(errp,
"Unsupported TLS cred type %s",
object_get_typename(OBJECT(vd->tlscreds)));
return -1;
}
} else {
VNC_DEBUG("Initializing VNC server with password auth\n");
vd->auth = VNC_AUTH_VNC;
vd->subauth = VNC_AUTH_INVALID;
}
if (websocket) {
vd->ws_auth = VNC_AUTH_VNC;
} else {
vd->ws_auth = VNC_AUTH_INVALID;
}
} else if (sasl) {
if (vd->tlscreds) {
vd->auth = VNC_AUTH_VENCRYPT;
if (object_dynamic_cast(OBJECT(vd->tlscreds),
TYPE_QCRYPTO_TLS_CREDS_X509)) {
VNC_DEBUG("Initializing VNC server with x509 SASL auth\n");
vd->subauth = VNC_AUTH_VENCRYPT_X509SASL;
} else if (object_dynamic_cast(OBJECT(vd->tlscreds),
TYPE_QCRYPTO_TLS_CREDS_ANON)) {
VNC_DEBUG("Initializing VNC server with TLS SASL auth\n");
vd->subauth = VNC_AUTH_VENCRYPT_TLSSASL;
} else {
error_setg(errp,
"Unsupported TLS cred type %s",
object_get_typename(OBJECT(vd->tlscreds)));
return -1;
}
} else {
VNC_DEBUG("Initializing VNC server with SASL auth\n");
vd->auth = VNC_AUTH_SASL;
vd->subauth = VNC_AUTH_INVALID;
}
if (websocket) {
vd->ws_auth = VNC_AUTH_SASL;
} else {
vd->ws_auth = VNC_AUTH_INVALID;
}
} else {
if (vd->tlscreds) {
vd->auth = VNC_AUTH_VENCRYPT;
if (object_dynamic_cast(OBJECT(vd->tlscreds),
TYPE_QCRYPTO_TLS_CREDS_X509)) {
VNC_DEBUG("Initializing VNC server with x509 no auth\n");
vd->subauth = VNC_AUTH_VENCRYPT_X509NONE;
} else if (object_dynamic_cast(OBJECT(vd->tlscreds),
TYPE_QCRYPTO_TLS_CREDS_ANON)) {
VNC_DEBUG("Initializing VNC server with TLS no auth\n");
vd->subauth = VNC_AUTH_VENCRYPT_TLSNONE;
} else {
error_setg(errp,
"Unsupported TLS cred type %s",
object_get_typename(OBJECT(vd->tlscreds)));
return -1;
}
} else {
VNC_DEBUG("Initializing VNC server with no auth\n");
vd->auth = VNC_AUTH_NONE;
vd->subauth = VNC_AUTH_INVALID;
}
if (websocket) {
vd->ws_auth = VNC_AUTH_NONE;
} else {
vd->ws_auth = VNC_AUTH_INVALID;
}
}
return 0;
}
| false | qemu | eda24e188637e2f86db31c3edb76d457212fdcb1 |
26,658 | static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
{
phdr->p_type = PT_NOTE;
phdr->p_offset = offset;
phdr->p_vaddr = 0;
phdr->p_paddr = 0;
phdr->p_filesz = sz;
phdr->p_memsz = 0;
phdr->p_flags = 0;
phdr->p_align = 0;
#ifdef BSWAP_NEEDED
bswap_phdr(phdr);
#endif
}
| false | qemu | 991f8f0c91d65cebf51fa931450e02b0d5209012 |
26,659 | static av_cold int vaapi_encode_mjpeg_init_internal(AVCodecContext *avctx)
{
static const VAConfigAttrib default_config_attributes[] = {
{ .type = VAConfigAttribRTFormat,
.value = VA_RT_FORMAT_YUV420 },
{ .type = VAConfigAttribEncPackedHeaders,
.value = VA_ENC_PACKED_HEADER_SEQUENCE },
};
VAAPIEncodeContext *ctx = avctx->priv_data;
VAAPIEncodeMJPEGContext *priv = ctx->priv_data;
int i;
ctx->va_profile = VAProfileJPEGBaseline;
ctx->va_entrypoint = VAEntrypointEncPicture;
ctx->input_width = avctx->width;
ctx->input_height = avctx->height;
ctx->aligned_width = FFALIGN(ctx->input_width, 8);
ctx->aligned_height = FFALIGN(ctx->input_height, 8);
for (i = 0; i < FF_ARRAY_ELEMS(default_config_attributes); i++) {
ctx->config_attributes[ctx->nb_config_attributes++] =
default_config_attributes[i];
}
priv->quality = avctx->global_quality;
if (priv->quality < 1 || priv->quality > 100) {
av_log(avctx, AV_LOG_ERROR, "Invalid quality value %d "
"(must be 1-100).\n", priv->quality);
return AVERROR(EINVAL);
}
vaapi_encode_mjpeg_init_tables(avctx);
return 0;
}
| false | FFmpeg | 80a5d05108cb218e8cd2e25c6621a3bfef0a832e |
26,660 | static gboolean tcp_chr_chan_close(GIOChannel *channel, GIOCondition cond,
void *opaque)
{
CharDriverState *chr = opaque;
if (cond != G_IO_HUP) {
return FALSE;
}
/* connection closed */
tcp_chr_disconnect(chr);
if (chr->fd_hup_tag) {
g_source_remove(chr->fd_hup_tag);
chr->fd_hup_tag = 0;
}
return TRUE;
}
| false | qemu | 812c1057f6175ac9a9829fa2920a2b5783814193 |
26,661 | static void rtas_ibm_configure_pe(PowerPCCPU *cpu,
sPAPREnvironment *spapr,
uint32_t token, uint32_t nargs,
target_ulong args, uint32_t nret,
target_ulong rets)
{
sPAPRPHBState *sphb;
sPAPRPHBClass *spc;
uint64_t buid;
int ret;
if ((nargs != 3) || (nret != 1)) {
goto param_error_exit;
}
buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
sphb = find_phb(spapr, buid);
if (!sphb) {
goto param_error_exit;
}
spc = SPAPR_PCI_HOST_BRIDGE_GET_CLASS(sphb);
if (!spc->eeh_configure) {
goto param_error_exit;
}
ret = spc->eeh_configure(sphb);
rtas_st(rets, 0, ret);
return;
param_error_exit:
rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
}
| false | qemu | 46c5874e9cd752ed8ded31af03472edd8fc3efc1 |
26,662 | static void sigp_cpu_start(void *arg)
{
CPUState *cs = arg;
S390CPU *cpu = S390_CPU(cs);
s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
DPRINTF("DONE: KVM cpu start: %p\n", &cpu->env);
}
| false | qemu | 6eb8f212d2686ed9b17077d554465df7ae06f805 |
26,666 | static int dca_subsubframe(DCAContext *s, int base_channel, int block_index)
{
int k, l;
int subsubframe = s->current_subsubframe;
const float *quant_step_table;
/* FIXME */
float (*subband_samples)[DCA_SUBBANDS][8] = s->subband_samples[block_index];
LOCAL_ALIGNED_16(int32_t, block, [8 * DCA_SUBBANDS]);
/*
* Audio data
*/
/* Select quantization step size table */
if (s->bit_rate_index == 0x1f)
quant_step_table = lossless_quant_d;
else
quant_step_table = lossy_quant_d;
for (k = base_channel; k < s->prim_channels; k++) {
float rscale[DCA_SUBBANDS];
if (get_bits_left(&s->gb) < 0)
return AVERROR_INVALIDDATA;
for (l = 0; l < s->vq_start_subband[k]; l++) {
int m;
/* Select the mid-tread linear quantizer */
int abits = s->bitalloc[k][l];
float quant_step_size = quant_step_table[abits];
/*
* Determine quantization index code book and its type
*/
/* Select quantization index code book */
int sel = s->quant_index_huffman[k][abits];
/*
* Extract bits from the bit stream
*/
if (!abits) {
rscale[l] = 0;
memset(block + 8 * l, 0, 8 * sizeof(block[0]));
} else {
/* Deal with transients */
int sfi = s->transition_mode[k][l] && subsubframe >= s->transition_mode[k][l];
rscale[l] = quant_step_size * s->scale_factor[k][l][sfi] *
s->scalefactor_adj[k][sel];
if (abits >= 11 || !dca_smpl_bitalloc[abits].vlc[sel].table) {
if (abits <= 7) {
/* Block code */
int block_code1, block_code2, size, levels, err;
size = abits_sizes[abits - 1];
levels = abits_levels[abits - 1];
block_code1 = get_bits(&s->gb, size);
block_code2 = get_bits(&s->gb, size);
err = decode_blockcodes(block_code1, block_code2,
levels, block + 8 * l);
if (err) {
av_log(s->avctx, AV_LOG_ERROR,
"ERROR: block code look-up failed\n");
return AVERROR_INVALIDDATA;
}
} else {
/* no coding */
for (m = 0; m < 8; m++)
block[8 * l + m] = get_sbits(&s->gb, abits - 3);
}
} else {
/* Huffman coded */
for (m = 0; m < 8; m++)
block[8 * l + m] = get_bitalloc(&s->gb,
&dca_smpl_bitalloc[abits], sel);
}
}
}
s->fmt_conv.int32_to_float_fmul_array8(&s->fmt_conv, subband_samples[k][0],
block, rscale, 8 * s->vq_start_subband[k]);
for (l = 0; l < s->vq_start_subband[k]; l++) {
int m;
/*
* Inverse ADPCM if in prediction mode
*/
if (s->prediction_mode[k][l]) {
int n;
if (s->predictor_history)
subband_samples[k][l][0] += (adpcm_vb[s->prediction_vq[k][l]][0] *
s->subband_samples_hist[k][l][3] +
adpcm_vb[s->prediction_vq[k][l]][1] *
s->subband_samples_hist[k][l][2] +
adpcm_vb[s->prediction_vq[k][l]][2] *
s->subband_samples_hist[k][l][1] +
adpcm_vb[s->prediction_vq[k][l]][3] *
s->subband_samples_hist[k][l][0]) *
(1.0f / 8192);
for (m = 1; m < 8; m++) {
float sum = adpcm_vb[s->prediction_vq[k][l]][0] *
subband_samples[k][l][m - 1];
for (n = 2; n <= 4; n++)
if (m >= n)
sum += adpcm_vb[s->prediction_vq[k][l]][n - 1] *
subband_samples[k][l][m - n];
else if (s->predictor_history)
sum += adpcm_vb[s->prediction_vq[k][l]][n - 1] *
s->subband_samples_hist[k][l][m - n + 4];
subband_samples[k][l][m] += sum * 1.0f / 8192;
}
}
}
/*
* Decode VQ encoded high frequencies
*/
for (l = s->vq_start_subband[k]; l < s->subband_activity[k]; l++) {
/* 1 vector -> 32 samples but we only need the 8 samples
* for this subsubframe. */
int hfvq = s->high_freq_vq[k][l];
if (!s->debug_flag & 0x01) {
av_log(s->avctx, AV_LOG_DEBUG,
"Stream with high frequencies VQ coding\n");
s->debug_flag |= 0x01;
}
int8x8_fmul_int32(&s->dcadsp, subband_samples[k][l],
&high_freq_vq[hfvq][subsubframe * 8],
s->scale_factor[k][l][0]);
}
}
/* Check for DSYNC after subsubframe */
if (s->aspf || subsubframe == s->subsubframes[s->current_subframe] - 1) {
if (0xFFFF == get_bits(&s->gb, 16)) { /* 0xFFFF */
#ifdef TRACE
av_log(s->avctx, AV_LOG_DEBUG, "Got subframe DSYNC\n");
#endif
} else {
av_log(s->avctx, AV_LOG_ERROR, "Didn't get subframe DSYNC\n");
return AVERROR_INVALIDDATA;
}
}
/* Backup predictor history for adpcm */
for (k = base_channel; k < s->prim_channels; k++)
for (l = 0; l < s->vq_start_subband[k]; l++)
AV_COPY128(s->subband_samples_hist[k][l], &subband_samples[k][l][4]);
return 0;
}
| false | FFmpeg | 4cb6964244fd6c099383d8b7e99731e72cc844b9 |
26,667 | int ff_mpeg_update_thread_context(AVCodecContext *dst,
const AVCodecContext *src)
{
int i;
MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
if (dst == src)
return 0;
// FIXME can parameters change on I-frames?
// in that case dst may need a reinit
if (!s->context_initialized) {
memcpy(s, s1, sizeof(MpegEncContext));
s->avctx = dst;
s->bitstream_buffer = NULL;
s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
if (s1->context_initialized){
s->picture_range_start += MAX_PICTURE_COUNT;
s->picture_range_end += MAX_PICTURE_COUNT;
ff_MPV_common_init(s);
}
}
if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
int err;
s->context_reinit = 0;
s->height = s1->height;
s->width = s1->width;
if ((err = ff_MPV_common_frame_size_change(s)) < 0)
return err;
}
s->avctx->coded_height = s1->avctx->coded_height;
s->avctx->coded_width = s1->avctx->coded_width;
s->avctx->width = s1->avctx->width;
s->avctx->height = s1->avctx->height;
s->coded_picture_number = s1->coded_picture_number;
s->picture_number = s1->picture_number;
s->input_picture_number = s1->input_picture_number;
memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
memcpy(&s->last_picture, &s1->last_picture,
(char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
// reset s->picture[].f.extended_data to s->picture[].f.data
for (i = 0; i < s->picture_count; i++)
s->picture[i].f.extended_data = s->picture[i].f.data;
s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
// Error/bug resilience
s->next_p_frame_damaged = s1->next_p_frame_damaged;
s->workaround_bugs = s1->workaround_bugs;
s->padding_bug_score = s1->padding_bug_score;
// MPEG4 timing info
memcpy(&s->time_increment_bits, &s1->time_increment_bits,
(char *) &s1->shape - (char *) &s1->time_increment_bits);
// B-frame info
s->max_b_frames = s1->max_b_frames;
s->low_delay = s1->low_delay;
s->dropable = s1->dropable;
// DivX handling (doesn't work)
s->divx_packed = s1->divx_packed;
if (s1->bitstream_buffer) {
if (s1->bitstream_buffer_size +
FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
av_fast_malloc(&s->bitstream_buffer,
&s->allocated_bitstream_buffer_size,
s1->allocated_bitstream_buffer_size);
s->bitstream_buffer_size = s1->bitstream_buffer_size;
memcpy(s->bitstream_buffer, s1->bitstream_buffer,
s1->bitstream_buffer_size);
memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
FF_INPUT_BUFFER_PADDING_SIZE);
}
// MPEG2/interlacing info
memcpy(&s->progressive_sequence, &s1->progressive_sequence,
(char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
if (!s1->first_field) {
s->last_pict_type = s1->pict_type;
if (s1->current_picture_ptr)
s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
if (s1->pict_type != AV_PICTURE_TYPE_B) {
s->last_non_b_pict_type = s1->pict_type;
}
}
return 0;
}
| false | FFmpeg | f4d73f0fb55e0b5931c859ddb4d2d1617b60d560 |
26,668 | static int h261_probe(AVProbeData *p)
{
int code;
const uint8_t *d;
if (p->buf_size < 6)
return 0;
d = p->buf;
code = (d[0] << 12) | (d[1] << 4) | (d[2] >> 4);
if (code == 0x10) {
return 50;
}
return 0;
}
| false | FFmpeg | 87e8788680e16c51f6048af26f3f7830c35207a5 |
26,669 | static void netfilter_finalize(Object *obj)
{
NetFilterState *nf = NETFILTER(obj);
NetFilterClass *nfc = NETFILTER_GET_CLASS(obj);
if (nfc->cleanup) {
nfc->cleanup(nf);
}
if (nf->netdev && !QTAILQ_EMPTY(&nf->netdev->filters) &&
nf->next.tqe_prev) {
QTAILQ_REMOVE(&nf->netdev->filters, nf, next);
}
g_free(nf->netdev_id);
}
| false | qemu | 3b8c1761f0e1523622e008836d01a6544b1c21ab |
26,670 | static void ne2000_cleanup(NetClientState *nc)
{
NE2000State *s = qemu_get_nic_opaque(nc);
s->nic = NULL;
}
| false | qemu | 57407ea44cc0a3d630b9b89a2be011f1955ce5c1 |
26,672 | static void sys_write(void *opaque, hwaddr addr,
uint64_t value, unsigned size)
{
LM32SysState *s = opaque;
char *testname;
trace_lm32_sys_memory_write(addr, value);
addr >>= 2;
switch (addr) {
case R_CTRL:
qemu_system_shutdown_request();
break;
case R_PASSFAIL:
s->regs[addr] = value;
testname = (char *)s->testname;
qemu_log("TC %-16s %s\n", testname, (value) ? "FAILED" : "OK");
break;
case R_TESTNAME:
s->regs[addr] = value;
copy_testname(s);
break;
default:
error_report("lm32_sys: write access to unknown register 0x"
TARGET_FMT_plx, addr << 2);
break;
}
}
| false | qemu | e67b3ca53a891413a33c45495ff20c2728d69722 |
26,674 | static DeviceState *qbus_find_dev(BusState *bus, char *elem)
{
DeviceState *dev;
/*
* try to match in order:
* (1) instance id, if present
* (2) driver name
* (3) driver alias, if present
*/
LIST_FOREACH(dev, &bus->children, sibling) {
if (dev->id && strcmp(dev->id, elem) == 0) {
return dev;
}
}
LIST_FOREACH(dev, &bus->children, sibling) {
if (strcmp(dev->info->name, elem) == 0) {
return dev;
}
}
LIST_FOREACH(dev, &bus->children, sibling) {
if (dev->info->alias && strcmp(dev->info->alias, elem) == 0) {
return dev;
}
}
return NULL;
}
| false | qemu | 72cf2d4f0e181d0d3a3122e04129c58a95da713e |
26,675 | void scsi_req_cancel_async(SCSIRequest *req, Notifier *notifier)
{
trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
if (notifier) {
notifier_list_add(&req->cancel_notifiers, notifier);
}
if (req->io_canceled) {
return;
}
scsi_req_ref(req);
scsi_req_dequeue(req);
req->io_canceled = true;
if (req->aiocb) {
bdrv_aio_cancel_async(req->aiocb);
}
}
| false | qemu | 4be746345f13e99e468c60acbd3a355e8183e3ce |
26,677 | static inline void temp_save(TCGContext *s, TCGTemp *ts,
TCGRegSet allocated_regs)
{
#ifdef USE_LIVENESS_ANALYSIS
/* ??? Liveness does not yet incorporate indirect bases. */
if (!ts->indirect_base) {
/* The liveness analysis already ensures that globals are back
in memory. Keep an assert for safety. */
tcg_debug_assert(ts->val_type == TEMP_VAL_MEM || ts->fixed_reg);
return;
}
#endif
temp_sync(s, ts, allocated_regs);
temp_dead(s, ts);
}
| false | qemu | eabb7b91b36b202b4dac2df2d59d698e3aff197a |
26,678 | static int mov_read_wfex(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
AVStream *st;
if (c->fc->nb_streams < 1)
return 0;
st = c->fc->streams[c->fc->nb_streams-1];
ff_get_wav_header(pb, st->codec, atom.size);
return 0;
}
| false | FFmpeg | 2007082d2db25f9305b8a345798b840ea7784fdb |
26,679 | static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
target_phys_addr_t start_addr = section->offset_within_address_space;
ram_addr_t size = section->size;
target_phys_addr_t addr;
uint16_t section_index = phys_section_add(section);
assert(size);
addr = start_addr;
phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
section_index);
}
| false | qemu | a8170e5e97ad17ca169c64ba87ae2f53850dab4c |
26,681 | static int IRQ_get_next(OpenPICState *opp, IRQQueue *q)
{
if (q->next == -1) {
/* XXX: optimize */
IRQ_check(opp, q);
}
return q->next;
}
| false | qemu | 3c94378e2c500b6211e95d7457f4a9959955c3d1 |
26,682 | static int enable_write_target(BDRVVVFATState *s)
{
BlockDriver *bdrv_qcow;
QEMUOptionParameter *options;
Error *local_err = NULL;
int ret;
int size = sector2cluster(s, s->sector_count);
s->used_clusters = calloc(size, 1);
array_init(&(s->commits), sizeof(commit_t));
s->qcow_filename = g_malloc(1024);
ret = get_tmp_filename(s->qcow_filename, 1024);
if (ret < 0) {
goto err;
}
bdrv_qcow = bdrv_find_format("qcow");
options = parse_option_parameters("", bdrv_qcow->create_options, NULL);
set_option_parameter_int(options, BLOCK_OPT_SIZE, s->sector_count * 512);
set_option_parameter(options, BLOCK_OPT_BACKING_FILE, "fat:");
ret = bdrv_create(bdrv_qcow, s->qcow_filename, options, &local_err);
if (ret < 0) {
qerror_report_err(local_err);
error_free(local_err);
goto err;
}
s->qcow = NULL;
ret = bdrv_open(&s->qcow, s->qcow_filename, NULL, NULL,
BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH, bdrv_qcow,
&local_err);
if (ret < 0) {
qerror_report_err(local_err);
error_free(local_err);
goto err;
}
#ifndef _WIN32
unlink(s->qcow_filename);
#endif
s->bs->backing_hd = bdrv_new("");
s->bs->backing_hd->drv = &vvfat_write_target;
s->bs->backing_hd->opaque = g_malloc(sizeof(void*));
*(void**)s->bs->backing_hd->opaque = s;
return 0;
err:
g_free(s->qcow_filename);
s->qcow_filename = NULL;
return ret;
}
| false | qemu | 98522f63f40adaebc412481e1d2e9170160d4539 |
26,683 | static void dump_json_image_info_list(ImageInfoList *list)
{
QString *str;
QmpOutputVisitor *ov = qmp_output_visitor_new();
QObject *obj;
visit_type_ImageInfoList(qmp_output_get_visitor(ov), NULL, &list,
&error_abort);
obj = qmp_output_get_qobject(ov);
str = qobject_to_json_pretty(obj);
assert(str != NULL);
printf("%s\n", qstring_get_str(str));
qobject_decref(obj);
qmp_output_visitor_cleanup(ov);
QDECREF(str);
}
| false | qemu | 1830f22a6777cedaccd67a08f675d30f7a85ebfd |
26,684 | static int get_segment32(CPUPPCState *env, struct mmu_ctx_hash32 *ctx,
target_ulong eaddr, int rw, int type)
{
hwaddr hash;
target_ulong vsid;
int ds, pr, target_page_bits;
int ret, ret2;
target_ulong sr, pgidx;
pr = msr_pr;
ctx->eaddr = eaddr;
sr = env->sr[eaddr >> 28];
ctx->key = (((sr & SR32_KP) && (pr != 0)) ||
((sr & SR32_KS) && (pr == 0))) ? 1 : 0;
ds = !!(sr & SR32_T);
ctx->nx = !!(sr & SR32_NX);
vsid = sr & SR32_VSID;
target_page_bits = TARGET_PAGE_BITS;
LOG_MMU("Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx " nip="
TARGET_FMT_lx " lr=" TARGET_FMT_lx
" ir=%d dr=%d pr=%d %d t=%d\n",
eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir,
(int)msr_dr, pr != 0 ? 1 : 0, rw, type);
pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits;
hash = vsid ^ pgidx;
ctx->ptem = (vsid << 7) | (pgidx >> 10);
LOG_MMU("pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n",
ctx->key, ds, ctx->nx, vsid);
ret = -1;
if (!ds) {
/* Check if instruction fetch is allowed, if needed */
if (type != ACCESS_CODE || ctx->nx == 0) {
/* Page address translation */
LOG_MMU("htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
" hash " TARGET_FMT_plx "\n",
env->htab_base, env->htab_mask, hash);
ctx->hash[0] = hash;
ctx->hash[1] = ~hash;
/* Initialize real address with an invalid value */
ctx->raddr = (hwaddr)-1ULL;
LOG_MMU("0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
" vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
" hash=" TARGET_FMT_plx "\n",
env->htab_base, env->htab_mask, vsid, ctx->ptem,
ctx->hash[0]);
/* Primary table lookup */
ret = find_pte32(env, ctx, 0, rw, type, target_page_bits);
if (ret < 0) {
/* Secondary table lookup */
LOG_MMU("1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
" vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
" hash=" TARGET_FMT_plx "\n", env->htab_base,
env->htab_mask, vsid, ctx->ptem, ctx->hash[1]);
ret2 = find_pte32(env, ctx, 1, rw, type,
target_page_bits);
if (ret2 != -1) {
ret = ret2;
}
}
#if defined(DUMP_PAGE_TABLES)
if (qemu_log_enabled()) {
hwaddr curaddr;
uint32_t a0, a1, a2, a3;
qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx
"\n", sdr, mask + 0x80);
for (curaddr = sdr; curaddr < (sdr + mask + 0x80);
curaddr += 16) {
a0 = ldl_phys(curaddr);
a1 = ldl_phys(curaddr + 4);
a2 = ldl_phys(curaddr + 8);
a3 = ldl_phys(curaddr + 12);
if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) {
qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n",
curaddr, a0, a1, a2, a3);
}
}
}
#endif
} else {
LOG_MMU("No access allowed\n");
ret = -3;
}
} else {
target_ulong sr;
LOG_MMU("direct store...\n");
/* Direct-store segment : absolutely *BUGGY* for now */
/* Direct-store implies a 32-bit MMU.
* Check the Segment Register's bus unit ID (BUID).
*/
sr = env->sr[eaddr >> 28];
if ((sr & 0x1FF00000) >> 20 == 0x07f) {
/* Memory-forced I/O controller interface access */
/* If T=1 and BUID=x'07F', the 601 performs a memory access
* to SR[28-31] LA[4-31], bypassing all protection mechanisms.
*/
ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
return 0;
}
switch (type) {
case ACCESS_INT:
/* Integer load/store : only access allowed */
break;
case ACCESS_CODE:
/* No code fetch is allowed in direct-store areas */
return -4;
case ACCESS_FLOAT:
/* Floating point load/store */
return -4;
case ACCESS_RES:
/* lwarx, ldarx or srwcx. */
return -4;
case ACCESS_CACHE:
/* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */
/* Should make the instruction do no-op.
* As it already do no-op, it's quite easy :-)
*/
ctx->raddr = eaddr;
return 0;
case ACCESS_EXT:
/* eciwx or ecowx */
return -4;
default:
qemu_log("ERROR: instruction should not need "
"address translation\n");
return -4;
}
if ((rw == 1 || ctx->key != 1) && (rw == 0 || ctx->key != 0)) {
ctx->raddr = eaddr;
ret = 2;
} else {
ret = -2;
}
}
return ret;
}
| false | qemu | 91cda45b69e45a089f9989979a65db3f710c9925 |
26,685 | static int dmg_read_mish_block(BDRVDMGState *s, DmgHeaderState *ds,
uint8_t *buffer, uint32_t count)
{
uint32_t type, i;
int ret;
size_t new_size;
uint32_t chunk_count;
int64_t offset = 0;
uint64_t data_offset;
uint64_t in_offset = ds->data_fork_offset;
uint64_t out_offset;
type = buff_read_uint32(buffer, offset);
/* skip data that is not a valid MISH block (invalid magic or too small) */
if (type != 0x6d697368 || count < 244) {
/* assume success for now */
return 0;
}
/* chunk offsets are relative to this sector number */
out_offset = buff_read_uint64(buffer, offset + 8);
/* location in data fork for (compressed) blob (in bytes) */
data_offset = buff_read_uint64(buffer, offset + 0x18);
in_offset += data_offset;
/* move to begin of chunk entries */
offset += 204;
chunk_count = (count - 204) / 40;
new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
s->types = g_realloc(s->types, new_size / 2);
s->offsets = g_realloc(s->offsets, new_size);
s->lengths = g_realloc(s->lengths, new_size);
s->sectors = g_realloc(s->sectors, new_size);
s->sectorcounts = g_realloc(s->sectorcounts, new_size);
for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
s->types[i] = buff_read_uint32(buffer, offset);
offset += 4;
if (s->types[i] != 0x80000005 && s->types[i] != 1 &&
s->types[i] != 2) {
chunk_count--;
i--;
offset += 36;
continue;
}
offset += 4;
s->sectors[i] = buff_read_uint64(buffer, offset);
s->sectors[i] += out_offset;
offset += 8;
s->sectorcounts[i] = buff_read_uint64(buffer, offset);
offset += 8;
if (s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
error_report("sector count %" PRIu64 " for chunk %" PRIu32
" is larger than max (%u)",
s->sectorcounts[i], i, DMG_SECTORCOUNTS_MAX);
ret = -EINVAL;
goto fail;
}
s->offsets[i] = buff_read_uint64(buffer, offset);
s->offsets[i] += in_offset;
offset += 8;
s->lengths[i] = buff_read_uint64(buffer, offset);
offset += 8;
if (s->lengths[i] > DMG_LENGTHS_MAX) {
error_report("length %" PRIu64 " for chunk %" PRIu32
" is larger than max (%u)",
s->lengths[i], i, DMG_LENGTHS_MAX);
ret = -EINVAL;
goto fail;
}
update_max_chunk_size(s, i, &ds->max_compressed_size,
&ds->max_sectors_per_chunk);
}
s->n_chunks += chunk_count;
return 0;
fail:
return ret;
}
| false | qemu | a8b10c6ead7f62e8eadbdaf944f371889c3c4c29 |
26,686 | static always_inline void gen_qemu_ldf (TCGv t0, TCGv t1, int flags)
{
TCGv tmp = tcg_temp_new(TCG_TYPE_I32);
tcg_gen_qemu_ld32u(tmp, t1, flags);
tcg_gen_helper_1_1(helper_memory_to_f, t0, tmp);
tcg_temp_free(tmp);
}
| false | qemu | a7812ae412311d7d47f8aa85656faadac9d64b56 |
26,687 | static int local_chmod(FsContext *fs_ctx, V9fsPath *fs_path, FsCred *credp)
{
char buffer[PATH_MAX];
char *path = fs_path->data;
if (fs_ctx->fs_sm == SM_MAPPED) {
return local_set_xattr(rpath(fs_ctx, path, buffer), credp);
} else if ((fs_ctx->fs_sm == SM_PASSTHROUGH) ||
(fs_ctx->fs_sm == SM_NONE)) {
return chmod(rpath(fs_ctx, path, buffer), credp->fc_mode);
}
return -1;
}
| false | qemu | b97400caef60ccfb0bc81c59f8bd824c43a0d6c8 |
26,688 | static inline PageDesc *page_find(target_ulong index)
{
PageDesc *p;
p = l1_map[index >> L2_BITS];
if (!p)
return 0;
return p + (index & (L2_SIZE - 1));
}
| true | qemu | 434929bf11f0573d953c24287badbc2431a042ef |
26,689 | static int pcm_bluray_parse_header(AVCodecContext *avctx,
const uint8_t *header)
{
static const uint8_t bits_per_samples[4] = { 0, 16, 20, 24 };
static const uint32_t channel_layouts[16] = {
0, AV_CH_LAYOUT_MONO, 0, AV_CH_LAYOUT_STEREO, AV_CH_LAYOUT_SURROUND,
AV_CH_LAYOUT_2_1, AV_CH_LAYOUT_4POINT0, AV_CH_LAYOUT_2_2, AV_CH_LAYOUT_5POINT0,
AV_CH_LAYOUT_5POINT1, AV_CH_LAYOUT_7POINT0, AV_CH_LAYOUT_7POINT1, 0, 0, 0, 0
};
static const uint8_t channels[16] = {
0, 1, 0, 2, 3, 3, 4, 4, 5, 6, 7, 8, 0, 0, 0, 0
};
uint8_t channel_layout = header[2] >> 4;
if (avctx->debug & FF_DEBUG_PICT_INFO)
av_dlog(avctx, "pcm_bluray_parse_header: header = %02x%02x%02x%02x\n",
header[0], header[1], header[2], header[3]);
/* get the sample depth and derive the sample format from it */
avctx->bits_per_coded_sample = bits_per_samples[header[3] >> 6];
if (!avctx->bits_per_coded_sample) {
av_log(avctx, AV_LOG_ERROR, "unsupported sample depth (0)\n");
return -1;
}
avctx->sample_fmt = avctx->bits_per_coded_sample == 16 ? AV_SAMPLE_FMT_S16 :
AV_SAMPLE_FMT_S32;
if (avctx->sample_fmt == AV_SAMPLE_FMT_S32)
avctx->bits_per_raw_sample = avctx->bits_per_coded_sample;
/* get the sample rate. Not all values are known or exist. */
switch (header[2] & 0x0f) {
case 1:
avctx->sample_rate = 48000;
break;
case 4:
avctx->sample_rate = 96000;
break;
case 5:
avctx->sample_rate = 192000;
break;
default:
avctx->sample_rate = 0;
av_log(avctx, AV_LOG_ERROR, "unsupported sample rate (%d)\n",
header[2] & 0x0f);
return -1;
}
/*
* get the channel number (and mapping). Not all values are known or exist.
* It must be noted that the number of channels in the MPEG stream can
* differ from the actual meaningful number, e.g. mono audio still has two
* channels, one being empty.
*/
avctx->channel_layout = channel_layouts[channel_layout];
avctx->channels = channels[channel_layout];
if (!avctx->channels) {
av_log(avctx, AV_LOG_ERROR, "unsupported channel configuration (%d)\n",
channel_layout);
return -1;
}
avctx->bit_rate = avctx->channels * avctx->sample_rate *
avctx->bits_per_coded_sample;
if (avctx->debug & FF_DEBUG_PICT_INFO)
av_dlog(avctx,
"pcm_bluray_parse_header: %d channels, %d bits per sample, %d kHz, %d kbit\n",
avctx->channels, avctx->bits_per_coded_sample,
avctx->sample_rate, avctx->bit_rate);
return 0;
}
| false | FFmpeg | cd0cfdc0a74cbf45f0d00b65faaf3cf5bd93c016 |
26,690 | static int protocol_client_auth_sasl_mechname_len(VncState *vs, uint8_t *data, size_t len)
{
uint32_t mechlen = read_u32(data, 0);
VNC_DEBUG("Got client mechname len %d\n", mechlen);
if (mechlen > 100) {
VNC_DEBUG("Too long SASL mechname data %d\n", mechlen);
vnc_client_error(vs);
return -1;
}
if (mechlen < 1) {
VNC_DEBUG("Too short SASL mechname %d\n", mechlen);
vnc_client_error(vs);
return -1;
}
vnc_read_when(vs, protocol_client_auth_sasl_mechname,mechlen);
return 0;
}
| true | qemu | 7364dbdabb7824d5bde1e341bb6d928282f01c83 |
26,691 | static inline int ultrasparc_tag_match(SparcTLBEntry *tlb,
uint64_t address, uint64_t context,
target_phys_addr_t *physical,
int is_nucleus)
{
uint64_t mask;
switch ((tlb->tte >> 61) & 3) {
default:
case 0x0: // 8k
mask = 0xffffffffffffe000ULL;
break;
case 0x1: // 64k
mask = 0xffffffffffff0000ULL;
break;
case 0x2: // 512k
mask = 0xfffffffffff80000ULL;
break;
case 0x3: // 4M
mask = 0xffffffffffc00000ULL;
break;
}
// valid, context match, virtual address match?
if (TTE_IS_VALID(tlb->tte) &&
((is_nucleus && compare_masked(0, tlb->tag, 0x1fff))
|| TTE_IS_GLOBAL(tlb->tte) || compare_masked(context, tlb->tag, 0x1fff))
&& compare_masked(address, tlb->tag, mask))
{
// decode physical address
*physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL;
return 1;
}
return 0;
}
| true | qemu | 299b520cd4092be3c53f8380b81315c33927d9d3 |
26,692 | int ff_dxva2_commit_buffer(AVCodecContext *avctx,
AVDXVAContext *ctx,
DECODER_BUFFER_DESC *dsc,
unsigned type, const void *data, unsigned size,
unsigned mb_count)
{
void *dxva_data;
unsigned dxva_size;
int result;
HRESULT hr;
#if CONFIG_D3D11VA
if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD)
hr = ID3D11VideoContext_GetDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context,
D3D11VA_CONTEXT(ctx)->decoder,
type,
&dxva_size, &dxva_data);
#endif
#if CONFIG_DXVA2
if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD)
hr = IDirectXVideoDecoder_GetBuffer(DXVA2_CONTEXT(ctx)->decoder, type,
&dxva_data, &dxva_size);
#endif
if (FAILED(hr)) {
av_log(avctx, AV_LOG_ERROR, "Failed to get a buffer for %u: 0x%lx\n",
type, hr);
return -1;
}
if (size <= dxva_size) {
memcpy(dxva_data, data, size);
#if CONFIG_D3D11VA
if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD) {
D3D11_VIDEO_DECODER_BUFFER_DESC *dsc11 = dsc;
memset(dsc11, 0, sizeof(*dsc11));
dsc11->BufferType = type;
dsc11->DataSize = size;
dsc11->NumMBsInBuffer = mb_count;
}
#endif
#if CONFIG_DXVA2
if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
DXVA2_DecodeBufferDesc *dsc2 = dsc;
memset(dsc2, 0, sizeof(*dsc2));
dsc2->CompressedBufferType = type;
dsc2->DataSize = size;
dsc2->NumMBsInBuffer = mb_count;
}
#endif
result = 0;
} else {
av_log(avctx, AV_LOG_ERROR, "Buffer for type %u was too small\n", type);
result = -1;
}
#if CONFIG_D3D11VA
if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD)
hr = ID3D11VideoContext_ReleaseDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context, D3D11VA_CONTEXT(ctx)->decoder, type);
#endif
#if CONFIG_DXVA2
if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD)
hr = IDirectXVideoDecoder_ReleaseBuffer(DXVA2_CONTEXT(ctx)->decoder, type);
#endif
if (FAILED(hr)) {
av_log(avctx, AV_LOG_ERROR,
"Failed to release buffer type %u: 0x%lx\n",
type, hr);
result = -1;
}
return result;
}
| true | FFmpeg | 7ed5d78d619e45b46ba003e8014767b05b73b7d2 |
26,693 | static void new_video_stream(AVFormatContext *oc, int file_idx)
{
AVStream *st;
AVOutputStream *ost;
AVCodecContext *video_enc;
enum CodecID codec_id;
AVCodec *codec= NULL;
st = av_new_stream(oc, oc->nb_streams < nb_streamid_map ? streamid_map[oc->nb_streams] : 0);
if (!st) {
fprintf(stderr, "Could not alloc stream\n");
ffmpeg_exit(1);
}
ost = new_output_stream(oc, file_idx);
output_codecs = grow_array(output_codecs, sizeof(*output_codecs), &nb_output_codecs, nb_output_codecs + 1);
if(!video_stream_copy){
if (video_codec_name) {
codec_id = find_codec_or_die(video_codec_name, AVMEDIA_TYPE_VIDEO, 1,
avcodec_opts[AVMEDIA_TYPE_VIDEO]->strict_std_compliance);
codec = avcodec_find_encoder_by_name(video_codec_name);
output_codecs[nb_output_codecs-1] = codec;
} else {
codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, AVMEDIA_TYPE_VIDEO);
codec = avcodec_find_encoder(codec_id);
}
}
avcodec_get_context_defaults3(st->codec, codec);
ost->bitstream_filters = video_bitstream_filters;
video_bitstream_filters= NULL;
avcodec_thread_init(st->codec, thread_count);
video_enc = st->codec;
if(video_codec_tag)
video_enc->codec_tag= video_codec_tag;
if( (video_global_header&1)
|| (video_global_header==0 && (oc->oformat->flags & AVFMT_GLOBALHEADER))){
video_enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
avcodec_opts[AVMEDIA_TYPE_VIDEO]->flags|= CODEC_FLAG_GLOBAL_HEADER;
}
if(video_global_header&2){
video_enc->flags2 |= CODEC_FLAG2_LOCAL_HEADER;
avcodec_opts[AVMEDIA_TYPE_VIDEO]->flags2|= CODEC_FLAG2_LOCAL_HEADER;
}
if (video_stream_copy) {
st->stream_copy = 1;
video_enc->codec_type = AVMEDIA_TYPE_VIDEO;
video_enc->sample_aspect_ratio =
st->sample_aspect_ratio = av_d2q(frame_aspect_ratio*frame_height/frame_width, 255);
} else {
const char *p;
int i;
AVRational fps= frame_rate.num ? frame_rate : (AVRational){25,1};
video_enc->codec_id = codec_id;
set_context_opts(video_enc, avcodec_opts[AVMEDIA_TYPE_VIDEO], AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM, codec);
if (codec && codec->supported_framerates && !force_fps)
fps = codec->supported_framerates[av_find_nearest_q_idx(fps, codec->supported_framerates)];
video_enc->time_base.den = fps.num;
video_enc->time_base.num = fps.den;
video_enc->width = frame_width;
video_enc->height = frame_height;
video_enc->sample_aspect_ratio = av_d2q(frame_aspect_ratio*video_enc->height/video_enc->width, 255);
video_enc->pix_fmt = frame_pix_fmt;
st->sample_aspect_ratio = video_enc->sample_aspect_ratio;
choose_pixel_fmt(st, codec);
if (intra_only)
video_enc->gop_size = 0;
if (video_qscale || same_quality) {
video_enc->flags |= CODEC_FLAG_QSCALE;
video_enc->global_quality=
st->quality = FF_QP2LAMBDA * video_qscale;
}
if(intra_matrix)
video_enc->intra_matrix = intra_matrix;
if(inter_matrix)
video_enc->inter_matrix = inter_matrix;
p= video_rc_override_string;
for(i=0; p; i++){
int start, end, q;
int e=sscanf(p, "%d,%d,%d", &start, &end, &q);
if(e!=3){
fprintf(stderr, "error parsing rc_override\n");
ffmpeg_exit(1);
}
video_enc->rc_override=
av_realloc(video_enc->rc_override,
sizeof(RcOverride)*(i+1));
video_enc->rc_override[i].start_frame= start;
video_enc->rc_override[i].end_frame = end;
if(q>0){
video_enc->rc_override[i].qscale= q;
video_enc->rc_override[i].quality_factor= 1.0;
}
else{
video_enc->rc_override[i].qscale= 0;
video_enc->rc_override[i].quality_factor= -q/100.0;
}
p= strchr(p, '/');
if(p) p++;
}
video_enc->rc_override_count=i;
if (!video_enc->rc_initial_buffer_occupancy)
video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size*3/4;
video_enc->me_threshold= me_threshold;
video_enc->intra_dc_precision= intra_dc_precision - 8;
if (do_psnr)
video_enc->flags|= CODEC_FLAG_PSNR;
/* two pass mode */
if (do_pass) {
if (do_pass == 1) {
video_enc->flags |= CODEC_FLAG_PASS1;
} else {
video_enc->flags |= CODEC_FLAG_PASS2;
}
}
if (forced_key_frames)
parse_forced_key_frames(forced_key_frames, ost, video_enc);
}
if (video_language) {
av_metadata_set2(&st->metadata, "language", video_language, 0);
av_freep(&video_language);
}
/* reset some key parameters */
video_disable = 0;
av_freep(&video_codec_name);
av_freep(&forced_key_frames);
video_stream_copy = 0;
frame_pix_fmt = PIX_FMT_NONE;
}
| true | FFmpeg | 4618637aca3b771b0bfb8fe15f3a080dacf9f0c0 |
26,694 | static ExitStatus trans_fop_wew_0e(DisasContext *ctx, uint32_t insn,
const DisasInsn *di)
{
unsigned rt = assemble_rt64(insn);
unsigned ra = assemble_ra64(insn);
return do_fop_wew(ctx, rt, ra, di->f_wew);
}
| true | qemu | eff235eb2bcd7092901f4698a7907e742f3b7f2f |
26,695 | static int alac_decode_frame(AVCodecContext *avctx,
void *outbuffer, int *outputsize,
uint8_t *inbuffer, int input_buffer_size)
{
ALACContext *alac = avctx->priv_data;
int channels;
int32_t outputsamples;
/* short-circuit null buffers */
if (!inbuffer || !input_buffer_size)
return input_buffer_size;
/* initialize from the extradata */
if (!alac->context_initialized) {
if (alac->avctx->extradata_size != ALAC_EXTRADATA_SIZE) {
av_log(avctx, AV_LOG_ERROR, "alac: expected %d extradata bytes\n",
ALAC_EXTRADATA_SIZE);
return input_buffer_size;
}
alac_set_info(alac);
alac->context_initialized = 1;
}
outputsamples = alac->setinfo_max_samples_per_frame;
init_get_bits(&alac->gb, inbuffer, input_buffer_size * 8);
channels = get_bits(&alac->gb, 3);
*outputsize = outputsamples * alac->bytespersample;
switch(channels) {
case 0: { /* 1 channel */
int hassize;
int isnotcompressed;
int readsamplesize;
int wasted_bytes;
int ricemodifier;
/* 2^result = something to do with output waiting.
* perhaps matters if we read > 1 frame in a pass?
*/
get_bits(&alac->gb, 4);
get_bits(&alac->gb, 12); /* unknown, skip 12 bits */
hassize = get_bits(&alac->gb, 1); /* the output sample size is stored soon */
wasted_bytes = get_bits(&alac->gb, 2); /* unknown ? */
isnotcompressed = get_bits(&alac->gb, 1); /* whether the frame is compressed */
if (hassize) {
/* now read the number of samples,
* as a 32bit integer */
outputsamples = get_bits(&alac->gb, 32);
*outputsize = outputsamples * alac->bytespersample;
}
readsamplesize = alac->setinfo_sample_size - (wasted_bytes * 8);
if (!isnotcompressed) {
/* so it is compressed */
int16_t predictor_coef_table[32];
int predictor_coef_num;
int prediction_type;
int prediction_quantitization;
int i;
/* FIXME: skip 16 bits, not sure what they are. seem to be used in
* two channel case */
get_bits(&alac->gb, 8);
get_bits(&alac->gb, 8);
prediction_type = get_bits(&alac->gb, 4);
prediction_quantitization = get_bits(&alac->gb, 4);
ricemodifier = get_bits(&alac->gb, 3);
predictor_coef_num = get_bits(&alac->gb, 5);
/* read the predictor table */
for (i = 0; i < predictor_coef_num; i++) {
predictor_coef_table[i] = (int16_t)get_bits(&alac->gb, 16);
}
if (wasted_bytes) {
/* these bytes seem to have something to do with
* > 2 channel files.
*/
av_log(avctx, AV_LOG_ERROR, "FIXME: unimplemented, unhandling of wasted_bytes\n");
}
bastardized_rice_decompress(alac,
alac->predicterror_buffer_a,
outputsamples,
readsamplesize,
alac->setinfo_rice_initialhistory,
alac->setinfo_rice_kmodifier,
ricemodifier * alac->setinfo_rice_historymult / 4,
(1 << alac->setinfo_rice_kmodifier) - 1);
if (prediction_type == 0) {
/* adaptive fir */
predictor_decompress_fir_adapt(alac->predicterror_buffer_a,
alac->outputsamples_buffer_a,
outputsamples,
readsamplesize,
predictor_coef_table,
predictor_coef_num,
prediction_quantitization);
} else {
av_log(avctx, AV_LOG_ERROR, "FIXME: unhandled prediction type: %i\n", prediction_type);
/* i think the only other prediction type (or perhaps this is just a
* boolean?) runs adaptive fir twice.. like:
* predictor_decompress_fir_adapt(predictor_error, tempout, ...)
* predictor_decompress_fir_adapt(predictor_error, outputsamples ...)
* little strange..
*/
}
} else {
/* not compressed, easy case */
if (readsamplesize <= 16) {
int i;
for (i = 0; i < outputsamples; i++) {
int32_t audiobits = get_bits(&alac->gb, readsamplesize);
audiobits = SIGN_EXTENDED32(audiobits, readsamplesize);
alac->outputsamples_buffer_a[i] = audiobits;
}
} else {
int i;
for (i = 0; i < outputsamples; i++) {
int32_t audiobits;
audiobits = get_bits(&alac->gb, 16);
/* special case of sign extension..
* as we'll be ORing the low 16bits into this */
audiobits = audiobits << 16;
audiobits = audiobits >> (32 - readsamplesize);
audiobits |= get_bits(&alac->gb, readsamplesize - 16);
alac->outputsamples_buffer_a[i] = audiobits;
}
}
/* wasted_bytes = 0; // unused */
}
switch(alac->setinfo_sample_size) {
case 16: {
int i;
for (i = 0; i < outputsamples; i++) {
int16_t sample = alac->outputsamples_buffer_a[i];
((int16_t*)outbuffer)[i * alac->numchannels] = sample;
}
break;
}
case 20:
case 24:
case 32:
av_log(avctx, AV_LOG_ERROR, "FIXME: unimplemented sample size %i\n", alac->setinfo_sample_size);
break;
default:
break;
}
break;
}
case 1: { /* 2 channels */
int hassize;
int isnotcompressed;
int readsamplesize;
int wasted_bytes;
uint8_t interlacing_shift;
uint8_t interlacing_leftweight;
/* 2^result = something to do with output waiting.
* perhaps matters if we read > 1 frame in a pass?
*/
get_bits(&alac->gb, 4);
get_bits(&alac->gb, 12); /* unknown, skip 12 bits */
hassize = get_bits(&alac->gb, 1); /* the output sample size is stored soon */
wasted_bytes = get_bits(&alac->gb, 2); /* unknown ? */
isnotcompressed = get_bits(&alac->gb, 1); /* whether the frame is compressed */
if (hassize) {
/* now read the number of samples,
* as a 32bit integer */
outputsamples = get_bits(&alac->gb, 32);
*outputsize = outputsamples * alac->bytespersample;
}
readsamplesize = alac->setinfo_sample_size - (wasted_bytes * 8) + 1;
if (!isnotcompressed) {
/* compressed */
int16_t predictor_coef_table_a[32];
int predictor_coef_num_a;
int prediction_type_a;
int prediction_quantitization_a;
int ricemodifier_a;
int16_t predictor_coef_table_b[32];
int predictor_coef_num_b;
int prediction_type_b;
int prediction_quantitization_b;
int ricemodifier_b;
int i;
interlacing_shift = get_bits(&alac->gb, 8);
interlacing_leftweight = get_bits(&alac->gb, 8);
/******** channel 1 ***********/
prediction_type_a = get_bits(&alac->gb, 4);
prediction_quantitization_a = get_bits(&alac->gb, 4);
ricemodifier_a = get_bits(&alac->gb, 3);
predictor_coef_num_a = get_bits(&alac->gb, 5);
/* read the predictor table */
for (i = 0; i < predictor_coef_num_a; i++) {
predictor_coef_table_a[i] = (int16_t)get_bits(&alac->gb, 16);
}
/******** channel 2 *********/
prediction_type_b = get_bits(&alac->gb, 4);
prediction_quantitization_b = get_bits(&alac->gb, 4);
ricemodifier_b = get_bits(&alac->gb, 3);
predictor_coef_num_b = get_bits(&alac->gb, 5);
/* read the predictor table */
for (i = 0; i < predictor_coef_num_b; i++) {
predictor_coef_table_b[i] = (int16_t)get_bits(&alac->gb, 16);
}
/*********************/
if (wasted_bytes) {
/* see mono case */
av_log(avctx, AV_LOG_ERROR, "FIXME: unimplemented, unhandling of wasted_bytes\n");
}
/* channel 1 */
bastardized_rice_decompress(alac,
alac->predicterror_buffer_a,
outputsamples,
readsamplesize,
alac->setinfo_rice_initialhistory,
alac->setinfo_rice_kmodifier,
ricemodifier_a * alac->setinfo_rice_historymult / 4,
(1 << alac->setinfo_rice_kmodifier) - 1);
if (prediction_type_a == 0) {
/* adaptive fir */
predictor_decompress_fir_adapt(alac->predicterror_buffer_a,
alac->outputsamples_buffer_a,
outputsamples,
readsamplesize,
predictor_coef_table_a,
predictor_coef_num_a,
prediction_quantitization_a);
} else {
/* see mono case */
av_log(avctx, AV_LOG_ERROR, "FIXME: unhandled prediction type: %i\n", prediction_type_a);
}
/* channel 2 */
bastardized_rice_decompress(alac,
alac->predicterror_buffer_b,
outputsamples,
readsamplesize,
alac->setinfo_rice_initialhistory,
alac->setinfo_rice_kmodifier,
ricemodifier_b * alac->setinfo_rice_historymult / 4,
(1 << alac->setinfo_rice_kmodifier) - 1);
if (prediction_type_b == 0) {
/* adaptive fir */
predictor_decompress_fir_adapt(alac->predicterror_buffer_b,
alac->outputsamples_buffer_b,
outputsamples,
readsamplesize,
predictor_coef_table_b,
predictor_coef_num_b,
prediction_quantitization_b);
} else {
av_log(avctx, AV_LOG_ERROR, "FIXME: unhandled prediction type: %i\n", prediction_type_b);
}
} else {
/* not compressed, easy case */
if (alac->setinfo_sample_size <= 16) {
int i;
for (i = 0; i < outputsamples; i++) {
int32_t audiobits_a, audiobits_b;
audiobits_a = get_bits(&alac->gb, alac->setinfo_sample_size);
audiobits_b = get_bits(&alac->gb, alac->setinfo_sample_size);
audiobits_a = SIGN_EXTENDED32(audiobits_a, alac->setinfo_sample_size);
audiobits_b = SIGN_EXTENDED32(audiobits_b, alac->setinfo_sample_size);
alac->outputsamples_buffer_a[i] = audiobits_a;
alac->outputsamples_buffer_b[i] = audiobits_b;
}
} else {
int i;
for (i = 0; i < outputsamples; i++) {
int32_t audiobits_a, audiobits_b;
audiobits_a = get_bits(&alac->gb, 16);
audiobits_a = audiobits_a << 16;
audiobits_a = audiobits_a >> (32 - alac->setinfo_sample_size);
audiobits_a |= get_bits(&alac->gb, alac->setinfo_sample_size - 16);
audiobits_b = get_bits(&alac->gb, 16);
audiobits_b = audiobits_b << 16;
audiobits_b = audiobits_b >> (32 - alac->setinfo_sample_size);
audiobits_b |= get_bits(&alac->gb, alac->setinfo_sample_size - 16);
alac->outputsamples_buffer_a[i] = audiobits_a;
alac->outputsamples_buffer_b[i] = audiobits_b;
}
}
/* wasted_bytes = 0; */
interlacing_shift = 0;
interlacing_leftweight = 0;
}
switch(alac->setinfo_sample_size) {
case 16: {
deinterlace_16(alac->outputsamples_buffer_a,
alac->outputsamples_buffer_b,
(int16_t*)outbuffer,
alac->numchannels,
outputsamples,
interlacing_shift,
interlacing_leftweight);
break;
}
case 20:
case 24:
case 32:
av_log(avctx, AV_LOG_ERROR, "FIXME: unimplemented sample size %i\n", alac->setinfo_sample_size);
break;
default:
break;
}
break;
}
}
return input_buffer_size;
}
| true | FFmpeg | 1e25a7e7ebb55516d522a8ab1c4b7938b5060fe5 |
26,696 | static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
{
uint32_t insn, imm, shift, offset;
uint32_t rd, rn, rm, rs;
TCGv tmp;
TCGv tmp2;
TCGv tmp3;
TCGv addr;
TCGv_i64 tmp64;
int op;
int shiftop;
int conds;
int logic_cc;
if (!(arm_feature(env, ARM_FEATURE_THUMB2)
|| arm_feature (env, ARM_FEATURE_M))) {
/* Thumb-1 cores may need to treat bl and blx as a pair of
16-bit instructions to get correct prefetch abort behavior. */
insn = insn_hw1;
if ((insn & (1 << 12)) == 0) {
/* Second half of blx. */
offset = ((insn & 0x7ff) << 1);
tmp = load_reg(s, 14);
tcg_gen_addi_i32(tmp, tmp, offset);
tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
tmp2 = new_tmp();
tcg_gen_movi_i32(tmp2, s->pc | 1);
store_reg(s, 14, tmp2);
gen_bx(s, tmp);
return 0;
}
if (insn & (1 << 11)) {
/* Second half of bl. */
offset = ((insn & 0x7ff) << 1) | 1;
tmp = load_reg(s, 14);
tcg_gen_addi_i32(tmp, tmp, offset);
tmp2 = new_tmp();
tcg_gen_movi_i32(tmp2, s->pc | 1);
store_reg(s, 14, tmp2);
gen_bx(s, tmp);
return 0;
}
if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
/* Instruction spans a page boundary. Implement it as two
               16-bit instructions in case the second half causes a
prefetch abort. */
offset = ((int32_t)insn << 21) >> 9;
tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
return 0;
}
/* Fall through to 32-bit decode. */
}
insn = lduw_code(s->pc);
s->pc += 2;
insn |= (uint32_t)insn_hw1 << 16;
if ((insn & 0xf800e800) != 0xf000e800) {
ARCH(6T2);
}
rn = (insn >> 16) & 0xf;
rs = (insn >> 12) & 0xf;
rd = (insn >> 8) & 0xf;
rm = insn & 0xf;
switch ((insn >> 25) & 0xf) {
case 0: case 1: case 2: case 3:
/* 16-bit instructions. Should never happen. */
abort();
case 4:
if (insn & (1 << 22)) {
/* Other load/store, table branch. */
if (insn & 0x01200000) {
/* Load/store doubleword. */
if (rn == 15) {
addr = new_tmp();
tcg_gen_movi_i32(addr, s->pc & ~3);
} else {
addr = load_reg(s, rn);
}
offset = (insn & 0xff) * 4;
if ((insn & (1 << 23)) == 0)
offset = -offset;
if (insn & (1 << 24)) {
tcg_gen_addi_i32(addr, addr, offset);
offset = 0;
}
if (insn & (1 << 20)) {
/* ldrd */
tmp = gen_ld32(addr, IS_USER(s));
store_reg(s, rs, tmp);
tcg_gen_addi_i32(addr, addr, 4);
tmp = gen_ld32(addr, IS_USER(s));
store_reg(s, rd, tmp);
} else {
/* strd */
tmp = load_reg(s, rs);
gen_st32(tmp, addr, IS_USER(s));
tcg_gen_addi_i32(addr, addr, 4);
tmp = load_reg(s, rd);
gen_st32(tmp, addr, IS_USER(s));
}
if (insn & (1 << 21)) {
/* Base writeback. */
if (rn == 15)
goto illegal_op;
tcg_gen_addi_i32(addr, addr, offset - 4);
store_reg(s, rn, addr);
} else {
dead_tmp(addr);
}
} else if ((insn & (1 << 23)) == 0) {
/* Load/store exclusive word. */
addr = tcg_temp_local_new();
load_reg_var(s, addr, rn);
tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
if (insn & (1 << 20)) {
gen_load_exclusive(s, rs, 15, addr, 2);
} else {
gen_store_exclusive(s, rd, rs, 15, addr, 2);
}
tcg_temp_free(addr);
} else if ((insn & (1 << 6)) == 0) {
/* Table Branch. */
if (rn == 15) {
addr = new_tmp();
tcg_gen_movi_i32(addr, s->pc);
} else {
addr = load_reg(s, rn);
}
tmp = load_reg(s, rm);
tcg_gen_add_i32(addr, addr, tmp);
if (insn & (1 << 4)) {
/* tbh */
tcg_gen_add_i32(addr, addr, tmp);
dead_tmp(tmp);
tmp = gen_ld16u(addr, IS_USER(s));
} else { /* tbb */
dead_tmp(tmp);
tmp = gen_ld8u(addr, IS_USER(s));
}
dead_tmp(addr);
tcg_gen_shli_i32(tmp, tmp, 1);
tcg_gen_addi_i32(tmp, tmp, s->pc);
store_reg(s, 15, tmp);
} else {
/* Load/store exclusive byte/halfword/doubleword. */
ARCH(7);
op = (insn >> 4) & 0x3;
if (op == 2) {
goto illegal_op;
}
addr = tcg_temp_local_new();
load_reg_var(s, addr, rn);
if (insn & (1 << 20)) {
gen_load_exclusive(s, rs, rd, addr, op);
} else {
gen_store_exclusive(s, rm, rs, rd, addr, op);
}
tcg_temp_free(addr);
}
} else {
/* Load/store multiple, RFE, SRS. */
if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
/* Not available in user mode. */
if (IS_USER(s))
goto illegal_op;
if (insn & (1 << 20)) {
/* rfe */
addr = load_reg(s, rn);
if ((insn & (1 << 24)) == 0)
tcg_gen_addi_i32(addr, addr, -8);
/* Load PC into tmp and CPSR into tmp2. */
tmp = gen_ld32(addr, 0);
tcg_gen_addi_i32(addr, addr, 4);
tmp2 = gen_ld32(addr, 0);
if (insn & (1 << 21)) {
/* Base writeback. */
if (insn & (1 << 24)) {
tcg_gen_addi_i32(addr, addr, 4);
} else {
tcg_gen_addi_i32(addr, addr, -4);
}
store_reg(s, rn, addr);
} else {
dead_tmp(addr);
}
gen_rfe(s, tmp, tmp2);
} else {
/* srs */
op = (insn & 0x1f);
if (op == (env->uncached_cpsr & CPSR_M)) {
addr = load_reg(s, 13);
} else {
addr = new_tmp();
tmp = tcg_const_i32(op);
gen_helper_get_r13_banked(addr, cpu_env, tmp);
tcg_temp_free_i32(tmp);
}
if ((insn & (1 << 24)) == 0) {
tcg_gen_addi_i32(addr, addr, -8);
}
tmp = load_reg(s, 14);
gen_st32(tmp, addr, 0);
tcg_gen_addi_i32(addr, addr, 4);
tmp = new_tmp();
gen_helper_cpsr_read(tmp);
gen_st32(tmp, addr, 0);
if (insn & (1 << 21)) {
if ((insn & (1 << 24)) == 0) {
tcg_gen_addi_i32(addr, addr, -4);
} else {
tcg_gen_addi_i32(addr, addr, 4);
}
if (op == (env->uncached_cpsr & CPSR_M)) {
store_reg(s, 13, addr);
} else {
tmp = tcg_const_i32(op);
gen_helper_set_r13_banked(cpu_env, tmp, addr);
tcg_temp_free_i32(tmp);
}
} else {
dead_tmp(addr);
}
}
} else {
int i;
/* Load/store multiple. */
addr = load_reg(s, rn);
offset = 0;
for (i = 0; i < 16; i++) {
if (insn & (1 << i))
offset += 4;
}
if (insn & (1 << 24)) {
tcg_gen_addi_i32(addr, addr, -offset);
}
for (i = 0; i < 16; i++) {
if ((insn & (1 << i)) == 0)
continue;
if (insn & (1 << 20)) {
/* Load. */
tmp = gen_ld32(addr, IS_USER(s));
if (i == 15) {
gen_bx(s, tmp);
} else {
store_reg(s, i, tmp);
}
} else {
/* Store. */
tmp = load_reg(s, i);
gen_st32(tmp, addr, IS_USER(s));
}
tcg_gen_addi_i32(addr, addr, 4);
}
if (insn & (1 << 21)) {
/* Base register writeback. */
if (insn & (1 << 24)) {
tcg_gen_addi_i32(addr, addr, -offset);
}
/* Fault if writeback register is in register list. */
if (insn & (1 << rn))
goto illegal_op;
store_reg(s, rn, addr);
} else {
dead_tmp(addr);
}
}
}
break;
case 5:
op = (insn >> 21) & 0xf;
if (op == 6) {
/* Halfword pack. */
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
if (insn & (1 << 5)) {
/* pkhtb */
if (shift == 0)
shift = 31;
tcg_gen_sari_i32(tmp2, tmp2, shift);
tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
tcg_gen_ext16u_i32(tmp2, tmp2);
} else {
/* pkhbt */
if (shift)
tcg_gen_shli_i32(tmp2, tmp2, shift);
tcg_gen_ext16u_i32(tmp, tmp);
tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
}
tcg_gen_or_i32(tmp, tmp, tmp2);
dead_tmp(tmp2);
store_reg(s, rd, tmp);
} else {
/* Data processing register constant shift. */
if (rn == 15) {
tmp = new_tmp();
tcg_gen_movi_i32(tmp, 0);
} else {
tmp = load_reg(s, rn);
}
tmp2 = load_reg(s, rm);
shiftop = (insn >> 4) & 3;
shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
conds = (insn & (1 << 20)) != 0;
logic_cc = (conds && thumb2_logic_op(op));
gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
goto illegal_op;
dead_tmp(tmp2);
if (rd != 15) {
store_reg(s, rd, tmp);
} else {
dead_tmp(tmp);
}
}
break;
case 13: /* Misc data processing. */
op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
if (op < 4 && (insn & 0xf000) != 0xf000)
goto illegal_op;
switch (op) {
case 0: /* Register controlled shift. */
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
if ((insn & 0x70) != 0)
goto illegal_op;
op = (insn >> 21) & 3;
logic_cc = (insn & (1 << 20)) != 0;
gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
if (logic_cc)
gen_logic_CC(tmp);
store_reg_bx(env, s, rd, tmp);
break;
case 1: /* Sign/zero extend. */
tmp = load_reg(s, rm);
shift = (insn >> 4) & 3;
            /* ??? In many cases it's not necessary to do a
rotate, a shift is sufficient. */
if (shift != 0)
tcg_gen_rotri_i32(tmp, tmp, shift * 8);
op = (insn >> 20) & 7;
switch (op) {
case 0: gen_sxth(tmp); break;
case 1: gen_uxth(tmp); break;
case 2: gen_sxtb16(tmp); break;
case 3: gen_uxtb16(tmp); break;
case 4: gen_sxtb(tmp); break;
case 5: gen_uxtb(tmp); break;
default: goto illegal_op;
}
if (rn != 15) {
tmp2 = load_reg(s, rn);
if ((op >> 1) == 1) {
gen_add16(tmp, tmp2);
} else {
tcg_gen_add_i32(tmp, tmp, tmp2);
dead_tmp(tmp2);
}
}
store_reg(s, rd, tmp);
break;
case 2: /* SIMD add/subtract. */
op = (insn >> 20) & 7;
shift = (insn >> 4) & 7;
if ((op & 3) == 3 || (shift & 3) == 3)
goto illegal_op;
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
dead_tmp(tmp2);
store_reg(s, rd, tmp);
break;
case 3: /* Other data processing. */
op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
if (op < 4) {
/* Saturating add/subtract. */
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
if (op & 1)
gen_helper_double_saturate(tmp, tmp);
if (op & 2)
gen_helper_sub_saturate(tmp, tmp2, tmp);
else
gen_helper_add_saturate(tmp, tmp, tmp2);
dead_tmp(tmp2);
} else {
tmp = load_reg(s, rn);
switch (op) {
case 0x0a: /* rbit */
gen_helper_rbit(tmp, tmp);
break;
case 0x08: /* rev */
tcg_gen_bswap32_i32(tmp, tmp);
break;
case 0x09: /* rev16 */
gen_rev16(tmp);
break;
case 0x0b: /* revsh */
gen_revsh(tmp);
break;
case 0x10: /* sel */
tmp2 = load_reg(s, rm);
tmp3 = new_tmp();
tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
dead_tmp(tmp3);
dead_tmp(tmp2);
break;
case 0x18: /* clz */
gen_helper_clz(tmp, tmp);
break;
default:
goto illegal_op;
}
}
store_reg(s, rd, tmp);
break;
case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
op = (insn >> 4) & 0xf;
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
switch ((insn >> 20) & 7) {
case 0: /* 32 x 32 -> 32 */
tcg_gen_mul_i32(tmp, tmp, tmp2);
dead_tmp(tmp2);
if (rs != 15) {
tmp2 = load_reg(s, rs);
if (op)
tcg_gen_sub_i32(tmp, tmp2, tmp);
else
tcg_gen_add_i32(tmp, tmp, tmp2);
dead_tmp(tmp2);
}
break;
case 1: /* 16 x 16 -> 32 */
gen_mulxy(tmp, tmp2, op & 2, op & 1);
dead_tmp(tmp2);
if (rs != 15) {
tmp2 = load_reg(s, rs);
gen_helper_add_setq(tmp, tmp, tmp2);
dead_tmp(tmp2);
}
break;
case 2: /* Dual multiply add. */
case 4: /* Dual multiply subtract. */
if (op)
gen_swap_half(tmp2);
gen_smul_dual(tmp, tmp2);
/* This addition cannot overflow. */
if (insn & (1 << 22)) {
tcg_gen_sub_i32(tmp, tmp, tmp2);
} else {
tcg_gen_add_i32(tmp, tmp, tmp2);
}
dead_tmp(tmp2);
if (rs != 15)
{
tmp2 = load_reg(s, rs);
gen_helper_add_setq(tmp, tmp, tmp2);
dead_tmp(tmp2);
}
break;
case 3: /* 32 * 16 -> 32msb */
if (op)
tcg_gen_sari_i32(tmp2, tmp2, 16);
else
gen_sxth(tmp2);
tmp64 = gen_muls_i64_i32(tmp, tmp2);
tcg_gen_shri_i64(tmp64, tmp64, 16);
tmp = new_tmp();
tcg_gen_trunc_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
if (rs != 15)
{
tmp2 = load_reg(s, rs);
gen_helper_add_setq(tmp, tmp, tmp2);
dead_tmp(tmp2);
}
break;
case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
tmp64 = gen_muls_i64_i32(tmp, tmp2);
if (rs != 15) {
tmp = load_reg(s, rs);
if (insn & (1 << 20)) {
tmp64 = gen_addq_msw(tmp64, tmp);
} else {
tmp64 = gen_subq_msw(tmp64, tmp);
}
}
if (insn & (1 << 4)) {
tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
}
tcg_gen_shri_i64(tmp64, tmp64, 32);
tmp = new_tmp();
tcg_gen_trunc_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
break;
case 7: /* Unsigned sum of absolute differences. */
gen_helper_usad8(tmp, tmp, tmp2);
dead_tmp(tmp2);
if (rs != 15) {
tmp2 = load_reg(s, rs);
tcg_gen_add_i32(tmp, tmp, tmp2);
dead_tmp(tmp2);
}
break;
}
store_reg(s, rd, tmp);
break;
case 6: case 7: /* 64-bit multiply, Divide. */
op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
if ((op & 0x50) == 0x10) {
/* sdiv, udiv */
if (!arm_feature(env, ARM_FEATURE_DIV))
goto illegal_op;
if (op & 0x20)
gen_helper_udiv(tmp, tmp, tmp2);
else
gen_helper_sdiv(tmp, tmp, tmp2);
dead_tmp(tmp2);
store_reg(s, rd, tmp);
} else if ((op & 0xe) == 0xc) {
/* Dual multiply accumulate long. */
if (op & 1)
gen_swap_half(tmp2);
gen_smul_dual(tmp, tmp2);
if (op & 0x10) {
tcg_gen_sub_i32(tmp, tmp, tmp2);
} else {
tcg_gen_add_i32(tmp, tmp, tmp2);
}
dead_tmp(tmp2);
/* BUGFIX */
tmp64 = tcg_temp_new_i64();
tcg_gen_ext_i32_i64(tmp64, tmp);
dead_tmp(tmp);
gen_addq(s, tmp64, rs, rd);
gen_storeq_reg(s, rs, rd, tmp64);
tcg_temp_free_i64(tmp64);
} else {
if (op & 0x20) {
/* Unsigned 64-bit multiply */
tmp64 = gen_mulu_i64_i32(tmp, tmp2);
} else {
if (op & 8) {
/* smlalxy */
gen_mulxy(tmp, tmp2, op & 2, op & 1);
dead_tmp(tmp2);
tmp64 = tcg_temp_new_i64();
tcg_gen_ext_i32_i64(tmp64, tmp);
dead_tmp(tmp);
} else {
/* Signed 64-bit multiply */
tmp64 = gen_muls_i64_i32(tmp, tmp2);
}
}
if (op & 4) {
/* umaal */
gen_addq_lo(s, tmp64, rs);
gen_addq_lo(s, tmp64, rd);
} else if (op & 0x40) {
/* 64-bit accumulate. */
gen_addq(s, tmp64, rs, rd);
}
gen_storeq_reg(s, rs, rd, tmp64);
tcg_temp_free_i64(tmp64);
}
break;
}
break;
case 6: case 7: case 14: case 15:
/* Coprocessor. */
if (((insn >> 24) & 3) == 3) {
/* Translate into the equivalent ARM encoding. */
insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
if (disas_neon_data_insn(env, s, insn))
goto illegal_op;
} else {
if (insn & (1 << 28))
goto illegal_op;
if (disas_coproc_insn (env, s, insn))
goto illegal_op;
}
break;
case 8: case 9: case 10: case 11:
if (insn & (1 << 15)) {
/* Branches, misc control. */
if (insn & 0x5000) {
/* Unconditional branch. */
/* signextend(hw1[10:0]) -> offset[:12]. */
offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
/* hw1[10:0] -> offset[11:1]. */
offset |= (insn & 0x7ff) << 1;
/* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
offset[24:22] already have the same value because of the
sign extension above. */
offset ^= ((~insn) & (1 << 13)) << 10;
offset ^= ((~insn) & (1 << 11)) << 11;
if (insn & (1 << 14)) {
/* Branch and link. */
tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
}
offset += s->pc;
if (insn & (1 << 12)) {
/* b/bl */
gen_jmp(s, offset);
} else {
/* blx */
offset &= ~(uint32_t)2;
gen_bx_im(s, offset);
}
} else if (((insn >> 23) & 7) == 7) {
/* Misc control */
if (insn & (1 << 13))
goto illegal_op;
if (insn & (1 << 26)) {
/* Secure monitor call (v6Z) */
goto illegal_op; /* not implemented. */
} else {
op = (insn >> 20) & 7;
switch (op) {
case 0: /* msr cpsr. */
if (IS_M(env)) {
tmp = load_reg(s, rn);
addr = tcg_const_i32(insn & 0xff);
gen_helper_v7m_msr(cpu_env, addr, tmp);
tcg_temp_free_i32(addr);
dead_tmp(tmp);
gen_lookup_tb(s);
break;
}
/* fall through */
case 1: /* msr spsr. */
if (IS_M(env))
goto illegal_op;
tmp = load_reg(s, rn);
if (gen_set_psr(s,
msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
op == 1, tmp))
goto illegal_op;
break;
case 2: /* cps, nop-hint. */
if (((insn >> 8) & 7) == 0) {
gen_nop_hint(s, insn & 0xff);
}
/* Implemented as NOP in user mode. */
if (IS_USER(s))
break;
offset = 0;
imm = 0;
if (insn & (1 << 10)) {
if (insn & (1 << 7))
offset |= CPSR_A;
if (insn & (1 << 6))
offset |= CPSR_I;
if (insn & (1 << 5))
offset |= CPSR_F;
if (insn & (1 << 9))
imm = CPSR_A | CPSR_I | CPSR_F;
}
if (insn & (1 << 8)) {
offset |= 0x1f;
imm |= (insn & 0x1f);
}
if (offset) {
gen_set_psr_im(s, offset, 0, imm);
}
break;
case 3: /* Special control operations. */
ARCH(7);
op = (insn >> 4) & 0xf;
switch (op) {
case 2: /* clrex */
gen_clrex(s);
break;
case 4: /* dsb */
case 5: /* dmb */
case 6: /* isb */
/* These execute as NOPs. */
break;
default:
goto illegal_op;
}
break;
case 4: /* bxj */
/* Trivial implementation equivalent to bx. */
tmp = load_reg(s, rn);
gen_bx(s, tmp);
break;
case 5: /* Exception return. */
if (IS_USER(s)) {
goto illegal_op;
}
if (rn != 14 || rd != 15) {
goto illegal_op;
}
tmp = load_reg(s, rn);
tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
gen_exception_return(s, tmp);
break;
case 6: /* mrs cpsr. */
tmp = new_tmp();
if (IS_M(env)) {
addr = tcg_const_i32(insn & 0xff);
gen_helper_v7m_mrs(tmp, cpu_env, addr);
tcg_temp_free_i32(addr);
} else {
gen_helper_cpsr_read(tmp);
}
store_reg(s, rd, tmp);
break;
case 7: /* mrs spsr. */
/* Not accessible in user mode. */
if (IS_USER(s) || IS_M(env))
goto illegal_op;
tmp = load_cpu_field(spsr);
store_reg(s, rd, tmp);
break;
}
}
} else {
/* Conditional branch. */
op = (insn >> 22) & 0xf;
/* Generate a conditional jump to next instruction. */
s->condlabel = gen_new_label();
gen_test_cc(op ^ 1, s->condlabel);
s->condjmp = 1;
/* offset[11:1] = insn[10:0] */
offset = (insn & 0x7ff) << 1;
/* offset[17:12] = insn[21:16]. */
offset |= (insn & 0x003f0000) >> 4;
/* offset[31:20] = insn[26]. */
offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
/* offset[18] = insn[13]. */
offset |= (insn & (1 << 13)) << 5;
/* offset[19] = insn[11]. */
offset |= (insn & (1 << 11)) << 8;
/* jump to the offset */
gen_jmp(s, s->pc + offset);
}
} else {
/* Data processing immediate. */
if (insn & (1 << 25)) {
if (insn & (1 << 24)) {
if (insn & (1 << 20))
goto illegal_op;
/* Bitfield/Saturate. */
op = (insn >> 21) & 7;
imm = insn & 0x1f;
shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
if (rn == 15) {
tmp = new_tmp();
tcg_gen_movi_i32(tmp, 0);
} else {
tmp = load_reg(s, rn);
}
switch (op) {
case 2: /* Signed bitfield extract. */
imm++;
if (shift + imm > 32)
goto illegal_op;
if (imm < 32)
gen_sbfx(tmp, shift, imm);
break;
case 6: /* Unsigned bitfield extract. */
imm++;
if (shift + imm > 32)
goto illegal_op;
if (imm < 32)
gen_ubfx(tmp, shift, (1u << imm) - 1);
break;
case 3: /* Bitfield insert/clear. */
if (imm < shift)
goto illegal_op;
imm = imm + 1 - shift;
if (imm != 32) {
tmp2 = load_reg(s, rd);
gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
dead_tmp(tmp2);
}
break;
case 7:
goto illegal_op;
default: /* Saturate. */
if (shift) {
if (op & 1)
tcg_gen_sari_i32(tmp, tmp, shift);
else
tcg_gen_shli_i32(tmp, tmp, shift);
}
tmp2 = tcg_const_i32(imm);
if (op & 4) {
/* Unsigned. */
if ((op & 1) && shift == 0)
gen_helper_usat16(tmp, tmp, tmp2);
else
gen_helper_usat(tmp, tmp, tmp2);
} else {
/* Signed. */
if ((op & 1) && shift == 0)
gen_helper_ssat16(tmp, tmp, tmp2);
else
gen_helper_ssat(tmp, tmp, tmp2);
}
tcg_temp_free_i32(tmp2);
break;
}
store_reg(s, rd, tmp);
} else {
imm = ((insn & 0x04000000) >> 15)
| ((insn & 0x7000) >> 4) | (insn & 0xff);
if (insn & (1 << 22)) {
/* 16-bit immediate. */
imm |= (insn >> 4) & 0xf000;
if (insn & (1 << 23)) {
/* movt */
tmp = load_reg(s, rd);
tcg_gen_ext16u_i32(tmp, tmp);
tcg_gen_ori_i32(tmp, tmp, imm << 16);
} else {
/* movw */
tmp = new_tmp();
tcg_gen_movi_i32(tmp, imm);
}
} else {
/* Add/sub 12-bit immediate. */
if (rn == 15) {
offset = s->pc & ~(uint32_t)3;
if (insn & (1 << 23))
offset -= imm;
else
offset += imm;
tmp = new_tmp();
tcg_gen_movi_i32(tmp, offset);
} else {
tmp = load_reg(s, rn);
if (insn & (1 << 23))
tcg_gen_subi_i32(tmp, tmp, imm);
else
tcg_gen_addi_i32(tmp, tmp, imm);
}
}
store_reg(s, rd, tmp);
}
} else {
int shifter_out = 0;
/* modified 12-bit immediate. */
shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
imm = (insn & 0xff);
switch (shift) {
case 0: /* XY */
/* Nothing to do. */
break;
case 1: /* 00XY00XY */
imm |= imm << 16;
break;
case 2: /* XY00XY00 */
imm |= imm << 16;
imm <<= 8;
break;
case 3: /* XYXYXYXY */
imm |= imm << 16;
imm |= imm << 8;
break;
default: /* Rotated constant. */
shift = (shift << 1) | (imm >> 7);
imm |= 0x80;
imm = imm << (32 - shift);
shifter_out = 1;
break;
}
tmp2 = new_tmp();
tcg_gen_movi_i32(tmp2, imm);
rn = (insn >> 16) & 0xf;
if (rn == 15) {
tmp = new_tmp();
tcg_gen_movi_i32(tmp, 0);
} else {
tmp = load_reg(s, rn);
}
op = (insn >> 21) & 0xf;
if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
shifter_out, tmp, tmp2))
goto illegal_op;
dead_tmp(tmp2);
rd = (insn >> 8) & 0xf;
if (rd != 15) {
store_reg(s, rd, tmp);
} else {
dead_tmp(tmp);
}
}
}
break;
case 12: /* Load/store single data item. */
{
int postinc = 0;
int writeback = 0;
int user;
if ((insn & 0x01100000) == 0x01000000) {
if (disas_neon_ls_insn(env, s, insn))
goto illegal_op;
break;
}
user = IS_USER(s);
if (rn == 15) {
addr = new_tmp();
/* PC relative. */
/* s->pc has already been incremented by 4. */
imm = s->pc & 0xfffffffc;
if (insn & (1 << 23))
imm += insn & 0xfff;
else
imm -= insn & 0xfff;
tcg_gen_movi_i32(addr, imm);
} else {
addr = load_reg(s, rn);
if (insn & (1 << 23)) {
/* Positive offset. */
imm = insn & 0xfff;
tcg_gen_addi_i32(addr, addr, imm);
} else {
op = (insn >> 8) & 7;
imm = insn & 0xff;
switch (op) {
case 0: case 8: /* Shifted Register. */
shift = (insn >> 4) & 0xf;
if (shift > 3)
goto illegal_op;
tmp = load_reg(s, rm);
if (shift)
tcg_gen_shli_i32(tmp, tmp, shift);
tcg_gen_add_i32(addr, addr, tmp);
dead_tmp(tmp);
break;
case 4: /* Negative offset. */
tcg_gen_addi_i32(addr, addr, -imm);
break;
case 6: /* User privilege. */
tcg_gen_addi_i32(addr, addr, imm);
user = 1;
break;
case 1: /* Post-decrement. */
imm = -imm;
/* Fall through. */
case 3: /* Post-increment. */
postinc = 1;
writeback = 1;
break;
case 5: /* Pre-decrement. */
imm = -imm;
/* Fall through. */
case 7: /* Pre-increment. */
tcg_gen_addi_i32(addr, addr, imm);
writeback = 1;
break;
default:
goto illegal_op;
}
}
}
op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
if (insn & (1 << 20)) {
/* Load. */
if (rs == 15 && op != 2) {
if (op & 2)
goto illegal_op;
/* Memory hint. Implemented as NOP. */
} else {
switch (op) {
case 0: tmp = gen_ld8u(addr, user); break;
case 4: tmp = gen_ld8s(addr, user); break;
case 1: tmp = gen_ld16u(addr, user); break;
case 5: tmp = gen_ld16s(addr, user); break;
case 2: tmp = gen_ld32(addr, user); break;
default: goto illegal_op;
}
if (rs == 15) {
gen_bx(s, tmp);
} else {
store_reg(s, rs, tmp);
}
}
} else {
/* Store. */
if (rs == 15)
goto illegal_op;
tmp = load_reg(s, rs);
switch (op) {
case 0: gen_st8(tmp, addr, user); break;
case 1: gen_st16(tmp, addr, user); break;
case 2: gen_st32(tmp, addr, user); break;
default: goto illegal_op;
}
}
if (postinc)
tcg_gen_addi_i32(addr, addr, imm);
if (writeback) {
store_reg(s, rn, addr);
} else {
dead_tmp(addr);
}
}
break;
default:
goto illegal_op;
}
return 0;
illegal_op:
return 1;
}
| false | qemu | 39ea3d4eaf1ff300ee55946108394729bc053dfa |
26,697 | size_t iov_to_buf(const struct iovec *iov, const unsigned int iov_cnt, size_t iov_off,
void *buf, size_t size)
{
uint8_t *ptr;
size_t iovec_off, buf_off;
unsigned int i;
ptr = buf;
iovec_off = 0;
buf_off = 0;
for (i = 0; i < iov_cnt && size; i++) {
if (iov_off < (iovec_off + iov[i].iov_len)) {
size_t len = MIN((iovec_off + iov[i].iov_len) - iov_off , size);
memcpy(ptr + buf_off, iov[i].iov_base + (iov_off - iovec_off), len);
buf_off += len;
iov_off += len;
size -= len;
}
iovec_off += iov[i].iov_len;
}
return buf_off;
}
| false | qemu | 2278a69e7020d86a8c73a28474e7709d3e7d5081 |
26,698 | void hmp_savevm(Monitor *mon, const QDict *qdict)
{
BlockDriverState *bs, *bs1;
QEMUSnapshotInfo sn1, *sn = &sn1, old_sn1, *old_sn = &old_sn1;
int ret;
QEMUFile *f;
int saved_vm_running;
uint64_t vm_state_size;
qemu_timeval tv;
struct tm tm;
const char *name = qdict_get_try_str(qdict, "name");
Error *local_err = NULL;
AioContext *aio_context;
if (!bdrv_all_can_snapshot(&bs)) {
monitor_printf(mon, "Device '%s' is writable but does not "
"support snapshots.\n", bdrv_get_device_name(bs));
return;
}
/* Delete old snapshots of the same name */
if (name && bdrv_all_delete_snapshot(name, &bs1, &local_err) < 0) {
error_reportf_err(local_err,
"Error while deleting snapshot on device '%s': ",
bdrv_get_device_name(bs1));
return;
}
bs = bdrv_all_find_vmstate_bs();
if (bs == NULL) {
monitor_printf(mon, "No block device can accept snapshots\n");
return;
}
aio_context = bdrv_get_aio_context(bs);
saved_vm_running = runstate_is_running();
ret = global_state_store();
if (ret) {
monitor_printf(mon, "Error saving global state\n");
return;
}
vm_stop(RUN_STATE_SAVE_VM);
aio_context_acquire(aio_context);
memset(sn, 0, sizeof(*sn));
/* fill auxiliary fields */
qemu_gettimeofday(&tv);
sn->date_sec = tv.tv_sec;
sn->date_nsec = tv.tv_usec * 1000;
sn->vm_clock_nsec = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
if (name) {
ret = bdrv_snapshot_find(bs, old_sn, name);
if (ret >= 0) {
pstrcpy(sn->name, sizeof(sn->name), old_sn->name);
pstrcpy(sn->id_str, sizeof(sn->id_str), old_sn->id_str);
} else {
pstrcpy(sn->name, sizeof(sn->name), name);
}
} else {
/* cast below needed for OpenBSD where tv_sec is still 'long' */
localtime_r((const time_t *)&tv.tv_sec, &tm);
strftime(sn->name, sizeof(sn->name), "vm-%Y%m%d%H%M%S", &tm);
}
/* save the VM state */
f = qemu_fopen_bdrv(bs, 1);
if (!f) {
monitor_printf(mon, "Could not open VM state file\n");
goto the_end;
}
ret = qemu_savevm_state(f, &local_err);
vm_state_size = qemu_ftell(f);
qemu_fclose(f);
if (ret < 0) {
error_report_err(local_err);
goto the_end;
}
ret = bdrv_all_create_snapshot(sn, bs, vm_state_size, &bs);
if (ret < 0) {
monitor_printf(mon, "Error while creating snapshot on '%s'\n",
bdrv_get_device_name(bs));
}
the_end:
aio_context_release(aio_context);
if (saved_vm_running) {
vm_start();
}
}
| false | qemu | ac8c19ba742fcbc3d64a5390b32acc6479edd7e1 |
26,699 | static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, QEMUIOVector *qiov,
int flags)
{
BDRVQcow2State *s = bs->opaque;
int offset_in_cluster;
int ret;
    unsigned int cur_bytes; /* number of bytes in current iteration */
uint64_t cluster_offset;
QEMUIOVector hd_qiov;
uint64_t bytes_done = 0;
uint8_t *cluster_data = NULL;
QCowL2Meta *l2meta = NULL;
trace_qcow2_writev_start_req(qemu_coroutine_self(), offset, bytes);
qemu_iovec_init(&hd_qiov, qiov->niov);
s->cluster_cache_offset = -1; /* disable compressed cache */
qemu_co_mutex_lock(&s->lock);
while (bytes != 0) {
l2meta = NULL;
trace_qcow2_writev_start_part(qemu_coroutine_self());
offset_in_cluster = offset_into_cluster(s, offset);
cur_bytes = MIN(bytes, INT_MAX);
if (bs->encrypted) {
cur_bytes = MIN(cur_bytes,
QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size
- offset_in_cluster);
}
ret = qcow2_alloc_cluster_offset(bs, offset, &cur_bytes,
&cluster_offset, &l2meta);
if (ret < 0) {
goto fail;
}
assert((cluster_offset & 511) == 0);
qemu_iovec_reset(&hd_qiov);
qemu_iovec_concat(&hd_qiov, qiov, bytes_done, cur_bytes);
if (bs->encrypted) {
Error *err = NULL;
assert(s->cipher);
if (!cluster_data) {
cluster_data = qemu_try_blockalign(bs->file->bs,
QCOW_MAX_CRYPT_CLUSTERS
* s->cluster_size);
if (cluster_data == NULL) {
ret = -ENOMEM;
goto fail;
}
}
assert(hd_qiov.size <=
QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
qemu_iovec_to_buf(&hd_qiov, 0, cluster_data, hd_qiov.size);
if (qcow2_encrypt_sectors(s, offset >> BDRV_SECTOR_BITS,
cluster_data,
cur_bytes >>BDRV_SECTOR_BITS,
true, &err) < 0) {
error_free(err);
ret = -EIO;
goto fail;
}
qemu_iovec_reset(&hd_qiov);
qemu_iovec_add(&hd_qiov, cluster_data, cur_bytes);
}
ret = qcow2_pre_write_overlap_check(bs, 0,
cluster_offset + offset_in_cluster, cur_bytes);
if (ret < 0) {
goto fail;
}
/* If we need to do COW, check if it's possible to merge the
* writing of the guest data together with that of the COW regions.
* If it's not possible (or not necessary) then write the
* guest data now. */
if (!merge_cow(offset, cur_bytes, &hd_qiov, l2meta)) {
qemu_co_mutex_unlock(&s->lock);
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
trace_qcow2_writev_data(qemu_coroutine_self(),
cluster_offset + offset_in_cluster);
ret = bdrv_co_pwritev(bs->file,
cluster_offset + offset_in_cluster,
cur_bytes, &hd_qiov, 0);
qemu_co_mutex_lock(&s->lock);
if (ret < 0) {
goto fail;
}
}
while (l2meta != NULL) {
QCowL2Meta *next;
ret = qcow2_alloc_cluster_link_l2(bs, l2meta);
if (ret < 0) {
goto fail;
}
/* Take the request off the list of running requests */
if (l2meta->nb_clusters != 0) {
QLIST_REMOVE(l2meta, next_in_flight);
}
qemu_co_queue_restart_all(&l2meta->dependent_requests);
next = l2meta->next;
g_free(l2meta);
l2meta = next;
}
bytes -= cur_bytes;
offset += cur_bytes;
bytes_done += cur_bytes;
trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_bytes);
}
ret = 0;
fail:
qemu_co_mutex_unlock(&s->lock);
while (l2meta != NULL) {
QCowL2Meta *next;
if (l2meta->nb_clusters != 0) {
QLIST_REMOVE(l2meta, next_in_flight);
}
qemu_co_queue_restart_all(&l2meta->dependent_requests);
next = l2meta->next;
g_free(l2meta);
l2meta = next;
}
qemu_iovec_destroy(&hd_qiov);
qemu_vfree(cluster_data);
trace_qcow2_writev_done_req(qemu_coroutine_self(), ret);
return ret;
}
| false | qemu | b25b387fa5928e516cb2c9e7fde68e958bd7e50a |
26,700 | static int open_self_maps(void *cpu_env, int fd)
{
#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
TaskState *ts = cpu->opaque;
#endif
FILE *fp;
char *line = NULL;
size_t len = 0;
ssize_t read;
fp = fopen("/proc/self/maps", "r");
if (fp == NULL) {
return -EACCES;
}
while ((read = getline(&line, &len, fp)) != -1) {
int fields, dev_maj, dev_min, inode;
uint64_t min, max, offset;
char flag_r, flag_w, flag_x, flag_p;
char path[512] = "";
fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
" %512s", &min, &max, &flag_r, &flag_w, &flag_x,
&flag_p, &offset, &dev_maj, &dev_min, &inode, path);
if ((fields < 10) || (fields > 11)) {
continue;
}
if (!strncmp(path, "[stack]", 7)) {
continue;
}
if (h2g_valid(min) && h2g_valid(max)) {
dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
" %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
h2g(min), h2g(max), flag_r, flag_w,
flag_x, flag_p, offset, dev_maj, dev_min, inode,
path[0] ? " " : "", path);
}
}
free(line);
fclose(fp);
#if defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_UNICORE32)
dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
(unsigned long long)ts->info->stack_limit,
(unsigned long long)(ts->info->start_stack +
(TARGET_PAGE_SIZE - 1)) & TARGET_PAGE_MASK,
(unsigned long long)0);
#endif
return 0;
}
| false | qemu | d67f4aaae8379b44b3b51ff07df75f693012983c |
26,702 | static void qxl_log_cmd_draw(PCIQXLDevice *qxl, QXLDrawable *draw, int group_id)
{
fprintf(stderr, ": surface_id %d type %s effect %s",
draw->surface_id,
qxl_name(qxl_draw_type, draw->type),
qxl_name(qxl_draw_effect, draw->effect));
switch (draw->type) {
case QXL_DRAW_COPY:
qxl_log_cmd_draw_copy(qxl, &draw->u.copy, group_id);
break;
}
}
| false | qemu | fae2afb10e3fdceab612c62a2b1e8b944ff578d9 |
26,703 | int bdrv_enable_write_cache(BlockDriverState *bs)
{
return bs->enable_write_cache;
}
| false | qemu | 61007b316cd71ee7333ff7a0a749a8949527575f |
26,704 | static int add_doubles_metadata(int count,
const char *name, const char *sep,
TiffContext *s)
{
char *ap;
int i;
double *dp;
if (bytestream2_get_bytes_left(&s->gb) < count * sizeof(int64_t))
return -1;
dp = av_malloc(count * sizeof(double));
if (!dp)
return AVERROR(ENOMEM);
for (i = 0; i < count; i++)
dp[i] = tget_double(&s->gb, s->le);
ap = doubles2str(dp, count, sep);
av_freep(&dp);
if (!ap)
return AVERROR(ENOMEM);
av_dict_set(&s->picture.metadata, name, ap, AV_DICT_DONT_STRDUP_VAL);
return 0;
}
| false | FFmpeg | 292850b634240045805e3c2001aed6f046034e93 |
26,705 | static void device_set_realized(Object *obj, bool value, Error **errp)
{
DeviceState *dev = DEVICE(obj);
DeviceClass *dc = DEVICE_GET_CLASS(dev);
HotplugHandler *hotplug_ctrl;
BusState *bus;
Error *local_err = NULL;
bool unattached_parent = false;
static int unattached_count;
if (dev->hotplugged && !dc->hotpluggable) {
error_setg(errp, QERR_DEVICE_NO_HOTPLUG, object_get_typename(obj));
return;
}
if (value && !dev->realized) {
if (!obj->parent) {
gchar *name = g_strdup_printf("device[%d]", unattached_count++);
object_property_add_child(container_get(qdev_get_machine(),
"/unattached"),
name, obj, &error_abort);
unattached_parent = true;
g_free(name);
}
hotplug_ctrl = qdev_get_hotplug_handler(dev);
if (hotplug_ctrl) {
hotplug_handler_pre_plug(hotplug_ctrl, dev, &local_err);
if (local_err != NULL) {
goto fail;
}
}
if (dc->realize) {
dc->realize(dev, &local_err);
}
if (local_err != NULL) {
goto fail;
}
DEVICE_LISTENER_CALL(realize, Forward, dev);
if (hotplug_ctrl) {
hotplug_handler_plug(hotplug_ctrl, dev, &local_err);
}
if (local_err != NULL) {
goto post_realize_fail;
}
if (qdev_get_vmsd(dev)) {
vmstate_register_with_alias_id(dev, -1, qdev_get_vmsd(dev), dev,
dev->instance_id_alias,
dev->alias_required_for_version,
NULL);
}
QLIST_FOREACH(bus, &dev->child_bus, sibling) {
object_property_set_bool(OBJECT(bus), true, "realized",
&local_err);
if (local_err != NULL) {
goto child_realize_fail;
}
}
if (dev->hotplugged) {
device_reset(dev);
}
dev->pending_deleted_event = false;
} else if (!value && dev->realized) {
Error **local_errp = NULL;
QLIST_FOREACH(bus, &dev->child_bus, sibling) {
local_errp = local_err ? NULL : &local_err;
object_property_set_bool(OBJECT(bus), false, "realized",
local_errp);
}
if (qdev_get_vmsd(dev)) {
vmstate_unregister(dev, qdev_get_vmsd(dev), dev);
}
if (dc->unrealize) {
local_errp = local_err ? NULL : &local_err;
dc->unrealize(dev, local_errp);
}
dev->pending_deleted_event = true;
DEVICE_LISTENER_CALL(unrealize, Reverse, dev);
}
if (local_err != NULL) {
goto fail;
}
dev->realized = value;
return;
child_realize_fail:
QLIST_FOREACH(bus, &dev->child_bus, sibling) {
object_property_set_bool(OBJECT(bus), false, "realized",
NULL);
}
if (qdev_get_vmsd(dev)) {
vmstate_unregister(dev, qdev_get_vmsd(dev), dev);
}
post_realize_fail:
if (dc->unrealize) {
dc->unrealize(dev, NULL);
}
fail:
error_propagate(errp, local_err);
if (unattached_parent) {
object_unparent(OBJECT(dev));
unattached_count--;
}
}
| false | qemu | 67980031d234aa90524b83bb80bb5d1601d29076 |
26,706 | static void serial_update_irq(SerialState *s)
{
uint8_t tmp_iir = UART_IIR_NO_INT;
if ((s->ier & UART_IER_RLSI) && (s->lsr & UART_LSR_INT_ANY)) {
tmp_iir = UART_IIR_RLSI;
} else if ((s->ier & UART_IER_RDI) && s->timeout_ipending) {
/* Note that(s->ier & UART_IER_RDI) can mask this interrupt,
* this is not in the specification but is observed on existing
* hardware. */
tmp_iir = UART_IIR_CTI;
} else if ((s->ier & UART_IER_RDI) && (s->lsr & UART_LSR_DR)) {
if (!(s->fcr & UART_FCR_FE)) {
tmp_iir = UART_IIR_RDI;
} else if (s->recv_fifo.count >= s->recv_fifo.itl) {
tmp_iir = UART_IIR_RDI;
}
} else if ((s->ier & UART_IER_THRI) && s->thr_ipending) {
tmp_iir = UART_IIR_THRI;
} else if ((s->ier & UART_IER_MSI) && (s->msr & UART_MSR_ANY_DELTA)) {
tmp_iir = UART_IIR_MSI;
}
s->iir = tmp_iir | (s->iir & 0xF0);
if (tmp_iir != UART_IIR_NO_INT) {
qemu_irq_raise(s->irq);
} else {
qemu_irq_lower(s->irq);
}
}
| false | qemu | 2d6ee8e7e17227d5eb8c6e9a054dd88d5b37c5ae |
26,707 | static void spapr_add_lmbs(DeviceState *dev, uint64_t addr_start, uint64_t size,
uint32_t node, bool dedicated_hp_event_source,
Error **errp)
{
sPAPRDRConnector *drc;
uint32_t nr_lmbs = size/SPAPR_MEMORY_BLOCK_SIZE;
int i, fdt_offset, fdt_size;
void *fdt;
uint64_t addr = addr_start;
for (i = 0; i < nr_lmbs; i++) {
drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
addr / SPAPR_MEMORY_BLOCK_SIZE);
g_assert(drc);
fdt = create_device_tree(&fdt_size);
fdt_offset = spapr_populate_memory_node(fdt, node, addr,
SPAPR_MEMORY_BLOCK_SIZE);
spapr_drc_attach(drc, dev, fdt, fdt_offset, !dev->hotplugged, errp);
addr += SPAPR_MEMORY_BLOCK_SIZE;
if (!dev->hotplugged) {
sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
/* guests expect coldplugged LMBs to be pre-allocated */
drck->set_allocation_state(drc, SPAPR_DR_ALLOCATION_STATE_USABLE);
drck->set_isolation_state(drc, SPAPR_DR_ISOLATION_STATE_UNISOLATED);
}
}
/* send hotplug notification to the
* guest only in case of hotplugged memory
*/
if (dev->hotplugged) {
if (dedicated_hp_event_source) {
drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
addr_start / SPAPR_MEMORY_BLOCK_SIZE);
spapr_hotplug_req_add_by_count_indexed(SPAPR_DR_CONNECTOR_TYPE_LMB,
nr_lmbs,
spapr_drc_index(drc));
} else {
spapr_hotplug_req_add_by_count(SPAPR_DR_CONNECTOR_TYPE_LMB,
nr_lmbs);
}
}
}
| false | qemu | 4f9242fc931ab5e5b1b753c8e5a76c50c0b0612e |
26,708 | static int vdi_open(BlockDriverState *bs, int flags)
{
BDRVVdiState *s = bs->opaque;
VdiHeader header;
size_t bmap_size;
int ret;
logout("\n");
ret = bdrv_read(bs->file, 0, (uint8_t *)&header, 1);
if (ret < 0) {
goto fail;
}
vdi_header_to_cpu(&header);
#if defined(CONFIG_VDI_DEBUG)
vdi_header_print(&header);
#endif
if (header.disk_size % SECTOR_SIZE != 0) {
/* 'VBoxManage convertfromraw' can create images with odd disk sizes.
We accept them but round the disk size to the next multiple of
SECTOR_SIZE. */
logout("odd disk size %" PRIu64 " B, round up\n", header.disk_size);
header.disk_size += SECTOR_SIZE - 1;
header.disk_size &= ~(SECTOR_SIZE - 1);
}
if (header.version != VDI_VERSION_1_1) {
logout("unsupported version %u.%u\n",
header.version >> 16, header.version & 0xffff);
ret = -ENOTSUP;
goto fail;
} else if (header.offset_bmap % SECTOR_SIZE != 0) {
/* We only support block maps which start on a sector boundary. */
logout("unsupported block map offset 0x%x B\n", header.offset_bmap);
ret = -ENOTSUP;
goto fail;
} else if (header.offset_data % SECTOR_SIZE != 0) {
/* We only support data blocks which start on a sector boundary. */
logout("unsupported data offset 0x%x B\n", header.offset_data);
ret = -ENOTSUP;
goto fail;
} else if (header.sector_size != SECTOR_SIZE) {
logout("unsupported sector size %u B\n", header.sector_size);
ret = -ENOTSUP;
goto fail;
} else if (header.block_size != 1 * MiB) {
logout("unsupported block size %u B\n", header.block_size);
ret = -ENOTSUP;
goto fail;
} else if (header.disk_size >
(uint64_t)header.blocks_in_image * header.block_size) {
logout("unsupported disk size %" PRIu64 " B\n", header.disk_size);
ret = -ENOTSUP;
goto fail;
} else if (!uuid_is_null(header.uuid_link)) {
logout("link uuid != 0, unsupported\n");
ret = -ENOTSUP;
goto fail;
} else if (!uuid_is_null(header.uuid_parent)) {
logout("parent uuid != 0, unsupported\n");
ret = -ENOTSUP;
goto fail;
}
bs->total_sectors = header.disk_size / SECTOR_SIZE;
s->block_size = header.block_size;
s->block_sectors = header.block_size / SECTOR_SIZE;
s->bmap_sector = header.offset_bmap / SECTOR_SIZE;
s->header = header;
bmap_size = header.blocks_in_image * sizeof(uint32_t);
bmap_size = (bmap_size + SECTOR_SIZE - 1) / SECTOR_SIZE;
if (bmap_size > 0) {
s->bmap = g_malloc(bmap_size * SECTOR_SIZE);
}
ret = bdrv_read(bs->file, s->bmap_sector, (uint8_t *)s->bmap, bmap_size);
if (ret < 0) {
goto fail_free_bmap;
}
/* Disable migration when vdi images are used */
error_set(&s->migration_blocker,
QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
"vdi", bs->device_name, "live migration");
migrate_add_blocker(s->migration_blocker);
return 0;
fail_free_bmap:
g_free(s->bmap);
fail:
return ret;
}
| false | qemu | 0e87ba2ccbf900cc5a56b95e0671e5a5d2c2f6a0 |
26,709 | qemu_irq isa_reserve_irq(int isairq)
{
if (isairq < 0 || isairq > 15) {
hw_error("isa irq %d invalid", isairq);
}
if (isabus->assigned & (1 << isairq)) {
hw_error("isa irq %d already assigned", isairq);
}
isabus->assigned |= (1 << isairq);
return isabus->irqs[isairq];
}
| false | qemu | ee951a37d8873bff7aa58e23222dfd984111b6cb |
26,710 | S390CPU *cpu_s390x_create(const char *cpu_model, Error **errp)
{
static bool features_parsed;
char *name, *features;
const char *typename;
ObjectClass *oc;
CPUClass *cc;
name = g_strdup(cpu_model);
features = strchr(name, ',');
if (features) {
features[0] = 0;
features++;
}
oc = cpu_class_by_name(TYPE_S390_CPU, name);
if (!oc) {
error_setg(errp, "Unknown CPU definition \'%s\'", name);
g_free(name);
return NULL;
}
typename = object_class_get_name(oc);
if (!features_parsed) {
features_parsed = true;
cc = CPU_CLASS(oc);
cc->parse_features(typename, features, errp);
}
g_free(name);
if (*errp) {
return NULL;
}
return S390_CPU(CPU(object_new(typename)));
}
| false | qemu | 524d18d8bd463431b120eeb5f9f3d1064a1c19e4 |
26,712 | static inline void pit_load_count(PITChannelState *s, int val)
{
if (val == 0)
val = 0x10000;
s->count_load_time = cpu_get_ticks();
s->count_last_edge_check_time = s->count_load_time;
s->count = val;
if (s == &pit_channels[0] && val <= pit_min_timer_count) {
fprintf(stderr,
"\nWARNING: qemu: on your system, accurate timer emulation is impossible if its frequency is more than %d Hz. If using a 2.5.xx Linux kernel, you must patch asm/param.h to change HZ from 1000 to 100.\n\n",
PIT_FREQ / pit_min_timer_count);
}
}
| false | qemu | bb551faa4ac8a798df9757c0ae26893041344bc3 |
26,713 | static int usbredir_handle_bulk_data(USBRedirDevice *dev, USBPacket *p,
uint8_t ep)
{
AsyncURB *aurb = async_alloc(dev, p);
struct usb_redir_bulk_packet_header bulk_packet;
DPRINTF("bulk-out ep %02X len %zd id %u\n", ep,
p->iov.size, aurb->packet_id);
bulk_packet.endpoint = ep;
bulk_packet.length = p->iov.size;
bulk_packet.stream_id = 0;
aurb->bulk_packet = bulk_packet;
if (ep & USB_DIR_IN) {
usbredirparser_send_bulk_packet(dev->parser, aurb->packet_id,
&bulk_packet, NULL, 0);
} else {
uint8_t buf[p->iov.size];
usb_packet_copy(p, buf, p->iov.size);
usbredir_log_data(dev, "bulk data out:", buf, p->iov.size);
usbredirparser_send_bulk_packet(dev->parser, aurb->packet_id,
&bulk_packet, buf, p->iov.size);
}
usbredirparser_do_write(dev->parser);
return USB_RET_ASYNC;
}
| false | qemu | 104981d52b63dc3d68f39d4442881c667f44bbb9 |
26,714 | static int decode_frame(AVCodecContext *avctx, void *data,
int *data_size, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
C93DecoderContext * const c93 = avctx->priv_data;
AVFrame * const newpic = &c93->pictures[c93->currentpic];
AVFrame * const oldpic = &c93->pictures[c93->currentpic^1];
AVFrame *picture = data;
uint8_t *out;
int stride, i, x, y, bt = 0;
c93->currentpic ^= 1;
newpic->reference = 1;
newpic->buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
FF_BUFFER_HINTS_REUSABLE | FF_BUFFER_HINTS_READABLE;
if (avctx->reget_buffer(avctx, newpic)) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return -1;
}
stride = newpic->linesize[0];
if (buf[0] & C93_FIRST_FRAME) {
newpic->pict_type = AV_PICTURE_TYPE_I;
newpic->key_frame = 1;
} else {
newpic->pict_type = AV_PICTURE_TYPE_P;
newpic->key_frame = 0;
}
if (*buf++ & C93_HAS_PALETTE) {
uint32_t *palette = (uint32_t *) newpic->data[1];
const uint8_t *palbuf = buf + buf_size - 768 - 1;
for (i = 0; i < 256; i++) {
palette[i] = bytestream_get_be24(&palbuf);
}
} else {
if (oldpic->data[1])
memcpy(newpic->data[1], oldpic->data[1], 256 * 4);
}
for (y = 0; y < HEIGHT; y += 8) {
out = newpic->data[0] + y * stride;
for (x = 0; x < WIDTH; x += 8) {
uint8_t *copy_from = oldpic->data[0];
unsigned int offset, j;
uint8_t cols[4], grps[4];
C93BlockType block_type;
if (!bt)
bt = *buf++;
block_type= bt & 0x0F;
switch (block_type) {
case C93_8X8_FROM_PREV:
offset = bytestream_get_le16(&buf);
if (copy_block(avctx, out, copy_from, offset, 8, stride))
return -1;
break;
case C93_4X4_FROM_CURR:
copy_from = newpic->data[0];
case C93_4X4_FROM_PREV:
for (j = 0; j < 8; j += 4) {
for (i = 0; i < 8; i += 4) {
offset = bytestream_get_le16(&buf);
if (copy_block(avctx, &out[j*stride+i],
copy_from, offset, 4, stride))
return -1;
}
}
break;
case C93_8X8_2COLOR:
bytestream_get_buffer(&buf, cols, 2);
for (i = 0; i < 8; i++) {
draw_n_color(out + i*stride, stride, 8, 1, 1, cols,
NULL, *buf++);
}
break;
case C93_4X4_2COLOR:
case C93_4X4_4COLOR:
case C93_4X4_4COLOR_GRP:
for (j = 0; j < 8; j += 4) {
for (i = 0; i < 8; i += 4) {
if (block_type == C93_4X4_2COLOR) {
bytestream_get_buffer(&buf, cols, 2);
draw_n_color(out + i + j*stride, stride, 4, 4,
1, cols, NULL, bytestream_get_le16(&buf));
} else if (block_type == C93_4X4_4COLOR) {
bytestream_get_buffer(&buf, cols, 4);
draw_n_color(out + i + j*stride, stride, 4, 4,
2, cols, NULL, bytestream_get_le32(&buf));
} else {
bytestream_get_buffer(&buf, grps, 4);
draw_n_color(out + i + j*stride, stride, 4, 4,
1, cols, grps, bytestream_get_le16(&buf));
}
}
}
break;
case C93_NOOP:
break;
case C93_8X8_INTRA:
for (j = 0; j < 8; j++)
bytestream_get_buffer(&buf, out + j*stride, 8);
break;
default:
av_log(avctx, AV_LOG_ERROR, "unexpected type %x at %dx%d\n",
block_type, x, y);
return -1;
}
bt >>= 4;
out += 8;
}
}
*picture = *newpic;
*data_size = sizeof(AVFrame);
return buf_size;
}
| true | FFmpeg | 85aded741e03b17b0cc5c588b1f5acbcb25d7996 |
26,716 | static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
{
HQDN3DContext *hqdn3d = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFilterBufferRef *out;
int direct, c;
if (in->perms & AV_PERM_WRITE) {
direct = 1;
out = in;
} else {
out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
if (!out) {
avfilter_unref_bufferp(&in);
return AVERROR(ENOMEM);
}
avfilter_copy_buffer_ref_props(out, in);
out->video->w = outlink->w;
out->video->h = outlink->h;
}
for (c = 0; c < 3; c++) {
denoise(hqdn3d, in->data[c], out->data[c],
hqdn3d->line, &hqdn3d->frame_prev[c],
in->video->w >> (!!c * hqdn3d->hsub),
in->video->h >> (!!c * hqdn3d->vsub),
in->linesize[c], out->linesize[c],
hqdn3d->coefs[c?2:0], hqdn3d->coefs[c?3:1]);
}
if (!direct)
avfilter_unref_bufferp(&in);
return ff_filter_frame(outlink, out);
}
| true | FFmpeg | 9e1914dfbafb59b424a7c06cfdd324a85c33ef44 |
26,717 | void *qemu_blockalign(BlockDriverState *bs, size_t size)
{
return qemu_memalign((bs && bs->buffer_alignment) ? bs->buffer_alignment : 512, size);
}
| true | qemu | 339064d5063924e5176842abbf6c8089f3479c5b |
26,718 | static int ehci_fill_queue(EHCIPacket *p)
{
    USBEndpoint *ep = p->packet.ep;
    EHCIQueue *q = p->queue;
    EHCIqtd qtd = p->qtd;
    uint32_t qtdaddr;
    for (;;) {
        if (NLPTR_TBIT(qtd.next) != 0) {
            break;
        }
        qtdaddr = qtd.next;
        /*
         * Detect circular td lists, Windows creates these, counting on the
         * active bit going low after execution to make the queue stop.
         */
        QTAILQ_FOREACH(p, &q->packets, next) {
            if (p->qtdaddr == qtdaddr) {
                goto leave;
            }
        }
        if (get_dwords(q->ehci, NLPTR_GET(qtdaddr),
                       (uint32_t *) &qtd, sizeof(EHCIqtd) >> 2) < 0) {
            return -1;
        }
        ehci_trace_qtd(q, NLPTR_GET(qtdaddr), &qtd);
        if (!(qtd.token & QTD_TOKEN_ACTIVE)) {
            break;
        }
        p = ehci_alloc_packet(q);
        p->qtdaddr = qtdaddr;
        p->qtd = qtd;
        if (ehci_execute(p, "queue") == -1) {
            return -1;
        }
        assert(p->packet.status == USB_RET_ASYNC);
        p->async = EHCI_ASYNC_INFLIGHT;
    }
leave:
    usb_device_flush_ep_queue(ep->dev, ep);
    return 1;
}
| true | qemu | bbbc39ccacf66ef58261c155f9eed503947c3023 |
26,719 | void bdrv_close(BlockDriverState *bs)
{
if (bs->drv) {
if (bs->backing_hd)
bdrv_delete(bs->backing_hd);
bs->drv->bdrv_close(bs);
qemu_free(bs->opaque);
#ifdef _WIN32
if (bs->is_temporary) {
unlink(bs->filename);
}
#endif
bs->opaque = NULL;
bs->drv = NULL;
/* call the change callback */
bs->total_sectors = 0;
bs->media_changed = 1;
if (bs->change_cb)
bs->change_cb(bs->change_opaque);
}
}
| true | qemu | b5eff355460643d09e533024360fe0522f368c07 |
26,720 | static int encode_block(SVQ1EncContext *s, uint8_t *src, uint8_t *ref,
uint8_t *decoded, int stride, int level,
int threshold, int lambda, int intra)
{
int count, y, x, i, j, split, best_mean, best_score, best_count;
int best_vector[6];
int block_sum[7] = { 0, 0, 0, 0, 0, 0 };
int w = 2 << (level + 2 >> 1);
int h = 2 << (level + 1 >> 1);
int size = w * h;
int16_t (*block)[256] = s->encoded_block_levels[level];
const int8_t *codebook_sum, *codebook;
const uint16_t(*mean_vlc)[2];
const uint8_t(*multistage_vlc)[2];
best_score = 0;
// FIXME: Optimize, this does not need to be done multiple times.
if (intra) {
codebook_sum = svq1_intra_codebook_sum[level];
codebook = ff_svq1_intra_codebooks[level];
mean_vlc = ff_svq1_intra_mean_vlc;
multistage_vlc = ff_svq1_intra_multistage_vlc[level];
for (y = 0; y < h; y++) {
for (x = 0; x < w; x++) {
int v = src[x + y * stride];
block[0][x + w * y] = v;
best_score += v * v;
block_sum[0] += v;
}
}
} else {
codebook_sum = svq1_inter_codebook_sum[level];
codebook = ff_svq1_inter_codebooks[level];
mean_vlc = ff_svq1_inter_mean_vlc + 256;
multistage_vlc = ff_svq1_inter_multistage_vlc[level];
for (y = 0; y < h; y++) {
for (x = 0; x < w; x++) {
int v = src[x + y * stride] - ref[x + y * stride];
block[0][x + w * y] = v;
best_score += v * v;
block_sum[0] += v;
}
}
}
best_count = 0;
best_score -= (int)((unsigned)block_sum[0] * block_sum[0] >> (level + 3));
best_mean = block_sum[0] + (size >> 1) >> (level + 3);
if (level < 4) {
for (count = 1; count < 7; count++) {
int best_vector_score = INT_MAX;
int best_vector_sum = -999, best_vector_mean = -999;
const int stage = count - 1;
const int8_t *vector;
for (i = 0; i < 16; i++) {
int sum = codebook_sum[stage * 16 + i];
int sqr, diff, score;
vector = codebook + stage * size * 16 + i * size;
sqr = s->ssd_int8_vs_int16(vector, block[stage], size);
diff = block_sum[stage] - sum;
score = sqr - (diff * (int64_t)diff >> (level + 3)); // FIXME: 64bit slooow
if (score < best_vector_score) {
int mean = diff + (size >> 1) >> (level + 3);
av_assert2(mean > -300 && mean < 300);
mean = av_clip(mean, intra ? 0 : -256, 255);
best_vector_score = score;
best_vector[stage] = i;
best_vector_sum = sum;
best_vector_mean = mean;
}
}
av_assert0(best_vector_mean != -999);
vector = codebook + stage * size * 16 + best_vector[stage] * size;
for (j = 0; j < size; j++)
block[stage + 1][j] = block[stage][j] - vector[j];
block_sum[stage + 1] = block_sum[stage] - best_vector_sum;
best_vector_score += lambda *
(+1 + 4 * count +
multistage_vlc[1 + count][1]
+ mean_vlc[best_vector_mean][1]);
if (best_vector_score < best_score) {
best_score = best_vector_score;
best_count = count;
best_mean = best_vector_mean;
}
}
}
split = 0;
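    /* Try splitting the block into two halves (direction alternates with level) and encoding each half recursively; keep the split if its total score wins. */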
if (best_score > threshold && level) {
int score = 0;
int offset = level & 1 ? stride * h / 2 : w / 2;
PutBitContext backup[6];
for (i = level - 1; i >= 0; i--)
backup[i] = s->reorder_pb[i];
score += encode_block(s, src, ref, decoded, stride, level - 1,
threshold >> 1, lambda, intra);
score += encode_block(s, src + offset, ref + offset, decoded + offset,
stride, level - 1, threshold >> 1, lambda, intra);
score += lambda;
if (score < best_score) {
best_score = score;
split = 1;
} else {
for (i = level - 1; i >= 0; i--)
s->reorder_pb[i] = backup[i];
}
}
if (level > 0)
put_bits(&s->reorder_pb[level], 1, split);
if (!split) {
av_assert1(best_mean >= 0 && best_mean < 256 || !intra);
av_assert1(best_mean >= -256 && best_mean < 256);
av_assert1(best_count >= 0 && best_count < 7);
av_assert1(level < 4 || best_count == 0);
/* output the encoding */
put_bits(&s->reorder_pb[level],
multistage_vlc[1 + best_count][1],
multistage_vlc[1 + best_count][0]);
put_bits(&s->reorder_pb[level], mean_vlc[best_mean][1],
mean_vlc[best_mean][0]);
for (i = 0; i < best_count; i++) {
av_assert2(best_vector[i] >= 0 && best_vector[i] < 16);
put_bits(&s->reorder_pb[level], 4, best_vector[i]);
}
for (y = 0; y < h; y++)
for (x = 0; x < w; x++)
decoded[x + y * stride] = src[x + y * stride] -
block[best_count][x + w * y] +
best_mean;
}
return best_score;
}
| true | FFmpeg | 9079e99d2c462ec7ef2e89d9e77ee6c3553dacce |
26,721 | void bdrv_attach_aio_context(BlockDriverState *bs,
AioContext *new_context)
{
BdrvAioNotifier *ban;
BdrvChild *child;
if (!bs->drv) {
return;
}
bs->aio_context = new_context;
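    /* Propagate the new context to all children first, then notify the driver and any registered listeners. */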
QLIST_FOREACH(child, &bs->children, next) {
bdrv_attach_aio_context(child->bs, new_context);
}
if (bs->drv->bdrv_attach_aio_context) {
bs->drv->bdrv_attach_aio_context(bs, new_context);
}
QLIST_FOREACH(ban, &bs->aio_notifiers, list) {
ban->attached_aio_context(new_context, ban->opaque);
}
}
| true | qemu | e8a095dadb70e2ea6d5169d261920db3747bfa45 |
26,722 | static inline CopyRet copy_frame(AVCodecContext *avctx,
BC_DTS_PROC_OUT *output,
void *data, int *data_size)
{
BC_STATUS ret;
BC_DTS_STATUS decoder_status = { 0, };
uint8_t trust_interlaced;
uint8_t interlaced;
CHDContext *priv = avctx->priv_data;
int64_t pkt_pts = AV_NOPTS_VALUE;
uint8_t pic_type = 0;
uint8_t bottom_field = (output->PicInfo.flags & VDEC_FLAG_BOTTOMFIELD) ==
VDEC_FLAG_BOTTOMFIELD;
uint8_t bottom_first = !!(output->PicInfo.flags & VDEC_FLAG_BOTTOM_FIRST);
int width = output->PicInfo.width;
int height = output->PicInfo.height;
int bwidth;
uint8_t *src = output->Ybuff;
int sStride;
uint8_t *dst;
int dStride;
if (output->PicInfo.timeStamp != 0) {
OpaqueList *node = opaque_list_pop(priv, output->PicInfo.timeStamp);
if (node) {
pkt_pts = node->reordered_opaque;
pic_type = node->pic_type;
av_free(node);
} else {
/*
* We will encounter a situation where a timestamp cannot be
* popped if a second field is being returned. In this case,
* each field has the same timestamp and the first one will
* cause it to be popped. To keep subsequent calculations
             * simple, pic_type should be set to a FIELD value - doesn't
* matter which, but I chose BOTTOM.
*/
pic_type = PICT_BOTTOM_FIELD;
}
av_log(avctx, AV_LOG_VERBOSE, "output \"pts\": %"PRIu64"\n",
output->PicInfo.timeStamp);
av_log(avctx, AV_LOG_VERBOSE, "output picture type %d\n",
pic_type);
}
ret = DtsGetDriverStatus(priv->dev, &decoder_status);
if (ret != BC_STS_SUCCESS) {
av_log(avctx, AV_LOG_ERROR,
"CrystalHD: GetDriverStatus failed: %u\n", ret);
return RET_ERROR;
}
/*
* For most content, we can trust the interlaced flag returned
* by the hardware, but sometimes we can't. These are the
* conditions under which we can trust the flag:
*
* 1) It's not h.264 content
* 2) The UNKNOWN_SRC flag is not set
* 3) We know we're expecting a second field
* 4) The hardware reports this picture and the next picture
* have the same picture number.
*
* Note that there can still be interlaced content that will
* fail this check, if the hardware hasn't decoded the next
* picture or if there is a corruption in the stream. (In either
* case a 0 will be returned for the next picture number)
*/
trust_interlaced = avctx->codec->id != CODEC_ID_H264 ||
!(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) ||
priv->need_second_field ||
(decoder_status.picNumFlags & ~0x40000000) ==
output->PicInfo.picture_number;
/*
* If we got a false negative for trust_interlaced on the first field,
* we will realise our mistake here when we see that the picture number is that
* of the previous picture. We cannot recover the frame and should discard the
* second field to keep the correct number of output frames.
*/
if (output->PicInfo.picture_number == priv->last_picture && !priv->need_second_field) {
av_log(avctx, AV_LOG_WARNING,
"Incorrectly guessed progressive frame. Discarding second field\n");
/* Returning without providing a picture. */
return RET_OK;
}
interlaced = (output->PicInfo.flags & VDEC_FLAG_INTERLACED_SRC) &&
trust_interlaced;
if (!trust_interlaced && (decoder_status.picNumFlags & ~0x40000000) == 0) {
av_log(avctx, AV_LOG_VERBOSE,
"Next picture number unknown. Assuming progressive frame.\n");
}
av_log(avctx, AV_LOG_VERBOSE, "Interlaced state: %d | trust_interlaced %d\n",
interlaced, trust_interlaced);
if (priv->pic.data[0] && !priv->need_second_field)
avctx->release_buffer(avctx, &priv->pic);
priv->need_second_field = interlaced && !priv->need_second_field;
priv->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
FF_BUFFER_HINTS_REUSABLE;
if (!priv->pic.data[0]) {
if (avctx->get_buffer(avctx, &priv->pic) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return RET_ERROR;
}
}
bwidth = av_image_get_linesize(avctx->pix_fmt, width, 0);
if (priv->is_70012) {
int pStride;
if (width <= 720)
pStride = 720;
else if (width <= 1280)
pStride = 1280;
else pStride = 1920;
sStride = av_image_get_linesize(avctx->pix_fmt, pStride, 0);
} else {
sStride = bwidth;
}
dStride = priv->pic.linesize[0];
dst = priv->pic.data[0];
av_log(priv->avctx, AV_LOG_VERBOSE, "CrystalHD: Copying out frame\n");
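    /* Interlaced content arrives one field at a time: copy into every other output line, offset by one line for a bottom field. */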
if (interlaced) {
int dY = 0;
int sY = 0;
height /= 2;
if (bottom_field) {
av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: bottom field\n");
dY = 1;
} else {
av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: top field\n");
dY = 0;
}
for (sY = 0; sY < height; dY++, sY++) {
memcpy(&(dst[dY * dStride]), &(src[sY * sStride]), bwidth);
dY++;
}
} else {
av_image_copy_plane(dst, dStride, src, sStride, bwidth, height);
}
priv->pic.interlaced_frame = interlaced;
if (interlaced)
priv->pic.top_field_first = !bottom_first;
priv->pic.pkt_pts = pkt_pts;
if (!priv->need_second_field) {
*data_size = sizeof(AVFrame);
*(AVFrame *)data = priv->pic;
}
/*
* Two types of PAFF content have been observed. One form causes the
* hardware to return a field pair and the other individual fields,
* even though the input is always individual fields. We must skip
* copying on the next decode() call to maintain pipeline length in
* the first case.
*/
if (!interlaced && (output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) &&
(pic_type == PICT_TOP_FIELD || pic_type == PICT_BOTTOM_FIELD)) {
av_log(priv->avctx, AV_LOG_VERBOSE, "Fieldpair from two packets.\n");
return RET_SKIP_NEXT_COPY;
}
/*
* Testing has shown that in all cases where we don't want to return the
* full frame immediately, VDEC_FLAG_UNKNOWN_SRC is set.
*/
return priv->need_second_field &&
!(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) ?
RET_COPY_NEXT_FIELD : RET_OK;
}
| false | FFmpeg | d1ac8e10340f30b6989cfd64ed1f91dae5a54e2d |
26,723 | static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
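    /* Load the 16-bit avail-ring flags field through the cached guest memory mapping. */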
VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
hwaddr pa = offsetof(VRingAvail, flags);
return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}
| true | qemu | e0e2d644096c79a71099b176d08f465f6803a8b1 |
26,724 | static void ppc_spapr_init(MachineState *machine)
{
ram_addr_t ram_size = machine->ram_size;
const char *cpu_model = machine->cpu_model;
const char *kernel_filename = machine->kernel_filename;
const char *kernel_cmdline = machine->kernel_cmdline;
const char *initrd_filename = machine->initrd_filename;
const char *boot_device = machine->boot_order;
PowerPCCPU *cpu;
CPUPPCState *env;
PCIHostState *phb;
int i;
MemoryRegion *sysmem = get_system_memory();
MemoryRegion *ram = g_new(MemoryRegion, 1);
MemoryRegion *rma_region;
void *rma = NULL;
hwaddr rma_alloc_size;
hwaddr node0_size = spapr_node0_size();
uint32_t initrd_base = 0;
long kernel_size = 0, initrd_size = 0;
long load_limit, rtas_limit, fw_size;
bool kernel_le = false;
char *filename;
msi_supported = true;
spapr = g_malloc0(sizeof(*spapr));
QLIST_INIT(&spapr->phbs);
cpu_ppc_hypercall = emulate_spapr_hypercall;
/* Allocate RMA if necessary */
rma_alloc_size = kvmppc_alloc_rma(&rma);
if (rma_alloc_size == -1) {
hw_error("qemu: Unable to create RMA\n");
exit(1);
}
if (rma_alloc_size && (rma_alloc_size < node0_size)) {
spapr->rma_size = rma_alloc_size;
} else {
spapr->rma_size = node0_size;
/* With KVM, we don't actually know whether KVM supports an
* unbounded RMA (PR KVM) or is limited by the hash table size
* (HV KVM using VRMA), so we always assume the latter
*
* In that case, we also limit the initial allocations for RTAS
* etc... to 256M since we have no way to know what the VRMA size
         * is going to be as it depends on the size of the hash table,
         * which isn't determined yet.
*/
if (kvm_enabled()) {
spapr->vrma_adjust = 1;
spapr->rma_size = MIN(spapr->rma_size, 0x10000000);
}
}
if (spapr->rma_size > node0_size) {
fprintf(stderr, "Error: Numa node 0 has to span the RMA (%#08"HWADDR_PRIx")\n",
spapr->rma_size);
exit(1);
}
/* We place the device tree and RTAS just below either the top of the RMA,
     * or just below 2GB, whichever is lower, so that it can be
* processed with 32-bit real mode code if necessary */
rtas_limit = MIN(spapr->rma_size, 0x80000000);
spapr->rtas_addr = rtas_limit - RTAS_MAX_SIZE;
spapr->fdt_addr = spapr->rtas_addr - FDT_MAX_SIZE;
load_limit = spapr->fdt_addr - FW_OVERHEAD;
/* We aim for a hash table of size 1/128 the size of RAM. The
* normal rule of thumb is 1/64 the size of RAM, but that's much
* more than needed for the Linux guests we support. */
spapr->htab_shift = 18; /* Minimum architected size */
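    /* Grow the shift until 2^(htab_shift + 7) covers all of RAM, i.e. the hash table is at least 1/128 of RAM. */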
while (spapr->htab_shift <= 46) {
if ((1ULL << (spapr->htab_shift + 7)) >= ram_size) {
break;
}
spapr->htab_shift++;
}
/* Set up Interrupt Controller before we create the VCPUs */
spapr->icp = xics_system_init(smp_cpus * kvmppc_smt_threads() / smp_threads,
XICS_IRQS);
/* init CPUs */
if (cpu_model == NULL) {
cpu_model = kvm_enabled() ? "host" : "POWER7";
}
for (i = 0; i < smp_cpus; i++) {
cpu = cpu_ppc_init(cpu_model);
if (cpu == NULL) {
fprintf(stderr, "Unable to find PowerPC CPU definition\n");
exit(1);
}
env = &cpu->env;
/* Set time-base frequency to 512 MHz */
cpu_ppc_tb_init(env, TIMEBASE_FREQ);
/* PAPR always has exception vectors in RAM not ROM. To ensure this,
* MSR[IP] should never be set.
*/
env->msr_mask &= ~(1 << 6);
/* Tell KVM that we're in PAPR mode */
if (kvm_enabled()) {
kvmppc_set_papr(cpu);
}
if (cpu->max_compat) {
if (ppc_set_compat(cpu, cpu->max_compat) < 0) {
exit(1);
}
}
xics_cpu_setup(spapr->icp, cpu);
qemu_register_reset(spapr_cpu_reset, cpu);
}
/* allocate RAM */
spapr->ram_limit = ram_size;
memory_region_allocate_system_memory(ram, NULL, "ppc_spapr.ram",
spapr->ram_limit);
memory_region_add_subregion(sysmem, 0, ram);
if (rma_alloc_size && rma) {
rma_region = g_new(MemoryRegion, 1);
memory_region_init_ram_ptr(rma_region, NULL, "ppc_spapr.rma",
rma_alloc_size, rma);
vmstate_register_ram_global(rma_region);
memory_region_add_subregion(sysmem, 0, rma_region);
}
filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, "spapr-rtas.bin");
spapr->rtas_size = load_image_targphys(filename, spapr->rtas_addr,
rtas_limit - spapr->rtas_addr);
if (spapr->rtas_size < 0) {
hw_error("qemu: could not load LPAR rtas '%s'\n", filename);
exit(1);
}
if (spapr->rtas_size > RTAS_MAX_SIZE) {
hw_error("RTAS too big ! 0x%lx bytes (max is 0x%x)\n",
spapr->rtas_size, RTAS_MAX_SIZE);
exit(1);
}
g_free(filename);
/* Set up EPOW events infrastructure */
spapr_events_init(spapr);
/* Set up VIO bus */
spapr->vio_bus = spapr_vio_bus_init();
for (i = 0; i < MAX_SERIAL_PORTS; i++) {
if (serial_hds[i]) {
spapr_vty_create(spapr->vio_bus, serial_hds[i]);
}
}
/* We always have at least the nvram device on VIO */
spapr_create_nvram(spapr);
/* Set up PCI */
spapr_pci_msi_init(spapr, SPAPR_PCI_MSI_WINDOW);
spapr_pci_rtas_init();
phb = spapr_create_phb(spapr, 0);
for (i = 0; i < nb_nics; i++) {
NICInfo *nd = &nd_table[i];
if (!nd->model) {
nd->model = g_strdup("ibmveth");
}
if (strcmp(nd->model, "ibmveth") == 0) {
spapr_vlan_create(spapr->vio_bus, nd);
} else {
pci_nic_init_nofail(&nd_table[i], phb->bus, nd->model, NULL);
}
}
for (i = 0; i <= drive_get_max_bus(IF_SCSI); i++) {
spapr_vscsi_create(spapr->vio_bus);
}
/* Graphics */
if (spapr_vga_init(phb->bus)) {
spapr->has_graphics = true;
}
if (usb_enabled(spapr->has_graphics)) {
pci_create_simple(phb->bus, -1, "pci-ohci");
if (spapr->has_graphics) {
usbdevice_create("keyboard");
usbdevice_create("mouse");
}
}
if (spapr->rma_size < (MIN_RMA_SLOF << 20)) {
fprintf(stderr, "qemu: pSeries SLOF firmware requires >= "
"%ldM guest RMA (Real Mode Area memory)\n", MIN_RMA_SLOF);
exit(1);
}
if (kernel_filename) {
uint64_t lowaddr = 0;
kernel_size = load_elf(kernel_filename, translate_kernel_address, NULL,
NULL, &lowaddr, NULL, 1, ELF_MACHINE, 0);
if (kernel_size == ELF_LOAD_WRONG_ENDIAN) {
kernel_size = load_elf(kernel_filename,
translate_kernel_address, NULL,
NULL, &lowaddr, NULL, 0, ELF_MACHINE, 0);
kernel_le = kernel_size > 0;
}
if (kernel_size < 0) {
fprintf(stderr, "qemu: error loading %s: %s\n",
kernel_filename, load_elf_strerror(kernel_size));
exit(1);
}
/* load initrd */
if (initrd_filename) {
/* Try to locate the initrd in the gap between the kernel
* and the firmware. Add a bit of space just in case
*/
initrd_base = (KERNEL_LOAD_ADDR + kernel_size + 0x1ffff) & ~0xffff;
initrd_size = load_image_targphys(initrd_filename, initrd_base,
load_limit - initrd_base);
if (initrd_size < 0) {
fprintf(stderr, "qemu: could not load initial ram disk '%s'\n",
initrd_filename);
exit(1);
}
} else {
initrd_base = 0;
initrd_size = 0;
}
}
if (bios_name == NULL) {
bios_name = FW_FILE_NAME;
}
filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE);
if (fw_size < 0) {
hw_error("qemu: could not load LPAR rtas '%s'\n", filename);
exit(1);
}
g_free(filename);
spapr->entry_point = 0x100;
vmstate_register(NULL, 0, &vmstate_spapr, spapr);
register_savevm_live(NULL, "spapr/htab", -1, 1,
&savevm_htab_handlers, spapr);
/* Prepare the device tree */
spapr->fdt_skel = spapr_create_fdt_skel(initrd_base, initrd_size,
kernel_size, kernel_le,
boot_device, kernel_cmdline,
spapr->epow_irq);
assert(spapr->fdt_skel != NULL);
}
| true | qemu | b7d1f77adaab790d20232df261d4e2ff6a77f556 |
26,729 | static int tta_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
TTAContext *c = s->priv_data;
AVStream *st;
int i, channels, bps, samplerate, datalen, framelen, start;
start = url_ftell(&s->pb);
if (get_le32(&s->pb) != ff_get_fourcc("TTA1"))
return -1; // not tta file
url_fskip(&s->pb, 2); // FIXME: flags
channels = get_le16(&s->pb);
bps = get_le16(&s->pb);
samplerate = get_le32(&s->pb);
datalen = get_le32(&s->pb);
url_fskip(&s->pb, 4); // header crc
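    // each TTA frame holds roughly 1.0449 seconds of audio (46080 samples at 44.1 kHz)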
framelen = 1.04489795918367346939 * samplerate;
c->totalframes = datalen / framelen + ((datalen % framelen) ? 1 : 0);
c->currentframe = 0;
c->seektable = av_mallocz(sizeof(uint32_t)*c->totalframes);
if (!c->seektable)
return AVERROR_NOMEM;
for (i = 0; i < c->totalframes; i++)
c->seektable[i] = get_le32(&s->pb);
url_fskip(&s->pb, 4); // seektable crc
st = av_new_stream(s, 0);
// av_set_pts_info(st, 32, 1, 1000);
if (!st)
return AVERROR_NOMEM;
st->codec->codec_type = CODEC_TYPE_AUDIO;
st->codec->codec_id = CODEC_ID_TTA;
st->codec->channels = channels;
st->codec->sample_rate = samplerate;
st->codec->bits_per_sample = bps;
st->codec->extradata_size = url_ftell(&s->pb) - start;
    if(st->codec->extradata_size+FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)st->codec->extradata_size){
        //this check is redundant as get_buffer should fail
        av_log(s, AV_LOG_ERROR, "extradata_size too large\n");
        return -1;
    }
    st->codec->extradata = av_mallocz(st->codec->extradata_size+FF_INPUT_BUFFER_PADDING_SIZE);
    url_fseek(&s->pb, start, SEEK_SET); // or SEEK_CUR and -size ? :)
    get_buffer(&s->pb, st->codec->extradata, st->codec->extradata_size);
    return 0;
}
| true | FFmpeg | a443a2530d00b7019269202ac0f5ca8ba0a021c7
26,731 | void kvm_flush_coalesced_mmio_buffer(void)
{
#ifdef KVM_CAP_COALESCED_MMIO
KVMState *s = kvm_state;
if (s->coalesced_mmio_ring) {
struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
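        /* Drain every pending coalesced MMIO write from the shared ring into guest memory. */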
while (ring->first != ring->last) {
struct kvm_coalesced_mmio *ent;
ent = &ring->coalesced_mmio[ring->first];
cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
smp_wmb();
ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
}
}
#endif
}
| true | qemu | 94a8d39afd8ccfdbf578af04c3385fdb5f545af1 |
26,732 | void in_asm_used_var_warning_killer()
{
volatile int i= yCoeff+vrCoeff+ubCoeff+vgCoeff+ugCoeff+bF8+bFC+w400+w80+w10+
bm00001111+bm00000111+bm11111000+b16Mask+g16Mask+r16Mask+b15Mask+g15Mask+r15Mask+temp0+asm_yalpha1+ asm_uvalpha1+
M24A+M24B+M24C+w02 + funnyYCode[0]+ funnyUVCode[0]+b5Dither+g5Dither+r5Dither+g6Dither+dither4[0]+dither8[0];
if(i) i=0;
}
| true | FFmpeg | 28bf81c90d36a55cf76e2be913c5215ebebf61f2 |
26,733 | static void vc1_inv_trans_4x8_dc_c(uint8_t *dest, int linesize, DCTELEM *block)
{
int i;
int dc = block[0];
const uint8_t *cm;
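    /* DC-only inverse transform: scale the DC value and add it to every pixel of the 4x8 block via the clip table. */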
dc = (17 * dc + 4) >> 3;
dc = (12 * dc + 64) >> 7;
cm = ff_cropTbl + MAX_NEG_CROP + dc;
for(i = 0; i < 8; i++){
dest[0] = cm[dest[0]];
dest[1] = cm[dest[1]];
dest[2] = cm[dest[2]];
dest[3] = cm[dest[3]];
dest += linesize;
}
}
| true | FFmpeg | c23acbaed40101c677dfcfbbfe0d2c230a8e8f44 |
26,735 | static void qxl_add_memslot(PCIQXLDevice *d, uint32_t slot_id, uint64_t delta,
qxl_async_io async)
{
static const int regions[] = {
QXL_RAM_RANGE_INDEX,
QXL_VRAM_RANGE_INDEX,
QXL_VRAM64_RANGE_INDEX,
};
uint64_t guest_start;
uint64_t guest_end;
int pci_region;
pcibus_t pci_start;
pcibus_t pci_end;
intptr_t virt_start;
QXLDevMemSlot memslot;
int i;
guest_start = le64_to_cpu(d->guest_slots[slot_id].slot.mem_start);
guest_end = le64_to_cpu(d->guest_slots[slot_id].slot.mem_end);
trace_qxl_memslot_add_guest(d->id, slot_id, guest_start, guest_end);
PANIC_ON(slot_id >= NUM_MEMSLOTS);
PANIC_ON(guest_start > guest_end);
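    /* Find the PCI BAR that fully contains the guest-provided slot range. */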
for (i = 0; i < ARRAY_SIZE(regions); i++) {
pci_region = regions[i];
pci_start = d->pci.io_regions[pci_region].addr;
pci_end = pci_start + d->pci.io_regions[pci_region].size;
/* mapped? */
if (pci_start == -1) {
continue;
}
/* start address in range ? */
if (guest_start < pci_start || guest_start > pci_end) {
continue;
}
/* end address in range ? */
if (guest_end > pci_end) {
continue;
}
/* passed */
break;
}
PANIC_ON(i == ARRAY_SIZE(regions)); /* finished loop without match */
switch (pci_region) {
case QXL_RAM_RANGE_INDEX:
virt_start = (intptr_t)memory_region_get_ram_ptr(&d->vga.vram);
break;
case QXL_VRAM_RANGE_INDEX:
case 4 /* vram 64bit */:
virt_start = (intptr_t)memory_region_get_ram_ptr(&d->vram_bar);
break;
default:
/* should not happen */
abort();
}
memslot.slot_id = slot_id;
memslot.slot_group_id = MEMSLOT_GROUP_GUEST; /* guest group */
memslot.virt_start = virt_start + (guest_start - pci_start);
memslot.virt_end = virt_start + (guest_end - pci_start);
memslot.addr_delta = memslot.virt_start - delta;
memslot.generation = d->rom->slot_generation = 0;
qxl_rom_set_dirty(d);
qemu_spice_add_memslot(&d->ssd, &memslot, async);
d->guest_slots[slot_id].ptr = (void*)memslot.virt_start;
d->guest_slots[slot_id].size = memslot.virt_end - memslot.virt_start;
d->guest_slots[slot_id].delta = delta;
d->guest_slots[slot_id].active = 1;
}
| true | qemu | e954ea2873fd6621d199d4a1a012fc0bc0292924 |
26,736 | static int nbd_negotiate_handle_info(NBDClient *client, uint16_t myflags,
Error **errp)
{
int rc;
char name[NBD_MAX_NAME_SIZE + 1];
NBDExport *exp;
uint16_t requests;
uint16_t request;
uint32_t namelen;
bool sendname = false;
bool blocksize = false;
uint32_t sizes[3];
char buf[sizeof(uint64_t) + sizeof(uint16_t)];
const char *msg;
/* Client sends:
4 bytes: L, name length (can be 0)
L bytes: export name
2 bytes: N, number of requests (can be 0)
N * 2 bytes: N requests
*/
if (client->optlen < sizeof(namelen) + sizeof(requests)) {
msg = "overall request too short";
goto invalid;
}
if (nbd_read(client->ioc, &namelen, sizeof(namelen), errp) < 0) {
return -EIO;
}
be32_to_cpus(&namelen);
client->optlen -= sizeof(namelen);
if (namelen > client->optlen - sizeof(requests) ||
(client->optlen - namelen) % 2)
{
msg = "name length is incorrect";
goto invalid;
}
if (namelen >= sizeof(name)) {
msg = "name too long for qemu";
goto invalid;
}
if (nbd_read(client->ioc, name, namelen, errp) < 0) {
return -EIO;
}
name[namelen] = '\0';
client->optlen -= namelen;
trace_nbd_negotiate_handle_export_name_request(name);
if (nbd_read(client->ioc, &requests, sizeof(requests), errp) < 0) {
return -EIO;
}
be16_to_cpus(&requests);
client->optlen -= sizeof(requests);
trace_nbd_negotiate_handle_info_requests(requests);
if (requests != client->optlen / sizeof(request)) {
msg = "incorrect number of requests for overall length";
goto invalid;
}
while (requests--) {
if (nbd_read(client->ioc, &request, sizeof(request), errp) < 0) {
return -EIO;
}
be16_to_cpus(&request);
client->optlen -= sizeof(request);
trace_nbd_negotiate_handle_info_request(request,
nbd_info_lookup(request));
/* We care about NBD_INFO_NAME and NBD_INFO_BLOCK_SIZE;
* everything else is either a request we don't know or
* something we send regardless of request */
switch (request) {
case NBD_INFO_NAME:
sendname = true;
break;
case NBD_INFO_BLOCK_SIZE:
blocksize = true;
break;
}
}
assert(client->optlen == 0);
exp = nbd_export_find(name);
if (!exp) {
return nbd_negotiate_send_rep_err(client, NBD_REP_ERR_UNKNOWN,
errp, "export '%s' not present",
name);
}
/* Don't bother sending NBD_INFO_NAME unless client requested it */
if (sendname) {
rc = nbd_negotiate_send_info(client, NBD_INFO_NAME, namelen, name,
errp);
if (rc < 0) {
return rc;
}
}
/* Send NBD_INFO_DESCRIPTION only if available, regardless of
* client request */
if (exp->description) {
size_t len = strlen(exp->description);
rc = nbd_negotiate_send_info(client, NBD_INFO_DESCRIPTION,
len, exp->description, errp);
if (rc < 0) {
return rc;
}
}
/* Send NBD_INFO_BLOCK_SIZE always, but tweak the minimum size
* according to whether the client requested it, and according to
* whether this is OPT_INFO or OPT_GO. */
/* minimum - 1 for back-compat, or 512 if client is new enough.
* TODO: consult blk_bs(blk)->bl.request_alignment? */
sizes[0] =
(client->opt == NBD_OPT_INFO || blocksize) ? BDRV_SECTOR_SIZE : 1;
/* preferred - Hard-code to 4096 for now.
* TODO: is blk_bs(blk)->bl.opt_transfer appropriate? */
sizes[1] = 4096;
/* maximum - At most 32M, but smaller as appropriate. */
sizes[2] = MIN(blk_get_max_transfer(exp->blk), NBD_MAX_BUFFER_SIZE);
trace_nbd_negotiate_handle_info_block_size(sizes[0], sizes[1], sizes[2]);
cpu_to_be32s(&sizes[0]);
cpu_to_be32s(&sizes[1]);
cpu_to_be32s(&sizes[2]);
rc = nbd_negotiate_send_info(client, NBD_INFO_BLOCK_SIZE,
sizeof(sizes), sizes, errp);
if (rc < 0) {
return rc;
}
/* Send NBD_INFO_EXPORT always */
trace_nbd_negotiate_new_style_size_flags(exp->size,
exp->nbdflags | myflags);
stq_be_p(buf, exp->size);
stw_be_p(buf + 8, exp->nbdflags | myflags);
rc = nbd_negotiate_send_info(client, NBD_INFO_EXPORT,
sizeof(buf), buf, errp);
if (rc < 0) {
return rc;
}
/* If the client is just asking for NBD_OPT_INFO, but forgot to
* request block sizes, return an error.
* TODO: consult blk_bs(blk)->request_align, and only error if it
* is not 1? */
if (client->opt == NBD_OPT_INFO && !blocksize) {
return nbd_negotiate_send_rep_err(client,
NBD_REP_ERR_BLOCK_SIZE_REQD,
errp,
"request NBD_INFO_BLOCK_SIZE to "
"use this export");
}
/* Final reply */
rc = nbd_negotiate_send_rep(client, NBD_REP_ACK, errp);
if (rc < 0) {
return rc;
}
if (client->opt == NBD_OPT_GO) {
client->exp = exp;
QTAILQ_INSERT_TAIL(&client->exp->clients, client, next);
nbd_export_get(client->exp);
rc = 1;
}
return rc;
invalid:
if (nbd_drop(client->ioc, client->optlen, errp) < 0) {
return -EIO;
}
return nbd_negotiate_send_rep_err(client, NBD_REP_ERR_INVALID,
errp, "%s", msg);
}
| true | qemu | 894e02804c862c6940b43a0a488164655d3fb3f0 |
26,737 | static void qemu_rdma_init_one_block(void *host_addr,
ram_addr_t block_offset, ram_addr_t length, void *opaque)
{
__qemu_rdma_add_block(opaque, host_addr, block_offset, length);
}
| true | qemu | 60fe637bf0e4d7989e21e50f52526444765c63b4 |
26,740 | static int qsv_decode(AVCodecContext *avctx, QSVContext *q,
AVFrame *frame, int *got_frame,
AVPacket *avpkt)
{
QSVFrame *out_frame;
mfxFrameSurface1 *insurf;
mfxFrameSurface1 *outsurf;
mfxSyncPoint *sync;
mfxBitstream bs = { { { 0 } } };
int ret;
if (avpkt->size) {
bs.Data = avpkt->data;
bs.DataLength = avpkt->size;
bs.MaxLength = bs.DataLength;
bs.TimeStamp = avpkt->pts;
}
sync = av_mallocz(sizeof(*sync));
if (!sync) {
av_freep(&sync);
return AVERROR(ENOMEM);
}
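    /* Feed the bitstream to the decoder, retrying while the device is busy or more working surfaces are needed. */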
do {
ret = get_surface(avctx, q, &insurf);
if (ret < 0)
return ret;
ret = MFXVideoDECODE_DecodeFrameAsync(q->session, avpkt->size ? &bs : NULL,
insurf, &outsurf, sync);
if (ret == MFX_WRN_DEVICE_BUSY)
av_usleep(500);
} while (ret == MFX_WRN_DEVICE_BUSY || ret == MFX_ERR_MORE_SURFACE);
if (ret != MFX_ERR_NONE &&
ret != MFX_ERR_MORE_DATA &&
ret != MFX_WRN_VIDEO_PARAM_CHANGED &&
ret != MFX_ERR_MORE_SURFACE) {
av_log(avctx, AV_LOG_ERROR, "Error during QSV decoding.\n");
av_freep(&sync);
return ff_qsv_error(ret);
}
/* make sure we do not enter an infinite loop if the SDK
* did not consume any data and did not return anything */
if (!*sync && !bs.DataOffset) {
av_log(avctx, AV_LOG_WARNING, "A decode call did not consume any data\n");
bs.DataOffset = avpkt->size;
}
if (*sync) {
QSVFrame *out_frame = find_frame(q, outsurf);
if (!out_frame) {
av_log(avctx, AV_LOG_ERROR,
"The returned surface does not correspond to any frame\n");
av_freep(&sync);
return AVERROR_BUG;
}
out_frame->queued = 1;
av_fifo_generic_write(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
av_fifo_generic_write(q->async_fifo, &sync, sizeof(sync), NULL);
} else {
av_freep(&sync);
}
if (!av_fifo_space(q->async_fifo) ||
(!avpkt->size && av_fifo_size(q->async_fifo))) {
AVFrame *src_frame;
av_fifo_generic_read(q->async_fifo, &out_frame, sizeof(out_frame), NULL);
av_fifo_generic_read(q->async_fifo, &sync, sizeof(sync), NULL);
out_frame->queued = 0;
do {
ret = MFXVideoCORE_SyncOperation(q->session, *sync, 1000);
} while (ret == MFX_WRN_IN_EXECUTION);
av_freep(&sync);
src_frame = out_frame->frame;
ret = av_frame_ref(frame, src_frame);
if (ret < 0)
return ret;
outsurf = out_frame->surface;
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
frame->pkt_pts = outsurf->Data.TimeStamp;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
frame->pts = outsurf->Data.TimeStamp;
frame->repeat_pict =
outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_TRIPLING ? 4 :
outsurf->Info.PicStruct & MFX_PICSTRUCT_FRAME_DOUBLING ? 2 :
outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_REPEATED ? 1 : 0;
frame->top_field_first =
outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF;
frame->interlaced_frame =
!(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);
*got_frame = 1;
}
return bs.DataOffset;
}
| true | FFmpeg | b6f80b16d1a82463a77352b8756e1cdcaa3a33d0 |
26,741 | static int cmp(const void *a, const void *b)
{
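    /* qsort-style ascending comparison of two doubles */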
const double va = *(const double *)a, vb = *(const double *)b;
return va < vb ? -1 : ( va > vb ? 1 : 0 );
}
| true | FFmpeg | 92e483f8ed70d88d4f64337f65bae212502735d4 |
26,742 | static void usb_ohci_init(OHCIState *ohci, DeviceState *dev,
int num_ports, dma_addr_t localmem_base,
char *masterbus, uint32_t firstport,
AddressSpace *as, Error **errp)
{
Error *err = NULL;
int i;
ohci->as = as;
if (usb_frame_time == 0) {
#ifdef OHCI_TIME_WARP
usb_frame_time = get_ticks_per_sec();
usb_bit_time = muldiv64(1, get_ticks_per_sec(), USB_HZ/1000);
#else
usb_frame_time = muldiv64(1, get_ticks_per_sec(), 1000);
if (get_ticks_per_sec() >= USB_HZ) {
usb_bit_time = muldiv64(1, get_ticks_per_sec(), USB_HZ);
} else {
usb_bit_time = 1;
}
#endif
trace_usb_ohci_init_time(usb_frame_time, usb_bit_time);
}
ohci->num_ports = num_ports;
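    /* Either register the root-hub ports as companion ports on an existing master bus, or create a standalone OHCI bus. */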
if (masterbus) {
USBPort *ports[OHCI_MAX_PORTS];
for(i = 0; i < num_ports; i++) {
ports[i] = &ohci->rhport[i].port;
}
usb_register_companion(masterbus, ports, num_ports,
firstport, ohci, &ohci_port_ops,
USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL,
&err);
if (err) {
error_propagate(errp, err);
return;
}
} else {
usb_bus_new(&ohci->bus, sizeof(ohci->bus), &ohci_bus_ops, dev);
for (i = 0; i < num_ports; i++) {
usb_register_port(&ohci->bus, &ohci->rhport[i].port,
ohci, i, &ohci_port_ops,
USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL);
}
}
memory_region_init_io(&ohci->mem, OBJECT(dev), &ohci_mem_ops,
ohci, "ohci", 256);
ohci->localmem_base = localmem_base;
ohci->name = object_get_typename(OBJECT(dev));
usb_packet_init(&ohci->usb_packet);
ohci->async_td = 0;
qemu_register_reset(ohci_reset, ohci);
}
| true | qemu | 88dd1b8d0063ff16c54dc19c8b52508a00108f50 |
26,743 | static void read_len_table(uint8_t *dst, GetBitContext *gb){
int i, val, repeat;
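    // run-length coded table: a 3-bit repeat count (0 means an explicit 8-bit count follows) and a 5-bit length value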
for(i=0; i<256;){
repeat= get_bits(gb, 3);
val = get_bits(gb, 5);
if(repeat==0)
repeat= get_bits(gb, 8);
//printf("%d %d\n", val, repeat);
while (repeat--)
dst[i++] = val;
}
}
| true | FFmpeg | e30004fa733ec64b6ff90678098c1f1132d4d603 |
26,744 | static int dca_parse(AVCodecParserContext *s, AVCodecContext *avctx,
const uint8_t **poutbuf, int *poutbuf_size,
const uint8_t *buf, int buf_size)
{
DCAParseContext *pc1 = s->priv_data;
ParseContext *pc = &pc1->pc;
int next, duration, sample_rate;
if (s->flags & PARSER_FLAG_COMPLETE_FRAMES) {
next = buf_size;
} else {
next = dca_find_frame_end(pc1, buf, buf_size);
if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) {
*poutbuf = NULL;
*poutbuf_size = 0;
return buf_size;
}
}
/* read the duration and sample rate from the frame header */
if (!dca_parse_params(buf, buf_size, &duration, &sample_rate, &pc1->framesize)) {
s->duration = duration;
avctx->sample_rate = sample_rate;
} else
s->duration = 0;
*poutbuf = buf;
*poutbuf_size = buf_size;
return next;
}
| true | FFmpeg | 00ae5b401b24592a9f7019baada5b349152ee2fc |
26,747 | void error_setg(Error **errp, const char *fmt, ...)
{
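    /* Convenience wrapper: format the message and report it as a generic error. */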
va_list ap;
va_start(ap, fmt);
error_setv(errp, ERROR_CLASS_GENERIC_ERROR, fmt, ap);
va_end(ap);
}
| true | qemu | 1e9b65bb1bad51735cab6c861c29b592dccabf0e |
26,748 | static void patch_pci_windows(PcPciInfo *pci, uint8_t *start, unsigned size)
{
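    /* Patch the 32-bit PCI window bounds (and the 64-bit window, when present) into the ACPI blob as little-endian values. */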
*ACPI_BUILD_PTR(start, size, acpi_pci32_start[0], uint32_t) =
cpu_to_le32(pci->w32.begin);
*ACPI_BUILD_PTR(start, size, acpi_pci32_end[0], uint32_t) =
cpu_to_le32(pci->w32.end - 1);
if (pci->w64.end || pci->w64.begin) {
*ACPI_BUILD_PTR(start, size, acpi_pci64_valid[0], uint8_t) = 1;
*ACPI_BUILD_PTR(start, size, acpi_pci64_start[0], uint64_t) =
cpu_to_le64(pci->w64.begin);
*ACPI_BUILD_PTR(start, size, acpi_pci64_end[0], uint64_t) =
cpu_to_le64(pci->w64.end - 1);
*ACPI_BUILD_PTR(start, size, acpi_pci64_length[0], uint64_t) =
cpu_to_le64(pci->w64.end - pci->w64.begin);
} else {
*ACPI_BUILD_PTR(start, size, acpi_pci64_valid[0], uint8_t) = 0;
}
}
| true | qemu | b4e5a4bffda0d5dd79c87c66f28a5fac87182e30 |
26,750 | static int decode_header(EXRContext *s)
{
int magic_number, version, i, flags, sar = 0;
int layer_match = 0;
s->current_channel_offset = 0;
s->xmin = ~0;
s->xmax = ~0;
s->ymin = ~0;
s->ymax = ~0;
s->xdelta = ~0;
s->ydelta = ~0;
s->channel_offsets[0] = -1;
s->channel_offsets[1] = -1;
s->channel_offsets[2] = -1;
s->channel_offsets[3] = -1;
s->pixel_type = EXR_UNKNOWN;
s->compression = EXR_UNKN;
s->nb_channels = 0;
s->w = 0;
s->h = 0;
s->tile_attr.xSize = -1;
s->tile_attr.ySize = -1;
s->is_tile = 0;
s->is_luma = 0;
if (bytestream2_get_bytes_left(&s->gb) < 10) {
av_log(s->avctx, AV_LOG_ERROR, "Header too short to parse.\n");
return AVERROR_INVALIDDATA;
}
magic_number = bytestream2_get_le32(&s->gb);
if (magic_number != 20000630) {
/* As per documentation of OpenEXR, it is supposed to be
* int 20000630 little-endian */
av_log(s->avctx, AV_LOG_ERROR, "Wrong magic number %d.\n", magic_number);
return AVERROR_INVALIDDATA;
}
version = bytestream2_get_byte(&s->gb);
if (version != 2) {
avpriv_report_missing_feature(s->avctx, "Version %d", version);
return AVERROR_PATCHWELCOME;
}
flags = bytestream2_get_le24(&s->gb);
if (flags == 0x00)
s->is_tile = 0;
else if (flags & 0x02)
s->is_tile = 1;
else{
avpriv_report_missing_feature(s->avctx, "flags %d", flags);
return AVERROR_PATCHWELCOME;
}
// Parse the header
while (bytestream2_get_bytes_left(&s->gb) > 0 && *s->gb.buffer) {
int var_size;
if ((var_size = check_header_variable(s, "channels",
"chlist", 38)) >= 0) {
GetByteContext ch_gb;
if (!var_size)
return AVERROR_INVALIDDATA;
bytestream2_init(&ch_gb, s->gb.buffer, var_size);
while (bytestream2_get_bytes_left(&ch_gb) >= 19) {
EXRChannel *channel;
enum ExrPixelType current_pixel_type;
int channel_index = -1;
int xsub, ysub;
if (strcmp(s->layer, "") != 0) {
if (strncmp(ch_gb.buffer, s->layer, strlen(s->layer)) == 0) {
layer_match = 1;
av_log(s->avctx, AV_LOG_INFO,
"Channel match layer : %s.\n", ch_gb.buffer);
ch_gb.buffer += strlen(s->layer);
if (*ch_gb.buffer == '.')
ch_gb.buffer++; /* skip dot if not given */
} else {
av_log(s->avctx, AV_LOG_INFO,
"Channel doesn't match layer : %s.\n", ch_gb.buffer);
}
} else {
layer_match = 1;
}
if (layer_match) { /* only search channel if the layer match is valid */
if (!strcmp(ch_gb.buffer, "R") ||
!strcmp(ch_gb.buffer, "X") ||
!strcmp(ch_gb.buffer, "U")) {
channel_index = 0;
s->is_luma = 0;
} else if (!strcmp(ch_gb.buffer, "G") ||
!strcmp(ch_gb.buffer, "V")) {
channel_index = 1;
s->is_luma = 0;
} else if (!strcmp(ch_gb.buffer, "Y")) {
channel_index = 1;
s->is_luma = 1;
} else if (!strcmp(ch_gb.buffer, "B") ||
!strcmp(ch_gb.buffer, "Z") ||
!strcmp(ch_gb.buffer, "W")){
channel_index = 2;
s->is_luma = 0;
} else if (!strcmp(ch_gb.buffer, "A")) {
channel_index = 3;
} else {
av_log(s->avctx, AV_LOG_WARNING,
"Unsupported channel %.256s.\n", ch_gb.buffer);
}
}
/* skip until you get a 0 */
while (bytestream2_get_bytes_left(&ch_gb) > 0 &&
bytestream2_get_byte(&ch_gb))
continue;
if (bytestream2_get_bytes_left(&ch_gb) < 4) {
av_log(s->avctx, AV_LOG_ERROR, "Incomplete header.\n");
return AVERROR_INVALIDDATA;
}
current_pixel_type = bytestream2_get_le32(&ch_gb);
if (current_pixel_type >= EXR_UNKNOWN) {
avpriv_report_missing_feature(s->avctx, "Pixel type %d",
current_pixel_type);
return AVERROR_PATCHWELCOME;
}
bytestream2_skip(&ch_gb, 4);
xsub = bytestream2_get_le32(&ch_gb);
ysub = bytestream2_get_le32(&ch_gb);
if (xsub != 1 || ysub != 1) {
avpriv_report_missing_feature(s->avctx,
"Subsampling %dx%d",
xsub, ysub);
return AVERROR_PATCHWELCOME;
}
                if (s->channel_offsets[channel_index] == -1) { /* channel has not been previously assigned */
if (channel_index >= 0) {
if (s->pixel_type != EXR_UNKNOWN &&
s->pixel_type != current_pixel_type) {
av_log(s->avctx, AV_LOG_ERROR,
"RGB channels not of the same depth.\n");
return AVERROR_INVALIDDATA;
}
s->pixel_type = current_pixel_type;
s->channel_offsets[channel_index] = s->current_channel_offset;
}
}
s->channels = av_realloc(s->channels,
++s->nb_channels * sizeof(EXRChannel));
if (!s->channels)
return AVERROR(ENOMEM);
channel = &s->channels[s->nb_channels - 1];
channel->pixel_type = current_pixel_type;
channel->xsub = xsub;
channel->ysub = ysub;
s->current_channel_offset += 1 << current_pixel_type;
}
/* Check if all channels are set with an offset or if the channels
* are causing an overflow */
if (!s->is_luma){/* if we expected to have at least 3 channels */
if (FFMIN3(s->channel_offsets[0],
s->channel_offsets[1],
s->channel_offsets[2]) < 0) {
if (s->channel_offsets[0] < 0)
av_log(s->avctx, AV_LOG_ERROR, "Missing red channel.\n");
if (s->channel_offsets[1] < 0)
av_log(s->avctx, AV_LOG_ERROR, "Missing green channel.\n");
if (s->channel_offsets[2] < 0)
av_log(s->avctx, AV_LOG_ERROR, "Missing blue channel.\n");
return AVERROR_INVALIDDATA;
}
}
// skip one last byte and update main gb
s->gb.buffer = ch_gb.buffer + 1;
continue;
} else if ((var_size = check_header_variable(s, "dataWindow", "box2i",
31)) >= 0) {
if (!var_size)
return AVERROR_INVALIDDATA;
s->xmin = bytestream2_get_le32(&s->gb);
s->ymin = bytestream2_get_le32(&s->gb);
s->xmax = bytestream2_get_le32(&s->gb);
s->ymax = bytestream2_get_le32(&s->gb);
s->xdelta = (s->xmax - s->xmin) + 1;
s->ydelta = (s->ymax - s->ymin) + 1;
continue;
} else if ((var_size = check_header_variable(s, "displayWindow",
"box2i", 34)) >= 0) {
if (!var_size)
return AVERROR_INVALIDDATA;
bytestream2_skip(&s->gb, 8);
s->w = bytestream2_get_le32(&s->gb) + 1;
s->h = bytestream2_get_le32(&s->gb) + 1;
continue;
} else if ((var_size = check_header_variable(s, "lineOrder",
"lineOrder", 25)) >= 0) {
int line_order;
if (!var_size)
return AVERROR_INVALIDDATA;
line_order = bytestream2_get_byte(&s->gb);
av_log(s->avctx, AV_LOG_DEBUG, "line order: %d.\n", line_order);
if (line_order > 2) {
av_log(s->avctx, AV_LOG_ERROR, "Unknown line order.\n");
return AVERROR_INVALIDDATA;
}
continue;
} else if ((var_size = check_header_variable(s, "pixelAspectRatio",
"float", 31)) >= 0) {
if (!var_size)
return AVERROR_INVALIDDATA;
sar = bytestream2_get_le32(&s->gb);
continue;
} else if ((var_size = check_header_variable(s, "compression",
"compression", 29)) >= 0) {
if (!var_size)
return AVERROR_INVALIDDATA;
if (s->compression == EXR_UNKN)
s->compression = bytestream2_get_byte(&s->gb);
else
av_log(s->avctx, AV_LOG_WARNING,
"Found more than one compression attribute.\n");
continue;
} else if ((var_size = check_header_variable(s, "tiles",
"tiledesc", 22)) >= 0) {
char tileLevel;
if (!s->is_tile)
av_log(s->avctx, AV_LOG_WARNING,
"Found tile attribute and scanline flags. Exr will be interpreted as scanline.\n");
s->tile_attr.xSize = bytestream2_get_le32(&s->gb);
s->tile_attr.ySize = bytestream2_get_le32(&s->gb);
tileLevel = bytestream2_get_byte(&s->gb);
s->tile_attr.level_mode = tileLevel & 0x0f;
s->tile_attr.level_round = (tileLevel >> 4) & 0x0f;
if (s->tile_attr.level_mode >= EXR_TILE_LEVEL_UNKNOWN){
avpriv_report_missing_feature(s->avctx, "Tile level mode %d",
s->tile_attr.level_mode);
return AVERROR_PATCHWELCOME;
}
if (s->tile_attr.level_round >= EXR_TILE_ROUND_UNKNOWN) {
avpriv_report_missing_feature(s->avctx, "Tile level round %d",
s->tile_attr.level_round);
return AVERROR_PATCHWELCOME;
}
continue;
}
// Check if there are enough bytes for a header
if (bytestream2_get_bytes_left(&s->gb) <= 9) {
av_log(s->avctx, AV_LOG_ERROR, "Incomplete header\n");
return AVERROR_INVALIDDATA;
}
// Process unknown variables
for (i = 0; i < 2; i++) // value_name and value_type
while (bytestream2_get_byte(&s->gb) != 0);
// Skip variable length
bytestream2_skip(&s->gb, bytestream2_get_le32(&s->gb));
}
ff_set_sar(s->avctx, av_d2q(av_int2float(sar), 255));
if (s->compression == EXR_UNKN) {
av_log(s->avctx, AV_LOG_ERROR, "Missing compression attribute.\n");
return AVERROR_INVALIDDATA;
}
if (s->is_tile) {
if (s->tile_attr.xSize < 1 || s->tile_attr.ySize < 1) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid tile attribute.\n");
return AVERROR_INVALIDDATA;
}
}
if (bytestream2_get_bytes_left(&s->gb) <= 0) {
av_log(s->avctx, AV_LOG_ERROR, "Incomplete frame.\n");
return AVERROR_INVALIDDATA;
}
// aaand we are done
bytestream2_skip(&s->gb, 1);
return 0;
}
| true | FFmpeg | ffdc5d09e498bee8176c9e35df101c01c546a738 |
26,751 | static void encode_block(NellyMoserEncodeContext *s, unsigned char *output, int output_size)
{
PutBitContext pb;
int i, j, band, block, best_idx, power_idx = 0;
float power_val, coeff, coeff_sum;
float pows[NELLY_FILL_LEN];
int bits[NELLY_BUF_LEN], idx_table[NELLY_BANDS];
float cand[NELLY_BANDS];
apply_mdct(s);
init_put_bits(&pb, output, output_size * 8);
i = 0;
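    /* Measure each band's energy across both MDCT half-frames and convert it to a log-domain exponent candidate. */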
for (band = 0; band < NELLY_BANDS; band++) {
coeff_sum = 0;
for (j = 0; j < ff_nelly_band_sizes_table[band]; i++, j++) {
coeff_sum += s->mdct_out[i ] * s->mdct_out[i ]
+ s->mdct_out[i + NELLY_BUF_LEN] * s->mdct_out[i + NELLY_BUF_LEN];
}
cand[band] =
log(FFMAX(1.0, coeff_sum / (ff_nelly_band_sizes_table[band] << 7))) * 1024.0 / M_LN2;
}
if (s->avctx->trellis) {
get_exponent_dynamic(s, cand, idx_table);
} else {
get_exponent_greedy(s, cand, idx_table);
}
i = 0;
for (band = 0; band < NELLY_BANDS; band++) {
if (band) {
power_idx += ff_nelly_delta_table[idx_table[band]];
put_bits(&pb, 5, idx_table[band]);
} else {
power_idx = ff_nelly_init_table[idx_table[0]];
put_bits(&pb, 6, idx_table[0]);
}
power_val = pow_table[power_idx & 0x7FF] / (1 << ((power_idx >> 11) + POW_TABLE_OFFSET));
for (j = 0; j < ff_nelly_band_sizes_table[band]; i++, j++) {
s->mdct_out[i] *= power_val;
s->mdct_out[i + NELLY_BUF_LEN] *= power_val;
pows[i] = power_idx;
}
}
ff_nelly_get_sample_bits(pows, bits);
for (block = 0; block < 2; block++) {
for (i = 0; i < NELLY_FILL_LEN; i++) {
if (bits[i] > 0) {
const float *table = ff_nelly_dequantization_table + (1 << bits[i]) - 1;
coeff = s->mdct_out[block * NELLY_BUF_LEN + i];
best_idx =
quant_lut[av_clip (
coeff * quant_lut_mul[bits[i]] + quant_lut_add[bits[i]],
quant_lut_offset[bits[i]],
quant_lut_offset[bits[i]+1] - 1
)];
if (fabs(coeff - table[best_idx]) > fabs(coeff - table[best_idx + 1]))
best_idx++;
put_bits(&pb, bits[i], best_idx);
}
}
if (!block)
put_bits(&pb, NELLY_HEADER_BITS + NELLY_DETAIL_BITS - put_bits_count(&pb), 0);
}
flush_put_bits(&pb);
memset(put_bits_ptr(&pb), 0, output + output_size - put_bits_ptr(&pb));
}
| true | FFmpeg | 50833c9f7b4e1922197a8955669f8ab3589c8cef |
26,752 | hwaddr uc32_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
UniCore32CPU *cpu = UNICORE32_CPU(cs);
cpu_abort(CPU(cpu), "%s not supported yet\n", __func__);
return addr;
}
| true | qemu | 0ac241bcf9f9d99a252a352a162f4b13b24732ab |
26,753 | static char *SocketAddress_to_str(const char *prefix, SocketAddress *addr,
bool is_listen, bool is_telnet)
{
switch (addr->type) {
case SOCKET_ADDRESS_KIND_INET:
return g_strdup_printf("%s%s:%s:%s%s", prefix,
is_telnet ? "telnet" : "tcp",
addr->u.inet.data->host,
addr->u.inet.data->port,
is_listen ? ",server" : "");
break;
case SOCKET_ADDRESS_KIND_UNIX:
return g_strdup_printf("%sunix:%s%s", prefix,
addr->u.q_unix.data->path,
is_listen ? ",server" : "");
break;
case SOCKET_ADDRESS_KIND_FD:
return g_strdup_printf("%sfd:%s%s", prefix, addr->u.fd.data->str,
is_listen ? ",server" : "");
break;
default:
abort();
}
}
| true | qemu | d2e49aad7259af943b72be761ee5c18e14acd71a
26,755 | static void dnxhd_decode_dct_block_10(const DNXHDContext *ctx,
RowContext *row, int n)
{
dnxhd_decode_dct_block(ctx, row, n, 6, 8, 4);
}
| true | FFmpeg | b8b8e82ea14016b2cb04b49ecea57f836e6ee7f8 |
26,756 | static void opt_qsquish(const char *arg)
{
video_qsquish = atof(arg);
if (video_qsquish < 0.0 ||
video_qsquish > 99.0) {
fprintf(stderr, "qsquish must be >= 0.0 and <= 99.0\n");
exit(1);
}
}
| false | FFmpeg | 58bba31e3f22bb07645a764602603364b1ec953d |
26,757 | static int create_vorbis_context(vorbis_enc_context *venc,
AVCodecContext *avctx)
{
vorbis_enc_floor *fc;
vorbis_enc_residue *rc;
vorbis_enc_mapping *mc;
int i, book, ret;
venc->channels = avctx->channels;
venc->sample_rate = avctx->sample_rate;
venc->log2_blocksize[0] = venc->log2_blocksize[1] = 11;
venc->ncodebooks = FF_ARRAY_ELEMS(cvectors);
venc->codebooks = av_malloc(sizeof(vorbis_enc_codebook) * venc->ncodebooks);
if (!venc->codebooks)
return AVERROR(ENOMEM);
// codebook 0..14 - floor1 book, values 0..255
// codebook 15 residue masterbook
// codebook 16..29 residue
for (book = 0; book < venc->ncodebooks; book++) {
vorbis_enc_codebook *cb = &venc->codebooks[book];
int vals;
cb->ndimensions = cvectors[book].dim;
cb->nentries = cvectors[book].real_len;
cb->min = cvectors[book].min;
cb->delta = cvectors[book].delta;
cb->lookup = cvectors[book].lookup;
cb->seq_p = 0;
cb->lens = av_malloc_array(cb->nentries, sizeof(uint8_t));
cb->codewords = av_malloc_array(cb->nentries, sizeof(uint32_t));
if (!cb->lens || !cb->codewords)
return AVERROR(ENOMEM);
memcpy(cb->lens, cvectors[book].clens, cvectors[book].len);
memset(cb->lens + cvectors[book].len, 0, cb->nentries - cvectors[book].len);
if (cb->lookup) {
vals = cb_lookup_vals(cb->lookup, cb->ndimensions, cb->nentries);
cb->quantlist = av_malloc_array(vals, sizeof(int));
if (!cb->quantlist)
return AVERROR(ENOMEM);
for (i = 0; i < vals; i++)
cb->quantlist[i] = cvectors[book].quant[i];
} else {
cb->quantlist = NULL;
}
if ((ret = ready_codebook(cb)) < 0)
return ret;
}
venc->nfloors = 1;
venc->floors = av_malloc(sizeof(vorbis_enc_floor) * venc->nfloors);
if (!venc->floors)
return AVERROR(ENOMEM);
// just 1 floor
fc = &venc->floors[0];
fc->partitions = NUM_FLOOR_PARTITIONS;
fc->partition_to_class = av_malloc(sizeof(int) * fc->partitions);
if (!fc->partition_to_class)
return AVERROR(ENOMEM);
fc->nclasses = 0;
for (i = 0; i < fc->partitions; i++) {
static const int a[] = {0, 1, 2, 2, 3, 3, 4, 4};
fc->partition_to_class[i] = a[i];
fc->nclasses = FFMAX(fc->nclasses, fc->partition_to_class[i]);
}
fc->nclasses++;
fc->classes = av_malloc_array(fc->nclasses, sizeof(vorbis_enc_floor_class));
if (!fc->classes)
return AVERROR(ENOMEM);
for (i = 0; i < fc->nclasses; i++) {
vorbis_enc_floor_class * c = &fc->classes[i];
int j, books;
c->dim = floor_classes[i].dim;
c->subclass = floor_classes[i].subclass;
c->masterbook = floor_classes[i].masterbook;
books = (1 << c->subclass);
c->books = av_malloc_array(books, sizeof(int));
if (!c->books)
return AVERROR(ENOMEM);
for (j = 0; j < books; j++)
c->books[j] = floor_classes[i].nbooks[j];
}
fc->multiplier = 2;
fc->rangebits = venc->log2_blocksize[0] - 1;
fc->values = 2;
for (i = 0; i < fc->partitions; i++)
fc->values += fc->classes[fc->partition_to_class[i]].dim;
fc->list = av_malloc_array(fc->values, sizeof(vorbis_floor1_entry));
if (!fc->list)
return AVERROR(ENOMEM);
fc->list[0].x = 0;
fc->list[1].x = 1 << fc->rangebits;
for (i = 2; i < fc->values; i++) {
static const int a[] = {
93, 23,372, 6, 46,186,750, 14, 33, 65,
130,260,556, 3, 10, 18, 28, 39, 55, 79,
111,158,220,312,464,650,850
};
fc->list[i].x = a[i - 2];
}
if (ff_vorbis_ready_floor1_list(avctx, fc->list, fc->values))
return AVERROR_BUG;
venc->nresidues = 1;
venc->residues = av_malloc(sizeof(vorbis_enc_residue) * venc->nresidues);
if (!venc->residues)
return AVERROR(ENOMEM);
// single residue
rc = &venc->residues[0];
rc->type = 2;
rc->begin = 0;
rc->end = 1600;
rc->partition_size = 32;
rc->classifications = 10;
rc->classbook = 15;
rc->books = av_malloc(sizeof(*rc->books) * rc->classifications);
if (!rc->books)
return AVERROR(ENOMEM);
{
static const int8_t a[10][8] = {
{ -1, -1, -1, -1, -1, -1, -1, -1, },
{ -1, -1, 16, -1, -1, -1, -1, -1, },
{ -1, -1, 17, -1, -1, -1, -1, -1, },
{ -1, -1, 18, -1, -1, -1, -1, -1, },
{ -1, -1, 19, -1, -1, -1, -1, -1, },
{ -1, -1, 20, -1, -1, -1, -1, -1, },
{ -1, -1, 21, -1, -1, -1, -1, -1, },
{ 22, 23, -1, -1, -1, -1, -1, -1, },
{ 24, 25, -1, -1, -1, -1, -1, -1, },
{ 26, 27, 28, -1, -1, -1, -1, -1, },
};
memcpy(rc->books, a, sizeof a);
}
if ((ret = ready_residue(rc, venc)) < 0)
return ret;
venc->nmappings = 1;
venc->mappings = av_malloc(sizeof(vorbis_enc_mapping) * venc->nmappings);
if (!venc->mappings)
return AVERROR(ENOMEM);
// single mapping
mc = &venc->mappings[0];
mc->submaps = 1;
mc->mux = av_malloc(sizeof(int) * venc->channels);
if (!mc->mux)
return AVERROR(ENOMEM);
for (i = 0; i < venc->channels; i++)
mc->mux[i] = 0;
mc->floor = av_malloc(sizeof(int) * mc->submaps);
mc->residue = av_malloc(sizeof(int) * mc->submaps);
if (!mc->floor || !mc->residue)
return AVERROR(ENOMEM);
for (i = 0; i < mc->submaps; i++) {
mc->floor[i] = 0;
mc->residue[i] = 0;
}
mc->coupling_steps = venc->channels == 2 ? 1 : 0;
mc->magnitude = av_malloc(sizeof(int) * mc->coupling_steps);
mc->angle = av_malloc(sizeof(int) * mc->coupling_steps);
if (!mc->magnitude || !mc->angle)
return AVERROR(ENOMEM);
if (mc->coupling_steps) {
mc->magnitude[0] = 0;
mc->angle[0] = 1;
}
venc->nmodes = 1;
venc->modes = av_malloc(sizeof(vorbis_enc_mode) * venc->nmodes);
if (!venc->modes)
return AVERROR(ENOMEM);
// single mode
venc->modes[0].blockflag = 0;
venc->modes[0].mapping = 0;
venc->have_saved = 0;
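    // per-channel work buffers: saved overlap from the previous block, input samples, floor curve, coefficients and scratch space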
venc->saved = av_malloc_array(sizeof(float) * venc->channels, (1 << venc->log2_blocksize[1]) / 2);
venc->samples = av_malloc_array(sizeof(float) * venc->channels, (1 << venc->log2_blocksize[1]));
venc->floor = av_malloc_array(sizeof(float) * venc->channels, (1 << venc->log2_blocksize[1]) / 2);
venc->coeffs = av_malloc_array(sizeof(float) * venc->channels, (1 << venc->log2_blocksize[1]) / 2);
venc->scratch = av_malloc_array(sizeof(float) * venc->channels, (1 << venc->log2_blocksize[1]) / 2);
if (!venc->saved || !venc->samples || !venc->floor || !venc->coeffs || !venc->scratch)
return AVERROR(ENOMEM);
if ((ret = dsp_init(avctx, venc)) < 0)
return ret;
return 0;
}
| false | FFmpeg | 5a2ad7ede33b5d63c1f1b1313a218da62e1c0d48 |
26,758 | static void mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
{
MpegEncContext *s= &s1->mpeg_enc_ctx;
s->full_pel[0] = s->full_pel[1] = 0;
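    /* four 4-bit f_codes: forward/backward x horizontal/vertical */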
s->mpeg_f_code[0][0] = get_bits(&s->gb, 4);
s->mpeg_f_code[0][1] = get_bits(&s->gb, 4);
s->mpeg_f_code[1][0] = get_bits(&s->gb, 4);
s->mpeg_f_code[1][1] = get_bits(&s->gb, 4);
if(!s->pict_type && s1->mpeg_enc_ctx_allocated){
av_log(s->avctx, AV_LOG_ERROR, "Missing picture start code, guessing missing values\n");
if(s->mpeg_f_code[1][0] == 15 && s->mpeg_f_code[1][1]==15){
if(s->mpeg_f_code[0][0] == 15 && s->mpeg_f_code[0][1] == 15)
s->pict_type= FF_I_TYPE;
else
s->pict_type= FF_P_TYPE;
}else
s->pict_type= FF_B_TYPE;
s->current_picture.pict_type= s->pict_type;
s->current_picture.key_frame= s->pict_type == FF_I_TYPE;
}
s->intra_dc_precision = get_bits(&s->gb, 2);
s->picture_structure = get_bits(&s->gb, 2);
s->top_field_first = get_bits1(&s->gb);
s->frame_pred_frame_dct = get_bits1(&s->gb);
s->concealment_motion_vectors = get_bits1(&s->gb);
s->q_scale_type = get_bits1(&s->gb);
s->intra_vlc_format = get_bits1(&s->gb);
s->alternate_scan = get_bits1(&s->gb);
s->repeat_first_field = get_bits1(&s->gb);
s->chroma_420_type = get_bits1(&s->gb);
s->progressive_frame = get_bits1(&s->gb);
if(s->progressive_sequence && !s->progressive_frame){
s->progressive_frame= 1;
av_log(s->avctx, AV_LOG_ERROR, "interlaced frame in progressive sequence, ignoring\n");
}
if(s->picture_structure==0 || (s->progressive_frame && s->picture_structure!=PICT_FRAME)){
av_log(s->avctx, AV_LOG_ERROR, "picture_structure %d invalid, ignoring\n", s->picture_structure);
s->picture_structure= PICT_FRAME;
}
if(s->progressive_frame && !s->frame_pred_frame_dct){
av_log(s->avctx, AV_LOG_ERROR, "invalid frame_pred_frame_dct\n");
s->frame_pred_frame_dct= 1;
}
if(s->picture_structure == PICT_FRAME){
s->first_field=0;
s->v_edge_pos= 16*s->mb_height;
}else{
s->first_field ^= 1;
s->v_edge_pos= 8*s->mb_height;
memset(s->mbskip_table, 0, s->mb_stride*s->mb_height);
}
if(s->alternate_scan){
ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
}else{
ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
}
/* composite display not parsed */
dprintf(s->avctx, "intra_dc_precision=%d\n", s->intra_dc_precision);
dprintf(s->avctx, "picture_structure=%d\n", s->picture_structure);
dprintf(s->avctx, "top field first=%d\n", s->top_field_first);
dprintf(s->avctx, "repeat first field=%d\n", s->repeat_first_field);
dprintf(s->avctx, "conceal=%d\n", s->concealment_motion_vectors);
dprintf(s->avctx, "intra_vlc_format=%d\n", s->intra_vlc_format);
dprintf(s->avctx, "alternate_scan=%d\n", s->alternate_scan);
dprintf(s->avctx, "frame_pred_frame_dct=%d\n", s->frame_pred_frame_dct);
dprintf(s->avctx, "progressive_frame=%d\n", s->progressive_frame);
}
| false | FFmpeg | 7a14430ed75a2eaaa430e46c2f54a7a9a8b71804 |
26,759 | static ssize_t nic_receive(VLANClientState *nc, const uint8_t * buf, size_t size)
{
/* TODO:
* - Magic packets should set bit 30 in power management driver register.
* - Interesting packets should set bit 29 in power management driver register.
*/
EEPRO100State *s = DO_UPCAST(NICState, nc, nc)->opaque;
uint16_t rfd_status = 0xa000;
static const uint8_t broadcast_macaddr[6] =
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
/* TODO: check multiple IA bit. */
if (s->configuration[20] & BIT(6)) {
missing("Multiple IA bit");
return -1;
}
if (s->configuration[8] & 0x80) {
/* CSMA is disabled. */
logout("%p received while CSMA is disabled\n", s);
return -1;
} else if (size < 64 && (s->configuration[7] & BIT(0))) {
/* Short frame and configuration byte 7/0 (discard short receive) set:
* Short frame is discarded */
logout("%p received short frame (%zu byte)\n", s, size);
s->statistics.rx_short_frame_errors++;
#if 0
return -1;
#endif
} else if ((size > MAX_ETH_FRAME_SIZE + 4) && !(s->configuration[18] & BIT(3))) {
/* Long frame and configuration byte 18/3 (long receive ok) not set:
* Long frames are discarded. */
logout("%p received long frame (%zu byte), ignored\n", s, size);
return -1;
} else if (memcmp(buf, s->conf.macaddr.a, 6) == 0) { /* !!! */
/* Frame matches individual address. */
/* TODO: check configuration byte 15/4 (ignore U/L). */
TRACE(RXTX, logout("%p received frame for me, len=%zu\n", s, size));
} else if (memcmp(buf, broadcast_macaddr, 6) == 0) {
/* Broadcast frame. */
TRACE(RXTX, logout("%p received broadcast, len=%zu\n", s, size));
rfd_status |= 0x0002;
} else if (buf[0] & 0x01) {
/* Multicast frame. */
TRACE(RXTX, logout("%p received multicast, len=%zu,%s\n", s, size, nic_dump(buf, size)));
if (s->configuration[21] & BIT(3)) {
/* Multicast all bit is set, receive all multicast frames. */
} else {
unsigned mcast_idx = compute_mcast_idx(buf);
assert(mcast_idx < 64);
if (s->mult[mcast_idx >> 3] & (1 << (mcast_idx & 7))) {
/* Multicast frame is allowed in hash table. */
} else if (s->configuration[15] & BIT(0)) {
/* Promiscuous: receive all. */
rfd_status |= 0x0004;
} else {
TRACE(RXTX, logout("%p multicast ignored\n", s));
return -1;
}
}
/* TODO: Next not for promiscuous mode? */
rfd_status |= 0x0002;
} else if (s->configuration[15] & BIT(0)) {
/* Promiscuous: receive all. */
TRACE(RXTX, logout("%p received frame in promiscuous mode, len=%zu\n", s, size));
rfd_status |= 0x0004;
} else {
TRACE(RXTX, logout("%p received frame, ignored, len=%zu,%s\n", s, size,
nic_dump(buf, size)));
return size;
}
if (get_ru_state(s) != ru_ready) {
/* No resources available. */
logout("no resources, state=%u\n", get_ru_state(s));
/* TODO: RNR interrupt only at first failed frame? */
eepro100_rnr_interrupt(s);
s->statistics.rx_resource_errors++;
#if 0
assert(!"no resources");
#endif
return -1;
}
/* !!! */
eepro100_rx_t rx;
cpu_physical_memory_read(s->ru_base + s->ru_offset, (uint8_t *) & rx,
offsetof(eepro100_rx_t, packet));
uint16_t rfd_command = le16_to_cpu(rx.command);
uint16_t rfd_size = le16_to_cpu(rx.size);
if (size > rfd_size) {
logout("Receive buffer (%" PRId16 " bytes) too small for data "
"(%zu bytes); data truncated\n", rfd_size, size);
size = rfd_size;
}
if (size < 64) {
rfd_status |= 0x0080;
}
TRACE(OTHER, logout("command 0x%04x, link 0x%08x, addr 0x%08x, size %u\n",
rfd_command, rx.link, rx.rx_buf_addr, rfd_size));
stw_phys(s->ru_base + s->ru_offset + offsetof(eepro100_rx_t, status),
rfd_status);
stw_phys(s->ru_base + s->ru_offset + offsetof(eepro100_rx_t, count), size);
/* Early receive interrupt not supported. */
#if 0
eepro100_er_interrupt(s);
#endif
/* Receive CRC Transfer not supported. */
if (s->configuration[18] & BIT(2)) {
missing("Receive CRC Transfer");
return -1;
}
/* TODO: check stripping enable bit. */
#if 0
assert(!(s->configuration[17] & BIT(0)));
#endif
cpu_physical_memory_write(s->ru_base + s->ru_offset +
offsetof(eepro100_rx_t, packet), buf, size);
s->statistics.rx_good_frames++;
eepro100_fr_interrupt(s);
s->ru_offset = le32_to_cpu(rx.link);
if (rfd_command & COMMAND_EL) {
/* EL bit is set, so this was the last frame. */
logout("receive: Running out of frames\n");
set_ru_state(s, ru_suspended);
}
if (rfd_command & COMMAND_S) {
/* S bit is set. */
set_ru_state(s, ru_suspended);
}
return size;
}
| true | qemu | 010ec6293409f10b88631c36145944b9c3277ce1 |
26,760 | static int mov_write_mdia_tag(AVIOContext *pb, MOVMuxContext *mov,
MOVTrack *track)
{
int64_t pos = avio_tell(pb);
avio_wb32(pb, 0); /* size */
ffio_wfourcc(pb, "mdia");
mov_write_mdhd_tag(pb, mov, track);
mov_write_hdlr_tag(pb, track);
mov_write_minf_tag(pb, track);
return update_size(pb, pos);
}
| true | FFmpeg | da048c6d24729d3bab6ccb0ac340ea129e3e88d5 |