label | func1 | id
---|---|---|
0 | static int ehci_state_waitlisthead(EHCIState *ehci, int async) { EHCIqh qh; int i = 0; int again = 0; uint32_t entry = ehci->asynclistaddr; /* set reclamation flag at start event (4.8.6) */ if (async) { ehci_set_usbsts(ehci, USBSTS_REC); } ehci_queues_rip_unused(ehci, async, 0); /* Find the head of the list (4.9.1.1) */ for(i = 0; i < MAX_QH; i++) { get_dwords(ehci, NLPTR_GET(entry), (uint32_t *) &qh, sizeof(EHCIqh) >> 2); ehci_trace_qh(NULL, NLPTR_GET(entry), &qh); if (qh.epchar & QH_EPCHAR_H) { if (async) { entry |= (NLPTR_TYPE_QH << 1); } ehci_set_fetch_addr(ehci, async, entry); ehci_set_state(ehci, async, EST_FETCHENTRY); again = 1; goto out; } entry = qh.next; if (entry == ehci->asynclistaddr) { break; } } /* no head found for list. */ ehci_set_state(ehci, async, EST_ACTIVE); out: return again; } | 27,249 |
0 | static void gen_rdhwr(DisasContext *ctx, int rt, int rd) { TCGv t0; #if !defined(CONFIG_USER_ONLY) /* The Linux kernel will emulate rdhwr if it's not supported natively. Therefore only check the ISA in system mode. */ check_insn(ctx, ISA_MIPS32R2); #endif t0 = tcg_temp_new(); switch (rd) { case 0: gen_helper_rdhwr_cpunum(t0, cpu_env); gen_store_gpr(t0, rt); break; case 1: gen_helper_rdhwr_synci_step(t0, cpu_env); gen_store_gpr(t0, rt); break; case 2: gen_helper_rdhwr_cc(t0, cpu_env); gen_store_gpr(t0, rt); break; case 3: gen_helper_rdhwr_ccres(t0, cpu_env); gen_store_gpr(t0, rt); break; case 29: #if defined(CONFIG_USER_ONLY) tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUMIPSState, active_tc.CP0_UserLocal)); gen_store_gpr(t0, rt); break; #else if ((ctx->hflags & MIPS_HFLAG_CP0) || (ctx->hflags & MIPS_HFLAG_HWRENA_ULR)) { tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUMIPSState, active_tc.CP0_UserLocal)); gen_store_gpr(t0, rt); } else { generate_exception_end(ctx, EXCP_RI); } break; #endif default: /* Invalid */ MIPS_INVAL("rdhwr"); generate_exception_end(ctx, EXCP_RI); break; } tcg_temp_free(t0); } | 27,250 |
0 | Aml *aml_shiftright(Aml *arg1, Aml *count) { Aml *var = aml_opcode(0x7A /* ShiftRightOp */); aml_append(var, arg1); aml_append(var, count); build_append_byte(var->buf, 0x00); /* NullNameOp */ return var; } | 27,251 |
0 | static ssize_t handle_aiocb_write_zeroes(RawPosixAIOData *aiocb) { int ret = -EOPNOTSUPP; BDRVRawState *s = aiocb->bs->opaque; if (s->has_write_zeroes == 0) { return -ENOTSUP; } if (aiocb->aio_type & QEMU_AIO_BLKDEV) { #ifdef BLKZEROOUT do { uint64_t range[2] = { aiocb->aio_offset, aiocb->aio_nbytes }; if (ioctl(aiocb->aio_fildes, BLKZEROOUT, range) == 0) { return 0; } } while (errno == EINTR); ret = -errno; #endif } else { #ifdef CONFIG_XFS if (s->is_xfs) { return xfs_write_zeroes(s, aiocb->aio_offset, aiocb->aio_nbytes); } #endif } ret = translate_err(ret); if (ret == -ENOTSUP) { s->has_write_zeroes = false; } return ret; } | 27,252 |
0 | int bdrv_open(BlockDriverState *bs, const char *filename, QDict *options, int flags, BlockDriver *drv, Error **errp) { int ret; /* TODO: extra byte is a hack to ensure MAX_PATH space on Windows. */ char tmp_filename[PATH_MAX + 1]; BlockDriverState *file = NULL; QDict *file_options = NULL; const char *drvname; Error *local_err = NULL; /* NULL means an empty set of options */ if (options == NULL) { options = qdict_new(); } bs->options = options; options = qdict_clone_shallow(options); /* For snapshot=on, create a temporary qcow2 overlay */ if (flags & BDRV_O_SNAPSHOT) { BlockDriverState *bs1; int64_t total_size; BlockDriver *bdrv_qcow2; QEMUOptionParameter *create_options; char backing_filename[PATH_MAX]; if (qdict_size(options) != 0) { error_setg(errp, "Can't use snapshot=on with driver-specific options"); ret = -EINVAL; goto fail; } assert(filename != NULL); /* if snapshot, we create a temporary backing file and open it instead of opening 'filename' directly */ /* if there is a backing file, use it */ bs1 = bdrv_new(""); ret = bdrv_open(bs1, filename, NULL, 0, drv, &local_err); if (ret < 0) { bdrv_unref(bs1); goto fail; } total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK; bdrv_unref(bs1); ret = get_tmp_filename(tmp_filename, sizeof(tmp_filename)); if (ret < 0) { error_setg_errno(errp, -ret, "Could not get temporary filename"); goto fail; } /* Real path is meaningless for protocols */ if (path_has_protocol(filename)) { snprintf(backing_filename, sizeof(backing_filename), "%s", filename); } else if (!realpath(filename, backing_filename)) { ret = -errno; error_setg_errno(errp, errno, "Could not resolve path '%s'", filename); goto fail; } bdrv_qcow2 = bdrv_find_format("qcow2"); create_options = parse_option_parameters("", bdrv_qcow2->create_options, NULL); set_option_parameter_int(create_options, BLOCK_OPT_SIZE, total_size); set_option_parameter(create_options, BLOCK_OPT_BACKING_FILE, backing_filename); if (drv) { set_option_parameter(create_options, BLOCK_OPT_BACKING_FMT, drv->format_name); } ret = bdrv_create(bdrv_qcow2, tmp_filename, create_options, &local_err); free_option_parameters(create_options); if (ret < 0) { error_setg_errno(errp, -ret, "Could not create temporary overlay " "'%s': %s", tmp_filename, error_get_pretty(local_err)); error_free(local_err); local_err = NULL; goto fail; } filename = tmp_filename; drv = bdrv_qcow2; bs->is_temporary = 1; } /* Open image file without format layer */ if (flags & BDRV_O_RDWR) { flags |= BDRV_O_ALLOW_RDWR; } qdict_extract_subqdict(options, &file_options, "file."); ret = bdrv_file_open(&file, filename, file_options, bdrv_open_flags(bs, flags | BDRV_O_UNMAP), &local_err); if (ret < 0) { goto fail; } /* Find the right image format driver */ drvname = qdict_get_try_str(options, "driver"); if (drvname) { drv = bdrv_find_format(drvname); qdict_del(options, "driver"); if (!drv) { error_setg(errp, "Invalid driver: '%s'", drvname); ret = -EINVAL; goto unlink_and_fail; } } if (!drv) { ret = find_image_format(file, filename, &drv, &local_err); } if (!drv) { goto unlink_and_fail; } /* Open the image */ ret = bdrv_open_common(bs, file, options, flags, drv, &local_err); if (ret < 0) { goto unlink_and_fail; } if (bs->file != file) { bdrv_unref(file); file = NULL; } /* If there is a backing file, use it */ if ((flags & BDRV_O_NO_BACKING) == 0) { QDict *backing_options; qdict_extract_subqdict(options, &backing_options, "backing."); ret = bdrv_open_backing_file(bs, backing_options, &local_err); if (ret < 0) { goto close_and_fail; } } /* Check if any unknown options were used */ if (qdict_size(options) != 0) { const QDictEntry *entry = qdict_first(options); error_setg(errp, "Block format '%s' used by device '%s' doesn't " "support the option '%s'", drv->format_name, bs->device_name, entry->key); ret = -EINVAL; goto close_and_fail; } QDECREF(options); if (!bdrv_key_required(bs)) { bdrv_dev_change_media_cb(bs, true); } return 0; unlink_and_fail: if (file != NULL) { bdrv_unref(file); } if (bs->is_temporary) { unlink(filename); } fail: QDECREF(bs->options); QDECREF(options); bs->options = NULL; if (error_is_set(&local_err)) { error_propagate(errp, local_err); } return ret; close_and_fail: bdrv_close(bs); QDECREF(options); if (error_is_set(&local_err)) { error_propagate(errp, local_err); } return ret; } | 27,253 |
0 | static void filter(struct vf_priv_s *p, uint8_t *dst[3], uint8_t *src[3], int dst_stride[3], int src_stride[3], int width, int height){ int x, y, i; for(i=0; i<3; i++){ p->frame->data[i]= src[i]; p->frame->linesize[i]= src_stride[i]; } p->avctx_enc->me_cmp= p->avctx_enc->me_sub_cmp= FF_CMP_SAD /*| (p->parity ? FF_CMP_ODD : FF_CMP_EVEN)*/; p->frame->quality= p->qp*FF_QP2LAMBDA; avcodec_encode_video(p->avctx_enc, p->outbuf, p->outbuf_size, p->frame); p->frame_dec = p->avctx_enc->coded_frame; for(i=0; i<3; i++){ int is_chroma= !!i; int w= width >>is_chroma; int h= height>>is_chroma; int fils= p->frame_dec->linesize[i]; int srcs= src_stride[i]; for(y=0; y<h; y++){ if((y ^ p->parity) & 1){ for(x=0; x<w; x++){ if((x-2)+(y-1)*w>=0 && (x+2)+(y+1)*w<w*h){ //FIXME either alloc larger images or optimize this uint8_t *filp= &p->frame_dec->data[i][x + y*fils]; uint8_t *srcp= &src[i][x + y*srcs]; int diff0= filp[-fils] - srcp[-srcs]; int diff1= filp[+fils] - srcp[+srcs]; int spatial_score= ABS(srcp[-srcs-1] - srcp[+srcs-1]) +ABS(srcp[-srcs ] - srcp[+srcs ]) +ABS(srcp[-srcs+1] - srcp[+srcs+1]) - 1; int temp= filp[0]; #define CHECK(j)\ { int score= ABS(srcp[-srcs-1+(j)] - srcp[+srcs-1-(j)])\ + ABS(srcp[-srcs +(j)] - srcp[+srcs -(j)])\ + ABS(srcp[-srcs+1+(j)] - srcp[+srcs+1-(j)]);\ if(score < spatial_score){\ spatial_score= score;\ diff0= filp[-fils+(j)] - srcp[-srcs+(j)];\ diff1= filp[+fils-(j)] - srcp[+srcs-(j)]; CHECK(-1) CHECK(-2) }} }} CHECK( 1) CHECK( 2) }} }} | 27,255 |
1 | static void test_qga_config(gconstpointer data) { GError *error = NULL; char *cwd, *cmd, *out, *err, *str, **strv, **argv = NULL; char *env[2]; int status; gsize n; GKeyFile *kf; cwd = g_get_current_dir(); cmd = g_strdup_printf("%s%cqemu-ga -D", cwd, G_DIR_SEPARATOR); g_shell_parse_argv(cmd, NULL, &argv, &error); g_assert_no_error(error); env[0] = g_strdup_printf("QGA_CONF=tests%cdata%ctest-qga-config", G_DIR_SEPARATOR, G_DIR_SEPARATOR); env[1] = NULL; g_spawn_sync(NULL, argv, env, 0, NULL, NULL, &out, &err, &status, &error); g_strfreev(argv); g_assert_no_error(error); g_assert_cmpstr(err, ==, ""); g_assert_cmpint(status, ==, 0); kf = g_key_file_new(); g_key_file_load_from_data(kf, out, -1, G_KEY_FILE_NONE, &error); g_assert_no_error(error); str = g_key_file_get_start_group(kf); g_assert_cmpstr(str, ==, "general"); g_free(str); g_assert_false(g_key_file_get_boolean(kf, "general", "daemon", &error)); g_assert_no_error(error); str = g_key_file_get_string(kf, "general", "method", &error); g_assert_no_error(error); g_assert_cmpstr(str, ==, "virtio-serial"); g_free(str); str = g_key_file_get_string(kf, "general", "path", &error); g_assert_no_error(error); g_assert_cmpstr(str, ==, "/path/to/org.qemu.guest_agent.0"); g_free(str); str = g_key_file_get_string(kf, "general", "pidfile", &error); g_assert_no_error(error); g_assert_cmpstr(str, ==, "/var/foo/qemu-ga.pid"); g_free(str); str = g_key_file_get_string(kf, "general", "statedir", &error); g_assert_no_error(error); g_assert_cmpstr(str, ==, "/var/state"); g_free(str); g_assert_true(g_key_file_get_boolean(kf, "general", "verbose", &error)); g_assert_no_error(error); strv = g_key_file_get_string_list(kf, "general", "blacklist", &n, &error); g_assert_cmpint(n, ==, 2); #if GLIB_CHECK_VERSION(2, 44, 0) g_assert_true(g_strv_contains((const char * const *)strv, "guest-ping")); g_assert_true(g_strv_contains((const char * const *)strv, "guest-get-time")); #endif g_assert_no_error(error); g_strfreev(strv); g_free(out); g_free(err); g_free(env[0]); g_key_file_free(kf); } | 27,258 |
1 | static int apc_read_packet(AVFormatContext *s, AVPacket *pkt) { if (av_get_packet(s->pb, pkt, MAX_READ_SIZE) <= 0) return AVERROR(EIO); pkt->stream_index = 0; return 0; } | 27,260 |
0 | void helper_fitoq(CPUSPARCState *env, int32_t src) { /* No possible exceptions converting int to long double. */ QT0 = int32_to_float128(src, &env->fp_status); } | 27,262 |
0 | int gdbserver_start(const char *device) { GDBState *s; char gdbstub_device_name[128]; CharDriverState *chr = NULL; CharDriverState *mon_chr; if (!device) return -1; if (strcmp(device, "none") != 0) { if (strstart(device, "tcp:", NULL)) { /* enforce required TCP attributes */ snprintf(gdbstub_device_name, sizeof(gdbstub_device_name), "%s,nowait,nodelay,server", device); device = gdbstub_device_name; } #ifndef _WIN32 else if (strcmp(device, "stdio") == 0) { struct sigaction act; memset(&act, 0, sizeof(act)); act.sa_handler = gdb_sigterm_handler; sigaction(SIGINT, &act, NULL); } #endif chr = qemu_chr_new("gdb", device, NULL); if (!chr) return -1; qemu_chr_fe_claim_no_fail(chr); qemu_chr_add_handlers(chr, gdb_chr_can_receive, gdb_chr_receive, gdb_chr_event, NULL); } s = gdbserver_state; if (!s) { s = g_malloc0(sizeof(GDBState)); gdbserver_state = s; qemu_add_vm_change_state_handler(gdb_vm_state_change, NULL); /* Initialize a monitor terminal for gdb */ mon_chr = qemu_chr_alloc(); mon_chr->chr_write = gdb_monitor_write; monitor_init(mon_chr, 0); } else { if (s->chr) qemu_chr_delete(s->chr); mon_chr = s->mon_chr; memset(s, 0, sizeof(GDBState)); } s->c_cpu = first_cpu; s->g_cpu = first_cpu; s->chr = chr; s->state = chr ? RS_IDLE : RS_INACTIVE; s->mon_chr = mon_chr; s->current_syscall_cb = NULL; return 0; } | 27,263 |
0 | net_checksum_add_iov(const struct iovec *iov, const unsigned int iov_cnt, uint32_t iov_off, uint32_t size) { size_t iovec_off, buf_off; unsigned int i; uint32_t res = 0; uint32_t seq = 0; iovec_off = 0; buf_off = 0; for (i = 0; i < iov_cnt && size; i++) { if (iov_off < (iovec_off + iov[i].iov_len)) { size_t len = MIN((iovec_off + iov[i].iov_len) - iov_off , size); void *chunk_buf = iov[i].iov_base + (iov_off - iovec_off); res += net_checksum_add_cont(len, chunk_buf, seq); seq += len; buf_off += len; iov_off += len; size -= len; } iovec_off += iov[i].iov_len; } return res; } | 27,265 |
0 | static inline int get_phys_addr(CPUARMState *env, target_ulong address, int access_type, int is_user, hwaddr *phys_ptr, int *prot, target_ulong *page_size) { /* This is not entirely correct as get_phys_addr() can also be called * from ats_write() for an address translation of a specific regime. */ uint32_t sctlr = A32_BANKED_CURRENT_REG_GET(env, sctlr); /* Fast Context Switch Extension. */ if (address < 0x02000000) address += env->cp15.c13_fcse; if ((sctlr & SCTLR_M) == 0) { /* MMU/MPU disabled. */ *phys_ptr = address; *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; *page_size = TARGET_PAGE_SIZE; return 0; } else if (arm_feature(env, ARM_FEATURE_MPU)) { *page_size = TARGET_PAGE_SIZE; return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr, prot); } else if (extended_addresses_enabled(env)) { return get_phys_addr_lpae(env, address, access_type, is_user, phys_ptr, prot, page_size); } else if (sctlr & SCTLR_XP) { return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr, prot, page_size); } else { return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr, prot, page_size); } } | 27,267 |
0 | DriveInfo *drive_init(QemuOpts *opts, int default_to_scsi, int *fatal_error) { const char *buf; const char *file = NULL; char devname[128]; const char *serial; const char *mediastr = ""; BlockInterfaceType type; enum { MEDIA_DISK, MEDIA_CDROM } media; int bus_id, unit_id; int cyls, heads, secs, translation; BlockDriver *drv = NULL; int max_devs; int index; int ro = 0; int bdrv_flags = 0; int on_read_error, on_write_error; const char *devaddr; DriveInfo *dinfo; int snapshot = 0; int ret; *fatal_error = 1; translation = BIOS_ATA_TRANSLATION_AUTO; if (default_to_scsi) { type = IF_SCSI; max_devs = MAX_SCSI_DEVS; pstrcpy(devname, sizeof(devname), "scsi"); } else { type = IF_IDE; max_devs = MAX_IDE_DEVS; pstrcpy(devname, sizeof(devname), "ide"); } media = MEDIA_DISK; /* extract parameters */ bus_id = qemu_opt_get_number(opts, "bus", 0); unit_id = qemu_opt_get_number(opts, "unit", -1); index = qemu_opt_get_number(opts, "index", -1); cyls = qemu_opt_get_number(opts, "cyls", 0); heads = qemu_opt_get_number(opts, "heads", 0); secs = qemu_opt_get_number(opts, "secs", 0); snapshot = qemu_opt_get_bool(opts, "snapshot", 0); ro = qemu_opt_get_bool(opts, "readonly", 0); file = qemu_opt_get(opts, "file"); serial = qemu_opt_get(opts, "serial"); if ((buf = qemu_opt_get(opts, "if")) != NULL) { pstrcpy(devname, sizeof(devname), buf); if (!strcmp(buf, "ide")) { type = IF_IDE; max_devs = MAX_IDE_DEVS; } else if (!strcmp(buf, "scsi")) { type = IF_SCSI; max_devs = MAX_SCSI_DEVS; } else if (!strcmp(buf, "floppy")) { type = IF_FLOPPY; max_devs = 0; } else if (!strcmp(buf, "pflash")) { type = IF_PFLASH; max_devs = 0; } else if (!strcmp(buf, "mtd")) { type = IF_MTD; max_devs = 0; } else if (!strcmp(buf, "sd")) { type = IF_SD; max_devs = 0; } else if (!strcmp(buf, "virtio")) { type = IF_VIRTIO; max_devs = 0; } else if (!strcmp(buf, "xen")) { type = IF_XEN; max_devs = 0; } else if (!strcmp(buf, "none")) { type = IF_NONE; max_devs = 0; } else { error_report("unsupported bus type '%s'", buf); return NULL; } } if (cyls || heads || secs) { if (cyls < 1 || (type == IF_IDE && cyls > 16383)) { error_report("invalid physical cyls number"); return NULL; } if (heads < 1 || (type == IF_IDE && heads > 16)) { error_report("invalid physical heads number"); return NULL; } if (secs < 1 || (type == IF_IDE && secs > 63)) { error_report("invalid physical secs number"); return NULL; } } if ((buf = qemu_opt_get(opts, "trans")) != NULL) { if (!cyls) { error_report("'%s' trans must be used with cyls,heads and secs", buf); return NULL; } if (!strcmp(buf, "none")) translation = BIOS_ATA_TRANSLATION_NONE; else if (!strcmp(buf, "lba")) translation = BIOS_ATA_TRANSLATION_LBA; else if (!strcmp(buf, "auto")) translation = BIOS_ATA_TRANSLATION_AUTO; else { error_report("'%s' invalid translation type", buf); return NULL; } } if ((buf = qemu_opt_get(opts, "media")) != NULL) { if (!strcmp(buf, "disk")) { media = MEDIA_DISK; } else if (!strcmp(buf, "cdrom")) { if (cyls || secs || heads) { error_report("'%s' invalid physical CHS format", buf); return NULL; } media = MEDIA_CDROM; } else { error_report("'%s' invalid media", buf); return NULL; } } if ((buf = qemu_opt_get(opts, "cache")) != NULL) { if (!strcmp(buf, "off") || !strcmp(buf, "none")) { bdrv_flags |= BDRV_O_NOCACHE; } else if (!strcmp(buf, "writeback")) { bdrv_flags |= BDRV_O_CACHE_WB; } else if (!strcmp(buf, "unsafe")) { bdrv_flags |= BDRV_O_CACHE_WB; bdrv_flags |= BDRV_O_NO_FLUSH; } else if (!strcmp(buf, "writethrough")) { /* this is the default */ } else { error_report("invalid cache option"); return NULL; } } #ifdef CONFIG_LINUX_AIO if ((buf = qemu_opt_get(opts, "aio")) != NULL) { if (!strcmp(buf, "native")) { bdrv_flags |= BDRV_O_NATIVE_AIO; } else if (!strcmp(buf, "threads")) { /* this is the default */ } else { error_report("invalid aio option"); return NULL; } } #endif if ((buf = qemu_opt_get(opts, "format")) != NULL) { if (strcmp(buf, "?") == 0) { error_printf("Supported formats:"); bdrv_iterate_format(bdrv_format_print, NULL); error_printf("\n"); return NULL; } drv = bdrv_find_whitelisted_format(buf); if (!drv) { error_report("'%s' invalid format", buf); return NULL; } } on_write_error = BLOCK_ERR_STOP_ENOSPC; if ((buf = qemu_opt_get(opts, "werror")) != NULL) { if (type != IF_IDE && type != IF_SCSI && type != IF_VIRTIO && type != IF_NONE) { error_report("werror is not supported by this bus type"); return NULL; } on_write_error = parse_block_error_action(buf, 0); if (on_write_error < 0) { return NULL; } } on_read_error = BLOCK_ERR_REPORT; if ((buf = qemu_opt_get(opts, "rerror")) != NULL) { if (type != IF_IDE && type != IF_VIRTIO && type != IF_SCSI && type != IF_NONE) { error_report("rerror is not supported by this bus type"); return NULL; } on_read_error = parse_block_error_action(buf, 1); if (on_read_error < 0) { return NULL; } } if ((devaddr = qemu_opt_get(opts, "addr")) != NULL) { if (type != IF_VIRTIO) { error_report("addr is not supported by this bus type"); return NULL; } } /* compute bus and unit according index */ if (index != -1) { if (bus_id != 0 || unit_id != -1) { error_report("index cannot be used with bus and unit"); return NULL; } if (max_devs == 0) { unit_id = index; bus_id = 0; } else { unit_id = index % max_devs; bus_id = index / max_devs; } } /* if user doesn't specify a unit_id, * try to find the first free */ if (unit_id == -1) { unit_id = 0; while (drive_get(type, bus_id, unit_id) != NULL) { unit_id++; if (max_devs && unit_id >= max_devs) { unit_id -= max_devs; bus_id++; } } } /* check unit id */ if (max_devs && unit_id >= max_devs) { error_report("unit %d too big (max is %d)", unit_id, max_devs - 1); return NULL; } /* * ignore multiple definitions */ if (drive_get(type, bus_id, unit_id) != NULL) { *fatal_error = 0; return NULL; } /* init */ dinfo = qemu_mallocz(sizeof(*dinfo)); if ((buf = qemu_opts_id(opts)) != NULL) { dinfo->id = qemu_strdup(buf); } else { /* no id supplied -> create one */ dinfo->id = qemu_mallocz(32); if (type == IF_IDE || type == IF_SCSI) mediastr = (media == MEDIA_CDROM) ? "-cd" : "-hd"; if (max_devs) snprintf(dinfo->id, 32, "%s%i%s%i", devname, bus_id, mediastr, unit_id); else snprintf(dinfo->id, 32, "%s%s%i", devname, mediastr, unit_id); } dinfo->bdrv = bdrv_new(dinfo->id); dinfo->devaddr = devaddr; dinfo->type = type; dinfo->bus = bus_id; dinfo->unit = unit_id; dinfo->opts = opts; if (serial) strncpy(dinfo->serial, serial, sizeof(dinfo->serial) - 1); QTAILQ_INSERT_TAIL(&drives, dinfo, next); bdrv_set_on_error(dinfo->bdrv, on_read_error, on_write_error); switch(type) { case IF_IDE: case IF_SCSI: case IF_XEN: case IF_NONE: switch(media) { case MEDIA_DISK: if (cyls != 0) { bdrv_set_geometry_hint(dinfo->bdrv, cyls, heads, secs); bdrv_set_translation_hint(dinfo->bdrv, translation); } break; case MEDIA_CDROM: bdrv_set_type_hint(dinfo->bdrv, BDRV_TYPE_CDROM); break; } break; case IF_SD: /* FIXME: This isn't really a floppy, but it's a reasonable approximation. */ case IF_FLOPPY: bdrv_set_type_hint(dinfo->bdrv, BDRV_TYPE_FLOPPY); break; case IF_PFLASH: case IF_MTD: break; case IF_VIRTIO: /* add virtio block device */ opts = qemu_opts_create(qemu_find_opts("device"), NULL, 0); qemu_opt_set(opts, "driver", "virtio-blk-pci"); qemu_opt_set(opts, "drive", dinfo->id); if (devaddr) qemu_opt_set(opts, "addr", devaddr); break; case IF_COUNT: abort(); } if (!file || !*file) { *fatal_error = 0; return NULL; } if (snapshot) { /* always use cache=unsafe with snapshot */ bdrv_flags &= ~BDRV_O_CACHE_MASK; bdrv_flags |= (BDRV_O_SNAPSHOT|BDRV_O_CACHE_WB|BDRV_O_NO_FLUSH); } if (media == MEDIA_CDROM) { /* CDROM is fine for any interface, don't check. */ ro = 1; } else if (ro == 1) { if (type != IF_SCSI && type != IF_VIRTIO && type != IF_FLOPPY && type != IF_NONE) { error_report("readonly not supported by this bus type"); return NULL; } } bdrv_flags |= ro ? 0 : BDRV_O_RDWR; ret = bdrv_open(dinfo->bdrv, file, bdrv_flags, drv); if (ret < 0) { error_report("could not open disk image %s: %s", file, strerror(-ret)); return NULL; } if (bdrv_key_required(dinfo->bdrv)) autostart = 0; *fatal_error = 0; return dinfo; } | 27,268 |
0 | static void test_visitor_in_struct(TestInputVisitorData *data, const void *unused) { TestStruct *p = NULL; Visitor *v; v = visitor_input_test_init(data, "{ 'integer': -42, 'boolean': true, 'string': 'foo' }"); visit_type_TestStruct(v, NULL, &p, &error_abort); g_assert_cmpint(p->integer, ==, -42); g_assert(p->boolean == true); g_assert_cmpstr(p->string, ==, "foo"); g_free(p->string); g_free(p); } | 27,270 |
0 | static int msix_add_config(struct PCIDevice *pdev, unsigned short nentries, unsigned bar_nr, unsigned bar_size) { int config_offset; uint8_t *config; uint32_t new_size; if (nentries < 1 || nentries > PCI_MSIX_FLAGS_QSIZE + 1) return -EINVAL; if (bar_size > 0x80000000) return -ENOSPC; /* Add space for MSI-X structures */ if (!bar_size) { new_size = MSIX_PAGE_SIZE; } else if (bar_size < MSIX_PAGE_SIZE) { bar_size = MSIX_PAGE_SIZE; new_size = MSIX_PAGE_SIZE * 2; } else { new_size = bar_size * 2; } pdev->msix_bar_size = new_size; config_offset = pci_add_capability(pdev, PCI_CAP_ID_MSIX, 0, MSIX_CAP_LENGTH); if (config_offset < 0) return config_offset; config = pdev->config + config_offset; pci_set_word(config + PCI_MSIX_FLAGS, nentries - 1); /* Table on top of BAR */ pci_set_long(config + PCI_MSIX_TABLE, bar_size | bar_nr); /* Pending bits on top of that */ pci_set_long(config + PCI_MSIX_PBA, (bar_size + MSIX_PAGE_PENDING) | bar_nr); pdev->msix_cap = config_offset; /* Make flags bit writable. */ pdev->wmask[config_offset + MSIX_CONTROL_OFFSET] |= MSIX_ENABLE_MASK | MSIX_MASKALL_MASK; pdev->msix_function_masked = true; return 0; } | 27,271 |
0 | void qmp_memsave(int64_t addr, int64_t size, const char *filename, bool has_cpu, int64_t cpu_index, Error **errp) { FILE *f; uint32_t l; CPUState *cpu; uint8_t buf[1024]; if (!has_cpu) { cpu_index = 0; } cpu = qemu_get_cpu(cpu_index); if (cpu == NULL) { error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index", "a CPU number"); return; } f = fopen(filename, "wb"); if (!f) { error_setg_file_open(errp, errno, filename); return; } while (size != 0) { l = sizeof(buf); if (l > size) l = size; cpu_memory_rw_debug(cpu, addr, buf, l, 0); if (fwrite(buf, 1, l, f) != l) { error_set(errp, QERR_IO_ERROR); goto exit; } addr += l; size -= l; } exit: fclose(f); } | 27,273 |
0 | static void xen_platform_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); k->init = xen_platform_initfn; k->vendor_id = PCI_VENDOR_ID_XEN; k->device_id = PCI_DEVICE_ID_XEN_PLATFORM; k->class_id = PCI_CLASS_OTHERS << 8 | 0x80; k->subsystem_vendor_id = PCI_VENDOR_ID_XEN; k->subsystem_id = PCI_DEVICE_ID_XEN_PLATFORM; k->revision = 1; set_bit(DEVICE_CATEGORY_MISC, dc->categories); dc->desc = "XEN platform pci device"; dc->reset = platform_reset; dc->vmsd = &vmstate_xen_platform; } | 27,274 |
0 | int net_init_slirp(QemuOpts *opts, Monitor *mon, const char *name, VLANState *vlan) { struct slirp_config_str *config; const char *vhost; const char *vhostname; const char *vdhcp_start; const char *vnamesrv; const char *tftp_export; const char *bootfile; const char *smb_export; const char *vsmbsrv; char *vnet = NULL; int restricted = 0; int ret; vhost = qemu_opt_get(opts, "host"); vhostname = qemu_opt_get(opts, "hostname"); vdhcp_start = qemu_opt_get(opts, "dhcpstart"); vnamesrv = qemu_opt_get(opts, "dns"); tftp_export = qemu_opt_get(opts, "tftp"); bootfile = qemu_opt_get(opts, "bootfile"); smb_export = qemu_opt_get(opts, "smb"); vsmbsrv = qemu_opt_get(opts, "smbserver"); if (qemu_opt_get(opts, "ip")) { const char *ip = qemu_opt_get(opts, "ip"); int l = strlen(ip) + strlen("/24") + 1; vnet = qemu_malloc(l); /* emulate legacy ip= parameter */ pstrcpy(vnet, l, ip); pstrcat(vnet, l, "/24"); } if (qemu_opt_get(opts, "net")) { if (vnet) { qemu_free(vnet); } vnet = qemu_strdup(qemu_opt_get(opts, "net")); } if (qemu_opt_get(opts, "restrict") && qemu_opt_get(opts, "restrict")[0] == 'y') { restricted = 1; } qemu_opt_foreach(opts, net_init_slirp_configs, NULL, 0); ret = net_slirp_init(vlan, "user", name, restricted, vnet, vhost, vhostname, tftp_export, bootfile, vdhcp_start, vnamesrv, smb_export, vsmbsrv); while (slirp_configs) { config = slirp_configs; slirp_configs = config->next; qemu_free(config); } if (ret != -1 && vlan) { vlan->nb_host_devs++; } qemu_free(vnet); return ret; } | 27,277 |
0 | static QOSState *qvirtio_9p_start(void) { const char *cmd = "-fsdev local,id=fsdev0,security_model=none,path=%s " "-device virtio-9p-pci,fsdev=fsdev0,mount_tag=%s"; test_share = g_strdup("/tmp/qtest.XXXXXX"); g_assert_nonnull(mkdtemp(test_share)); return qtest_pc_boot(cmd, test_share, mount_tag); } | 27,278 |
1 | static void test_read_without_media(void) { uint8_t ret; ret = send_read_command(); g_assert(ret == 0); } | 27,279 |
1 | static void set_acpi_power_state(IPMIBmcSim *ibs, uint8_t *cmd, unsigned int cmd_len, uint8_t *rsp, unsigned int *rsp_len, unsigned int max_rsp_len) { IPMI_CHECK_CMD_LEN(4); ibs->acpi_power_state[0] = cmd[2]; ibs->acpi_power_state[1] = cmd[3]; } | 27,280 |
1 | static void usb_uas_unrealize(USBDevice *dev, Error **errp) { UASDevice *uas = USB_UAS(dev); qemu_bh_delete(uas->status_bh); } | 27,281 |
0 | static int sdp_parse(AVFormatContext *s, const char *content) { const char *p; int letter; /* Some SDP lines, particularly for Realmedia or ASF RTSP streams, * contain long SDP lines containing complete ASF Headers (several * kB) or arrays of MDPR (RM stream descriptor) headers plus * "rulebooks" describing their properties. Therefore, the SDP line * buffer is large. * * The Vorbis FMTP line can be up to 16KB - see sdp_parse_fmtp. */ char buf[16384], *q; SDPParseState sdp_parse_state, *s1 = &sdp_parse_state; memset(s1, 0, sizeof(SDPParseState)); p = content; for(;;) { skip_spaces(&p); letter = *p; if (letter == '\0') break; p++; if (*p != '=') goto next_line; p++; /* get the content */ q = buf; while (*p != '\n' && *p != '\r' && *p != '\0') { if ((q - buf) < sizeof(buf) - 1) *q++ = *p; p++; } *q = '\0'; sdp_parse_line(s, s1, letter, buf); next_line: while (*p != '\n' && *p != '\0') p++; if (*p == '\n') p++; } return 0; } | 27,282 |
1 | static void flush_change(H264Context *h) { h->outputed_poc = h->next_outputed_poc = INT_MIN; h->prev_interlaced_frame = 1; idr(h); h->prev_frame_num = -1; if (h->s.current_picture_ptr) h->s.current_picture_ptr->f.reference = 0; h->s.first_field = 0; memset(h->ref_list[0], 0, sizeof(h->ref_list[0])); memset(h->ref_list[1], 0, sizeof(h->ref_list[1])); memset(h->default_ref_list[0], 0, sizeof(h->default_ref_list[0])); memset(h->default_ref_list[1], 0, sizeof(h->default_ref_list[1])); ff_h264_reset_sei(h); h->recovery_frame= -1; h->sync= 0; h->list_count = 0; h->current_slice = 0; } | 27,283 |
1 | int qcrypto_cipher_decrypt(QCryptoCipher *cipher, const void *in, void *out, size_t len, Error **errp) { QCryptoCipherNettle *ctx = cipher->opaque; switch (cipher->mode) { case QCRYPTO_CIPHER_MODE_ECB: ctx->alg_decrypt(ctx->ctx_decrypt ? ctx->ctx_decrypt : ctx->ctx_encrypt, len, out, in); break; case QCRYPTO_CIPHER_MODE_CBC: cbc_decrypt(ctx->ctx_decrypt ? ctx->ctx_decrypt : ctx->ctx_encrypt, ctx->alg_decrypt, ctx->niv, ctx->iv, len, out, in); break; default: error_setg(errp, "Unsupported cipher algorithm %d", cipher->alg); return -1; } return 0; } | 27,284 |
1 | static void ahci_migrate(AHCIQState *from, AHCIQState *to, const char *uri) { QOSState *tmp = to->parent; QPCIDevice *dev = to->dev; char *uri_local = NULL; if (uri == NULL) { uri_local = g_strdup_printf("%s%s", "unix:", mig_socket); uri = uri_local; } /* context will be 'to' after completion. */ migrate(from->parent, to->parent, uri); /* We'd like for the AHCIState objects to still point * to information specific to its specific parent * instance, but otherwise just inherit the new data. */ memcpy(to, from, sizeof(AHCIQState)); to->parent = tmp; to->dev = dev; tmp = from->parent; dev = from->dev; memset(from, 0x00, sizeof(AHCIQState)); from->parent = tmp; from->dev = dev; verify_state(to); g_free(uri_local); } | 27,285 |
1 | static int kvm_virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy, unsigned int queue_no, unsigned int vector, MSIMessage msg) { VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no); EventNotifier *n = virtio_queue_get_guest_notifier(vq); VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; int ret; if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) { ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg); if (ret < 0) { return ret; } } /* If guest supports masking, irqfd is already setup, unmask it. * Otherwise, set it up now. */ if (proxy->vdev->guest_notifier_mask) { proxy->vdev->guest_notifier_mask(proxy->vdev, queue_no, false); /* Test after unmasking to avoid losing events. */ if (proxy->vdev->guest_notifier_pending && proxy->vdev->guest_notifier_pending(proxy->vdev, queue_no)) { event_notifier_set(n); } } else { ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector); } return ret; } | 27,286 |
1 | static int vvfat_open(BlockDriverState *bs, const char* dirname, int flags) { BDRVVVFATState *s = bs->opaque; int floppy = 0; int i; #ifdef DEBUG vvv = s; #endif DLOG(if (stderr == NULL) { stderr = fopen("vvfat.log", "a"); setbuf(stderr, NULL); }) s->bs = bs; s->fat_type=16; /* LATER TODO: if FAT32, adjust */ s->sectors_per_cluster=0x10; /* 504MB disk*/ bs->cyls=1024; bs->heads=16; bs->secs=63; s->current_cluster=0xffffffff; s->first_sectors_number=0x40; /* read only is the default for safety */ bs->read_only = 1; s->qcow = s->write_target = NULL; s->qcow_filename = NULL; s->fat2 = NULL; s->downcase_short_names = 1; if (!strstart(dirname, "fat:", NULL)) return -1; if (strstr(dirname, ":floppy:")) { floppy = 1; s->fat_type = 12; s->first_sectors_number = 1; s->sectors_per_cluster=2; bs->cyls = 80; bs->heads = 2; bs->secs = 36; } if (strstr(dirname, ":32:")) { fprintf(stderr, "Big fat greek warning: FAT32 has not been tested. You are welcome to do so!\n"); s->fat_type = 32; } else if (strstr(dirname, ":16:")) { s->fat_type = 16; } else if (strstr(dirname, ":12:")) { s->fat_type = 12; bs->secs = 18; } s->sector_count=bs->cyls*bs->heads*bs->secs-(s->first_sectors_number-1); if (strstr(dirname, ":rw:")) { if (enable_write_target(s)) return -1; bs->read_only = 0; } i = strrchr(dirname, ':') - dirname; assert(i >= 3); if (dirname[i-2] == ':' && qemu_isalpha(dirname[i-1])) /* workaround for DOS drive names */ dirname += i-1; else dirname += i+1; bs->total_sectors=bs->cyls*bs->heads*bs->secs; if(init_directories(s, dirname)) return -1; s->sector_count = s->faked_sectors + s->sectors_per_cluster*s->cluster_count; if(s->first_sectors_number==0x40) init_mbr(s); /* for some reason or other, MS-DOS does not like to know about CHS... */ if (floppy) bs->heads = bs->cyls = bs->secs = 0; // assert(is_consistent(s)); qemu_co_mutex_init(&s->lock); return 0; } | 27,287 |
1 | VirtIODevice *virtio_scsi_init(DeviceState *dev, VirtIOSCSIConf *proxyconf) { VirtIOSCSI *s; static int virtio_scsi_id; size_t sz; int i; sz = sizeof(VirtIOSCSI) + proxyconf->num_queues * sizeof(VirtQueue *); s = (VirtIOSCSI *)virtio_common_init("virtio-scsi", VIRTIO_ID_SCSI, sizeof(VirtIOSCSIConfig), sz); s->qdev = dev; s->conf = proxyconf; /* TODO set up vdev function pointers */ s->vdev.get_config = virtio_scsi_get_config; s->vdev.set_config = virtio_scsi_set_config; s->vdev.get_features = virtio_scsi_get_features; s->vdev.reset = virtio_scsi_reset; s->ctrl_vq = virtio_add_queue(&s->vdev, VIRTIO_SCSI_VQ_SIZE, virtio_scsi_handle_ctrl); s->event_vq = virtio_add_queue(&s->vdev, VIRTIO_SCSI_VQ_SIZE, NULL); for (i = 0; i < s->conf->num_queues; i++) { s->cmd_vqs[i] = virtio_add_queue(&s->vdev, VIRTIO_SCSI_VQ_SIZE, virtio_scsi_handle_cmd); } scsi_bus_new(&s->bus, dev, &virtio_scsi_scsi_info); if (!dev->hotplugged) { scsi_bus_legacy_handle_cmdline(&s->bus); } register_savevm(dev, "virtio-scsi", virtio_scsi_id++, 1, virtio_scsi_save, virtio_scsi_load, s); return &s->vdev; } | 27,288 |
1 | static ResampleContext *resample_init(ResampleContext *c, int out_rate, int in_rate, int filter_size, int phase_shift, int linear, double cutoff0, enum AVSampleFormat format, enum SwrFilterType filter_type, int kaiser_beta, double precision, int cheby){ double cutoff = cutoff0? cutoff0 : 0.97; double factor= FFMIN(out_rate * cutoff / in_rate, 1.0); int phase_count= 1<<phase_shift; if (!c || c->phase_shift != phase_shift || c->linear!=linear || c->factor != factor || c->filter_length != FFMAX((int)ceil(filter_size/factor), 1) || c->format != format || c->filter_type != filter_type || c->kaiser_beta != kaiser_beta) { c = av_mallocz(sizeof(*c)); if (!c) return NULL; c->format= format; c->felem_size= av_get_bytes_per_sample(c->format); switch(c->format){ case AV_SAMPLE_FMT_S16P: c->filter_shift = 15; break; case AV_SAMPLE_FMT_S32P: c->filter_shift = 30; break; case AV_SAMPLE_FMT_FLTP: case AV_SAMPLE_FMT_DBLP: c->filter_shift = 0; break; default: av_log(NULL, AV_LOG_ERROR, "Unsupported sample format\n"); av_assert0(0); c->phase_shift = phase_shift; c->phase_mask = phase_count - 1; c->linear = linear; c->factor = factor; c->filter_length = FFMAX((int)ceil(filter_size/factor), 1); c->filter_alloc = FFALIGN(c->filter_length, 8); c->filter_bank = av_calloc(c->filter_alloc, (phase_count+1)*c->felem_size); c->filter_type = filter_type; c->kaiser_beta = kaiser_beta; if (!c->filter_bank) if (build_filter(c, (void*)c->filter_bank, factor, c->filter_length, c->filter_alloc, phase_count, 1<<c->filter_shift, filter_type, kaiser_beta)) memcpy(c->filter_bank + (c->filter_alloc*phase_count+1)*c->felem_size, c->filter_bank, (c->filter_alloc-1)*c->felem_size); memcpy(c->filter_bank + (c->filter_alloc*phase_count )*c->felem_size, c->filter_bank + (c->filter_alloc - 1)*c->felem_size, c->felem_size); c->compensation_distance= 0; if(!av_reduce(&c->src_incr, &c->dst_incr, out_rate, in_rate * (int64_t)phase_count, INT32_MAX/2)) c->ideal_dst_incr= c->dst_incr; c->index= -phase_count*((c->filter_length-1)/2); c->frac= 0; return c; error: av_freep(&c->filter_bank); av_free(c); return NULL; | 27,289 |
1 | static void test_properties(const char *path) { char *child_path; QDict *response, *tuple; QList *list; QListEntry *entry; g_test_message("Obtaining properties of %s", path); response = qmp("{ 'execute': 'qom-list'," " 'arguments': { 'path': '%s' } }", path); g_assert(response); g_assert(qdict_haskey(response, "return")); list = qobject_to_qlist(qdict_get(response, "return")); QLIST_FOREACH_ENTRY(list, entry) { tuple = qobject_to_qdict(qlist_entry_obj(entry)); if (strstart(qdict_get_str(tuple, "type"), "child<", NULL)) { child_path = g_strdup_printf("%s/%s", path, qdict_get_str(tuple, "name")); test_properties(child_path); g_free(child_path); } else { const char *prop = qdict_get_str(tuple, "name"); g_test_message("Testing property %s.%s", path, prop); response = qmp("{ 'execute': 'qom-get'," " 'arguments': { 'path': '%s'," " 'property': '%s' } }", path, prop); /* qom-get may fail but should not, e.g., segfault. */ g_assert(response); } } } | 27,291 |
1 | static int decode_packet(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket* avpkt) { WmallDecodeCtx *s = avctx->priv_data; GetBitContext* gb = &s->pgb; const uint8_t* buf = avpkt->data; int buf_size = avpkt->size; int num_bits_prev_frame, packet_sequence_number, spliced_packet; s->frame.nb_samples = 0; if (s->packet_done || s->packet_loss) { s->packet_done = 0; /* sanity check for the buffer length */ if (buf_size < avctx->block_align) return 0; s->next_packet_start = buf_size - avctx->block_align; buf_size = avctx->block_align; s->buf_bit_size = buf_size << 3; /* parse packet header */ init_get_bits(gb, buf, s->buf_bit_size); packet_sequence_number = get_bits(gb, 4); skip_bits(gb, 1); // Skip seekable_frame_in_packet, currently ununused spliced_packet = get_bits1(gb); if (spliced_packet) avpriv_request_sample(avctx, "Bitstream splicing"); /* get number of bits that need to be added to the previous frame */ num_bits_prev_frame = get_bits(gb, s->log2_frame_size); /* check for packet loss */ if (!s->packet_loss && ((s->packet_sequence_number + 1) & 0xF) != packet_sequence_number) { s->packet_loss = 1; av_log(avctx, AV_LOG_ERROR, "Packet loss detected! seq %x vs %x\n", s->packet_sequence_number, packet_sequence_number); } s->packet_sequence_number = packet_sequence_number; if (num_bits_prev_frame > 0) { int remaining_packet_bits = s->buf_bit_size - get_bits_count(gb); if (num_bits_prev_frame >= remaining_packet_bits) { num_bits_prev_frame = remaining_packet_bits; s->packet_done = 1; } /* Append the previous frame data to the remaining data from the * previous packet to create a full frame. */ save_bits(s, gb, num_bits_prev_frame, 1); /* decode the cross packet frame if it is valid */ if (num_bits_prev_frame < remaining_packet_bits && !s->packet_loss) decode_frame(s); } else if (s->num_saved_bits - s->frame_offset) { av_dlog(avctx, "ignoring %x previously saved bits\n", s->num_saved_bits - s->frame_offset); } if (s->packet_loss) { /* Reset number of saved bits so that the decoder does not start * to decode incomplete frames in the s->len_prefix == 0 case. */ s->num_saved_bits = 0; s->packet_loss = 0; init_put_bits(&s->pb, s->frame_data, MAX_FRAMESIZE); } } else { int frame_size; s->buf_bit_size = (avpkt->size - s->next_packet_start) << 3; init_get_bits(gb, avpkt->data, s->buf_bit_size); skip_bits(gb, s->packet_offset); if (s->len_prefix && remaining_bits(s, gb) > s->log2_frame_size && (frame_size = show_bits(gb, s->log2_frame_size)) && frame_size <= remaining_bits(s, gb)) { save_bits(s, gb, frame_size, 0); s->packet_done = !decode_frame(s); } else if (!s->len_prefix && s->num_saved_bits > get_bits_count(&s->gb)) { /* when the frames do not have a length prefix, we don't know the * compressed length of the individual frames however, we know what * part of a new packet belongs to the previous frame therefore we * save the incoming packet first, then we append the "previous * frame" data from the next packet so that we get a buffer that * only contains full frames */ s->packet_done = !decode_frame(s); } else { s->packet_done = 1; } } if (s->packet_done && !s->packet_loss && remaining_bits(s, gb) > 0) { /* save the rest of the data so that it can be decoded * with the next packet */ save_bits(s, gb, remaining_bits(s, gb), 0); } *(AVFrame *)data = s->frame; *got_frame_ptr = s->frame.nb_samples > 0; s->packet_offset = get_bits_count(gb) & 7; return (s->packet_loss) ? AVERROR_INVALIDDATA : get_bits_count(gb) >> 3; } | 27,292 |
1 | static void mpegts_write_pes(AVFormatContext *s, AVStream *st, const uint8_t *payload, int payload_size, int64_t pts, int64_t dts, int key) { MpegTSWriteStream *ts_st = st->priv_data; MpegTSWrite *ts = s->priv_data; uint8_t buf[TS_PACKET_SIZE]; uint8_t *q; int val, is_start, len, header_len, write_pcr, is_dvb_subtitle, is_dvb_teletext, flags; int afc_len, stuffing_len; int64_t pcr = -1; /* avoid warning */ int64_t delay = av_rescale(s->max_delay, 90000, AV_TIME_BASE); int force_pat = st->codec->codec_type == AVMEDIA_TYPE_VIDEO && key && !ts_st->prev_payload_key; is_start = 1; while (payload_size > 0) { retransmit_si_info(s, force_pat); force_pat = 0; write_pcr = 0; if (ts_st->pid == ts_st->service->pcr_pid) { if (ts->mux_rate > 1 || is_start) // VBR pcr period is based on frames ts_st->service->pcr_packet_count++; if (ts_st->service->pcr_packet_count >= ts_st->service->pcr_packet_period) { ts_st->service->pcr_packet_count = 0; write_pcr = 1; if (ts->mux_rate > 1 && dts != AV_NOPTS_VALUE && (dts - get_pcr(ts, s->pb)/300) > delay) { /* pcr insert gets priority over null packet insert */ if (write_pcr) mpegts_insert_pcr_only(s, st); else mpegts_insert_null_packet(s); continue; /* recalculate write_pcr and possibly retransmit si_info */ /* prepare packet header */ q = buf; *q++ = 0x47; val = (ts_st->pid >> 8); if (is_start) val |= 0x40; *q++ = val; *q++ = ts_st->pid; ts_st->cc = (ts_st->cc + 1) & 0xf; *q++ = 0x10 | ts_st->cc; // payload indicator + CC if (key && is_start && pts != AV_NOPTS_VALUE) { // set Random Access for key frames if (ts_st->pid == ts_st->service->pcr_pid) write_pcr = 1; set_af_flag(buf, 0x40); q = get_ts_payload_start(buf); if (write_pcr) { set_af_flag(buf, 0x10); q = get_ts_payload_start(buf); // add 11, pcr references the last byte of program clock reference base if (ts->mux_rate > 1) pcr = get_pcr(ts, s->pb); else pcr = (dts - delay)*300; if (dts != AV_NOPTS_VALUE && dts < pcr / 300) av_log(s, AV_LOG_WARNING, "dts < pcr, TS is invalid\n"); extend_af(buf, write_pcr_bits(q, pcr)); q = get_ts_payload_start(buf); if (is_start) { int pes_extension = 0; int pes_header_stuffing_bytes = 0; /* write PES header */ *q++ = 0x00; *q++ = 0x00; *q++ = 0x01; is_dvb_subtitle = 0; is_dvb_teletext = 0; if (st->codec->codec_id == AV_CODEC_ID_DIRAC) { *q++ = 0xfd; } else *q++ = 0xe0; } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO && (st->codec->codec_id == AV_CODEC_ID_MP2 || st->codec->codec_id == AV_CODEC_ID_MP3 || st->codec->codec_id == AV_CODEC_ID_AAC)) { *q++ = 0xc0; } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO && st->codec->codec_id == AV_CODEC_ID_AC3 && ts->m2ts_mode) { *q++ = 0xfd; } else { *q++ = 0xbd; if(st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) { if (st->codec->codec_id == AV_CODEC_ID_DVB_SUBTITLE) { is_dvb_subtitle = 1; } else if (st->codec->codec_id == AV_CODEC_ID_DVB_TELETEXT) { is_dvb_teletext = 1; header_len = 0; flags = 0; if (pts != AV_NOPTS_VALUE) { header_len += 5; flags |= 0x80; if (dts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE && dts != pts) { header_len += 5; flags |= 0x40; if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && st->codec->codec_id == AV_CODEC_ID_DIRAC) { /* set PES_extension_flag */ pes_extension = 1; flags |= 0x01; /* * One byte for PES2 extension flag + * one byte for extension length + * one byte for extension id */ header_len += 3; /* for Blu-ray AC3 Audio the PES Extension flag should be as follow * otherwise it will not play sound on blu-ray */ if (ts->m2ts_mode && st->codec->codec_type == AVMEDIA_TYPE_AUDIO && st->codec->codec_id == AV_CODEC_ID_AC3) { /* set PES_extension_flag */ pes_extension = 1; flags |= 0x01; header_len += 3; if (is_dvb_teletext) { pes_header_stuffing_bytes = 0x24 - header_len; header_len = 0x24; len = payload_size + header_len + 3; /* 3 extra bytes should be added to DVB subtitle payload: 0x20 0x00 at the beginning and trailing 0xff */ if (is_dvb_subtitle) { len += 3; payload_size++; if (len > 0xffff) *q++ = len >> 8; *q++ = len; val = 0x80; /* data alignment indicator is required for subtitle and data streams */ if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE || st->codec->codec_type == AVMEDIA_TYPE_DATA) val |= 0x04; *q++ = val; *q++ = flags; *q++ = header_len; if (pts != AV_NOPTS_VALUE) { write_pts(q, flags >> 6, pts); q += 5; if (dts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE && dts != pts) { write_pts(q, 1, dts); q += 5; if (pes_extension && st->codec->codec_id == AV_CODEC_ID_DIRAC) { flags = 0x01; /* set PES_extension_flag_2 */ *q++ = flags; *q++ = 0x80 | 0x01; /* marker bit + extension length */ /* * Set the stream id extension flag bit to 0 and * write the extended stream id */ *q++ = 0x00 | 0x60; /* For Blu-ray AC3 Audio Setting extended flags */ if (ts->m2ts_mode && pes_extension && st->codec->codec_id == AV_CODEC_ID_AC3) { flags = 0x01; /* set PES_extension_flag_2 */ *q++ = flags; *q++ = 0x80 | 0x01; /* marker bit + extension length */ *q++ = 0x00 | 0x71; /* for AC3 Audio (specifically on blue-rays) */ if (is_dvb_subtitle) { /* First two fields of DVB subtitles PES data: * data_identifier: for DVB subtitle streams shall be coded with the value 0x20 * subtitle_stream_id: for DVB subtitle stream shall be identified by the value 0x00 */ *q++ = 0x20; *q++ = 0x00; if (is_dvb_teletext) { memset(q, 0xff, pes_header_stuffing_bytes); q += pes_header_stuffing_bytes; is_start = 0; /* header size */ header_len = q - buf; /* data len */ len = TS_PACKET_SIZE - header_len; if (len > payload_size) len = payload_size; stuffing_len = TS_PACKET_SIZE - header_len - len; if (stuffing_len > 0) { /* add stuffing with AFC */ if (buf[3] & 0x20) { /* stuffing already present: increase its size */ afc_len = buf[4] + 1; memmove(buf + 4 + afc_len + stuffing_len, buf + 4 + afc_len, header_len - (4 + afc_len)); buf[4] += stuffing_len; memset(buf + 4 + afc_len, 0xff, stuffing_len); } else { /* add stuffing */ memmove(buf + 4 + stuffing_len, buf + 4, header_len - 4); buf[3] |= 0x20; buf[4] = stuffing_len - 1; if (stuffing_len >= 2) { buf[5] = 0x00; memset(buf + 6, 0xff, stuffing_len - 2); if (is_dvb_subtitle && payload_size == len) { memcpy(buf + TS_PACKET_SIZE - len, payload, len - 1); buf[TS_PACKET_SIZE - 1] = 0xff; /* end_of_PES_data_field_marker: an 8-bit field with fixed contents 0xff for DVB subtitle */ } else { memcpy(buf + TS_PACKET_SIZE - len, payload, len); payload += len; payload_size -= len; mpegts_prefix_m2ts_header(s); avio_write(s->pb, buf, TS_PACKET_SIZE); avio_flush(s->pb); ts_st->prev_payload_key = key; | 27,293 |
0 | static void RENAME(postProcess)(const uint8_t src[], int srcStride, uint8_t dst[], int dstStride, int width, int height, const QP_STORE_T QPs[], int QPStride, int isColor, PPContext *c2) { DECLARE_ALIGNED(8, PPContext, c)= *c2; //copy to stack for faster access int x,y; #ifdef TEMPLATE_PP_TIME_MODE const int mode= TEMPLATE_PP_TIME_MODE; #else const int mode= isColor ? c.ppMode.chromMode : c.ppMode.lumMode; #endif int black=0, white=255; // blackest black and whitest white in the picture int QPCorrecture= 256*256; int copyAhead; #if TEMPLATE_PP_MMX int i; #endif const int qpHShift= isColor ? 4-c.hChromaSubSample : 4; const int qpVShift= isColor ? 4-c.vChromaSubSample : 4; //FIXME remove uint64_t * const yHistogram= c.yHistogram; uint8_t * const tempSrc= srcStride > 0 ? c.tempSrc : c.tempSrc - 23*srcStride; uint8_t * const tempDst= (dstStride > 0 ? c.tempDst : c.tempDst - 23*dstStride) + 32; //const int mbWidth= isColor ? (width+7)>>3 : (width+15)>>4; if (mode & VISUALIZE){ if(!(mode & (V_A_DEBLOCK | H_A_DEBLOCK)) || TEMPLATE_PP_MMX) { av_log(c2, AV_LOG_WARNING, "Visualization is currently only supported with the accurate deblock filter without SIMD\n"); } } #if TEMPLATE_PP_MMX for(i=0; i<57; i++){ int offset= ((i*c.ppMode.baseDcDiff)>>8) + 1; int threshold= offset*2 + 1; c.mmxDcOffset[i]= 0x7F - offset; c.mmxDcThreshold[i]= 0x7F - threshold; c.mmxDcOffset[i]*= 0x0101010101010101LL; c.mmxDcThreshold[i]*= 0x0101010101010101LL; } #endif if(mode & CUBIC_IPOL_DEINT_FILTER) copyAhead=16; else if( (mode & LINEAR_BLEND_DEINT_FILTER) || (mode & FFMPEG_DEINT_FILTER) || (mode & LOWPASS5_DEINT_FILTER)) copyAhead=14; else if( (mode & V_DEBLOCK) || (mode & LINEAR_IPOL_DEINT_FILTER) || (mode & MEDIAN_DEINT_FILTER) || (mode & V_A_DEBLOCK)) copyAhead=13; else if(mode & V_X1_FILTER) copyAhead=11; // else if(mode & V_RK1_FILTER) copyAhead=10; else if(mode & DERING) copyAhead=9; else copyAhead=8; copyAhead-= 8; if(!isColor){ uint64_t sum= 0; int i; uint64_t maxClipped; uint64_t clipped; double scale; c.frameNum++; // first frame is fscked so we ignore it if(c.frameNum == 1) yHistogram[0]= width*(uint64_t)height/64*15/256; for(i=0; i<256; i++){ sum+= yHistogram[i]; } /* We always get a completely black picture first. */ maxClipped= (uint64_t)(sum * c.ppMode.maxClippedThreshold); clipped= sum; for(black=255; black>0; black--){ if(clipped < maxClipped) break; clipped-= yHistogram[black]; } clipped= sum; for(white=0; white<256; white++){ if(clipped < maxClipped) break; clipped-= yHistogram[white]; } scale= (double)(c.ppMode.maxAllowedY - c.ppMode.minAllowedY) / (double)(white-black); #if TEMPLATE_PP_MMXEXT c.packedYScale= (uint16_t)(scale*256.0 + 0.5); c.packedYOffset= (((black*c.packedYScale)>>8) - c.ppMode.minAllowedY) & 0xFFFF; #else c.packedYScale= (uint16_t)(scale*1024.0 + 0.5); c.packedYOffset= (black - c.ppMode.minAllowedY) & 0xFFFF; #endif c.packedYOffset|= c.packedYOffset<<32; c.packedYOffset|= c.packedYOffset<<16; c.packedYScale|= c.packedYScale<<32; c.packedYScale|= c.packedYScale<<16; if(mode & LEVEL_FIX) QPCorrecture= (int)(scale*256*256 + 0.5); else QPCorrecture= 256*256; }else{ c.packedYScale= 0x0100010001000100LL; c.packedYOffset= 0; QPCorrecture= 256*256; } /* copy & deinterlace first row of blocks */ y=-BLOCK_SIZE; { const uint8_t *srcBlock= &(src[y*srcStride]); uint8_t *dstBlock= tempDst + dstStride; // From this point on it is guaranteed that we can read and write 16 lines downward // finish 1 block before the next otherwise we might have a problem // with the L1 Cache of the P4 ... or only a few blocks at a time or something for(x=0; x<width; x+=BLOCK_SIZE){ #if TEMPLATE_PP_MMXEXT && HAVE_6REGS /* prefetchnta(srcBlock + (((x>>2)&6) + 5)*srcStride + 32); prefetchnta(srcBlock + (((x>>2)&6) + 6)*srcStride + 32); prefetcht0(dstBlock + (((x>>2)&6) + 5)*dstStride + 32); prefetcht0(dstBlock + (((x>>2)&6) + 6)*dstStride + 32); */ __asm__( "mov %4, %%"REG_a" \n\t" "shr $2, %%"REG_a" \n\t" "and $6, %%"REG_a" \n\t" "add %5, %%"REG_a" \n\t" "mov %%"REG_a", %%"REG_d" \n\t" "imul %1, %%"REG_a" \n\t" "imul %3, %%"REG_d" \n\t" "prefetchnta 32(%%"REG_a", %0) \n\t" "prefetcht0 32(%%"REG_d", %2) \n\t" "add %1, %%"REG_a" \n\t" "add %3, %%"REG_d" \n\t" "prefetchnta 32(%%"REG_a", %0) \n\t" "prefetcht0 32(%%"REG_d", %2) \n\t" :: "r" (srcBlock), "r" ((x86_reg)srcStride), "r" (dstBlock), "r" ((x86_reg)dstStride), "g" ((x86_reg)x), "g" ((x86_reg)copyAhead) : "%"REG_a, "%"REG_d ); #elif TEMPLATE_PP_3DNOW //FIXME check if this is faster on an 3dnow chip or if it is faster without the prefetch or ... /* prefetch(srcBlock + (((x>>3)&3) + 5)*srcStride + 32); prefetch(srcBlock + (((x>>3)&3) + 9)*srcStride + 32); prefetchw(dstBlock + (((x>>3)&3) + 5)*dstStride + 32); prefetchw(dstBlock + (((x>>3)&3) + 9)*dstStride + 32); */ #endif RENAME(blockCopy)(dstBlock + dstStride*8, dstStride, srcBlock + srcStride*8, srcStride, mode & LEVEL_FIX, &c.packedYOffset); RENAME(duplicate)(dstBlock + dstStride*8, dstStride); if(mode & LINEAR_IPOL_DEINT_FILTER) RENAME(deInterlaceInterpolateLinear)(dstBlock, dstStride); else if(mode & LINEAR_BLEND_DEINT_FILTER) RENAME(deInterlaceBlendLinear)(dstBlock, dstStride, c.deintTemp + x); else if(mode & MEDIAN_DEINT_FILTER) RENAME(deInterlaceMedian)(dstBlock, dstStride); else if(mode & CUBIC_IPOL_DEINT_FILTER) RENAME(deInterlaceInterpolateCubic)(dstBlock, dstStride); else if(mode & FFMPEG_DEINT_FILTER) RENAME(deInterlaceFF)(dstBlock, dstStride, c.deintTemp + x); else if(mode & LOWPASS5_DEINT_FILTER) RENAME(deInterlaceL5)(dstBlock, dstStride, c.deintTemp + x, c.deintTemp + width + x); /* else if(mode & CUBIC_BLEND_DEINT_FILTER) RENAME(deInterlaceBlendCubic)(dstBlock, dstStride); */ dstBlock+=8; srcBlock+=8; } if(width==FFABS(dstStride)) linecpy(dst, tempDst + 9*dstStride, copyAhead, dstStride); else{ int i; for(i=0; i<copyAhead; i++){ memcpy(dst + i*dstStride, tempDst + (9+i)*dstStride, width); } } } for(y=0; y<height; y+=BLOCK_SIZE){ //1% speedup if these are here instead of the inner loop const uint8_t *srcBlock= &(src[y*srcStride]); uint8_t *dstBlock= &(dst[y*dstStride]); #if TEMPLATE_PP_MMX uint8_t *tempBlock1= c.tempBlocks; uint8_t *tempBlock2= c.tempBlocks + 8; #endif const int8_t *QPptr= &QPs[(y>>qpVShift)*QPStride]; int8_t *nonBQPptr= &c.nonBQPTable[(y>>qpVShift)*FFABS(QPStride)]; int QP=0; /* can we mess with a 8x16 block from srcBlock/dstBlock downwards and 1 line upwards if not than use a temporary buffer */ if(y+15 >= height){ int i; /* copy from line (copyAhead) to (copyAhead+7) of src, these will be copied with blockcopy to dst later */ linecpy(tempSrc + srcStride*copyAhead, srcBlock + srcStride*copyAhead, FFMAX(height-y-copyAhead, 0), srcStride); /* duplicate last line of src to fill the void up to line (copyAhead+7) */ for(i=FFMAX(height-y, 8); i<copyAhead+8; i++) memcpy(tempSrc + srcStride*i, src + srcStride*(height-1), FFABS(srcStride)); /* copy up to (copyAhead+1) lines of dst (line -1 to (copyAhead-1))*/ linecpy(tempDst, dstBlock - dstStride, FFMIN(height-y+1, copyAhead+1), dstStride); /* duplicate last line of dst to fill the void up to line (copyAhead) */ for(i=height-y+1; i<=copyAhead; i++) memcpy(tempDst + dstStride*i, dst + dstStride*(height-1), FFABS(dstStride)); dstBlock= tempDst + dstStride; srcBlock= tempSrc; } // From this point on it is guaranteed that we can read and write 16 lines downward // finish 1 block before the next otherwise we might have a problem // with the L1 Cache of the P4 ... or only a few blocks at a time or something for(x=0; x<width; x+=BLOCK_SIZE){ const int stride= dstStride; #if TEMPLATE_PP_MMX uint8_t *tmpXchg; #endif if(isColor){ QP= QPptr[x>>qpHShift]; c.nonBQP= nonBQPptr[x>>qpHShift]; }else{ QP= QPptr[x>>4]; QP= (QP* QPCorrecture + 256*128)>>16; c.nonBQP= nonBQPptr[x>>4]; c.nonBQP= (c.nonBQP* QPCorrecture + 256*128)>>16; yHistogram[ srcBlock[srcStride*12 + 4] ]++; } c.QP= QP; #if TEMPLATE_PP_MMX __asm__ volatile( "movd %1, %%mm7 \n\t" "packuswb %%mm7, %%mm7 \n\t" // 0, 0, 0, QP, 0, 0, 0, QP "packuswb %%mm7, %%mm7 \n\t" // 0,QP, 0, QP, 0,QP, 0, QP "packuswb %%mm7, %%mm7 \n\t" // QP,..., QP "movq %%mm7, %0 \n\t" : "=m" (c.pQPb) : "r" (QP) ); #endif #if TEMPLATE_PP_MMXEXT && HAVE_6REGS /* prefetchnta(srcBlock + (((x>>2)&6) + 5)*srcStride + 32); prefetchnta(srcBlock + (((x>>2)&6) + 6)*srcStride + 32); prefetcht0(dstBlock + (((x>>2)&6) + 5)*dstStride + 32); prefetcht0(dstBlock + (((x>>2)&6) + 6)*dstStride + 32); */ __asm__( "mov %4, %%"REG_a" \n\t" "shr $2, %%"REG_a" \n\t" "and $6, %%"REG_a" \n\t" "add %5, %%"REG_a" \n\t" "mov %%"REG_a", %%"REG_d" \n\t" "imul %1, %%"REG_a" \n\t" "imul %3, %%"REG_d" \n\t" "prefetchnta 32(%%"REG_a", %0) \n\t" "prefetcht0 32(%%"REG_d", %2) \n\t" "add %1, %%"REG_a" \n\t" "add %3, %%"REG_d" \n\t" "prefetchnta 32(%%"REG_a", %0) \n\t" "prefetcht0 32(%%"REG_d", %2) \n\t" :: "r" (srcBlock), "r" ((x86_reg)srcStride), "r" (dstBlock), "r" ((x86_reg)dstStride), "g" ((x86_reg)x), "g" ((x86_reg)copyAhead) : "%"REG_a, "%"REG_d ); #elif TEMPLATE_PP_3DNOW //FIXME check if this is faster on an 3dnow chip or if it is faster without the prefetch or ... /* prefetch(srcBlock + (((x>>3)&3) + 5)*srcStride + 32); prefetch(srcBlock + (((x>>3)&3) + 9)*srcStride + 32); prefetchw(dstBlock + (((x>>3)&3) + 5)*dstStride + 32); prefetchw(dstBlock + (((x>>3)&3) + 9)*dstStride + 32); */ #endif RENAME(blockCopy)(dstBlock + dstStride*copyAhead, dstStride, srcBlock + srcStride*copyAhead, srcStride, mode & LEVEL_FIX, &c.packedYOffset); if(mode & LINEAR_IPOL_DEINT_FILTER) RENAME(deInterlaceInterpolateLinear)(dstBlock, dstStride); else if(mode & LINEAR_BLEND_DEINT_FILTER) RENAME(deInterlaceBlendLinear)(dstBlock, dstStride, c.deintTemp + x); else if(mode & MEDIAN_DEINT_FILTER) RENAME(deInterlaceMedian)(dstBlock, dstStride); else if(mode & CUBIC_IPOL_DEINT_FILTER) RENAME(deInterlaceInterpolateCubic)(dstBlock, dstStride); else if(mode & FFMPEG_DEINT_FILTER) RENAME(deInterlaceFF)(dstBlock, dstStride, c.deintTemp + x); else if(mode & LOWPASS5_DEINT_FILTER) RENAME(deInterlaceL5)(dstBlock, dstStride, c.deintTemp + x, c.deintTemp + width + x); /* else if(mode & CUBIC_BLEND_DEINT_FILTER) RENAME(deInterlaceBlendCubic)(dstBlock, dstStride); */ /* only deblock if we have 2 blocks */ if(y + 8 < height){ if(mode & V_X1_FILTER) RENAME(vertX1Filter)(dstBlock, stride, &c); else if(mode & V_DEBLOCK){ const int t= RENAME(vertClassify)(dstBlock, stride, &c); if(t==1) RENAME(doVertLowPass)(dstBlock, stride, &c); else if(t==2) RENAME(doVertDefFilter)(dstBlock, stride, &c); }else if(mode & V_A_DEBLOCK){ RENAME(do_a_deblock)(dstBlock, stride, 1, &c, mode); } } #if TEMPLATE_PP_MMX RENAME(transpose1)(tempBlock1, tempBlock2, dstBlock, dstStride); #endif /* check if we have a previous block to deblock it with dstBlock */ if(x - 8 >= 0){ #if TEMPLATE_PP_MMX if(mode & H_X1_FILTER) RENAME(vertX1Filter)(tempBlock1, 16, &c); else if(mode & H_DEBLOCK){ //START_TIMER const int t= RENAME(vertClassify)(tempBlock1, 16, &c); //STOP_TIMER("dc & minmax") if(t==1) RENAME(doVertLowPass)(tempBlock1, 16, &c); else if(t==2) RENAME(doVertDefFilter)(tempBlock1, 16, &c); }else if(mode & H_A_DEBLOCK){ RENAME(do_a_deblock)(tempBlock1, 16, 1, &c, mode); } RENAME(transpose2)(dstBlock-4, dstStride, tempBlock1 + 4*16); #else if(mode & H_X1_FILTER) horizX1Filter(dstBlock-4, stride, QP); else if(mode & H_DEBLOCK){ #if TEMPLATE_PP_ALTIVEC DECLARE_ALIGNED(16, unsigned char, tempBlock)[272]; int t; transpose_16x8_char_toPackedAlign_altivec(tempBlock, dstBlock - (4 + 1), stride); t = vertClassify_altivec(tempBlock-48, 16, &c); if(t==1) { doVertLowPass_altivec(tempBlock-48, 16, &c); transpose_8x16_char_fromPackedAlign_altivec(dstBlock - (4 + 1), tempBlock, stride); } else if(t==2) { doVertDefFilter_altivec(tempBlock-48, 16, &c); transpose_8x16_char_fromPackedAlign_altivec(dstBlock - (4 + 1), tempBlock, stride); } #else const int t= RENAME(horizClassify)(dstBlock-4, stride, &c); if(t==1) RENAME(doHorizLowPass)(dstBlock-4, stride, &c); else if(t==2) RENAME(doHorizDefFilter)(dstBlock-4, stride, &c); #endif }else if(mode & H_A_DEBLOCK){ RENAME(do_a_deblock)(dstBlock-8, 1, stride, &c, mode); } #endif //TEMPLATE_PP_MMX if(mode & DERING){ //FIXME filter first line if(y>0) RENAME(dering)(dstBlock - stride - 8, stride, &c); } if(mode & TEMP_NOISE_FILTER) { RENAME(tempNoiseReducer)(dstBlock-8, stride, c.tempBlurred[isColor] + y*dstStride + x, c.tempBlurredPast[isColor] + (y>>3)*256 + (x>>3) + 256, c.ppMode.maxTmpNoise); } } dstBlock+=8; srcBlock+=8; #if TEMPLATE_PP_MMX tmpXchg= tempBlock1; tempBlock1= tempBlock2; tempBlock2 = tmpXchg; #endif } if(mode & DERING){ if(y > 0) RENAME(dering)(dstBlock - dstStride - 8, dstStride, &c); } if((mode & TEMP_NOISE_FILTER)){ RENAME(tempNoiseReducer)(dstBlock-8, dstStride, c.tempBlurred[isColor] + y*dstStride + x, c.tempBlurredPast[isColor] + (y>>3)*256 + (x>>3) + 256, c.ppMode.maxTmpNoise); } /* did we use a tmp buffer for the last lines*/ if(y+15 >= height){ uint8_t *dstBlock= &(dst[y*dstStride]); if(width==FFABS(dstStride)) linecpy(dstBlock, tempDst + dstStride, height-y, dstStride); else{ int i; for(i=0; i<height-y; i++){ memcpy(dstBlock + i*dstStride, tempDst + (i+1)*dstStride, width); } } } /* for(x=0; x<width; x+=32){ volatile int i; i+= dstBlock[x + 7*dstStride] + dstBlock[x + 8*dstStride] + dstBlock[x + 9*dstStride] + dstBlock[x +10*dstStride] + dstBlock[x +11*dstStride] + dstBlock[x +12*dstStride]; + dstBlock[x +13*dstStride] + dstBlock[x +14*dstStride] + dstBlock[x +15*dstStride]; }*/ } #if TEMPLATE_PP_3DNOW __asm__ volatile("femms"); #elif TEMPLATE_PP_MMX __asm__ volatile("emms"); #endif #ifdef DEBUG_BRIGHTNESS if(!isColor){ int max=1; int i; for(i=0; i<256; i++) if(yHistogram[i] > max) max=yHistogram[i]; for(i=1; i<256; i++){ int x; int start=yHistogram[i-1]/(max/256+1); int end=yHistogram[i]/(max/256+1); int inc= end > start ? 1 : -1; for(x=start; x!=end+inc; x+=inc) dst[ i*dstStride + x]+=128; } for(i=0; i<100; i+=2){ dst[ (white)*dstStride + i]+=128; dst[ (black)*dstStride + i]+=128; } } #endif *c2= c; //copy local context back } | 27,294 |
1 | static char *choose_pix_fmts(OutputStream *ost) { if (ost->keep_pix_fmt) { if (ost->filter) avfilter_graph_set_auto_convert(ost->filter->graph->graph, AVFILTER_AUTO_CONVERT_NONE); if (ost->st->codec->pix_fmt == PIX_FMT_NONE) return NULL; return av_strdup(av_get_pix_fmt_name(ost->st->codec->pix_fmt)); } if (ost->st->codec->pix_fmt != PIX_FMT_NONE) { return av_strdup(av_get_pix_fmt_name(choose_pixel_fmt(ost->st, ost->enc, ost->st->codec->pix_fmt))); } else if (ost->enc->pix_fmts) { const enum PixelFormat *p; AVIOContext *s = NULL; uint8_t *ret; int len; if (avio_open_dyn_buf(&s) < 0) exit_program(1); p = ost->enc->pix_fmts; if (ost->st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) { if (ost->st->codec->codec_id == CODEC_ID_MJPEG) { p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE }; } else if (ost->st->codec->codec_id == CODEC_ID_LJPEG) { p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ444P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_BGRA, PIX_FMT_NONE }; } } for (; *p != PIX_FMT_NONE; p++) { const char *name = av_get_pix_fmt_name(*p); avio_printf(s, "%s:", name); } len = avio_close_dyn_buf(s, &ret); ret[len - 1] = 0; return ret; } else return NULL; } | 27,295 |
0 | static int xiph_handle_packet(AVFormatContext *ctx, PayloadContext *data, AVStream *st, AVPacket *pkt, uint32_t *timestamp, const uint8_t *buf, int len, uint16_t seq, int flags) { int ident, fragmented, tdt, num_pkts, pkt_len; if (!buf) { if (!data->split_buf || data->split_pos + 2 > data->split_buf_len || data->split_pkts <= 0) { av_log(ctx, AV_LOG_ERROR, "No more data to return\n"); return AVERROR_INVALIDDATA; } pkt_len = AV_RB16(data->split_buf + data->split_pos); data->split_pos += 2; if (data->split_pos + pkt_len > data->split_buf_len) { av_log(ctx, AV_LOG_ERROR, "Not enough data to return\n"); return AVERROR_INVALIDDATA; } if (av_new_packet(pkt, pkt_len)) { av_log(ctx, AV_LOG_ERROR, "Out of memory.\n"); return AVERROR(ENOMEM); } pkt->stream_index = st->index; memcpy(pkt->data, data->split_buf + data->split_pos, pkt_len); data->split_pos += pkt_len; data->split_pkts--; return data->split_pkts > 0; } if (len < 6 || len > INT_MAX/2) { av_log(ctx, AV_LOG_ERROR, "Invalid %d byte packet\n", len); return AVERROR_INVALIDDATA; } // read xiph rtp headers ident = AV_RB24(buf); fragmented = buf[3] >> 6; tdt = (buf[3] >> 4) & 3; num_pkts = buf[3] & 0xf; pkt_len = AV_RB16(buf + 4); if (pkt_len > len - 6) { av_log(ctx, AV_LOG_ERROR, "Invalid packet length %d in %d byte packet\n", pkt_len, len); return AVERROR_INVALIDDATA; } if (ident != data->ident) { av_log(ctx, AV_LOG_ERROR, "Unimplemented Xiph SDP configuration change detected\n"); return AVERROR_PATCHWELCOME; } if (tdt) { av_log(ctx, AV_LOG_ERROR, "Unimplemented RTP Xiph packet settings (%d,%d,%d)\n", fragmented, tdt, num_pkts); return AVERROR_PATCHWELCOME; } buf += 6; // move past header bits len -= 6; if (fragmented == 0) { if (av_new_packet(pkt, pkt_len)) { av_log(ctx, AV_LOG_ERROR, "Out of memory.\n"); return AVERROR(ENOMEM); } pkt->stream_index = st->index; memcpy(pkt->data, buf, pkt_len); buf += pkt_len; len -= pkt_len; num_pkts--; if (num_pkts > 0) { if (len > data->split_buf_size || !data->split_buf) { av_freep(&data->split_buf); data->split_buf_size = 2 * len; data->split_buf = av_malloc(data->split_buf_size); if (!data->split_buf) { av_log(ctx, AV_LOG_ERROR, "Out of memory.\n"); av_free_packet(pkt); return AVERROR(ENOMEM); } } memcpy(data->split_buf, buf, len); data->split_buf_len = len; data->split_pos = 0; data->split_pkts = num_pkts; return 1; } return 0; } else if (fragmented == 1) { // start of xiph data fragment int res; // end packet has been lost somewhere, so drop buffered data ffio_free_dyn_buf(&data->fragment); if((res = avio_open_dyn_buf(&data->fragment)) < 0) return res; avio_write(data->fragment, buf, pkt_len); data->timestamp = *timestamp; } else { av_assert1(fragmented < 4); if (data->timestamp != *timestamp) { // skip if fragmented timestamp is incorrect; // a start packet has been lost somewhere ffio_free_dyn_buf(&data->fragment); av_log(ctx, AV_LOG_ERROR, "RTP timestamps don't match!\n"); return AVERROR_INVALIDDATA; } if (!data->fragment) { av_log(ctx, AV_LOG_WARNING, "Received packet without a start fragment; dropping.\n"); return AVERROR(EAGAIN); } // copy data to fragment buffer avio_write(data->fragment, buf, pkt_len); if (fragmented == 3) { // end of xiph data packet int ret = ff_rtp_finalize_packet(pkt, &data->fragment, st->index); if (ret < 0) { av_log(ctx, AV_LOG_ERROR, "Error occurred when getting fragment buffer."); return ret; } return 0; } } return AVERROR(EAGAIN); } | 27,297 |
0 | static int check_image_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, const int linesizes[4]) { const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt); int i; for (i = 0; i < 4; i++) { int plane = desc->comp[i].plane; if (!data[plane] || !linesizes[plane]) return 0; } return 1; } | 27,298 |
0 | static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir) { MpegEncContext *s = &v->s; DSPContext *dsp = &v->s.dsp; uint8_t *srcY; int dxy, mx, my, src_x, src_y; int off; int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0; int v_edge_pos = s->v_edge_pos >> v->field_mode; if (!v->field_mode && !v->s.last_picture.f.data[0]) return; mx = s->mv[dir][n][0]; my = s->mv[dir][n][1]; if (!dir) { if (v->field_mode) { if ((v->cur_field_type != v->ref_field_type[dir]) && v->cur_field_type) srcY = s->current_picture.f.data[0]; else srcY = s->last_picture.f.data[0]; } else srcY = s->last_picture.f.data[0]; } else srcY = s->next_picture.f.data[0]; if (v->field_mode) { if (v->cur_field_type != v->ref_field_type[dir]) my = my - 2 + 4 * v->cur_field_type; } if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) { int same_count = 0, opp_count = 0, k; int chosen_mv[2][4][2], f; int tx, ty; for (k = 0; k < 4; k++) { f = v->mv_f[0][s->block_index[k] + v->blocks_off]; chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0]; chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1]; opp_count += f; same_count += 1 - f; } f = opp_count > same_count; switch (f ? opp_count : same_count) { case 4: tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0], chosen_mv[f][3][0]); ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1], chosen_mv[f][3][1]); break; case 3: tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]); ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]); break; case 2: tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2; ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2; break; } s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx; s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty; for (k = 0; k < 4; k++) v->mv_f[1][s->block_index[k] + v->blocks_off] = f; } if (v->fcm == ILACE_FRAME) { // not sure if needed for other types of picture int qx, qy; int width = s->avctx->coded_width; int height = s->avctx->coded_height >> 1; qx = (s->mb_x * 16) + (mx >> 2); qy = (s->mb_y * 8) + (my >> 3); if (qx < -17) mx -= 4 * (qx + 17); else if (qx > width) mx -= 4 * (qx - width); if (qy < -18) my -= 8 * (qy + 18); else if (qy > height + 1) my -= 8 * (qy - height - 1); } if ((v->fcm == ILACE_FRAME) && fieldmv) off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8; else off = s->linesize * 4 * (n & 2) + (n & 1) * 8; if (v->field_mode && v->cur_field_type) off += s->current_picture_ptr->f.linesize[0]; src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2); if (!fieldmv) src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2); else src_y = s->mb_y * 16 + ((n > 1) ? 
1 : 0) + (my >> 2); if (v->profile != PROFILE_ADVANCED) { src_x = av_clip(src_x, -16, s->mb_width * 16); src_y = av_clip(src_y, -16, s->mb_height * 16); } else { src_x = av_clip(src_x, -17, s->avctx->coded_width); if (v->fcm == ILACE_FRAME) { if (src_y & 1) src_y = av_clip(src_y, -17, s->avctx->coded_height + 1); else src_y = av_clip(src_y, -18, s->avctx->coded_height); } else { src_y = av_clip(src_y, -18, s->avctx->coded_height + 1); } } srcY += src_y * s->linesize + src_x; if (v->field_mode && v->ref_field_type[dir]) srcY += s->current_picture_ptr->f.linesize[0]; if (fieldmv && !(src_y & 1)) v_edge_pos--; if (fieldmv && (src_y & 1) && src_y < 4) src_y--; if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP) || s->h_edge_pos < 13 || v_edge_pos < 23 || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2 || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) { srcY -= s->mspel * (1 + (s->linesize << fieldmv)); /* check emulate edge stride and offset */ s->dsp.emulated_edge_mc(s->edge_emu_buffer, srcY, s->linesize, 9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv, src_x - s->mspel, src_y - (s->mspel << fieldmv), s->h_edge_pos, v_edge_pos); srcY = s->edge_emu_buffer; /* if we deal with range reduction we need to scale source blocks */ if (v->rangeredfrm) { int i, j; uint8_t *src; src = srcY; for (j = 0; j < 9 + s->mspel * 2; j++) { for (i = 0; i < 9 + s->mspel * 2; i++) src[i] = ((src[i] - 128) >> 1) + 128; src += s->linesize << fieldmv; } } /* if we deal with intensity compensation we need to scale source blocks */ if (v->mv_mode == MV_PMODE_INTENSITY_COMP) { int i, j; uint8_t *src; src = srcY; for (j = 0; j < 9 + s->mspel * 2; j++) { for (i = 0; i < 9 + s->mspel * 2; i++) src[i] = v->luty[src[i]]; src += s->linesize << fieldmv; } } srcY += s->mspel * (1 + (s->linesize << fieldmv)); } if (s->mspel) { dxy = ((my & 3) << 2) | (mx & 3); v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd); } else { // hpel mc - always used for luma dxy = (my & 2) | ((mx & 2) >> 1); if (!v->rnd) dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8); else dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8); } } | 27,299 |
0 | int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset, unsigned int bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) { int ret; trace_blk_co_pwritev(blk, blk_bs(blk), offset, bytes, flags); ret = blk_check_byte_request(blk, offset, bytes); if (ret < 0) { return ret; } /* throttling disk I/O */ if (blk->public.throttle_state) { throttle_group_co_io_limits_intercept(blk, bytes, true); } if (!blk->enable_write_cache) { flags |= BDRV_REQ_FUA; } return bdrv_co_pwritev(blk_bs(blk), offset, bytes, qiov, flags); } | 27,300 |
0 | void monitor_protocol_event(MonitorEvent event, QObject *data) { QDict *qmp; const char *event_name; Monitor *mon; assert(event < QEVENT_MAX); switch (event) { case QEVENT_DEBUG: event_name = "DEBUG"; break; case QEVENT_SHUTDOWN: event_name = "SHUTDOWN"; break; case QEVENT_RESET: event_name = "RESET"; break; case QEVENT_POWERDOWN: event_name = "POWERDOWN"; break; case QEVENT_STOP: event_name = "STOP"; break; case QEVENT_VNC_CONNECTED: event_name = "VNC_CONNECTED"; break; case QEVENT_VNC_INITIALIZED: event_name = "VNC_INITIALIZED"; break; case QEVENT_VNC_DISCONNECTED: event_name = "VNC_DISCONNECTED"; break; default: abort(); break; } qmp = qdict_new(); timestamp_put(qmp); qdict_put(qmp, "event", qstring_from_str(event_name)); if (data) { qobject_incref(data); qdict_put_obj(qmp, "data", data); } QLIST_FOREACH(mon, &mon_list, entry) { if (!monitor_ctrl_mode(mon)) return; monitor_json_emitter(mon, QOBJECT(qmp)); } QDECREF(qmp); } | 27,301 |
0 | static int ppce500_prep_device_tree(MachineState *machine, PPCE500Params *params, hwaddr addr, hwaddr initrd_base, hwaddr initrd_size) { DeviceTreeParams *p = g_new(DeviceTreeParams, 1); p->machine = machine; p->params = *params; p->addr = addr; p->initrd_base = initrd_base; p->initrd_size = initrd_size; qemu_register_reset(ppce500_reset_device_tree, p); /* Issue the device tree loader once, so that we get the size of the blob */ return ppce500_load_device_tree(machine, params, addr, initrd_base, initrd_size, true); } | 27,302 |
0 | int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) { BlockDriver *drv = bs->drv; if (drv && drv->bdrv_ioctl) return drv->bdrv_ioctl(bs, req, buf); return -ENOTSUP; } | 27,303 |
0 | void tb_invalidate_phys_addr(target_phys_addr_t addr) { ram_addr_t ram_addr; MemoryRegionSection *section; section = phys_page_find(addr >> TARGET_PAGE_BITS); if (!(memory_region_is_ram(section->mr) || (section->mr->rom_device && section->mr->readable))) { return; } ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK) + memory_region_section_addr(section, addr); tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0); } | 27,304 |
0 | static void scsi_generic_realize(SCSIDevice *s, Error **errp) { int rc; int sg_version; struct sg_scsi_id scsiid; if (!s->conf.bs) { error_setg(errp, "drive property not set"); return; } if (bdrv_get_on_error(s->conf.bs, 0) != BLOCKDEV_ON_ERROR_ENOSPC) { error_setg(errp, "Device doesn't support drive option werror"); return; } if (bdrv_get_on_error(s->conf.bs, 1) != BLOCKDEV_ON_ERROR_REPORT) { error_setg(errp, "Device doesn't support drive option rerror"); return; } /* check we are using a driver managing SG_IO (version 3 and after */ rc = bdrv_ioctl(s->conf.bs, SG_GET_VERSION_NUM, &sg_version); if (rc < 0) { error_setg(errp, "cannot get SG_IO version number: %s. " "Is this a SCSI device?", strerror(-rc)); return; } if (sg_version < 30000) { error_setg(errp, "scsi generic interface too old"); return; } /* get LUN of the /dev/sg? */ if (bdrv_ioctl(s->conf.bs, SG_GET_SCSI_ID, &scsiid)) { error_setg(errp, "SG_GET_SCSI_ID ioctl failed"); return; } /* define device state */ s->type = scsiid.scsi_type; DPRINTF("device type %d\n", s->type); switch (s->type) { case TYPE_TAPE: s->blocksize = get_stream_blocksize(s->conf.bs); if (s->blocksize == -1) { s->blocksize = 0; } break; /* Make a guess for block devices, we'll fix it when the guest sends. * READ CAPACITY. If they don't, they likely would assume these sizes * anyway. (TODO: they could also send MODE SENSE). */ case TYPE_ROM: case TYPE_WORM: s->blocksize = 2048; break; default: s->blocksize = 512; break; } DPRINTF("block size %d\n", s->blocksize); } | 27,305 |
0 | static void pool_release_buffer(void *opaque, uint8_t *data) { BufferPoolEntry *buf = opaque; AVBufferPool *pool = buf->pool; if(CONFIG_MEMORY_POISONING) memset(buf->data, 0x2a, pool->size); add_to_pool(buf); if (!avpriv_atomic_int_add_and_fetch(&pool->refcount, -1)) buffer_pool_free(pool); } | 27,306 |
0 | static void dec_load(DisasContext *dc) { TCGv t, *addr; unsigned int size; size = 1 << (dc->opcode & 3); LOG_DIS("l %x %d\n", dc->opcode, size); t_sync_flags(dc); addr = compute_ldst_addr(dc, &t); /* If we get a fault on a dslot, the jmpstate better be in sync. */ sync_jmpstate(dc); /* Verify alignment if needed. */ if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) { gen_helper_memalign(*addr, tcg_const_tl(dc->rd), tcg_const_tl(0), tcg_const_tl(size)); } if (dc->rd) { gen_load(dc, cpu_R[dc->rd], *addr, size); } else { gen_load(dc, env_imm, *addr, size); } if (addr == &t) tcg_temp_free(t); } | 27,307 |
0 | void ich9_lpc_pm_init(PCIDevice *lpc_pci, bool smm_enabled) { ICH9LPCState *lpc = ICH9_LPC_DEVICE(lpc_pci); qemu_irq sci_irq; sci_irq = qemu_allocate_irq(ich9_set_sci, lpc, 0); ich9_pm_init(lpc_pci, &lpc->pm, smm_enabled, sci_irq); ich9_lpc_reset(&lpc->d.qdev); } | 27,308 |
0 | uint64_t helper_fctiw (uint64_t arg) { CPU_DoubleU farg; farg.ll = arg; if (unlikely(float64_is_signaling_nan(farg.d))) { /* sNaN conversion */ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI); } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) { /* qNan / infinity conversion */ farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI); } else { farg.ll = float64_to_int32(farg.d, &env->fp_status); #if USE_PRECISE_EMULATION /* XXX: higher bits are not supposed to be significant. * to make tests easier, return the same as a real PowerPC 750 */ farg.ll |= 0xFFF80000ULL << 32; #endif } return farg.ll; } | 27,309 |
0 | static int ppc_hash64_pte_prot(PowerPCCPU *cpu, ppc_slb_t *slb, ppc_hash_pte64_t pte) { CPUPPCState *env = &cpu->env; unsigned pp, key; /* Some pp bit combinations have undefined behaviour, so default * to no access in those cases */ int prot = 0; key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP) : (slb->vsid & SLB_VSID_KS)); pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61); if (key == 0) { switch (pp) { case 0x0: case 0x1: case 0x2: prot = PAGE_READ | PAGE_WRITE; break; case 0x3: case 0x6: prot = PAGE_READ; break; } } else { switch (pp) { case 0x0: case 0x6: prot = 0; break; case 0x1: case 0x3: prot = PAGE_READ; break; case 0x2: prot = PAGE_READ | PAGE_WRITE; break; } } /* No execute if either noexec or guarded bits set */ if (!(pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) || (slb->vsid & SLB_VSID_N)) { prot |= PAGE_EXEC; } return prot; } | 27,310 |
0 | static void exynos4210_mct_write(void *opaque, target_phys_addr_t offset, uint64_t value, unsigned size) { Exynos4210MCTState *s = (Exynos4210MCTState *)opaque; int index; /* index in buffer which represents register set */ int shift; int lt_i; uint64_t new_frc; uint32_t i; uint32_t old_val; #ifdef DEBUG_MCT static uint32_t icntb_max[2] = {0}; static uint32_t icntb_min[2] = {UINT32_MAX, UINT32_MAX}; static uint32_t tcntb_max[2] = {0}; static uint32_t tcntb_min[2] = {UINT32_MAX, UINT32_MAX}; #endif new_frc = s->g_timer.reg.cnt; switch (offset) { case MCT_CFG: s->reg_mct_cfg = value; exynos4210_mct_update_freq(s); break; case G_CNT_L: case G_CNT_U: if (offset == G_CNT_L) { DPRINTF("global timer write to reg.cntl %llx\n", value); new_frc = (s->g_timer.reg.cnt & (uint64_t)UINT32_MAX << 32) + value; s->g_timer.reg.cnt_wstat |= G_CNT_WSTAT_L; } if (offset == G_CNT_U) { DPRINTF("global timer write to reg.cntu %llx\n", value); new_frc = (s->g_timer.reg.cnt & UINT32_MAX) + ((uint64_t)value << 32); s->g_timer.reg.cnt_wstat |= G_CNT_WSTAT_U; } s->g_timer.reg.cnt = new_frc; exynos4210_gfrc_restart(s); break; case G_CNT_WSTAT: s->g_timer.reg.cnt_wstat &= ~(value); break; case G_COMP_L(0): case G_COMP_L(1): case G_COMP_L(2): case G_COMP_L(3): case G_COMP_U(0): case G_COMP_U(1): case G_COMP_U(2): case G_COMP_U(3): index = GET_G_COMP_IDX(offset); shift = 8 * (offset & 0x4); s->g_timer.reg.comp[index] = (s->g_timer.reg.comp[index] & (((uint64_t)UINT32_MAX << 32) >> shift)) + (value << shift); DPRINTF("comparator %d write 0x%llx val << %d\n", index, value, shift); if (offset&0x4) { s->g_timer.reg.wstat |= G_WSTAT_COMP_U(index); } else { s->g_timer.reg.wstat |= G_WSTAT_COMP_L(index); } exynos4210_gfrc_restart(s); break; case G_TCON: old_val = s->g_timer.reg.tcon; s->g_timer.reg.tcon = value; s->g_timer.reg.wstat |= G_WSTAT_TCON_WRITE; DPRINTF("global timer write to reg.g_tcon %llx\n", value); /* Start FRC if transition from disabled to enabled */ if ((value & G_TCON_TIMER_ENABLE) > (old_val & G_TCON_TIMER_ENABLE)) { exynos4210_gfrc_start(&s->g_timer); } if ((value & G_TCON_TIMER_ENABLE) < (old_val & G_TCON_TIMER_ENABLE)) { exynos4210_gfrc_stop(&s->g_timer); } /* Start CMP if transition from disabled to enabled */ for (i = 0; i < MCT_GT_CMP_NUM; i++) { if ((value & G_TCON_COMP_ENABLE(i)) != (old_val & G_TCON_COMP_ENABLE(i))) { exynos4210_gfrc_restart(s); } } break; case G_INT_CSTAT: s->g_timer.reg.int_cstat &= ~(value); for (i = 0; i < MCT_GT_CMP_NUM; i++) { if (value & G_INT_CSTAT_COMP(i)) { exynos4210_gcomp_lower_irq(&s->g_timer, i); } } break; case G_INT_ENB: /* Raise IRQ if transition from disabled to enabled and CSTAT pending */ for (i = 0; i < MCT_GT_CMP_NUM; i++) { if ((value & G_INT_ENABLE(i)) > (s->g_timer.reg.tcon & G_INT_ENABLE(i))) { if (s->g_timer.reg.int_cstat & G_INT_CSTAT_COMP(i)) { exynos4210_gcomp_raise_irq(&s->g_timer, i); } } if ((value & G_INT_ENABLE(i)) < (s->g_timer.reg.tcon & G_INT_ENABLE(i))) { exynos4210_gcomp_lower_irq(&s->g_timer, i); } } DPRINTF("global timer INT enable %llx\n", value); s->g_timer.reg.int_enb = value; break; case G_WSTAT: s->g_timer.reg.wstat &= ~(value); break; case G_COMP0_ADD_INCR: case G_COMP1_ADD_INCR: case G_COMP2_ADD_INCR: case G_COMP3_ADD_INCR: index = GET_G_COMP_ADD_INCR_IDX(offset); s->g_timer.reg.comp_add_incr[index] = value; s->g_timer.reg.wstat |= G_WSTAT_COMP_ADDINCR(index); break; /* Local timers */ case L0_TCON: case L1_TCON: lt_i = GET_L_TIMER_IDX(offset); old_val = s->l_timer[lt_i].reg.tcon; s->l_timer[lt_i].reg.wstat |= L_WSTAT_TCON_WRITE; 
s->l_timer[lt_i].reg.tcon = value; /* Stop local CNT */ if ((value & L_TCON_TICK_START) < (old_val & L_TCON_TICK_START)) { DPRINTF("local timer[%d] stop cnt\n", lt_i); exynos4210_ltick_cnt_stop(&s->l_timer[lt_i].tick_timer); } /* Stop local INT */ if ((value & L_TCON_INT_START) < (old_val & L_TCON_INT_START)) { DPRINTF("local timer[%d] stop int\n", lt_i); exynos4210_ltick_int_stop(&s->l_timer[lt_i].tick_timer); } /* Start local CNT */ if ((value & L_TCON_TICK_START) > (old_val & L_TCON_TICK_START)) { DPRINTF("local timer[%d] start cnt\n", lt_i); exynos4210_ltick_cnt_start(&s->l_timer[lt_i].tick_timer); } /* Start local INT */ if ((value & L_TCON_INT_START) > (old_val & L_TCON_INT_START)) { DPRINTF("local timer[%d] start int\n", lt_i); exynos4210_ltick_int_start(&s->l_timer[lt_i].tick_timer); } /* Start or Stop local FRC if TCON changed */ if ((value & L_TCON_FRC_START) > (s->l_timer[lt_i].reg.tcon & L_TCON_FRC_START)) { DPRINTF("local timer[%d] start frc\n", lt_i); exynos4210_lfrc_start(&s->l_timer[lt_i]); } if ((value & L_TCON_FRC_START) < (s->l_timer[lt_i].reg.tcon & L_TCON_FRC_START)) { DPRINTF("local timer[%d] stop frc\n", lt_i); exynos4210_lfrc_stop(&s->l_timer[lt_i]); } break; case L0_TCNTB: case L1_TCNTB: lt_i = GET_L_TIMER_IDX(offset); index = GET_L_TIMER_CNT_REG_IDX(offset, lt_i); /* * TCNTB is updated to internal register only after CNT expired. * Due to this we should reload timer to nearest moment when CNT is * expired and then in event handler update tcntb to new TCNTB value. */ exynos4210_ltick_set_cntb(&s->l_timer[lt_i].tick_timer, value, s->l_timer[lt_i].tick_timer.icntb); s->l_timer[lt_i].reg.wstat |= L_WSTAT_TCNTB_WRITE; s->l_timer[lt_i].reg.cnt[L_REG_CNT_TCNTB] = value; #ifdef DEBUG_MCT if (tcntb_min[lt_i] > value) { tcntb_min[lt_i] = value; } if (tcntb_max[lt_i] < value) { tcntb_max[lt_i] = value; } DPRINTF("local timer[%d] TCNTB write %llx; max=%x, min=%x\n", lt_i, value, tcntb_max[lt_i], tcntb_min[lt_i]); #endif break; case L0_ICNTB: case L1_ICNTB: lt_i = GET_L_TIMER_IDX(offset); index = GET_L_TIMER_CNT_REG_IDX(offset, lt_i); s->l_timer[lt_i].reg.wstat |= L_WSTAT_ICNTB_WRITE; s->l_timer[lt_i].reg.cnt[L_REG_CNT_ICNTB] = value & ~L_ICNTB_MANUAL_UPDATE; /* * We need to avoid too small values for TCNTB*ICNTB. If not, IRQ event * could raise too fast disallowing QEMU to execute target code. 
*/ if (s->l_timer[lt_i].reg.cnt[L_REG_CNT_ICNTB] * s->l_timer[lt_i].reg.cnt[L_REG_CNT_TCNTB] < MCT_LT_CNT_LOW_LIMIT) { if (!s->l_timer[lt_i].reg.cnt[L_REG_CNT_TCNTB]) { s->l_timer[lt_i].reg.cnt[L_REG_CNT_ICNTB] = MCT_LT_CNT_LOW_LIMIT; } else { s->l_timer[lt_i].reg.cnt[L_REG_CNT_ICNTB] = MCT_LT_CNT_LOW_LIMIT / s->l_timer[lt_i].reg.cnt[L_REG_CNT_TCNTB]; } } if (value & L_ICNTB_MANUAL_UPDATE) { exynos4210_ltick_set_cntb(&s->l_timer[lt_i].tick_timer, s->l_timer[lt_i].tick_timer.tcntb, s->l_timer[lt_i].reg.cnt[L_REG_CNT_ICNTB]); } #ifdef DEBUG_MCT if (icntb_min[lt_i] > value) { icntb_min[lt_i] = value; } if (icntb_max[lt_i] < value) { icntb_max[lt_i] = value; } DPRINTF("local timer[%d] ICNTB write %llx; max=%x, min=%x\n\n", lt_i, value, icntb_max[lt_i], icntb_min[lt_i]); #endif break; case L0_FRCNTB: case L1_FRCNTB: lt_i = GET_L_TIMER_IDX(offset); index = GET_L_TIMER_CNT_REG_IDX(offset, lt_i); DPRINTF("local timer[%d] FRCNTB write %llx\n", lt_i, value); s->l_timer[lt_i].reg.wstat |= L_WSTAT_FRCCNTB_WRITE; s->l_timer[lt_i].reg.cnt[L_REG_CNT_FRCCNTB] = value; break; case L0_TCNTO: case L1_TCNTO: case L0_ICNTO: case L1_ICNTO: case L0_FRCNTO: case L1_FRCNTO: fprintf(stderr, "\n[exynos4210.mct: write to RO register " TARGET_FMT_plx "]\n\n", offset); break; case L0_INT_CSTAT: case L1_INT_CSTAT: lt_i = GET_L_TIMER_IDX(offset); DPRINTF("local timer[%d] CSTAT write %llx\n", lt_i, value); s->l_timer[lt_i].reg.int_cstat &= ~value; if (!s->l_timer[lt_i].reg.int_cstat) { qemu_irq_lower(s->l_timer[lt_i].irq); } break; case L0_INT_ENB: case L1_INT_ENB: lt_i = GET_L_TIMER_IDX(offset); old_val = s->l_timer[lt_i].reg.int_enb; /* Raise Local timer IRQ if cstat is pending */ if ((value & L_INT_INTENB_ICNTEIE) > (old_val & L_INT_INTENB_ICNTEIE)) { if (s->l_timer[lt_i].reg.int_cstat & L_INT_CSTAT_INTCNT) { qemu_irq_raise(s->l_timer[lt_i].irq); } } s->l_timer[lt_i].reg.int_enb = value; break; case L0_WSTAT: case L1_WSTAT: lt_i = GET_L_TIMER_IDX(offset); s->l_timer[lt_i].reg.wstat &= ~value; break; default: hw_error("exynos4210.mct: bad write offset " TARGET_FMT_plx "\n", offset); break; } } | 27,312 |
0 | static int no_init_in (HWVoiceIn *hw, audsettings_t *as) { audio_pcm_init_info (&hw->info, as); hw->samples = 1024; return 0; } | 27,313 |
0 | uint32_t HELPER(stfle)(CPUS390XState *env, uint64_t addr) { uint64_t words[MAX_STFL_WORDS]; unsigned count_m1 = env->regs[0] & 0xff; unsigned max_m1 = do_stfle(env, words); unsigned i; for (i = 0; i <= count_m1; ++i) { cpu_stq_data(env, addr + 8 * i, words[i]); } env->regs[0] = deposit64(env->regs[0], 0, 8, max_m1); return (count_m1 >= max_m1 ? 0 : 3); } | 27,314 |
0 | static void pxa2xx_fir_write(void *opaque, hwaddr addr, uint64_t value64, unsigned size) { PXA2xxFIrState *s = (PXA2xxFIrState *) opaque; uint32_t value = value64; uint8_t ch; switch (addr) { case ICCR0: s->control[0] = value; if (!(value & (1 << 4))) /* RXE */ s->rx_len = s->rx_start = 0; if (!(value & (1 << 3))) { /* TXE */ /* Nop */ } s->enable = value & 1; /* ITR */ if (!s->enable) s->status[0] = 0; pxa2xx_fir_update(s); break; case ICCR1: s->control[1] = value; break; case ICCR2: s->control[2] = value & 0x3f; pxa2xx_fir_update(s); break; case ICDR: if (s->control[2] & (1 << 2)) { /* TXP */ ch = value; } else { ch = ~value; } if (s->enable && (s->control[0] & (1 << 3))) { /* TXE */ /* XXX this blocks entire thread. Rewrite to use * qemu_chr_fe_write and background I/O callbacks */ qemu_chr_fe_write_all(&s->chr, &ch, 1); } break; case ICSR0: s->status[0] &= ~(value & 0x66); pxa2xx_fir_update(s); break; case ICFOR: break; default: printf("%s: Bad register " REG_FMT "\n", __FUNCTION__, addr); } } | 27,315 |
0 | static void disas_thumb_insn(CPUARMState *env, DisasContext *s) { uint32_t val, insn, op, rm, rn, rd, shift, cond; int32_t offset; int i; TCGv_i32 tmp; TCGv_i32 tmp2; TCGv_i32 addr; if (s->condexec_mask) { cond = s->condexec_cond; if (cond != 0x0e) { /* Skip conditional when condition is AL. */ s->condlabel = gen_new_label(); gen_test_cc(cond ^ 1, s->condlabel); s->condjmp = 1; } } insn = arm_lduw_code(env, s->pc, s->bswap_code); s->pc += 2; switch (insn >> 12) { case 0: case 1: rd = insn & 7; op = (insn >> 11) & 3; if (op == 3) { /* add/subtract */ rn = (insn >> 3) & 7; tmp = load_reg(s, rn); if (insn & (1 << 10)) { /* immediate */ tmp2 = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp2, (insn >> 6) & 7); } else { /* reg */ rm = (insn >> 6) & 7; tmp2 = load_reg(s, rm); } if (insn & (1 << 9)) { if (s->condexec_mask) tcg_gen_sub_i32(tmp, tmp, tmp2); else gen_sub_CC(tmp, tmp, tmp2); } else { if (s->condexec_mask) tcg_gen_add_i32(tmp, tmp, tmp2); else gen_add_CC(tmp, tmp, tmp2); } tcg_temp_free_i32(tmp2); store_reg(s, rd, tmp); } else { /* shift immediate */ rm = (insn >> 3) & 7; shift = (insn >> 6) & 0x1f; tmp = load_reg(s, rm); gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0); if (!s->condexec_mask) gen_logic_CC(tmp); store_reg(s, rd, tmp); } break; case 2: case 3: /* arithmetic large immediate */ op = (insn >> 11) & 3; rd = (insn >> 8) & 0x7; if (op == 0) { /* mov */ tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, insn & 0xff); if (!s->condexec_mask) gen_logic_CC(tmp); store_reg(s, rd, tmp); } else { tmp = load_reg(s, rd); tmp2 = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp2, insn & 0xff); switch (op) { case 1: /* cmp */ gen_sub_CC(tmp, tmp, tmp2); tcg_temp_free_i32(tmp); tcg_temp_free_i32(tmp2); break; case 2: /* add */ if (s->condexec_mask) tcg_gen_add_i32(tmp, tmp, tmp2); else gen_add_CC(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); store_reg(s, rd, tmp); break; case 3: /* sub */ if (s->condexec_mask) tcg_gen_sub_i32(tmp, tmp, tmp2); else gen_sub_CC(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); store_reg(s, rd, tmp); break; } } break; case 4: if (insn & (1 << 11)) { rd = (insn >> 8) & 7; /* load pc-relative. Bit 1 of PC is ignored. 
*/ val = s->pc + 2 + ((insn & 0xff) * 4); val &= ~(uint32_t)2; addr = tcg_temp_new_i32(); tcg_gen_movi_i32(addr, val); tmp = tcg_temp_new_i32(); gen_aa32_ld32u(tmp, addr, IS_USER(s)); tcg_temp_free_i32(addr); store_reg(s, rd, tmp); break; } if (insn & (1 << 10)) { /* data processing extended or blx */ rd = (insn & 7) | ((insn >> 4) & 8); rm = (insn >> 3) & 0xf; op = (insn >> 8) & 3; switch (op) { case 0: /* add */ tmp = load_reg(s, rd); tmp2 = load_reg(s, rm); tcg_gen_add_i32(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); store_reg(s, rd, tmp); break; case 1: /* cmp */ tmp = load_reg(s, rd); tmp2 = load_reg(s, rm); gen_sub_CC(tmp, tmp, tmp2); tcg_temp_free_i32(tmp2); tcg_temp_free_i32(tmp); break; case 2: /* mov/cpy */ tmp = load_reg(s, rm); store_reg(s, rd, tmp); break; case 3:/* branch [and link] exchange thumb register */ tmp = load_reg(s, rm); if (insn & (1 << 7)) { ARCH(5); val = (uint32_t)s->pc | 1; tmp2 = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp2, val); store_reg(s, 14, tmp2); } /* already thumb, no need to check */ gen_bx(s, tmp); break; } break; } /* data processing register */ rd = insn & 7; rm = (insn >> 3) & 7; op = (insn >> 6) & 0xf; if (op == 2 || op == 3 || op == 4 || op == 7) { /* the shift/rotate ops want the operands backwards */ val = rm; rm = rd; rd = val; val = 1; } else { val = 0; } if (op == 9) { /* neg */ tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, 0); } else if (op != 0xf) { /* mvn doesn't read its first operand */ tmp = load_reg(s, rd); } else { TCGV_UNUSED_I32(tmp); } tmp2 = load_reg(s, rm); switch (op) { case 0x0: /* and */ tcg_gen_and_i32(tmp, tmp, tmp2); if (!s->condexec_mask) gen_logic_CC(tmp); break; case 0x1: /* eor */ tcg_gen_xor_i32(tmp, tmp, tmp2); if (!s->condexec_mask) gen_logic_CC(tmp); break; case 0x2: /* lsl */ if (s->condexec_mask) { gen_shl(tmp2, tmp2, tmp); } else { gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp); gen_logic_CC(tmp2); } break; case 0x3: /* lsr */ if (s->condexec_mask) { gen_shr(tmp2, tmp2, tmp); } else { gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp); gen_logic_CC(tmp2); } break; case 0x4: /* asr */ if (s->condexec_mask) { gen_sar(tmp2, tmp2, tmp); } else { gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp); gen_logic_CC(tmp2); } break; case 0x5: /* adc */ if (s->condexec_mask) { gen_adc(tmp, tmp2); } else { gen_adc_CC(tmp, tmp, tmp2); } break; case 0x6: /* sbc */ if (s->condexec_mask) { gen_sub_carry(tmp, tmp, tmp2); } else { gen_sbc_CC(tmp, tmp, tmp2); } break; case 0x7: /* ror */ if (s->condexec_mask) { tcg_gen_andi_i32(tmp, tmp, 0x1f); tcg_gen_rotr_i32(tmp2, tmp2, tmp); } else { gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp); gen_logic_CC(tmp2); } break; case 0x8: /* tst */ tcg_gen_and_i32(tmp, tmp, tmp2); gen_logic_CC(tmp); rd = 16; break; case 0x9: /* neg */ if (s->condexec_mask) tcg_gen_neg_i32(tmp, tmp2); else gen_sub_CC(tmp, tmp, tmp2); break; case 0xa: /* cmp */ gen_sub_CC(tmp, tmp, tmp2); rd = 16; break; case 0xb: /* cmn */ gen_add_CC(tmp, tmp, tmp2); rd = 16; break; case 0xc: /* orr */ tcg_gen_or_i32(tmp, tmp, tmp2); if (!s->condexec_mask) gen_logic_CC(tmp); break; case 0xd: /* mul */ tcg_gen_mul_i32(tmp, tmp, tmp2); if (!s->condexec_mask) gen_logic_CC(tmp); break; case 0xe: /* bic */ tcg_gen_andc_i32(tmp, tmp, tmp2); if (!s->condexec_mask) gen_logic_CC(tmp); break; case 0xf: /* mvn */ tcg_gen_not_i32(tmp2, tmp2); if (!s->condexec_mask) gen_logic_CC(tmp2); val = 1; rm = rd; break; } if (rd != 16) { if (val) { store_reg(s, rm, tmp2); if (op != 0xf) tcg_temp_free_i32(tmp); } else { store_reg(s, rd, tmp); tcg_temp_free_i32(tmp2); } } else 
{ tcg_temp_free_i32(tmp); tcg_temp_free_i32(tmp2); } break; case 5: /* load/store register offset. */ rd = insn & 7; rn = (insn >> 3) & 7; rm = (insn >> 6) & 7; op = (insn >> 9) & 7; addr = load_reg(s, rn); tmp = load_reg(s, rm); tcg_gen_add_i32(addr, addr, tmp); tcg_temp_free_i32(tmp); if (op < 3) { /* store */ tmp = load_reg(s, rd); } else { tmp = tcg_temp_new_i32(); } switch (op) { case 0: /* str */ gen_aa32_st32(tmp, addr, IS_USER(s)); break; case 1: /* strh */ gen_aa32_st16(tmp, addr, IS_USER(s)); break; case 2: /* strb */ gen_aa32_st8(tmp, addr, IS_USER(s)); break; case 3: /* ldrsb */ gen_aa32_ld8s(tmp, addr, IS_USER(s)); break; case 4: /* ldr */ gen_aa32_ld32u(tmp, addr, IS_USER(s)); break; case 5: /* ldrh */ gen_aa32_ld16u(tmp, addr, IS_USER(s)); break; case 6: /* ldrb */ gen_aa32_ld8u(tmp, addr, IS_USER(s)); break; case 7: /* ldrsh */ gen_aa32_ld16s(tmp, addr, IS_USER(s)); break; } if (op >= 3) { /* load */ store_reg(s, rd, tmp); } else { tcg_temp_free_i32(tmp); } tcg_temp_free_i32(addr); break; case 6: /* load/store word immediate offset */ rd = insn & 7; rn = (insn >> 3) & 7; addr = load_reg(s, rn); val = (insn >> 4) & 0x7c; tcg_gen_addi_i32(addr, addr, val); if (insn & (1 << 11)) { /* load */ tmp = tcg_temp_new_i32(); gen_aa32_ld32u(tmp, addr, IS_USER(s)); store_reg(s, rd, tmp); } else { /* store */ tmp = load_reg(s, rd); gen_aa32_st32(tmp, addr, IS_USER(s)); tcg_temp_free_i32(tmp); } tcg_temp_free_i32(addr); break; case 7: /* load/store byte immediate offset */ rd = insn & 7; rn = (insn >> 3) & 7; addr = load_reg(s, rn); val = (insn >> 6) & 0x1f; tcg_gen_addi_i32(addr, addr, val); if (insn & (1 << 11)) { /* load */ tmp = tcg_temp_new_i32(); gen_aa32_ld8u(tmp, addr, IS_USER(s)); store_reg(s, rd, tmp); } else { /* store */ tmp = load_reg(s, rd); gen_aa32_st8(tmp, addr, IS_USER(s)); tcg_temp_free_i32(tmp); } tcg_temp_free_i32(addr); break; case 8: /* load/store halfword immediate offset */ rd = insn & 7; rn = (insn >> 3) & 7; addr = load_reg(s, rn); val = (insn >> 5) & 0x3e; tcg_gen_addi_i32(addr, addr, val); if (insn & (1 << 11)) { /* load */ tmp = tcg_temp_new_i32(); gen_aa32_ld16u(tmp, addr, IS_USER(s)); store_reg(s, rd, tmp); } else { /* store */ tmp = load_reg(s, rd); gen_aa32_st16(tmp, addr, IS_USER(s)); tcg_temp_free_i32(tmp); } tcg_temp_free_i32(addr); break; case 9: /* load/store from stack */ rd = (insn >> 8) & 7; addr = load_reg(s, 13); val = (insn & 0xff) * 4; tcg_gen_addi_i32(addr, addr, val); if (insn & (1 << 11)) { /* load */ tmp = tcg_temp_new_i32(); gen_aa32_ld32u(tmp, addr, IS_USER(s)); store_reg(s, rd, tmp); } else { /* store */ tmp = load_reg(s, rd); gen_aa32_st32(tmp, addr, IS_USER(s)); tcg_temp_free_i32(tmp); } tcg_temp_free_i32(addr); break; case 10: /* add to high reg */ rd = (insn >> 8) & 7; if (insn & (1 << 11)) { /* SP */ tmp = load_reg(s, 13); } else { /* PC. bit 1 is ignored. */ tmp = tcg_temp_new_i32(); tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2); } val = (insn & 0xff) * 4; tcg_gen_addi_i32(tmp, tmp, val); store_reg(s, rd, tmp); break; case 11: /* misc */ op = (insn >> 8) & 0xf; switch (op) { case 0: /* adjust stack pointer */ tmp = load_reg(s, 13); val = (insn & 0x7f) * 4; if (insn & (1 << 7)) val = -(int32_t)val; tcg_gen_addi_i32(tmp, tmp, val); store_reg(s, 13, tmp); break; case 2: /* sign/zero extend. 
*/ ARCH(6); rd = insn & 7; rm = (insn >> 3) & 7; tmp = load_reg(s, rm); switch ((insn >> 6) & 3) { case 0: gen_sxth(tmp); break; case 1: gen_sxtb(tmp); break; case 2: gen_uxth(tmp); break; case 3: gen_uxtb(tmp); break; } store_reg(s, rd, tmp); break; case 4: case 5: case 0xc: case 0xd: /* push/pop */ addr = load_reg(s, 13); if (insn & (1 << 8)) offset = 4; else offset = 0; for (i = 0; i < 8; i++) { if (insn & (1 << i)) offset += 4; } if ((insn & (1 << 11)) == 0) { tcg_gen_addi_i32(addr, addr, -offset); } for (i = 0; i < 8; i++) { if (insn & (1 << i)) { if (insn & (1 << 11)) { /* pop */ tmp = tcg_temp_new_i32(); gen_aa32_ld32u(tmp, addr, IS_USER(s)); store_reg(s, i, tmp); } else { /* push */ tmp = load_reg(s, i); gen_aa32_st32(tmp, addr, IS_USER(s)); tcg_temp_free_i32(tmp); } /* advance to the next address. */ tcg_gen_addi_i32(addr, addr, 4); } } TCGV_UNUSED_I32(tmp); if (insn & (1 << 8)) { if (insn & (1 << 11)) { /* pop pc */ tmp = tcg_temp_new_i32(); gen_aa32_ld32u(tmp, addr, IS_USER(s)); /* don't set the pc until the rest of the instruction has completed */ } else { /* push lr */ tmp = load_reg(s, 14); gen_aa32_st32(tmp, addr, IS_USER(s)); tcg_temp_free_i32(tmp); } tcg_gen_addi_i32(addr, addr, 4); } if ((insn & (1 << 11)) == 0) { tcg_gen_addi_i32(addr, addr, -offset); } /* write back the new stack pointer */ store_reg(s, 13, addr); /* set the new PC value */ if ((insn & 0x0900) == 0x0900) { store_reg_from_load(env, s, 15, tmp); } break; case 1: case 3: case 9: case 11: /* czb */ rm = insn & 7; tmp = load_reg(s, rm); s->condlabel = gen_new_label(); s->condjmp = 1; if (insn & (1 << 11)) tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel); else tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel); tcg_temp_free_i32(tmp); offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3; val = (uint32_t)s->pc + 2; val += offset; gen_jmp(s, val); break; case 15: /* IT, nop-hint. */ if ((insn & 0xf) == 0) { gen_nop_hint(s, (insn >> 4) & 0xf); break; } /* If Then. */ s->condexec_cond = (insn >> 4) & 0xe; s->condexec_mask = insn & 0x1f; /* No actual code generated for this insn, just setup state. */ break; case 0xe: /* bkpt */ ARCH(5); gen_exception_insn(s, 2, EXCP_BKPT); break; case 0xa: /* rev */ ARCH(6); rn = (insn >> 3) & 0x7; rd = insn & 0x7; tmp = load_reg(s, rn); switch ((insn >> 6) & 3) { case 0: tcg_gen_bswap32_i32(tmp, tmp); break; case 1: gen_rev16(tmp); break; case 3: gen_revsh(tmp); break; default: goto illegal_op; } store_reg(s, rd, tmp); break; case 6: switch ((insn >> 5) & 7) { case 2: /* setend */ ARCH(6); if (((insn >> 3) & 1) != s->bswap_code) { /* Dynamic endianness switching not implemented. 
*/ qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n"); goto illegal_op; } break; case 3: /* cps */ ARCH(6); if (IS_USER(s)) { break; } if (IS_M(env)) { tmp = tcg_const_i32((insn & (1 << 4)) != 0); /* FAULTMASK */ if (insn & 1) { addr = tcg_const_i32(19); gen_helper_v7m_msr(cpu_env, addr, tmp); tcg_temp_free_i32(addr); } /* PRIMASK */ if (insn & 2) { addr = tcg_const_i32(16); gen_helper_v7m_msr(cpu_env, addr, tmp); tcg_temp_free_i32(addr); } tcg_temp_free_i32(tmp); gen_lookup_tb(s); } else { if (insn & (1 << 4)) { shift = CPSR_A | CPSR_I | CPSR_F; } else { shift = 0; } gen_set_psr_im(s, ((insn & 7) << 6), 0, shift); } break; default: goto undef; } break; default: goto undef; } break; case 12: { /* load/store multiple */ TCGv_i32 loaded_var; TCGV_UNUSED_I32(loaded_var); rn = (insn >> 8) & 0x7; addr = load_reg(s, rn); for (i = 0; i < 8; i++) { if (insn & (1 << i)) { if (insn & (1 << 11)) { /* load */ tmp = tcg_temp_new_i32(); gen_aa32_ld32u(tmp, addr, IS_USER(s)); if (i == rn) { loaded_var = tmp; } else { store_reg(s, i, tmp); } } else { /* store */ tmp = load_reg(s, i); gen_aa32_st32(tmp, addr, IS_USER(s)); tcg_temp_free_i32(tmp); } /* advance to the next address */ tcg_gen_addi_i32(addr, addr, 4); } } if ((insn & (1 << rn)) == 0) { /* base reg not in list: base register writeback */ store_reg(s, rn, addr); } else { /* base reg in list: if load, complete it now */ if (insn & (1 << 11)) { store_reg(s, rn, loaded_var); } tcg_temp_free_i32(addr); } break; } case 13: /* conditional branch or swi */ cond = (insn >> 8) & 0xf; if (cond == 0xe) goto undef; if (cond == 0xf) { /* swi */ gen_set_pc_im(s, s->pc); s->is_jmp = DISAS_SWI; break; } /* generate a conditional jump to next instruction */ s->condlabel = gen_new_label(); gen_test_cc(cond ^ 1, s->condlabel); s->condjmp = 1; /* jump to the offset */ val = (uint32_t)s->pc + 2; offset = ((int32_t)insn << 24) >> 24; val += offset << 1; gen_jmp(s, val); break; case 14: if (insn & (1 << 11)) { if (disas_thumb2_insn(env, s, insn)) goto undef32; break; } /* unconditional branch */ val = (uint32_t)s->pc; offset = ((int32_t)insn << 21) >> 21; val += (offset << 1) + 2; gen_jmp(s, val); break; case 15: if (disas_thumb2_insn(env, s, insn)) goto undef32; break; } return; undef32: gen_exception_insn(s, 4, EXCP_UDEF); return; illegal_op: undef: gen_exception_insn(s, 2, EXCP_UDEF); } | 27,316 |