Merge tag 'for-upstream' of https://repo.or.cz/qemu/kevin into staging
Block layer patches

- Protect BlockBackend.queued_requests with its own lock
- Switch to AIO_WAIT_WHILE_UNLOCKED() where possible
- AioContext removal: LinuxAioState/LuringState/ThreadPool
- Add more coroutine_fn annotations, use bdrv/blk_co_*
- Fix crash when executing hmp_commit

# -----BEGIN PGP SIGNATURE-----
#
# iQJFBAABCAAvFiEE3D3rFZqa+V09dFb+fwmycsiPL9YFAmRH0b0RHGt3b2xmQHJl
# ZGhhdC5jb20ACgkQfwmycsiPL9Y0yw/6A/vzA4TGgFUP3WIvH/sQri4/V3gyR+PT
# u3hOQUCYZ99nioTpKV91TSuUPuU/Mdspy/0NKM+K92yIXqxa9172A2zLOsGOu21l
# qKpse+nBf1zqEgB8YzUHyCBdetPz916C/f9RS26SNUCW85GCHYGHA3u7nKvWLMyV
# oKIoTlA8QOglOuEKlRoYh7hCFm7ET51NOSEftm8GsYbsW/I2Vzl8a1SHN1lHufjd
# We3+898zUrmFqNMp6Rjdhn+yZmmoGzoZqV4YQi83z7xjiv+Ms4VHVVW7X8d20xRX
# 5BLFiLHAuZ/1d26HyVhgBUr7KHyf94odocz8BylWKXGl5SXMCZun1Td1vgVKlGK+
# GRxzB2cWGWqzC2UmqSTc0Z0aIWbXukKwvcX76uBKsQZ+kB2A7jFobxHiaoQEDJ8B
# WRNEMH2+CqCAu9rsrNRinnJKhT2nXcr9F9YfwRIlagdAePGWin+EUW8huf14dDBm
# Z2Y34aKW4RQibF8xirMHeRBbOLmcq2VpKLKwNfBHUDgZB8iuD7bLn4n9nwWXMG1w
# zgNsTybkv46vLPamTpEaUoNTHfuRDTAuE7Z7lkcc7jF41Z0V1DC/DCCWcL/0LvhP
# GIxFdkYug3hetdF2U/OZhUoEfxvkqcuBnrr55LFzqheKEllQpPwPpt7UF0aH8bg3
# i/YpjHsf3xU=
# =mpYX
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 25 Apr 2023 02:12:29 PM BST
# gpg:                using RSA key DC3DEB159A9AF95D3D7456FE7F09B272C88F2FD6
# gpg:                issuer "kwolf@redhat.com"
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>" [full]

* tag 'for-upstream' of https://repo.or.cz/qemu/kevin: (25 commits)
  block/monitor: Fix crash when executing HMP commit
  vmdk: make vmdk_is_cid_valid a coroutine_fn
  qcow2: mark various functions as coroutine_fn and GRAPH_RDLOCK
  tests: mark more coroutine_fns
  qemu-pr-helper: mark more coroutine_fns
  9pfs: mark more coroutine_fns
  nbd: mark more coroutine_fns, do not use co_wrappers
  mirror: make mirror_flush a coroutine_fn, do not use co_wrappers
  blkdebug: add missing coroutine_fn annotation
  vvfat: mark various functions as coroutine_fn
  thread-pool: avoid passing the pool parameter every time
  thread-pool: use ThreadPool from the running thread
  io_uring: use LuringState from the running thread
  linux-aio: use LinuxAioState from the running thread
  block: add missing coroutine_fn to bdrv_sum_allocated_file_size()
  include/block: fixup typos
  monitor: convert monitor_cleanup() to AIO_WAIT_WHILE_UNLOCKED()
  hmp: convert handle_hmp_command() to AIO_WAIT_WHILE_UNLOCKED()
  block: convert bdrv_drain_all_begin() to AIO_WAIT_WHILE_UNLOCKED()
  block: convert bdrv_graph_wrlock() to AIO_WAIT_WHILE_UNLOCKED()
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
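The AIO_WAIT_WHILE_UNLOCKED() conversions listed above are not part of the hunks shown below. As a rough, hypothetical sketch of the pattern those patches target (ExampleJob and the helper names are invented, and the caller is assumed to run in the main loop without holding any AioContext lock):

#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "block/aio-wait.h"

typedef struct {
    bool done;
} ExampleJob;

/* Completion callback: flip the flag and wake up any AIO_WAIT_WHILE*
 * pollers. */
static void example_complete(void *opaque, int ret)
{
    ExampleJob *job = opaque;

    qatomic_set(&job->done, true);
    aio_wait_kick();
}

/* Poll until example_complete() has run.  The caller holds no AioContext
 * lock, so the _UNLOCKED variant is used; plain AIO_WAIT_WHILE() would
 * expect to drop and reacquire the lock around each poll. */
static void example_wait(ExampleJob *job)
{
    AIO_WAIT_WHILE_UNLOCKED(NULL, !qatomic_read(&job->done));
}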
@@ -203,7 +203,7 @@ typedef struct V9fsDir {
     QemuMutex readdir_mutex_L;
 } V9fsDir;
 
-static inline void v9fs_readdir_lock(V9fsDir *dir)
+static inline void coroutine_fn v9fs_readdir_lock(V9fsDir *dir)
 {
     if (dir->proto_version == V9FS_PROTO_2000U) {
         qemu_co_mutex_lock(&dir->readdir_mutex_u);
@@ -212,7 +212,7 @@ static inline void v9fs_readdir_lock(V9fsDir *dir)
     }
 }
 
-static inline void v9fs_readdir_unlock(V9fsDir *dir)
+static inline void coroutine_fn v9fs_readdir_unlock(V9fsDir *dir)
 {
     if (dir->proto_version == V9FS_PROTO_2000U) {
         qemu_co_mutex_unlock(&dir->readdir_mutex_u);
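The hunks above only add the coroutine_fn marker: qemu_co_mutex_lock() may suspend the calling coroutine instead of blocking the thread, so the annotation records that these helpers must only run in coroutine context. A minimal sketch of the same lock-selection pattern, with invented names rather than the 9pfs code itself:

#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "qemu/thread.h"

typedef struct ExampleDir {
    bool use_co_lock;        /* pick the coroutine-aware lock */
    CoMutex co_lock;         /* contention suspends the calling coroutine */
    QemuMutex thread_lock;   /* contention blocks the calling thread */
} ExampleDir;

/* coroutine_fn: may yield via qemu_co_mutex_lock(), so callers must
 * already be running inside a coroutine. */
static void coroutine_fn example_dir_lock(ExampleDir *dir)
{
    if (dir->use_co_lock) {
        qemu_co_mutex_lock(&dir->co_lock);
    } else {
        qemu_mutex_lock(&dir->thread_lock);
    }
}

static void coroutine_fn example_dir_unlock(ExampleDir *dir)
{
    if (dir->use_co_lock) {
        qemu_co_mutex_unlock(&dir->co_lock);
    } else {
        qemu_mutex_unlock(&dir->thread_lock);
    }
}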
@@ -68,9 +68,9 @@ int coroutine_fn v9fs_co_readdir(V9fsPDU *pdu, V9fsFidState *fidp,
  *
  * See v9fs_co_readdir_many() (as its only user) below for details.
  */
-static int do_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp,
-                           struct V9fsDirEnt **entries, off_t offset,
-                           int32_t maxsize, bool dostat)
+static int coroutine_fn
+do_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp, struct V9fsDirEnt **entries,
+                off_t offset, int32_t maxsize, bool dostat)
 {
     V9fsState *s = pdu->s;
     V9fsString name;
@@ -41,6 +41,5 @@ static int coroutine_enter_func(void *arg)
 void co_run_in_worker_bh(void *opaque)
 {
     Coroutine *co = opaque;
-    thread_pool_submit_aio(aio_get_thread_pool(qemu_get_aio_context()),
-                           coroutine_enter_func, co, coroutine_enter_cb, co);
+    thread_pool_submit_aio(coroutine_enter_func, co, coroutine_enter_cb, co);
 }
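The hunk above shows the thread-pool API change in miniature: thread_pool_submit_aio() no longer takes a ThreadPool pointer and instead uses the pool of the AioContext the calling thread is running in. A sketch of a caller with hypothetical worker and completion functions (their signatures follow the ThreadPoolFunc and BlockCompletionFunc typedefs assumed from the existing headers):

#include "qemu/osdep.h"
#include "block/thread-pool.h"

/* Runs in a worker thread; the int return value is passed to the
 * completion callback as `ret`. */
static int example_worker(void *opaque)
{
    /* ... blocking work, e.g. fsync() on a file descriptor ... */
    return 0;
}

/* Runs back in the submitting thread's AioContext once the worker is done. */
static void example_done(void *opaque, int ret)
{
    /* ... report ret to whoever queued the request ... */
}

static void example_submit(void *opaque)
{
    /* Old API (before this series):
     *   thread_pool_submit_aio(aio_get_thread_pool(qemu_get_aio_context()),
     *                          example_worker, opaque, example_done, opaque);
     * New API: the pool is resolved from the calling thread's AioContext. */
    thread_pool_submit_aio(example_worker, opaque, example_done, opaque);
}

The spapr_nvdimm and virtio-pmem hunks below apply the same substitution, dropping their local ThreadPool *pool variables.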
@@ -496,7 +496,6 @@ static int spapr_nvdimm_flush_post_load(void *opaque, int version_id)
 {
     SpaprNVDIMMDevice *s_nvdimm = (SpaprNVDIMMDevice *)opaque;
     SpaprNVDIMMDeviceFlushState *state;
-    ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context());
     HostMemoryBackend *backend = MEMORY_BACKEND(PC_DIMM(s_nvdimm)->hostmem);
     bool is_pmem = object_property_get_bool(OBJECT(backend), "pmem", NULL);
     bool pmem_override = object_property_get_bool(OBJECT(s_nvdimm),
@@ -517,7 +516,7 @@ static int spapr_nvdimm_flush_post_load(void *opaque, int version_id)
     }
 
     QLIST_FOREACH(state, &s_nvdimm->pending_nvdimm_flush_states, node) {
-        thread_pool_submit_aio(pool, flush_worker_cb, state,
+        thread_pool_submit_aio(flush_worker_cb, state,
                                spapr_nvdimm_flush_completion_cb, state);
     }
 
@@ -664,7 +663,6 @@ static target_ulong h_scm_flush(PowerPCCPU *cpu, SpaprMachineState *spapr,
     PCDIMMDevice *dimm;
     HostMemoryBackend *backend = NULL;
     SpaprNVDIMMDeviceFlushState *state;
-    ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context());
     int fd;
 
     if (!drc || !drc->dev ||
@@ -699,7 +697,7 @@ static target_ulong h_scm_flush(PowerPCCPU *cpu, SpaprMachineState *spapr,
 
     state->drcidx = drc_index;
 
-    thread_pool_submit_aio(pool, flush_worker_cb, state,
+    thread_pool_submit_aio(flush_worker_cb, state,
                            spapr_nvdimm_flush_completion_cb, state);
 
     continue_token = state->continue_token;
@@ -70,7 +70,6 @@ static void virtio_pmem_flush(VirtIODevice *vdev, VirtQueue *vq)
     VirtIODeviceRequest *req_data;
     VirtIOPMEM *pmem = VIRTIO_PMEM(vdev);
     HostMemoryBackend *backend = MEMORY_BACKEND(pmem->memdev);
-    ThreadPool *pool = aio_get_thread_pool(qemu_get_aio_context());
 
     trace_virtio_pmem_flush_request();
     req_data = virtqueue_pop(vq, sizeof(VirtIODeviceRequest));
@@ -88,7 +87,7 @@ static void virtio_pmem_flush(VirtIODevice *vdev, VirtQueue *vq)
     req_data->fd = memory_region_get_fd(&backend->mr);
     req_data->pmem = pmem;
     req_data->vdev = vdev;
-    thread_pool_submit_aio(pool, worker_cb, req_data, done_cb, req_data);
+    thread_pool_submit_aio(worker_cb, req_data, done_cb, req_data);
 }
 
 static void virtio_pmem_get_config(VirtIODevice *vdev, uint8_t *config)