memory: Move AddressSpaceDispatch from AddressSpace to FlatView
As we are going to share FlatViews between AddressSpaces, and AddressSpaceDispatch is the structure used for quick lookups in a FlatView, this moves the ASD into the FlatView.

Since the previous patch open coded ASD rendering, we can also remove as->next_dispatch: the new FlatView pointer is kept on the stack and set on the AS atomically.

flatview_destroy() is now executed under RCU instead of address_space_dispatch_free().

This makes mem_begin()/mem_commit() work with an ASD and mem_add() with a FlatView, as mem_add() will be taking a FlatView argument later in the series anyway.

This should cause no behavioural change.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Message-Id: <20170921085110.25598-5-aik@ozlabs.ru>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 66a6df1dc6
parent cc94cd6d36
committed by Paolo Bonzini
exec.c | 41 (11 insertions, 30 deletions)
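
The hunks below use two small accessors, flatview_to_dispatch() and address_space_to_dispatch(), which this patch adds outside of exec.c (in the memory code) and which are therefore not visible in this diff. A minimal sketch of what they plausibly look like, assuming FlatView gains a dispatch field and the AddressSpace's current FlatView is reachable via address_space_to_flatview():

    /* Sketch only -- the real definitions live in the memory headers/memory.c,
     * not in this exec.c diff; field and helper names are assumptions based
     * on how the hunks below use them. */
    static inline AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
    {
        /* Each FlatView now owns the dispatch used for quick lookups. */
        return fv->dispatch;
    }

    static inline AddressSpaceDispatch *address_space_to_dispatch(AddressSpace *as)
    {
        /* Resolve the AS to its current FlatView (an RCU-protected read),
         * then hand back that view's dispatch. */
        return flatview_to_dispatch(address_space_to_flatview(as));
    }
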
@@ -187,8 +187,6 @@ typedef struct PhysPageMap {
 } PhysPageMap;
 
 struct AddressSpaceDispatch {
-    struct rcu_head rcu;
-
     MemoryRegionSection *mru_section;
     /* This is a multi-level map on the physical address space.
      * The bottom level has pointers to MemoryRegionSections.
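
Dropping the rcu_head from AddressSpaceDispatch works because the dispatch is now reclaimed together with the FlatView that owns it: flatview_destroy() runs after an RCU grace period and frees the dispatch synchronously. A sketch of that memory.c side, which is not part of this exec.c diff (shape and names assumed from the surrounding series):

    /* Sketch only: assumed shape of flatview_destroy() after this patch. */
    static void flatview_destroy(FlatView *view)
    {
        int i;

        /* The dispatch dies with its view; no per-dispatch rcu_head needed. */
        if (view->dispatch) {
            address_space_dispatch_free(view->dispatch);
        }
        for (i = 0; i < view->nr; i++) {
            memory_region_unref(view->ranges[i].mr);
        }
        g_free(view->ranges);
        g_free(view);
    }
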
@@ -485,7 +483,7 @@ static MemoryRegionSection address_space_do_translate(AddressSpace *as,
     IOMMUMemoryRegionClass *imrc;
 
     for (;;) {
-        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
+        AddressSpaceDispatch *d = address_space_to_dispatch(as);
         section = address_space_translate_internal(d, addr, &addr, plen, is_mmio);
 
         iommu_mr = memory_region_get_iommu(section->mr);
@@ -1222,7 +1220,7 @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu,
     } else {
         AddressSpaceDispatch *d;
 
-        d = atomic_rcu_read(&section->address_space->dispatch);
+        d = address_space_to_dispatch(section->address_space);
         iotlb = section - d->map.sections;
         iotlb += xlat;
     }
@@ -1347,9 +1345,9 @@ static void register_multipage(AddressSpaceDispatch *d,
     phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
 }
 
-void mem_add(AddressSpace *as, MemoryRegionSection *section)
+void mem_add(AddressSpace *as, FlatView *fv, MemoryRegionSection *section)
 {
-    AddressSpaceDispatch *d = as->next_dispatch;
+    AddressSpaceDispatch *d = flatview_to_dispatch(fv);
     MemoryRegionSection now = *section, remain = *section;
     Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
 
@@ -2672,7 +2670,7 @@ static void io_mem_init(void)
                           NULL, UINT64_MAX);
 }
 
-void mem_begin(AddressSpace *as)
+AddressSpaceDispatch *mem_begin(AddressSpace *as)
 {
     AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
     uint16_t n;
@@ -2688,26 +2686,19 @@ void mem_begin(AddressSpace *as)
 
     d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
     d->as = as;
-    as->next_dispatch = d;
+
+    return d;
 }
 
-static void address_space_dispatch_free(AddressSpaceDispatch *d)
+void address_space_dispatch_free(AddressSpaceDispatch *d)
 {
     phys_sections_free(&d->map);
     g_free(d);
 }
 
-void mem_commit(AddressSpace *as)
+void mem_commit(AddressSpaceDispatch *d)
 {
-    AddressSpaceDispatch *cur = as->dispatch;
-    AddressSpaceDispatch *next = as->next_dispatch;
-
-    phys_page_compact_all(next, next->map.nodes_nb);
-
-    atomic_rcu_set(&as->dispatch, next);
-    if (cur) {
-        call_rcu(cur, address_space_dispatch_free, rcu);
-    }
+    phys_page_compact_all(d, d->map.nodes_nb);
 }
 
 static void tcg_commit(MemoryListener *listener)
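
With mem_begin() returning the dispatch and mem_commit() reduced to compacting the page map, publishing the new dispatch and reclaiming the old one become the caller's job in memory.c, which is outside this diff. A simplified illustration of that flow, assuming the rendering code keeps the new FlatView on the stack, attaches the dispatch to it, and installs it on the AddressSpace atomically (helper and field names are assumptions):

    /* Illustration only: not code from this diff. */
    static void example_set_flatview(AddressSpace *as, FlatView *old_view,
                                     FlatView *new_view)
    {
        int i;

        /* Build the dispatch for the new view; it is owned by the FlatView. */
        new_view->dispatch = mem_begin(as);
        for (i = 0; i < new_view->nr; i++) {
            MemoryRegionSection mrs =
                section_from_flat_range(&new_view->ranges[i], as);
            mem_add(as, new_view, &mrs);
        }
        mem_commit(new_view->dispatch);          /* only compacts the map */

        /* Readers pick up the view and its dispatch together. */
        atomic_rcu_set(&as->current_map, new_view);

        /* The old view, and with it the old dispatch, goes away after a
         * grace period via flatview_destroy(). */
        call_rcu(old_view, flatview_destroy, rcu);
    }
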
@@ -2723,21 +2714,11 @@ static void tcg_commit(MemoryListener *listener)
      * We reload the dispatch pointer now because cpu_reloading_memory_map()
      * may have split the RCU critical section.
      */
-    d = atomic_rcu_read(&cpuas->as->dispatch);
+    d = address_space_to_dispatch(cpuas->as);
     atomic_rcu_set(&cpuas->memory_dispatch, d);
     tlb_flush(cpuas->cpu);
 }
 
-void address_space_destroy_dispatch(AddressSpace *as)
-{
-    AddressSpaceDispatch *d = as->dispatch;
-
-    atomic_rcu_set(&as->dispatch, NULL);
-    if (d) {
-        call_rcu(d, address_space_dispatch_free, rcu);
-    }
-}
-
 static void memory_map_init(void)
 {
     system_memory = g_malloc(sizeof(*system_memory));