Remove PAGE_RESERVED

The usermode PAGE_RESERVED code is not required by the current mmap
implementation, and is already broken when guest_base != 0.
Unfortunately the BSD emulation still uses the old mmap implementation,
so we can't rip it out altogether.

Signed-off-by: Paul Brook <paul@codesourcery.com>
commit 2e9a5713f0
parent 048d179f20
Author: Paul Brook <paul@codesourcery.com>
Date:   2010-05-05 16:32:59 +01:00

4 changed files with 5 additions and 54 deletions
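
Why guest_base != 0 breaks the old bookkeeping: user-mode QEMU translates
host addresses to guest addresses by subtracting a constant offset. A minimal
standalone sketch (not QEMU code; h2g() below paraphrases the user-mode
translation macro, and the addresses are made up):

    #include <stdio.h>

    /* Paraphrase of user-mode QEMU's translation: guest address =
     * host address minus guest_base (0 by default). */
    static unsigned long guest_base;
    #define h2g(x) ((unsigned long)(x) - guest_base)

    int main(void)
    {
        unsigned long host_page = 0x400000ul;   /* hypothetical mmap() result */

        guest_base = 0;
        printf("guest_base=0:  host %#lx -> guest %#lx\n",
               host_page, h2g(host_page));

        guest_base = 0x10000ul;
        printf("guest_base!=0: host %#lx -> guest %#lx\n",
               host_page, h2g(host_page));

        /* Bookkeeping that marks raw host ranges as PAGE_RESERVED (as the
         * /proc/self/maps scan in page_init() did) no longer lines up with
         * the guest's view of those pages once guest_base is nonzero. */
        return 0;
    }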

exec.c (31 lines changed)

@@ -288,7 +288,7 @@ static void page_init(void)
         qemu_host_page_bits++;
     qemu_host_page_mask = ~(qemu_host_page_size - 1);
 
-#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
+#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
     {
 #ifdef HAVE_KINFO_GETVMMAP
         struct kinfo_vmentry *freep;
@@ -324,11 +324,7 @@ static void page_init(void)
 
         last_brk = (unsigned long)sbrk(0);
 
-#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
         f = fopen("/compat/linux/proc/self/maps", "r");
-#else
-        f = fopen("/proc/self/maps", "r");
-#endif
         if (f) {
             mmap_lock();
 
@@ -365,24 +361,11 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
     int i;
 
 #if defined(CONFIG_USER_ONLY)
-    /* We can't use qemu_malloc because it may recurse into a locked mutex.
-       Neither can we record the new pages we reserve while allocating a
-       given page because that may recurse into an unallocated page table
-       entry.  Stuff the allocations we do make into a queue and process
-       them after having completed one entire page table allocation.  */
-
-    unsigned long reserve[2 * (V_L1_SHIFT / L2_BITS)];
-    int reserve_idx = 0;
-
+    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
 # define ALLOC(P, SIZE)                                 \
     do {                                                \
         P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
-        if (h2g_valid(P)) {                             \
-            reserve[reserve_idx] = h2g(P);              \
-            reserve[reserve_idx + 1] = SIZE;            \
-            reserve_idx += 2;                           \
-        }                                               \
     } while (0)
 #else
 # define ALLOC(P, SIZE) \
@@ -417,16 +400,6 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
     }
 #undef ALLOC
-#if defined(CONFIG_USER_ONLY)
-    for (i = 0; i < reserve_idx; i += 2) {
-        unsigned long addr = reserve[i];
-        unsigned long len = reserve[i + 1];
-
-        page_set_flags(addr & TARGET_PAGE_MASK,
-                       TARGET_PAGE_ALIGN(addr + len),
-                       PAGE_RESERVED);
-    }
-#endif
 
     return pd + (index & (L2_SIZE - 1));
 }
 
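
For reference, the user-only ALLOC path after this commit reduces to a bare
anonymous mmap with no reserve queue and no PAGE_RESERVED pass afterwards.
A runnable sketch reconstructed from the hunks above (the harness around the
macro is illustrative, not QEMU code):

    #include <stdio.h>
    #include <sys/mman.h>

    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
    #define ALLOC(P, SIZE)                                  \
        do {                                                \
            P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
        } while (0)

    int main(void)
    {
        void *pd;

        ALLOC(pd, 4096);            /* one page-table chunk */
        if (pd == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        /* The allocation is simply used as-is; nothing records it in a
         * reserve[] queue or calls page_set_flags(..., PAGE_RESERVED). */
        printf("allocated page-table chunk at %p\n", pd);
        munmap(pd, 4096);
        return 0;
    }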