Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20180615' into staging
TCG patch queue:

Workaround macos assembler lossage.
Eliminate tb_lock.
Fix TB code generation overflow.

# gpg: Signature made Fri 15 Jun 2018 20:40:56 BST
# gpg:                using RSA key 64DF38E8AF7E215F
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>"
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20180615:
  tcg: Reduce max TB opcode count
  tcg: remove tb_lock
  translate-all: remove tb_lock mention from cpu_restore_state_from_tb
  cputlb: remove tb_lock from tlb_flush functions
  translate-all: protect TB jumps with a per-destination-TB lock
  translate-all: discard TB when tb_link_page returns an existing matching TB
  translate-all: introduce assert_no_pages_locked
  translate-all: add page_locked assertions
  translate-all: use per-page locking in !user-mode
  translate-all: move tb_invalidate_phys_page_range up in the file
  translate-all: work page-by-page in tb_invalidate_phys_range_1
  translate-all: remove hole in PageDesc
  translate-all: make l1_map lockless
  translate-all: iterate over TBs in a page with PAGE_FOR_EACH_TB
  tcg: move tb_ctx.tb_phys_invalidate_count to tcg_ctx
  tcg: track TBs with per-region BST's
  qht: return existing entry when qht_insert fails
  qht: require a default comparison function
  tcg/i386: Use byte form of xgetbv instruction

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
@@ -23,7 +23,7 @@ typedef struct CPUListState {
     FILE *file;
 } CPUListState;
 
-/* The CPU list lock nests outside tb_lock/tb_unlock. */
+/* The CPU list lock nests outside page_(un)lock or mmap_(un)lock */
 void qemu_init_cpu_list(void);
 void cpu_list_lock(void);
 void cpu_list_unlock(void);
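The updated comment states a lock-ordering rule: the CPU list lock is the outer lock and must be taken before mmap_lock() or any page lock. A minimal sketch of a caller respecting that order, for illustration only; the helper name is made up, while cpu_list_lock(), mmap_lock() and CPU_FOREACH() are existing QEMU primitives and the snippet assumes it lives inside the QEMU tree with the usual headers included:

    /* Illustrative only (not part of this series): take the CPU list lock
     * first (outer), then the guest memory map lock (inner). */
    static void for_each_cpu_with_mmap_locked(void)
    {
        CPUState *cpu;

        cpu_list_lock();    /* outer lock: protects the CPU list */
        mmap_lock();        /* inner lock: guest page mappings */
        CPU_FOREACH(cpu) {
            /* ... per-CPU work that may touch guest pages ... */
        }
        mmap_unlock();
        cpu_list_unlock();
    }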
@@ -345,7 +345,7 @@ struct TranslationBlock {
 #define CF_LAST_IO     0x00008000 /* Last insn may be an IO access. */
 #define CF_NOCACHE     0x00010000 /* To be freed after execution */
 #define CF_USE_ICOUNT  0x00020000
-#define CF_INVALID     0x00040000 /* TB is stale. Setters need tb_lock */
+#define CF_INVALID     0x00040000 /* TB is stale. Set with @jmp_lock held */
 #define CF_PARALLEL    0x00080000 /* Generate code for a parallel context */
 /* cflags' mask for hashing/comparison */
 #define CF_HASH_MASK   \
@@ -359,10 +359,14 @@ struct TranslationBlock {
     /* original tb when cflags has CF_NOCACHE */
     struct TranslationBlock *orig_tb;
     /* first and second physical page containing code. The lower bit
-       of the pointer tells the index in page_next[] */
-    struct TranslationBlock *page_next[2];
+       of the pointer tells the index in page_next[].
+       The list is protected by the TB's page('s) lock(s) */
+    uintptr_t page_next[2];
     tb_page_addr_t page_addr[2];
 
+    /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
+    QemuSpin jmp_lock;
+
     /* The following data are used to directly call another TB from
      * the code of this one. This can be done either by emitting direct or
      * indirect native jump instructions. These jumps are reset so that the TB
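The reworked page_next[] comment describes a tagged pointer: each word holds the address of the next TB on the same physical page, and its low bit selects which of that TB's two page_next[] slots continues the chain (a TB can straddle two pages, hence two slots). A small self-contained sketch of that tagging scheme, with made-up helper names rather than QEMU's actual macros:

    #include <stdint.h>
    #include <stdio.h>

    struct tb {
        uintptr_t page_next[2];   /* tagged links, one per physical page */
    };

    /* Pack a TB pointer plus the slot (0 or 1) to follow next into one word;
     * TBs are at least pointer-aligned, so bit 0 is free for the tag. */
    static uintptr_t tb_link(struct tb *next, unsigned slot)
    {
        return (uintptr_t)next | slot;
    }

    static struct tb *tb_from_link(uintptr_t link, unsigned *slot)
    {
        *slot = link & 1;                       /* which page_next[] to use next */
        return (struct tb *)(link & ~(uintptr_t)1);
    }

    int main(void)
    {
        struct tb a = { { 0, 0 } }, b = { { 0, 0 } };

        a.page_next[0] = tb_link(&b, 1);        /* a -> b, continue via b.page_next[1] */

        unsigned slot;
        struct tb *next = tb_from_link(a.page_next[0], &slot);
        printf("next=%p slot=%u\n", (void *)next, slot);
        return 0;
    }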
@@ -374,20 +378,26 @@ struct TranslationBlock {
 #define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
     uintptr_t jmp_target_arg[2]; /* target address or offset */
 
-    /* Each TB has an associated circular list of TBs jumping to this one.
-     * jmp_list_first points to the first TB jumping to this one.
-     * jmp_list_next is used to point to the next TB in a list.
-     * Since each TB can have two jumps, it can participate in two lists.
-     * jmp_list_first and jmp_list_next are 4-byte aligned pointers to a
-     * TranslationBlock structure, but the two least significant bits of
-     * them are used to encode which data field of the pointed TB should
-     * be used to traverse the list further from that TB:
-     * 0 => jmp_list_next[0], 1 => jmp_list_next[1], 2 => jmp_list_first.
-     * In other words, 0/1 tells which jump is used in the pointed TB,
-     * and 2 means that this is a pointer back to the target TB of this list.
+    /*
+     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
+     * Each TB can have two outgoing jumps, and therefore can participate
+     * in two lists. The list entries are kept in jmp_list_next[2]. The least
+     * significant bit (LSB) of the pointers in these lists is used to encode
+     * which of the two list entries is to be used in the pointed TB.
+     *
+     * List traversals are protected by jmp_lock. The destination TB of each
+     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
+     * can be acquired from any origin TB.
+     *
+     * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
+     * being invalidated, so that no further outgoing jumps from it can be set.
+     *
+     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
+     * to a destination TB that has CF_INVALID set.
      */
-    uintptr_t jmp_list_next[2];
-    uintptr_t jmp_list_first;
+    uintptr_t jmp_list_head;
+    uintptr_t jmp_list_next[2];
+    uintptr_t jmp_dest[2];
 };
 
 extern bool parallel_cpus;
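As a rough model of the list layout described in the new comment: the destination TB owns jmp_list_head, each entry is an origin TB whose LSB tag says which of that origin's two jmp_list_next[] slots continues the chain, and the list ends at NULL. The sketch below is self-contained and uses simplified names instead of QEMU's real helpers; in QEMU the walk would of course be done with the destination's jmp_lock held and with CF_INVALID checked before chaining:

    #include <stdint.h>
    #include <stdio.h>

    struct tb {
        uintptr_t jmp_list_head;      /* first incoming jump, tagged */
        uintptr_t jmp_list_next[2];   /* per-outgoing-jump links, tagged */
        const char *name;
    };

    /* Walk every TB that jumps into 'dest'.  The LSB of each link selects
     * which of the origin's two jmp_list_next[] entries continues the list. */
    static void for_each_incoming_jump(struct tb *dest)
    {
        uintptr_t link = dest->jmp_list_head;
        while (link) {
            unsigned slot = link & 1;
            struct tb *orig = (struct tb *)(link & ~(uintptr_t)1);
            printf("incoming jump from %s (outgoing slot %u)\n", orig->name, slot);
            link = orig->jmp_list_next[slot];
        }
    }

    int main(void)
    {
        struct tb dest = { 0, { 0, 0 }, "dest" };
        struct tb a = { 0, { 0, 0 }, "a" };
        struct tb b = { 0, { 0, 0 }, "b" };

        /* a's outgoing jump 0 and b's outgoing jump 1 both target 'dest':
         * push each onto dest's incoming list, encoding the slot in the LSB. */
        a.jmp_list_next[0] = dest.jmp_list_head;
        dest.jmp_list_head = (uintptr_t)&a | 0;
        b.jmp_list_next[1] = dest.jmp_list_head;
        dest.jmp_list_head = (uintptr_t)&b | 1;

        for_each_incoming_jump(&dest);
        return 0;
    }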
@@ -405,7 +415,6 @@ static inline uint32_t curr_cflags(void)
            | (use_icount ? CF_USE_ICOUNT : 0);
 }
 
-void tb_remove(TranslationBlock *tb);
 void tb_flush(CPUState *cpu);
 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
 TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
@@ -431,9 +440,13 @@ extern uintptr_t tci_tb_ptr;
    smaller than 4 bytes, so we don't worry about special-casing this. */
 #define GETPC_ADJ 2
 
-void tb_lock(void);
-void tb_unlock(void);
-void tb_lock_reset(void);
+#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
+void assert_no_pages_locked(void);
+#else
+static inline void assert_no_pages_locked(void)
+{
+}
+#endif
 
 #if !defined(CONFIG_USER_ONLY)
 
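The tb_lock primitives are replaced by a debug-only assertion in the familiar declare-or-stub shape: a real function when CONFIG_DEBUG_TCG is set in a system-mode build, an empty static inline otherwise so call sites compile to nothing. A hedged usage sketch; the call site below is illustrative only and not taken from the series:

    /* Illustrative call site: check the invariant at a point where the caller
     * expects to hold no page locks at all. */
    static void start_exclusive_work(void)
    {
        assert_no_pages_locked();   /* no-op unless CONFIG_DEBUG_TCG is set */
        /* ... work that must begin with every page lock released ... */
    }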
@@ -49,6 +49,8 @@ void mtree_print_dispatch(fprintf_function mon, void *f,
                           struct AddressSpaceDispatch *d,
                           MemoryRegion *root);
 
+struct page_collection;
+
 /* Opaque struct for passing info from memory_notdirty_write_prepare()
  * to memory_notdirty_write_complete(). Callers should treat all fields
  * as private, with the exception of @active.
@@ -60,10 +62,10 @@ void mtree_print_dispatch(fprintf_function mon, void *f,
  */
 typedef struct {
     CPUState *cpu;
+    struct page_collection *pages;
     ram_addr_t ram_addr;
     vaddr mem_vaddr;
     unsigned size;
-    bool locked;
     bool active;
 } NotDirtyInfo;
 
@@ -91,7 +93,7 @@ typedef struct {
  *
  * This must only be called if we are using TCG; it will assert otherwise.
  *
- * We may take a lock in the prepare call, so callers must ensure that
+ * We may take locks in the prepare call, so callers must ensure that
  * they don't exit (via longjump or otherwise) without calling complete.
  *
  * This call must only be made inside an RCU critical section.
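The comment spells out a strict calling contract: prepare and complete must always pair, the caller must not longjmp out in between, and the whole sequence must sit inside an RCU critical section. A hedged sketch of a caller honoring that contract; it assumes the prepare signature declared in this header at the time (ndi, cpu, mem_vaddr, ram_addr, size), and do_ram_store() is a stand-in for the actual store, not a QEMU function:

    /* Sketch of a caller of the notdirty helpers, inside the QEMU tree. */
    static void write_to_notdirty_page(CPUState *cpu, vaddr mem_vaddr,
                                       ram_addr_t ram_addr, unsigned size,
                                       uint64_t val)
    {
        NotDirtyInfo ndi;

        rcu_read_lock();                       /* required: RCU critical section */
        memory_notdirty_write_prepare(&ndi, cpu, mem_vaddr, ram_addr, size);
        do_ram_store(ram_addr, val, size);     /* must not longjmp before complete */
        memory_notdirty_write_complete(&ndi);  /* always pairs with prepare */
        rcu_read_unlock();
    }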
@@ -31,14 +31,10 @@ typedef struct TBContext TBContext;
 
 struct TBContext {
 
-    GTree *tb_tree;
     struct qht htable;
-    /* any access to the tbs or the page table must use this lock */
-    QemuMutex tb_lock;
 
     /* statistics */
     unsigned tb_flush_count;
-    int tb_phys_invalidate_count;
 };
 
 extern TBContext tb_ctx;