Sometimes a store may be partially successful before trapping, for instance when it lies partly in valid memory and partly in the guard page.

Use a load from the same address before the store to ensure that all of the memory is addressable. The loaded value is discarded.

NB. We don't apply this to atomics. It's not clear whether atomic stores can be half-committed.

Fixes align.wast and memory_trap.wast on aarch64.
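The pattern each hunk below adds can be read as the following sketch. store_with_probe is a hypothetical helper, not part of this change; it only mirrors the inkwell builder calls used in the diff and assumes the untyped-pointer build_load/build_store API of the inkwell version in use at the time.

use inkwell::builder::Builder;
use inkwell::values::{BasicValue, BasicValueEnum, InstructionValue, PointerValue};

// Hypothetical helper (not in this commit): probe the store's address range
// with a discarded load so that an access reaching into the guard page traps
// before any byte of the store is committed.
fn store_with_probe<'ctx>(
    builder: &Builder<'ctx>,
    effective_address: PointerValue<'ctx>,
    value: BasicValueEnum<'ctx>,
) -> (InstructionValue<'ctx>, InstructionValue<'ctx>) {
    // Dead load: the loaded value is unused; the load exists only to fault first.
    let dead_load = builder.build_load(effective_address, "");
    // The real store runs only if the probe did not trap.
    let store = builder.build_store(effective_address, value);
    (dead_load.as_instruction_value().unwrap(), store)
}

In the hunks below, both the dead load and the store are then passed to annotate_user_memaccess so they carry the same memory-access metadata as any other user access.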
Author: Nick Lewycky
Date:   2020-10-16 16:29:45 -07:00
Parent: a663a152d2
Commit: d562c2dd9c

@@ -6286,6 +6286,13 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
offset,
4,
)?;
let dead_load = self.builder.build_load(effective_address, "");
self.annotate_user_memaccess(
memory_index,
memarg,
1,
dead_load.as_instruction_value().unwrap(),
)?;
let store = self.builder.build_store(effective_address, value);
self.annotate_user_memaccess(memory_index, memarg, 1, store)?;
}
@@ -6300,6 +6307,13 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
offset,
8,
)?;
let dead_load = self.builder.build_load(effective_address, "");
self.annotate_user_memaccess(
memory_index,
memarg,
1,
dead_load.as_instruction_value().unwrap(),
)?;
let store = self.builder.build_store(effective_address, value);
self.annotate_user_memaccess(memory_index, memarg, 1, store)?;
}
@@ -6315,6 +6329,13 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
offset,
4,
)?;
let dead_load = self.builder.build_load(effective_address, "");
self.annotate_user_memaccess(
memory_index,
memarg,
1,
dead_load.as_instruction_value().unwrap(),
)?;
let store = self.builder.build_store(effective_address, v);
self.annotate_user_memaccess(memory_index, memarg, 1, store)?;
}
@@ -6330,6 +6351,13 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
offset,
8,
)?;
let dead_load = self.builder.build_load(effective_address, "");
self.annotate_user_memaccess(
memory_index,
memarg,
1,
dead_load.as_instruction_value().unwrap(),
)?;
let store = self.builder.build_store(effective_address, v);
self.annotate_user_memaccess(memory_index, memarg, 1, store)?;
}
@@ -6345,6 +6373,13 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
offset,
16,
)?;
let dead_load = self.builder.build_load(effective_address, "");
self.annotate_user_memaccess(
memory_index,
memarg,
1,
dead_load.as_instruction_value().unwrap(),
)?;
let store = self.builder.build_store(effective_address, v);
self.annotate_user_memaccess(memory_index, memarg, 1, store)?;
}
@@ -6603,6 +6638,13 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
offset,
1,
)?;
let dead_load = self.builder.build_load(effective_address, "");
self.annotate_user_memaccess(
memory_index,
memarg,
1,
dead_load.as_instruction_value().unwrap(),
)?;
let narrow_value =
self.builder
.build_int_truncate(value, self.intrinsics.i8_ty, "");
@@ -6620,6 +6662,13 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
offset,
2,
)?;
let dead_load = self.builder.build_load(effective_address, "");
self.annotate_user_memaccess(
memory_index,
memarg,
1,
dead_load.as_instruction_value().unwrap(),
)?;
let narrow_value =
self.builder
.build_int_truncate(value, self.intrinsics.i16_ty, "");
@@ -6637,6 +6686,13 @@ impl<'ctx, 'a> LLVMFunctionCodeGenerator<'ctx, 'a> {
offset,
4,
)?;
let dead_load = self.builder.build_load(effective_address, "");
self.annotate_user_memaccess(
memory_index,
memarg,
1,
dead_load.as_instruction_value().unwrap(),
)?;
let narrow_value =
self.builder
.build_int_truncate(value, self.intrinsics.i32_ty, "");