Feature/address translation cleanup (#196)
* Concatenates all read-only sections, including the text section, into one.

* Enforces exactly one heap memory region per VM.

* Renames frames => stack.

* Enforces memory regions' virtual addresses to be aligned.

* Makes address translation constant-time (see the sketch after this list).

* Adds an explicit empty NULL region to avoid an index shift.

* Implements JIT address translation directly in x86 (no more calls back into Rust).

* Fixes the benchmarks.

* Makes stack frame gaps in the VM address space optional / configurable.
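Taken together, the aligned region addresses, the explicit empty NULL region at index 0, and the constant-time translation can be pictured with a small, self-contained Rust sketch. This is illustrative only: the 32-bit slot shift and the region lengths are assumptions based on the 0x100000000 offsets used in the benchmark changes below, not the crate's actual implementation.

// Illustrative sketch only, not solana_rbpf's real code.
// Assumption: each region occupies its own 4 GiB-aligned slot in the VM address
// space, so the upper 32 bits of a virtual address select its region directly,
// with index 0 reserved for the explicit empty NULL region.
const VIRTUAL_SLOT_SHIFT: u32 = 32;

struct Region {
    vm_addr: u64,
    len: u64,
}

// Constant-time lookup: no linear scan or binary search over the regions.
fn lookup(regions: &[Region], vm_addr: u64, access_len: u64) -> Option<(usize, u64)> {
    let index = (vm_addr >> VIRTUAL_SLOT_SHIFT) as usize;
    let region = regions.get(index)?;
    let offset = vm_addr.checked_sub(region.vm_addr)?;
    if offset.checked_add(access_len)? <= region.len {
        Some((index, offset)) // region index and offset inside it
    } else {
        None
    }
}

fn main() {
    let regions = vec![
        Region { vm_addr: 0, len: 0 },                // NULL region avoids an index shift
        Region { vm_addr: 0x1_0000_0000, len: 4096 }, // e.g. the merged read-only sections
        Region { vm_addr: 0x2_0000_0000, len: 4096 }, // e.g. the stack
    ];
    assert_eq!(lookup(&regions, 0x1_0000_0010, 8), Some((1, 0x10)));
    assert_eq!(lookup(&regions, 0x10, 8), None); // falls into the empty NULL region
}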
Lichtso authored Jul 23, 2021
1 parent d87fac7 commit 7091677
Showing 18 changed files with 327 additions and 312 deletions.
3 changes: 2 additions & 1 deletion benches/jit_compile.rs
@@ -29,7 +29,8 @@ fn bench_init_vm(bencher: &mut Bencher) {
)
.unwrap();
bencher.iter(|| {
EbpfVm::<UserError, TestInstructionMeter>::new(executable.as_ref(), &mut [], &[]).unwrap()
EbpfVm::<UserError, TestInstructionMeter>::new(executable.as_ref(), &mut [], &mut [])
.unwrap()
});
}

90 changes: 50 additions & 40 deletions benches/memory_mapping.rs
@@ -23,8 +23,9 @@ fn generate_memory_regions(
is_writable: bool,
mut prng: Option<&mut SmallRng>,
) -> (Vec<MemoryRegion>, u64) {
let mut memory_regions = Vec::with_capacity(entries);
let mut offset = 0;
let mut memory_regions = Vec::with_capacity(entries + 1);
memory_regions.push(MemoryRegion::default());
let mut offset = 0x100000000;
for _ in 0..entries {
let length = match &mut prng {
Some(prng) => (*prng).gen::<u8>() as u64 + 4,
@@ -37,7 +38,7 @@
0,
is_writable,
));
offset += length;
offset += 0x100000000;
}
(memory_regions, offset)
}
@@ -59,20 +60,18 @@ fn bench_gapped_randomized_access_with_1024_entries(bencher: &mut Bencher) {
let frame_size: u64 = 2;
let frame_count: u64 = 1024;
let content = vec![0; (frame_size * frame_count * 2) as usize];
let memory_regions = vec![MemoryRegion::new_from_slice(
&content[..],
0,
frame_size,
false,
)];
let memory_regions = vec![
MemoryRegion::default(),
MemoryRegion::new_from_slice(&content[..], 0x100000000, frame_size, false),
];
let config = Config::default();
let memory_mapping = MemoryMapping::new::<UserError>(memory_regions, &config).unwrap();
let mut prng = new_prng!();
bencher.iter(|| {
assert!(memory_mapping
.map::<UserError>(
AccessType::Load,
prng.gen::<u64>() % frame_count * (frame_size * 2),
0x100000000 + (prng.gen::<u64>() % frame_count * (frame_size * 2)),
1
)
.is_ok());
@@ -82,18 +81,19 @@ fn bench_gapped_randomized_access_with_1024_entries(bencher: &mut Bencher) {
#[bench]
fn bench_randomized_access_with_0001_entry(bencher: &mut Bencher) {
let content = vec![0; 1024 * 2];
let memory_regions = vec![MemoryRegion::new_from_slice(&content[..], 0, 0, false)];
let memory_regions = vec![
MemoryRegion::default(),
MemoryRegion::new_from_slice(&content[..], 0x100000000, 0, false),
];
let config = Config::default();
let memory_mapping = MemoryMapping::new::<UserError>(memory_regions, &config).unwrap();
let mut prng = new_prng!();
bencher.iter(|| {
assert!(memory_mapping
.map::<UserError>(
AccessType::Load,
prng.gen::<u64>() % content.len() as u64,
1
)
.is_ok());
let _ = memory_mapping.map::<UserError>(
AccessType::Load,
0x100000000 + (prng.gen::<u64>() % content.len() as u64),
1,
);
});
}

@@ -104,9 +104,11 @@ fn bench_randomized_mapping_access_with_0004_entries(bencher: &mut Bencher) {
let config = Config::default();
let memory_mapping = MemoryMapping::new::<UserError>(memory_regions, &config).unwrap();
bencher.iter(|| {
assert!(memory_mapping
.map::<UserError>(AccessType::Load, prng.gen::<u64>() % end_address, 1)
.is_ok());
let _ = memory_mapping.map::<UserError>(
AccessType::Load,
0x100000000 + (prng.gen::<u64>() % end_address),
1,
);
});
}

@@ -117,9 +119,11 @@ fn bench_randomized_mapping_access_with_0016_entries(bencher: &mut Bencher) {
let config = Config::default();
let memory_mapping = MemoryMapping::new::<UserError>(memory_regions, &config).unwrap();
bencher.iter(|| {
assert!(memory_mapping
.map::<UserError>(AccessType::Load, prng.gen::<u64>() % end_address, 1)
.is_ok());
let _ = memory_mapping.map::<UserError>(
AccessType::Load,
0x100000000 + (prng.gen::<u64>() % end_address),
1,
);
});
}

@@ -130,9 +134,11 @@ fn bench_randomized_mapping_access_with_0064_entries(bencher: &mut Bencher) {
let config = Config::default();
let memory_mapping = MemoryMapping::new::<UserError>(memory_regions, &config).unwrap();
bencher.iter(|| {
assert!(memory_mapping
.map::<UserError>(AccessType::Load, prng.gen::<u64>() % end_address, 1)
.is_ok());
let _ = memory_mapping.map::<UserError>(
AccessType::Load,
0x100000000 + (prng.gen::<u64>() % end_address),
1,
);
});
}

@@ -143,9 +149,11 @@ fn bench_randomized_mapping_access_with_0256_entries(bencher: &mut Bencher) {
let config = Config::default();
let memory_mapping = MemoryMapping::new::<UserError>(memory_regions, &config).unwrap();
bencher.iter(|| {
assert!(memory_mapping
.map::<UserError>(AccessType::Load, prng.gen::<u64>() % end_address, 1)
.is_ok());
let _ = memory_mapping.map::<UserError>(
AccessType::Load,
0x100000000 + (prng.gen::<u64>() % end_address),
1,
);
});
}

@@ -156,9 +164,11 @@ fn bench_randomized_mapping_access_with_1024_entries(bencher: &mut Bencher) {
let config = Config::default();
let memory_mapping = MemoryMapping::new::<UserError>(memory_regions, &config).unwrap();
bencher.iter(|| {
assert!(memory_mapping
.map::<UserError>(AccessType::Load, prng.gen::<u64>() % end_address, 1)
.is_ok());
let _ = memory_mapping.map::<UserError>(
AccessType::Load,
0x100000000 + (prng.gen::<u64>() % end_address),
1,
);
});
}

@@ -169,9 +179,11 @@ fn bench_randomized_access_with_1024_entries(bencher: &mut Bencher) {
let config = Config::default();
let memory_mapping = MemoryMapping::new::<UserError>(memory_regions, &config).unwrap();
bencher.iter(|| {
assert!(memory_mapping
.map::<UserError>(AccessType::Load, prng.gen::<u64>() % end_address, 1)
.is_ok());
let _ = memory_mapping.map::<UserError>(
AccessType::Load,
0x100000000 + (prng.gen::<u64>() % end_address),
1,
);
});
}

@@ -182,9 +194,7 @@ fn bench_randomized_mapping_with_1024_entries(bencher: &mut Bencher) {
let config = Config::default();
let memory_mapping = MemoryMapping::new::<UserError>(memory_regions, &config).unwrap();
bencher.iter(|| {
assert!(memory_mapping
.map::<UserError>(AccessType::Load, 0, 1)
.is_ok());
let _ = memory_mapping.map::<UserError>(AccessType::Load, 0x100000000, 1);
});
}

@@ -195,7 +205,7 @@ fn bench_mapping_with_1024_entries(bencher: &mut Bencher) {
let memory_mapping = MemoryMapping::new::<UserError>(memory_regions, &config).unwrap();
bencher.iter(|| {
assert!(memory_mapping
.map::<UserError>(AccessType::Load, 0, 1)
.map::<UserError>(AccessType::Load, 0x100000000, 1)
.is_ok());
});
}
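All of the benchmarks above now offset their accesses by 0x100000000, which keeps them out of the empty NULL region at index 0, and the gapped benchmark passes the frame size as the region's gap parameter. One plausible reading of how such a gapped region translates offsets (frames interleaved with equally sized guard gaps, and accesses into a gap rejected) is sketched below; the real logic in memory_region.rs may differ.

// Hedged sketch of gapped-region translation; illustrative only.
fn gapped_offset(vm_offset: u64, frame_size: u64) -> Option<u64> {
    let pair = frame_size * 2; // one backed frame followed by one guard gap
    let frame_index = vm_offset / pair;
    let within = vm_offset % pair;
    if within < frame_size {
        Some(frame_index * frame_size + within) // host offset of the access
    } else {
        None // access landed in a guard gap -> fault
    }
}

fn main() {
    assert_eq!(gapped_offset(0, 2), Some(0)); // first frame
    assert_eq!(gapped_offset(2, 2), None);    // gap after the first frame
    assert_eq!(gapped_offset(4, 2), Some(2)); // second frame
}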
8 changes: 5 additions & 3 deletions benches/vm_execution.rs
@@ -29,7 +29,8 @@ fn bench_init_interpreter_execution(bencher: &mut Bencher) {
)
.unwrap();
let mut vm =
EbpfVm::<UserError, TestInstructionMeter>::new(executable.as_ref(), &mut [], &[]).unwrap();
EbpfVm::<UserError, TestInstructionMeter>::new(executable.as_ref(), &mut [], &mut [])
.unwrap();
bencher.iter(|| {
vm.execute_program_interpreted(&mut TestInstructionMeter { remaining: 29 })
.unwrap()
@@ -51,7 +52,8 @@ fn bench_init_jit_execution(bencher: &mut Bencher) {
.unwrap();
executable.jit_compile().unwrap();
let mut vm =
EbpfVm::<UserError, TestInstructionMeter>::new(executable.as_ref(), &mut [], &[]).unwrap();
EbpfVm::<UserError, TestInstructionMeter>::new(executable.as_ref(), &mut [], &mut [])
.unwrap();
bencher.iter(|| {
vm.execute_program_jit(&mut TestInstructionMeter { remaining: 29 })
.unwrap()
@@ -73,7 +75,7 @@ fn bench_jit_vs_interpreter(
)
.unwrap();
executable.jit_compile().unwrap();
let mut vm = EbpfVm::new(executable.as_ref(), mem, &[]).unwrap();
let mut vm = EbpfVm::new(executable.as_ref(), &mut [], mem).unwrap();
let interpreter_summary = bencher
.bench(|bencher| {
bencher.iter(|| {
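The updated constructor calls in these benchmarks suggest that EbpfVm::new now takes the heap slice second and the program input memory third, both mutable, which lines up with the one-heap-region-per-VM change. A hedged fragment under that assumption; executable, UserError and TestInstructionMeter are set up exactly as in the benchmarks above, and the slice sizes are arbitrary examples.

// Assumption: the argument order is (executable, heap, input memory).
let mut heap = vec![0u8; 64 * 1024]; // the VM's single heap region
let mut mem = vec![0u8; 1024];       // program input memory
let mut vm =
    EbpfVm::<UserError, TestInstructionMeter>::new(executable.as_ref(), &mut heap, &mut mem)
        .unwrap();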
6 changes: 4 additions & 2 deletions examples/uptime.rs
@@ -52,7 +52,8 @@ fn main() {
)
.unwrap();
let mut vm =
EbpfVm::<UserError, TestInstructionMeter>::new(executable.as_ref(), &mut [], &[]).unwrap();
EbpfVm::<UserError, TestInstructionMeter>::new(executable.as_ref(), &mut [], &mut [])
.unwrap();
// Execute prog1.
assert_eq!(
vm.execute_program_interpreted(&mut TestInstructionMeter { remaining: 5 })
@@ -88,7 +89,8 @@ fn main() {
executable.jit_compile().unwrap();
}
let mut vm =
EbpfVm::<UserError, TestInstructionMeter>::new(executable.as_ref(), &mut [], &[]).unwrap();
EbpfVm::<UserError, TestInstructionMeter>::new(executable.as_ref(), &mut [], &mut [])
.unwrap();
vm.bind_syscall_context_object(Box::new(syscalls::BpfTimeGetNs {}), None)
.unwrap();

2 changes: 1 addition & 1 deletion src/assembler.rs
@@ -190,7 +190,7 @@ fn insn(opc: u8, dst: i64, src: i64, off: i64, imm: i64) -> Result<Insn, String>
/// Config::default(),
/// SyscallRegistry::default(),
/// ).unwrap();
/// let program = executable.get_text_bytes().unwrap().1;
/// let program = executable.get_text_bytes().1;
/// println!("{:?}", program);
/// # assert_eq!(program,
/// # &[0x07, 0x01, 0x00, 0x00, 0x05, 0x06, 0x00, 0x00,
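The doc-example change above implies that get_text_bytes() no longer returns a Result: element 1 of the returned tuple is the program bytes, and element 0 is presumably the text section's vm address. A speculative fragment assuming that shape, with executable built as in the surrounding doc example.

// Assumption about the tuple layout; only .1 is confirmed by the diff above.
let (text_vm_addr, program) = executable.get_text_bytes();
println!("text section at {:#x}, {} bytes", text_vm_addr, program.len());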
80 changes: 51 additions & 29 deletions src/call_frames.rs
@@ -5,6 +5,7 @@ use crate::{
ebpf::{ELF_INSN_DUMP_OFFSET, HOST_ALIGN, MM_STACK_START, SCRATCH_REGS},
error::{EbpfError, UserDefinedError},
memory_region::MemoryRegion,
vm::Config,
};

/// One call frame
@@ -19,23 +20,24 @@ struct CallFrame {
/// function to be called in its own frame. CallFrames manages
/// call frames
#[derive(Clone, Debug)]
pub struct CallFrames {
pub struct CallFrames<'a> {
config: &'a Config,
stack: AlignedMemory,
region: MemoryRegion,
frame_index: usize,
frame_index_max: usize,
frames: Vec<CallFrame>,
}
impl CallFrames {
impl<'a> CallFrames<'a> {
/// New call frame, depth indicates maximum call depth
pub fn new(depth: usize, frame_size: usize) -> Self {
let mut stack = AlignedMemory::new(depth * frame_size, HOST_ALIGN);
stack.resize(depth * frame_size, 0).unwrap();
let region =
MemoryRegion::new_from_slice(stack.as_slice(), MM_STACK_START, frame_size as u64, true);
pub fn new(config: &'a Config) -> Self {
let mut stack =
AlignedMemory::new(config.max_call_depth * config.stack_frame_size, HOST_ALIGN);
stack
.resize(config.max_call_depth * config.stack_frame_size, 0)
.unwrap();
let mut frames = CallFrames {
config,
stack,
region,
frame_index: 0,
frame_index_max: 0,
frames: vec![
@@ -44,19 +46,30 @@ impl CallFrames {
saved_reg: [0u64; SCRATCH_REGS],
return_ptr: 0
};
depth
config.max_call_depth
],
};
for i in 0..depth {
// Seperate each stack frame's virtual address so that stack over/under-run is caught explicitly
frames.frames[i].vm_addr = MM_STACK_START + (i * 2 * frame_size) as u64;
// Seperate each stack frame's virtual address so that stack over/under-run is caught explicitly
let gap_factor = if config.enable_stack_frame_gaps { 2 } else { 1 };
for i in 0..config.max_call_depth {
frames.frames[i].vm_addr =
MM_STACK_START + (i * gap_factor * config.stack_frame_size) as u64;
}
frames
}

/// Get stack memory region
pub fn get_region(&self) -> &MemoryRegion {
&self.region
pub fn get_memory_region(&self) -> MemoryRegion {
MemoryRegion::new_from_slice(
self.stack.as_slice(),
MM_STACK_START,
if self.config.enable_stack_frame_gaps {
self.config.stack_frame_size as u64
} else {
0
},
true,
)
}

/// Get the vm address of the beginning of each stack frame
@@ -66,7 +79,7 @@

/// Get the address of a frame's top of stack
pub fn get_stack_top(&self) -> u64 {
self.frames[self.frame_index].vm_addr + (1 << self.region.vm_gap_shift)
self.frames[self.frame_index].vm_addr + self.config.stack_frame_size as u64
}

/// Get current call frame index, 0 is the root frame
@@ -121,32 +134,41 @@ mod tests {

#[test]
fn test_frames() {
const DEPTH: usize = 10;
const FRAME_SIZE: u64 = 8;
let mut frames = CallFrames::new(DEPTH, FRAME_SIZE as usize);
let config = Config {
max_call_depth: 10,
stack_frame_size: 8,
enable_stack_frame_gaps: true,
..Config::default()
};
let mut frames = CallFrames::new(&config);
let mut ptrs: Vec<u64> = Vec::new();
for i in 0..DEPTH - 1 {
let registers = vec![i as u64; FRAME_SIZE as usize];
for i in 0..config.max_call_depth - 1 {
let registers = vec![i as u64; config.stack_frame_size];
assert_eq!(frames.get_frame_index(), i);
ptrs.push(frames.get_frame_pointers()[i]);

let top = frames.push::<UserError>(&registers[0..4], i).unwrap();
let new_ptrs = frames.get_frame_pointers();
assert_eq!(top, new_ptrs[i + 1] + FRAME_SIZE);
assert_ne!(top, ptrs[i] + FRAME_SIZE - 1);
assert!(!(ptrs[i] <= new_ptrs[i + 1] && new_ptrs[i + 1] < ptrs[i] + FRAME_SIZE));
assert_eq!(top, new_ptrs[i + 1] + config.stack_frame_size as u64);
assert_ne!(top, ptrs[i] + config.stack_frame_size as u64 - 1);
assert!(
!(ptrs[i] <= new_ptrs[i + 1]
&& new_ptrs[i + 1] < ptrs[i] + config.stack_frame_size as u64)
);
}
let i = DEPTH - 1;
let registers = vec![i as u64; FRAME_SIZE as usize];
let i = config.max_call_depth - 1;
let registers = vec![i as u64; config.stack_frame_size];
assert_eq!(frames.get_frame_index(), i);
ptrs.push(frames.get_frame_pointers()[i]);

assert!(frames.push::<UserError>(&registers, DEPTH - 1).is_err());
assert!(frames
.push::<UserError>(&registers, config.max_call_depth - 1)
.is_err());

for i in (0..DEPTH - 1).rev() {
for i in (0..config.max_call_depth - 1).rev() {
let (saved_reg, stack_ptr, return_ptr) = frames.pop::<UserError>().unwrap();
assert_eq!(saved_reg, [i as u64, i as u64, i as u64, i as u64]);
assert_eq!(ptrs[i] + FRAME_SIZE, stack_ptr);
assert_eq!(ptrs[i] + config.stack_frame_size as u64, stack_ptr);
assert_eq!(i, return_ptr);
}

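The call_frames.rs changes show the new construction pattern directly: CallFrames borrows a Config instead of taking (depth, frame_size), and its stack MemoryRegion is produced on demand by get_memory_region(), with the per-frame gap applied only when enable_stack_frame_gaps is set. A minimal usage sketch assembled from the calls visible in this diff; the field values are examples only.

// Sketch using only items that appear in this diff; values are illustrative.
let config = Config {
    max_call_depth: 10,
    stack_frame_size: 4096,
    enable_stack_frame_gaps: false, // frames laid out back to back, no guard gaps
    ..Config::default()
};
let frames = CallFrames::new(&config);
let stack_region = frames.get_memory_region(); // replaces the old get_region(&self) accessor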
