Refactor fuzzer to use a custom StorageBackend
This removes a bunch of cfg(fuzzing) statements from the main codebase
cberner committed Oct 24, 2023
1 parent a7d00e8 commit d51179e
Showing 5 changed files with 214 additions and 131 deletions.
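The change follows one pattern throughout: instead of a cfg(fuzzing)-only crash countdown baked into the database, the fuzzer now supplies its own `StorageBackend` that wraps the stock `FileBackend` and injects an I/O error after a configurable number of writes. A condensed, self-contained sketch of that pattern (identifiers mirror the `FuzzerBackend` in the diff below, slightly simplified — the real one also fails reads once exhausted — and the `main` wiring is illustrative):

use redb::backends::FileBackend;
use redb::{Database, StorageBackend};
use std::io::ErrorKind;
use std::sync::atomic::{AtomicU64, Ordering};
use tempfile::NamedTempFile;

// Wraps the stock FileBackend and fails writes once a countdown of allowed
// operations is exhausted, simulating a crash mid-commit.
#[derive(Debug)]
struct FuzzerBackend {
    inner: FileBackend,
    countdown: AtomicU64,
}

impl StorageBackend for FuzzerBackend {
    fn len(&self) -> Result<u64, std::io::Error> {
        self.inner.len()
    }

    fn read(&self, offset: u64, len: usize) -> Result<Vec<u8>, std::io::Error> {
        self.inner.read(offset, len)
    }

    fn set_len(&self, len: u64) -> Result<(), std::io::Error> {
        self.inner.set_len(len)
    }

    fn sync_data(&self, _eventual: bool) -> Result<(), std::io::Error> {
        Ok(()) // the fuzzer doesn't test crash durability, so fsync is skipped
    }

    fn write(&self, offset: u64, data: &[u8]) -> Result<(), std::io::Error> {
        // Atomically decrement; once the countdown hits zero, every write
        // fails with ErrorKind::Other, which the fuzzer treats as simulated.
        if self
            .countdown
            .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| x.checked_sub(1))
            .is_err()
        {
            return Err(std::io::Error::from(ErrorKind::Other));
        }
        self.inner.write(offset, data)
    }
}

fn main() -> Result<(), redb::Error> {
    let file = NamedTempFile::new().unwrap();
    let backend = FuzzerBackend {
        inner: FileBackend::new(file.as_file().try_clone().unwrap())?,
        countdown: AtomicU64::new(1_000), // fail after 1,000 writes
    };
    let _db = Database::builder().create_with_backend(backend)?;
    Ok(())
}

Because the injected failure surfaces as an ordinary `std::io::Error` with `ErrorKind::Other`, the fuzzer can recognize it without redb carrying a special `SimulatedIOFailure` variant, which is what lets the cfg(fuzzing) hooks come out of the main codebase.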
104 changes: 92 additions & 12 deletions fuzz/fuzz_targets/fuzz_redb.rs
@@ -1,13 +1,16 @@
#![no_main]

use libfuzzer_sys::fuzz_target;
use redb::{AccessGuard, Database, Durability, Error, MultimapTable, MultimapTableDefinition, MultimapValue, ReadableMultimapTable, ReadableTable, Savepoint, Table, TableDefinition, WriteTransaction};
use redb::{AccessGuard, Database, Durability, Error, MultimapTable, MultimapTableDefinition, MultimapValue, ReadableMultimapTable, ReadableTable, Savepoint, StorageBackend, Table, TableDefinition, WriteTransaction};
use std::collections::{BTreeMap, BTreeSet, HashSet};
use std::io::{Read, Seek, SeekFrom};
use std::fmt::Debug;
use std::io::{ErrorKind, Read, Seek, SeekFrom};
use std::sync::atomic::{AtomicU64, Ordering};
use tempfile::NamedTempFile;

mod common;
use common::*;
use redb::backends::FileBackend;
use crate::FuzzerSavepoint::{Ephemeral, NotYetDurablePersistent, Persistent};

// These slow down the fuzzer, so don't create too many
@@ -16,6 +19,62 @@ const TABLE_DEF: TableDefinition<u64, &[u8]> = TableDefinition::new("fuzz_table"
const MULTIMAP_TABLE_DEF: MultimapTableDefinition<u64, &[u8]> =
MultimapTableDefinition::new("fuzz_multimap_table");

#[derive(Debug)]
struct FuzzerBackend {
inner: FileBackend,
countdown: AtomicU64,
}

impl FuzzerBackend {
fn new(backend: FileBackend, countdown: u64) -> Self {
Self {
inner: backend,
countdown: AtomicU64::new(countdown),
}
}

fn check_countdown(&self) -> Result<(), std::io::Error> {
if self.countdown.load(Ordering::SeqCst) == 0 {
return Err(std::io::Error::from(ErrorKind::Other));
}

Ok(())
}

fn decrement_countdown(&self) -> Result<(), std::io::Error> {
if self.countdown.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| if x > 0 { Some(x - 1) } else { None } ).is_err() {
return Err(std::io::Error::from(ErrorKind::Other));
}

Ok(())
}
}

impl StorageBackend for FuzzerBackend {
fn len(&self) -> Result<u64, std::io::Error> {
self.inner.len()
}

fn read(&self, offset: u64, len: usize) -> Result<Vec<u8>, std::io::Error> {
self.check_countdown()?;
self.inner.read(offset, len)
}

fn set_len(&self, len: u64) -> Result<(), std::io::Error> {
self.inner.set_len(len)
}

fn sync_data(&self, _eventual: bool) -> Result<(), std::io::Error> {
// No-op. The fuzzer doesn't test crashes, so fsync is unnecessary
Ok(())
}

fn write(&self, offset: u64, data: &[u8]) -> Result<(), std::io::Error> {
self.decrement_countdown()?;
self.inner.write(offset, data)
}
}

enum FuzzerSavepoint<T: Clone> {
Ephemeral(Savepoint, BTreeMap<u64, T>),
Persistent(u64, BTreeMap<u64, T>),
@@ -355,7 +414,7 @@ fn handle_table_op(op: &FuzzOperation, reference: &mut BTreeMap<u64, usize>, tab
}
drop(reference_iter);
reference.retain(|x, _| (*x < start || *x >= end) || *x % modulus != 0);
// This is basically assert!(iter.next().is_none()), but we also allow an Err such as SimulatedIOFailure
// This is basically assert!(iter.next().is_none()), but we also allow an Err such as a simulated IO error
if let Some(Ok((_, _))) = iter.next() {
panic!();
}
@@ -390,16 +449,35 @@ fn handle_table_op(op: &FuzzOperation, reference: &mut BTreeMap<u64, usize>, tab
Ok(())
}

fn is_simulated_io_error(err: &redb::Error) -> bool {
match err {
Error::Io(io_err) => {
matches!(io_err.kind(), ErrorKind::Other)
},
_ => false
}
}

fn exec_table_crash_support<T: Clone>(config: &FuzzConfig, apply: fn(&Database, &mut BTreeMap<u64, T>, &FuzzTransaction, &mut SavepointManager<T>) -> Result<(), redb::Error>) -> Result<(), redb::Error> {
let mut redb_file: NamedTempFile = NamedTempFile::new().unwrap();
let backend = FuzzerBackend::new(FileBackend::new(redb_file.as_file().try_clone().unwrap())?, config.crash_after_ops.value);

let mut db = Database::builder()
let result = Database::builder()
.set_page_size(config.page_size.value)
.set_cache_size(config.cache_size.value)
.set_region_size(config.region_size.value as u64)
.create(redb_file.path())
.unwrap();
db.set_crash_countdown(config.crash_after_ops.value);
.create_with_backend(backend);
let mut db = match result {
Ok(db) => db,
Err(err) => {
let err: redb::Error = err.into();
if is_simulated_io_error(&err) {
return Ok(());
} else {
return Err(err);
}
}
};

let mut savepoint_manager = SavepointManager::new();
let mut reference = BTreeMap::new();
@@ -414,7 +492,7 @@ fn exec_table_crash_support<T: Clone>(config: &FuzzConfig, apply: fn(&Database,
}
}
Err(err) => {
if matches!(err, Error::SimulatedIOFailure) {
if is_simulated_io_error(&err) {
drop(db);
savepoint_manager.crash();
non_durable_reference = reference.clone();
@@ -426,11 +504,12 @@ fn exec_table_crash_support<T: Clone>(config: &FuzzConfig, apply: fn(&Database,
assert_ne!(god_byte[0] & 2, 0);

// Repair the database
let backend = FuzzerBackend::new(FileBackend::new(redb_file.as_file().try_clone().unwrap()).unwrap(), u64::MAX);
db = Database::builder()
.set_page_size(config.page_size.value)
.set_cache_size(config.cache_size.value)
.set_region_size(config.region_size.value as u64)
.create(redb_file.path())
.create_with_backend(backend)
.unwrap();
} else {
return Err(err);
@@ -440,7 +519,7 @@ fn exec_table_crash_support<T: Clone>(config: &FuzzConfig, apply: fn(&Database,

let result = apply(&db, &mut non_durable_reference, transaction, &mut savepoint_manager);
if result.is_err() {
if matches!(result, Err(Error::SimulatedIOFailure)) {
if is_simulated_io_error(result.as_ref().err().unwrap()) {
drop(db);
savepoint_manager.crash();
non_durable_reference = reference.clone();
@@ -452,11 +531,12 @@ fn exec_table_crash_support<T: Clone>(config: &FuzzConfig, apply: fn(&Database,
assert_ne!(god_byte[0] & 2, 0);

// Repair the database
let backend = FuzzerBackend::new(FileBackend::new(redb_file.as_file().try_clone().unwrap()).unwrap(), u64::MAX);
db = Database::builder()
.set_page_size(config.page_size.value)
.set_cache_size(config.cache_size.value)
.set_region_size(config.region_size.value as u64)
.create(redb_file.path())
.create_with_backend(backend)
.unwrap();
} else {
return result;
@@ -469,7 +549,7 @@ fn exec_table_crash_support<T: Clone>(config: &FuzzConfig, apply: fn(&Database,
match run_compaction(&mut db, &mut savepoint_manager) {
Ok(_) => {}
Err(err) => {
if !matches!(err, Error::SimulatedIOFailure) {
if !is_simulated_io_error(&err) {
return Err(err);
}
}
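Both recovery branches in `exec_table_crash_support` repeat the same reopen step: drop the failed database, then rebuild it over a backend whose countdown is effectively infinite, so that only redb's repair runs. A sketch of that step factored into a helper (the factoring is ours; `FuzzerBackend`, `FuzzConfig`, and the builder calls all come from the fuzz target above):

// Illustrative refactor, not in the diff: reopen the database after a
// simulated crash with a backend that never fails, so redb's repair runs.
fn reopen_after_simulated_crash(
    redb_file: &tempfile::NamedTempFile,
    config: &FuzzConfig,
) -> Database {
    let backend = FuzzerBackend::new(
        FileBackend::new(redb_file.as_file().try_clone().unwrap()).unwrap(),
        u64::MAX, // effectively infinite countdown: no injected failures
    );
    Database::builder()
        .set_page_size(config.page_size.value)
        .set_cache_size(config.cache_size.value)
        .set_region_size(config.region_size.value as u64)
        .create_with_backend(backend)
        .unwrap()
}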
176 changes: 116 additions & 60 deletions src/db.rs
@@ -36,12 +36,20 @@ pub trait StorageBackend: 'static + Debug + Send + Sync {
fn len(&self) -> std::result::Result<u64, io::Error>;

/// Reads the specified array of bytes from the storage.
///
/// If `len` + `offset` exceeds the length of the storage, an appropriate `Error` should be returned or a panic may occur.
fn read(&self, offset: u64, len: usize) -> std::result::Result<Vec<u8>, io::Error>;

/// Sets the length of the storage.
///
/// When extending the storage the new positions should be zero initialized.
fn set_len(&self, len: u64) -> std::result::Result<(), io::Error>;

/// Syncs all buffered data with the persistent storage.
///
/// If `eventual` is true, data may become persistent at some point after this call returns,
/// but the storage must guarantee that a write barrier is inserted: i.e. all writes before this
/// call to `sync_data()` will become persistent before any writes that occur after.
fn sync_data(&self, eventual: bool) -> std::result::Result<(), io::Error>;

/// Writes the specified array to the storage.
@@ -304,11 +312,6 @@ impl Database {
&self.mem
}

#[cfg(any(fuzzing, test))]
pub fn set_crash_countdown(&self, value: u64) {
self.mem.set_crash_countdown(value);
}

pub(crate) fn verify_primary_checksums(mem: &TransactionalMemory) -> Result<bool> {
let fake_freed_pages = Arc::new(Mutex::new(vec![]));
let table_tree = TableTree::new(mem.get_data_root(), mem, fake_freed_pages.clone());
@@ -876,10 +879,117 @@ impl std::fmt::Debug for Database {

#[cfg(test)]
mod test {
use crate::backends::FileBackend;
use crate::{
Database, DatabaseError, Durability, ReadableTable, StorageError, TableDefinition,
Database, DatabaseError, Durability, ReadableTable, StorageBackend, StorageError,
TableDefinition,
};
use std::io::ErrorKind;
use std::sync::atomic::{AtomicU64, Ordering};

#[derive(Debug)]
struct FailingBackend {
inner: FileBackend,
countdown: AtomicU64,
}

impl FailingBackend {
fn new(backend: FileBackend, countdown: u64) -> Self {
Self {
inner: backend,
countdown: AtomicU64::new(countdown),
}
}

fn check_countdown(&self) -> Result<(), std::io::Error> {
if self.countdown.load(Ordering::SeqCst) == 0 {
return Err(std::io::Error::from(ErrorKind::Other));
}

Ok(())
}

fn decrement_countdown(&self) -> Result<(), std::io::Error> {
if self
.countdown
.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| {
if x > 0 {
Some(x - 1)
} else {
None
}
})
.is_err()
{
return Err(std::io::Error::from(ErrorKind::Other));
}

Ok(())
}
}

impl StorageBackend for FailingBackend {
fn len(&self) -> Result<u64, std::io::Error> {
self.inner.len()
}

fn read(&self, offset: u64, len: usize) -> Result<Vec<u8>, std::io::Error> {
self.check_countdown()?;
self.inner.read(offset, len)
}

fn set_len(&self, len: u64) -> Result<(), std::io::Error> {
self.inner.set_len(len)
}

fn sync_data(&self, eventual: bool) -> Result<(), std::io::Error> {
self.check_countdown()?;
self.inner.sync_data(eventual)
}

fn write(&self, offset: u64, data: &[u8]) -> Result<(), std::io::Error> {
self.decrement_countdown()?;
self.inner.write(offset, data)
}
}

#[test]
fn crash_regression4() {
let tmpfile = crate::create_tempfile();

let backend = FailingBackend::new(
FileBackend::new(tmpfile.as_file().try_clone().unwrap()).unwrap(),
23,
);
let db = Database::builder()
.set_cache_size(12686)
.set_page_size(8 * 1024)
.set_region_size(32 * 4096)
.create_with_backend(backend)
.unwrap();

let table_def: TableDefinition<u64, &[u8]> = TableDefinition::new("x");

let tx = db.begin_write().unwrap();
let _savepoint = tx.ephemeral_savepoint().unwrap();
let _persistent_savepoint = tx.persistent_savepoint().unwrap();
tx.commit().unwrap();
let tx = db.begin_write().unwrap();
{
let mut table = tx.open_table(table_def).unwrap();
let _ = table.insert_reserve(118821, 360).unwrap();
}
let result = tx.commit();
assert!(result.is_err());

drop(db);
Database::builder()
.set_cache_size(1024 * 1024)
.set_page_size(8 * 1024)
.set_region_size(32 * 4096)
.create(tmpfile.path())
.unwrap();
}

#[test]
fn small_pages() {
Expand Down Expand Up @@ -1062,60 +1172,6 @@ mod test {
tx.abort().unwrap();
}

#[test]
fn crash_regression3() {
let tmpfile = crate::create_tempfile();

let db = Database::builder()
.set_cache_size(1024 * 1024)
.set_page_size(16 * 1024)
.set_region_size(32 * 4096)
.create(tmpfile.path())
.unwrap();

let tx = db.begin_write().unwrap();
let savepoint = tx.ephemeral_savepoint().unwrap();
tx.commit().unwrap();

let mut tx = db.begin_write().unwrap();
tx.restore_savepoint(&savepoint).unwrap();
tx.commit().unwrap();
}

#[test]
fn crash_regression4() {
let tmpfile = crate::create_tempfile();

let db = Database::builder()
.set_cache_size(12686)
.set_page_size(8 * 1024)
.set_region_size(32 * 4096)
.create(tmpfile.path())
.unwrap();
db.set_crash_countdown(10);

let table_def: TableDefinition<u64, &[u8]> = TableDefinition::new("x");

let tx = db.begin_write().unwrap();
let _savepoint = tx.ephemeral_savepoint().unwrap();
let _persistent_savepoint = tx.persistent_savepoint().unwrap();
tx.commit().unwrap();
let tx = db.begin_write().unwrap();
{
let mut table = tx.open_table(table_def).unwrap();
let _ = table.insert_reserve(118821, 360);
}

drop(tx);
drop(db);
Database::builder()
.set_cache_size(1024 * 1024)
.set_page_size(8 * 1024)
.set_region_size(32 * 4096)
.create(tmpfile.path())
.unwrap();
}

#[test]
fn dynamic_shrink() {
let tmpfile = crate::create_tempfile();
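As a reference point for the trait contract documented in `src/db.rs` above (bounds-checked `read`, zero-initialized growth in `set_len`, `sync_data` as the durability point), here is a minimal in-memory `StorageBackend`. It is purely illustrative and not part of this commit:

use redb::StorageBackend;
use std::io::{Error, ErrorKind};
use std::sync::Mutex;

// Illustrative only: an in-memory backend satisfying the documented contract.
#[derive(Debug, Default)]
struct InMemoryBackend(Mutex<Vec<u8>>);

impl StorageBackend for InMemoryBackend {
    fn len(&self) -> Result<u64, Error> {
        Ok(self.0.lock().unwrap().len() as u64)
    }

    fn read(&self, offset: u64, len: usize) -> Result<Vec<u8>, Error> {
        let guard = self.0.lock().unwrap();
        let start = offset as usize;
        let end = start.checked_add(len).ok_or(ErrorKind::InvalidInput)?;
        // Per the trait docs: a read past the end returns an error rather
        // than panicking.
        guard
            .get(start..end)
            .map(|s| s.to_vec())
            .ok_or_else(|| Error::from(ErrorKind::UnexpectedEof))
    }

    fn set_len(&self, len: u64) -> Result<(), Error> {
        // Per the trait docs: newly added positions are zero-initialized.
        self.0.lock().unwrap().resize(len as usize, 0);
        Ok(())
    }

    fn sync_data(&self, _eventual: bool) -> Result<(), Error> {
        Ok(()) // nothing to flush: the data never leaves memory
    }

    fn write(&self, offset: u64, data: &[u8]) -> Result<(), Error> {
        let mut guard = self.0.lock().unwrap();
        let end = offset as usize + data.len();
        if guard.len() < end {
            guard.resize(end, 0);
        }
        guard[offset as usize..end].copy_from_slice(data);
        Ok(())
    }
}

Since the data never leaves memory, `sync_data` has nothing to flush, mirroring the no-op in the fuzzer's backend above.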
