Various changes and improvements for fixture support #35

Merged
merged 1 commit on May 16, 2024
README.md (15 additions, 1 deletion)
@@ -106,7 +106,7 @@ solana-test-suite minimize-tests --input-dir <input_dir> --solana-target <solana
| `--num-processes` | Number of processes to use |


-### Creating Fixtures
+### Creating Fixtures from Instruction Context

Create full test fixtures containing both the instruction context and its effects. Effects are computed by running the instruction context through `--solana-target`. Fixtures with `None` values for the instruction context or effects are not included.
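
As a concrete illustration (not part of the diff; the paths and target library name here are hypothetical), an invocation might look like:

```sh
solana-test-suite create-fixtures \
    --input-dir ./instruction_contexts \
    --solana-target ./libsolfuzz_agave.so \
    --output-dir ./fixtures \
    --num-processes 8
```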

@@ -123,6 +123,20 @@ solana-test-suite create-fixtures --input-dir <input_dir> --solana-target <solan
| `--readable` | Output fixtures in human-readable format |


+### Creating Instruction Context from Fixtures
+
+The inverse of the above: extracts the instruction context from each fixture. Does not require a target.
+
+```sh
+solana-test-suite instr-from-fixtures --input-dir <input_dir> --output-dir <output_dir> --num-processes <num_processes>
+```
+
+| Argument          | Description                                              |
+|-------------------|----------------------------------------------------------|
+| `--input-dir`     | Input directory containing instruction fixture messages  |
+| `--output-dir`    | Output directory for instruction contexts                |
+| `--num-processes` | Number of processes to use                               |
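
As a concrete illustration (not part of the diff; paths are hypothetical), fixtures produced by `create-fixtures` can be converted back into instruction contexts:

```sh
solana-test-suite instr-from-fixtures \
    --input-dir ./fixtures \
    --output-dir ./instruction_contexts \
    --num-processes 8
```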

### Validation

Used to detect potential memory corruption issues or inconsistent outputs. The program runs each supplied library `num-iteration` times on the entire test suite. Use the following:
src/test_suite/fixture_utils.py (26 additions, 2 deletions)
@@ -3,6 +3,7 @@
import test_suite.globals as globals
import test_suite.invoke_pb2 as pb
from google.protobuf import text_format
+from pathlib import Path


def create_fixture(
@@ -85,12 +86,35 @@ def write_fixture_to_disk(file_stem: str, serialized_instruction_fixture: str) -
        instr_fixture.input.CopyFrom(instr_context)
        instr_fixture.output.CopyFrom(instr_effects)

-        with open(globals.output_dir / (file_stem + ".txt"), "w") as f:
+        with open(globals.output_dir / (file_stem + ".fix.txt"), "w") as f:
            f.write(
                text_format.MessageToString(instr_fixture, print_unknown_fields=False)
            )
    else:
-        with open(f"{globals.output_dir}/{file_stem}.bin", "wb") as f:
+        with open(f"{globals.output_dir}/{file_stem}.fix", "wb") as f:
            f.write(serialized_instruction_fixture)

    return 1


+def extract_instr_context_from_fixture(fixture_file: Path):
+    """
+    Extract the InstrContext from an InstrFixture and write it to disk.
+
+    Args:
+        - fixture_file (Path): Path to fixture file
+
+    Returns:
+        - int: 1 on success, 0 on failure
+    """
+    try:
+        instr_fixture = pb.InstrFixture()
+        with open(fixture_file, "rb") as f:
+            instr_fixture.ParseFromString(f.read())
+
+        with open(globals.output_dir / (fixture_file.stem + ".bin"), "wb") as f:
+            f.write(instr_fixture.input.SerializeToString(deterministic=True))
+    except Exception:
+        return 0
+
+    return 1
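
The function above is a plain protobuf round trip: parse an `InstrFixture`, then serialize its `input` field. A minimal sketch of the property it relies on (an editor's illustration, not part of the diff; file names are hypothetical, and the generated `invoke_pb2` messages are assumed):

```python
import test_suite.invoke_pb2 as pb

# Parse a fixture written by create-fixtures (hypothetical path).
fixture = pb.InstrFixture()
with open("fixtures/example.fix", "rb") as f:
    fixture.ParseFromString(f.read())

# instr-from-fixtures writes exactly these bytes: the serialized input context.
ctx_bytes = fixture.input.SerializeToString(deterministic=True)

# Re-parsing them yields an InstrContext equal to the fixture's input.
ctx = pb.InstrContext()
ctx.ParseFromString(ctx_bytes)
assert ctx == fixture.input
```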
src/test_suite/multiprocessing_utils.py (26 additions, 5 deletions)
@@ -193,7 +193,7 @@ def merge_results_over_iterations(results: tuple) -> tuple[str, dict]:


def prune_execution_result(
-    file_serialized_instruction_context: tuple[str, dict],
+    file_serialized_instruction_context: tuple[str, str],
    file_serialized_instruction_effects: tuple[str, dict[str, str | None]],
) -> tuple[str, dict]:
    """
@@ -302,7 +302,9 @@ def check_consistency_in_results(file_stem: str, results: dict) -> dict[str, boo
    return results_per_target


-def build_test_results(file_stem: str, results: dict[str, str | None]) -> int:
+def build_test_results(
+    file_stem: str, results: dict[str, str | None]
+) -> tuple[str, int, dict | None]:
    """
    Builds the result of a single test execution and returns whether the test passed, failed, or was skipped.

@@ -312,9 +314,9 @@ def build_test_results(file_stem: str, results: dict[str, str | None]) -> int:

    Returns:
        - tuple[str, int, dict | None]: Tuple of:
-            File stem; 1 if passed, -1 if failed, 0 if skipped
-            Dictionary of target library
-            Names and file-dumpable serialized instruction effects.
+            - File stem
+            - 1 if passed, -1 if failed, 0 if skipped
+            - Dictionary of target library names and file-dumpable serialized instruction effects
    """
    # If no results or Agave rejects input, mark case as skipped
    if results is None:
@@ -393,3 +395,22 @@ def get_feature_pool(library: ctypes.CDLL) -> FeaturePool:
        result.hardcoded_features[i] for i in range(result.hardcoded_feature_cnt)
    ]
    return FeaturePool(supported, hardcoded)


+def run_test(test_file: Path) -> tuple[str, int, dict | None]:
+    """
+    Runs a single test from start to finish.
+
+    Args:
+        - test_file (Path): Path to the file containing serialized instruction contexts.
+
+    Returns:
+        - tuple[str, int, dict | None]: Tuple of:
+            - File stem
+            - 1 if passed, -1 if failed, 0 if skipped
+            - Dictionary of target library names and file-dumpable serialized instruction effects
+    """
+    test_case = generate_test_case(test_file)
+    results = process_single_test_case(*test_case)
+    pruned_results = prune_execution_result(test_case, results)
+    return build_test_results(*pruned_results)
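
`run_test` folds the old generate, execute, prune, and build stages into a single call per file. As an editor's sketch (not part of the diff), that also makes a trivial serial driver possible, assuming the process-level globals (target libraries, output buffers) were initialized the same way `run_tests` does:

```python
from pathlib import Path

def run_all_serial(input_dir: Path) -> dict[str, int]:
    """Map each file stem to 1 (passed), -1 (failed), or 0 (skipped)."""
    statuses = {}
    for test_file in input_dir.iterdir():
        file_stem, status, _effects = run_test(test_file)
        statuses[file_stem] = status
    return statuses
```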
src/test_suite/test_suite.py (54 additions, 46 deletions)
@@ -6,11 +6,14 @@
import ctypes
from multiprocessing import Pool
from pathlib import Path
from google.protobuf import text_format
from test_suite.constants import LOG_FILE_SEPARATOR_LENGTH
-from test_suite.fixture_utils import create_fixture, write_fixture_to_disk
+from test_suite.fixture_utils import (
+    create_fixture,
+    extract_instr_context_from_fixture,
+    write_fixture_to_disk,
+)
import test_suite.invoke_pb2 as pb
-from test_suite.codec_utils import decode_input, encode_input, encode_output
+from test_suite.codec_utils import encode_output
from test_suite.minimize_utils import minimize_single_test_case
from test_suite.multiprocessing_utils import (
    check_consistency_in_results,
@@ -21,9 +24,9 @@
    merge_results_over_iterations,
    process_instruction,
    process_single_test_case,
    build_test_results,
    prune_execution_result,
    get_feature_pool,
+    run_test,
)
import test_suite.globals as globals
from test_suite.debugger import debug_host
@@ -64,6 +67,7 @@ def exec_instr(
    instruction_effects = process_instruction(lib, instruction_context)

    if not instruction_effects:
+        print("No instruction effects returned")
        return None

    instruction_effects = instruction_effects.SerializeToString(deterministic=True)
@@ -287,6 +291,48 @@ def minimize_tests(
print(f"{sum(minimize_results)} files successfully minimized")


@app.command()
def instr_from_fixtures(
input_dir: Path = typer.Option(
Path("fixtures"),
"--input-dir",
"-i",
help="Input directory containing instruction fixture messages",
),
output_dir: Path = typer.Option(
Path("instr"),
"--output-dir",
"-o",
help="Output directory for instr contexts",
),
num_processes: int = typer.Option(
4, "--num-processes", "-p", help="Number of processes to use"
),
):
# Specify globals
globals.output_dir = output_dir

# Create the output directory, if necessary
if globals.output_dir.exists():
shutil.rmtree(globals.output_dir)
globals.output_dir.mkdir(parents=True, exist_ok=True)

num_test_cases = len(list(input_dir.iterdir()))

print("Converting to InstrContext...")
execution_contexts = []
with Pool(processes=num_processes) as pool:
for result in tqdm.tqdm(
pool.imap(extract_instr_context_from_fixture, input_dir.iterdir()),
total=num_test_cases,
):
execution_contexts.append(result)

print("-" * LOG_FILE_SEPARATOR_LENGTH)
print(f"{len(execution_contexts)} total files seen")
print(f"{sum(execution_contexts)} files successfully written")


@app.command()
def create_fixtures(
    input_dir: Path = typer.Option(
Expand Down Expand Up @@ -452,54 +498,16 @@ def run_tests(

    num_test_cases = len(list(input_dir.iterdir()))

-    # Generate the test cases in parallel from files on disk
-    execution_contexts = []
-    print("Reading test files...")
-    with Pool(processes=num_processes) as pool:
-        for result in tqdm.tqdm(
-            pool.imap(generate_test_case, input_dir.iterdir()), total=num_test_cases
-        ):
-            execution_contexts.append(result)
-
-    # Process the test cases in parallel through shared libraries
-    print("Executing tests...")
-    execution_results = []
+    # Process the test results in parallel
+    print("Running tests...")
+    test_case_results = []
    with Pool(
        processes=num_processes,
        initializer=initialize_process_output_buffers,
        initargs=(randomize_output_buffer,),
    ) as pool:
        for result in tqdm.tqdm(
-            pool.imap(
-                functools.partial(lazy_starmap, function=process_single_test_case),
-                execution_contexts,
-            ),
-            total=num_test_cases,
-        ):
-            execution_results.append(result)
-
-    # Prune accounts that were not actually modified
-    print("Pruning results...")
-    pruned_execution_results = []
-    with Pool(processes=num_processes) as pool:
-        for result in tqdm.tqdm(
-            pool.imap(
-                functools.partial(lazy_starmap, function=prune_execution_result),
-                zip(execution_contexts, execution_results),
-            ),
-            total=num_test_cases,
-        ):
-            pruned_execution_results.append(result)
-
-    # Process the test results in parallel
-    print("Building test results...")
-    test_case_results = []
-    with Pool(processes=num_processes) as pool:
-        for result in tqdm.tqdm(
-            pool.imap(
-                functools.partial(lazy_starmap, function=build_test_results),
-                pruned_execution_results,
-            ),
+            pool.imap(run_test, input_dir.iterdir()),
            total=num_test_cases,
        ):
            test_case_results.append(result)
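
Streaming file paths through a single `pool.imap` call removes the three intermediate lists the old pipeline materialized. If inter-process scheduling overhead ever becomes noticeable, `imap` also accepts a `chunksize` argument; a hypothetical tuning, not part of this PR:

```python
        for result in tqdm.tqdm(
            pool.imap(run_test, input_dir.iterdir(), chunksize=16),
            total=num_test_cases,
        ):
            test_case_results.append(result)
```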