assembly_judge.py
import json
import os
import sys
from types import SimpleNamespace

from dodona.dodona_command import Judgement, Message, ErrorType, Tab, Context, TestCase, MessageFormat
from dodona.dodona_config import DodonaConfig, AssemblyLanguage
from dodona.translator import Translator
from evaluation.arguments import format_arguments
from evaluation.compilation import run_compilation
from evaluation.run import run_test
from exceptions.config_exceptions import UnknownArgumentTypeError
from exceptions.evaluation_exceptions import ValidationError, TestRuntimeError
from utils.file_loaders import text_loader
from utils.messages import compile_error, report_test, config_error, unknown_argument_type


def amend_submission(config: DodonaConfig):
"""
Adds the assembly directives necessary for generating the debug and linker information used in later staged.
"""
submission_content = text_loader(config.source)
modified_submission_content = []
if config.assembly == AssemblyLanguage.X86_32_INTEL or config.assembly == AssemblyLanguage.X86_64_INTEL:
modified_submission_content.append(".intel_syntax noprefix")
if config.assembly == AssemblyLanguage.ARM_32 or config.assembly == AssemblyLanguage.ARM_64:
global_directive = "global"
function_type = "%function"
else:
global_directive = "globl"
function_type = "@function"
# Note: we surround everything with .type & .size, as if it is one function, such that all the student's
# code will be under a single entry in the Valgrind output (as if it were one function).
modified_submission_content.extend((
f".{global_directive} {config.tested_function}",
f".type {config.tested_function}, {function_type}",
))
line_shift = len(modified_submission_content) + 1
modified_submission_content.extend((
submission_content,
f".size {config.tested_function}, .-{config.tested_function}\n", # newline to prevent warning
))
submission_file = os.path.join(config.workdir, "submission.s")
with open(submission_file, "w") as modified_submission_file:
modified_submission_file.write("\n".join(modified_submission_content))
return submission_file, line_shift
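
# Illustration (hypothetical example, not taken from any real exercise): for
# AssemblyLanguage.X86_64_INTEL and tested_function "my_func", the amended submission.s
# written above starts with
#     .intel_syntax noprefix
#     .globl my_func
#     .type my_func, @function
# followed by the student's code and a closing
#     .size my_func, .-my_func
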
def main():
    """
    Main judge method
    """
    # Read config JSON from stdin
    config = DodonaConfig.from_json(sys.stdin)

    with Judgement() as judge:
        # Perform sanity check
        config.sanity_check()

        # Initialize the translator
        config.translator = Translator.from_str(config.natural_language)

        try:
            config.process_judge_specific_options()
        except ValueError as e:
            config_error(judge, config.translator, str(e))
            return

        # This judge counts failed tests itself, which allows nicer feedback on Dodona
        # (the number of failed tests is displayed).
        failed_tests = 0

        submission_file, line_shift = amend_submission(config)

        # Load test plan
        with open(os.path.join(config.resources, config.plan_name), "r") as plan_file:
            plan = json.load(plan_file, object_hook=lambda d: SimpleNamespace(**d))
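        # The plan is parsed into nested SimpleNamespace objects. Based only on the fields
        # accessed in this file, a minimal plan could look like (hypothetical example):
        #     {"tests": [{"arguments": [...], "expected_return_value": 0, "max_cycles": 100}]}
        # run_compilation() may rely on additional fields that are not shown here.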

        # Compile code
        try:
            test_program_path = run_compilation(config, plan, submission_file)
        except ValidationError as validation_error:
            compile_error(judge, config, validation_error.msg, line_shift)
            return

        # Run the tests
        with Tab('Feedback'):
            # Put each testcase in a separate context
            for test_id, test in enumerate(plan.tests):
                try:
                    formatted_arguments = format_arguments(test.arguments)
                except UnknownArgumentTypeError as e:
                    unknown_argument_type(judge, config.translator, e.argument)
                    continue

                test_name = f"{config.tested_function}({formatted_arguments})"
                with Context() as test_context, TestCase(test_name, format=MessageFormat.CODE) as test_case:
                    expected = str(test.expected_return_value)
                    accepted = False
                    try:
                        test_result = run_test(config.translator, test_program_path, test_id, config)
                        accepted = test_result.generated == expected
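                        # Fields of test_result used below (as inferred from this file alone):
                        # .generated holds the produced return value as a string, .performance holds
                        # instruction/data-read/data-write counters, and .calling_convention_error
                        # describes a detected calling convention violation, if any.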

                        # Return value test
                        report_test(
                            config.translator.translate(Translator.Text.RETURN_VALUE),
                            expected,
                            test_result.generated,
                            accepted,
                        )

                        # Time measurement test
                        if test_result.performance:
                            # Combine the performance counters into a total number of cycles
                            simulated_total_cycles = config.performance_cycle_factor_instructions * test_result.performance.instruction_count \
                                + config.performance_cycle_factor_data_reads * test_result.performance.data_read_count \
                                + config.performance_cycle_factor_data_writes * test_result.performance.data_write_count
                            accepted_cycles = simulated_total_cycles <= test.max_cycles
                            accepted = accepted and accepted_cycles
                            report_test(
                                config.translator.translate(Translator.Text.MEASURED_CYCLES),
                                config.translator.translate(Translator.Text.EXECUTED_IN_CYCLES, msg=f"<= {test.max_cycles}"),
                                config.translator.translate(Translator.Text.EXECUTED_IN_CYCLES, msg=str(simulated_total_cycles)),
                                accepted_cycles,
                            )
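                            # Worked example with made-up numbers: with cycle factors 1, 2 and 2 and
                            # counts of 100 instructions, 10 data reads and 5 data writes, the total is
                            # 1 * 100 + 2 * 10 + 2 * 5 = 130 simulated cycles, accepted when max_cycles >= 130.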

                        # Calling convention test
                        if test_result.calling_convention_error is not None:
                            accepted_calling_convention = not bool(test_result.calling_convention_error)
                            if not accepted_calling_convention:
                                accepted = False
                            report_test(
                                config.translator.translate(Translator.Text.CALLING_CONVENTION_VIOLATION),
                                "",
                                config.translator.translate(Translator.Text.CALLING_CONVENTION_MSG, msg=test_result.calling_convention_error),
                                accepted_calling_convention,
                            )
                    except TestRuntimeError as e:
                        with Message(str(e)):
                            pass

                    test_context.accepted = accepted
                    test_case.accepted = accepted

                    if not accepted:
                        failed_tests += 1

        status = ErrorType.CORRECT if failed_tests == 0 else ErrorType.WRONG
        judge.status = config.translator.error_status(status, amount=failed_tests)


if __name__ == "__main__":
    main()
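
# Usage sketch (an assumption, not taken from this repository's documentation): the judge
# reads its DodonaConfig as JSON on standard input, so a local dry run could look like
#     python assembly_judge.py < config.json
# with config.json supplying fields such as source, workdir, resources and plan_name.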