Skip to content

Commit

Permalink
Update test to write results to S3 bucket
Browse files Browse the repository at this point in the history
Only the manual test is being used now, because the udev rules are too slow.
Change the manual test to save its results to an S3 bucket, so that the
results can be processed more easily.

Signed-off-by: James Curtis <[email protected]>
  • Loading branch information
JamesC1305 committed Aug 7, 2024
1 parent 2c96f8e commit bf763f9
Show file tree
Hide file tree
Showing 3 changed files with 131 additions and 125 deletions.
5 changes: 4 additions & 1 deletion tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -362,7 +362,10 @@ def rootfs_fxt(request, record_property):
guest_kernel_fxt, params=kernel_params("vmlinux-5.10*")
)
guest_kernel_linux_acpi_only = pytest.fixture(
    guest_kernel_fxt, params=kernel_params("vmlinux-5.10.219")
)
# 6.5 guests are selected from the unfiltered kernel list because 6.5 is not
# an officially supported kernel version.
guest_kernel_linux_6_5 = pytest.fixture(
    guest_kernel_fxt, params=kernel_params("vmlinux-6.5.*", select=kernels_unfiltered)
)
# Use the unfiltered selector, since we don't officially support 6.1 yet.
# TODO: switch to default selector once we add full 6.1 support.
Expand Down
2 changes: 1 addition & 1 deletion tests/host_tools/hotplug.sh
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ done
readarray -t offline_cpus < <(lscpu -p=cpu --offline | sed '/^#/d')

# Online each previously-offline CPU via its sysfs control file.
# Use the absolute sysfs path: a bare "tee cpu*/online" depends on the
# current working directory and ignores the loop index entirely.
for cpu_idx in "${offline_cpus[@]}"; do
    echo 1 >"/sys/devices/system/cpu/cpu$cpu_idx/online"
done

/home/hotplug_time.o
249 changes: 126 additions & 123 deletions tests/integration_tests/performance/test_vcpu_hotplug.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,133 +12,133 @@

from host_tools.cargo_build import gcc_compile


@pytest.mark.parametrize(
    "vcpu_count", [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
)
def test_custom_udev_rule_latency(
    microvm_factory, guest_kernel_linux_acpi_only, rootfs_rw, vcpu_count
):
    """Measure vCPU hotplug latency with a custom udev onlining rule.

    Boots a 1-vCPU microVM, replaces the default 40-vm-hotadd.rules with
    1-cpu-hotplug.rules inside the guest, hotplugs `vcpu_count` vCPUs via
    the API, and extracts two timings per iteration:

    - the API call duration, from the Firecracker log (reported in us,
      converted to ms), and
    - the guest-side onlining duration, from the first "CPU1 has been
      hot-added" dmesg line to the last kernel message.

    Runs `iterations` boots and prints the averages.
    """
    iterations = 5
    api_durations = []
    onlining_durations = []
    print(f"Vcpu count: {vcpu_count}")
    for _ in range(iterations):
        uvm_hotplug = microvm_factory.build(guest_kernel_linux_acpi_only, rootfs_rw)
        uvm_hotplug.jailer.extra_args.update({"no-seccomp": None})
        uvm_hotplug.help.enable_console()
        uvm_hotplug.spawn()
        uvm_hotplug.basic_config(vcpu_count=1, mem_size_mib=128)
        uvm_hotplug.add_net_iface()
        uvm_hotplug.start()
        # Swap the stock hot-add rule for the custom rule under test.
        uvm_hotplug.ssh.run("rm /usr/lib/udev/rules.d/40-vm-hotadd.rules")
        uvm_hotplug.ssh.scp_put(
            Path("./host_tools/1-cpu-hotplug.rules"),
            Path("/usr/lib/udev/rules.d/1-cpu-hotplug.rules"),
        )

        time.sleep(0.25)

        uvm_hotplug.api.hotplug.put(Vcpu={"add": vcpu_count})
        # Give the guest time to run the udev rule before reading dmesg.
        time.sleep(0.25)
        _, stdout, _ = uvm_hotplug.ssh.run("dmesg")

        # API call duration is logged in microseconds; convert to ms.
        api_duration = (
            float(
                re.findall(
                    r"Total previous API call duration: (\d+) us\.",
                    uvm_hotplug.log_data,
                )[-1]
            )
            / 1000
        )

        # Onlining window: first hot-add message to the last kernel
        # message, converted from seconds to milliseconds.
        start = float(
            re.findall(r"\[\s+(\d+\.\d+)\] CPU1 has been hot-added\n", stdout)[0]
        )
        end = float(re.findall(r"\[\s+(\d+\.\d+)\] \w+", stdout)[-1])
        elapsed_time = (end - start) * 1000
        print(f"Api call duration: {api_duration} ms")
        print(f"Onlining duration: {elapsed_time} ms")
        api_durations.append(api_duration)
        onlining_durations.append(elapsed_time)
        uvm_hotplug.kill()
        time.sleep(1)

    avg_api_duration = sum(api_durations) / iterations
    avg_onlining_duration = sum(onlining_durations) / iterations
    print(f"Averages for {vcpu_count} hotplugged vcpus:")
    print(f"\tAverage API call duration: {avg_api_duration} ms")
    print(f"\tAverage onlining duration: {avg_onlining_duration} ms")


@pytest.mark.parametrize(
    "vcpu_count", [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
)
def test_default_udev_rule_latency(
    microvm_factory, guest_kernel_linux_acpi_only, rootfs_rw, vcpu_count
):
    """Measure vCPU hotplug latency with the guest's default udev rule.

    Boots a 1-vCPU microVM, asserts the stock 40-vm-hotadd.rules is
    present, hotplugs `vcpu_count` vCPUs via the API, and extracts two
    timings per iteration:

    - the API call duration, from the Firecracker log (reported in us,
      converted to ms), and
    - the guest-side onlining duration, from the first "CPU1 has been
      hot-added" dmesg line to the last kernel message.

    Runs `iterations` boots and prints the averages.
    """
    iterations = 5
    api_durations = []
    onlining_durations = []
    print(f"Vcpu count: {vcpu_count}")
    for _ in range(iterations):
        uvm_hotplug = microvm_factory.build(guest_kernel_linux_acpi_only, rootfs_rw)
        uvm_hotplug.jailer.extra_args.update({"no-seccomp": None})
        uvm_hotplug.help.enable_console()
        uvm_hotplug.spawn()
        uvm_hotplug.basic_config(vcpu_count=1, mem_size_mib=128)
        uvm_hotplug.add_net_iface()
        uvm_hotplug.start()

        time.sleep(0.25)

        # Sanity check: the default hot-add rule must still be installed.
        _, stdout, _ = uvm_hotplug.ssh.run("ls /usr/lib/udev/rules.d")
        default_rule = re.search(r"40-vm-hotadd\.rules", stdout)
        assert default_rule is not None

        uvm_hotplug.api.hotplug.put(Vcpu={"add": vcpu_count})
        # Give the guest time to run the udev rule before reading dmesg.
        time.sleep(0.25)
        _, stdout, _ = uvm_hotplug.ssh.run("dmesg")

        # API call duration is logged in microseconds; convert to ms.
        api_duration = (
            float(
                re.findall(
                    r"Total previous API call duration: (\d+) us\.",
                    uvm_hotplug.log_data,
                )[-1]
            )
            / 1000
        )

        # Onlining window: first hot-add message to the last kernel
        # message, converted from seconds to milliseconds.
        start = float(
            re.findall(r"\[\s+(\d+\.\d+)\] CPU1 has been hot-added\n", stdout)[0]
        )
        end = float(re.findall(r"\[\s+(\d+\.\d+)\] \w+", stdout)[-1])
        elapsed_time = (end - start) * 1000
        print(f"Api call duration: {api_duration} ms")
        print(f"Onlining duration: {elapsed_time} ms")
        api_durations.append(api_duration)
        onlining_durations.append(elapsed_time)
        uvm_hotplug.kill()
        time.sleep(1)

    avg_api_duration = sum(api_durations) / iterations
    avg_onlining_duration = sum(onlining_durations) / iterations
    print(f"Averages for {vcpu_count} hotplugged vcpus:")
    print(f"\tAverage API call duration: {avg_api_duration} ms")
    print(f"\tAverage onlining duration: {avg_onlining_duration} ms")
# @pytest.mark.parametrize(
# "vcpu_count", [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
# )
# def test_custom_udev_rule_latency(
# microvm_factory, guest_kernel_linux_acpi_only, rootfs_rw, vcpu_count
# ):
# """Test the latency for hotplugging and booting CPUs in the guest"""
# api_durations = []
# onlining_durations = []
# print(f"Vcpu count: {vcpu_count}")
# for i in range(5):
# uvm_hotplug = microvm_factory.build(guest_kernel_linux_acpi_only, rootfs_rw)
# uvm_hotplug.jailer.extra_args.update({"no-seccomp": None})
# uvm_hotplug.help.enable_console()
# uvm_hotplug.spawn()
# uvm_hotplug.basic_config(vcpu_count=1, mem_size_mib=128)
# uvm_hotplug.add_net_iface()
# uvm_hotplug.start()
# uvm_hotplug.ssh.run("rm /usr/lib/udev/rules.d/40-vm-hotadd.rules")
# uvm_hotplug.ssh.scp_put(
# Path("./host_tools/1-cpu-hotplug.rules"),
# Path("/usr/lib/udev/rules.d/1-cpu-hotplug.rules"),
# )
#
# time.sleep(0.25)
#
# uvm_hotplug.api.hotplug.put(Vcpu={"add": vcpu_count})
# time.sleep(0.25)
# _, stdout, _ = uvm_hotplug.ssh.run("dmesg")
#
# # Extract API call duration
# api_duration = (
# float(
# re.findall(
# r"Total previous API call duration: (\d+) us\.",
# uvm_hotplug.log_data,
# )[-1]
# )
# / 1000
# )
#
# # Extract onlining timings
# start = float(
# re.findall(r"\[\s+(\d+\.\d+)\] CPU1 has been hot-added\n", stdout)[0]
# )
# end = float(re.findall(r"\[\s+(\d+\.\d+)\] \w+", stdout)[-1])
# elapsed_time = (end - start) * 1000
# print(f"Api call duration: {api_duration} ms")
# print(f"Onlining duration: {elapsed_time} ms")
# api_durations.append(api_duration)
# onlining_durations.append(elapsed_time)
# uvm_hotplug.kill()
# time.sleep(1)
#
# avg_api_duration = sum(api_durations) / 5
# avg_onlining_duration = sum(onlining_durations) / 5
# print(f"Averages for {vcpu_count} hotplugged vcpus:")
# print(f"\tAverage API call duration: {avg_api_duration} ms")
# print(f"\tAverage onliing duration: {avg_onlining_duration} ms")
#
#
# @pytest.mark.parametrize(
# "vcpu_count", [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
# )
# def test_default_udev_rule_latency(
# microvm_factory, guest_kernel_linux_acpi_only, rootfs_rw, vcpu_count
# ):
# """Test the latency for hotplugging and booting CPUs in the guest"""
# api_durations = []
# onlining_durations = []
# print(f"Vcpu count: {vcpu_count}")
# for i in range(5):
# uvm_hotplug = microvm_factory.build(guest_kernel_linux_acpi_only, rootfs_rw)
# uvm_hotplug.jailer.extra_args.update({"no-seccomp": None})
# uvm_hotplug.help.enable_console()
# uvm_hotplug.spawn()
# uvm_hotplug.basic_config(vcpu_count=1, mem_size_mib=128)
# uvm_hotplug.add_net_iface()
# uvm_hotplug.start()
#
# time.sleep(0.25)
#
# _, stdout, _ = uvm_hotplug.ssh.run("ls /usr/lib/udev/rules.d")
# default_rule = re.search(r"40-vm-hotadd\.rules", stdout)
# assert default_rule is not None
#
# uvm_hotplug.api.hotplug.put(Vcpu={"add": vcpu_count})
# time.sleep(0.25)
# _, stdout, _ = uvm_hotplug.ssh.run("dmesg")
#
# # Extract API call duration
# api_duration = (
# float(
# re.findall(
# r"Total previous API call duration: (\d+) us\.",
# uvm_hotplug.log_data,
# )[-1]
# )
# / 1000
# )
#
# # Extract onlining timings
# start = float(
# re.findall(r"\[\s+(\d+\.\d+)\] CPU1 has been hot-added\n", stdout)[0]
# )
# end = float(re.findall(r"\[\s+(\d+\.\d+)\] \w+", stdout)[-1])
# elapsed_time = (end - start) * 1000
# print(f"Api call duration: {api_duration} ms")
# print(f"Onlining duration: {elapsed_time} ms")
# api_durations.append(api_duration)
# onlining_durations.append(elapsed_time)
# uvm_hotplug.kill()
# time.sleep(1)
#
# avg_api_duration = sum(api_durations) / 5
# avg_onlining_duration = sum(onlining_durations) / 5
# print(f"Averages for {vcpu_count} hotplugged vcpus:")
# print(f"\tAverage API call duration: {avg_api_duration} ms")
# print(f"\tAverage onliing duration: {avg_onlining_duration} ms")
#


@pytest.mark.parametrize(
"vcpu_count", [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30]
)
def test_manual_latency(
microvm_factory, guest_kernel_linux_acpi_only, rootfs_rw, vcpu_count
microvm_factory, guest_kernel_linux_acpi_only, rootfs_rw, vcpu_count, results_dir
):
"""Test the latency for hotplugging and booting CPUs in the guest"""
gcc_compile(Path("./host_tools/hotplug_time.c"), Path("host_tools/hotplug_time.o"))
Expand Down Expand Up @@ -189,8 +189,11 @@ def test_manual_latency(
# Extract onlining timings
data.append({"vcpus": vcpu_count, "api": api_duration, "onlining": timestamp})

df = pandas.DataFrame.from_dict(data).to_csv(
f"../test_results/manual-hotplug_{vcpu_count}.csv",
output_file = results_dir / f"hotplug-{vcpu_count}.csv"

csv_data = pandas.DataFrame.from_dict(data).to_csv(
index=False,
float_format="%.3f",
)

output_file.write_text(csv_data)

0 comments on commit bf763f9

Please sign in to comment.