More benchmarks and tmpfs fix... #6

Merged (4 commits, Sep 20, 2023)
ci-based/config.yml (23 additions, 4 deletions)
@@ -31,10 +31,9 @@ CPU_SET:
- 1
- 2

# This is the number of loops that the zeek benchmarker will run against the data file
# in order to average out noise in the process. A value of 3 is a reasonable balance
# for overall runtime for each request.
RUN_COUNT: 3
# Default number of times each test runs. Can be overridden by providing
# a runs key within each test.
RUN_COUNT: 5

# Max time for unpacking the build archives.
TAR_TIMEOUT: 20
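The new default interacts with a per-test override: further down, ZeekTest.from_dict() in ci-based/zeek_benchmarker/tasks.py now falls back to the config's run_count instead of a hard-coded 3. A minimal sketch of the resolution order (resolve_runs and the cfg object are illustrative stand-ins, not repository code):

    # cfg.run_count reads RUN_COUNT from config.yml (5 after this change).
    def resolve_runs(cfg, test_entry: dict) -> int:
        # A per-test "runs" key wins; otherwise the global default applies,
        # mirroring runs=d.get("runs", cfg.run_count) in ZeekTest.from_dict().
        return test_entry.get("runs", cfg.run_count)

    resolve_runs(cfg, {"id": "pcap-50k-tcp-conns"})              # -> 5
    resolve_runs(cfg, {"id": "pcap-50k-tcp-conns", "runs": 10})  # -> 10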
@@ -52,10 +51,26 @@ ZEEK_TESTS:
- id: pcap-50k-tcp-conns
pcap_file: 50k-tcp-conns.pcap

- id: pcap-much-alexa-https-top-100
pcap_file: much-alexa-https-top-100.pcap

- id: micro-misc-zeek-version
bench_command: /benchmarker/scripts/tiny-benchmark.sh
bench_args: -D -b microbenchmarks/misc/zeek-version.zeek

# Same as above, but not bare and load test-all-policy, too.
- id: micro-misc-zeek-version-all-policy
bench_command: /benchmarker/scripts/tiny-benchmark.sh
bench_args: -D test-all-policy microbenchmarks/misc/zeek-version.zeek

- id: micro-vector-ops-simple-value
bench_command: /benchmarker/scripts/tiny-benchmark.sh
bench_args: -D -b microbenchmarks/vector-ops/simple-value.zeek

- id: micro-vector-ops-complex-value
bench_command: /benchmarker/scripts/tiny-benchmark.sh
bench_args: -D -b microbenchmarks/vector-ops/complex-value.zeek

- id: micro-table-ops-simple-key-value
bench_command: /benchmarker/scripts/tiny-benchmark.sh
bench_args: -D -b microbenchmarks/table-ops/simple-key-value.zeek
@@ -104,6 +119,10 @@ ZEEK_TESTS:
bench_command: /benchmarker/scripts/tiny-benchmark.sh
bench_args: -D -b microbenchmarks/function-calls/recursion.zeek

- id: micro-function-calls-many
bench_command: /benchmarker/scripts/tiny-benchmark.sh
bench_args: -D -b microbenchmarks/function-calls/many.zeek

- id: micro-patterns-basic
bench_command: /benchmarker/scripts/tiny-benchmark.sh
bench_args: -D -b microbenchmarks/patterns/basic.zeek
ci-based/scripts/microbenchmarks/function-calls/many.zeek (40 additions, 0 deletions)
@@ -0,0 +1,40 @@
global n = 5000000;

function f(c: count): count
{
return c;
}

function fvoid() { }

event zeek_init()
{
local i = n;
local sum = 0;
while ( i > 0 )
{
sum += f(i);
fvoid();
sum += f(i);
fvoid();
sum += f(i);
fvoid();
sum += f(i);
fvoid();
sum += f(i);
fvoid();
sum += f(i);
fvoid();
sum += f(i);
fvoid();
sum += f(i);
fvoid();
sum += f(i);
fvoid();
sum += f(i);
fvoid();
--i;
}

print sum;
}
ci-based/scripts/microbenchmarks/vector-ops/complex-value.zeek (22 additions, 0 deletions)
@@ -0,0 +1,22 @@
type V: record {
c1: count;
c2: count;
};

global n = 10000000;

global vec: vector of V;

event zeek_init()
{
while ( |vec| < n )
{
vec += V($c1=|vec|,$c2=|vec|);
}

local sum = 0;
for ( i in vec )
sum += vec[i]$c1 + vec[i]$c2;

print sum;
}
ci-based/scripts/microbenchmarks/vector-ops/simple-value.zeek (17 additions, 0 deletions)
@@ -0,0 +1,17 @@
global n = 30000000;

global vec: vector of count;

event zeek_init()
{
while ( |vec| < n )
{
vec += |vec|;
}

local sum = 0;
for ( i in vec )
sum += vec[i];

print sum;
}
ci-based/scripts/run-zeek.sh (5 additions, 0 deletions)
@@ -11,6 +11,11 @@ if [ -n "${BENCH_COMMAND}" ] && [ -n "${BENCH_ARGS}" ]; then
exit 1
fi

if [ -z "${DATA_FILE_NAME}" ] || [ -z "${TMPFS_PATH}" ]; then
echo "DATA_FILE_NAME or TMPFS_PATH not set" >&2
exit 1
fi

cp /test_data/${DATA_FILE_NAME} ${TMPFS_PATH}/${DATA_FILE_NAME}
timeout --signal=SIGKILL 5m /benchmarker/scripts/perf-benchmark.sh --quiet --parseable --mode file \
--seed ${ZEEKSEED} --build ${ZEEKBIN} --data-file ${TMPFS_PATH}/${DATA_FILE_NAME} \
ci-based/tests/test_tasks.py (51 additions, 0 deletions)
@@ -21,6 +21,11 @@ def tearDownClass(cls):

def setUp(self):
self._client_mock = mock.Mock(spec=docker.client.DockerClient)
self._container_mock = mock.Mock(spec=docker.models.containers.Container)
self._client_mock.containers.run.return_value = self._container_mock
self._container_mock.wait.return_value = {"StatusCode": 0}
self._container_mock.logs.return_value = "fake-logs"

self._cr = zeek_benchmarker.tasks.ContainerRunner(client=self._client_mock)
self.test_path = self.test_spool / "fake-job-id/build.tgz"

@@ -92,3 +97,49 @@ def test_unpack_spool_volume(self):
self.assertEqual("test-spool-volume", source_volume["Source"])
self.assertEqual("/source", source_volume["Target"])
self.assertTrue(source_volume["ReadOnly"])

def test__runc(self):
result = self._cr.runc(
image="test-image",
command="test-exit 1",
env={},
seccomp_profile={},
install_volume="test-install-volume",
install_target="/test/install",
test_data_volume="test_data",
)
self.assertEqual(0, result.returncode)
self.assertEqual("fake-logs", result.stdout)
self.assertEqual("fake-logs", result.stderr)

def test__runc_command__failed(self):
self._container_mock.wait.return_value = {"StatusCode": 1}

with self.assertRaises(zeek_benchmarker.tasks.CommandFailed):
self._cr.runc(
image="test-image",
command="test-exit 1",
env={},
seccomp_profile={},
install_volume="test-install-volume",
install_target="/test/install",
test_data_volume="test_data",
)

def test__runc__tmpfs(self):
self._cr.runc(
image="test-image",
command="test-exit 1",
env={},
seccomp_profile={},
install_volume="test-install-volume",
install_target="/test/install",
test_data_volume="test_data",
)

# Regression test for env not being populated with TMPFS_PATH
run_kwargs = self._client_mock.containers.run.call_args[1]
self.assertEqual("/mnt/data/tmpfs", run_kwargs["environment"]["TMPFS_PATH"])
self.assertEqual("/run", run_kwargs["environment"]["RUN_PATH"])
self.assertEqual("", run_kwargs["tmpfs"]["/mnt/data/tmpfs"])
self.assertEqual("", run_kwargs["tmpfs"]["/run"])
ci-based/zeek_benchmarker/config.py (4 additions, 0 deletions)
@@ -22,6 +22,10 @@ def tar_timeout(self) -> str:
def zeek_cpus(self) -> str:
return ",".join(str(c) for c in self._d["CPU_SET"])

@property
def run_count(self) -> int:
return self._d["RUN_COUNT"]

def __getitem__(self, k: str, default: typing.Any = None):
"""
Allow dictionary key lookups.
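The run_count property gives the RUN_COUNT lookup a single, typed home. A small usage sketch, assuming the same config.get() loader that _process() in tasks.py uses:

    cfg = config.get()
    cfg.run_count      # 5 with the config.yml change above
    cfg["RUN_COUNT"]   # equivalent lookup via __getitem__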
ci-based/zeek_benchmarker/tasks.py (20 additions, 8 deletions)
@@ -39,6 +39,12 @@ class InvalidChecksum(Error):
pass


class CommandFailed(Error):
"""Raised when the command within the container has a non-zero exit status."""

pass


Env = dict[str, str]


@@ -73,7 +79,6 @@ def runc(
test_data_target: str = "/test_data",
timeout: float = None,
cap_add: list[str] | None = None,
tmpfs: list[str] | None = None,
network_disabled: bool = True,
):
"""
@@ -93,12 +98,14 @@ class Result(typing.NamedTuple):

cap_add = cap_add or ["SYS_NICE"]
default_tmpfs_path = "/mnt/data/tmpfs"
tmpfs = tmpfs or {
default_run_path = "/run"
tmpfs = {
default_tmpfs_path: "",
default_run_path: "",
}

if not tmpfs:
env["TMPFS_PATH"] = default_tmpfs_path
env["TMPFS_PATH"] = default_tmpfs_path
env["RUN_PATH"] = default_run_path

mounts = [
docker.types.Mount(
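This hunk is the tmpfs fix from the PR title. In the old code the parameter was first defaulted with tmpfs = tmpfs or {...}, which leaves tmpfs as a non-empty dict, so the later if not tmpfs: branch could never run and TMPFS_PATH was never exported to the container; run-zeek.sh would then expand ${TMPFS_PATH}/${DATA_FILE_NAME} to just /<file> and copy the pcap outside the tmpfs. A simplified before/after sketch (not repository code):

    # Old behaviour: TMPFS_PATH never reaches the container's environment.
    def old_env(tmpfs=None):
        env = {}
        tmpfs = tmpfs or {"/mnt/data/tmpfs": ""}   # always truthy from here on
        if not tmpfs:                              # never taken
            env["TMPFS_PATH"] = "/mnt/data/tmpfs"
        return env                                 # -> {}

    # New behaviour: both paths are exported unconditionally, and the new
    # guard in run-zeek.sh refuses to run if they go missing again.
    def new_env():
        return {"TMPFS_PATH": "/mnt/data/tmpfs", "RUN_PATH": "/run"}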
@@ -119,6 +126,7 @@ class Result(typing.NamedTuple):

container = self._client.containers.run(
image=image,
working_dir=default_run_path,
command=command,
detach=True,
cap_add=cap_add,
@@ -140,6 +148,10 @@ class Result(typing.NamedTuple):
result.stdout,
result.stderr,
)

if result.returncode:
raise CommandFailed(result)

return result
finally:
container.remove(force=True)
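Non-zero exit statuses now surface as CommandFailed(result), so the Result tuple (returncode, stdout, stderr) travels with the exception; run_zeek_test's generic handler below logs and stores it. A sketch of inspecting the payload directly, with made-up argument values and runner standing in for a ContainerRunner instance:

    try:
        runner.runc(
            image="zeek-benchmarker",
            command="/benchmarker/scripts/run-zeek.sh",
            env={},
            seccomp_profile={},
            install_volume="install-volume",
            install_target="/zeek/install",
            test_data_volume="test_data",
        )
    except zeek_benchmarker.tasks.CommandFailed as e:
        failed = e.args[0]   # the Result namedtuple passed when raising
        print(failed.returncode, failed.stderr)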
@@ -395,10 +407,10 @@ class ZeekTest(typing.NamedTuple):
skip: bool = None

@staticmethod
def from_dict(d: dict[str, any]):
def from_dict(cfg: config.Config, d: dict[str, any]):
return ZeekTest(
test_id=d["id"],
runs=d.get("runs", 3),
runs=d.get("runs", cfg.run_count),
bench_command=d.get("bench_command"),
bench_args=d.get("bench_args"),
pcap=d.get("pcap_file"),
@@ -477,14 +489,14 @@ def run_zeek_test(self, t):
logger.error(error)
store.store_zeek_error(job=self, test=t, test_run=i, error=error)
except Exception as e:
error = f"Unhandled exception {e}"
error = f"Unhandled exception {type(e)} {e}"
logger.exception(error)
store.store_zeek_error(job=self, test=t, test_run=i, error=error)

def _process(self):
cfg = config.get()
for t in cfg["ZEEK_TESTS"]:
zeek_test = ZeekTest.from_dict(t)
zeek_test = ZeekTest.from_dict(cfg, t)
self.run_zeek_test(zeek_test)

