diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..d184f19 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,41 @@ +# /rst/source/sections/modules.rst +/build/ +/dist/ +# /test_report.html +# /test_report/ +# /test_log.txt +# /test_screenshots.tar.gz +/.coverage +/cover/ +**/*.egg +**/*.egg-info +**/*.pid +**/.DS_Store +**/*.pyc +**/__pycache__/ +**/*.kdev* +**/*~ +/.tox/ +/.pytest_cache/ +/.eggs/ +**/*.swp +/deployment/ +/venv*/ +/.idea/ + +# docker specific + +/Dockerfile* +/docker* +!docker-entrypoint.sh +/.dockerignore +/.git +/.gitignore +/.github +/extra/ +/test* +/*.rst +/rst/ + +/docker/ +/rst/ diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 0000000..acb11a8 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,42 @@ +name: Build and publish Docker image + +on: + push: + tags: + - '*' + +jobs: + test: + uses: ./.github/workflows/tests-reusable.yml + with: + python-version: '3.7' + + build: + name: Build image + runs-on: ubuntu-latest + needs: test + permissions: + contents: read + packages: write + + steps: + - name: Login to container registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract metadata for Docker + id: metadata + uses: docker/metadata-action@v4 + with: + images: ghcr.io/${{ github.repository_owner }}/sioworkers + + - name: Build and publish image + uses: docker/build-push-action@v3 + with: + platforms: linux/amd64 + push: true + tags: ${{ steps.metadata.outputs.tags }} + labels: ${{ steps.metadata.outputs.labels }} diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000..13c79fd --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,18 @@ +name: Tests + +on: + push: + branches: + - master + pull_request: + +jobs: + test: + strategy: + fail-fast: false + matrix: + python-version: ['3.7', '3.8'] + + uses: './.github/workflows/tests-reusable.yml' + with: + python-version: ${{ matrix.python-version }} diff --git a/.github/workflows/tests-reusable.yml b/.github/workflows/tests-reusable.yml new file mode 100644 index 0000000..2037c7c --- /dev/null +++ b/.github/workflows/tests-reusable.yml @@ -0,0 +1,62 @@ +name: Tests + +on: + workflow_call: + inputs: + python-version: + required: true + type: string + +jobs: + test: + name: Run tests + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Setup Python + uses: actions/setup-python@v4 + with: + python-version: ${{ inputs.python-version }} + + - name: Setup Java 8 + uses: actions/setup-java@v3 + with: + distribution: 'temurin' + java-version: '8' + + - name: Install apt dependencies + run: | + sudo apt-get update + sudo apt-get install libdb-dev fp-compiler fp-units-base fp-units-math + + - name: Cache Python dependencies + uses: actions/cache@v3 + env: + cache-name: 'cache-pip' + with: + path: ~/.cache/pip + key: ${{ runner.os }}-dev-${{ env.cache-name }}-${{ inputs.python-version }}-${{ hashFiles('**/setup.py') }} + restore-keys: | + ${{ runner.os }}-dev-${{ env.cache-name }}-${{ inputs.python-version }}-${{ hashFiles('**/setup.py') }} + ${{ runner.os }}-dev-${{ env.cache-name }}-${{ inputs.python-version }}- + ${{ runner.os }}-dev- + ${{ runner.os }}- + + - name: Install Python dependencies + run: | + pip install --user virtualenv + virtualenv venv + . 
venv/bin/activate + pip install -e .[dev] + + - name: Run tests + env: + TEST_SANDBOXES: '1' + NO_JAVA_TESTS: '0' + NO_SIO2JAIL_TESTS: '1' + run: | + . venv/bin/activate + pytest -v diff --git a/.gitignore b/.gitignore index fd4560c..3f5bb8f 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,4 @@ twisted/plugins/dropin.cache config/supervisord.conf config/supervisord-conf-vars.conf +config/logging.json diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..5b0cdb6 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,48 @@ +FROM python:3.7 as build + +ENV PYTHONUNBUFFERED 1 + +RUN useradd -m oioioi \ + && mkdir -p /sio2/sioworkers \ + && chown -R oioioi:oioioi /sio2 + +USER oioioi +WORKDIR /sio2 + +RUN pip install --user virtualenv \ + && /home/oioioi/.local/bin/virtualenv -p python3.7 venv + +COPY --chown=oioioi:oioioi setup.py setup.cfg /sio2/sioworkers/ +COPY --chown=oioioi:oioioi sio /sio2/sioworkers/sio +COPY --chown=oioioi:oioioi twisted /sio2/sioworkers/twisted + +WORKDIR /sio2/sioworkers + +RUN . /sio2/venv/bin/activate \ + && pip install . + +FROM python:3.7 AS production + +ENV PYTHONUNBUFFERED 1 + +RUN useradd -m oioioi \ + && mkdir -p /sio2/sioworkers \ + && chown -R oioioi:oioioi /sio2 + +COPY --from=build --chown=oioioi:oioioi /sio2/venv /sio2/venv + +COPY --chown=oioioi:oioioi config/supervisord.conf.example /sio2/sioworkers/config/supervisord.conf +COPY --chown=oioioi:oioioi config/supervisord-conf-vars.conf.docker /sio2/sioworkers/config/supervisord-conf-vars.conf +COPY --chown=oioioi:oioioi config/logging.json.example /sio2/sioworkers/config/logging.json +COPY --chown=oioioi:oioioi supervisor.sh /sio2/sioworkers + +COPY --chown=oioioi:oioioi docker-entrypoint.sh /sio2 + +USER oioioi +WORKDIR /sio2/sioworkers + +ENV SIOWORKERSD_HOST="web" + +ENTRYPOINT [ "/sio2/docker-entrypoint.sh" ] + +CMD [ "/sio2/sioworkers/supervisor.sh", "startfg" ] diff --git a/README.md b/README.md index 66a030b..6959467 100644 --- a/README.md +++ b/README.md @@ -1,27 +1,93 @@ -# INSTALLATION +# sioworkers -### for python 2 installation ### -pip install -r requirements.txt +`sioworkers` is the task runner used by [SIO2](https://github.com/sio2project/oioioi) - the platform for running algorithmic/competitive programming contests. It handles all kinds of asynchronously run jobs - ranging from compiling submissions to executing them in a supervised, sandboxed environment. -python setup.py install +# Installation -### for python 3 installation ### -pip install -r requirements_py3.txt +``` +$ pip install . # for production deployments +$ pip install .[dev] # with development dependencies +``` -python setup.py install +# Tests -# TESTS +All tests in this project are managed with `tox`, which you can invoke simply by running: -### to run all tests ### -`tox` -in main directory +```console +$ tox +``` -### to run twisted tests (python2) ### -run: -trial sio.sioworkersd.twisted_t -in the directory of installation +in the main directory. -### to run twisted tests (python3) ### -run: -trial sio/sioworkersd/twisted_t -in the directory of installation \ No newline at end of file +Alternatively, you can invoke all the tests directly: + +```console +$ TEST_SANDBOXES=1 NO_JAVA_TESTS=1 NO_SIO2JAIL_TESTS=1 pytest -v . +``` +These environment variables enable/disable the sandboxed, Java, and Sio2Jail tests, respectively. +Note that Sio2Jail requires the CPU performance counters to be exposed to the system to work. 
+This usually isn't the case on VPS servers and on public/free continuous integration services, +which will cause the tests to fail. It is recommended to skip testing Sio2Jail in those cases. + +# Docker + +An official Docker image for sioworkers is available at https://hub.docker.com/r/sio2project/sioworkers. + +```console +$ docker run --rm \ + --network=sio2-network \ + --cap-add=ALL \ + --privileged \ + -e "SIOWORKERSD_HOST=oioioi" \ + -e "WORKER_ALLOW_RUN_CPU_EXEC=true" \ + -e "WORKER_CONCURRENCY=1" \ + -e "WORKER_RAM=1024" \ + --memory="1152m" \ + --cpus=2.0 \ + sio2project/sioworkers:latest +``` + +Notes: +* `--privileged` is only needed if Sio2Jail is used for judging submissions (i.e. `WORKER_ALLOW_RUN_CPU_EXEC` is set to `true`), +* You can limit the memory/CPUs available to the container as you usually would in the container runtime of your choice; + the container will determine how many workers it should expose to OIOIOI based on that. + * You can also manually override the number of workers and the amount of memory by specifying the `WORKER_CONCURRENCY` + and `WORKER_RAM` (in MiB) environment variables. +* 128 MiB is reserved for processes in the container other than the submission being judged. That is, if you want + the maximum memory available to a judged program to be 1024 MiB, limit the container's memory to + 128 MiB + (number of workers) * 1024 MiB. + +Equivalent Docker Compose configuration: + +```yaml +version: '3.8' + +... + +worker: + image: sio2project/sioworkers:latest + deploy: + resources: + limits: + cpus: '2' + memory: 1152m + cap_add: + - ALL + privileged: true + environment: + SIOWORKERSD_HOST: 'web' + WORKER_ALLOW_RUN_CPU_EXEC: 'true' + # these *will* override any automatic detection of available + # memory/cpu cores based on container limits! + WORKER_CONCURRENCY: '1' + WORKER_RAM: '1024' +``` + +## Environment variables + +The container accepts two environment variables, of which only `SIOWORKERSD_HOST` is required. + +* `SIOWORKERSD_HOST` - name of the host on which the `sioworkersd` service is available (usually the same as the main OIOIOI instance) +* `WORKER_ALLOW_RUN_CPU_EXEC` - marks this worker as suitable for judging directly on the CPU (without any isolation like Sio2Jail). + This is used in some contest types (for instance, ACM style contests); however, it isn't needed when running regular OI style + contests. diff --git a/config/supervisord-conf-vars.conf.docker b/config/supervisord-conf-vars.conf.docker new file mode 100644 index 0000000..813b53a --- /dev/null +++ b/config/supervisord-conf-vars.conf.docker @@ -0,0 +1,38 @@ +#!/bin/bash + +export WORKER_USER="$(id -u -n)" +export WORKER_HOME="/sio2/sioworkers" + +export WORKER_LOGCONFIG="${WORKER_HOME}/config/logging.json" + +# Cache cleaner config +export FILETRACKER_CACHE_CLEANER_ENABLED="true" +export CACHE_SIZE="10G" +export SCAN_INTERVAL="1h" +export CLEAN_LEVEL="50" # in percent + +# Workers config +export WORKER_ENABLED="true" + +# Set worker concurrency parameters +if [ ! -f /sys/fs/cgroup/cpu.max ] || [ $(cat /sys/fs/cgroup/cpu.max | cut -d \ -f 1) = "max" ] ; then + WORKERS_TOTAL=$(($(nproc) * 3/2)) +else + WORKERS_TOTAL=$(cat /sys/fs/cgroup/cpu.max | awk '{print int($1 / $2)}') +fi + +if [ ! 
-f /sys/fs/cgroup/memory.max ] || [ $(cat /sys/fs/cgroup/memory.max) = "max" ]; then + MEM_TOTAL=$(grep MemTotal /proc/meminfo | awk '{print int($2 / 1024)}') # in MiB +else + MEM_TOTAL=$(cat /sys/fs/cgroup/memory.max | awk '{print int($1 / 1048576)}') # in MiB +fi +# Set how much memory we should reserve for OS +OS_MEMORY=128 # in MiB + +if [ -z ${WORKER_RAM+x} ]; then + export WORKER_RAM=$(($MEM_TOTAL - $OS_MEMORY)) +fi + +if [ -z ${WORKER_CONCURRENCY+x} ]; then + export WORKER_CONCURRENCY=${WORKERS_TOTAL} +fi diff --git a/config/supervisord.conf.example b/config/supervisord.conf.example index 717544d..b50f7fb 100644 --- a/config/supervisord.conf.example +++ b/config/supervisord.conf.example @@ -21,7 +21,7 @@ stdout_logfile=%(ENV_WORKER_HOME)s/logs/filetracker-cache-cleaner.log [program:oioioiworker] -command=twistd -n -l- --pidfile=%(ENV_WORKER_HOME)s/pidfiles/oioioiworker.pid worker -c %(ENV_WORKER_CONCURRENCY)s -r %(ENV_WORKER_RAM)s -l %(ENV_WORKER_LOGCONFIG)s %(ENV_SIOWORKERSD_HOST)s +command=twistd -n -l- --pidfile=%(ENV_WORKER_HOME)s/pidfiles/oioioiworker.pid worker -c %(ENV_WORKER_CONCURRENCY)s -r %(ENV_WORKER_RAM)s -l %(ENV_WORKER_LOGCONFIG)s %(ENV_WORKER_EXTRA_FLAGS)s %(ENV_SIOWORKERSD_HOST)s autostart=%(ENV_WORKER_ENABLED)s priority=100 redirect_stderr=true @@ -39,4 +39,3 @@ supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface [supervisorctl] serverurl=unix://%(ENV_WORKER_HOME)s/supervisor.sock - diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh new file mode 100755 index 0000000..18ba6bf --- /dev/null +++ b/docker-entrypoint.sh @@ -0,0 +1,6 @@ +#!/bin/bash +set -e + +. /sio2/venv/bin/activate + +exec "$@" diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 1175c53..0000000 --- a/requirements.txt +++ /dev/null @@ -1,8 +0,0 @@ -# Some dependencies have ill-pinned dependencies these packages are installed to fix those. -pyparsing==2.4.4 -zipp==1.2.0 -idna==2.10 -PyHamcrest==1.9.0 - -# Testing utility -tox diff --git a/requirements_py3.txt b/requirements_py3.txt deleted file mode 100644 index 1160e93..0000000 --- a/requirements_py3.txt +++ /dev/null @@ -1,3 +0,0 @@ -# Requests need this version, -# whereas hyperlink from Twisted install >=2.5 which defaults to 3.1. -idna>=2.5,<3 diff --git a/setup.py b/setup.py index 8ea6041..4e81e31 100644 --- a/setup.py +++ b/setup.py @@ -1,36 +1,5 @@ -from __future__ import absolute_import -from sys import version_info from setuptools import setup, find_packages -PYTHON_VERSION = version_info[0] - -python2_specific_requirements = [ - 'enum34>=1.1,<1.2', -] - -python3_specific_requirements = [ - 'bsddb3==6.2.7', -] - -python23_universal_requirements = [ - 'filetracker>=2.1.5,<3.0', - 'simplejson==3.14.0', - 'supervisor>=4.0,<4.3', - 'Twisted==20.3.0', - 'sortedcontainers==2.1.0', - 'six', - 'pytest>=4.6,<4.7', - 'pytest-runner==5.1', - 'pytest-timeout==1.3.3', - 'urllib3<2.0', # urllib3 will drop support for python2 in version 2.0. 
-] - -if PYTHON_VERSION == 2: - final_requirements = python23_universal_requirements + python2_specific_requirements -else: - final_requirements = python23_universal_requirements + python3_specific_requirements - - setup( name = "sioworkers", version = '1.4.1', @@ -44,16 +13,24 @@ packages = find_packages() + ['twisted.plugins'], namespace_packages = ['sio', 'sio.compilers', 'sio.executors'], - install_requires=final_requirements, - - setup_requires = [ - 'pytest-runner', + install_requires = [ + 'filetracker>=2.1.5,<3.0', + 'bsddb3==6.2.7', + 'simplejson==3.14.0', + 'supervisor>=4.0,<4.3', + 'Twisted==20.3.0', + 'sortedcontainers==2.1.0', + 'six', + 'urllib3>=1.26.14,<2.0', ], - tests_require = [ - 'pytest', - 'pytest-timeout' - ], + extras_require = { + 'dev' : [ + 'pytest>=7.2.1,<8.0', + 'pytest-timeout==2.1.0', + 'tox', + ] + }, entry_points = { 'sio.jobs': [ diff --git a/sio/compilers/test/test_compilers.py b/sio/compilers/test/test_compilers.py index 0a533c6..5c16ecd 100644 --- a/sio/compilers/test/test_compilers.py +++ b/sio/compilers/test/test_compilers.py @@ -6,6 +6,7 @@ import pytest from sio.assertion_utils import ok_, eq_, timed +from sio.testing_utils import str_to_bool from sio.compilers.job import run from sio.executors.common import run as run_from_executors @@ -40,8 +41,8 @@ # SOURCES = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'sources') -ENABLE_SANDBOXED_COMPILERS = os.environ.get('TEST_SANDBOXES', False) -NO_JAVA_TESTS = os.environ.get('NO_JAVA_TESTS', False) +ENABLE_SANDBOXED_COMPILERS = str_to_bool(os.environ.get('TEST_SANDBOXES', False)) +NO_JAVA_TESTS = str_to_bool(os.environ.get('NO_JAVA_TESTS', False)) def in_(a, b, msg=None): diff --git a/sio/sioworkersd/twisted_t.py b/sio/sioworkersd/test_twisted.py similarity index 96% rename from sio/sioworkersd/twisted_t.py rename to sio/sioworkersd/test_twisted.py index 24aa1bb..fee8db9 100644 --- a/sio/sioworkersd/twisted_t.py +++ b/sio/sioworkersd/test_twisted.py @@ -122,7 +122,7 @@ def loseConnection(self): self.connected = False -class TestWorker(server.WorkerServer): +class _TestWorker(server.WorkerServer): def __init__(self, clientInfo=None): server.WorkerServer.__init__(self) self.wm = None @@ -172,7 +172,7 @@ def setUp2(self, _=None): # We must mock notifying functions to ensure proper deferred handling. self.wm.notifyOnNewWorker(self._notify_new_cb) self.wm.notifyOnLostWorker(self._notify_lost_cb) - self.worker_proto = TestWorker() + self.worker_proto = _TestWorker() return self.wm.newWorker('unique1', self.worker_proto) def setUp(self): @@ -206,7 +206,7 @@ def addWorker(id, ram, is_any_cpu=True): 'available_ram_mb': ram, 'can_run_cpu_exec': is_any_cpu, } - self.wm.newWorker(id, TestWorker(clientInfo)) + self.wm.newWorker(id, _TestWorker(clientInfo)) # Note that setUp() also adds a default worker which has 4 GiB of RAM. 
addWorker('w1', 128, is_any_cpu=True) @@ -255,24 +255,24 @@ def test_gone(self): return self.assertFailure(d, workermanager.WorkerGone) def test_duplicate(self): - w2 = TestWorker() + w2 = _TestWorker() d = self.wm.newWorker('unique2', w2) self.assertFalse(w2.transport.connected) return self.assertFailure(d, server.DuplicateWorker) def test_rejected(self): - w2 = TestWorker() + w2 = _TestWorker() w2.running = ['asdf'] w2.name = 'name2' d = self.wm.newWorker('unique2', w2) return self.assertFailure(d, server.WorkerRejected) def test_reject_incomplete_worker(self): - w3 = TestWorker({'name': 'no_concurrency'}) + w3 = _TestWorker({'name': 'no_concurrency'}) d = self.wm.newWorker('no_concurrency', w3) self.assertFailure(d, server.WorkerRejected) - w4 = TestWorker( + w4 = _TestWorker( { 'name': 'unique4', 'concurrency': 'not a number', @@ -283,7 +283,7 @@ def test_reject_incomplete_worker(self): d = self.wm.newWorker('unique4', w4) self.assertFailure(d, server.WorkerRejected) - w5 = TestWorker( + w5 = _TestWorker( { 'name': 'unique5', 'concurrency': 2, @@ -294,11 +294,11 @@ def test_reject_incomplete_worker(self): d = self.wm.newWorker('unique5', w5) self.assertFailure(d, server.WorkerRejected) - w6 = TestWorker({'name': 'no_ram', 'concurrency': 2, 'can_run_cpu_exec': True}) + w6 = _TestWorker({'name': 'no_ram', 'concurrency': 2, 'can_run_cpu_exec': True}) d = self.wm.newWorker('no_ram', w6) self.assertFailure(d, server.WorkerRejected) - w7 = TestWorker( + w7 = _TestWorker( { 'name': 'unique7', 'concurrency': 2, @@ -310,7 +310,7 @@ def test_reject_incomplete_worker(self): self.assertFailure(d, server.WorkerRejected) -class TestClient(rpc.WorkerRPC): +class _TestClient(rpc.WorkerRPC): def __init__(self, running, can_run_cpu_exec=True, name='test'): rpc.WorkerRPC.__init__(self, server=False) self.running = running @@ -368,7 +368,7 @@ def setUp(self): return d def _wrap_test(self, callback, callback_args, *client_args): - creator = protocol.ClientCreator(reactor, TestClient, *client_args) + creator = protocol.ClientCreator(reactor, _TestClient, *client_args) def cb(client): self.addCleanup(client.transport.loseConnection) diff --git a/sio/testing_utils.py b/sio/testing_utils.py new file mode 100644 index 0000000..8b64f5f --- /dev/null +++ b/sio/testing_utils.py @@ -0,0 +1,8 @@ +def str_to_bool(value): + if type(value) == bool: + return value + + if not value or type(value) != str: + return False + + return value.lower() in ("y", "yes", "true", "on", "1") diff --git a/sio/workers/executors.py b/sio/workers/executors.py index b0984d3..7119fac 100644 --- a/sio/workers/executors.py +++ b/sio/workers/executors.py @@ -663,6 +663,8 @@ def _execute(self, command, **kwargs): renv['result_code'] = 'TLE' elif renv['result_string'] == 'memory limit exceeded': renv['result_code'] = 'MLE' + elif renv['result_string'] == 'output limit exceeded': + renv['result_code'] = 'OLE' elif renv['result_string'].startswith('intercepted forbidden syscall'): renv['result_code'] = 'RV' elif renv['result_string'].startswith('process exited due to signal'): diff --git a/sio/workers/test/sources/1-sec-prog.c b/sio/workers/test/sources/1-sec-prog.c deleted file mode 100644 index 2e840a7..0000000 --- a/sio/workers/test/sources/1-sec-prog.c +++ /dev/null @@ -1,11 +0,0 @@ -#include <stdio.h> - -int main() { - int i = 2; - int j = i; - for(;i<500000000;++i) - j += i; - - printf("%d\n", j-1711656321); - return 0; -} diff --git a/sio/workers/test/sources/die-scanf.c b/sio/workers/test/sources/die-scanf.c new file mode 100644 index 
0000000..61b4906 --- /dev/null +++ b/sio/workers/test/sources/die-scanf.c @@ -0,0 +1,9 @@ +#include <stdio.h> +#include <signal.h> +#include <unistd.h> + +int main() { + int a = 0; + scanf("%d", &a); + kill(getpid(), a); +} diff --git a/sio/workers/test/sources/proc1secprog.java b/sio/workers/test/sources/proc1secprog.java deleted file mode 100644 index bafd145..0000000 --- a/sio/workers/test/sources/proc1secprog.java +++ /dev/null @@ -1,11 +0,0 @@ -public class proc1secprog { - static public void main(String[] args) { - int i = 2; - int j = i; - for(;i<500000000;++i) - j += i; - - System.out.println(j-1711656321); - - } -} diff --git a/sio/workers/test/sources/procspam.c b/sio/workers/test/sources/time_infinite.c similarity index 100% rename from sio/workers/test/sources/procspam.c rename to sio/workers/test/sources/time_infinite.c diff --git a/sio/workers/test/sources/procspam.java b/sio/workers/test/sources/time_infinite.java similarity index 85% rename from sio/workers/test/sources/procspam.java rename to sio/workers/test/sources/time_infinite.java index 8f8c275..701cf3d 100644 --- a/sio/workers/test/sources/procspam.java +++ b/sio/workers/test/sources/time_infinite.java @@ -1,4 +1,4 @@ -public class procspam { +public class time_infinite { static public void main(String[] args) { int n = 7348; int m = 43; diff --git a/sio/workers/test/sources/time_s2j_200ms.c b/sio/workers/test/sources/time_s2j_200ms.c new file mode 100644 index 0000000..7c3167d --- /dev/null +++ b/sio/workers/test/sources/time_s2j_200ms.c @@ -0,0 +1,10 @@ +#include <stdio.h> + +int main() { + // since sio2jail counts instructions and assumes each instruction + // takes the same amount of time, we can just throw enough NOPs + // in here that sio2jail will report a roughly 200ms execution time + for (int i = 0; i < 400000; i++) { + asm(".rept 1000 ; nop ; .endr"); + } +} diff --git a/sio/workers/test/sources/time_verylong.c b/sio/workers/test/sources/time_verylong.c new file mode 100644 index 0000000..a25ea44 --- /dev/null +++ b/sio/workers/test/sources/time_verylong.c @@ -0,0 +1,18 @@ +#include <stdio.h> +#include <limits.h> + +int main() { + int i = 2; + int j = i; + + // runtime (wall time) on various machines: + // * my workstation (Ryzen 9 5900X): ~1.5s + // * a dedicated judging machine (Xeon E5530): ~5.5s + for (; i < INT_MAX; i++) { + j += i; + } + + printf("%d\n", j - 1073741826); + + return 0; +} diff --git a/sio/workers/test/sources/time_verylong.java b/sio/workers/test/sources/time_verylong.java new file mode 100644 index 0000000..07ffad7 --- /dev/null +++ b/sio/workers/test/sources/time_verylong.java @@ -0,0 +1,15 @@ +import java.lang.System; +public class time_verylong { + static public void main(String[] args) { + int i = 2; + int j = i; + + // runtime (wall time) on various machines: + // * my workstation (Ryzen 9 5900X): ~0.5s + // * a dedicated judging machine (Xeon E5530): ~1.1s + for(; i < Integer.MAX_VALUE; i++) + j += i; + + System.out.println(j - 1073741826); + } +} diff --git a/sio/workers/test/test_executors.py b/sio/workers/test/test_executors.py index 10412e0..233a20f 100644 --- a/sio/workers/test/test_executors.py +++ b/sio/workers/test/test_executors.py @@ -14,6 +14,7 @@ in_, not_in_, ) +from sio.testing_utils import str_to_bool from filetracker.client.dummy import DummyClient from sio.compilers.job import run as run_compiler @@ -26,9 +27,10 @@ from sio.workers.executors import ( UnprotectedExecutor, DetailedUnprotectedExecutor, + SandboxExecutor, SupervisedExecutor, + Sio2JailExecutor, ExecError, - _SIOSupervisedExecutor, ) from 
sio.workers.file_runners import get_file_runner from sio.workers.util import tempcwd, TemporaryCwd @@ -56,8 +58,9 @@ # SOURCES = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'sources') -ENABLE_SANDBOXES = os.environ.get('TEST_SANDBOXES', False) -NO_JAVA_TESTS = os.environ.get('NO_JAVA_TESTS', False) +ENABLE_SANDBOXES = str_to_bool(os.environ.get('TEST_SANDBOXES', False)) +NO_JAVA_TESTS = str_to_bool(os.environ.get('NO_JAVA_TESTS', False)) +NO_SIO2JAIL_TESTS = str_to_bool(os.environ.get('NO_SIO2JAIL_TESTS', False)) def upload_files(): @@ -89,7 +92,7 @@ def compile(source, output='/exe', use_sandboxes=ENABLE_SANDBOXES): def compile_and_execute(source, executor, **exec_args): - cenv = compile(source, use_sandboxes=isinstance(executor, _SIOSupervisedExecutor)) + cenv = compile(source, use_sandboxes=isinstance(executor, SandboxExecutor)) frunner = get_file_runner(executor, cenv) ft.download( @@ -106,7 +109,7 @@ def compile_and_execute(source, executor, **exec_args): def compile_and_run(source, executor_env, executor, use_sandboxes=False): - renv = compile(source, use_sandboxes=isinstance(executor, _SIOSupervisedExecutor)) + renv = compile(source, use_sandboxes=isinstance(executor, SandboxExecutor)) executor_env['exe_file'] = renv['out_file'] executor_env['exec_info'] = renv['exec_info'] return run_executor(executor_env, executor, use_sandboxes=use_sandboxes) @@ -129,6 +132,9 @@ def fail(*args, **kwargs): CHECKING_EXECUTORS = [DetailedUnprotectedExecutor] SANDBOXED_CHECKING_EXECUTORS = [SupervisedExecutor] +if not NO_SIO2JAIL_TESTS: + SANDBOXED_CHECKING_EXECUTORS.append(Sio2JailExecutor) + # Status helpers def res_ok(env): eq_('OK', env['result_code']) @@ -179,7 +185,7 @@ def _make_running_cases(): for executor in executors: yield '/add_print.c', executor(), res_ok - if not NO_JAVA_TESTS: + if not NO_JAVA_TESTS and not issubclass(executor, Sio2JailExecutor): yield '/add_print.java', executor(), res_ok @@ -234,7 +240,7 @@ def res_mle_or_fail(env): oh = 2.5 if 'stack' in test else 1.2 yield "/" + test, int(MEMORY_CHECKS_LIMIT * oh), executor(), res_ok - yield "/" + test, int(MEMORY_CHECKS_LIMIT * 0.9), executor(), res_not_ok + yield "/" + test, int(MEMORY_CHECKS_LIMIT * 0.8), executor(), res_not_ok if ENABLE_SANDBOXES: executor = SupervisedExecutor @@ -258,20 +264,30 @@ def test_common_memory_limiting(source, mem_limit, executor, callback): def _make_common_time_limiting_cases(): for executor in CHECKING_EXECUTORS: - yield '/procspam.c', 500, executor(), res_tle + yield '/time_infinite.c', 500, executor(), res_tle + yield '/time_verylong.c', 100, executor(), res_tle + yield '/time_verylong.c', 10000, executor(), res_ok if not NO_JAVA_TESTS: - yield '/procspam.java', 500, executor(), res_tle + yield '/time_infinite.java', 500, executor(), res_tle + yield '/time_verylong.java', 100, executor(), res_tle + yield '/time_verylong.java', 5000, executor(), res_ok if ENABLE_SANDBOXES: for executor in SANDBOXED_CHECKING_EXECUTORS: - yield "/procspam.c", 200, executor(), res_tle - yield "/1-sec-prog.c", 10, executor(), res_tle + yield "/time_infinite.c", 200, executor(), res_tle - yield "/1-sec-prog.c", 1000, SupervisedExecutor(), res_ok - if not NO_JAVA_TESTS: - yield "/proc1secprog.java", 100, SupervisedExecutor(), res_tle - yield "/proc1secprog.java", 1000, SupervisedExecutor(), res_ok + if issubclass(executor, SupervisedExecutor): + yield "/time_verylong.c", 100, executor(), res_tle + yield "/time_verylong.c", 10000, executor(), res_ok + + if not NO_JAVA_TESTS: + yield '/time_infinite.java', 
500, executor(), res_tle + yield '/time_verylong.java', 100, executor(), res_tle + yield '/time_verylong.java', 5000, executor(), res_ok + if issubclass(executor, Sio2JailExecutor): + yield "/time_s2j_200ms.c", 100, executor(), res_tle + yield "/time_s2j_200ms.c", 1000, executor(), res_ok @pytest.mark.parametrize( "source,time_limit,executor,callback", @@ -613,7 +629,7 @@ def ret_42(env): for executor in executors: yield raises(ExecError)(_test_transparent_exec), [ - '/return-scanf.c', + '/die-scanf.c', executor(), None, {}, @@ -636,7 +652,7 @@ def ret_42(env): checking_executors = checking_executors + SANDBOXED_CHECKING_EXECUTORS for executor in checking_executors: - yield _test_exec, ['/return-scanf.c', executor(), res_re(42), {}] + yield _test_exec, ['/die-scanf.c', executor(), res_re(42), {}] if ENABLE_SANDBOXES: yield _test_exec, [ @@ -679,8 +695,7 @@ def inner(env): checking_executors = checking_executors + SANDBOXED_CHECKING_EXECUTORS for executor in checking_executors: - yield ['/add_print.c', executor(), ole, {'output_limit': 10}] - yield ['/iospam-hard.c', executor(), ole, {}] # Default + yield ['/iospam-hard.c', executor(), ole, {'capture_output': 'True'}] # Default @pytest.mark.parametrize( @@ -712,12 +727,13 @@ def test_signals(args): def _make_rule_violation_cases(): - checking_executors = [] if ENABLE_SANDBOXES: - checking_executors = checking_executors + SANDBOXED_CHECKING_EXECUTORS + for executor in SANDBOXED_CHECKING_EXECUTORS: + if issubclass(executor, Sio2JailExecutor): + # sio2jail appears to be allowing open() syscalls + continue - for executor in checking_executors: - yield ['/open.c', executor(), res_rv('opening files'), {}] + yield ['/open.c', executor(), res_rv('opening files'), {}] @pytest.mark.parametrize( @@ -770,7 +786,7 @@ def syscall_limit(env): for executor in checking_executors: yield [ - '/procspam.c', + '/time_infinite.c', executor(), real_tle, {'real_time_limit': 1000, 'time_limit': 10000}, diff --git a/supervisor.sh b/supervisor.sh index f7ee05b..4df6dae 100755 --- a/supervisor.sh +++ b/supervisor.sh @@ -11,6 +11,7 @@ Options: Commands: start starts supervisor + startfg starts supervisor in foreground stop stops supervisor restart restart supervisor status shows status of daemons that supervisor run @@ -25,7 +26,7 @@ while [ -n "$1" ]; do help exit 0 ;; - "start"|"stop"|"restart"|"status"|"shell") + "start"|"startfg"|"stop"|"restart"|"status"|"shell") command="$1" ;; *) @@ -54,7 +55,10 @@ if ! [ -e supervisord.conf ] || \ fi # Activate venv: -source ../../venv/bin/activate +if [ -d "../../venv" ] +then + source ../../venv/bin/activate +fi # Set all config variables. 
source supervisord-conf-vars.conf @@ -72,20 +76,23 @@ mkdir -pv "${WORKER_HOME}"/{logs,pidfiles} # And run supervisor.* case "$command" in "start") - supervisord + exec supervisord + ;; + "startfg") + exec supervisord -n ;; "stop") - supervisorctl shutdown + exec supervisorctl shutdown ;; "restart") supervisorctl shutdown - supervisord + exec supervisord ;; "status") - supervisorctl status + exec supervisorctl status ;; "shell") echo "Caution: In order to reload config, run \`$0 restart\`" - supervisorctl + exec supervisorctl ;; esac diff --git a/tox.ini b/tox.ini index 02e69d8..d3d9895 100644 --- a/tox.ini +++ b/tox.ini @@ -1,20 +1,14 @@ [tox] -envlist = py27,py37 +envlist = py37 [testenv] setenv = TEST_SANDBOXES = 1 NO_JAVA_TESTS = 1 + NO_SIO2JAIL_TESTS = 0 deps = pytest commands = pytest -v {posargs} -[testenv:py27] -usedevelop = true -deps = - -r requirements.txt - bsddb3 - [testenv:py37] usedevelop = true -deps = - -r requirements_py3.txt +extras = dev