From ffc15376e4128121ea430bee87c417c4790b68ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Haziza?= Date: Mon, 20 Aug 2018 14:01:12 +0200 Subject: [PATCH 01/26] Upgrading the use CEGA user ids and no more fuse --- docker/.gitignore | 5 + docker/Makefile | 33 ++ docker/README.md | 43 +++ docker/bootstrap/boot.sh | 69 ++++ docker/bootstrap/cega.sh | 248 +++++++++++++ docker/bootstrap/defs.sh | 57 +++ docker/bootstrap/lega.sh | 318 +++++++++++++++++ docker/bootstrap/settings.rc | 34 ++ docker/bootstrap/troubleshooting.md | 17 + docker/images/Makefile | 32 ++ docker/images/README.md | 39 +++ docker/images/base/Dockerfile | 49 +++ docker/images/cega/eureka.py | 72 ++++ docker/images/cega/server.py | 140 ++++++++ docker/images/cega/users.html | 33 ++ docker/images/inbox/Dockerfile | 107 ++++++ docker/images/inbox/banner | 1 + docker/images/inbox/entrypoint.sh | 48 +++ docker/images/inbox/notify_cega.patch | 153 ++++++++ docker/images/inbox/pam.ega | 5 + docker/images/inbox/sshd_config | 29 ++ docker/images/mq/defs.json | 20 ++ docker/images/mq/entrypoint.sh | 94 +++++ docker/images/mq/rabbitmq.config | 15 + docker/test/.gitignore | 5 + docker/test/Makefile | 63 ++++ docker/test/README.md | 12 + .../docker => docker}/tests/.gitignore | 0 .../docker => docker}/tests/README.md | 0 {deployments/docker => docker}/tests/pom.xml | 0 .../se/nbis/lega/cucumber/CommonTests.java | 0 .../java/se/nbis/lega/cucumber/Context.java | 0 .../nbis/lega/cucumber/RobustnessTests.java | 0 .../java/se/nbis/lega/cucumber/Utils.java | 0 .../lega/cucumber/hooks/BeforeAfterHooks.java | 0 .../nbis/lega/cucumber/pojo/FileStatus.java | 0 .../lega/cucumber/publisher/Checksum.java | 0 .../nbis/lega/cucumber/publisher/Message.java | 0 .../lega/cucumber/steps/Authentication.java | 0 .../nbis/lega/cucumber/steps/Ingestion.java | 0 .../nbis/lega/cucumber/steps/Robustness.java | 0 .../nbis/lega/cucumber/steps/Uploading.java | 0 .../src/test/resources/config.properties | 0 
.../cucumber/features/authentication.feature | 0 .../cucumber/features/checksums.feature | 0 .../cucumber/features/ingestion.feature | 0 .../cucumber/features/robustness.feature | 0 .../cucumber/features/uploading.feature | 0 .../test/resources/simplelogger.properties | 0 lega/conf/defaults.ini | 2 +- lega/conf/loggers/console.yaml | 7 +- lega/conf/loggers/silent.yaml | 27 ++ lega/inbox.py | 330 ++++++------------ lega/utils/db.py | 86 ++--- requirements.txt | 9 +- 55 files changed, 1902 insertions(+), 300 deletions(-) create mode 100644 docker/.gitignore create mode 100644 docker/Makefile create mode 100644 docker/README.md create mode 100755 docker/bootstrap/boot.sh create mode 100644 docker/bootstrap/cega.sh create mode 100644 docker/bootstrap/defs.sh create mode 100755 docker/bootstrap/lega.sh create mode 100644 docker/bootstrap/settings.rc create mode 100644 docker/bootstrap/troubleshooting.md create mode 100644 docker/images/Makefile create mode 100644 docker/images/README.md create mode 100644 docker/images/base/Dockerfile create mode 100644 docker/images/cega/eureka.py create mode 100644 docker/images/cega/server.py create mode 100644 docker/images/cega/users.html create mode 100644 docker/images/inbox/Dockerfile create mode 100644 docker/images/inbox/banner create mode 100755 docker/images/inbox/entrypoint.sh create mode 100644 docker/images/inbox/notify_cega.patch create mode 100644 docker/images/inbox/pam.ega create mode 100644 docker/images/inbox/sshd_config create mode 100644 docker/images/mq/defs.json create mode 100644 docker/images/mq/entrypoint.sh create mode 100644 docker/images/mq/rabbitmq.config create mode 100644 docker/test/.gitignore create mode 100644 docker/test/Makefile create mode 100644 docker/test/README.md rename {deployments/docker => docker}/tests/.gitignore (100%) rename {deployments/docker => docker}/tests/README.md (100%) rename {deployments/docker => docker}/tests/pom.xml (100%) rename {deployments/docker => 
docker}/tests/src/test/java/se/nbis/lega/cucumber/CommonTests.java (100%) rename {deployments/docker => docker}/tests/src/test/java/se/nbis/lega/cucumber/Context.java (100%) rename {deployments/docker => docker}/tests/src/test/java/se/nbis/lega/cucumber/RobustnessTests.java (100%) rename {deployments/docker => docker}/tests/src/test/java/se/nbis/lega/cucumber/Utils.java (100%) rename {deployments/docker => docker}/tests/src/test/java/se/nbis/lega/cucumber/hooks/BeforeAfterHooks.java (100%) rename {deployments/docker => docker}/tests/src/test/java/se/nbis/lega/cucumber/pojo/FileStatus.java (100%) rename {deployments/docker => docker}/tests/src/test/java/se/nbis/lega/cucumber/publisher/Checksum.java (100%) rename {deployments/docker => docker}/tests/src/test/java/se/nbis/lega/cucumber/publisher/Message.java (100%) rename {deployments/docker => docker}/tests/src/test/java/se/nbis/lega/cucumber/steps/Authentication.java (100%) rename {deployments/docker => docker}/tests/src/test/java/se/nbis/lega/cucumber/steps/Ingestion.java (100%) rename {deployments/docker => docker}/tests/src/test/java/se/nbis/lega/cucumber/steps/Robustness.java (100%) rename {deployments/docker => docker}/tests/src/test/java/se/nbis/lega/cucumber/steps/Uploading.java (100%) rename {deployments/docker => docker}/tests/src/test/resources/config.properties (100%) rename {deployments/docker => docker}/tests/src/test/resources/cucumber/features/authentication.feature (100%) rename {deployments/docker => docker}/tests/src/test/resources/cucumber/features/checksums.feature (100%) rename {deployments/docker => docker}/tests/src/test/resources/cucumber/features/ingestion.feature (100%) rename {deployments/docker => docker}/tests/src/test/resources/cucumber/features/robustness.feature (100%) rename {deployments/docker => docker}/tests/src/test/resources/cucumber/features/uploading.feature (100%) rename {deployments/docker => docker}/tests/src/test/resources/simplelogger.properties (100%) create mode 100644 
lega/conf/loggers/silent.yaml diff --git a/docker/.gitignore b/docker/.gitignore new file mode 100644 index 00000000..c1e0e313 --- /dev/null +++ b/docker/.gitignore @@ -0,0 +1,5 @@ +.env +.env.201* +private* +.err +images/inbox/sftp-server.c* diff --git a/docker/Makefile b/docker/Makefile new file mode 100644 index 00000000..d9189dda --- /dev/null +++ b/docker/Makefile @@ -0,0 +1,33 @@ +ARGS= + +.PHONY: help bootstrap private network up down clean ps + +help: + @echo "Usage: make \n" + @echo "where is: 'bootstrap', 'up' 'ps', 'down', 'network' or 'clean'\n" + +private/cega.yml private/lega.yml private bootstrap: + @docker run --rm -it \ + -v /dev/urandom:/dev/random \ + -v ${PWD}:/ega \ + -v ${PWD}/../extras/db.sql:/tmp/db.sql \ + -v ${PWD}/../extras/generate_pgp_key.py:/tmp/generate_pgp_key.py \ + -v ${PWD}/../extras/rabbitmq_hash.py:/tmp/rabbitmq_hash.py \ + --entrypoint /ega/bootstrap/boot.sh \ + nbisweden/ega-base ${ARGS} + +network: + @docker network inspect cega &>/dev/null || docker network create cega &>/dev/null + +up:network private/cega.yml private/lega.yml + @docker-compose -f private/cega.yml -f private/lega.yml up -d + +ps: + @docker-compose ps + +down: #.env + @[[ -f private/cega.yml ]] && [[ -f private/lega.yml ]] && docker-compose down -v || echo "No recipe to bring containers down\nHave you bootstrapped? (ie make bootstrap)" + +clean: + rm -rf .env private + -docker network rm cega &>/dev/null diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 00000000..61685e81 --- /dev/null +++ b/docker/README.md @@ -0,0 +1,43 @@ +# Deploy LocalEGA using Docker + +## Bootstrap + +First [create the EGA docker images](images) beforehand, with `make -C images`. 
+ +You can then [generate the private data](bootstrap), with either: + + make bootstrap + +The command will create a `.env` file and a `private` folder holding +the necessary data (ie the PGP key, the Main LEGA password, the SSL +certificates for internal communication, passwords, default users, +etc...) + +It will also create a docker network `cega` used by the (fake) CentralEGA instance, +separate from to network used by the LocalEGA instance. + +These networks are reflected in their corresponding YML files +* `private/cega.yml` +* `private/lega.yml` + +The passwords are in `private//.trace` and the errors (if +any) are in `private/.err`. + +## Running + + docker-compose up -d + +Use `docker-compose up -d --scale ingest=3` instead, if you want to +start 3 ingestion workers. + +Note that, in this architecture, we use separate volumes, e.g. for +the inbox area, for the vault (here backed by S3). They +will be created on-the-fly by docker-compose. + +## Stopping + + docker-compose down -v + +## Status + + docker-compose ps diff --git a/docker/bootstrap/boot.sh b/docker/bootstrap/boot.sh new file mode 100755 index 00000000..f13ba49e --- /dev/null +++ b/docker/bootstrap/boot.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash +set -e + +[ ${BASH_VERSINFO[0]} -lt 4 ] && echo 'Bash 4 (or higher) is required' 1>&2 && exit 1 + +HERE=$(dirname ${BASH_SOURCE[0]}) +PRIVATE=${HERE}/../private +DOT_ENV=${HERE}/../.env +EXTRAS=${HERE}/../../../extras + +# Defaults +VERBOSE=no +FORCE=yes +OPENSSL=openssl +INBOX=fuse + +function usage { + echo "Usage: $0 [options]" + echo -e "\nOptions are:" + echo -e "\t--openssl \tPath to the Openssl executable [Default: ${OPENSSL}]" + echo -e "\t--inbox \tSelect inbox \"fuse\" or \"mina\" [Default: ${DEFAULT_INBOX}]" + echo "" + echo -e "\t--verbose, -v \tShow verbose output" + echo -e "\t--polite, -p \tDo not force the re-creation of the subfolders. Ask instead" + echo -e "\t--help, -h \tOutputs this message and exits" + echo -e "\t-- ... 
\tAny other options appearing after the -- will be ignored" + echo "" +} + + +# While there are arguments or '--' is reached +while [[ $# -gt 0 ]]; do + case "$1" in + --help|-h) usage; exit 0;; + --verbose|-v) VERBOSE=yes;; + --polite|-p) FORCE=no;; + --inbox) INBOX=$2; shift;; + --openssl) OPENSSL=$2; shift;; + --) shift; break;; + *) echo "$0: error - unrecognized option $1" 1>&2; usage; exit 1;; esac + shift +done + +[[ $VERBOSE == 'no' ]] && echo -en "Bootstrapping " + +source ${HERE}/defs.sh + +[[ -x $(readlink ${OPENSSL}) ]] && echo "${OPENSSL} is not executable. Adjust the setting with --openssl" && exit 3 + +rm_politely ${PRIVATE} +mkdir -p ${PRIVATE}/{cega,lega} +exec 2>${PRIVATE}/.err +backup ${DOT_ENV} +cat > ${DOT_ENV} < ${PRIVATE}/cega/users/john.yml < ${PRIVATE}/cega/users/jane.yml < ${PRIVATE}/cega/users/taylor.yml < ${PRIVATE}/cega/users/test.yml < ${PRIVATE}/cega/users/hello.yml <> ${PRIVATE}/cega/.trace < ${PRIVATE}/cega.yml < ${PRIVATE}/cega/env < ${PRIVATE}/cega/mq/defs.json < ${PRIVATE}/cega/mq/rabbitmq.config < $1 \xF0\x9F\x91\x8D" + else + echo -e " \xF0\x9F\x91\x8D" + fi +} + + +function backup { + local target=$1 + if [[ -e $target ]] && [[ $FORCE != 'yes' ]]; then + echomsg "Backing up $target" + mv -f $target $target.$(date +"%Y-%m-%d_%H:%M:%S") + fi +} + +function rm_politely { + local FOLDER=$1 + + if [[ -d $FOLDER ]]; then + if [[ $FORCE == 'yes' ]]; then + rm -rf $FOLDER + else + # Asking + echo "[Warning] The folder \"$FOLDER\" already exists. " + while : ; do # while = In a subshell + echo -n "[Warning] " + echo -n -e "Proceed to re-create it? [y/N] " + read -t 10 yn + case $yn in + y) rm -rf $FOLDER; break;; + N) echo "Ok. Choose another private directory. 
Exiting"; exit 1;; + *) echo "Eh?";; + esac + done + fi + fi +} + +function generate_password { + local size=${1:-16} # defaults to 16 characters + python3.6 -c "import secrets,string;print(''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(${size})))" +} + diff --git a/docker/bootstrap/lega.sh b/docker/bootstrap/lega.sh new file mode 100755 index 00000000..98649dc0 --- /dev/null +++ b/docker/bootstrap/lega.sh @@ -0,0 +1,318 @@ +#!/usr/bin/env bash + +mkdir -p $PRIVATE/lega/{pgp,certs,logs} +chmod 700 $PRIVATE/lega/{pgp,certs,logs} + +echomsg "\t* the PGP key" + +# Running in a container +GEN_KEY="python3.6 /tmp/generate_pgp_key.py" + +# Python 3.6 +${GEN_KEY} "${PGP_NAME}" "${PGP_EMAIL}" "${PGP_COMMENT}" --passphrase "${PGP_PASSPHRASE}" --pub ${PRIVATE}/lega/pgp/ega.pub --priv ${PRIVATE}/lega/pgp/ega.sec --armor +chmod 644 ${PRIVATE}/lega/pgp/ega.pub + +${GEN_KEY} "${PGP_NAME}" "${PGP_EMAIL}" "${PGP_COMMENT}" --passphrase "${PGP_PASSPHRASE}" --pub ${PRIVATE}/lega/pgp/ega2.pub --priv ${PRIVATE}/lega/pgp/ega2.sec --armor +chmod 644 ${PRIVATE}/lega/pgp/ega2.pub + +######################################################################### + +echomsg "\t* the SSL certificates" +${OPENSSL} req -x509 -newkey rsa:2048 -keyout ${PRIVATE}/lega/certs/ssl.key -nodes -out ${PRIVATE}/lega/certs/ssl.cert -sha256 -days 1000 -subj ${SSL_SUBJ} + +######################################################################### + +echomsg "\t* keys.ini" +${OPENSSL} enc -aes-256-cbc -salt -out ${PRIVATE}/lega/keys.ini.enc -md md5 -k ${KEYS_PASSWORD} < ${PRIVATE}/lega/conf.ini <> ${PRIVATE}/lega/db.sql + + +######################################################################### +# Populate env-settings for docker compose +######################################################################### + +# For the moment, still using guest:guest +echomsg "\t* Local broker to Central EGA broker credentials" +cat > ${PRIVATE}/lega/mq.env <> ${PRIVATE}/lega.yml <> 
${PRIVATE}/lega/.trace < or latest | SFTP server on top of `nbisweden/ega-base:latest` | +| nbisweden/ega-base | or latest | Base Image for all services including python 3.6.1 | + + +We also use 2 stubbing services in order to fake the necessary Central EGA components + +| Repository | Tag | Role | +|------------|:--------:|------| +| cega-users | or latest | Sets up a postgres database with appropriate tables | +| cega-mq | or latest | Sets up a RabbitMQ message broker with appropriate accounts, exchanges, queues and bindings | +| cega-eureka | or latest | Sets up a fake Eureka service discovery server in order to make the LocalEGA Keyserver register | + +## Logging + +We also make use of ELK stack for logging thus the `elasticsearch-oss` `logstash-oss` and `kibana-oss` will be pulled from Docker hub. diff --git a/docker/images/base/Dockerfile b/docker/images/base/Dockerfile new file mode 100644 index 00000000..8f25499f --- /dev/null +++ b/docker/images/base/Dockerfile @@ -0,0 +1,49 @@ +FROM centos:7.4.1708 +LABEL maintainer "NBIS System Developers" + +RUN yum -y install https://centos7.iuscommunity.org/ius-release.rpm && \ + yum -y install epel-release && \ + yum -y update && \ + yum -y install git gcc make bzip2 curl \ + zlib-devel bzip2-devel unzip \ + wget dpkg \ + openssl \ + python36u python36u-pip + +RUN [[ -e /lib64/libpython3.6m.so ]] || ln -s /lib64/libpython3.6m.so.1.0 /lib64/libpython3.6m.so + +ENV GOSU_VERSION 1.10 +ENV GPG_KEYS B42F6819007F00F88E364FD4036A9C25BF357DD4 +RUN set -ex && \ + dpkgArch="$(dpkg --print-architecture | awk -F- '{ print $NF }')" && \ + wget -O /usr/bin/gosu "https://github.com/tianon/gosu/releases/download/${GOSU_VERSION}/gosu-${dpkgArch}" && \ + wget -O /tmp/gosu.asc "https://github.com/tianon/gosu/releases/download/${GOSU_VERSION}/gosu-${dpkgArch}.asc" + +# verify the signature +RUN export GNUPGHOME="$(mktemp -d)" && \ + (gpg --keyserver ha.pool.sks-keyservers.net --recv-keys "$GPG_KEYS" \ + || gpg --keyserver pgp.mit.edu 
--recv-keys "$GPG_KEYS" \ + || gpg --keyserver keyserver.pgp.com --recv-keys "$GPG_KEYS") && \ + gpg --keyserver hkps://hkps.pool.sks-keyservers.net --recv-keys && \ + gpg --batch --verify /tmp/gosu.asc /usr/bin/gosu && \ + rm -r "$GNUPGHOME" /tmp/gosu.asc && \ + chmod +x /usr/bin/gosu + +# verify that the binary works +RUN gosu nobody true && \ + yum -y remove dpkg + +RUN yum clean all && rm -rf /var/cache/yum + +RUN groupadd -r lega +# useradd -M -r -g lega lega + +ARG PIP_EGA_PACKAGES= +RUN pip3.6 install --upgrade pip && \ + pip3.6 install PyYaml ${PIP_EGA_PACKAGES} + +RUN pip3.6 install git+https://github.com/NBISweden/LocalEGA.git@${checkout} + +RUN pip3.6 install git+https://github.com/NBISweden/LocalEGA-cryptor.git + + diff --git a/docker/images/cega/eureka.py b/docker/images/cega/eureka.py new file mode 100644 index 00000000..65eca050 --- /dev/null +++ b/docker/images/cega/eureka.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python3 + +'''\ +A fake Eureka server. + +Spinning the whole Spring Framework Netflix Eureka would take too long, +thus we are going to fake the responses. 
+''' + +import sys +import asyncio +from aiohttp import web + +import logging as LOG + + +routes = web.RouteTableDef() + +# Followjng the responses from https://github.com/Netflix/eureka/wiki/Eureka-REST-operations + + +@routes.post('/eureka/apps/{app_name}') +async def register(request): + """No matter the app it should register with success response 204.""" + return web.HTTPNoContent() + +@routes.delete('/eureka/apps/{app_name}/{instance_id}') +async def deregister(request): + """No matter the app it should deregister with success response 200.""" + return web.HTTPOk() + +@routes.put('/eureka/apps/{app_name}/{instance_id}') +async def heartbeat(request): + """No matter the app it should renew lease with success response 200.""" + return web.HTTPOk() + +async def init(app): + '''Initialization running before the loop.run_forever''' + LOG.info('Initializing') + +async def shutdown(app): + '''Function run after a KeyboardInterrupt. After that: cleanup''' + LOG.info('Shutting down the database engine') + +async def cleanup(app): + '''Function run after a KeyboardInterrupt. 
Right after, the loop is closed''' + LOG.info('Cancelling all pending tasks') + + +def main(args=None): + """Where the magic happens.""" + + host = sys.argv[1] if len(sys.argv) > 1 else "0.0.0.0" + port = 8761 + sslcontext = None + + loop = asyncio.get_event_loop() + eureka = web.Application(loop=loop) + eureka.router.add_routes(routes) + + # Registering some initialization and cleanup routines + LOG.info('Setting up callbacks') + eureka.on_startup.append(init) + eureka.on_shutdown.append(shutdown) + eureka.on_cleanup.append(cleanup) + + LOG.info(f"Start fake eureka on {host}:{port}") + web.run_app(eureka, host=host, port=port, shutdown_timeout=0, ssl_context=sslcontext) + + +if __name__ == '__main__': + main() diff --git a/docker/images/cega/server.py b/docker/images/cega/server.py new file mode 100644 index 00000000..901102f9 --- /dev/null +++ b/docker/images/cega/server.py @@ -0,0 +1,140 @@ +#!/usr/bin/env python3.6 +# -*- coding: utf-8 -*- + +''' +Test server to act as CentralEGA endpoint for users + +:author: Frédéric Haziza +:copyright: (c) 2017, NBIS System Developers. 
+''' + +import sys +import os +import asyncio +import ssl +import yaml +from pathlib import Path +from functools import wraps +from base64 import b64decode + +import logging as LOG + +from aiohttp import web +import jinja2 +import aiohttp_jinja2 + +instances = {} +for instance in os.environ.get('LEGA_INSTANCES','').strip().split(','): + instances[instance] = (Path(f'/cega/users/{instance}'), os.environ[f'CEGA_REST_{instance}_PASSWORD']) + +def protected(func): + @wraps(func) + def wrapped(request): + auth_header = request.headers.get('AUTHORIZATION') + if not auth_header: + raise web.HTTPUnauthorized(text=f'Protected access\n') + _, token = auth_header.split(None, 1) # Skipping the Basic keyword + instance,passwd = b64decode(token).decode().split(':', 1) + info = instances.get(instance) + if info is not None and info[1] == passwd: + request.match_info['lega'] = instance + request.match_info['users_dir'] = info[0] + return func(request) + raise web.HTTPUnauthorized(text=f'Protected access\n') + return wrapped + + +@aiohttp_jinja2.template('users.html') +async def index(request): + users={} + for instance, (users_dir, _) in instances.items(): + users[instance]= {} + files = (f for f in users_dir.iterdir() if f.is_file()) + for f in files: + with open(f, 'r') as stream: + users[instance][f.stem] = yaml.load(stream) + return { "cega_users": users } + +@protected +async def user(request): + name = request.match_info['name'] + lega_instance = request.match_info['lega'] + users_dir = request.match_info['users_dir'] + + try: + with open(f'{users_dir}/{name}.yml', 'r') as stream: + d = yaml.load(stream) + return web.json_response(d) + # json_data = { + # 'username': d.get("username", None), + # 'password_hash': d.get("password_hash", None), + # 'pubkey': d.get("pubkey", None), + # 'uid': int(d.get("uid", None)), + # 'gecos': d.get("gecos", "EGA User"), + # } + #return web.json_response(json_data) + except OSError: + raise web.HTTPBadRequest(text=f'No info for user {name} in 
LocalEGA {lega_instance}... yet\n') + +@protected +async def userid(request): + uid = request.match_info['id'] + lega_instance = request.match_info['lega'] + users_dir = request.match_info['users_dir'] + + try: + with open(f'{users_dir}_ids/{uid}.yml', 'r') as stream: + d = yaml.load(stream) + return web.json_response(d) + # json_data = { + # 'username': d.get("username", None), + # 'password_hash': d.get("password_hash", None), + # 'pubkey': d.get("pubkey", None), + # 'uid': int(d.get("uid", None)), + # 'gecos': d.get("gecos", "EGA User"), + # } + #return web.json_response(json_data) + except OSError: + raise web.HTTPBadRequest(text=f'No info for user id {userid} in LocalEGA {lega_instance}... yet\n') + +# Unprotected access +async def pgp_pbk(request): + name = request.match_info['id'] + try: + with open(f'/ega/users/pgp/{name}.pub', 'r') as stream: # 'rb' + return web.Response(text=stream.read()) # .hex() + except OSError: + raise web.HTTPBadRequest(text=f'No info about {name} in CentralEGA... yet\n') + +def main(): + + host = sys.argv[1] if len(sys.argv) > 1 else "0.0.0.0" + + # ssl_certfile = Path(CONF.get('keyserver', 'ssl_certfile')).expanduser() + # ssl_keyfile = Path(CONF.get('keyserver', 'ssl_keyfile')).expanduser() + # LOG.debug(f'Certfile: {ssl_certfile}') + # LOG.debug(f'Keyfile: {ssl_keyfile}') + + # sslcontext = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) + # sslcontext.check_hostname = False + # sslcontext.load_cert_chain(ssl_certfile, ssl_keyfile) + sslcontext = None + + loop = asyncio.get_event_loop() + server = web.Application(loop=loop) + + template_loader = jinja2.FileSystemLoader("/cega") + aiohttp_jinja2.setup(server, loader=template_loader) + + # Registering the routes + server.router.add_get( '/' , index , name='root') + server.router.add_get( '/user/{name}', user , name='user') + server.router.add_get( '/id/{id}' , userid , name='id') + server.router.add_get( '/pgp/{id}' , pgp_pbk, name='pgp') + + # And ...... cue music! 
+ web.run_app(server, host=host, port=80, shutdown_timeout=0, ssl_context=sslcontext) + +if __name__ == '__main__': + main() + diff --git a/docker/images/cega/users.html b/docker/images/cega/users.html new file mode 100644 index 00000000..fe1f279d --- /dev/null +++ b/docker/images/cega/users.html @@ -0,0 +1,33 @@ + + + + + Central EGA + + + +

Central EGA Users

+ + {% for instance, lega_users in cega_users.items() %} +

{{ instance }}

+
+ {% for username, data in lega_users.items() %} +
{{ username }} (User ID: {{ data['uid'] }})
+
password_hash{{ data['password_hash'] }}
+
pubkey{{ data['pubkey'] }}
+
gecos{{ data['gecos'] }}
+
groups{{ data['groups'] }}
+ {% endfor %} +
+ {% endfor %} + + + diff --git a/docker/images/inbox/Dockerfile b/docker/images/inbox/Dockerfile new file mode 100644 index 00000000..275fb048 --- /dev/null +++ b/docker/images/inbox/Dockerfile @@ -0,0 +1,107 @@ +FROM nbisweden/ega-base +LABEL maintainer "NBIS System Developers" + +EXPOSE 9000 +VOLUME /ega/inbox + +RUN yum -y install autoconf patch openssl-devel pam-devel libcurl-devel nss-tools readline-devel +# nc nmap tcpdump lsof strace bash-completion bash-completion-extras + +################################################# +## +## Install SQLite 3.24 +## +################################################# +ARG SQLITE_VERSION=3240000 +RUN cd && wget https://sqlite.org/2018/sqlite-autoconf-${SQLITE_VERSION}.tar.gz && \ + tar xzf sqlite-autoconf-${SQLITE_VERSION}.tar.gz && \ + cd sqlite-autoconf-${SQLITE_VERSION} && \ + ./configure && \ + make && make install && \ + echo '/usr/local/lib' >> /etc/ld.so.conf.d/ega.conf && \ + cd && rm -rf sqlite-autoconf-${SQLITE_VERSION}{,.tar.gz} + + +################################################# +## +## Install EGA NSS+PAM +## +################################################# +ARG AUTH_BRANCH=master +RUN mkdir -p /usr/local/lib/ega && \ + echo '/usr/local/lib/ega' >> /etc/ld.so.conf.d/ega.conf && \ + echo 'Welcome to Local EGA' > /ega/banner && \ + cp /etc/nsswitch.conf /etc/nsswitch.conf.bak && \ + sed -i -e 's/^passwd:\(.*\)files/passwd:\1files ega/' /etc/nsswitch.conf && \ + git clone https://github.com/NBISweden/LocalEGA-auth /root/ega-auth && \ + cd /root/ega-auth/src && \ + git checkout ${AUTH_BRANCH} && \ + make install clean + +RUN ldconfig -v + +################################################# +## +## Install OpenSSH from sources +## +################################################# +ARG OPENSSH_DIR=/opt/openssh +ARG SSHD_UID=74 +ARG SSHD_GID=74 +ARG OPENSSH_TAG=V_7_7_P1 + +RUN getent group ssh_keys >/dev/null || groupadd -r ssh_keys || : +RUN getent group sshd || groupadd -g ${SSHD_GID} -r sshd + +# RUN mkdir 
-p /var/empty && \ +# useradd -c "Privilege-separated SSH" \ +# -u ${SSHD_UID} \ +# -g sshd \ +# -s /sbin/nologin \ +# -r \ +# -m -d /var/empty/sshd sshd +#### Weird.... useradd does segfault. + +RUN sed -i -e '/sshd/ d' /etc/passwd && \ + echo "sshd:x:${SSHD_UID}:${SSHD_GID}:Privilege-separated SSH:/var/empty/sshd:/sbin/nologin" >> /etc/passwd && \ + mkdir -p /var/empty/sshd && chmod 700 /var/empty/sshd + +RUN git clone --branch ${OPENSSH_TAG} git://anongit.mindrot.org/openssh.git /root/openssh && \ + cd /root/openssh && \ + autoreconf && \ + ./configure \ + --prefix=${OPENSSH_DIR} \ + --with-privsep-path=/var/empty/sshd \ + --with-privsep-user=sshd \ + --with-ssl-engine \ + --disable-strip \ + --without-smartcard \ + --with-pam \ + --without-selinux \ + --without-kerberos5 \ + --without-libedit + +COPY notify_cega.patch /root/openssh/notify_cega.patch + +RUN cd /root/openssh && \ + patch -p0 < notify_cega.patch && \ + make && make install + +RUN rm -f /etc/ssh/ssh_host_{rsa,dsa,ecdsa,ed25519}_key && \ + ${OPENSSH_DIR}/bin/ssh-keygen -t rsa -N '' -f /etc/ssh/ssh_host_rsa_key && \ + ${OPENSSH_DIR}/bin/ssh-keygen -t dsa -N '' -f /etc/ssh/ssh_host_dsa_key && \ + ${OPENSSH_DIR}/bin/ssh-keygen -t ecdsa -N '' -f /etc/ssh/ssh_host_ecdsa_key && \ + ${OPENSSH_DIR}/bin/ssh-keygen -t ed25519 -N '' -f /etc/ssh/ssh_host_ed25519_key + +RUN rm -rf /root/openssh && \ + cp ${OPENSSH_DIR}/sbin/sshd ${OPENSSH_DIR}/sbin/ega + +################################################# + +COPY banner /ega/banner +COPY pam.ega /etc/pam.d/ega +COPY sshd_config /etc/ega/sshd_config + +COPY entrypoint.sh /usr/local/bin/entrypoint.sh +RUN chmod 755 /usr/local/bin/entrypoint.sh +ENTRYPOINT ["entrypoint.sh"] diff --git a/docker/images/inbox/banner b/docker/images/inbox/banner new file mode 100644 index 00000000..be26bc09 --- /dev/null +++ b/docker/images/inbox/banner @@ -0,0 +1 @@ +Welcome to Local EGA Demo instance diff --git a/docker/images/inbox/entrypoint.sh b/docker/images/inbox/entrypoint.sh 
new file mode 100755 index 00000000..34922182 --- /dev/null +++ b/docker/images/inbox/entrypoint.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +set -e + +# Some env must be defined +[[ -z "${CEGA_ENDPOINT}" ]] && echo 'Environment CEGA_ENDPOINT is empty' 1>&2 && exit 1 +[[ -z "${CEGA_ENDPOINT_CREDS}" ]] && echo 'Environment CEGA_ENDPOINT_CREDS is empty' 1>&2 && exit 1 +# Check if set +[[ -z "${CEGA_ENDPOINT_JSON_PREFIX+x}" ]] && echo 'Environment CEGA_ENDPOINT_JSON_PREFIX must be set' 1>&2 && exit 1 + +EGA_GID=$(getent group lega | awk -F: '{ print $3 }') + +cat > /etc/ega/auth.conf < + #include + ++#include ++#include ++#include ++ + #include "xmalloc.h" + #include "sshbuf.h" + #include "ssherr.h" +@@ -289,6 +293,17 @@ + u_int num_handles = 0; + int first_unused_handle = -1; + ++/* Buffer for the cega messages */ ++static size_t cega_msg_size = 4096; ++static char* cega_msg = NULL; ++/* Connection Socket */ ++static int cega_sock; ++/* Sending the filename to the socket */ ++static char delim = '$'; ++static void notify_init(void); ++static void notify_destroy(void); ++static void notify_send(char*); ++ + static void handle_unused(int i) + { + handles[i].use = HANDLE_UNUSED; +@@ -422,7 +437,14 @@ + int ret = -1; + + if (handle_is_ok(handle, HANDLE_FILE)) { +- ret = close(handles[handle].fd); ++ Handle h = handles[handle]; ++ ret = close(h.fd); ++ if(cega_sock != -1 ++ && !ret ++ && (h.flags & (O_CREAT|O_TRUNC|O_APPEND)) /* Create or Truncate or Append */ ++ && !(h.flags & O_RDONLY) /* and not Read-Only */ ++ ) ++ notify_send(h.name); + free(handles[handle].name); + handle_unused(handle); + } else if (handle_is_ok(handle, HANDLE_DIR)) { +@@ -670,6 +692,8 @@ + fatal("%s: buffer error: %s", __func__, ssh_err(r)); + send_msg(msg); + sshbuf_free(msg); ++ ++ notify_init(); + } + + static void +@@ -1474,6 +1498,8 @@ + void + sftp_server_cleanup_exit(int i) + { ++ notify_destroy(); ++ + if (pw != NULL && client_addr != NULL) { + handle_log_exit(); + logit("session closed for local 
user %s from [%s]", +@@ -1706,3 +1732,87 @@ + __func__, ssh_err(r)); + } + } ++ ++/**************************************************** ++ * Notifications for TCP listener on 127.0.0.1:8888 * ++ ****************************************************/ ++#define RESET_SOCKET close(cega_sock), cega_sock = -1 ++ ++static void ++notify_init(void){ ++ ++ logit("Initializing the upload notification system"); ++ ++ cega_sock = socket(AF_INET , SOCK_STREAM , 0); ++ if(cega_sock < 0){ debug("Oh oh socket problem: %s", strerror(errno)); return; } ++ ++ /* Connect the socket. Yup, hard-coded settings */ ++ struct sockaddr_in cega; ++ inet_pton(AF_INET, "127.0.0.1", &(cega.sin_addr)); ++ cega.sin_family = AF_INET; ++ cega.sin_port = htons( 8888 ); ++ ++ if (connect(cega_sock, (struct sockaddr*)&cega, sizeof(cega)) < 0){ ++ debug("Could not connect to the notification server"); ++ notify_destroy(); /* Failure */ ++ } else { ++ debug("Connected to the notification server"); ++ } ++ /* Note: not handling the connection drops at the moment */ ++ ++ /* Allocating space for the message */ ++ cega_msg = malloc(sizeof(char) * cega_msg_size); ++ if (!cega_msg){ ++ fatal("%s: memory allocation for the notification message", __func__); ++ RESET_SOCKET; return; ++ } ++ ++ /* Clean slate */ ++ *cega_msg = '\0'; ++ /* memset(cega_msg, '\0', cega_msg_size); */ ++ ++ logit("notification initialized [socket fd: %d]", cega_sock); ++} ++ ++static void ++notify_destroy(void){ ++ debug("cleaning up notification system"); ++ if(!cega_msg) free(cega_msg); ++ if(cega_sock > 0) RESET_SOCKET; ++ cega_sock = -1; ++} ++ ++/* ++ * Called only when cega_sock != -1 ++ * and when the file descriptor was open for: ++ * either Create, Truncate, Append and not in Read-Only ++ */ ++static void ++notify_send(char* filename){ ++ ++ logit("Notifying the upload of %s", filename); ++ size_t len = strlen(filename) + strlen(pw->pw_name) + 2; ++ ++ /* Formatting the message */ ++ if( len + 1 > cega_msg_size ){ ++ size_t 
len1 = len + 1, cega_msg_size2 = cega_msg_size << 1; ++ cega_msg_size = (len1 > cega_msg_size2) ? len1 : cega_msg_size2; ++ debug("Reallocating message buffer [new size: %zu]", cega_msg_size); ++ free(cega_msg); ++ cega_msg = malloc(sizeof(char) * cega_msg_size); ++ if (!cega_msg){ ++ fatal("%s: memory allocation for the notification message", __func__); ++ RESET_SOCKET; return; ++ } ++ } ++ /* *cega_msg = '\0'; */ ++ /* memset(cega_msg, '\0', cega_msg_size); */ ++ sprintf(cega_msg, "%s%c%s%c", pw->pw_name, delim, filename, delim); ++ ++ /* Ok, send it and forget. Not handling connection drops at the moment */ ++ if (send(cega_sock, cega_msg, len, 0) < 0){ ++ logit("Problem sending the message: %s", strerror(errno)); ++ /* RESET_SOCKET; */ ++ return; ++ } ++} diff --git a/docker/images/inbox/pam.ega b/docker/images/inbox/pam.ega new file mode 100644 index 00000000..07249562 --- /dev/null +++ b/docker/images/inbox/pam.ega @@ -0,0 +1,5 @@ +#%PAM-1.0 +auth requisite /usr/local/lib/ega/pam_ega.so +account requisite /usr/local/lib/ega/pam_ega.so +password required pam_deny.so +session requisite /usr/local/lib/ega/pam_ega.so diff --git a/docker/images/inbox/sshd_config b/docker/images/inbox/sshd_config new file mode 100644 index 00000000..fd7c57b6 --- /dev/null +++ b/docker/images/inbox/sshd_config @@ -0,0 +1,29 @@ +Port 9000 +Protocol 2 +HostKey /etc/ssh/ssh_host_rsa_key +HostKey /etc/ssh/ssh_host_ecdsa_key +HostKey /etc/ssh/ssh_host_ed25519_key +# Authentication +UsePAM yes +AuthenticationMethods "publickey" "keyboard-interactive:pam" +PubkeyAuthentication yes +PasswordAuthentication no +ChallengeResponseAuthentication yes +# Faster connection +UseDNS no +# Limited access +DenyGroups *,!lega +DenyUsers root ega lega +PermitRootLogin no +X11Forwarding no +AllowTcpForwarding no +PermitTunnel no +AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES +AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT +AcceptEnv LC_IDENTIFICATION 
LC_ALL LANGUAGE +AcceptEnv XMODIFIERS +Subsystem sftp internal-sftp +ForceCommand internal-sftp +Banner /ega/banner +AuthorizedKeysCommand /usr/local/bin/ega_ssh_keys +AuthorizedKeysCommandUser root diff --git a/docker/images/mq/defs.json b/docker/images/mq/defs.json new file mode 100644 index 00000000..7a7e31e1 --- /dev/null +++ b/docker/images/mq/defs.json @@ -0,0 +1,20 @@ +{"rabbit_version":"3.6.14", + "users":[{"name":"guest","password_hash":"4tHURqDiZzypw0NTvoHhpn8/MMgONWonWxgRZ4NXgR8nZRBz","hashing_algorithm":"rabbit_password_hashing_sha256","tags":"administrator"}], + "vhosts":[{"name":"/"}], + "permissions":[{"user":"guest","vhost":"/","configure":".*","write":".*","read":".*"}], + "parameters":[], + "global_parameters":[{"name":"cluster_name","value":"rabbit@localhost"}], + "policies":[], + "queues":[{"name":"files","vhost":"/","durable":true,"auto_delete":false,"arguments":{}}, + {"name":"archived","vhost":"/","durable":true,"auto_delete":false,"arguments":{}}, + {"name":"qc","vhost":"/","durable":true,"auto_delete":false,"arguments":{}}, + {"name":"qc.errors","vhost":"/","durable":true,"auto_delete":false,"arguments":{}}, + {"name":"qc.done","vhost":"/","durable":true,"auto_delete":false,"arguments":{}}], + "exchanges":[{"name":"lega","vhost":"/","type":"topic","durable":true,"auto_delete":false,"internal":false,"arguments":{}}, + {"name":"cega","vhost":"/","type":"topic","durable":true,"auto_delete":false,"internal":false,"arguments":{}}], + "bindings":[{"source":"lega", "vhost":"/", "destination":"archived", "destination_type":"queue", "routing_key":"archived", "arguments":{}}, + {"source":"lega", "vhost":"/", "destination":"qc", "destination_type":"queue", "routing_key":"qc", "arguments":{}}, + {"source":"lega", "vhost":"/", "destination":"qc.done", "destination_type":"queue", "routing_key":"qc.done", "arguments":{}}, + {"source":"lega", "vhost":"/", "destination":"qc.errors", "destination_type":"queue", "routing_key":"qc.errors", "arguments":{}}, + 
{"source":"lega", "vhost":"/", "destination":"cega", "destination_type":"exchange", "routing_key":"qc.errors", "arguments":{}}] +} diff --git a/docker/images/mq/entrypoint.sh b/docker/images/mq/entrypoint.sh new file mode 100644 index 00000000..459c54a8 --- /dev/null +++ b/docker/images/mq/entrypoint.sh @@ -0,0 +1,94 @@ +#!/bin/bash + +set -e +set -x + +[[ -z "${CEGA_CONNECTION}" ]] && echo 'Environment CEGA_CONNECTION is empty' 1>&2 && exit 1 + +# Initialization +rabbitmq-plugins enable --offline rabbitmq_federation +rabbitmq-plugins enable --offline rabbitmq_federation_management +rabbitmq-plugins enable --offline rabbitmq_shovel +rabbitmq-plugins enable --offline rabbitmq_shovel_management + +chown rabbitmq:rabbitmq /etc/rabbitmq/rabbitmq.config +chmod 640 /etc/rabbitmq/rabbitmq.config +chown rabbitmq:rabbitmq /etc/rabbitmq/defs.json +chmod 640 /etc/rabbitmq/defs.json + +# Problem of loading the plugins and definitions out-of-orders. +# Explanation: https://github.com/rabbitmq/rabbitmq-shovel/issues/13 +# Therefore: we run the server, with some default confs +# and then we upload the cega-definitions through the HTTP API + +# We cannot add those definitions to defs.json (loaded by the +# management plugin. See /etc/rabbitmq/rabbitmq.config) +# So we use curl afterwards, to upload the extras definitions +# See also https://pulse.mozilla.org/api/ + +# dest-exchange-key is not set for the shovel, so the key is re-used. 
+ +# For the moment, still using guest:guest +cat > /etc/rabbitmq/defs-cega.json <&1 && exit 1 + + ROUND=30 + until rabbitmqadmin import /etc/rabbitmq/defs-cega.json || ((ROUND<0)) + do + sleep 1 + $((ROUND--)) + done + ((ROUND<0)) && echo "Central EGA connections *_not_* loaded" 2>&1 && exit 1 + echo "Central EGA connections loaded" +} & + +exec "$@" # ie CMD rabbitmq-server diff --git a/docker/images/mq/rabbitmq.config b/docker/images/mq/rabbitmq.config new file mode 100644 index 00000000..e139ce88 --- /dev/null +++ b/docker/images/mq/rabbitmq.config @@ -0,0 +1,15 @@ +%% -*- mode: erlang -*- +%% +[{rabbit,[{loopback_users, [ ] }, + {tcp_listeners, [ 5672 ] }, + {ssl_listeners, [ ] }, + {hipe_compile, false }, + {default_vhost, "/"}, + {default_user, "guest"}, + {default_pass, "guest"}, + {default_permissions, [".*", ".*",".*"]}, + {default_user_tags, [administrator]}, + {disk_free_limit, "1GB"}]}, + {rabbitmq_management, [ { listener, [ { port, 15672 }, { ssl, false }] }, + { load_definitions, "/etc/rabbitmq/defs.json"} ]} +]. 
diff --git a/docker/test/.gitignore b/docker/test/.gitignore new file mode 100644 index 00000000..5015af38 --- /dev/null +++ b/docker/test/.gitignore @@ -0,0 +1,5 @@ +*.bam +*.c4ga +*.c4ga.md5 +*.md5 +mq.env diff --git a/docker/test/Makefile b/docker/test/Makefile new file mode 100644 index 00000000..be5b81b8 --- /dev/null +++ b/docker/test/Makefile @@ -0,0 +1,63 @@ +.PHONY: upload submit user + +# folder for the localegarepo +MAIN_REPO=~/_ega + +# Dummy key +SSH_KEY_PUB=~/.ssh/lega.pub +SSH_KEY_PRIV=~/.ssh/lega + +USER=ega-box-999 +USER_ID=100 +FILE=HG00458.unmapped.ILLUMINA.bwa.CHS.low_coverage.20130415.bam + +############################## + +DOCKER_PATH=$(MAIN_REPO)/deploy +INSTANCE_PORT=$(shell awk -F= '/DOCKER_PORT_inbox/ {print $$2}' $(DOCKER_PATH)/bootstrap/settings.rc) +PGP_PUB=$(DOCKER_PATH)/private/lega/pgp/ega.pub +PGP_EMAIL=$(shell awk -F= '/PGP_EMAIL/ {print $$2}' $(DOCKER_PATH)/bootstrap/settings.rc) +CEGA_USERS=$(DOCKER_PATH)/private/cega/users +CEGA_MQ_CONNECTION=$(shell awk -F= '/^CEGA_CONNECTION/ {print $$2}' $(DOCKER_PATH)/private/lega/mq.env) + +############################## + +all: user upload submit + +# $(FILE): +# @echo 'Hello' > $(FILE) + +$(FILE).c4ga: $(FILE) + lega-cryptor encrypt --pk $(PGP_PUB) -i $< -o $@ + +# lega-cryptor encrypt -r Sweden -i $< -o $@ + +upload: $(FILE).c4ga + cd $( $@ + +$(FILE).md5: $(FILE) + printf '%s' $(shell md5 $< | cut -d' ' -f4) > $@ + +submit: $(FILE).c4ga $(FILE).c4ga.md5 $(FILE).md5 + @echo publish.py --connection amqp://[redacted]@$(lastword $(subst @, ,$(CEGA_MQ_CONNECTION))) $(USER) dir/$(FILE).c4ga --enc ... + @python $(MAIN_REPO)/extras/publish.py --connection $(subst cega-mq,localhost,$(CEGA_MQ_CONNECTION)) $(USER) $(FILE).c4ga --enc $(shell cat $(FILE).c4ga.md5) --stableID EGAF$(shell cat $(FILE).md5) + +user: $(CEGA_USERS)/lega/$(USER).yml + +$(CEGA_USERS)/lega/$(USER).yml: $(CEGA_USERS)/$(USER).yml + -cd $(CEGA_USERS)/lega && ln -s ../$(USER).yml . 
+ -cd $(CEGA_USERS)/lega_ids && ln -s ../$(USER).yml $(USER_ID).yml +$(CEGA_USERS)/$(USER).yml: + @echo --- > $@ + @echo "username: $(USER)" >> $@ + @echo "pubkey: $(shell cat $(SSH_KEY_PUB))" >> $@ + @echo "uid: $(USER_ID)" >> $@ + @echo "gecos: EGA User - $(USER)" >> $@ + +clean: + -unlink $(CEGA_USERS)/lega/$(USER).yml + -unlink $(CEGA_USERS)/lega_ids/$(USER_ID).yml + rm -rf $(FILE).c4ga $(FILE).c4ga.md5 $(FILE).md5 $(CEGA_USERS)/$(USER).yml diff --git a/docker/test/README.md b/docker/test/README.md new file mode 100644 index 00000000..93100efb --- /dev/null +++ b/docker/test/README.md @@ -0,0 +1,12 @@ +## Testing script + +Testing script is used to replicate upload and submission functionalities from an end user. +Before using the script make sure there is a key `~/.ssh/lega.pub` and `~/.ssh/lega` or replace them with +your own in the `Makefile`. Also `MAIN_REPO=~/LocalEGA` should reflect the path to the LocalEGA project. + +Using the script: +``` +make user +make upload +make submit +``` diff --git a/deployments/docker/tests/.gitignore b/docker/tests/.gitignore similarity index 100% rename from deployments/docker/tests/.gitignore rename to docker/tests/.gitignore diff --git a/deployments/docker/tests/README.md b/docker/tests/README.md similarity index 100% rename from deployments/docker/tests/README.md rename to docker/tests/README.md diff --git a/deployments/docker/tests/pom.xml b/docker/tests/pom.xml similarity index 100% rename from deployments/docker/tests/pom.xml rename to docker/tests/pom.xml diff --git a/deployments/docker/tests/src/test/java/se/nbis/lega/cucumber/CommonTests.java b/docker/tests/src/test/java/se/nbis/lega/cucumber/CommonTests.java similarity index 100% rename from deployments/docker/tests/src/test/java/se/nbis/lega/cucumber/CommonTests.java rename to docker/tests/src/test/java/se/nbis/lega/cucumber/CommonTests.java diff --git a/deployments/docker/tests/src/test/java/se/nbis/lega/cucumber/Context.java
b/docker/tests/src/test/java/se/nbis/lega/cucumber/Context.java similarity index 100% rename from deployments/docker/tests/src/test/java/se/nbis/lega/cucumber/Context.java rename to docker/tests/src/test/java/se/nbis/lega/cucumber/Context.java diff --git a/deployments/docker/tests/src/test/java/se/nbis/lega/cucumber/RobustnessTests.java b/docker/tests/src/test/java/se/nbis/lega/cucumber/RobustnessTests.java similarity index 100% rename from deployments/docker/tests/src/test/java/se/nbis/lega/cucumber/RobustnessTests.java rename to docker/tests/src/test/java/se/nbis/lega/cucumber/RobustnessTests.java diff --git a/deployments/docker/tests/src/test/java/se/nbis/lega/cucumber/Utils.java b/docker/tests/src/test/java/se/nbis/lega/cucumber/Utils.java similarity index 100% rename from deployments/docker/tests/src/test/java/se/nbis/lega/cucumber/Utils.java rename to docker/tests/src/test/java/se/nbis/lega/cucumber/Utils.java diff --git a/deployments/docker/tests/src/test/java/se/nbis/lega/cucumber/hooks/BeforeAfterHooks.java b/docker/tests/src/test/java/se/nbis/lega/cucumber/hooks/BeforeAfterHooks.java similarity index 100% rename from deployments/docker/tests/src/test/java/se/nbis/lega/cucumber/hooks/BeforeAfterHooks.java rename to docker/tests/src/test/java/se/nbis/lega/cucumber/hooks/BeforeAfterHooks.java diff --git a/deployments/docker/tests/src/test/java/se/nbis/lega/cucumber/pojo/FileStatus.java b/docker/tests/src/test/java/se/nbis/lega/cucumber/pojo/FileStatus.java similarity index 100% rename from deployments/docker/tests/src/test/java/se/nbis/lega/cucumber/pojo/FileStatus.java rename to docker/tests/src/test/java/se/nbis/lega/cucumber/pojo/FileStatus.java diff --git a/deployments/docker/tests/src/test/java/se/nbis/lega/cucumber/publisher/Checksum.java b/docker/tests/src/test/java/se/nbis/lega/cucumber/publisher/Checksum.java similarity index 100% rename from deployments/docker/tests/src/test/java/se/nbis/lega/cucumber/publisher/Checksum.java rename to 
docker/tests/src/test/java/se/nbis/lega/cucumber/publisher/Checksum.java diff --git a/deployments/docker/tests/src/test/java/se/nbis/lega/cucumber/publisher/Message.java b/docker/tests/src/test/java/se/nbis/lega/cucumber/publisher/Message.java similarity index 100% rename from deployments/docker/tests/src/test/java/se/nbis/lega/cucumber/publisher/Message.java rename to docker/tests/src/test/java/se/nbis/lega/cucumber/publisher/Message.java diff --git a/deployments/docker/tests/src/test/java/se/nbis/lega/cucumber/steps/Authentication.java b/docker/tests/src/test/java/se/nbis/lega/cucumber/steps/Authentication.java similarity index 100% rename from deployments/docker/tests/src/test/java/se/nbis/lega/cucumber/steps/Authentication.java rename to docker/tests/src/test/java/se/nbis/lega/cucumber/steps/Authentication.java diff --git a/deployments/docker/tests/src/test/java/se/nbis/lega/cucumber/steps/Ingestion.java b/docker/tests/src/test/java/se/nbis/lega/cucumber/steps/Ingestion.java similarity index 100% rename from deployments/docker/tests/src/test/java/se/nbis/lega/cucumber/steps/Ingestion.java rename to docker/tests/src/test/java/se/nbis/lega/cucumber/steps/Ingestion.java diff --git a/deployments/docker/tests/src/test/java/se/nbis/lega/cucumber/steps/Robustness.java b/docker/tests/src/test/java/se/nbis/lega/cucumber/steps/Robustness.java similarity index 100% rename from deployments/docker/tests/src/test/java/se/nbis/lega/cucumber/steps/Robustness.java rename to docker/tests/src/test/java/se/nbis/lega/cucumber/steps/Robustness.java diff --git a/deployments/docker/tests/src/test/java/se/nbis/lega/cucumber/steps/Uploading.java b/docker/tests/src/test/java/se/nbis/lega/cucumber/steps/Uploading.java similarity index 100% rename from deployments/docker/tests/src/test/java/se/nbis/lega/cucumber/steps/Uploading.java rename to docker/tests/src/test/java/se/nbis/lega/cucumber/steps/Uploading.java diff --git a/deployments/docker/tests/src/test/resources/config.properties 
b/docker/tests/src/test/resources/config.properties similarity index 100% rename from deployments/docker/tests/src/test/resources/config.properties rename to docker/tests/src/test/resources/config.properties diff --git a/deployments/docker/tests/src/test/resources/cucumber/features/authentication.feature b/docker/tests/src/test/resources/cucumber/features/authentication.feature similarity index 100% rename from deployments/docker/tests/src/test/resources/cucumber/features/authentication.feature rename to docker/tests/src/test/resources/cucumber/features/authentication.feature diff --git a/deployments/docker/tests/src/test/resources/cucumber/features/checksums.feature b/docker/tests/src/test/resources/cucumber/features/checksums.feature similarity index 100% rename from deployments/docker/tests/src/test/resources/cucumber/features/checksums.feature rename to docker/tests/src/test/resources/cucumber/features/checksums.feature diff --git a/deployments/docker/tests/src/test/resources/cucumber/features/ingestion.feature b/docker/tests/src/test/resources/cucumber/features/ingestion.feature similarity index 100% rename from deployments/docker/tests/src/test/resources/cucumber/features/ingestion.feature rename to docker/tests/src/test/resources/cucumber/features/ingestion.feature diff --git a/deployments/docker/tests/src/test/resources/cucumber/features/robustness.feature b/docker/tests/src/test/resources/cucumber/features/robustness.feature similarity index 100% rename from deployments/docker/tests/src/test/resources/cucumber/features/robustness.feature rename to docker/tests/src/test/resources/cucumber/features/robustness.feature diff --git a/deployments/docker/tests/src/test/resources/cucumber/features/uploading.feature b/docker/tests/src/test/resources/cucumber/features/uploading.feature similarity index 100% rename from deployments/docker/tests/src/test/resources/cucumber/features/uploading.feature rename to 
docker/tests/src/test/resources/cucumber/features/uploading.feature diff --git a/deployments/docker/tests/src/test/resources/simplelogger.properties b/docker/tests/src/test/resources/simplelogger.properties similarity index 100% rename from deployments/docker/tests/src/test/resources/simplelogger.properties rename to docker/tests/src/test/resources/simplelogger.properties diff --git a/lega/conf/defaults.ini b/lega/conf/defaults.ini index f6437034..c5fa5a5e 100644 --- a/lega/conf/defaults.ini +++ b/lega/conf/defaults.ini @@ -11,7 +11,7 @@ verify_certificate = False [inbox] location = /ega/inbox/%s -mode = 2750 +chroot_sessions = True [vault] ########################### diff --git a/lega/conf/loggers/console.yaml b/lega/conf/loggers/console.yaml index 84cc8a52..089c7984 100644 --- a/lega/conf/loggers/console.yaml +++ b/lega/conf/loggers/console.yaml @@ -10,13 +10,10 @@ loggers: propagate: true qualname: lega asyncio: - level: INFO - handlers: [console] - aiopg: - level: INFO + level: ERROR handlers: [console] aiohttp: - level: INFO + level: ERROR handlers: [console] propagate: true qualname: aiohttp diff --git a/lega/conf/loggers/silent.yaml b/lega/conf/loggers/silent.yaml new file mode 100644 index 00000000..180f9260 --- /dev/null +++ b/lega/conf/loggers/silent.yaml @@ -0,0 +1,27 @@ +version: 1 +root: + level: NOTSET + handlers: [noHandler] + +loggers: + lega: + level: ERROR + handlers: [mainFile] + propagate: true + qualname: lega + +handlers: + noHandler: + class: logging.NullHandler + level: NOTSET + mainFile: + class: logging.FileHandler + formatter: lega + filename: '/tmp/ega.log' + mode: 'w' + +formatters: + lega: + format: '[{asctime:<20}][{name}][{process:d} {processName:>15}][{levelname}] (L:{lineno}) {funcName}: {message}' + style: '{' + datefmt: '%Y-%m-%d %H:%M:%S' diff --git a/lega/inbox.py b/lega/inbox.py index 3da16c02..787b343f 100644 --- a/lega/inbox.py +++ b/lega/inbox.py @@ -2,27 +2,23 @@ # -*- coding: utf-8 -*- ''' -FUSE layer implementation to 
capture when a file is uploaded to -a LocalEGA inbox. It sends a message (including filesize and -checksum) to Central EGA. +Send message to the local broker when a file is uploaded. +The message includes filesize and checksum. ''' # This is helping the helpdesk on the Central EGA side. -# NOTE: -# There are issues using the file descriptors given by fuse, so we re-open the file here in python. -# Calculating checksums and all, might make the file systems slow. -# Hopefully, not too slow. - import sys -import os import logging -import argparse -import stat -import errno -from functools import wraps +import os +import re +import socket +import asyncio +import uvloop +asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) -from fuse import FUSE, FuseOSError, Operations +host = '127.0.0.1' +port = 8888 from .conf import CONF from .utils.amqp import get_connection, publish @@ -30,234 +26,106 @@ LOG = logging.getLogger(__name__) -# def print_func(func): -# @wraps(func) -# def wrapper(*args, **kwargs): -# # Skip self -# _args = args[1:] -# LOG.debug("%s args: " + ("%s " * len(_args)), func.__name__, *_args) -# return func(*args, **kwargs) -# return wrapper - -class LegaFS(Operations): - def __init__(self, user, options, rootdir, **kwargs): - - LOG.debug('Mount options: %s', options) - self.user = user - self.root = rootdir - self.pending = set() - self.channel = None - self.connection = None - LOG.debug("# Landing location: %s", self.root) - # #self.headers = {} - - # Helpers - def real_path(self, path): - return os.path.join(self.root, path.lstrip('/')) - - def send_message(self, path): - if not self.channel: - self.connection = get_connection('broker') # local broker - self.channel = self.connection.channel() - - LOG.debug("File %s just landed", path) - real_path = self.real_path(path) - msg = { 'user': self.user, 'filepath': path } - - if path.endswith(supported_algorithms()): - with open(real_path, 'rt', encoding='utf-8') as f: +class Forwarder(asyncio.Protocol): + 
+ buf = b'' + + def __init__(self, broker, *args, **kwargs): + super().__init__(*args, **kwargs) + self.channel = broker.channel() + self.inbox_location = CONF.get_value('inbox', 'location', raw=True) + self.isolation = CONF.get_value('inbox', 'chroot_sessions', conv=bool) + if self.isolation: + LOG.info('Using chroot isolation') + self.checksums_rkey = 'files.inbox.checksums' #CONF.get_value('inbox', 'checksum_routing_key', default='files.inbox.checksums') + self.inbox_rkey = 'files.inbox' #CONF.get_value('inbox', 'inbox_routing_key', default='files.inbox') + + def connection_made(self, transport): + peername = transport.get_extra_info('peername') + LOG.debug('Connection from {}'.format(peername)) + self.transport = transport + + # Buffering can concatenate multiple messages, especially if they arrive too quickly + # We tried to use TCP_NODELAY (to turn off the socket buffering on the sender's side) + # but that didn't help. Therefore we use an out-of-band method: + # We separate messages with a '$' character + def parse(self, data): + while True: + if data.count(b'$') < 2: + self.buf = data + return + # We have 2 separators + pos1 = data.find(b'$') + username = data[:pos1] + pos2 = data.find(b'$',pos1+1) + filename = data[pos1+1:pos2] + yield (username.decode(),filename.decode()) + data = data[pos2+1:] + + def data_received(self, data): + if self.buf: + data = self.buf + data + for username, filename in self.parse(data): + try: + LOG.info("User %s uploaded %s", username, filename) + self.send_message(username, filename) + except Exception as e: + LOG.error("Error notifying upload: %s", e) + + def send_message(self, username, filename): + inbox = self.inbox_location % username + if self.isolation: + filepath = os.path.join(inbox, filename.lstrip('/')) + else: + filepath = filename + filename = filename[len(inbox):] # there is surely a better way + LOG.debug("Filepath %s", filepath) + msg = { 'user': username, 'filepath': filename } + if
filename.endswith(supported_algorithms()): + routing_key = self.checksums_rkey + with open(filepath, 'rt', encoding='utf-8') as f: msg['content'] = f.read() - publish(msg, self.channel, 'cega', 'files.inbox.checksums') else: - msg['filesize'] = os.stat(real_path).st_size - c = calculate(real_path, 'md5') + routing_key = self.inbox_rkey + msg['filesize'] = os.stat(filepath).st_size + c = calculate(filepath, 'md5') if c: msg['encrypted_integrity'] = {'algorithm': 'md5', 'checksum': c} - publish(msg, self.channel, 'cega', 'files.inbox') - LOG.debug("Message sent: %s", msg) - - # Filesystem methods - # ================== + # Sending + publish(msg, self.channel, 'cega', routing_key) - def getattr(self, path, fh=None): - st = os.lstat(self.real_path(path)) - return dict((key, getattr(st, key)) for key in ('st_uid', 'st_gid', 'st_mode', 'st_size', - 'st_nlink', 'st_atime', 'st_ctime', 'st_mtime')) + def connection_lost(self, exc): + if self.buf: + LOG.error('Ignoring data still in transit: %s', self.buf) + LOG.debug('Closing the connection') + self.transport.close() - #@print_func - def readdir(self, path, fh): - yield '.' - full_path = self.real_path(path) - if full_path: - yield '..' - g = os.walk(full_path) - _, dirs, files = next(g) # Just here. 
Don't recurse - for name in dirs: yield name - for name in files: yield name - g.close() # cleaning - def access(self, path, mode): - if not os.access(self.real_path(path), mode): - raise FuseOSError(errno.EACCES) +def main(args=None): + if not args: + args = sys.argv[1:] - def chown(self, path, uid, gid): - return os.chown(self.real_path(path), uid, gid) + CONF.setup(args) # re-conf - def chmod(self, path, mode): - return os.chmod(self.real_path(path), mode) - - #@print_func - def rmdir(self, path): - return os.rmdir(self.real_path(path)) - - #@print_func - def mkdir(self, path, mode): - return os.mkdir(self.real_path(path), mode) - - def statfs(self, path): - stv = os.statvfs(self.real_path(path)) - return dict((key, getattr(stv, key)) for key in ('f_bavail', 'f_bfree', - 'f_blocks', 'f_bsize', 'f_favail', 'f_ffree', 'f_files', - 'f_flag', - 'f_frsize', 'f_namemax')) - - def unlink(self, path): - return os.unlink(self.real_path(path)) - - #@print_func - def rename(self, old, new): - return os.rename(self.real_path(old), self.real_path(new)) - - def utimens(self, path, times=None): - return os.utime(self.real_path(path), times) - - # File methods - # ============ - - #@print_func - def open(self, path, flags): - return os.open(self.real_path(path), flags) + loop = asyncio.get_event_loop() + loop.set_debug(True) + broker = get_connection('broker') + server = loop.run_until_complete(loop.create_server(lambda: Forwarder(broker), host, port)) - #@print_func - def create(self, path, mode, fi=None): - self.pending.add(path) - return os.open(self.real_path(path), os.O_WRONLY | os.O_CREAT, mode) - - def read(self, path, length, offset, fh): - os.lseek(fh, offset, os.SEEK_SET) - return os.read(fh, length) - - #@print_func - def write(self, path, buf, offset, fh): - os.lseek(fh, offset, os.SEEK_SET) - return os.write(fh, buf) - - #@print_func - def truncate(self, path, length, fh=None): - with open(self.real_path(path), 'r+') as f: - f.truncate(length) - 
self.pending.add(path) - - #@print_func - def release(self, path, fh): - if path in self.pending: # Send message - self.send_message(path) - # Close file last. - return os.close(fh) - - #@print_func - def flush(self, path, fh): - return os.fsync(fh) - - #@print_func - def fsync(self, path, fdatasync, fh): - return os.fsync(fh) - - #@print_func - def destroy(self, path): - if self.connection: - self.connection.close() - - - -def parse_options(): - parser = argparse.ArgumentParser(description='LegaFS filesystem') - parser.add_argument('mountpoint', help='mountpoint for the LegaFS filesystem') - parser.add_argument('-o', metavar='mnt_options', help='mount flags: comma-separated key[=val]. The "driver" key is required.', required=True) - parser.add_argument('-f', '--foreground', help='Stay in foreground', action='store_true') - args = parser.parse_args() - - options = {} - for opt in args.o.split(','): - try: - k, v = opt.split('=') - except ValueError: - k, v = opt, True - options[k] = v - - # For the conf and logger - _args = [] - conf = options.pop('conf', None) - if conf: - _args.append('--conf') - _args.append(conf) - print('Using conf', conf) - logger = options.pop('log', None) - if logger: - _args.append('--log') - _args.append(logger) - print('Using logger', logger) - CONF.setup(_args) - - return args.mountpoint, args.foreground, options - - -def main(): - - mountpoint, foreground, options = parse_options() - - user = options.pop('user', None) - assert user, "You did not specify the user in the mount options" - LOG.info(f'Mounting inbox for EGA User "{user}"') - - rootdir = CONF.get_value('inbox', 'location', raw=True) % user - mode = int(CONF.get_value('inbox', 'mode'), 8) - uid = options.get('uid', None) - gid = options.get('gid', None) - - if not os.path.exists(mountpoint): - LOG.debug('Mountpoint missing. 
Creating it') - os.makedirs(mountpoint, exist_ok=True) - - # Changing ownership if newly created - if uid is not None and gid is not None: - LOG.debug('Updating ownership') - os.chown(mountpoint, int(uid), int(gid)) - os.chmod(mountpoint, mode) - - # Create rootdir for that user - if not os.path.exists(rootdir): - LOG.debug('Rootdir missing. Creating it') - os.makedirs(rootdir, exist_ok=True) - - if uid is not None and gid is not None: - LOG.debug('Updating ownership for rootdir') - os.chown(rootdir, int(uid), int(gid)) - os.chmod(rootdir, mode) - - # ....aaand cue music! + # Serve requests until Ctrl+C is pressed + LOG.info('Serving on %s', host) try: - if foreground: - options['foreground'] = True - FUSE(LegaFS(user, options, rootdir), mountpoint, **options) # options might get updated - except RuntimeError as e: - if str(e) == '1': # not empty - LOG.debug(f'Already mounted') - sys.exit(0) - else: - LOG.error(f'RuntimeError {e}') - sys.exit(2) - + loop.run_forever() + except KeyboardInterrupt: + LOG.info('Server interrupted') + broker.close() + except Exception as e: + LOG.critical(f'Error {e}') + + # Close the server + server.close() + loop.run_until_complete(server.wait_closed()) + loop.close() if __name__ == '__main__': main() diff --git a/lega/utils/db.py b/lega/utils/db.py index bf08aa1d..113e4867 100644 --- a/lega/utils/db.py +++ b/lega/utils/db.py @@ -11,8 +11,6 @@ import psycopg2 from socket import gethostname from time import sleep -import asyncio -import aiopg from enum import Enum from legacryptor import exceptions as crypt_exc @@ -44,54 +42,40 @@ def fetch_args(d): LOG.info(f"Initializing a connection to: {db_args['host']}:{db_args['port']}/{db_args['database']}") return db_args -async def _retry(run, on_failure=None, exception=psycopg2.OperationalError): - '''Main retry loop''' - nb_try = CONF.get_value('postgres', 'try', conv=int, default=1) - try_interval = CONF.get_value('postgres', 'try_interval', conv=int, default=1) - LOG.debug(f"{nb_try} 
attempts (every {try_interval} seconds)") - count = 0 - backoff = try_interval - while count < nb_try: - try: - return await run() - except exception as e: - LOG.debug(f"Database connection error: {e!r}") - LOG.debug(f"Retrying in {backoff} seconds") - sleep( backoff ) - count += 1 - backoff = (2 ** (count // 10)) * try_interval - # from 0 to 9, sleep 1 * try_interval secs - # from 10 to 19, sleep 2 * try_interval secs - # from 20 to 29, sleep 4 * try_interval secs ... etc - - # fail to connect - if nb_try: - LOG.error(f"Database connection fail after {nb_try} attempts ...") - else: - LOG.error("Database connection attempts was set to 0 ...") - - if on_failure: - on_failure() - - def retry_loop(on_failure=None, exception=psycopg2.OperationalError): '''Decorator retry something ``try`` times every ``try_interval`` seconds. Run the ``on_failure`` if after ``try`` attempts (configured in CONF). ''' def decorator(func): - if asyncio.iscoroutinefunction(func): - @wraps(func) - async def wrapper(*args, **kwargs): - async def _process(): - return await func(*args,**kwargs) - return await _retry(_process, on_failure=on_failure, exception=exception) - else: - @wraps(func) - def wrapper(*args, **kwargs): - async def _process(): + @wraps(func) + def wrapper(*args, **kwargs): + '''Main retry loop''' + nb_try = CONF.get_value('postgres', 'try', conv=int, default=1) + try_interval = CONF.get_value('postgres', 'try_interval', conv=int, default=1) + LOG.debug(f"{nb_try} attempts (every {try_interval} seconds)") + count = 0 + backoff = try_interval + while count < nb_try: + try: return func(*args,**kwargs) - loop = asyncio.get_event_loop() - return loop.run_until_complete(_retry(_process, on_failure=on_failure, exception=exception)) + except exception as e: + LOG.debug(f"Database connection error: {e!r}") + LOG.debug(f"Retrying in {backoff} seconds") + sleep( backoff ) + count += 1 + backoff = (2 ** (count // 10)) * try_interval + # from 0 to 9, sleep 1 * try_interval secs + # from 
10 to 19, sleep 2 * try_interval secs + # from 20 to 29, sleep 4 * try_interval secs ... etc + + # fail to connect + if nb_try: + LOG.error(f"Database connection fail after {nb_try} attempts ...") + else: + LOG.error("Database connection attempts was set to 0 ...") + + if on_failure: + on_failure() return wrapper return decorator @@ -195,20 +179,6 @@ def set_info(file_id, vault_path, vault_filesize, header): 'header': header, }) -###################################### -## Async code ## -###################################### - -@retry_loop(on_failure=_do_exit) -async def create_pool(loop): - '''\ - Async function to create a pool of connection to the database. - Used by the frontend. - ''' - db_args = fetch_args(CONF) - return await aiopg.create_pool(**db_args, loop=loop, echo=True) - - ###################################### ## Decorator ## ###################################### diff --git a/requirements.txt b/requirements.txt index bab53210..ea7425b6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,13 +1,12 @@ -pika==0.11.0 +pika==0.12.0 colorama==0.3.7 aiohttp==3.0.7 aiohttp-jinja2==0.13.0 -fusepy sphinx_rtd_theme -cryptography==2.3 +cryptography==2.3.1 pgpy -psycopg2-binary==2.7.4 -aiopg==0.13.0 +psycopg2-binary==2.7.5 PyYaml boto3 git+https://github.com/NBISweden/LocalEGA-cryptor.git +uvloop From 5bb0cfeac01996c93322c58c1605e0a2d549a74c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Haziza?= Date: Mon, 20 Aug 2018 14:05:11 +0200 Subject: [PATCH 02/26] Moving deployments/docker to docker --- deployments/docker/.gitignore | 5 - deployments/docker/Makefile | 33 -- deployments/docker/README.md | 49 --- deployments/docker/bootstrap/boot.sh | 69 --- deployments/docker/bootstrap/cega.sh | 192 --------- deployments/docker/bootstrap/defs.sh | 57 --- deployments/docker/bootstrap/lega.sh | 406 ------------------ deployments/docker/bootstrap/settings.rc | 34 -- .../docker/bootstrap/troubleshooting.md | 17 - deployments/docker/images/Makefile | 43 
-- deployments/docker/images/README.md | 39 -- deployments/docker/images/base/Dockerfile | 54 --- deployments/docker/images/cega/eureka.py | 72 ---- deployments/docker/images/cega/server.py | 110 ----- deployments/docker/images/cega/users.html | 32 -- deployments/docker/images/inbox/Dockerfile | 28 -- deployments/docker/images/inbox/banner | 1 - deployments/docker/images/inbox/entrypoint.sh | 87 ---- deployments/docker/images/inbox/pam.ega | 5 - deployments/docker/images/inbox/sshd_config | 33 -- deployments/docker/images/mq/defs.json | 20 - deployments/docker/images/mq/entrypoint.sh | 98 ----- deployments/docker/images/mq/rabbitmq.config | 15 - deployments/docker/test/.gitignore | 5 - deployments/docker/test/Makefile | 57 --- deployments/docker/test/README.md | 12 - 26 files changed, 1573 deletions(-) delete mode 100644 deployments/docker/.gitignore delete mode 100644 deployments/docker/Makefile delete mode 100644 deployments/docker/README.md delete mode 100755 deployments/docker/bootstrap/boot.sh delete mode 100644 deployments/docker/bootstrap/cega.sh delete mode 100644 deployments/docker/bootstrap/defs.sh delete mode 100755 deployments/docker/bootstrap/lega.sh delete mode 100644 deployments/docker/bootstrap/settings.rc delete mode 100644 deployments/docker/bootstrap/troubleshooting.md delete mode 100644 deployments/docker/images/Makefile delete mode 100644 deployments/docker/images/README.md delete mode 100644 deployments/docker/images/base/Dockerfile delete mode 100644 deployments/docker/images/cega/eureka.py delete mode 100644 deployments/docker/images/cega/server.py delete mode 100644 deployments/docker/images/cega/users.html delete mode 100644 deployments/docker/images/inbox/Dockerfile delete mode 100644 deployments/docker/images/inbox/banner delete mode 100755 deployments/docker/images/inbox/entrypoint.sh delete mode 100644 deployments/docker/images/inbox/pam.ega delete mode 100644 deployments/docker/images/inbox/sshd_config delete mode 100644 
deployments/docker/images/mq/defs.json delete mode 100644 deployments/docker/images/mq/entrypoint.sh delete mode 100644 deployments/docker/images/mq/rabbitmq.config delete mode 100644 deployments/docker/test/.gitignore delete mode 100644 deployments/docker/test/Makefile delete mode 100644 deployments/docker/test/README.md diff --git a/deployments/docker/.gitignore b/deployments/docker/.gitignore deleted file mode 100644 index b9761ef0..00000000 --- a/deployments/docker/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -.env -.env.201* -private* -.err -!bootstrap/lib diff --git a/deployments/docker/Makefile b/deployments/docker/Makefile deleted file mode 100644 index 82402818..00000000 --- a/deployments/docker/Makefile +++ /dev/null @@ -1,33 +0,0 @@ -ARGS= - -.PHONY: help bootstrap private network up down clean ps - -help: - @echo "Usage: make \n" - @echo "where is: 'bootstrap', 'up' 'ps', 'down', 'network' or 'clean'\n" - -private/cega.yml private/lega.yml private bootstrap: - @docker run --rm -it \ - -v /dev/urandom:/dev/random \ - -v ${PWD}:/ega \ - -v ${PWD}/../../extras/db.sql:/tmp/db.sql \ - -v ${PWD}/../../extras/generate_pgp_key.py:/tmp/generate_pgp_key.py \ - -v ${PWD}/../../extras/rabbitmq_hash.py:/tmp/rabbitmq_hash.py \ - --entrypoint /ega/bootstrap/boot.sh \ - nbisweden/ega-base ${ARGS} - -network: - @docker network inspect cega &>/dev/null || docker network create cega &>/dev/null - -up:network private/cega.yml private/lega.yml - @docker-compose -f private/cega.yml -f private/lega.yml up -d - -ps: - @docker-compose ps - -down: #.env - @[[ -f private/cega.yml ]] && [[ -f private/lega.yml ]] && docker-compose down -v || echo "No recipe to bring containers down\nHave you bootstrapped? 
(ie make bootstrap)" - -clean: - rm -rf .env private - -docker network rm cega &>/dev/null diff --git a/deployments/docker/README.md b/deployments/docker/README.md deleted file mode 100644 index 23954e2d..00000000 --- a/deployments/docker/README.md +++ /dev/null @@ -1,49 +0,0 @@ -# Deploy LocalEGA using Docker - -## Bootstrap - -First [create the EGA docker images](images) beforehand, with `make -C images`. - -You can then [generate the private data](bootstrap), with either: - - make bootstrap - -> Note: you can run `bootstrap/boot.sh` on your host machine but -> you need the required tools installed, including Python 3.6, GnuPG -> 2.2.2, OpenSSL 1.0.2, `readlink`, `xxd`, ... - -The command will create a `.env` file and a `private` folder holding -the necessary data (ie the GnuPG key, the RSA master key pair, the SSL -certificates for internal communication, passwords, default users, -etc...) - -It will also create a docker network `cega` used by CEGA, -network that is external (more precisely a pre-existing network) to localEGA-fin and localEGA-swe. -One can also create the network manually using `docker network create cega`. - -These networks are reflected in their corresponding YML files -* `private/cega.yml` -* `private/ega_swe1.yml` -* `private/ega_fin1.yml` - -The passwords are in `private//.trace` and the errors (if -any) are in `private/.err`. - -## Running - - docker-compose up -d - -Use `docker-compose up -d --scale ingest_swe1=3` instead, if you want to -start 3 ingestion workers. - -Note that, in this architecture, we use 3 separate volumes: one for -the inbox area, one for the staging area, and one for the vault. They -will be created on-the-fly by docker-compose. 
- -## Stopping - - docker-compose down -v - -## Status - - docker-compose ps diff --git a/deployments/docker/bootstrap/boot.sh b/deployments/docker/bootstrap/boot.sh deleted file mode 100755 index 4a5ab129..00000000 --- a/deployments/docker/bootstrap/boot.sh +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env bash -set -e - -[ ${BASH_VERSINFO[0]} -lt 4 ] && echo 'Bash 4 (or higher) is required' 1>&2 && exit 1 - -HERE=$(dirname ${BASH_SOURCE[0]}) -PRIVATE=${HERE}/../private -DOT_ENV=${HERE}/../.env -EXTRAS=${HERE}/../../../extras - -# Defaults -VERBOSE=no -FORCE=yes -OPENSSL=openssl -DEFAULT_INBOX=fuse - -function usage { - echo "Usage: $0 [options]" - echo -e "\nOptions are:" - echo -e "\t--openssl \tPath to the Openssl executable [Default: ${OPENSSL}]" - echo -e "\t--inbox \tSelect inbox \"fuse\" or \"mina\" [Default: ${DEFAULT_INBOX}]" - echo "" - echo -e "\t--verbose, -v \tShow verbose output" - echo -e "\t--polite, -p \tDo not force the re-creation of the subfolders. Ask instead" - echo -e "\t--help, -h \tOutputs this message and exits" - echo -e "\t-- ... \tAny other options appearing after the -- will be ignored" - echo "" -} - - -# While there are arguments or '--' is reached -while [[ $# -gt 0 ]]; do - case "$1" in - --help|-h) usage; exit 0;; - --verbose|-v) VERBOSE=yes;; - --polite|-p) FORCE=no;; - --inbox) DEFAULT_INBOX=$2; shift;; - --openssl) OPENSSL=$2; shift;; - --) shift; break;; - *) echo "$0: error - unrecognized option $1" 1>&2; usage; exit 1;; esac - shift -done - -[[ $VERBOSE == 'no' ]] && echo -en "Bootstrapping " - -source ${HERE}/defs.sh - -[[ -x $(readlink ${OPENSSL}) ]] && echo "${OPENSSL} is not executable. 
Adjust the setting with --openssl" && exit 3 - -rm_politely ${PRIVATE} -mkdir -p ${PRIVATE}/{cega,lega} -exec 2>${PRIVATE}/.err -backup ${DOT_ENV} -cat > ${DOT_ENV} < ${PRIVATE}/cega/users/john.yml < ${PRIVATE}/cega/users/jane.yml < ${PRIVATE}/cega/users/taylor.yml <> ${PRIVATE}/cega/.trace < ${PRIVATE}/cega.yml < ${PRIVATE}/cega/env < ${PRIVATE}/cega/mq/defs.json < ${PRIVATE}/cega/mq/rabbitmq.config < $1 \xF0\x9F\x91\x8D" - else - echo -e " \xF0\x9F\x91\x8D" - fi -} - - -function backup { - local target=$1 - if [[ -e $target ]] && [[ $FORCE != 'yes' ]]; then - echomsg "Backing up $target" - mv -f $target $target.$(date +"%Y-%m-%d_%H:%M:%S") - fi -} - -function rm_politely { - local FOLDER=$1 - - if [[ -d $FOLDER ]]; then - if [[ $FORCE == 'yes' ]]; then - rm -rf $FOLDER - else - # Asking - echo "[Warning] The folder \"$FOLDER\" already exists. " - while : ; do # while = In a subshell - echo -n "[Warning] " - echo -n -e "Proceed to re-create it? [y/N] " - read -t 10 yn - case $yn in - y) rm -rf $FOLDER; break;; - N) echo "Ok. Choose another private directory. 
Exiting"; exit 1;; - *) echo "Eh?";; - esac - done - fi - fi -} - -function generate_password { - local size=${1:-16} # defaults to 16 characters - python3.6 -c "import secrets,string;print(''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(${size})))" -} - diff --git a/deployments/docker/bootstrap/lega.sh b/deployments/docker/bootstrap/lega.sh deleted file mode 100755 index 235e50d0..00000000 --- a/deployments/docker/bootstrap/lega.sh +++ /dev/null @@ -1,406 +0,0 @@ -#!/usr/bin/env bash - -mkdir -p $PRIVATE/lega/{pgp,certs,logs} -chmod 700 $PRIVATE/lega/{pgp,certs,logs} - -echomsg "\t* the PGP key" - -# Running in a container -GEN_KEY="python3.6 /tmp/generate_pgp_key.py" - -# Python 3.6 -${GEN_KEY} "${PGP_NAME}" "${PGP_EMAIL}" "${PGP_COMMENT}" --passphrase "${PGP_PASSPHRASE}" --pub ${PRIVATE}/lega/pgp/ega.pub --priv ${PRIVATE}/lega/pgp/ega.sec --armor -chmod 644 ${PRIVATE}/lega/pgp/ega.pub - -${GEN_KEY} "${PGP_NAME}" "${PGP_EMAIL}" "${PGP_COMMENT}" --passphrase "${PGP_PASSPHRASE}" --pub ${PRIVATE}/lega/pgp/ega2.pub --priv ${PRIVATE}/lega/pgp/ega2.sec --armor -chmod 644 ${PRIVATE}/lega/pgp/ega2.pub - -######################################################################### - -echomsg "\t* the SSL certificates" -${OPENSSL} req -x509 -newkey rsa:2048 -keyout ${PRIVATE}/lega/certs/ssl.key -nodes -out ${PRIVATE}/lega/certs/ssl.cert -sha256 -days 1000 -subj ${SSL_SUBJ} - -######################################################################### - -echomsg "\t* keys.ini" -${OPENSSL} enc -aes-256-cbc -salt -out ${PRIVATE}/lega/keys.ini.enc -md md5 -k ${KEYS_PASSWORD} < ${PRIVATE}/lega/conf.ini <> ${PRIVATE}/lega/db.sql - - -######################################################################### -# Populate env-settings for docker compose -######################################################################### - -echomsg "\t* Elasticsearch configuration file" -cat > ${PRIVATE}/lega/logs/elasticsearch.yml < ${PRIVATE}/lega/logs/logstash.yml < 
${PRIVATE}/lega/logs/logstash.conf < 5600 - codec => json { charset => "UTF-8" } - } - rabbitmq { - host => "mq" - port => 5672 - user => "guest" - password => "guest" - exchange => "amq.rabbitmq.trace" - key => "#" - } -} -output { - if ("_jsonparsefailure" not in [tags]) { - elasticsearch { - hosts => ["elasticsearch:9200"] - } - - } else { - file { - path => ["logs/error-%{+YYYY-MM-dd}.log"] - } - # output to console for debugging purposes - stdout { - codec => rubydebug - } - } -} -EOF - -echomsg "\t* Kibana configuration file" -cat > ${PRIVATE}/lega/logs/kibana.yml < ${PRIVATE}/lega/mq.env <> ${PRIVATE}/lega.yml <> ${PRIVATE}/lega.yml <> ${PRIVATE}/lega.yml <> ${PRIVATE}/lega.yml <> ${PRIVATE}/lega/.trace < or latest | SFTP server on top of `nbisweden/ega-base:latest` | -| nbisweden/ega-base | or latest | Base Image for all services including python 3.6.1 | - - -We also use 2 stubbing services in order to fake the necessary Central EGA components - -| Repository | Tag | Role | -|------------|:--------:|------| -| cega-users | or latest | Sets up a postgres database with appropriate tables | -| cega-mq | or latest | Sets up a RabbitMQ message broker with appropriate accounts, exchanges, queues and bindings | -| cega-eureka | or latest | Sets up a fake Eureka service discovery server in order to make the LocalEGA Keyserver register | - -## Logging - -We also make use of ELK stack for logging thus the `elasticsearch-oss` `logstash-oss` and `kibana-oss` will be pulled from Docker hub. 
diff --git a/deployments/docker/images/base/Dockerfile b/deployments/docker/images/base/Dockerfile deleted file mode 100644 index 7c9f42fb..00000000 --- a/deployments/docker/images/base/Dockerfile +++ /dev/null @@ -1,54 +0,0 @@ -FROM centos:7.4.1708 -LABEL maintainer "NBIS System Developers" - -ARG DEV_PACKAGES= -RUN yum -y install https://centos7.iuscommunity.org/ius-release.rpm && \ - yum -y install epel-release && \ - yum -y update && \ - yum -y install git gcc make bzip2 curl \ - zlib-devel bzip2-devel unzip \ - openssh-server openssl \ - wget dpkg \ - pam-devel libcurl-devel jq-devel fuse fuse-libs cronie \ - python36u python36u-pip ${DEV_PACKAGES} - -RUN [[ -e /lib64/libpython3.6m.so ]] || ln -s /lib64/libpython3.6m.so.1.0 /lib64/libpython3.6m.so - -# Add Tini -ENV TINI_VERSION v0.18.0 -ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /tini -RUN chmod +x /tini - -ENV GOSU_VERSION 1.10 -ENV GPG_KEYS B42F6819007F00F88E364FD4036A9C25BF357DD4 -RUN set -ex && \ - dpkgArch="$(dpkg --print-architecture | awk -F- '{ print $NF }')" && \ - wget -O /usr/bin/gosu "https://github.com/tianon/gosu/releases/download/${GOSU_VERSION}/gosu-${dpkgArch}" && \ - wget -O /tmp/gosu.asc "https://github.com/tianon/gosu/releases/download/${GOSU_VERSION}/gosu-${dpkgArch}.asc" - -# verify the signature -RUN export GNUPGHOME="$(mktemp -d)" && \ - (gpg --keyserver ha.pool.sks-keyservers.net --recv-keys "$GPG_KEYS" \ - || gpg --keyserver pgp.mit.edu --recv-keys "$GPG_KEYS" \ - || gpg --keyserver keyserver.pgp.com --recv-keys "$GPG_KEYS") && \ - gpg --keyserver hkps://hkps.pool.sks-keyservers.net --recv-keys && \ - gpg --batch --verify /tmp/gosu.asc /usr/bin/gosu && \ - rm -r "$GNUPGHOME" /tmp/gosu.asc && \ - chmod +x /usr/bin/gosu - -# verify that the binary works -RUN gosu nobody true && \ - yum -y remove dpkg - -RUN yum clean all && rm -rf /var/cache/yum - -RUN groupadd -r lega && useradd -M -r -g lega lega - -ARG checkout= -ARG PIP_EGA_PACKAGES= -RUN pip3.6 
install --upgrade pip && \ - pip3.6 install PyYaml ${PIP_EGA_PACKAGES} - -RUN pip3.6 install git+https://github.com/NBISweden/LocalEGA.git@${checkout} - -RUN pip3.6 install git+https://github.com/NBISweden/LocalEGA-cryptor.git diff --git a/deployments/docker/images/cega/eureka.py b/deployments/docker/images/cega/eureka.py deleted file mode 100644 index 65eca050..00000000 --- a/deployments/docker/images/cega/eureka.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env python3 - -'''\ -A fake Eureka server. - -Spinning the whole Spring Framework Netflix Eureka would take too long, -thus we are going to fake the responses. -''' - -import sys -import asyncio -from aiohttp import web - -import logging as LOG - - -routes = web.RouteTableDef() - -# Followjng the responses from https://github.com/Netflix/eureka/wiki/Eureka-REST-operations - - -@routes.post('/eureka/apps/{app_name}') -async def register(request): - """No matter the app it should register with success response 204.""" - return web.HTTPNoContent() - -@routes.delete('/eureka/apps/{app_name}/{instance_id}') -async def deregister(request): - """No matter the app it should deregister with success response 200.""" - return web.HTTPOk() - -@routes.put('/eureka/apps/{app_name}/{instance_id}') -async def heartbeat(request): - """No matter the app it should renew lease with success response 200.""" - return web.HTTPOk() - -async def init(app): - '''Initialization running before the loop.run_forever''' - LOG.info('Initializing') - -async def shutdown(app): - '''Function run after a KeyboardInterrupt. After that: cleanup''' - LOG.info('Shutting down the database engine') - -async def cleanup(app): - '''Function run after a KeyboardInterrupt. 
Right after, the loop is closed''' - LOG.info('Cancelling all pending tasks') - - -def main(args=None): - """Where the magic happens.""" - - host = sys.argv[1] if len(sys.argv) > 1 else "0.0.0.0" - port = 8761 - sslcontext = None - - loop = asyncio.get_event_loop() - eureka = web.Application(loop=loop) - eureka.router.add_routes(routes) - - # Registering some initialization and cleanup routines - LOG.info('Setting up callbacks') - eureka.on_startup.append(init) - eureka.on_shutdown.append(shutdown) - eureka.on_cleanup.append(cleanup) - - LOG.info(f"Start fake eureka on {host}:{port}") - web.run_app(eureka, host=host, port=port, shutdown_timeout=0, ssl_context=sslcontext) - - -if __name__ == '__main__': - main() diff --git a/deployments/docker/images/cega/server.py b/deployments/docker/images/cega/server.py deleted file mode 100644 index 5395de32..00000000 --- a/deployments/docker/images/cega/server.py +++ /dev/null @@ -1,110 +0,0 @@ -#!/usr/bin/env python3.6 -# -*- coding: utf-8 -*- - -''' -Test server to act as CentralEGA endpoint for users - -:author: Frédéric Haziza -:copyright: (c) 2017, NBIS System Developers. 
-''' - -import sys -import os -import asyncio -import ssl -import yaml -from pathlib import Path -from functools import wraps -from base64 import b64decode - -import logging as LOG - -from aiohttp import web -import jinja2 -import aiohttp_jinja2 - -instances = {} -for instance in os.environ.get('LEGA_INSTANCES','').strip().split(','): - instances[instance] = (Path(f'/cega/users/{instance}'), os.environ[f'CEGA_REST_{instance}_PASSWORD']) - -def protected(func): - @wraps(func) - def wrapped(request): - auth_header = request.headers.get('AUTHORIZATION') - if not auth_header: - raise web.HTTPUnauthorized(text=f'Protected access\n') - _, token = auth_header.split(None, 1) # Skipping the Basic keyword - instance,passwd = b64decode(token).decode().split(':', 1) - info = instances.get(instance) - if info is not None and info[1] == passwd: - request.match_info['lega'] = instance - request.match_info['users_dir'] = info[0] - return func(request) - raise web.HTTPUnauthorized(text=f'Protected access\n') - return wrapped - - -@aiohttp_jinja2.template('users.html') -async def index(request): - users={} - for instance, (users_dir, _) in instances.items(): - users[instance]= {} - files = [f for f in users_dir.iterdir() if f.is_file()] - for f in files: - with open(f, 'r') as stream: - users[instance][f.stem] = yaml.load(stream) - return { "cega_users": users } - -@protected -async def user(request): - name = request.match_info['id'] - lega_instance = request.match_info['lega'] - users_dir = request.match_info['users_dir'] - try: - with open(f'{users_dir}/{name}.yml', 'r') as stream: - d = yaml.load(stream) - json_data = { 'password_hash': d.get("password_hash",None), 'pubkey': d.get("pubkey",None), 'expiration': d.get("expiration",None) } - return web.json_response(json_data) - except OSError: - raise web.HTTPBadRequest(text=f'No info for that user {name} in LocalEGA {lega_instance}... 
yet\n') - -# Unprotected access -async def pgp_public_key(request): - name = request.match_info['id'] - try: - with open(f'/cega/users/pgp/{name}.pub', 'r') as stream: # 'rb' - return web.Response(text=stream.read()) # .hex() - except OSError: - raise web.HTTPBadRequest(text=f'No info about {name} in CentralEGA... yet\n') - -def main(): - - host = sys.argv[1] if len(sys.argv) > 1 else "0.0.0.0" - - # ssl_certfile = Path(CONF.get('keyserver', 'ssl_certfile')).expanduser() - # ssl_keyfile = Path(CONF.get('keyserver', 'ssl_keyfile')).expanduser() - # LOG.debug(f'Certfile: {ssl_certfile}') - # LOG.debug(f'Keyfile: {ssl_keyfile}') - - # sslcontext = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) - # sslcontext.check_hostname = False - # sslcontext.load_cert_chain(ssl_certfile, ssl_keyfile) - sslcontext = None - - loop = asyncio.get_event_loop() - server = web.Application(loop=loop) - - template_loader = jinja2.FileSystemLoader("/cega") - aiohttp_jinja2.setup(server, loader=template_loader) - - # Registering the routes - server.router.add_get( '/' , index, name='root') - server.router.add_get( '/user/{id}', user , name='user') - server.router.add_get( '/pgp/{id}' , pgp_public_key, name='pgp') - - # And ...... cue music! - web.run_app(server, host=host, port=80, shutdown_timeout=0, ssl_context=sslcontext) - -if __name__ == '__main__': - main() - diff --git a/deployments/docker/images/cega/users.html b/deployments/docker/images/cega/users.html deleted file mode 100644 index 51141526..00000000 --- a/deployments/docker/images/cega/users.html +++ /dev/null @@ -1,32 +0,0 @@ - - - - - Central EGA - - - -

Central EGA Users

- - {% for instance, lega_users in cega_users.items() %} -

{{ instance }}

-
- {% for username, data in lega_users.items() %} -
{{ username }}
-
password_hash{{ data['password_hash'] }}
-
pubkey{{ data['pubkey'] }}
-
expiration{{ data['expiration'] }}
- {% endfor %} -
- {% endfor %} - - - diff --git a/deployments/docker/images/inbox/Dockerfile b/deployments/docker/images/inbox/Dockerfile deleted file mode 100644 index 68ffd59f..00000000 --- a/deployments/docker/images/inbox/Dockerfile +++ /dev/null @@ -1,28 +0,0 @@ -FROM nbisweden/ega-base -LABEL maintainer "NBIS System Developers" - -EXPOSE 9000 -VOLUME /ega/inbox - -# Regenerate keys (no passphrase) -RUN ssh-keygen -t rsa -N '' -f /etc/ssh/ssh_host_rsa_key && \ - ssh-keygen -t dsa -N '' -f /etc/ssh/ssh_host_dsa_key && \ - ssh-keygen -t ecdsa -N '' -f /etc/ssh/ssh_host_ecdsa_key && \ - ssh-keygen -t ed25519 -N '' -f /etc/ssh/ssh_host_ed25519_key && \ - mkdir -p /usr/local/lib/ega && \ - echo '/usr/local/lib/ega' > /etc/ld.so.conf.d/ega.conf && \ - echo 'Welcome to Local EGA' > /ega/banner && \ - cp /etc/nsswitch.conf /etc/nsswitch.conf.bak && \ - sed -i -e 's/^passwd:\(.*\)files/passwd:\1files ega/' /etc/nsswitch.conf && \ - git clone https://github.com/NBISweden/LocalEGA-auth /root/ega-auth && \ - cd /root/ega-auth/src && \ - make install clean && \ - ldconfig -v - -COPY banner /ega/banner -COPY pam.ega /etc/pam.d/ega -COPY sshd_config /etc/ega/sshd_config -RUN cp /usr/sbin/sshd /usr/sbin/ega -# COPY entrypoint.sh /usr/local/bin/entrypoint.sh -# RUN chmod 755 /usr/local/bin/entrypoint.sh -# ENTRYPOINT ["entrypoint.sh"] diff --git a/deployments/docker/images/inbox/banner b/deployments/docker/images/inbox/banner deleted file mode 100644 index be26bc09..00000000 --- a/deployments/docker/images/inbox/banner +++ /dev/null @@ -1 +0,0 @@ -Welcome to Local EGA Demo instance diff --git a/deployments/docker/images/inbox/entrypoint.sh b/deployments/docker/images/inbox/entrypoint.sh deleted file mode 100755 index 55e735e0..00000000 --- a/deployments/docker/images/inbox/entrypoint.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/bin/bash - -set -e - -# Some env must be defined -[[ -z "${DB_INSTANCE}" ]] && echo 'Environment DB_INSTANCE is empty' 1>&2 && exit 1 -[[ -z "${CEGA_ENDPOINT}" ]] && echo 
'Environment CEGA_ENDPOINT is empty' 1>&2 && exit 1 -[[ -z "${CEGA_ENDPOINT_CREDS}" ]] && echo 'Environment CEGA_ENDPOINT_CREDS is empty' 1>&2 && exit 1 -[[ -z "${CEGA_ENDPOINT_JSON_PASSWD}" ]] && echo 'Environment CEGA_ENDPOINT_JSON_PASSWD is empty' 1>&2 && exit 1 -[[ -z "${CEGA_ENDPOINT_JSON_PUBKEY}" ]] && echo 'Environment CEGA_ENDPOINT_JSON_PUBKEY is empty' 1>&2 && exit 1 - -EGA_DB_IP=$(getent hosts ${DB_INSTANCE} | awk '{ print $1 }') -EGA_UID=$(id -u lega) -EGA_GID=$(id -g lega) - -# For the home directories -mkdir -p /lega -chmod 750 /lega - -cat > /etc/ega/auth.conf <> /etc/fstab -mount /ega/cache - -# Changing permissions -echo "Changing permissions for /ega/inbox" -chown lega:lega /ega/inbox -chmod 750 /ega/inbox -chmod g+s /ega/inbox # setgid bit - -# Start cronie -echo "Starting cron" -cat > /usr/local/bin/fuse_cleanup.sh </dev/null && rmdir \${mnt}; } || : -done -EOF -chmod 750 /usr/local/bin/fuse_cleanup.sh - -cat > /etc/crontab <&2 && exit 1 - -# Initialization -rabbitmq-plugins enable --offline rabbitmq_federation -rabbitmq-plugins enable --offline rabbitmq_federation_management -rabbitmq-plugins enable --offline rabbitmq_shovel -rabbitmq-plugins enable --offline rabbitmq_shovel_management - -{ -chown rabbitmq:rabbitmq /etc/rabbitmq/rabbitmq.config -chmod 640 /etc/rabbitmq/rabbitmq.config -chown rabbitmq:rabbitmq /etc/rabbitmq/defs.json -chmod 640 /etc/rabbitmq/defs.json -} || true - -# Problem of loading the plugins and definitions out-of-orders. -# Explanation: https://github.com/rabbitmq/rabbitmq-shovel/issues/13 -# Therefore: we run the server, with some default confs -# and then we upload the cega-definitions through the HTTP API - -# We cannot add those definitions to defs.json (loaded by the -# management plugin. See /etc/rabbitmq/rabbitmq.config) -# So we use curl afterwards, to upload the extras definitions -# See also https://pulse.mozilla.org/api/ - -# dest-exchange-key is not set for the shovel, so the key is re-used. 
- -# For the moment, still using guest:guest -cat > /etc/rabbitmq/defs-cega.json <&1 && exit 1 - - ROUND=30 - until rabbitmqadmin import /etc/rabbitmq/defs-cega.json || ((ROUND<0)) - do - sleep 1 - $((ROUND--)) - done - ((ROUND<0)) && echo "Central EGA connections *_not_* loaded" 2>&1 && exit 1 - echo "Central EGA connections loaded" -} & - -exec "$@" # ie CMD rabbitmq-server diff --git a/deployments/docker/images/mq/rabbitmq.config b/deployments/docker/images/mq/rabbitmq.config deleted file mode 100644 index e139ce88..00000000 --- a/deployments/docker/images/mq/rabbitmq.config +++ /dev/null @@ -1,15 +0,0 @@ -%% -*- mode: erlang -*- -%% -[{rabbit,[{loopback_users, [ ] }, - {tcp_listeners, [ 5672 ] }, - {ssl_listeners, [ ] }, - {hipe_compile, false }, - {default_vhost, "/"}, - {default_user, "guest"}, - {default_pass, "guest"}, - {default_permissions, [".*", ".*",".*"]}, - {default_user_tags, [administrator]}, - {disk_free_limit, "1GB"}]}, - {rabbitmq_management, [ { listener, [ { port, 15672 }, { ssl, false }] }, - { load_definitions, "/etc/rabbitmq/defs.json"} ]} -]. 
diff --git a/deployments/docker/test/.gitignore b/deployments/docker/test/.gitignore deleted file mode 100644 index 5015af38..00000000 --- a/deployments/docker/test/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -*.bam -*.c4ga -*.c4ga.md5 -*.md5 -mq.env diff --git a/deployments/docker/test/Makefile b/deployments/docker/test/Makefile deleted file mode 100644 index df1b8173..00000000 --- a/deployments/docker/test/Makefile +++ /dev/null @@ -1,57 +0,0 @@ -.PHONY: upload submit user - -# folder for the localegarepo -MAIN_REPO=~/LocalEGA - -# Dummy key -SSH_KEY_PUB=~/.ssh/lega.pub -SSH_KEY_PRIV=~/.ssh/lega - -USER=ega-box-999 -FILE=HG00458.unmapped.ILLUMINA.bwa.CHS.low_coverage.20130415.bam - -############################## - -DOCKER_PATH=$(MAIN_REPO)/deployments/docker -INSTANCE_PORT=$(shell awk -F= '/DOCKER_PORT_inbox/ {print $$2}' $(DOCKER_PATH)/bootstrap/settings.rc) -PGP_PUB=$(DOCKER_PATH)/private/lega/pgp/ega.pub -PGP_EMAIL=$(shell awk -F= '/PGP_EMAIL/ {print $$2}' $(DOCKER_PATH)/bootstrap/settings.rc) -CEGA_USERS=$(DOCKER_PATH)/private/cega/users -CEGA_MQ_CONNECTION=$(shell awk -F= '/^CEGA_CONNECTION/ {print $$2}' $(DOCKER_PATH)/private/lega/mq.env) - -############################## - -all: user upload submit - -# $(FILE): -# @echo 'Hello' > $(FILE) - -$(FILE).c4ga: $(FILE) - lega-cryptor encrypt --pk $(PGP_PUB) -i $< -o $@ - -# lega-cryptor encrypt -r Sweden -i $< -o $@ - -upload: $(FILE).c4ga - cd $( $@ - -$(FILE).md5: $(FILE) - printf '%s' $(shell md5 $< | cut -d' ' -f4) > $@ - -submit: $(FILE).c4ga $(FILE).c4ga.md5 $(FILE).md5 - @echo publish.py --connection amqp://[redacted]@$(lastword $(subst @, ,$(CEGA_MQ_CONNECTION))) $(USER) dir/$(FILE).c4ga --enc ... 
- @python $(MAIN_REPO)/extras/publish.py --connection $(subst cega-mq,localhost,$(CEGA_MQ_CONNECTION)) $(USER) $(FILE).c4ga --enc $(shell cat $(FILE).c4ga.md5) --stableID EGAF$(shell cat $(FILE).md5) - -user: $(CEGA_USERS)/lega/$(USER).yml - -$(CEGA_USERS)/lega/$(USER).yml: $(CEGA_USERS)/$(USER).yml - -cd $(CEGA_USERS)/lega && ln -s ../$(USER).yml . -$(CEGA_USERS)/$(USER).yml: - @echo --- > $@ - @echo "pubkey: $(shell cat $(SSH_KEY_PUB))" >> $@ - -clean: - -unlink $(CEGA_USERS)/lega/$(USER).yml - rm -rf $(FILE).c4ga $(FILE).c4ga.md5 $(FILE).md5 $(CEGA_USERS)/$(USER).yml diff --git a/deployments/docker/test/README.md b/deployments/docker/test/README.md deleted file mode 100644 index 93100efb..00000000 --- a/deployments/docker/test/README.md +++ /dev/null @@ -1,12 +0,0 @@ -## Testing script - -Testing script is used to replicate upload and submission functionalities from an end user. -Before using the script make sure there is a key `~/.ssh/lega.pub` and `~/.ssh/lega` or replace them with -your own in the `Makefile`. Also `MAIN_REPO=~/LocalEGA` should reflect the path do the LocalEGA project. 
- -Using the script: -``` -make user -make upload -make submit -``` From 50761d8b43ee09beb61081bbb29d403ec233803e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Haziza?= Date: Mon, 20 Aug 2018 14:09:07 +0200 Subject: [PATCH 03/26] async db was removed --- tests/test_db.py | 12 +-- tests/test_inbox.py | 217 -------------------------------------------- 2 files changed, 1 insertion(+), 228 deletions(-) delete mode 100644 tests/test_inbox.py diff --git a/tests/test_db.py b/tests/test_db.py index 21ea39dc..97ebeecb 100644 --- a/tests/test_db.py +++ b/tests/test_db.py @@ -1,6 +1,6 @@ import unittest from lega.utils.db import insert_file, get_errors, set_error, get_info, set_info -from lega.utils.db import set_status, Status, fetch_args, create_pool, connect +from lega.utils.db import set_status, Status, fetch_args, connect from unittest import mock import asyncio @@ -77,16 +77,6 @@ def test_set_status(self, mock_connect): set_status('file_id', Status.In_Progress) mock_connect().__enter__().cursor().__enter__().execute.assert_called() - @mock.patch('lega.utils.db.fetch_args') - @mock.patch('lega.utils.db.aiopg.create_pool') - def test_create_pool(self, mock_aiopg, mock_args): - """Create pool should call aipg and fetch args.""" - f = asyncio.Future() - f.set_result('whatever result you want') - mock_aiopg.return_value = f - self._loop.run_until_complete(create_pool(self._loop)) - mock_args.assert_called() - mock_aiopg.assert_called() def test_fetch_args(self): """Test fetching arguments.""" diff --git a/tests/test_inbox.py b/tests/test_inbox.py deleted file mode 100644 index a7a617c5..00000000 --- a/tests/test_inbox.py +++ /dev/null @@ -1,217 +0,0 @@ -import unittest -from lega.inbox import LegaFS, FuseOSError, parse_options, main -from unittest import mock -import types -from testfixtures import TempDirectory - - -class TestLegaFS(unittest.TestCase): - """LegaFS - - Testing LocalEGA FS.""" - - def setUp(self): - """Setting things up.""" - self._dir = 
TempDirectory() - self._fs = LegaFS("/root/is/this/", "user", self._dir.path) - self._path = self._dir.write('test.smth', 'test'.encode('utf-8')) - self._path = self._dir.write('test.md5', 'md5'.encode('utf-8')) - - def tearDown(self): - """Remove setup variables.""" - self._dir.cleanup_all() - - # Testing these is really optional, but good to do. - - @mock.patch('os.path.join') - def test_real_path(self, mocked): - """Test retrieve real path, should call os.path.join and return a path.""" - mocked.return_value = "/root/is/this/dir/to/use" - result = self._fs.real_path('/dir/to/use') - assert result == "/root/is/this/dir/to/use" - - @mock.patch('os.lstat') - def test_getattr(self, mocked): - """Test get file attributes, should call os.lstat with mock values.""" - mocked.return_value = mock.Mock(st_mode=33188, st_ino=14551755, st_dev=64768, st_nlink=1, - st_uid=90393, st_gid=101, st_size=4170, st_atime=1525082089, - st_mtime=1525081721, st_ctime=1525081724) - expected_result = {'st_uid': 90393, 'st_gid': 101, 'st_mode': 33188, 'st_size': 4170, 'st_nlink': 1, - 'st_atime': 1525082089, 'st_ctime': 1525081724, 'st_mtime': 1525081721} - result = self._fs.getattr('/dir/to/use') - assert result == expected_result - - @mock.patch('os.statvfs') - def test_statvfs(self, mocked): - """Test statvfs, should call os.statvfs with mock values.""" - mocked.return_value = mock.Mock(f_bsize=4096, f_frsize=4096, f_blocks=59241954, f_bfree=42909971, - f_bavail=39894880, f_files=15056896, f_ffree=13746982, f_favail=13746982, - f_flag=4096, f_namemax=255) - - expected_result = {'f_bavail': 39894880, 'f_bfree': 42909971, 'f_blocks': 59241954, 'f_bsize': 4096, - 'f_favail': 13746982, 'f_ffree': 13746982, 'f_files': 15056896, 'f_flag': 4096, - 'f_frsize': 4096, 'f_namemax': 255} - result = self._fs.statfs('/dir/to/use') - assert result == expected_result - - @mock.patch('os.access') - def test_no_access(self, mocked): - """Raise FuseOSError as user does not have access.""" - # if this was 
not tested the errno missing module - # would not have been spotted - mocked.return_value = False - with self.assertRaises(FuseOSError): - self._fs.access('/some/paht', 'rb') - - @mock.patch('os.fsync') - def test_fsync(self, mocked): - """Test LegaFS flush call, should call os.fsync.""" - self._fs.flush('/dir/to/use.txt', 'O_WRONLY') - self.assertTrue(mocked.called) - self._fs.fsync('/dir/to/use.txt', 'fdatasync', 'O_WRONLY') - self.assertTrue(mocked.called) - - @mock.patch('os.unlink') - def test_unlink(self, mocked): - """Test LegaFS unlink call, should call os.unlink.""" - self._fs.unlink('/dir/to/use.txt') - self.assertTrue(mocked.called) - - @mock.patch('os.rmdir') - def test_rmdir(self, mocked): - """Test LegaFS rmdir call, should call os.rmdir.""" - self._fs.rmdir('/dir/to/use') - self.assertTrue(mocked.called) - - @mock.patch('os.mkdir') - def test_mkdir(self, mocked): - """Test LegaFS mkdir call.""" - self._fs.mkdir('/dir/to/use', 0o755) - self.assertTrue(mocked.called) - - @mock.patch('os.chmod') - def test_chmod(self, mocked): - """Test LegaFS chmod call, should call os.chmod.""" - self._fs.chmod('/dir/to/use', 0o755) - self.assertTrue(mocked.called) - - @mock.patch('os.chown') - def test_chown(self, mocked): - """Test LegaFS chown call, should call os.chown.""" - self._fs.chown('/dir/to/use.txt', 1, 2) - self.assertTrue(mocked.called) - - @mock.patch('os.open') - def test_open(self, mocked): - """Test LegaFS open call.""" - self._fs.open('/dir/to/use.txt', 'O_WRONLY') - self.assertTrue(mocked.called) - - @mock.patch('os.open') - def test_create(self, mocked): - """Test LegaFS create call, should call os.open.""" - self._fs.create('/dir/to/use.txt', 'O_WRONLY') - self.assertTrue(mocked.called) - - @mock.patch('os.rename') - def test_rename(self, mocked): - """Test LegaFS rename call, should call os.rename.""" - self._fs.rename('/dir/to/old.txt', '/dir/new.txt') - self.assertTrue(mocked.called) - - @mock.patch('os.utime') - def test_utimens(self, 
mocked): - """Test LegaFS utime call, should call os.utime.""" - self._fs.utimens('/dir/to/file.txt') - self.assertTrue(mocked.called) - - @mock.patch('os.read') - @mock.patch('os.lseek') - def test_read(self, mocklseek, mockread): - """Test LegaFS read call, should call os.read and os.leek.""" - self._fs.read('/dir/to/use.txt', 100, 0, 'O_RDONLY') - self.assertTrue(mocklseek.called) - self.assertTrue(mockread.called) - - @mock.patch('os.write') - @mock.patch('os.lseek') - def test_write(self, mocklseek, mockwrite): - """Test LegaFS write call, should call os.write and os.leek.""" - self._fs.write('/dir/to/use.txt', 100, 0, 'O_WRONLY') - self.assertTrue(mocklseek.called) - self.assertTrue(mockwrite.called) - - @mock.patch('os.walk') - def test_readdir(self, mockwalk): - """Test LegaFS readdir, should return a generator.""" - result = self._fs.readdir('/dir/to/use', 1) - self.assertTrue(isinstance(result, types.GeneratorType)) - - def test_parse_options_missing(self): - """Test parse options, should exit because args missing.""" - with self.assertRaises(SystemExit): - parse_options() - - @mock.patch('argparse.ArgumentParser.add_argument') - @mock.patch('argparse.ArgumentParser.parse_args') - def test_parse_options(self, mock_parser, mock_add): - """Test parse options, call arg parsers.""" - parse_options() - mock_add.assert_called() - mock_parser.assert_called() - - def test_truncate(self): - """Test LegaFS truncate, should add path to pending.""" - self._fs.truncate('test.md5', 1) - self.assertEqual({'test.md5'}, self._fs.pending) - - def test_destroy(self): - """Test LegaFS destroy, should close connection to broker.""" - self._fs.connection = mock.Mock() - self._fs.destroy('/random/path') - self._fs.connection.close.assert_called() - - @mock.patch('lega.inbox.publish') - @mock.patch('lega.inbox.get_connection') - @mock.patch('os.close') - def test_release(self, mock_closed, mock_broker, mock_publish): - """Test LegaFS release, should send message and close 
file.""" - self._fs.pending.add('test.smth') - self._fs.release('test.smth', 1) - mock_closed.assert_called() - - @mock.patch('lega.inbox.publish') - @mock.patch('lega.inbox.get_connection') - def test_send_message(self, mock_broker, mock_publish): - """Sending message should try to publish info to broker.""" - self._fs.send_message('test.smth') - self._fs.send_message('test.md5') - mock_publish.assert_called() - - @mock.patch('lega.inbox.FUSE') - @mock.patch('os.chmod') - @mock.patch('os.makedirs') - @mock.patch('lega.inbox.parse_options') - def test_main(self, mock_options, mock_makedirs, mock_chmod, mock_fs): - """Testing main inbox withouth uid and gid should call FUSE.""" - mock_options.return_value = ('mount', True, {'user': '1'}) - mock_makedirs.return_value = '' - mock_chmod.return_value = mock.Mock() - mock_fs.return_value = mock.Mock() - main() - mock_fs.assert_called() - - @mock.patch('lega.inbox.FUSE') - @mock.patch('os.chown') - @mock.patch('os.chmod') - @mock.patch('os.makedirs') - @mock.patch('lega.inbox.parse_options') - def test_main_with_uid(self, mock_options, mock_makedirs, mock_chmod, mock_chown, mock_fs): - """Testing main inbox with uid and gid should call FUSE.""" - mock_options.return_value = ('mount', True, {'user': '1', 'uid': '1', 'gid': '1'}) - mock_makedirs.return_value = '' - mock_chown.return_value = mock.Mock() - mock_chmod.return_value = mock.Mock() - mock_fs.return_value = mock.Mock() - main() - mock_fs.assert_called() From 7d10766a63d017bdff2bb3d86c19f7937e29cb5a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Haziza?= Date: Mon, 20 Aug 2018 14:12:44 +0200 Subject: [PATCH 04/26] Awaiting the sleep coroutine --- lega/utils/eureka.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lega/utils/eureka.py b/lega/utils/eureka.py index 23e81f9c..2342562d 100644 --- a/lega/utils/eureka.py +++ b/lega/utils/eureka.py @@ -39,7 +39,7 @@ async def _retry(run, on_failure=None): asyncio.TimeoutError) as e: 
LOG.debug(f"Eureka connection error: {e!r}") LOG.debug(f"Retrying in {backoff} seconds") - asyncio.sleep(backoff) + await asyncio.sleep(backoff) count += 1 backoff = (2 ** (count // 10)) * try_interval From c6dcbf3fe5c18d45dea63bda84e5c09b917d8d36 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Haziza?= Date: Mon, 20 Aug 2018 14:59:43 +0200 Subject: [PATCH 05/26] Trying to fix the eureka tests --- lega/conf/__init__.py | 11 ++--- lega/conf/defaults.ini | 3 +- lega/utils/eureka.py | 92 +++++++++++++++++------------------------- 3 files changed, 46 insertions(+), 60 deletions(-) diff --git a/lega/conf/__init__.py b/lega/conf/__init__.py index ceb1d1d2..a35d2170 100644 --- a/lega/conf/__init__.py +++ b/lega/conf/__init__.py @@ -137,15 +137,16 @@ def get_value(self, section, option, conv=str, default=None, raw=False): ``section`` and ``option`` are mandatory while ``conv``, ``default`` (fallback) and ``raw`` are optional. """ - result = os.environ.get('_'.join([section.upper(), option.upper()]), None) - if result: + result = os.environ.get(f'{section.upper()}_{option.upper()}', None) + if result is not None: # it might be empty return self._convert(result, conv) - elif result is None and self.has_option(section, option): - return self._convert(self.get(section, option, fallback=default, raw=raw), conv) + #if self.has_option(section, option): + return self._convert(self.get(section, option, fallback=default, raw=raw), conv) def _convert(self, value, conv): """Convert value properly to ``str``, ``float`` or ``int``, also consider ``bool`` type.""" if conv == bool: + assert value, "Can not convert an empty value" val = value.lower() if val in ('y', 'yes', 't', 'true', 'on', '1'): return True @@ -154,7 +155,7 @@ def _convert(self, value, conv): else: raise ValueError(f"Invalid truth value: {val}") else: - return conv(value) + return conv(value) # raise error in case we can't convert an empty value CONF = Configuration() diff --git a/lega/conf/defaults.ini 
b/lega/conf/defaults.ini index c5fa5a5e..2c6fb71e 100644 --- a/lega/conf/defaults.ini +++ b/lega/conf/defaults.ini @@ -65,4 +65,5 @@ status_endpoint = /health [eureka] endpoint = http://localhost:8761 # in seconds -interval = 20 +# try_interval = 20 +# try = 5 diff --git a/lega/utils/eureka.py b/lega/utils/eureka.py index 2342562d..01bf5d91 100644 --- a/lega/utils/eureka.py +++ b/lega/utils/eureka.py @@ -11,6 +11,7 @@ import uuid from functools import wraps +from lega.conf import CONF eureka_status = { 0: 'UP', @@ -23,55 +24,38 @@ LOG = logging.getLogger(__name__) -async def _retry(run, on_failure=None): - # similar to the rety loop from db.py - """Main retry loop.""" - nb_try = 5 - try_interval = 20 - LOG.debug(f"{nb_try} attempts (every {try_interval} seconds)") - count = 0 - backoff = try_interval - while count < nb_try: - try: - return await run() - except (aiohttp.ClientResponseError, - aiohttp.ClientError, - asyncio.TimeoutError) as e: - LOG.debug(f"Eureka connection error: {e!r}") - LOG.debug(f"Retrying in {backoff} seconds") - await asyncio.sleep(backoff) - count += 1 - backoff = (2 ** (count // 10)) * try_interval - - # fail to connect - if nb_try: - LOG.debug(f"Eureka server connection fail after {nb_try} attempts ...") - else: - LOG.debug("Eureka server attempts was set to 0 ...") - - if on_failure: - on_failure() - - -def retry_loop(on_failure=None): +def retry_loop(func): """Decorator retry something ``try`` times every ``try_interval`` seconds.""" - def decorator(func): - if asyncio.iscoroutinefunction(func): - @wraps(func) - async def wrapper(*args, **kwargs): - async def _process(): - return await func(*args, **kwargs) - return await _retry(_process, on_failure=on_failure) - return wrapper - return decorator - - -def _do_exit(): - LOG.error("Could not connect to the Eureka.") - pass - # We don't fail right away as we expect the keysever to continue - # Under "normal deployment" this should exit ? 
- # sys.exit(1) + assert asyncio.iscoroutinefunction(func), "This decorator is only for coroutines" + @wraps(func) + async def wrapper(*args, **kwargs): + """Main retry loop.""" + # similar to the rety loop from db.py + nb_try = CONF.get_value('eureka', 'try', conv=int, default=1) + try_interval = CONF.get_value('eureka', 'try_interval', conv=int, default=1) + LOG.debug(f"{nb_try} attempts (initial backoff: {try_interval} seconds)") + count = 0 + backoff = try_interval + while count < nb_try: + LOG.debug(f"Attempt {count} [backoff: {backoff} seconds]") + try: + return await func(*args, **kwargs) + except (aiohttp.ClientResponseError, + aiohttp.ClientError, + asyncio.TimeoutError) as e: + LOG.debug(f"Eureka connection error: {e!r}") + LOG.debug(f"Retrying in {backoff} seconds") + await asyncio.sleep(backoff) + count += 1 + backoff = (2 ** (count // 10)) * try_interval + + # fail to connect + if nb_try: + LOG.debug(f"Eureka server connection fail after {nb_try} attempts ...") + else: + LOG.debug("Eureka server attempts was set to 0 ...") + # sys.exit(2) + return wrapper class EurekaRequests: @@ -92,7 +76,7 @@ def __init__(self, eureka_url='http://localhost:8761', loop=None): 'Content-Type': 'application/json', } - @retry_loop(on_failure=_do_exit) + @retry_loop async def out_of_service(self, app_name, instance_id): """Take an instance out of service.""" url = f'{self._eureka_url}/apps/{app_name}/{instance_id}/status?value={eureka_status[3]}' @@ -137,7 +121,7 @@ async def get_by_svip(self, svip_address): url = f'{self._eureka_url}/vips/{svip_address}' return await self._get_request(url) - @retry_loop(on_failure=_do_exit) + @retry_loop async def _get_request(self, url): """General GET request, to simplify things. 
Expect always JSON as headers set.""" async with aiohttp.ClientSession(headers=self._headers) as session: @@ -167,7 +151,7 @@ def __init__(self, app_name, port, ip_addr, hostname, self._health_check_url = health_check_url if health_check_url else _default_health self._status_check_url = status_check_url if status_check_url else self._health_check_url - @retry_loop(on_failure=_do_exit) + @retry_loop async def register(self, metadata=None, lease_duration=60, lease_renewal_interval=20): """Register application with Eureka.""" payload = { @@ -206,7 +190,7 @@ async def register(self, metadata=None, lease_duration=60, lease_renewal_interva return resp.status LOG.debug('Eureka register response %s' % resp.status) - @retry_loop(on_failure=_do_exit) + @retry_loop async def renew(self): """Renew the application's lease.""" url = f'{self._eureka_url}/apps/{self._app_name}/{self._instance_id}' @@ -216,7 +200,7 @@ async def renew(self): return resp.status LOG.debug('Eureka renew response %s' % resp.status) - @retry_loop(on_failure=_do_exit) + @retry_loop async def deregister(self): """Deregister with the remote server, to avoid 500 eror.""" url = f'{self._eureka_url}/apps/{self._app_name}/{self._instance_id}' @@ -226,7 +210,7 @@ async def deregister(self): return resp.status LOG.debug('Eureka deregister response %s' % resp.status) - @retry_loop(on_failure=_do_exit) + @retry_loop async def update_metadata(self, key, value): """Update metadata of application.""" url = f'{self._eureka_url}/apps/{self._app_name}/{self._instance_id}/metadata?{key}={value}' From 79fb52a0878c37a8b3725f9c4150977dbb44142e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Haziza?= Date: Mon, 20 Aug 2018 15:20:44 +0200 Subject: [PATCH 06/26] Cleaning --- docker/images/Makefile | 2 +- docker/images/base/Dockerfile | 4 ++-- docker/images/inbox/Dockerfile | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docker/images/Makefile b/docker/images/Makefile index 27468ac2..1096bc3a 
100644 --- a/docker/images/Makefile +++ b/docker/images/Makefile @@ -8,7 +8,7 @@ BUILD_ARGS=--build-arg checkout=$(CHECKOUT) .PHONY: all erase delete clean cleanall base inbox -all: base lega inbox +all: base inbox base: PIP_EGA_PACKAGES=pika==0.12.0 psycopg2-binary==2.7.5 cryptography==2.3 aiohttp==3.0.7 aiohttp-jinja2==0.13.0 pgpy boto3 aiohttp-jinja2==0.13.0 inbox: BUILD_ARGS+=--build-arg AUTH_BRANCH=cega-ids diff --git a/docker/images/base/Dockerfile b/docker/images/base/Dockerfile index 8f25499f..041f2034 100644 --- a/docker/images/base/Dockerfile +++ b/docker/images/base/Dockerfile @@ -35,8 +35,8 @@ RUN gosu nobody true && \ RUN yum clean all && rm -rf /var/cache/yum -RUN groupadd -r lega -# useradd -M -r -g lega lega +RUN groupadd -r lega && \ + useradd -M -r -g lega lega ARG PIP_EGA_PACKAGES= RUN pip3.6 install --upgrade pip && \ diff --git a/docker/images/inbox/Dockerfile b/docker/images/inbox/Dockerfile index 275fb048..8466d39b 100644 --- a/docker/images/inbox/Dockerfile +++ b/docker/images/inbox/Dockerfile @@ -38,8 +38,6 @@ RUN mkdir -p /usr/local/lib/ega && \ git checkout ${AUTH_BRANCH} && \ make install clean -RUN ldconfig -v - ################################################# ## ## Install OpenSSH from sources @@ -98,6 +96,8 @@ RUN rm -rf /root/openssh && \ ################################################# +RUN ldconfig -v + COPY banner /ega/banner COPY pam.ega /etc/pam.d/ega COPY sshd_config /etc/ega/sshd_config From 864ae730ec2ec2a23ceda33fd0577a65c8121a13 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Haziza?= Date: Mon, 20 Aug 2018 15:47:58 +0200 Subject: [PATCH 07/26] Fixing requirements in docker images --- docker/images/Makefile | 16 ++++++++++++++-- docker/images/base/Dockerfile | 35 +++++++++++++++++++++++++++++------ docker/test/Makefile | 4 ++-- 3 files changed, 45 insertions(+), 10 deletions(-) diff --git a/docker/images/Makefile b/docker/images/Makefile index 1096bc3a..c95ffeb5 100644 --- a/docker/images/Makefile +++ 
b/docker/images/Makefile @@ -1,8 +1,20 @@ +# Add those packages to the containers, in case DEV is defined +ifdef DEV +DEV_PACKAGES=nss-tools nc nmap tcpdump lsof strace bash-completion bash-completion-extras +endif + CHECKOUT=$(shell git rev-parse --abbrev-ref HEAD) TAG=$(shell git rev-parse --short HEAD) -TARGET_PREFIX=nbisweden/ega +ifdef TRAVIS_COMMIT +TAG=$(TRAVIS_COMMIT) +CHECKOUT=$(TAG) +endif +ifdef TRAVIS_PULL_REQUEST_SHA +TAG=$(TRAVIS_PULL_REQUEST_SHA) +endif +TARGET_PREFIX=nbisweden/ega BUILD_ARGS=--build-arg checkout=$(CHECKOUT) @@ -10,7 +22,7 @@ BUILD_ARGS=--build-arg checkout=$(CHECKOUT) all: base inbox -base: PIP_EGA_PACKAGES=pika==0.12.0 psycopg2-binary==2.7.5 cryptography==2.3 aiohttp==3.0.7 aiohttp-jinja2==0.13.0 pgpy boto3 aiohttp-jinja2==0.13.0 +base: BUILD_ARGS+=--build-arg DEV_PACKAGES="$(DEV_PACKAGES)" inbox: BUILD_ARGS+=--build-arg AUTH_BRANCH=cega-ids base inbox: docker build ${BUILD_ARGS} \ diff --git a/docker/images/base/Dockerfile b/docker/images/base/Dockerfile index 041f2034..60208217 100644 --- a/docker/images/base/Dockerfile +++ b/docker/images/base/Dockerfile @@ -1,10 +1,17 @@ FROM centos:7.4.1708 LABEL maintainer "NBIS System Developers" +################################################# +## +## Upgrade CentOS 7, and install Python 3.6 +## +################################################# + +ARG DEV_PACKAGES= RUN yum -y install https://centos7.iuscommunity.org/ius-release.rpm && \ yum -y install epel-release && \ yum -y update && \ - yum -y install git gcc make bzip2 curl \ + yum -y install git gcc make bzip2 curl ${DEV_PACKAGES} \ zlib-devel bzip2-devel unzip \ wget dpkg \ openssl \ @@ -12,6 +19,15 @@ RUN yum -y install https://centos7.iuscommunity.org/ius-release.rpm && \ RUN [[ -e /lib64/libpython3.6m.so ]] || ln -s /lib64/libpython3.6m.so.1.0 /lib64/libpython3.6m.so +RUN pip3.6 install --upgrade pip && \ + pip3.6 install PyYaml + +################################################# +## +## Install GOSU +## 
+################################################# + ENV GOSU_VERSION 1.10 ENV GPG_KEYS B42F6819007F00F88E364FD4036A9C25BF357DD4 RUN set -ex && \ @@ -33,17 +49,24 @@ RUN export GNUPGHOME="$(mktemp -d)" && \ RUN gosu nobody true && \ yum -y remove dpkg +################################################# +# Clean up + RUN yum clean all && rm -rf /var/cache/yum +################################################# +## +## Install LocalEGA stuff +## +################################################# + RUN groupadd -r lega && \ useradd -M -r -g lega lega -ARG PIP_EGA_PACKAGES= -RUN pip3.6 install --upgrade pip && \ - pip3.6 install PyYaml ${PIP_EGA_PACKAGES} +ARG checkout=dev +RUN pip3.6 install -r https://raw.githubusercontent.com/NBISweden/LocalEGA/${checkout}/requirements.txt RUN pip3.6 install git+https://github.com/NBISweden/LocalEGA.git@${checkout} +RUN pip3.6 install -r https://raw.githubusercontent.com/NBISweden/LocalEGA-cryptor/master/requirements.txt RUN pip3.6 install git+https://github.com/NBISweden/LocalEGA-cryptor.git - - diff --git a/docker/test/Makefile b/docker/test/Makefile index be5b81b8..2311c58c 100644 --- a/docker/test/Makefile +++ b/docker/test/Makefile @@ -1,7 +1,7 @@ .PHONY: upload submit user # folder for the localegarepo -MAIN_REPO=~/_ega +MAIN_REPO=~/LocalEGA # Dummy key SSH_KEY_PUB=~/.ssh/lega.pub @@ -13,7 +13,7 @@ FILE=HG00458.unmapped.ILLUMINA.bwa.CHS.low_coverage.20130415.bam ############################## -DOCKER_PATH=$(MAIN_REPO)/deploy +DOCKER_PATH=$(MAIN_REPO)/docker INSTANCE_PORT=$(shell awk -F= '/DOCKER_PORT_inbox/ {print $$2}' $(DOCKER_PATH)/bootstrap/settings.rc) PGP_PUB=$(DOCKER_PATH)/private/lega/pgp/ega.pub PGP_EMAIL=$(shell awk -F= '/PGP_EMAIL/ {print $$2}' $(DOCKER_PATH)/bootstrap/settings.rc) From 91d9885f437a18ae13847e10dd734805174b399a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Haziza?= Date: Mon, 20 Aug 2018 16:22:11 +0200 Subject: [PATCH 08/26] Updating the docs --- README.md | 2 +- docs/conf.py | 2 +- 
docs/inbox.rst | 196 ++++++++++++++++++++++++------------------------- 3 files changed, 100 insertions(+), 100 deletions(-) diff --git a/README.md b/README.md index c9f1ef67..df633ed6 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ The [code](lega) is written in Python (3.6+). You can provision and deploy the different components: -* locally, using [docker-compose](deployments/docker). +* locally, using [docker-compose](docker). * on an OpenStack cluster, using [terraform](https://github.com/NBISweden/LocalEGA-deploy-terraform). * on a Kubernetes/OpenShift cluster, using [kubernetes](https://github.com/NBISweden/LocalEGA-deploy-k8s) * on a Docker Swarm cluster, using [Gradle](https://github.com/NBISweden/LocalEGA-deploy-swarm) diff --git a/docs/conf.py b/docs/conf.py index f58e6ac3..35877c17 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -22,7 +22,7 @@ def __getattr__(cls, name): # Some modules need to be mocked -MOCK_MODULES = ['fuse', 'yaml', 'pika', 'aiohttp', 'asyncio', 'psycopg2', 'aiopg'] +MOCK_MODULES = ['yaml', 'pika', 'aiohttp', 'asyncio', 'psycopg2'] sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES) # If your documentation needs a minimal Sphinx version, state it here. diff --git a/docs/inbox.rst b/docs/inbox.rst index 7e00bff4..8d289deb 100644 --- a/docs/inbox.rst +++ b/docs/inbox.rst @@ -8,7 +8,7 @@ Central EGA contains a database of users, with IDs and passwords. We have developed two solutions both of them allowing user authentication via either a password or an RSA key against CentralEGA database itself: -* :ref:`nss-pam-inbox` also known as FUSE based inbox; +* :ref:`openssh-inbox`; * :ref:`apache-mina-inbox`. Each solution uses CentralEGA's user IDs but can also be extended to @@ -26,26 +26,34 @@ expire. The cache has a default TTL of one hour, and is wiped clean upon reboot (as a cache should). -.. _nss-pam-inbox: +.. 
_openssh-inbox: -NSS+PAM Inbox +OpenSSH Inbox ------------- -The user's home directory is created when its credentials are retrieved -from CentralEGA. Moreover, for each user, we use FUSE mountpoint and -``chroot`` the user into it. The FUSE application is in charge of -detecting when the file upload is completed and computing its -checksum. This information is provided to CentralEGA via a -:doc:`shovel mechanism on the local message broker `. +We use the OpenSSH SFTP server (version 7.7p1), on a Linux +distribution (currently CentOS7). + +Authentication is performed by the Operating System, using the classic +plugable mechanism (PAM), and username resolution module (called NSS). + +The user's home directory is created when its credentials are +retrieved from CentralEGA. Moreover, we isolate each user in its +respective home directory (i.e. we ``chroot`` the user into it). + +We installed a hook inside the OpenSSH SFTP server to detect when a +file is (re)uploaded. The hook runs a checksum on the uploaded file +and notifies CentralEGA via a :doc:`shovel mechanism on the local +message broker `. Configuration ^^^^^^^^^^^^^ -The NSS and PAM modules look at ``/etc/ega/auth.conf``. +The NSS and PAM modules are configured by the file ``/etc/ega/auth.conf``. Some configuration parameters can be specified, while others have default values in case they are not specified. Some of the parameters must be -specified (mostly those for which we can invent a value!). +specified (mostly those for which we can't invent a value!). A sample configuration file can be found on the `LocalEGA-auth repository @@ -54,58 +62,64 @@ eg: .. code-block:: none - ################## - # Central EGA - # - # The username will be appended to the endpoint - # eg the endpoint for 'john' will be - # http://cega_users/user/john - # - # Note: Change the cega_creds ! 
- # - ################## - - enable_cega = yes - cega_endpoint = http://cega_users/user/ + ########################################## + # Remote database settings (using ReST) + ########################################## + + # The username will be appended to the endpoints + cega_endpoint_name = http://cega_users/user/ + cega_endpoint_uid = http://cega_users/id/ cega_creds = user:password - cega_json_passwd = .password - cega_json_pubkey = .public_key - - ################## - # NSS & PAM - ################## - - cache_ttl = 36000.0 # Float in seconds... Here 10 hours - prompt = Knock Knock: - cache_dir = /ega/cache - ega_gecos = EGA User - ega_shell = /sbin/nologin - - ega_uid = 1000 - ega_gid = 1000 - + + # Selects where the JSON object is rooted + # Use a dotted format à la JQ, eg level1.level2.level3 + # Default: empty + cega_json_prefix = + + ########################################## + # Local database settings (for NSS & PAM) + ########################################## + + # Absolute path to the SQLite database. + # Required setting. No default value. + db_path = /run/ega-users.db + + # Sets how long a cache entry is valid, in seconds. + # Default: 3600 (ie 1h). + # cache_ttl = 86400 + + # Per site configuration, to shift the users id range + # Default: 10000 + #ega_uid_shift = 1000 + + # The group to which all users belong. + # For the moment, only only. + # Required setting. No default. + ega_gid = 997 + + # This causes the PAM sessions to be chrooted into the user's home directory. + # Useful for SFTP connections, but complicated for regular ssh + # connections (since no proper environment exists there). + # Default: false + chroot_sessions = yes + + # Per site configuration, where the home directories are located + # The user's name will be appended. + # Required setting. No default. 
ega_dir = /ega/inbox ega_dir_attrs = 2750 # rwxr-s--- - - ################## - # FUSE mount - ################## - ega_fuse_exec = /usr/bin/ega-inbox - ega_fuse_flags = nodev,noexec,suid,default_permissions,allow_other,uid=1000,gid=1000 - - - -We use the following default values if the option is not specified in -the configuration file. - -.. code-block:: bash - - cache_ttl = 3600.0 // 1 hour - enable_cega = "yes" - cache_dir = "/ega/cache" - prompt = "Please, enter your EGA password: " - ega_gecos = "EGA User" - ega_shell = "/sbin/nologin" + + # sets the umask for each session (in octal format) + # Default: 027 # world-denied + #ega_dir_umask = 027 + + # When the password is asked + # Default: "Please, enter your EGA password: " + #prompt = Knock Knock: + + # The user's login shell. + # Default: /bin/bash + #ega_shell = /bin/aspshell-r .. note:: After proper configuration, there is no user maintenance, it is @@ -120,29 +134,25 @@ the configuration file. Implementation ^^^^^^^^^^^^^^ -The cache directory can be mounted as a ``ramfs`` partition of size -200M. We use a directory per user, containing files for the user's -password hash, ssh key and last access record. Files and directories -in the cache are stored in memory, not on disk, giving us an extra -performance boost. A ``ramfs`` partition does not survive a reboot, grow -dynamically and does not use the swap partition (as a ``tmpfs`` partition -would). By default such option is disabled but can be enabled in the `inbox` -entrypoint script. - -We use OpenSSH (version 7.5p1) and its ``sftp`` component. The NSS+PAM -source code has its own `repository +The cache is a SQLite database, mounted in a ``ramfs`` partition (of +initial size 200M). A ``ramfs`` partition does not survive a reboot, +grows dynamically and does not use the swap partition (as a ``tmpfs`` +partition would). By default such option is disabled but can be +enabled in the `inbox` entrypoint script. 
+ +The NSS+PAM source code has its own `repository `_. A makefile is provided to compile and install the necessary shared libraries. -We copied the ``/sbin/sshd`` into an ``/sbin/ega`` binary and +We copied the ``sshd`` into an ``/opt/openshh/sbin/ega`` binary and configured the *ega* service by adding a file into the ``/etc/pam.d`` directory. In this case, the name of the file is ``/etc/pam.d/ega``. -.. literalinclude:: /../deployments/docker/images/inbox/pam.ega +.. literalinclude:: /../docker/images/inbox/pam.ega -The *ega* service is configured just like ``sshd`` is. We only use the -``-c`` switch to specify where the configuration file is. The service -runs for the moment on port 9000. +The *ega* service is configured using the ``-c`` switch to specify +where the configuration file is. The service runs for the moment on +port 9000. Note that when PAM is configured as above, and a user is either not found, or its authentication fails, the access to the service is @@ -159,27 +169,17 @@ algorithm. LocalEGA also supports the usual ``md5``, ``sha256`` and part of the C library). Updating a user password is not allowed (ie therefore the ``password`` -*type* is configure to deny every access). +*type* is configured to deny every access). -The ``session`` *type* handles the FUSE mount and chrooting. +The ``session`` *type* handles the chrooting. The ``account`` *type* of the PAM module is a pass-through. It -succeeds. - -"Refreshing" the last access time is done by the ``setcred`` -service. The latter is usually called before a session is open, and -after a session is closed. Since we are in a chrooted environment when -the session closes, ``setcred`` is bound to fail. However, it -succeeded on the original login, and it will again on the subsequent -logins. That way, if a user logs in again, within a cache TTL delay, -we do not re-query the CentralEGA database. 
After the TTL has elapsed, -we do query anew the CentralEGA database, eventually receiving new -credentials for that user. - -Note that it is unlikely that a user will keep logging in and out, -while its password and/or ssh key have been reset. If so, we can -implement a flush mechanism, given to CentralEGA, if necessary (not -complicated, and ... not a priority). +succeeds. It also "refreshes" the cache information is case it has +expired. This cache expiration mechanism will capture the situation +where the user's credentials have been reset. If the user stays logged +in and idle, the ssh session will expire. If the user is not idle, +then it is the same behaviour as if the user account was created +locally (ie. in /etc/passwd and /etc/shadow). .. _apache-mina-inbox: @@ -191,11 +191,11 @@ This solution makes use of `Apache Mina SSHD project `_. -The user's home directory is created when its credentials upon successful login. +The user's home directory is created upon successful login. Moreover, for each user, we detect when the file upload is completed and compute its checksum. This information is provided to CentralEGA via a :doc:`shovel mechanism on the local message broker `. -We can configure default cache TTL via ``CACHE_TTL`` env var. +We can configure default cache TTL via ``CACHE_TTL`` environment variable. Configuration ^^^^^^^^^^^^^ @@ -228,7 +228,7 @@ Implementation ^^^^^^^^^^^^^^ As mentioned above, the implementation is based on Java library Apache Mina SSHD. It provides a scalable and high -performance asynchronous IO API to support the SSH (and SFPT) protocols on both the client and server side. +performance asynchronous IO API to support the SSH (and SFTP) protocols. Sources are located at the separate repo: https://github.com/NBISweden/LocalEGA-inbox -Basically, it's a Spring-based Maven project, integrated to a common LocalEGA MQ bus. +Essentially, it's a Spring-based Maven project, integrated to a common LocalEGA MQ bus. 
From 29ef88fc5059b140dcdf857d634d1d58fc741ed8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Haziza?= Date: Mon, 20 Aug 2018 16:32:18 +0200 Subject: [PATCH 09/26] Make ingest+verify scalable --- docker/README.md | 7 +++---- docker/bootstrap/lega.sh | 4 ++-- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/docker/README.md b/docker/README.md index 61685e81..23ac4064 100644 --- a/docker/README.md +++ b/docker/README.md @@ -20,15 +20,14 @@ These networks are reflected in their corresponding YML files * `private/cega.yml` * `private/lega.yml` -The passwords are in `private//.trace` and the errors (if -any) are in `private/.err`. +The passwords are in `private/lega/.trace` and the errors (if any) are in `private/.err`. ## Running docker-compose up -d -Use `docker-compose up -d --scale ingest=3` instead, if you want to -start 3 ingestion workers. +Use `docker-compose up -d --scale ingest=3 --scale verify=5` instead, +if you want to start 3 ingestion and 5 verification workers. Note that, in this architecture, we use separate volumes, e.g. for the inbox area, for the vault (here backed by S3). 
They diff --git a/docker/bootstrap/lega.sh b/docker/bootstrap/lega.sh index 98649dc0..d56fd6f7 100755 --- a/docker/bootstrap/lega.sh +++ b/docker/bootstrap/lega.sh @@ -191,7 +191,7 @@ services: - db - mq image: nbisweden/ega-base - container_name: ingest + #container_name: ingest environment: - S3_ACCESS_KEY=${S3_ACCESS_KEY} - S3_SECRET_KEY=${S3_SECRET_KEY} @@ -240,7 +240,7 @@ services: - mq - keys hostname: verify - container_name: verify + #container_name: verify image: nbisweden/ega-base environment: - LEGA_PASSWORD=${LEGA_PASSWORD} From 9d53b8a165aea7e541696fe0ba0e38e90175bf19 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Haziza?= Date: Mon, 20 Aug 2018 18:17:07 +0200 Subject: [PATCH 10/26] Eureka interval renaming to match database naming --- lega/keyserver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lega/keyserver.py b/lega/keyserver.py index 4bc44f7f..8d85f911 100644 --- a/lega/keyserver.py +++ b/lega/keyserver.py @@ -259,7 +259,7 @@ def main(args=None): # Adding the keystore to the server keyserver['store'] = KeysConfiguration(args) - keyserver['interval'] = CONF.get_value('eureka', 'interval', conv=int) + keyserver['interval'] = CONF.get_value('eureka', 'try_interval', conv=int, default=20) keyserver['eureka'] = EurekaClient("keyserver", port=port, ip_addr=host, eureka_url=eureka_endpoint, hostname=host, health_check_url=health_check_url, From 5f3f6e9c744870b0d38beb40f1c81e091496a593 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Haziza?= Date: Mon, 20 Aug 2018 22:22:32 +0200 Subject: [PATCH 11/26] Unnecessary switch removed --- docker/bootstrap/boot.sh | 3 --- 1 file changed, 3 deletions(-) diff --git a/docker/bootstrap/boot.sh b/docker/bootstrap/boot.sh index f13ba49e..cec1936b 100755 --- a/docker/bootstrap/boot.sh +++ b/docker/bootstrap/boot.sh @@ -12,13 +12,11 @@ EXTRAS=${HERE}/../../../extras VERBOSE=no FORCE=yes OPENSSL=openssl -INBOX=fuse function usage { echo "Usage: $0 [options]" echo -e 
"\nOptions are:" echo -e "\t--openssl \tPath to the Openssl executable [Default: ${OPENSSL}]" - echo -e "\t--inbox \tSelect inbox \"fuse\" or \"mina\" [Default: ${DEFAULT_INBOX}]" echo "" echo -e "\t--verbose, -v \tShow verbose output" echo -e "\t--polite, -p \tDo not force the re-creation of the subfolders. Ask instead" @@ -34,7 +32,6 @@ while [[ $# -gt 0 ]]; do --help|-h) usage; exit 0;; --verbose|-v) VERBOSE=yes;; --polite|-p) FORCE=no;; - --inbox) INBOX=$2; shift;; --openssl) OPENSSL=$2; shift;; --) shift; break;; *) echo "$0: error - unrecognized option $1" 1>&2; usage; exit 1;; esac From 1de5d9e3dae556267d10bed7a3ab54120a621413 Mon Sep 17 00:00:00 2001 From: Stefan Negru Date: Tue, 21 Aug 2018 07:41:57 +0300 Subject: [PATCH 12/26] fix for unit tests to fit new wrapper --- lega/utils/eureka.py | 5 +++-- tests/test_eureka.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/lega/utils/eureka.py b/lega/utils/eureka.py index 01bf5d91..9606e1f7 100644 --- a/lega/utils/eureka.py +++ b/lega/utils/eureka.py @@ -25,11 +25,12 @@ def retry_loop(func): - """Decorator retry something ``try`` times every ``try_interval`` seconds.""" + """Retry connection for ``try`` times every ``try_interval`` seconds.""" assert asyncio.iscoroutinefunction(func), "This decorator is only for coroutines" + @wraps(func) async def wrapper(*args, **kwargs): - """Main retry loop.""" + """Retry loop.""" # similar to the rety loop from db.py nb_try = CONF.get_value('eureka', 'try', conv=int, default=1) try_interval = CONF.get_value('eureka', 'try_interval', conv=int, default=1) diff --git a/tests/test_eureka.py b/tests/test_eureka.py index 7c4830f8..7d0be99c 100644 --- a/tests/test_eureka.py +++ b/tests/test_eureka.py @@ -132,4 +132,4 @@ def test_generate_instance_id(self): def test_connection_error(self, mock_logger): """Assert connection error.""" self._loop.run_until_complete(self._eurekaclient.update_metadata('test', 'value')) - mock_logger.error.assert_called_with("Could 
not connect to the Eureka.") + mock_logger.debug.assert_called_with("Eureka server connection fail after 1 attempts ...") From e4ac6b2c1176f08de4c8e7574b169639e04796fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Haziza?= Date: Tue, 21 Aug 2018 08:23:18 +0200 Subject: [PATCH 13/26] Renaming inbox script into notifications --- docker/images/inbox/entrypoint.sh | 2 +- lega/{inbox.py => notifications.py} | 0 setup.py | 2 +- 3 files changed, 2 insertions(+), 2 deletions(-) rename lega/{inbox.py => notifications.py} (100%) diff --git a/docker/images/inbox/entrypoint.sh b/docker/images/inbox/entrypoint.sh index 34922182..b79daecf 100755 --- a/docker/images/inbox/entrypoint.sh +++ b/docker/images/inbox/entrypoint.sh @@ -42,7 +42,7 @@ chmod 750 /ega/inbox chmod g+s /ega/inbox # setgid bit echo "Starting the FileSystem listener" -gosu lega ega-inbox & +gosu lega ega-notifications & echo "Starting the SFTP server" exec /opt/openssh/sbin/ega -D -e -f /etc/ega/sshd_config diff --git a/lega/inbox.py b/lega/notifications.py similarity index 100% rename from lega/inbox.py rename to lega/notifications.py diff --git a/setup.py b/setup.py index 379aaf94..9c611e98 100644 --- a/setup.py +++ b/setup.py @@ -23,9 +23,9 @@ entry_points={ 'console_scripts': [ 'ega-ingest = lega.ingest:main', - 'ega-inbox = lega.inbox:main', 'ega-verify = lega.verify:main', 'ega-keyserver = lega.keyserver:main', + 'ega-notifications = lega.notifications:main', 'ega-conf = lega.conf.__main__:main', ] }, From 7dfb12837d49513c0d3733408feaa61ce8903538 Mon Sep 17 00:00:00 2001 From: Stefan Negru Date: Tue, 21 Aug 2018 15:11:51 +0300 Subject: [PATCH 14/26] test notifications --- lega/notifications.py | 8 ++++---- requirements.txt | 2 +- tests/test_notifications.py | 35 +++++++++++++++++++++++++++++++++++ tests/test_storage.py | 28 +++++++++++++++++++++++++++- 4 files changed, 67 insertions(+), 6 deletions(-) create mode 100644 tests/test_notifications.py diff --git a/lega/notifications.py 
b/lega/notifications.py index 787b343f..79294b46 100644 --- a/lega/notifications.py +++ b/lega/notifications.py @@ -11,8 +11,8 @@ import sys import logging import os -import re -import socket +# import re +# import socket import asyncio import uvloop asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) @@ -48,7 +48,7 @@ def connection_made(self, transport): # Buffering can concatenate multiple messages, especially if they arrive too quickly # We tried to use TCP_NODELAY (to turn off the socket buffering on the sender's side) # but that didn't help. Therefore we use an out-of-band method: - # We separate messages with a '|' character + # We separate messages with a '$' character def parse(self, data): while True: if data.count(b'$') < 2: @@ -111,7 +111,7 @@ def main(args=None): loop.set_debug(True) broker = get_connection('broker') server = loop.run_until_complete(loop.create_server(lambda: Forwarder(broker), host, port)) - + # Serve requests until Ctrl+C is pressed LOG.info('Serving on %s', host) try: diff --git a/requirements.txt b/requirements.txt index ea7425b6..061a292d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ pika==0.12.0 colorama==0.3.7 -aiohttp==3.0.7 +aiohttp==3.3.2 aiohttp-jinja2==0.13.0 sphinx_rtd_theme cryptography==2.3.1 diff --git a/tests/test_notifications.py b/tests/test_notifications.py new file mode 100644 index 00000000..6eb67ca1 --- /dev/null +++ b/tests/test_notifications.py @@ -0,0 +1,35 @@ +import unittest +from lega.notifications import Forwarder +from unittest import mock + + +class testForwarder(unittest.TestCase): + """Ingest + + Testing ingestion functionalities.""" + + def setUp(self): + """Initialise fixtures.""" + mock_broker = mock.MagicMock(name='channel') + mock_broker.channel.return_value = mock.Mock() + self._forwarder = Forwarder(mock_broker) + + @mock.patch('lega.notifications.LOG') + def test_connection_made(self, mock_logger): + """Test connection to socket remote address.""" + mock_transport = 
mock.MagicMock() + mock_transport.get_extra_info.return_value = "127.0.0.1" + self._forwarder.connection_made(mock_transport) + mock_logger.debug.assert_called_with('Connection from 127.0.0.1') + + def test_data_single_file_parsed(self): + """Test parsed data.""" + for u, f in self._forwarder.parse(b'user$file.name$'): + self.assertEqual(u, "user") + self.assertEqual(f, "file.name") + + def test_data_multiple_file_parsed(self): + """Test parsed data.""" + for u, f in self._forwarder.parse(b'john$/dir/subdir/fileA.txt$john$/dir/subdir/fileB.txt$john$/dir/subdir/fileC.txt$'): + self.assertEqual(u, "john") + self.assertIn(f, ["/dir/subdir/fileA.txt", "/dir/subdir/fileB.txt", "/dir/subdir/fileC.txt"]) diff --git a/tests/test_storage.py b/tests/test_storage.py index 4bf77754..1c668384 100644 --- a/tests/test_storage.py +++ b/tests/test_storage.py @@ -3,7 +3,7 @@ from test.support import EnvironmentVarGuard from testfixtures import TempDirectory import os -from io import UnsupportedOperation +from io import UnsupportedOperation, BufferedReader from unittest import mock import boto3 @@ -38,6 +38,12 @@ def test_copy(self): result = self._store.copy(open(path, 'rb'), path1) self.assertEqual(os.stat(path1).st_size, result) + def test_open(self): + """Test open file.""" + path = self._dir.write('test.file', 'data1'.encode('utf-8')) + with self._store.open(path) as resource: + self.assertEqual(BufferedReader, type(resource)) + class TestS3Storage(unittest.TestCase): """S3Storage @@ -46,6 +52,7 @@ class TestS3Storage(unittest.TestCase): def setUp(self): """Initialise fixtures.""" + self._dir = TempDirectory() self.env = EnvironmentVarGuard() self.env.set('VAULT_URL', 'http://localhost:5000') self.env.set('VAULT_REGION', 'lega') @@ -58,6 +65,7 @@ def tearDown(self): self.env.unset('VAULT_REGION') self.env.unset('S3_ACCESS_KEY') self.env.unset('S3_SECRET_KEY') + self._dir.cleanup_all() @mock.patch.object(boto3, 'client') def test_init_s3storage(self, mock_boto): @@ -73,6 +81,24 
@@ def test_init_location(self, mock_boto): self.assertEqual('file_id', result) mock_boto.assert_called() + @mock.patch.object(boto3, 'client') + def test_upload(self, mock_boto): + """Test copy to S3, should call boto3 client.""" + path = self._dir.write('test.file', 'data1'.encode('utf-8')) + storage = S3Storage() + storage.copy(path, 'lega') + mock_boto.assert_called_with('s3', aws_access_key_id='test', aws_secret_access_key='test', + endpoint_url='http://localhost:5000', region_name='lega', + use_ssl=False, verify=False) + + @mock.patch.object(boto3, 'client') + def test_open(self, mock_boto): + """Test open , should call S3FileReader.""" + path = self._dir.write('test.file', 'data1'.encode('utf-8')) + storage = S3Storage() + with storage.open(path) as resource: + self.assertEqual(S3FileReader, type(resource)) + class TestS3FileReader(unittest.TestCase): """S3FileReader From 77142584eaa7f17ff85983ce83048fbdf304c731 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Haziza?= Date: Tue, 21 Aug 2018 15:51:14 +0200 Subject: [PATCH 15/26] Colorama not used anymore --- requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 061a292d..32e260e1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,4 @@ pika==0.12.0 -colorama==0.3.7 aiohttp==3.3.2 aiohttp-jinja2==0.13.0 sphinx_rtd_theme From 52981ce5b3f33aeee7f48f15d4018a9e6eefecbf Mon Sep 17 00:00:00 2001 From: Stefan Negru Date: Tue, 21 Aug 2018 17:50:23 +0300 Subject: [PATCH 16/26] new unit tests --- lega/notifications.py | 8 +++---- tests/test_notifications.py | 38 ++++++++++++++++++++++++++---- tests/test_storage.py | 47 ++++++++++++++++++++++++++++++++++--- 3 files changed, 80 insertions(+), 13 deletions(-) diff --git a/lega/notifications.py b/lega/notifications.py index 79294b46..faf3a72e 100644 --- a/lega/notifications.py +++ b/lega/notifications.py @@ -11,8 +11,6 @@ import sys import logging import os -# import re -# import socket import 
asyncio import uvloop asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) @@ -78,7 +76,7 @@ def send_message(self, username, filename): filepath = os.path.join(inbox, filename.lstrip('/')) else: filepath = filename - filename = filename[len(inbox):] # there is surelt better + filename = filename[len(inbox):] # there is surelt better LOG.debug("Filepath %s", filepath) msg = { 'user': username, 'filepath': filename } if filename.endswith(supported_algorithms()): @@ -105,10 +103,9 @@ def main(args=None): if not args: args = sys.argv[1:] - CONF.setup(args) # re-conf + CONF.setup(args) # re-conf loop = asyncio.get_event_loop() - loop.set_debug(True) broker = get_connection('broker') server = loop.run_until_complete(loop.create_server(lambda: Forwarder(broker), host, port)) @@ -127,5 +124,6 @@ def main(args=None): loop.run_until_complete(server.wait_closed()) loop.close() + if __name__ == '__main__': main() diff --git a/tests/test_notifications.py b/tests/test_notifications.py index 6eb67ca1..f5072553 100644 --- a/tests/test_notifications.py +++ b/tests/test_notifications.py @@ -1,12 +1,13 @@ import unittest -from lega.notifications import Forwarder +from lega.notifications import Forwarder, main from unittest import mock +import socket class testForwarder(unittest.TestCase): - """Ingest + """Notifications - Testing ingestion functionalities.""" + Testing Notifications functionalities.""" def setUp(self): """Initialise fixtures.""" @@ -22,14 +23,41 @@ def test_connection_made(self, mock_logger): self._forwarder.connection_made(mock_transport) mock_logger.debug.assert_called_with('Connection from 127.0.0.1') + @mock.patch('lega.notifications.LOG') + def test_connection_close(self, mock_logger): + self._forwarder.transport = mock.Mock() + self._forwarder.connection_lost('') + self._forwarder.transport.close.assert_called() + def test_data_single_file_parsed(self): - """Test parsed data.""" + """Test parsed data single file.""" for u, f in 
self._forwarder.parse(b'user$file.name$'): self.assertEqual(u, "user") self.assertEqual(f, "file.name") def test_data_multiple_file_parsed(self): - """Test parsed data.""" + """Test parsed data, multiple files.""" for u, f in self._forwarder.parse(b'john$/dir/subdir/fileA.txt$john$/dir/subdir/fileB.txt$john$/dir/subdir/fileC.txt$'): self.assertEqual(u, "john") self.assertIn(f, ["/dir/subdir/fileA.txt", "/dir/subdir/fileB.txt", "/dir/subdir/fileC.txt"]) + + def test_received_data(self): + """Test received data send message.""" + self._forwarder.send_message = mock.Mock() + self._forwarder.data_received(b'user$file.name$') + self._forwarder.send_message.assert_called_with('user', 'file.name') + + @mock.patch('os.stat') + @mock.patch('lega.notifications.publish') + @mock.patch('lega.notifications.calculate') + def test_send_message(self, mock_calculate, mock_publish, mock_stat): + """Test received data error.""" + mock_stat.size.return_value = 1 + self._forwarder.send_message('user', 'file.name') + mock_publish.assert_called() + + # @mock.patch('lega.notifications.asyncio') + # def test_main(self, mock_async): + # s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + # s.connect(("127.0.0.1", 8888)) + # s.close() diff --git a/tests/test_storage.py b/tests/test_storage.py index 1c668384..c9271533 100644 --- a/tests/test_storage.py +++ b/tests/test_storage.py @@ -6,6 +6,7 @@ from io import UnsupportedOperation, BufferedReader from unittest import mock import boto3 +import botocore.response as br class TestFileStorage(unittest.TestCase): @@ -107,9 +108,10 @@ class TestS3FileReader(unittest.TestCase): def setUp(self): """Initialise fixtures.""" - s3 = mock.MagicMock(name='head_object') - s3.head_object.return_value = {'ContentLength': 32} - self._reader = S3FileReader(s3, 'lega', '/path', 'rb', 10) + self._s3 = mock.MagicMock(name='head_object') + self._s3.head_object.return_value = {'ContentLength': 32} + self._s3.get_object.return_value = mock.MagicMock() + 
self._reader = S3FileReader(self._s3, 'lega', '/path', 'rb', 10) def test_tell(self): """Test tell, should return the proper loc result.""" @@ -152,6 +154,45 @@ def test_read_error(self): with self.assertRaises(ValueError): self._reader.read() + def test_read(self): + """Test end of file.""" + self._reader.closed = False + self._reader.loc = self._reader.size = 1 + self.assertEqual(b'', self._reader.read()) + + def test_read_length(self): + """Test read file length.""" + self._reader.closed = False + self._reader._fetch = mock.MagicMock() + self._reader.loc = 1 + self._reader.size = 10 + with self._reader.read(-2): + self._reader._fetch.assert_called() + + def test_read1(self): + """Test read1.""" + self._reader.read = mock.Mock() + self._reader.read1() + self._reader.read.assert_called() + + def test_readinto(self): + """Test readinto.""" + self._reader.read = mock.MagicMock() + data = [] + self.assertEqual(0, self._reader.readinto(data)) + + def test_readinto1(self): + """Test readinto1.""" + self._reader.readinto = mock.Mock() + self._reader.readinto1([]) + self._reader.readinto.assert_called() + + def test_fetch(self): + """Test fetch.""" + self._reader.size = 10 + self._reader._fetch(1, 9, max_attempts=1) + self._s3.get_object.assert_called() + def test_close(self): """Testing close of the file reader.""" self._reader.close() From 0fb4145470efce8cf1988d1a6d84086561e3d550 Mon Sep 17 00:00:00 2001 From: Stefan Negru Date: Tue, 21 Aug 2018 18:09:58 +0300 Subject: [PATCH 17/26] multi part message --- tests/test_notifications.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/tests/test_notifications.py b/tests/test_notifications.py index f5072553..92f8ae06 100644 --- a/tests/test_notifications.py +++ b/tests/test_notifications.py @@ -1,7 +1,6 @@ import unittest -from lega.notifications import Forwarder, main +from lega.notifications import Forwarder from unittest import mock -import socket class 
testForwarder(unittest.TestCase): @@ -47,6 +46,17 @@ def test_received_data(self): self._forwarder.data_received(b'user$file.name$') self._forwarder.send_message.assert_called_with('user', 'file.name') + def test_received_data_2(self): + """Test received data send message.""" + self._forwarder.send_message = mock.Mock() + self._forwarder.data_received(b'user$file.name$user$/to') + self._forwarder.send_message.assert_called_with('user', 'file.name') + self._forwarder.data_received(b'to4.txt$us') + self._forwarder.send_message.assert_called_with('user', '/toto4.txt') + self._forwarder.data_received(b'er$test.fi') + self._forwarder.data_received(b'le$') + self._forwarder.send_message.assert_called_with('user', 'test.file') + @mock.patch('os.stat') @mock.patch('lega.notifications.publish') @mock.patch('lega.notifications.calculate') @@ -55,9 +65,3 @@ def test_send_message(self, mock_calculate, mock_publish, mock_stat): mock_stat.size.return_value = 1 self._forwarder.send_message('user', 'file.name') mock_publish.assert_called() - - # @mock.patch('lega.notifications.asyncio') - # def test_main(self, mock_async): - # s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - # s.connect(("127.0.0.1", 8888)) - # s.close() From d5aafcd2bbdd216e607893ad43292e4c5ce0c6e1 Mon Sep 17 00:00:00 2001 From: Stefan Negru Date: Tue, 21 Aug 2018 18:16:22 +0300 Subject: [PATCH 18/26] update docs --- docker/tests/README.md | 10 +++++----- docs/bootstrap.rst | 5 ++--- docs/setup.rst | 4 ++-- docs/tests.rst | 14 +++++++------- 4 files changed, 16 insertions(+), 17 deletions(-) diff --git a/docker/tests/README.md b/docker/tests/README.md index a8d04001..2f36361d 100644 --- a/docker/tests/README.md +++ b/docker/tests/README.md @@ -41,7 +41,7 @@ Next step is about mapping Gherkin scenarios to executable code. 
Currently we us Given("^I am a user \"([^\"]*)\"$", (String user) -> this.user = user); Given("^I have a private key$", - () -> privateKey = new File(Paths.get("").toAbsolutePath().getParent().toString() + String.format("deployments/docker/bootstrap/private/cega/users/%s.sec", user))); + () -> privateKey = new File(Paths.get("").toAbsolutePath().getParent().toString() + String.format("docker/bootstrap/private/cega/users/%s.sec", user))); When("^I connect to the LocalEGA inbox via SFTP using private key$", () -> { try { @@ -87,7 +87,7 @@ Test-suite is executed using Maven: `mvn clean test` from within the `tests` fol ``` Feature: Uploading As a user I want to be able to upload files to the LocalEGA inbox - + Scenario: Upload files to the LocalEGA inbox # src/test/resources/cucumber/features/uploading.feature:4 Given I am a user "john" # Definitions.java:55 And I have a private key # Definitions.java:57 @@ -116,9 +116,9 @@ Flow -------------------- Behavior-driven development is a software development methodology which essentially states that for each feature of software, a software developer must: - - define a scenarios set for the feature first; - - make the scenarios fail; - - then implement the feature; + - define a scenarios set for the feature first; + - make the scenarios fail; + - then implement the feature; - finally verify that the implementation of the feature makes the scenarios succeed. So *ideally* one should always contribute new functionality along with a correspondent implemented test-case. diff --git a/docs/bootstrap.rst b/docs/bootstrap.rst index f0b7bb20..2a9132f6 100644 --- a/docs/bootstrap.rst +++ b/docs/bootstrap.rst @@ -20,6 +20,5 @@ like Docker Swarm, Kubernetes, Openstack or a local-machine. .. note:: More on that coming... -.. _Docker: https://github.com/NBISweden/LocalEGA/tree/dev/deployments/docker -.. _OpenStack cloud: https://github.com/NBISweden/LocalEGA/tree/dev/deployments/terraform - +.. 
_Docker: https://github.com/NBISweden/LocalEGA/tree/dev/docker +.. _OpenStack cloud: https://github.com/NBISweden/LocalEGA-deploy-terraform diff --git a/docs/setup.rst b/docs/setup.rst index 29e4dd59..2863c2a4 100644 --- a/docs/setup.rst +++ b/docs/setup.rst @@ -83,5 +83,5 @@ interface. .. _NBIS Github repo: https://github.com/NBISweden/LocalEGA -.. _Docker: https://github.com/NBISweden/LocalEGA/tree/dev/deployments/docker -.. _OpenStack cloud: https://github.com/NBISweden/LocalEGA/tree/dev/deployments/terraform +.. _Docker: https://github.com/NBISweden/LocalEGA/tree/dev/docker +.. _OpenStack cloud: https://github.com/NBISweden/LocalEGA-deploy-terraform diff --git a/docs/tests.rst b/docs/tests.rst index 61dc3762..c21e2615 100644 --- a/docs/tests.rst +++ b/docs/tests.rst @@ -29,14 +29,14 @@ Integration tests are more involved and simulate how a user will use the system. Therefore, we have develop a `bootstrap script `_ to kickstart the system, and we execute a set of scenarii in it. `The implementation -`_ +`_ is in Java, and we target a docker-based environment. We have grouped the integration around 2 targets: *Common tests* and *Robustness tests*. .. code-block:: console - $ cd [git-repo]/deployments/docker/tests + $ cd [git-repo]/docker/tests $ mvn test -Dtest=CommonTests -B $ mvn test -Dtest=RobustnessTests -B @@ -45,21 +45,21 @@ Scenarii Here follow the different scenarii we currently test, using a Gherkin-style description. -.. literalinclude:: /../deployments/docker/tests/src/test/resources/cucumber/features/authentication.feature +.. literalinclude:: /../docker/tests/src/test/resources/cucumber/features/authentication.feature :language: gherkin :lines: 1-20 -.. literalinclude:: /../deployments/docker/tests/src/test/resources/cucumber/features/ingestion.feature +.. literalinclude:: /../docker/tests/src/test/resources/cucumber/features/ingestion.feature :language: gherkin :lines: 1-25,38- -.. 
literalinclude:: /../deployments/docker/tests/src/test/resources/cucumber/features/uploading.feature +.. literalinclude:: /../docker/tests/src/test/resources/cucumber/features/uploading.feature :language: gherkin .. - .. literalinclude:: /../deployments/docker/tests/src/test/resources/cucumber/features/checksums.feature + .. literalinclude:: /../docker/tests/src/test/resources/cucumber/features/checksums.feature :language: gherkin -.. literalinclude:: /../deployments/docker/tests/src/test/resources/cucumber/features/robustness.feature +.. literalinclude:: /../docker/tests/src/test/resources/cucumber/features/robustness.feature :language: gherkin :lines: 1-15 From 21f90ed7d6988c7addeb3d5cc8b335564f654e49 Mon Sep 17 00:00:00 2001 From: Stefan Negru Date: Tue, 21 Aug 2018 18:17:15 +0300 Subject: [PATCH 19/26] update travis folders --- .travis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index fd812562..f34efc54 100644 --- a/.travis.yml +++ b/.travis.yml @@ -22,7 +22,7 @@ jobs: script: tox - stage: integration tests before_script: - - cd deployments/docker + - cd docker - make bootstrap ARGS="--inbox mina" - sudo chown -R travis private - docker network create cega @@ -34,7 +34,7 @@ jobs: - mvn test -Dtest=CommonTests -B - stage: integration tests before_script: - - cd deployments/docker + - cd docker - make bootstrap ARGS="--inbox mina" - sudo chown -R travis private - docker network create cega From 004f24b81b17f4db7295b2f238a6de0f253e7fa3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Haziza?= Date: Tue, 21 Aug 2018 18:04:28 +0200 Subject: [PATCH 20/26] Travis update --- .travis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index fd812562..3b9eede5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -23,7 +23,7 @@ jobs: - stage: integration tests before_script: - cd deployments/docker - - make bootstrap ARGS="--inbox mina" + - make bootstrap - sudo 
chown -R travis private - docker network create cega - make up @@ -35,7 +35,7 @@ jobs: - stage: integration tests before_script: - cd deployments/docker - - make bootstrap ARGS="--inbox mina" + - make bootstrap - sudo chown -R travis private - docker network create cega - make up From 93e2e9a0bb3ef3ee7ace9fe2f7216fd8b8f8e162 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Haziza?= Date: Wed, 22 Aug 2018 12:20:44 +0200 Subject: [PATCH 21/26] Adding the --inbox switch back in, temporarily --- .travis.yml | 4 ++-- docker/bootstrap/boot.sh | 3 +++ docker/bootstrap/lega.sh | 33 ++++++++++++++++++++++++--------- 3 files changed, 29 insertions(+), 11 deletions(-) diff --git a/.travis.yml b/.travis.yml index 0db903a9..6ede004f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -23,7 +23,7 @@ jobs: - stage: integration tests before_script: - cd docker - - make bootstrap + - make bootstrap ARGS='--inbox mina' - sudo chown -R travis private - docker network create cega - make up @@ -35,7 +35,7 @@ jobs: - stage: integration tests before_script: - cd docker - - make bootstrap + - make bootstrap ARGS='--inbox mina' - sudo chown -R travis private - docker network create cega - make up diff --git a/docker/bootstrap/boot.sh b/docker/bootstrap/boot.sh index cec1936b..4c932e36 100755 --- a/docker/bootstrap/boot.sh +++ b/docker/bootstrap/boot.sh @@ -12,11 +12,13 @@ EXTRAS=${HERE}/../../../extras VERBOSE=no FORCE=yes OPENSSL=openssl +INBOX=openssh function usage { echo "Usage: $0 [options]" echo -e "\nOptions are:" echo -e "\t--openssl \tPath to the Openssl executable [Default: ${OPENSSL}]" + echo -e "\t--inbox \tSelect inbox \"openssh\" or \"mina\" [Default: ${INBOX}]" echo "" echo -e "\t--verbose, -v \tShow verbose output" echo -e "\t--polite, -p \tDo not force the re-creation of the subfolders. 
Ask instead" @@ -33,6 +35,7 @@ while [[ $# -gt 0 ]]; do --verbose|-v) VERBOSE=yes;; --polite|-p) FORCE=no;; --openssl) OPENSSL=$2; shift;; + --inbox) INBOX=$2; shift;; --) shift; break;; *) echo "$0: error - unrecognized option $1" 1>&2; usage; exit 1;; esac shift diff --git a/docker/bootstrap/lega.sh b/docker/bootstrap/lega.sh index d56fd6f7..26b7f7d1 100755 --- a/docker/bootstrap/lega.sh +++ b/docker/bootstrap/lega.sh @@ -84,8 +84,6 @@ try = 30 endpoint = http://cega-eureka:8761 EOF -# echomsg "\t* SFTP Inbox port" - echomsg "\t* db.sql" # Running in container cat /tmp/db.sql >> ${PRIVATE}/lega/db.sql @@ -164,27 +162,44 @@ services: # SFTP inbox inbox: hostname: ega-inbox - #depends_on: - # - mq + depends_on: + - mq # Required external link external_links: - cega-users:cega-users + container_name: inbox + restart: on-failure:3 + networks: + - lega + - cega +EOF +if [[ $INBOX == 'mina' ]]; then +cat >> ${PRIVATE}/lega.yml <> ${PRIVATE}/lega.yml <> ${PRIVATE}/lega.yml < Date: Wed, 22 Aug 2018 12:35:20 +0200 Subject: [PATCH 22/26] Trying to _not_ install the python dependencies on the travis host --- .travis.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.travis.yml b/.travis.yml index 6ede004f..5e814d12 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,6 +1,8 @@ language: python python: 3.6 +install: + - echo "Dependencies on Travis? 
No, thanks" services: docker From 05f01931e835010162eca7ad153adde47d3772bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Haziza?= Date: Wed, 22 Aug 2018 12:44:34 +0200 Subject: [PATCH 23/26] Adding one more stage --- .travis.yml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 5e814d12..e80c3701 100644 --- a/.travis.yml +++ b/.travis.yml @@ -46,7 +46,16 @@ jobs: - sleep 10 - cd tests - mvn test -Dtest=RobustnessTests -B - + - stage: integration tests + before_script: + - cd docker + - make bootstrap + - sudo chown -R travis private + - make up + - make ps + script: + - sleep 5 + - make -C test notifications: email: false slack: From 7223a6b86a578958a7107c4d7eee43136fb88bd7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Haziza?= Date: Wed, 22 Aug 2018 12:49:44 +0200 Subject: [PATCH 24/26] Fair enough, I thought make up would create the cega network but it did not. --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index e80c3701..bf0d2f7f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -51,6 +51,7 @@ jobs: - cd docker - make bootstrap - sudo chown -R travis private + - docker network create cega - make up - make ps script: From 34e5064078108c49814fa70b95fccd829c3e5ba6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Haziza?= Date: Wed, 22 Aug 2018 12:52:47 +0200 Subject: [PATCH 25/26] Locking postgres version to 9.6 and not "latest" --- docker/bootstrap/lega.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/bootstrap/lega.sh b/docker/bootstrap/lega.sh index 26b7f7d1..96b6f918 100755 --- a/docker/bootstrap/lega.sh +++ b/docker/bootstrap/lega.sh @@ -151,7 +151,7 @@ services: - PGDATA=/ega/data hostname: db container_name: db - image: postgres:latest + image: postgres:9.6 volumes: - db:/ega/data - ./lega/db.sql:/docker-entrypoint-initdb.d/ega.sql:ro From 399751c4ffde9872f07f023a412463f1d92b5b83 Mon Sep 
17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Haziza?= Date: Wed, 22 Aug 2018 13:03:14 +0200 Subject: [PATCH 26/26] Makefile update. No need to update MAIN_REPO Using dd for file creation. FILESIZE is a variable. Adding a target to check the MQ messages, for successful ingestion --- .travis.yml | 14 +- docker/test/.gitignore | 1 + docker/test/Makefile | 73 +- docker/test/README.md | 23 +- docker/test/rabbitmqadmin | 1050 +++++++++++++++++ docker/tests/pom.xml | 5 +- .../src/test/resources/config.properties | 2 +- extras/publish.py | 3 +- lega/notifications.py | 9 +- 9 files changed, 1140 insertions(+), 40 deletions(-) create mode 100755 docker/test/rabbitmqadmin diff --git a/.travis.yml b/.travis.yml index bf0d2f7f..eaf70ca7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,6 +4,10 @@ python: 3.6 install: - echo "Dependencies on Travis? No, thanks" +git: + depth: false + quiet: true + services: docker # command to install dependencies @@ -52,11 +56,17 @@ jobs: - make bootstrap - sudo chown -R travis private - docker network create cega - - make up - - make ps + - docker-compose up -d --scale ingest=3 --scale verify=5 + - docker-compose ps script: - sleep 5 + - pip install -r https://raw.githubusercontent.com/NBISweden/LocalEGA-cryptor/master/requirements.txt + - pip install git+https://github.com/NBISweden/LocalEGA-cryptor.git + - pip install pika - make -C test + - sleep 10 + - make -C test check + notifications: email: false slack: diff --git a/docker/test/.gitignore b/docker/test/.gitignore index 5015af38..6979bfe6 100644 --- a/docker/test/.gitignore +++ b/docker/test/.gitignore @@ -3,3 +3,4 @@ *.c4ga.md5 *.md5 mq.env +file.txt diff --git a/docker/test/Makefile b/docker/test/Makefile index 2311c58c..60e251bd 100644 --- a/docker/test/Makefile +++ b/docker/test/Makefile @@ -1,63 +1,90 @@ -.PHONY: upload submit user +SHELL := /bin/bash -# folder for the localegarepo -MAIN_REPO=~/LocalEGA +.PHONY: upload submit user check -# Dummy key 
-SSH_KEY_PUB=~/.ssh/lega.pub -SSH_KEY_PRIV=~/.ssh/lega +HERE:=$(dir $(realpath $(lastword $(MAKEFILE_LIST)))) +MAIN_REPO:=$(realpath $(HERE)/../..) USER=ega-box-999 USER_ID=100 -FILE=HG00458.unmapped.ILLUMINA.bwa.CHS.low_coverage.20130415.bam +FILE=file.txt +FILESIZE=10 # in MB + +get_md5 = \ + if which md5 &> /dev/null; then \ + md5 $(1) | cut -d' ' -f4 > $(2); \ + else \ + md5sum $(1) | cut -d' ' -f1 > $(2); \ + fi ############################## DOCKER_PATH=$(MAIN_REPO)/docker +CEGA_USERS=$(DOCKER_PATH)/private/cega/users + INSTANCE_PORT=$(shell awk -F= '/DOCKER_PORT_inbox/ {print $$2}' $(DOCKER_PATH)/bootstrap/settings.rc) PGP_PUB=$(DOCKER_PATH)/private/lega/pgp/ega.pub PGP_EMAIL=$(shell awk -F= '/PGP_EMAIL/ {print $$2}' $(DOCKER_PATH)/bootstrap/settings.rc) -CEGA_USERS=$(DOCKER_PATH)/private/cega/users -CEGA_MQ_CONNECTION=$(shell awk -F= '/^CEGA_CONNECTION/ {print $$2}' $(DOCKER_PATH)/private/lega/mq.env) +CEGA_MQ_CONNECTION=$(subst cega-mq,localhost,$(shell awk -F= '/^CEGA_CONNECTION/ {print $$2}' $(DOCKER_PATH)/private/lega/mq.env)) + +SSH_KEY=$(CEGA_USERS)/$(USER).sshkey +SSH_PASS="" # no passphrase ############################## all: user upload submit -# $(FILE): -# @echo 'Hello' > $(FILE) +$(FILE): + dd if=/dev/zero of=$@ count=$(FILESIZE) bs=1048576 $(FILE).c4ga: $(FILE) lega-cryptor encrypt --pk $(PGP_PUB) -i $< -o $@ -# lega-cryptor encrypt -r Sweden -i $< -o $@ +upload: $(FILE).c4ga $(SSH_KEY) user + sftp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -P $(INSTANCE_PORT) -i $(SSH_KEY) $(USER)@localhost <<< $$"put $<" -upload: $(FILE).c4ga - cd $( $@ - -$(FILE).md5: $(FILE) - printf '%s' $(shell md5 $< | cut -d' ' -f4) > $@ + $(call get_md5,$<,$@) submit: $(FILE).c4ga $(FILE).c4ga.md5 $(FILE).md5 - @echo publish.py --connection amqp://[redacted]@$(lastword $(subst @, ,$(CEGA_MQ_CONNECTION))) $(USER) dir/$(FILE).c4ga --enc ... 
- @python $(MAIN_REPO)/extras/publish.py --connection $(subst cega-mq,localhost,$(CEGA_MQ_CONNECTION)) $(USER) $(FILE).c4ga --enc $(shell cat $(FILE).c4ga.md5) --stableID EGAF$(shell cat $(FILE).md5) + python $(MAIN_REPO)/extras/publish.py --connection $(CEGA_MQ_CONNECTION) $(USER) $(FILE).c4ga --enc $(shell cat $(FILE).c4ga.md5) --stableID EGAF$(shell cat $(FILE).md5) user: $(CEGA_USERS)/lega/$(USER).yml $(CEGA_USERS)/lega/$(USER).yml: $(CEGA_USERS)/$(USER).yml -cd $(CEGA_USERS)/lega && ln -s ../$(USER).yml . -cd $(CEGA_USERS)/lega_ids && ln -s ../$(USER).yml $(USER_ID).yml -$(CEGA_USERS)/$(USER).yml: + +$(CEGA_USERS)/$(USER).yml: $(SSH_KEY) @echo --- > $@ @echo "username: $(USER)" >> $@ - @echo "pubkey: $(shell cat $(SSH_KEY_PUB))" >> $@ + @echo "pubkey: $(shell cat $<.pub)" >> $@ @echo "uid: $(USER_ID)" >> $@ @echo "gecos: EGA User - $(USER)" >> $@ +$(SSH_KEY): + ssh-keygen -t rsa -b 2048 -f $@ -N $(SSH_PASS) -C "$(USER)@LocalEGA" + chmod 400 $@ + +# We only read the first message from the completed queue +# If it contains COMPLETED, and it is the right user/filepath/stableID, +# then we say it's ok +# That'll do it for the moment +check: CEGA_MQ_USER=$(shell awk -F= '/CEGA_MQ_USER/ {print $$2}' $(DOCKER_PATH)/private/lega/.trace) +check: CEGA_MQ_PASSWORD=$(shell awk -F= '/CEGA_MQ_PASSWORD/ {print $$2}' $(DOCKER_PATH)/private/lega/.trace) +check: MESSAGE=$(shell ./rabbitmqadmin -H localhost -P 15670 -V lega -u lega -p $(CEGA_MQ_PASSWORD) get queue=completed count=1 requeue=true) +check: $(FILE).c4ga + @echo "$(MESSAGE)" | grep 'state: COMPLETED' &>/dev/null + @echo "$(MESSAGE)" | grep 'user: $(USER)' &>/dev/null + @echo "$(MESSAGE)" | grep 'filepath: $(/dev/null + @echo "$(MESSAGE)" | grep 'details: EGAF$(shell cat $(FILE).md5)' &>/dev/null + @echo "Message found in the CentralEGA broker" + clean: -unlink $(CEGA_USERS)/lega/$(USER).yml -unlink $(CEGA_USERS)/lega_ids/$(USER_ID).yml - rm -rf $(FILE).c4ga $(FILE).c4ga.md5 $(FILE).md5 $(CEGA_USERS)/$(USER).yml + rm 
-rf $(FILE){,.c4ga,.c4ga.md5,.md5} + rm -rf $(CEGA_USERS)/$(USER).yml $(SSH_KEY) $(SSH_KEY).pub diff --git a/docker/test/README.md b/docker/test/README.md index 93100efb..e14b2135 100644 --- a/docker/test/README.md +++ b/docker/test/README.md @@ -1,12 +1,21 @@ ## Testing script -Testing script is used to replicate upload and submission functionalities from an end user. -Before using the script make sure there is a key `~/.ssh/lega.pub` and `~/.ssh/lega` or replace them with -your own in the `Makefile`. Also `MAIN_REPO=~/LocalEGA` should reflect the path do the LocalEGA project. +This is used to simulate an upload and a submission from an end-user. + +Run the script with -Using the script: ``` -make user -make upload -make submit +cd +make ``` + +Internally, it will: + +1) create some fake user named `ega-box-999`, including its ssh-key. +2) Download the file `HG00458.unmapped.ILLUMINA.bwa.CHS.low_coverage.20130415.bam` from EBI. +3) Encrypt the file in the Crypt4GH format +4) Upload the encrypted file to the LocalEGA inbox +5) Trigger a fake submission on CentralEGA side + +If all goes as expected, the CentralEGA (fake) message broker should +have received a message of completion. diff --git a/docker/test/rabbitmqadmin b/docker/test/rabbitmqadmin new file mode 100755 index 00000000..a2783d39 --- /dev/null +++ b/docker/test/rabbitmqadmin @@ -0,0 +1,1050 @@ +#!/usr/bin/env python + +# The contents of this file are subject to the Mozilla Public License +# Version 1.1 (the "License"); you may not use this file except in +# compliance with the License. You may obtain a copy of the License at +# http://www.mozilla.org/MPL/ +# +# Software distributed under the License is distributed on an "AS IS" +# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +# License for the specific language governing rights and limitations +# under the License. +# +# The Original Code is RabbitMQ Management Plugin. 
+# +# The Initial Developer of the Original Code is GoPivotal, Inc. +# Copyright (c) 2007-2016 Pivotal Software, Inc. All rights reserved. +from __future__ import print_function + +try: + from signal import signal, SIGPIPE, SIG_DFL + signal(SIGPIPE, SIG_DFL) +except ImportError: + pass + +import sys +if sys.version_info[0] < 2 or (sys.version_info[0] == 2 and sys.version_info[1] < 6): + eprint("Sorry, rabbitmqadmin requires at least Python 2.6 (2.7.9 when HTTPS is enabled).") + sys.exit(1) + +from optparse import OptionParser, TitledHelpFormatter +import urllib +import base64 +import json +import os +import socket +import ssl +import traceback + +if sys.version_info[0] == 2: + from ConfigParser import ConfigParser, NoSectionError + import httplib + import urlparse + from urllib import quote_plus + from urllib import quote + def b64(s): + return base64.b64encode(s) +else: + from configparser import ConfigParser, NoSectionError + import http.client as httplib + import urllib.parse as urlparse + from urllib.parse import quote_plus + from urllib.parse import quote + def b64(s): + return base64.b64encode(s.encode('utf-8')).decode('utf-8') + +VERSION = '%%VSN%%' + +LISTABLE = {'connections': {'vhost': False, 'cols': ['name','user','channels']}, + 'channels': {'vhost': False, 'cols': ['name', 'user']}, + 'consumers': {'vhost': True}, + 'exchanges': {'vhost': True, 'cols': ['name', 'type']}, + 'queues': {'vhost': True, 'cols': ['name', 'messages']}, + 'bindings': {'vhost': True, 'cols': ['source', 'destination', + 'routing_key']}, + 'users': {'vhost': False}, + 'vhosts': {'vhost': False, 'cols': ['name', 'messages']}, + 'permissions': {'vhost': False}, + 'nodes': {'vhost': False, 'cols': ['name','type','mem_used']}, + 'parameters': {'vhost': False, 'json': ['value']}, + 'policies': {'vhost': False, 'json': ['definition']}} + +SHOWABLE = {'overview': {'vhost': False, 'cols': ['rabbitmq_version', + 'cluster_name', + 'queue_totals.messages', + 'object_totals.queues']}} + 
+PROMOTE_COLUMNS = ['vhost', 'name', 'type', + 'source', 'destination', 'destination_type', 'routing_key'] + +URIS = { + 'exchange': '/exchanges/{vhost}/{name}', + 'queue': '/queues/{vhost}/{name}', + 'binding': '/bindings/{vhost}/e/{source}/{destination_char}/{destination}', + 'binding_del':'/bindings/{vhost}/e/{source}/{destination_char}/{destination}/{properties_key}', + 'vhost': '/vhosts/{name}', + 'user': '/users/{name}', + 'permission': '/permissions/{vhost}/{user}', + 'parameter': '/parameters/{component}/{vhost}/{name}', + 'policy': '/policies/{vhost}/{name}' + } + +DECLARABLE = { + 'exchange': {'mandatory': ['name', 'type'], + 'json': ['arguments'], + 'optional': {'auto_delete': 'false', 'durable': 'true', + 'internal': 'false', 'arguments': {}}}, + 'queue': {'mandatory': ['name'], + 'json': ['arguments'], + 'optional': {'auto_delete': 'false', 'durable': 'true', + 'arguments': {}, 'node': None}}, + 'binding': {'mandatory': ['source', 'destination'], + 'json': ['arguments'], + 'optional': {'destination_type': 'queue', + 'routing_key': '', 'arguments': {}}}, + 'vhost': {'mandatory': ['name'], + 'optional': {'tracing': None}}, + 'user': {'mandatory': ['name', 'password', 'tags'], + 'optional': {}}, + 'permission': {'mandatory': ['vhost', 'user', 'configure', 'write', 'read'], + 'optional': {}}, + 'parameter': {'mandatory': ['component', 'name', 'value'], + 'json': ['value'], + 'optional': {}}, + # Priority is 'json' to convert to int + 'policy': {'mandatory': ['name', 'pattern', 'definition'], + 'json': ['definition', 'priority'], + 'optional': {'priority' : 0, 'apply-to': None}} + } + +DELETABLE = { + 'exchange': {'mandatory': ['name']}, + 'queue': {'mandatory': ['name']}, + 'binding': {'mandatory': ['source', 'destination_type', 'destination', + 'properties_key']}, + 'vhost': {'mandatory': ['name']}, + 'user': {'mandatory': ['name']}, + 'permission': {'mandatory': ['vhost', 'user']}, + 'parameter': {'mandatory': ['component', 'name']}, + 'policy': 
{'mandatory': ['name']} + } + +CLOSABLE = { + 'connection': {'mandatory': ['name'], + 'optional': {}, + 'uri': '/connections/{name}'} + } + +PURGABLE = { + 'queue': {'mandatory': ['name'], + 'optional': {}, + 'uri': '/queues/{vhost}/{name}/contents'} + } + +EXTRA_VERBS = { + 'publish': {'mandatory': ['routing_key'], + 'optional': {'payload': None, + 'properties': {}, + 'exchange': 'amq.default', + 'payload_encoding': 'string'}, + 'json': ['properties'], + 'uri': '/exchanges/{vhost}/{exchange}/publish'}, + 'get': {'mandatory': ['queue'], + 'optional': {'count': '1', 'requeue': 'true', + 'payload_file': None, 'encoding': 'auto'}, + 'uri': '/queues/{vhost}/{queue}/get'} +} + +for k in DECLARABLE: + DECLARABLE[k]['uri'] = URIS[k] + +for k in DELETABLE: + DELETABLE[k]['uri'] = URIS[k] + DELETABLE[k]['optional'] = {} +DELETABLE['binding']['uri'] = URIS['binding_del'] + +def short_usage(): + return "rabbitmqadmin [options] subcommand" + +def title(name): + return "\n%s\n%s\n\n" % (name, '=' * len(name)) + +def subcommands_usage(): + usage = """Usage +===== + """ + short_usage() + """ + + where subcommand is one of: +""" + title("Display") + + for l in LISTABLE: + usage += " list {0} [...]\n".format(l) + for s in SHOWABLE: + usage += " show {0} [...]\n".format(s) + usage += title("Object Manipulation") + usage += fmt_usage_stanza(DECLARABLE, 'declare') + usage += fmt_usage_stanza(DELETABLE, 'delete') + usage += fmt_usage_stanza(CLOSABLE, 'close') + usage += fmt_usage_stanza(PURGABLE, 'purge') + usage += title("Broker Definitions") + usage += """ export + import +""" + usage += title("Publishing and Consuming") + usage += fmt_usage_stanza(EXTRA_VERBS, '') + usage += """ + * If payload is not specified on publish, standard input is used + + * If payload_file is not specified on get, the payload will be shown on + standard output along with the message metadata + + * If payload_file is specified on get, count must not be set +""" + return usage + +def config_usage(): + usage 
= "Usage\n=====\n" + short_usage() + usage += "\n" + title("Configuration File") + usage += """ It is possible to specify a configuration file from the command line. + Hosts can be configured easily in a configuration file and called + from the command line. +""" + usage += title("Example") + usage += """ # rabbitmqadmin.conf.example START + + [host_normal] + hostname = localhost + port = 15672 + username = guest + password = guest + declare_vhost = / # Used as default for declare / delete only + vhost = / # Used as default for declare / delete / list + + [host_ssl] + hostname = otherhost + port = 15672 + username = guest + password = guest + ssl = True + ssl_key_file = /path/to/key.pem + ssl_cert_file = /path/to/cert.pem + + # rabbitmqadmin.conf.example END +""" + usage += title("Use") + usage += """ rabbitmqadmin -c rabbitmqadmin.conf.example -N host_normal ...""" + return usage + +def more_help(): + return """ +More Help +========= + +For more help use the help subcommand: + + rabbitmqadmin help subcommands # For a list of available subcommands + rabbitmqadmin help config # For help with the configuration file +""" + +def fmt_usage_stanza(root, verb): + def fmt_args(args): + res = " ".join(["{0}=...".format(a) for a in args['mandatory']]) + opts = " ".join("{0}=...".format(o) for o in args['optional'].keys()) + if opts != "": + res += " [{0}]".format(opts) + return res + + text = "" + if verb != "": + verb = " " + verb + for k in root.keys(): + text += " {0} {1} {2}\n".format(verb, k, fmt_args(root[k])) + return text + +default_options = { "hostname" : "localhost", + "port" : "15672", + "path_prefix" : "", + "declare_vhost" : "/", + "username" : "guest", + "password" : "guest", + "ssl" : False, + "verbose" : True, + "format" : "table", + "depth" : 1, + "bash_completion" : False } + + +class MyFormatter(TitledHelpFormatter): + def format_epilog(self, epilog): + return epilog + +parser = OptionParser(usage=short_usage(), + formatter=MyFormatter(), + 
epilog=more_help()) + +def make_parser(): + def add(*args, **kwargs): + key = kwargs['dest'] + if key in default_options: + default = " [default: %s]" % default_options[key] + kwargs['help'] = kwargs['help'] + default + parser.add_option(*args, **kwargs) + + add("-c", "--config", dest="config", + help="configuration file [default: ~/.rabbitmqadmin.conf]", + metavar="CONFIG") + add("-N", "--node", dest="node", + help="node described in the configuration file [default: 'default'" + \ + " only if configuration file is specified]", + metavar="NODE") + add("-H", "--host", dest="hostname", + help="connect to host HOST" , + metavar="HOST") + add("-P", "--port", dest="port", + help="connect to port PORT", + metavar="PORT") + add("--path-prefix", dest="path_prefix", + help="use specific URI path prefix for the RabbitMQ HTTP API. /api and operation path will be appended to it. (default: blank string)") + add("-V", "--vhost", dest="vhost", + help="connect to vhost VHOST [default: all vhosts for list, '/' for declare]", + metavar="VHOST") + add("-u", "--username", dest="username", + help="connect using username USERNAME", + metavar="USERNAME") + add("-p", "--password", dest="password", + help="connect using password PASSWORD", + metavar="PASSWORD") + add("-U", "--base-uri", dest="base_uri", + help="connect using a base HTTP API URI. /api and operation path will be appended to it. Path will be ignored. 
--vhost has to be provided separately.", + metavar="URI") + add("-q", "--quiet", action="store_false", dest="verbose", + help="suppress status messages") + add("-s", "--ssl", action="store_true", dest="ssl", + help="connect with ssl") + add("--ssl-key-file", dest="ssl_key_file", + help="PEM format key file for SSL") + add("--ssl-cert-file", dest="ssl_cert_file", + help="PEM format certificate file for SSL") + add("--ssl-ca-cert-file", dest="ssl_ca_cert_file", + help="PEM format CA certificate file for SSL") + add("--ssl-disable-hostname-verification", dest="ssl_disable_hostname_verification", + help="Disables peer hostname verification", default=False, action="store_true") + add("-k", "--ssl-insecure", dest="ssl_insecure", + help="Disables all SSL validations like curl's '-k' argument", default=False, action="store_true") + add("-f", "--format", dest="format", + help="format for listing commands - one of [" + ", ".join(FORMATS.keys()) + "]") + add("-S", "--sort", dest="sort", help="sort key for listing queries") + add("-R", "--sort-reverse", action="store_true", dest="sort_reverse", + help="reverse the sort order") + add("-d", "--depth", dest="depth", + help="maximum depth to recurse for listing tables") + add("--bash-completion", action="store_true", + dest="bash_completion", + help="Print bash completion script") + add("--version", action="store_true", + dest="version", + help="Display version and exit") + +def default_config(): + home = os.getenv('USERPROFILE') or os.getenv('HOME') + if home is not None: + config_file = home + os.sep + ".rabbitmqadmin.conf" + if os.path.isfile(config_file): + return config_file + return None + +def make_configuration(): + make_parser() + (options, args) = parser.parse_args() + setattr(options, "declare_vhost", None) + if options.version: + print_version() + if options.config is None: + config_file = default_config() + if config_file is not None: + setattr(options, "config", config_file) + else: + if not 
os.path.isfile(options.config): + assert_usage(False, + "Could not read config file '%s'" % options.config) + + if options.node is None and options.config: + options.node = "default" + else: + options.node = options.node + for (key, val) in default_options.items(): + if getattr(options, key) is None: + setattr(options, key, val) + + if options.config is not None: + config = ConfigParser() + try: + config.read(options.config) + new_conf = dict(config.items(options.node)) + except NoSectionError as error: + if options.node == "default": + pass + else: + assert_usage(False, ("Could not read section '%s' in config file" + + " '%s':\n %s") % + (options.node, options.config, error)) + else: + for key, val in new_conf.items(): + if key == 'ssl': + setattr(options, key, val == "True") + else: + setattr(options, key, val) + + # if --base-uri is passed, set connection parameters from it + if options.base_uri is not None: + u = urlparse.urlparse(options.base_uri) + for key in ["hostname", "port", "username", "password"]: + if getattr(u, key) is not None: + setattr(options, key, getattr(u, key)) + + if u.path is not None and (u.path != "") and (u.path != "/"): + eprint("WARNING: path in --base-uri is ignored. 
Please specify --vhost and/or --path-prefix separately.\n") + + return (options, args) + +def assert_usage(expr, error): + if not expr: + eprint("\nERROR: {0}\n".format(error)) + eprint("{0} --help for help\n".format(os.path.basename(sys.argv[0]))) + sys.exit(1) + +def print_version(): + print("rabbitmqadmin {0}".format(VERSION)) + sys.exit(0) + +def column_sort_key(col): + if col in PROMOTE_COLUMNS: + return (1, PROMOTE_COLUMNS.index(col)) + else: + return (2, col) + +def main(): + (options, args) = make_configuration() + if options.bash_completion: + print_bash_completion() + sys.exit(0) + assert_usage(len(args) > 0, 'Action not specified') + mgmt = Management(options, args[1:]) + mode = "invoke_" + args[0] + assert_usage(hasattr(mgmt, mode), + 'Action {0} not understood'.format(args[0])) + method = getattr(mgmt, "invoke_%s" % args[0]) + method() + +def eprint(*args, **kwargs): + print(*args, file=sys.stderr, **kwargs) + +def die(s): + eprint("*** {0}\n".format(s)) + sys.exit(1) + +def maybe_utf8(s): + if isinstance(s, int): + # s can be also an int for ex messages count + return str(s) + if isinstance(s, float): + # s can be also a float for message rate + return str(s) + if sys.version_info[0] == 3: + # It will have an encoding, which Python will respect + return s + else: + # It won't have an encoding, and Python will pick ASCII by default + return s.encode('utf-8') + + +class Management: + def __init__(self, options, args): + self.options = options + self.args = args + + def get(self, path): + return self.http("GET", "%s/api%s" % (self.options.path_prefix, path), "") + + def put(self, path, body): + return self.http("PUT", "%s/api%s" % (self.options.path_prefix, path), body) + + def post(self, path, body): + return self.http("POST", "%s/api%s" % (self.options.path_prefix, path), body) + + def delete(self, path): + return self.http("DELETE", "%s/api%s" % (self.options.path_prefix, path), "") + + def __initialize_https_connection(self, hostname, port): + # 
Python 2.7.9+ + if hasattr(ssl, 'create_default_context'): + return httplib.HTTPSConnection(hostname, port, + context = self.__initialize_tls_context()) + # Python < 2.7.8, note: those versions still have SSLv3 enabled + # and other limitations. See rabbitmq/rabbitmq-management#225 + else: + eprint("WARNING: rabbitmqadmin requires Python 2.7.9+ when HTTPS is used.") + return httplib.HTTPSConnection(hostname, port, + cert_file = self.options.ssl_cert_file, + key_file = self.options.ssl_key_file) + + def __initialize_tls_context(self): + # Python 2.7.9+ only + ssl_ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) + ssl_ctx.options &= ~ssl.OP_NO_SSLv3 + + ssl_insecure = self.options.ssl_insecure + ssl_disable_hostname_verification = ssl_insecure or \ + self.options.ssl_disable_hostname_verification + # Note: you must set check_hostname prior to verify_mode + if ssl_disable_hostname_verification: + ssl_ctx.check_hostname = False + if ssl_insecure: + ssl_ctx.verify_mode = ssl.CERT_NONE + + if self.options.ssl_key_file: + ssl_ctx.load_cert_chain(self.options.ssl_cert_file, + self.options.ssl_key_file) + if self.options.ssl_ca_cert_file: + ssl_ctx.load_verify_locations(self.options.ssl_ca_cert_file) + return ssl_ctx + + def http(self, method, path, body): + if self.options.ssl: + conn = self.__initialize_https_connection(self.options.hostname, + self.options.port) + else: + conn = httplib.HTTPConnection(self.options.hostname, + self.options.port) + auth = (self.options.username + ":" + self.options.password) + + headers = {"Authorization": "Basic " + b64(auth)} + if body != "": + headers["Content-Type"] = "application/json" + try: + conn.request(method, path, body, headers) + except socket.error as e: + traceback.print_exc(e) + die("Could not connect: {0}".format(e)) + resp = conn.getresponse() + if resp.status == 400: + die(json.loads(resp.read())['reason']) + if resp.status == 401: + die("Access refused: {0}".format(path)) + if resp.status == 404: + die("Not 
found: {0}".format(path)) + if resp.status == 301: + url = urlparse.urlparse(resp.getheader('location')) + [host, port] = url.netloc.split(':') + self.options.hostname = host + self.options.port = int(port) + return self.http(method, url.path + '?' + url.query, body) + if resp.status < 200 or resp.status > 400: + raise Exception("Received %d %s for path %s\n%s" + % (resp.status, resp.reason, path, resp.read())) + return resp.read().decode('utf-8') + + def verbose(self, string): + if self.options.verbose: + print(string) + + def get_arg(self): + assert_usage(len(self.args) == 1, 'Exactly one argument required') + return self.args[0] + + def use_cols(self): + # Deliberately do not cast to int here; we only care about the + # default, not explicit setting. + return self.options.depth == 1 and not 'json' in self.options.format + + def invoke_help(self): + if len(self.args) == 0: + parser.print_help() + else: + help_cmd = self.get_arg() + if help_cmd == 'subcommands': + usage = subcommands_usage() + elif help_cmd == 'config': + usage = config_usage() + else: + assert_usage(False, """help topic must be one of: + subcommands + config""") + print(usage) + sys.exit(0) + + def invoke_publish(self): + (uri, upload) = self.parse_args(self.args, EXTRA_VERBS['publish']) + if not 'payload' in upload: + data = sys.stdin.read() + upload['payload'] = b64(data) + upload['payload_encoding'] = 'base64' + resp = json.loads(self.post(uri, json.dumps(upload))) + if resp['routed']: + self.verbose("Message published") + else: + self.verbose("Message published but NOT routed") + + def invoke_get(self): + (uri, upload) = self.parse_args(self.args, EXTRA_VERBS['get']) + payload_file = 'payload_file' in upload and upload['payload_file'] or None + assert_usage(not payload_file or upload['count'] == '1', + 'Cannot get multiple messages using payload_file') + result = self.post(uri, json.dumps(upload)) + if payload_file: + write_payload_file(payload_file, result) + columns = ['routing_key', 
'exchange', 'message_count', + 'payload_bytes', 'redelivered'] + format_list(result, columns, {}, self.options) + else: + format_list(result, [], {}, self.options) + + def invoke_export(self): + path = self.get_arg() + uri = "/definitions" + if self.options.vhost: + uri += "/%s" % quote_plus(self.options.vhost) + definitions = self.get(uri) + f = open(path, 'w') + f.write(definitions) + f.close() + self.verbose("Exported definitions for %s to \"%s\"" + % (self.options.hostname, path)) + + def invoke_import(self): + path = self.get_arg() + f = open(path, 'r') + definitions = f.read() + f.close() + uri = "/definitions" + if self.options.vhost: + uri += "/%s" % quote_plus(self.options.vhost) + self.post(uri, definitions) + self.verbose("Imported definitions for %s from \"%s\"" + % (self.options.hostname, path)) + + def invoke_list(self): + (uri, obj_info, cols) = self.list_show_uri(LISTABLE, 'list') + format_list(self.get(uri), cols, obj_info, self.options) + + def invoke_show(self): + (uri, obj_info, cols) = self.list_show_uri(SHOWABLE, 'show') + format_list('[{0}]'.format(self.get(uri)), cols, obj_info, self.options) + + def list_show_uri(self, obj_types, verb): + obj_type = self.args[0] + assert_usage(obj_type in obj_types, + "Don't know how to {0} {1}".format(verb, obj_type)) + obj_info = obj_types[obj_type] + uri = "/%s" % obj_type + query = [] + if obj_info['vhost'] and self.options.vhost: + uri += "/%s" % quote_plus(self.options.vhost) + cols = self.args[1:] + if cols == [] and 'cols' in obj_info and self.use_cols(): + cols = obj_info['cols'] + if cols != []: + query.append("columns=" + ",".join(cols)) + sort = self.options.sort + if sort: + query.append("sort=" + sort) + if self.options.sort_reverse: + query.append("sort_reverse=true") + query = "&".join(query) + if query != "": + uri += "?" 
+ query + return (uri, obj_info, cols) + + def invoke_declare(self): + (obj_type, uri, upload) = self.declare_delete_parse(DECLARABLE) + if obj_type == 'binding': + self.post(uri, json.dumps(upload)) + else: + self.put(uri, json.dumps(upload)) + self.verbose("{0} declared".format(obj_type)) + + def invoke_delete(self): + (obj_type, uri, upload) = self.declare_delete_parse(DELETABLE) + self.delete(uri) + self.verbose("{0} deleted".format(obj_type)) + + def invoke_close(self): + (obj_type, uri, upload) = self.declare_delete_parse(CLOSABLE) + self.delete(uri) + self.verbose("{0} closed".format(obj_type)) + + def invoke_purge(self): + (obj_type, uri, upload) = self.declare_delete_parse(PURGABLE) + self.delete(uri) + self.verbose("{0} purged".format(obj_type)) + + def declare_delete_parse(self, root): + assert_usage(len(self.args) > 0, 'Type not specified') + obj_type = self.args[0] + assert_usage(obj_type in root, + 'Type {0} not recognised'.format(obj_type)) + obj = root[obj_type] + (uri, upload) = self.parse_args(self.args[1:], obj) + return (obj_type, uri, upload) + + def parse_args(self, args, obj): + mandatory = obj['mandatory'] + optional = obj['optional'] + uri_template = obj['uri'] + upload = {} + for k in optional.keys(): + if optional[k] is not None: + upload[k] = optional[k] + for arg in args: + assert_usage("=" in arg, + 'Argument "{0}" not in format name=value'.format(arg)) + (name, value) = arg.split("=", 1) + assert_usage(name in mandatory or name in optional.keys(), + 'Argument "{0}" not recognised'.format(name)) + if 'json' in obj and name in obj['json']: + upload[name] = self.parse_json(value) + else: + upload[name] = value + for m in mandatory: + assert_usage(m in upload.keys(), + 'mandatory argument "{0}" required'.format(m)) + if 'vhost' not in mandatory: + upload['vhost'] = self.options.vhost or self.options.declare_vhost + uri_args = {} + for k in upload: + v = upload[k] + if v and isinstance(v, (str, bytes)): + uri_args[k] = quote(v, '') + if k 
== 'destination_type': + uri_args['destination_char'] = v[0] + uri = uri_template.format(**uri_args) + return (uri, upload) + + def parse_json(self, text): + try: + return json.loads(text) + except ValueError: + eprint("ERROR: Could not parse JSON:\n {0}".format(text)) + sys.exit(1) + +def format_list(json_list, columns, args, options): + format = options.format + formatter = None + if format == "raw_json": + print(json_list) + return + elif format == "pretty_json": + enc = json.JSONEncoder(False, False, True, True, True, 2) + print(enc.encode(json.loads(json_list))) + return + else: + formatter = FORMATS[format] + assert_usage(formatter != None, + "Format {0} not recognised".format(format)) + formatter_instance = formatter(columns, args, options) + formatter_instance.display(json_list) + +class Lister: + def verbose(self, string): + if self.options.verbose: + print(string) + + def display(self, json_list): + depth = sys.maxsize + if len(self.columns) == 0: + depth = int(self.options.depth) + (columns, table) = self.list_to_table(json.loads(json_list), depth) + if len(table) > 0: + self.display_list(columns, table) + else: + self.verbose("No items") + + def list_to_table(self, items, max_depth): + columns = {} + column_ix = {} + row = None + table = [] + + def add(prefix, depth, item, fun): + for key in item: + column = prefix == '' and key or (prefix + '.' + key) + subitem = item[key] + if type(subitem) == dict: + if 'json' in self.obj_info and key in self.obj_info['json']: + fun(column, json.dumps(subitem)) + else: + if depth < max_depth: + add(column, depth + 1, subitem, fun) + elif type(subitem) == list: + # The first branch has slave nodes in queues in + # mind (which come out looking decent); the second + # one has applications in nodes (which look less + # so, but what would look good?). 
+ if [x for x in subitem if type(x) != str] == []: + serialised = " ".join(subitem) + else: + serialised = json.dumps(subitem) + fun(column, serialised) + else: + fun(column, subitem) + + def add_to_columns(col, val): + columns[col] = True + + def add_to_row(col, val): + if col in column_ix: + if val is not None: + row[column_ix[col]] = maybe_utf8(val) + else: + row[column_ix[col]] = None + + if len(self.columns) == 0: + for item in items: + add('', 1, item, add_to_columns) + columns = list(columns.keys()) + columns.sort(key=column_sort_key) + else: + columns = self.columns + + for i in range(0, len(columns)): + column_ix[columns[i]] = i + for item in items: + row = len(columns) * [''] + add('', 1, item, add_to_row) + table.append(row) + + return (columns, table) + +class TSVList(Lister): + def __init__(self, columns, obj_info, options): + self.columns = columns + self.obj_info = obj_info + self.options = options + + def display_list(self, columns, table): + head = "\t".join(columns) + self.verbose(head) + + for row in table: + line = "\t".join(row) + print(line) + +class LongList(Lister): + def __init__(self, columns, obj_info, options): + self.columns = columns + self.obj_info = obj_info + self.options = options + + def display_list(self, columns, table): + sep = "\n" + "-" * 80 + "\n" + max_width = 0 + for col in columns: + max_width = max(max_width, len(col)) + fmt = "{0:>" + str(max_width) + "}: {1}" + print(sep) + for i in range(0, len(table)): + for j in range(0, len(columns)): + print(fmt.format(columns[j], table[i][j])) + print(sep) + +class TableList(Lister): + def __init__(self, columns, obj_info, options): + self.columns = columns + self.obj_info = obj_info + self.options = options + + def display_list(self, columns, table): + total = [columns] + total.extend(table) + self.ascii_table(total) + + def ascii_table(self, rows): + table = "" + col_widths = [0] * len(rows[0]) + for i in range(0, len(rows[0])): + for j in range(0, len(rows)): + col_widths[i] = 
max(col_widths[i], len(rows[j][i])) + self.ascii_bar(col_widths) + self.ascii_row(col_widths, rows[0], "^") + self.ascii_bar(col_widths) + for row in rows[1:]: + self.ascii_row(col_widths, row, "<") + self.ascii_bar(col_widths) + + def ascii_row(self, col_widths, row, align): + txt = "|" + for i in range(0, len(col_widths)): + fmt = " {0:" + align + str(col_widths[i]) + "} " + txt += fmt.format(row[i]) + "|" + print(txt) + + def ascii_bar(self, col_widths): + txt = "+" + for w in col_widths: + txt += ("-" * (w + 2)) + "+" + print(txt) + +class KeyValueList(Lister): + def __init__(self, columns, obj_info, options): + self.columns = columns + self.obj_info = obj_info + self.options = options + + def display_list(self, columns, table): + for i in range(0, len(table)): + row = [] + for j in range(0, len(columns)): + row.append("{0}=\"{1}\"".format(columns[j], table[i][j])) + print(" ".join(row)) + +# TODO handle spaces etc in completable names +class BashList(Lister): + def __init__(self, columns, obj_info, options): + self.columns = columns + self.obj_info = obj_info + self.options = options + + def display_list(self, columns, table): + ix = None + for i in range(0, len(columns)): + if columns[i] == 'name': + ix = i + if ix is not None: + res = [] + for row in table: + res.append(row[ix]) + print(" ".join(res)) + +FORMATS = { + 'raw_json' : None, # Special cased + 'pretty_json' : None, # Ditto + 'tsv' : TSVList, + 'long' : LongList, + 'table' : TableList, + 'kvp' : KeyValueList, + 'bash' : BashList +} + +def write_payload_file(payload_file, json_list): + result = json.loads(json_list)[0] + payload = result['payload'] + payload_encoding = result['payload_encoding'] + f = open(payload_file, 'w') + if payload_encoding == 'base64': + data = base64.b64decode(payload) + else: + data = payload + f.write(data) + f.close() + +def print_bash_completion(): + script = """# This is a bash completion script for rabbitmqadmin. 
+# Redirect it to a file, then source it or copy it to /etc/bash_completion.d +# to get tab completion. rabbitmqadmin must be on your PATH for this to work. +_rabbitmqadmin() +{ + local cur prev opts base + COMPREPLY=() + cur="${COMP_WORDS[COMP_CWORD]}" + prev="${COMP_WORDS[COMP_CWORD-1]}" + + opts="list show declare delete close purge import export get publish help" + fargs="--help --host --port --vhost --username --password --format --depth --sort --sort-reverse" + + case "${prev}" in + list) + COMPREPLY=( $(compgen -W '""" + " ".join(LISTABLE) + """' -- ${cur}) ) + return 0 + ;; + show) + COMPREPLY=( $(compgen -W '""" + " ".join(SHOWABLE) + """' -- ${cur}) ) + return 0 + ;; + declare) + COMPREPLY=( $(compgen -W '""" + " ".join(DECLARABLE.keys()) + """' -- ${cur}) ) + return 0 + ;; + delete) + COMPREPLY=( $(compgen -W '""" + " ".join(DELETABLE.keys()) + """' -- ${cur}) ) + return 0 + ;; + close) + COMPREPLY=( $(compgen -W '""" + " ".join(CLOSABLE.keys()) + """' -- ${cur}) ) + return 0 + ;; + purge) + COMPREPLY=( $(compgen -W '""" + " ".join(PURGABLE.keys()) + """' -- ${cur}) ) + return 0 + ;; + export) + COMPREPLY=( $(compgen -f ${cur}) ) + return 0 + ;; + import) + COMPREPLY=( $(compgen -f ${cur}) ) + return 0 + ;; + help) + opts="subcommands config" + COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) + return 0 + ;; + -H) + COMPREPLY=( $(compgen -A hostname ${cur}) ) + return 0 + ;; + --host) + COMPREPLY=( $(compgen -A hostname ${cur}) ) + return 0 + ;; + -V) + opts="$(rabbitmqadmin -q -f bash list vhosts)" + COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) + return 0 + ;; + --vhost) + opts="$(rabbitmqadmin -q -f bash list vhosts)" + COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) + return 0 + ;; + -u) + opts="$(rabbitmqadmin -q -f bash list users)" + COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) + return 0 + ;; + --username) + opts="$(rabbitmqadmin -q -f bash list users)" + COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) + return 0 + ;; + -f) + COMPREPLY=( 
$(compgen -W \"""" + " ".join(FORMATS.keys()) + """\" -- ${cur}) ) + return 0 + ;; + --format) + COMPREPLY=( $(compgen -W \"""" + " ".join(FORMATS.keys()) + """\" -- ${cur}) ) + return 0 + ;; + +""" + for l in LISTABLE: + key = l[0:len(l) - 1] + script += " " + key + """) + opts="$(rabbitmqadmin -q -f bash list """ + l + """)" + COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) + return 0 + ;; +""" + script += """ *) + ;; + esac + + COMPREPLY=($(compgen -W "${opts} ${fargs}" -- ${cur})) + return 0 +} +complete -F _rabbitmqadmin rabbitmqadmin +""" + print(script) + +if __name__ == "__main__": + main() diff --git a/docker/tests/pom.xml b/docker/tests/pom.xml index 5dabc1de..1a8105bd 100644 --- a/docker/tests/pom.xml +++ b/docker/tests/pom.xml @@ -12,6 +12,7 @@ org.apache.maven.plugins maven-compiler-plugin + 3.8.0 1.8 1.8 @@ -39,12 +40,12 @@ org.slf4j slf4j-api - LATEST + 1.7.25 org.slf4j slf4j-simple - LATEST + 1.7.25 commons-io diff --git a/docker/tests/src/test/resources/config.properties b/docker/tests/src/test/resources/config.properties index 95b24be0..f8563452 100644 --- a/docker/tests/src/test/resources/config.properties +++ b/docker/tests/src/test/resources/config.properties @@ -4,7 +4,7 @@ trace.file.name = .trace inbox.folder.path = /ega/inbox ingest.max-timeout = 100000 -images.name.db = postgres:latest +images.name.db = postgres:9.6 images.name.inbox = nbisweden/ega-mina-inbox images.name.ingest = nbisweden/ega-base images.name.keys = nbisweden/ega-base diff --git a/extras/publish.py b/extras/publish.py index 6b85cd7e..fd2c5159 100644 --- a/extras/publish.py +++ b/extras/publish.py @@ -8,10 +8,11 @@ import argparse import uuid import json -import pika import string import secrets +import pika + parser = argparse.ArgumentParser(description='''Publish message to the broker on this machine.''') parser.add_argument('--connection', diff --git a/lega/notifications.py b/lega/notifications.py index faf3a72e..a41d7f80 100644 --- a/lega/notifications.py +++ 
b/lega/notifications.py @@ -17,6 +17,7 @@ host = '127.0.0.1' port = 8888 +delim = b'$' from .conf import CONF from .utils.amqp import get_connection, publish @@ -46,16 +47,16 @@ def connection_made(self, transport): # Buffering can concatenate multiple messages, especially if they arrive too quickly # We tried to use TCP_NODELAY (to turn off the socket buffering on the sender's side) # but that didn't help. Therefore we use an out-of-band method: - # We separate messages with a '$' character + # We separate messages with a delim character def parse(self, data): while True: - if data.count(b'$') < 2: + if data.count(delim) < 2: self.buf = data return # We have 2 bars - pos1 = data.find(b'$') + pos1 = data.find(delim) username = data[:pos1] - pos2 = data.find(b'$',pos1+1) + pos2 = data.find(delim,pos1+1) filename = data[pos1+1:pos2] yield (username.decode(),filename.decode()) data = data[pos2+1:]