diff --git a/irods_audit_elk_stack/Dockerfile b/irods_audit_elk_stack/Dockerfile index 193478c..3de27bf 100644 --- a/irods_audit_elk_stack/Dockerfile +++ b/irods_audit_elk_stack/Dockerfile @@ -17,6 +17,7 @@ RUN apt-get update && \ # To mark all installed packages as manually installed: #apt-mark showauto | xargs -r apt-mark manual +# Install some standard stuff RUN apt-get update && \ apt-get install -y \ apt-transport-https \ @@ -25,13 +26,22 @@ RUN apt-get update && \ && \ apt-get install --no-install-recommends -y \ software-properties-common \ - systemd \ - systemd-sysv \ - dbus \ + gosu \ && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* /tmp/* +# Install yq, needed for init scripts +RUN add-apt-repository --no-update -y ppa:rmescandon/yq +RUN apt-get update && \ + apt-get install --no-install-recommends -y \ + yq \ + && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* /tmp/* + +# Install JDK/JRE +COPY java-excludes.dpkg.cfg /etc/dpkg/dpkg.cfg.d/java-excludes ADD https://packages.adoptium.net/artifactory/api/gpg/key/public /usr/share/keyrings/adoptium.asc ADD https://adoptopenjdk.jfrog.io/adoptopenjdk/api/gpg/key/public /usr/share/keyrings/adoptopenjdk.asc RUN gpg --dearmor -o /usr/share/keyrings/adoptium.gpg /usr/share/keyrings/adoptium.asc && \ @@ -44,7 +54,6 @@ RUN gpg --dearmor -o /usr/share/keyrings/adoptium.gpg /usr/share/keyrings/adopti && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* /tmp/* - #ARG java_ver=8 #ARG java_ver=11 #ARG java_vendor=adoptopenjdk @@ -52,9 +61,8 @@ RUN gpg --dearmor -o /usr/share/keyrings/adoptium.gpg /usr/share/keyrings/adopti ARG java_ver=17 ARG java_vendor=temurin ARG java_dist=jdk - RUN apt-get update && \ - apt-get install -y \ + apt-get install --no-install-recommends -y \ ${java_vendor}-${java_ver}-${java_dist} \ && \ apt-get clean && \ @@ -63,38 +71,34 @@ ENV JAVA_HOME=/usr/lib/jvm/${java_vendor}-${java_ver}-${java_dist}-amd64 RUN update-java-alternatives --set ${JAVA_HOME} ENV ES_JAVA_HOME=${JAVA_HOME} +# Install Elasticsearch and Kibana #ARG es_ver=6 #ARG es_ver=7 ARG es_ver=8 +COPY elasticsearch/exclude-jvm.dpkg.cfg /etc/dpkg/dpkg.cfg.d/elasticsearch-exclude-jvm +COPY kibana/exclude-node-stuff.dpkg.cfg /etc/dpkg/dpkg.cfg.d/kibana-exclude-node-stuff ADD https://artifacts.elastic.co/GPG-KEY-elasticsearch /usr/share/keyrings/elasticsearch-keyring.asc RUN gpg --dearmor -o /usr/share/keyrings/elasticsearch-keyring.gpg /usr/share/keyrings/elasticsearch-keyring.asc && \ - echo "deb [signed-by=/usr/share/keyrings/elasticsearch-keyring.gpg] https://artifacts.elastic.co/packages/${es_ver}.x/apt stable main" | tee /etc/apt/sources.list.d/elastic-${es_ver}.x.list && \ - echo 'path-exclude=/usr/share/elasticsearch/jdk' >> /etc/dpkg/dpkg.cfg.d/excludes-elasticsearch-jvm && \ - echo 'path-exclude=/usr/share/elasticsearch/jdk/*' >> /etc/dpkg/dpkg.cfg.d/excludes-elasticsearch-jvm - + echo "deb [signed-by=/usr/share/keyrings/elasticsearch-keyring.gpg] https://artifacts.elastic.co/packages/${es_ver}.x/apt stable main" | tee /etc/apt/sources.list.d/elastic-${es_ver}.x.list RUN apt-get update && \ - apt-get install -y \ + apt-get install --no-install-recommends -y \ elasticsearch \ kibana \ && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* /tmp/* - RUN echo "ES_JAVA_HOME=\"${ES_JAVA_HOME}\"" >> /etc/default/elasticsearch +# Install RabbitMQ ADD https://packagecloud.io/rabbitmq/rabbitmq-server/gpgkey /usr/share/keyrings/rabbitmq_rabbitmq-server.asc +ADD https://packages.erlang-solutions.com/ubuntu/erlang_solutions.asc 
/usr/share/keyrings/erlang_solutions.asc RUN add-apt-repository --no-update -y ppa:rabbitmq/rabbitmq-erlang && \ gpg --dearmor -o /usr/share/keyrings/rabbitmq_rabbitmq-server.gpg /usr/share/keyrings/rabbitmq_rabbitmq-server.asc && \ + gpg --dearmor -o /usr/share/keyrings/erlang_solutions.gpg /usr/share/keyrings/erlang_solutions.asc && \ echo "deb [signed-by=/usr/share/keyrings/rabbitmq_rabbitmq-server.gpg] https://packagecloud.io/rabbitmq/rabbitmq-server/ubuntu/ $(awk -F= '/^VERSION_CODENAME/{print$2}' /etc/os-release) main" | tee /etc/apt/sources.list.d/rabbitmq_rabbitmq-server.list && \ + echo "deb [signed-by=/usr/share/keyrings/erlang_solutions.gpg] https://packages.erlang-solutions.com/ubuntu $(awk -F= '/^VERSION_CODENAME/{print$2}' /etc/os-release) contrib" | tee /etc/apt/sources.list.d/erlang-solutions.list && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* /tmp/* - -ADD https://packages.erlang-solutions.com/ubuntu/erlang_solutions.asc /usr/share/keyrings/erlang_solutions.asc -#RUN gpg --dearmor -o /usr/share/keyrings/erlang_solutions.gpg /usr/share/keyrings/erlang_solutions.asc && \ -# echo "deb [signed-by=/usr/share/keyrings/erlang_solutions.gpg] https://packages.erlang-solutions.com/ubuntu $(awk -F= '/^VERSION_CODENAME/{print$2}' /etc/os-release) contrib" | tee /etc/apt/sources.list.d/erlang-solutions.list -RUN gpg --dearmor -o /usr/share/keyrings/erlang_solutions.gpg /usr/share/keyrings/erlang_solutions.asc && \ - echo "deb [signed-by=/usr/share/keyrings/erlang_solutions.gpg] http://binaries.erlang-solutions.com/debian $(awk -F= '/^VERSION_CODENAME/{print$2}' /etc/os-release) contrib" | tee /etc/apt/sources.list.d/erlang-solutions.list - RUN apt-get update && \ apt-get install -y \ rabbitmq-server \ @@ -102,27 +106,16 @@ RUN apt-get update && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* /tmp/* -# Hopefully these are new enough +# Install Python modules for Logstash stand-in RUN apt-get update && \ - apt-get install -y \ + apt-get install --no-install-recommends -y \ python3-qpid-proton \ python3-elasticsearch \ && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* /tmp/* -RUN rabbitmq-plugins enable rabbitmq_amqp1_0 && \ - rabbitmq-plugins enable rabbitmq_management - -RUN echo "server.host: \"0.0.0.0\"" >> /etc/kibana/kibana.yml - -COPY elasticsearch.yml /etc/elasticsearch/elasticsearch.yml -RUN /usr/share/elasticsearch/bin/elasticsearch-keystore remove \ - xpack.security.http.ssl.keystore.secure_password \ - xpack.security.transport.ssl.keystore.secure_password \ - xpack.security.transport.ssl.truststore.secure_password - -# utils +# Install some utils RUN apt-get update && \ apt-get install -y \ procps \ @@ -134,97 +127,57 @@ RUN apt-get update && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* /tmp/* -# from ubi8-init -STOPSIGNAL SIGRTMIN+3 - -# from ubi8-init -RUN systemctl mask \ - systemd-remount-fs.service \ - dev-hugepages.mount \ - sys-fs-fuse-connections.mount \ - systemd-logind.service \ - getty.target \ - console-getty.service \ - systemd-udev-trigger.service \ - systemd-udevd.service \ - systemd-random-seed.service - -# from ubi8-init -#mask systemd-machine-id-commit.service - partial fix for https://bugzilla.redhat.com/show_bug.cgi?id=1472439 -RUN systemctl mask systemd-machine-id-commit.service - -RUN systemctl mask \ - unattended-upgrades.service \ - packagekit-offline-update.service \ - systemd-timesyncd.service \ - systemd-resolved.service \ - apt-daily-upgrade.service \ - apt-daily-upgrade.timer \ - apt-daily.service \ - apt-daily.timer \ - 
e2scrub_reap.service \ - e2scrub_all.service \ - e2scrub_all.timer \ - ondemand.service \ - systemd-modules-load.service \ - fstrim.service \ - fstrim.timer - -#RUN systemctl mask \ -# remote-fs.target \ -# systemd-pstore.service \ -# cryptsetup.target - -RUN systemctl mask \ - getty-static.service \ - networkd-dispatcher.service - -#RUN systemctl mask \ -# kmod-static-nodes.service -# proc-sys-fs-binfmt_misc.mount \ -# proc-sys-fs-binfmt_misc.automount \ -# dev-mqueue.mount \ -# sys-kernel-config.mount \ -# sys-kernel-debug.mount \ -# sys-kernel-tracing.mount \ -# systemd-ask-password-console.path \ -# systemd-binfmt.service \ -# systemd-boot-system-token.service \ -# systemd-sysctl.service \ -# systemd-sysusers.service \ -# systemd-update-utmp.service \ -# systemd-initctl.socket \ -# systemd-update-utmp-runlevel.service \ -# systemd-ask-password-wall.path \ -# systemd-user-sessions.service - -#RUN systemctl mask \ -# systemd-tmpfiles-setup-dev.service \ -# systemd-tmpfiles-setup.service \ -# systemd-tmpfiles-clean.timer \ -# systemd-tmpfiles-clean.service - - -COPY startup-script.sh /var/lib/irods-elk/ -CMD ["/var/lib/irods-elk/startup-script.sh"] - -RUN mkdir -p /etc/systemd/system/kibana.service.d && \ - echo "[Unit]" >> /etc/systemd/system/kibana.service.d/elasticsearch.conf && \ - echo "After=elasticsearch.service" >> /etc/systemd/system/kibana.service.d/elasticsearch.conf && \ - echo "Wants=elasticsearch.service" >> /etc/systemd/system/kibana.service.d/elasticsearch.conf - -COPY not-logstash.service /etc/systemd/system/ -COPY not-logstash.py /var/lib/irods-elk/ - -COPY elk-firstrun.service /etc/systemd/system/ -COPY example_kibana_dashboard.ndjson /var/lib/irods-elk/ -COPY firstrun.sh /var/lib/irods-elk/ - -RUN systemctl enable \ - elasticsearch \ - rabbitmq-server \ - kibana \ - not-logstash \ - elk-firstrun +# Install RabbitMQ plugins and create administrator account +RUN rabbitmq-plugins enable \ + rabbitmq_amqp1_0 \ + rabbitmq_management \ + && \ + /etc/init.d/rabbitmq-server start && \ + rabbitmqctl add_user test test && \ + rabbitmqctl set_user_tags test administrator && \ + rabbitmqctl set_permissions -p / test ".*" ".*" ".*" && \ + /etc/init.d/rabbitmq-server stop + +# Elasticsearch init script and config files +COPY --chown=root:elasticsearch elasticsearch/elasticsearch.yml /etc/elasticsearch/elasticsearch.yml +COPY --chown=root:elasticsearch elasticsearch/jvm.options.d/oom_heap_dump.options /etc/elasticsearch/jvm.options.d/ +COPY elasticsearch/elasticsearch.init /etc/init.d/elasticsearch +RUN chmod +x /etc/init.d/elasticsearch +# Since we have disabled security, we must purge our keystore of secure passwords +RUN /usr/share/elasticsearch/bin/elasticsearch-keystore remove \ + xpack.security.http.ssl.keystore.secure_password \ + xpack.security.transport.ssl.keystore.secure_password \ + xpack.security.transport.ssl.truststore.secure_password -WORKDIR /root +# Kibana init script and config files +COPY --chown=root:kibana kibana/kibana.yml /etc/kibana/kibana.yml +COPY kibana/kibana.init /etc/init.d/kibana +RUN chmod +x /etc/init.d/kibana + +# Initialize Elasticsearch and Kibana +COPY kibana/irods_dashboard.ndjson /var/lib/irods-elk/irods_dashboard.ndjson +RUN ES_JAVA_OPTS="-Xms512m -Xmx512m" /etc/init.d/elasticsearch start && \ + curl -sLSf -XPUT "http://localhost:9200/irods_audit" && echo && \ + curl -sLSf -XPUT "http://localhost:9200/irods_audit/_settings" \ + -H 'Content-Type: application/json' \ + -d'{"index.mapping.total_fields.limit": 2000}' \ + && echo && \ + 
/etc/init.d/kibana start && \
+    curl -sLSf -X POST "http://localhost:5601/api/saved_objects/_import" \
+        -H "kbn-xsrf: true" \
+        --form file=@/var/lib/irods-elk/irods_dashboard.ndjson \
+        && echo && \
+    /etc/init.d/kibana stop && \
+    /etc/init.d/elasticsearch stop
+
+# not-logstash script and init script
+COPY not-logstash/not-logstash.py /var/lib/irods-elk/bin/not-logstash
+COPY not-logstash/not-logstash.init /etc/init.d/not-logstash
+RUN chmod +x /var/lib/irods-elk/bin/not-logstash \
+    /etc/init.d/not-logstash
+
+WORKDIR /var/lib/irods-elk
+
+COPY startup-script.sh /var/lib/irods-elk/startup-script.sh
+RUN chmod +x /var/lib/irods-elk/startup-script.sh
+ENTRYPOINT ["/var/lib/irods-elk/startup-script.sh"]
diff --git a/irods_audit_elk_stack/elasticsearch/elasticsearch.init b/irods_audit_elk_stack/elasticsearch/elasticsearch.init
new file mode 100755
index 0000000..a6826c1
--- /dev/null
+++ b/irods_audit_elk_stack/elasticsearch/elasticsearch.init
@@ -0,0 +1,195 @@
+#!/bin/bash
+#
+# /etc/init.d/elasticsearch -- startup script for Elasticsearch
+#
+### BEGIN INIT INFO
+# Provides:          elasticsearch
+# Required-Start:    $network $remote_fs $named
+# Required-Stop:     $network $remote_fs $named
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: Starts elasticsearch
+# Description:       Starts elasticsearch using start-stop-daemon
+### END INIT INFO
+
+PATH=/bin:/usr/bin:/sbin:/usr/sbin
+NAME=elasticsearch
+DESC="Elasticsearch Server"
+DEFAULT=/etc/default/$NAME
+
+if [ `id -u` -ne 0 ]; then
+    echo "You need root privileges to run this script"
+    exit 1
+fi
+
+
+. /lib/lsb/init-functions
+
+if [ -r /etc/default/rcS ]; then
+    . /etc/default/rcS
+fi
+
+
+# The following variables can be overwritten in $DEFAULT
+
+# How many seconds to wait for Elasticsearch to start
+ES_STARTUP_TIMEOUT=75
+
+# Run Elasticsearch as this user ID and group ID
+ES_USER=elasticsearch
+ES_GROUP=elasticsearch
+
+# Directory where the Elasticsearch binary distribution resides
+ES_HOME=/usr/share/$NAME
+
+# Directory containing Java
+ES_JAVA_HOME=$ES_HOME/jdk
+
+# Additional Java OPTS
+#ES_JAVA_OPTS=
+
+# Maximum number of open files
+MAX_OPEN_FILES=65535
+
+# Maximum amount of locked memory
+#MAX_LOCKED_MEMORY=
+
+# Elasticsearch configuration directory
+ES_PATH_CONF=/etc/$NAME
+
+# Maximum number of VMA (Virtual Memory Areas) a process can own
+MAX_MAP_COUNT=262144
+
+# Elasticsearch PID file directory
+PID_DIR="/var/run/elasticsearch"
+
+# End of variables that can be overwritten in $DEFAULT
+
+# overwrite settings from default file
+if [ -f "$DEFAULT" ]; then
+    . "$DEFAULT"
+fi
+
+# Define other required variables
+PID_FILE="$PID_DIR/$NAME.pid"
+DAEMON=$ES_HOME/bin/elasticsearch
+DAEMON_OPTS="-d -p $PID_FILE"
+DAEMON_ENV_VARS="ES_PATH_CONF=$ES_PATH_CONF"
+
+export ES_JAVA_OPTS
+export ES_JAVA_HOME
+export JAVA_HOME="$ES_JAVA_HOME"
+export ES_PATH_CONF
+export ES_INCLUDE
+export ES_JVM_OPTIONS
+export PID_DIR
+
+if [ -n "$ES_SD_NOTIFY" ]; then
+    export ES_SD_NOTIFY=false
+else
+    export ES_SD_NOTIFY
+fi
+
+if [ -n "$LIBFFI_TMPDIR" ]; then
+    export LIBFFI_TMPDIR
+fi
+
+if [ ! -x "$DAEMON" ]; then
+    echo "The elasticsearch startup script does not exist or is not executable, tried: $DAEMON"
+    exit 1
+fi
+
+case "$1" in
+  start)
+    log_daemon_msg "Starting $DESC"
+
+    pid=`pidofproc -p $PID_FILE elasticsearch`
+    if [ -n "$pid" ] ; then
+        log_begin_msg "Already running."
+        log_end_msg 0
+        exit 0
+    fi
+
+    # Ensure that the PID_DIR exists (it is cleaned at OS startup time)
+    if [ -n "$PID_DIR" ] && [ ! -e "$PID_DIR" ]; then
+        mkdir -p "$PID_DIR" && chown "$ES_USER":"$ES_GROUP" "$PID_DIR"
+    fi
+    if [ -n "$PID_FILE" ] && [ ! -e "$PID_FILE" ]; then
+        touch "$PID_FILE" && chown "$ES_USER":"$ES_GROUP" "$PID_FILE"
+    fi
+
+    if [ -n "$MAX_OPEN_FILES" ]; then
+        ulimit -n $MAX_OPEN_FILES
+    fi
+
+    if [ -n "$MAX_LOCKED_MEMORY" ]; then
+        ulimit -l $MAX_LOCKED_MEMORY
+    fi
+
+    if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count ] && [ "$MAX_MAP_COUNT" -gt $(cat /proc/sys/vm/max_map_count) ]; then
+        sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT
+    fi
+
+    # Start Daemon
+    start-stop-daemon --start \
+        --chdir "$ES_HOME" \
+        --user "$ES_USER" --group "$ES_GROUP" -c "$ES_USER":"$ES_GROUP" \
+        --pidfile "$PID_FILE" \
+        --exec /usr/bin/env $DAEMON_ENV_VARS $DAEMON -- $DAEMON_OPTS
+    return=$?
+    if [ $return -eq 0 ]; then
+        i=0
+        timeout=$ES_STARTUP_TIMEOUT
+        # Wait for the process to be properly started before exiting
+        until { kill -0 `cat "$PID_FILE"`; } >/dev/null 2>&1
+        do
+            sleep 1
+            i=$(($i + 1))
+            if [ $i -gt $timeout ]; then
+                log_end_msg 1
+                exit 1
+            fi
+        done
+    fi
+    log_end_msg $return
+    exit $return
+    ;;
+  stop)
+    log_daemon_msg "Stopping $DESC"
+
+    if [ -f "$PID_FILE" ]; then
+        start-stop-daemon --stop \
+            --pidfile "$PID_FILE" \
+            --user "$ES_USER" --group "$ES_GROUP" \
+            --quiet \
+            --retry TERM/60/KILL/5 > /dev/null
+        stop_status=$?
+        if [ $stop_status -eq 1 ]; then
+            log_progress_msg "$DESC is not running but pid file exists, cleaning up"
+        elif [ $stop_status -eq 3 ]; then
+            PID="`cat $PID_FILE`"
+            log_failure_msg "Failed to stop $DESC (pid $PID)"
+            exit 1
+        fi
+        rm -f "$PID_FILE"
+    else
+        log_progress_msg "(not running)"
+    fi
+    log_end_msg 0
+    ;;
+  status)
+    status_of_proc -p $PID_FILE elasticsearch elasticsearch && exit 0 || exit $?
+    ;;
+  restart)
+    if [ -f "$PID_FILE" ]; then
+        $0 stop
+    fi
+    $0 start
+    ;;
+  *)
+    log_success_msg "Usage: $0 {start|stop|restart|status}"
+    exit 1
+    ;;
+esac
+
+exit 0
diff --git a/irods_audit_elk_stack/elasticsearch.yml b/irods_audit_elk_stack/elasticsearch/elasticsearch.yml
similarity index 88%
rename from irods_audit_elk_stack/elasticsearch.yml
rename to irods_audit_elk_stack/elasticsearch/elasticsearch.yml
index 1dab07c..1f387d1 100644
--- a/irods_audit_elk_stack/elasticsearch.yml
+++ b/irods_audit_elk_stack/elasticsearch/elasticsearch.yml
@@ -14,13 +14,13 @@
 #
 # Use a descriptive name for your cluster:
 #
-#cluster.name: my-application
+cluster.name: irods-elk
 #
 # ------------------------------------ Node ------------------------------------
 #
 # Use a descriptive name for the node:
 #
-node.name: irods-elk
+node.name: irods-elk-node
 #
 # Add custom attributes to the node:
 #
@@ -53,7 +53,6 @@ path.logs: /var/log/elasticsearch
 # By default Elasticsearch is only accessible on localhost. Set a different
 # address here to expose this node on the network:
 #
-#network.host: 192.168.0.1
 network.host: 0.0.0.0
 #
 # By default Elasticsearch listens for HTTP traffic on the first free port it
@@ -61,10 +60,19 @@ network.host: 0.0.0.0
 #
 http.port: 9200
 #
+# By default Elasticsearch listens for traffic from other nodes on the first
+# free port it finds starting at 9300. Set a specific port here:
+#
+transport.port: 9300
+#
 # For more information, consult the network module documentation.
# # --------------------------------- Discovery ---------------------------------- # +# Specify whether a multi-node cluster should be formed +# +discovery.type: single-node +# # Pass an initial list of hosts to perform discovery when this node is started: # The default list of hosts is ["127.0.0.1", "[::1]"] # @@ -72,8 +80,7 @@ http.port: 9200 # # Bootstrap the cluster using an initial set of master-eligible nodes: # -#cluster.initial_master_nodes: ["node-1", "node-2"] -cluster.initial_master_nodes: ["irods-elk"] +#cluster.initial_master_nodes: ["irods-elk-node"] # # For more information, consult the discovery and cluster formation module documentation. # @@ -89,16 +96,12 @@ cluster.initial_master_nodes: ["irods-elk"] # #action.destructive_requires_name: false # +# Enable machine learning APIs on the node: +# +xpack.ml.enabled: false +# # ---------------------------------- Security ---------------------------------- # # Enable/disable security (enabled by default since version 8.0) # xpack.security.enabled: false - - - - -http.host: 0.0.0.0 - -transport.host: localhost -transport.port: 9300 diff --git a/irods_audit_elk_stack/elasticsearch/exclude-jvm.dpkg.cfg b/irods_audit_elk_stack/elasticsearch/exclude-jvm.dpkg.cfg new file mode 100644 index 0000000..d8b8c51 --- /dev/null +++ b/irods_audit_elk_stack/elasticsearch/exclude-jvm.dpkg.cfg @@ -0,0 +1,3 @@ +# Drop Elasticsearch's bundled JVM +path-exclude=/usr/share/elasticsearch/jdk +path-exclude=/usr/share/elasticsearch/jdk/* diff --git a/irods_audit_elk_stack/elasticsearch/jvm.options.d/oom_heap_dump.options b/irods_audit_elk_stack/elasticsearch/jvm.options.d/oom_heap_dump.options new file mode 100644 index 0000000..9bfea03 --- /dev/null +++ b/irods_audit_elk_stack/elasticsearch/jvm.options.d/oom_heap_dump.options @@ -0,0 +1,2 @@ +# Do not dump heap on OOM +-XX:-HeapDumpOnOutOfMemoryError diff --git a/irods_audit_elk_stack/elk-firstrun.service b/irods_audit_elk_stack/elk-firstrun.service deleted file mode 100644 index 9830647..0000000 --- a/irods_audit_elk_stack/elk-firstrun.service +++ /dev/null @@ -1,16 +0,0 @@ -[Unit] -Description=iRODS elk stack first-run setup -After=rabbitmq-server.service -After=elasticsearch.service -After=kibana.service -Requires=rabbitmq-server.service -Requires=elasticsearch.service -Requires=kibana.service - -[Service] -Type=oneshot -ExecStart=/var/lib/irods-elk/firstrun.sh -TimeoutSec=600s - -[Install] -WantedBy=multi-user.target diff --git a/irods_audit_elk_stack/firstrun.sh b/irods_audit_elk_stack/firstrun.sh deleted file mode 100755 index 0216af2..0000000 --- a/irods_audit_elk_stack/firstrun.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/bin/bash - -if [ ! -f /var/lib/irods-elk/.firstrun_rmq_done ]; then - echo "<5>Performing rabbitmq first-run setup..." - - rabbitmqctl add_user test test - rabbitmqctl set_user_tags test administrator - rabbitmqctl set_permissions -p / test ".*" ".*" ".*" - - echo "<5>Completed rabbitmq first-run setup" - touch /var/lib/irods-elk/.firstrun_rmq_done -else - echo "<5>Skipping rabbitmq first-run setup (already done)..." -fi - -if [ ! -f /var/lib/irods-elk/.firstrun_es_done ]; then - echo "<5>Performing elasticsearch first-run setup..." 
- - curl -sLS http://localhost:9200 - curl -sLS -XPUT "http://localhost:9200/irods_audit" - curl -sLS -XPUT http://localhost:9200/irods_audit/_settings -H 'Content-Type: application/json' -d'{"index.mapping.total_fields.limit": 2000}' - - echo "<5>Completed elasticsearch first-run setup" - touch /var/lib/irods-elk/.firstrun_es_done -else - echo "<5>Skipping elasticsearch first-run setup (already done)..." -fi - -if [ ! -f /var/lib/irods-elk/.firstrun_kb_done ]; then - while true; do - echo "<5>Checking kibana status..." - - status_code="$(curl -sLSI -w "%{http_code}" -o /dev/null "http://localhost:5601/api/features" -H 'kbn-xsrf: true')" - curl_ret=$? - - if [[ "$curl_ret" != "0" ]]; then - echo "<4>Could not reach kibana (curl return code ${curl_ret})" - elif [[ "$status_code" != "200" ]]; then - echo "<4>Kibana is unhappy (got HTTP status ${status_code})" - else - echo "<5>Kibana seems ready" - break - fi - echo "<5>Waiting 3 seconds and trying again..." - sleep 3s - done - - echo "<5>Performing kibana first-run setup..." - - #curl -sLS -XPOST "http://localhost:5601/api/saved_objects/index-pattern/irods-audit-pattern" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{ "attributes": { "title": "irods_audit*", "timeFieldName": "@timestamp" } }' - curl -sLS -X POST "http://localhost:5601/api/saved_objects/_import" -H "kbn-xsrf: true" --form file=@/var/lib/irods-elk/example_kibana_dashboard.ndjson - - echo "<5>Completed kibana first-run setup" - touch /var/lib/irods-elk/.firstrun_kb_done -else - echo "<5>Skipping kibana first-run setup (already done)..." -fi diff --git a/irods_audit_elk_stack/java-excludes.dpkg.cfg b/irods_audit_elk_stack/java-excludes.dpkg.cfg new file mode 100644 index 0000000..8017ff5 --- /dev/null +++ b/irods_audit_elk_stack/java-excludes.dpkg.cfg @@ -0,0 +1,12 @@ +# Drop includes +path-exclude=/usr/lib/jvm/*/include/* + +# Drop manpages +path-exclude=/usr/lib/jvm/*/man/* + +# Drop source zips +path-exclude=/usr/lib/jvm/*/src.zip +path-exclude=/usr/lib/jvm/*/lib/src.zip + +# Drop samples +path-exclude=/usr/lib/jvm/*/sample/ diff --git a/irods_audit_elk_stack/kibana/exclude-node-stuff.dpkg.cfg b/irods_audit_elk_stack/kibana/exclude-node-stuff.dpkg.cfg new file mode 100644 index 0000000..53cec29 --- /dev/null +++ b/irods_audit_elk_stack/kibana/exclude-node-stuff.dpkg.cfg @@ -0,0 +1,6 @@ +# Drop includes from Kibana's bundled nodejs +path-exclude=/usr/share/kibana/node/include/* + +# Drop man pages from Kibana's bundled nodejs +path-exclude=/usr/share/kibana/node/share/man/* +path-exclude=/usr/share/kibana/node/share/man diff --git a/irods_audit_elk_stack/example_kibana_dashboard.ndjson b/irods_audit_elk_stack/kibana/irods_dashboard.ndjson similarity index 100% rename from irods_audit_elk_stack/example_kibana_dashboard.ndjson rename to irods_audit_elk_stack/kibana/irods_dashboard.ndjson diff --git a/irods_audit_elk_stack/kibana/kibana.init b/irods_audit_elk_stack/kibana/kibana.init new file mode 100755 index 0000000..1966769 --- /dev/null +++ b/irods_audit_elk_stack/kibana/kibana.init @@ -0,0 +1,333 @@ +#!/bin/bash +# +# /etc/init.d/kibana -- startup script for Kibana +# +### BEGIN INIT INFO +# Provides: kibana +# Required-Start: $network $remote_fs $named elasticsearch +# Required-Stop: $network $remote_fs $named +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Starts kibana +# Description: Starts kibana using start-stop-daemon +### END INIT INFO + +PATH=/bin:/usr/bin:/sbin:/usr/sbin +NAME=kibana +DESC="Kibana Server" 
+DEFAULT=/etc/default/$NAME +# Ignoring /etc/sysconfig/$name + +if [ `id -u` -ne 0 ]; then + echo "You need root privileges to run this script" + exit 1 +fi + + +. /lib/lsb/init-functions + +if [ -r /etc/default/rcS ]; then + . /etc/default/rcS +fi + + +# The following variables can be overwritten in $DEFAULT + +# How many seconds to wait for Kibana to start +KBN_STARTUP_TIMEOUT=75 + +# Run Kibana as this user ID and group ID +KBN_USER=kibana +KBN_GROUP=kibana + +# Directory where the Kibana distribution resides +KBN_HOME=/usr/share/$NAME + +# Kibana configuration directory +KBN_PATH_CONF=/etc/$NAME + +NODE_OPTIONS="--max-old-space-size=4096" + +# End of variables that can be overwritten in $DEFAULT + +# overwrite settings from default file +if [ -f "$DEFAULT" ]; then + . "$DEFAULT" +fi + +# Variables defined from configuration +# I don't think either yq implementation actually does this right so we have to get nasty + +# Kibana log file +KBN_LOG_FILE="$(yq e '.logging.appenders.file.fileName' "$KBN_PATH_CONF/kibana.yml")" +if [ "$KBN_LOG_FILE" != "null" ]; then + KBN_LOG_DIR="$(dirname "$KBN_LOG_FILE")" +fi + +# Kibana PID file +KBN_PID_FILE="$(yq e '.pid.file' "$KBN_PATH_CONF/kibana.yml")" +if [ "$KBN_PID_FILE" == "null" ]; then + KBN_PID_FILE="$(yq e '."pid.file"' "$KBN_PATH_CONF/kibana.yml")" +fi +if [ "$KBN_PID_FILE" == "null" ]; then + PID_DIR="/run/kibana" + PID_FILE="$PID_DIR/$NAME.pid" +else + PID_FILE="$KBN_PID_FILE" + PID_DIR="$(dirname "$KBN_PID_FILE")" +fi + +# Address/Host to which Kibana binds +KBN_SERVER_HOST="$(yq e '.server.host' "$KBN_PATH_CONF/kibana.yml")" +if [ "$KBN_SERVER_HOST" == "null" ]; then + KBN_SERVER_HOST="$(yq e '."server.host"' "$KBN_PATH_CONF/kibana.yml")" +fi +if [ "$KBN_SERVER_HOST" == "null" ]; then + KBN_SERVER_HOST="localhost" +fi + +# Port on which Kibana listens +KBN_SERVER_PORT="$(yq e '.server.port' "$KBN_PATH_CONF/kibana.yml")" +if [ "$KBN_SERVER_PORT" == "null" ]; then + KBN_SERVER_PORT="$(yq e '."server.port"' "$KBN_PATH_CONF/kibana.yml")" +fi +if [ "$KBN_SERVER_PORT" == "null" ]; then + KBN_SERVER_PORT=5601 +fi + +# Whether or not Kibana uses TLS +KBN_SERVER_TLS="$(yq e '.server.ssl.enabled' "$KBN_PATH_CONF/kibana.yml")" +if [ "$KBN_SERVER_TLS" == "null" ]; then + KBN_SERVER_TLS="$(yq e '."server.ssl.enabled"' "$KBN_PATH_CONF/kibana.yml")" +fi +if [ "$KBN_SERVER_TLS" == "null" ]; then + KBN_SERVER_TLS="$(yq e '.server."ssl.enabled"' "$KBN_PATH_CONF/kibana.yml")" +fi +if [ "$KBN_SERVER_TLS" == "null" ]; then + KBN_SERVER_TLS="$(yq e '."server.ssl".enabled' "$KBN_PATH_CONF/kibana.yml")" +fi +if [ "$KBN_SERVER_TLS" == "null" ]; then + KBN_SERVER_TLS="false" +fi + +# Define other required variables +DAEMON=$KBN_HOME/bin/kibana +DAEMON_OPTS="" + +export KBN_PATH_CONF +export NODE_OPTIONS + +if [ ! 
-x "$DAEMON" ]; then
+    echo "The kibana startup script does not exist or is not executable, tried: $DAEMON"
+    exit 1
+fi
+
+if [ "$KBN_SERVER_TLS" == "true" ]; then
+    KBN_SERVER_PROTO="https"
+else
+    KBN_SERVER_PROTO="http"
+fi
+
+# returns 0 for (probably) happy kibana
+# returns 150 for connection refused
+# returns 151 for (probably) degraded (or still starting) kibana
+# returns 152 if health check times out
+# returns 153 for unhandled curl error
+# returns 154 for unhandled HTTP status code
+kbn_hc_max_reruns=12
+kbn_healthcheck() {
+    retry_code=""
+    rerun_qty=0
+
+    while [[ "$#" -gt "0" ]]; do
+        case $1 in
+            retry) shift; retry_code="$1" ;;
+            rerun) shift; rerun_qty="$1" ;;
+            *) ;;
+        esac
+        shift
+    done
+
+    if [[ "$rerun_qty" -gt "$kbn_hc_max_reruns" ]]; then
+        return 153
+    fi
+
+    unset curl_out curl_err curl_ret status_code
+    eval "$( curl -sLIk -w "%{http_code}" -o /dev/null "${KBN_SERVER_PROTO}://${KBN_SERVER_HOST}:${KBN_SERVER_PORT}/api/features" -H 'kbn-xsrf: true' \
+        2> >(curl_err=$(cat); typeset -p curl_err) \
+         > >(curl_out=$(cat); typeset -p curl_out); \
+        curl_ret=$?; typeset -p curl_ret )"
+    status_code="$curl_out"
+
+    if [[ "$curl_ret" != "0" ]]; then
+        case $curl_ret in
+            7) kbn_hc_failure_reason="Cannot connect to Kibana"; return 150 ;;
+            18) # Incomplete response, try again
+                kbn_hc_failure_reason="${curl_err}"
+                if [ "$retry_code" != "18" ]; then
+                    kbn_healthcheck retry 18 rerun $(($rerun_qty + 1))
+                    return $?
+                fi
+                return 153
+                ;;
+            52) # Completely empty response, try again
+                kbn_hc_failure_reason="${curl_err}"
+                if [ "$retry_code" != "52" ]; then
+                    kbn_healthcheck retry 52 rerun $(($rerun_qty + 1))
+                    return $?
+                fi
+                return 153
+                ;;
+            22) # HTTP status code indicates error
+                if [[ "$status_code" == "503" ]]; then
+                    kbn_hc_failure_reason="Kibana is degraded or still starting up (HTTP status 503)"
+                    return 151
+                fi
+                kbn_hc_failure_reason="Kibana responded with HTTP status ${status_code}"
+                return 154
+                ;;
+            28) kbn_hc_failure_reason="Timeout"; return 152 ;;
+            *) kbn_hc_failure_reason="${curl_err}"; return 153 ;;
+        esac
+    elif [[ "$status_code" != "200" ]]; then
+        kbn_hc_failure_reason="Kibana responded with HTTP status ${status_code}"
+        return 154
+    fi
+
+    return 0
+}
+
+case "$1" in
+  start)
+    log_daemon_msg "Starting $DESC"
+
+    pid=`pidofproc -p $PID_FILE kibana`
+    if [ -n "$pid" ]; then
+        log_begin_msg "Already running."
+        log_end_msg 0
+        exit 0
+    else
+        rm -f "$PID_FILE"
+    fi
+
+    # Ensure that the PID_DIR exists (it is cleaned at OS startup time)
+    if [ -n "$PID_DIR" ] && [ ! -e "$PID_DIR" ]; then
+        mkdir -p "$PID_DIR" && chown "$KBN_USER":"$KBN_GROUP" "$PID_DIR"
+    fi
+
+    # Ensure that the KBN_LOG_DIR exists (it is cleaned at OS startup time)
+    if [ -n "$KBN_LOG_DIR" ] && [ ! -e "$KBN_LOG_DIR" ]; then
+        mkdir -p "$KBN_LOG_DIR" && chown "$KBN_USER":"$KBN_GROUP" "$KBN_LOG_DIR"
+    fi
+
+    if [ "$KBN_PID_FILE" == "null" ]; then
+        MAKE_PIDFILE_ARG="--make-pidfile"
+    else
+        MAKE_PIDFILE_ARG=""
+    fi
+
+    # Start Daemon
+    start-stop-daemon --start \
+        --chdir "$KBN_HOME" \
+        --user "$KBN_USER" --group "$KBN_GROUP" -c "$KBN_USER":"$KBN_GROUP" \
+        --pidfile "$PID_FILE" $MAKE_PIDFILE_ARG \
+        --background \
+        --exec $DAEMON -- $DAEMON_OPTS
+    return=$?
+
+    if [ $return -eq 0 ]; then
+        # Wait for the process to be properly started before exiting
+        i=0
+        timeout=$KBN_STARTUP_TIMEOUT
+
+        # Wait for kibana to create its pidfile
+        if [ "$KBN_PID_FILE" != "null" ]; then
+            until [ -e "$PID_FILE" ]; do
+                sleep 1
+                i=$(($i + 1))
+                if [ $i -gt $timeout ]; then
+                    log_failure_msg "Failed to start $DESC (timeout waiting for pidfile)"
+                    log_end_msg 1
+                    exit 1
+                fi
+            done
+        fi
+
+        # Wait for kibana to respond properly to requests
+        while true; do
+            kbn_healthcheck
+            hc_ret=$?
+            if [[ "$hc_ret" == "0" ]]; then
+                break
+            fi
+            i=$(($i + 1))
+            if [ $i -gt $timeout ]; then
+                log_failure_msg "$DESC appears to be running, but healthcheck failed: $kbn_hc_failure_reason"
+                log_end_msg $hc_ret
+                exit $hc_ret
+            fi
+            sleep 1
+        done
+    fi
+
+    log_end_msg $return
+    exit $return
+    ;;
+
+  stop)
+    log_daemon_msg "Stopping $DESC"
+
+    if [ -f "$PID_FILE" ]; then
+        start-stop-daemon --stop \
+            --pidfile "$PID_FILE" \
+            --user "$KBN_USER" --group "$KBN_GROUP" \
+            --quiet \
+            --retry TERM/60/KILL/5 >/dev/null
+        stop_status=$?
+        if [ $stop_status -eq 1 ]; then
+            log_progress_msg "$DESC is not running but pid file exists, cleaning up"
+        elif [ $stop_status -eq 3 ]; then
+            PID="`cat $PID_FILE`"
+            log_failure_msg "Failed to stop $DESC (pid $PID)"
+            exit 1
+        fi
+        rm -f "$PID_FILE"
+    else
+        log_progress_msg "(not running)"
+    fi
+    log_end_msg 0
+    ;;
+
+  status)
+    pidofproc -p $PID_FILE kibana
+    status=$?
+    if [ "$status" == "4" ]; then
+        log_failure_msg "could not access PID file for $NAME"
+        exit $status
+    elif [ "$status" != "0" ]; then
+        log_failure_msg "$NAME is not running"
+        exit $status
+    fi
+    kbn_healthcheck
+    hc_ret=$?
+    if [[ "$hc_ret" != "0" ]]; then
+        log_failure_msg "$NAME is running, but healthcheck failed: $kbn_hc_failure_reason"
+        exit $hc_ret
+    fi
+    log_success_msg "$NAME is running"
+    exit 0
+    ;;
+
+  restart)
+    if [ -f "$PID_FILE" ]; then
+        $0 stop
+    fi
+    $0 start
+    ;;
+
+  *)
+    log_success_msg "Usage: $0 {start|stop|restart|status}"
+    exit 1
+    ;;
+esac
+
+exit 0
diff --git a/irods_audit_elk_stack/kibana/kibana.yml b/irods_audit_elk_stack/kibana/kibana.yml
new file mode 100644
index 0000000..888b1be
--- /dev/null
+++ b/irods_audit_elk_stack/kibana/kibana.yml
@@ -0,0 +1,174 @@
+# For more configuration options see the configuration guide for Kibana in
+# https://www.elastic.co/guide/index.html
+
+# =================== System: Kibana Server ===================
+# Kibana is served by a back end server. This setting specifies the port to use.
+#server.port: 5601
+
+# Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values.
+# The default is 'localhost', which usually means remote machines will not be able to connect.
+# To allow connections from remote users, set this parameter to a non-loopback address.
+server.host: "0.0.0.0"
+
+# Enables you to specify a path to mount Kibana at if you are running behind a proxy.
+# Use the `server.rewriteBasePath` setting to tell Kibana if it should remove the basePath
+# from requests it receives, and to prevent a deprecation warning at startup.
+# This setting cannot end in a slash.
+#server.basePath: ""
+
+# Specifies whether Kibana should rewrite requests that are prefixed with
+# `server.basePath` or require that they are rewritten by your reverse proxy.
+# Defaults to `false`.
+#server.rewriteBasePath: false
+
+# Specifies the public URL at which Kibana is available for end users. If
+# `server.basePath` is configured this URL should end with the same basePath.
+#server.publicBaseUrl: "" + +# The maximum payload size in bytes for incoming server requests. +#server.maxPayload: 1048576 + +# The Kibana server's name. This is used for display purposes. +#server.name: "your-hostname" + +# =================== System: Kibana Server (Optional) =================== +# Enables SSL and paths to the PEM-format SSL certificate and SSL key files, respectively. +# These settings enable SSL for outgoing requests from the Kibana server to the browser. +#server.ssl.enabled: false +#server.ssl.certificate: /path/to/your/server.crt +#server.ssl.key: /path/to/your/server.key + +# =================== System: Elasticsearch =================== +# The URLs of the Elasticsearch instances to use for all your queries. +#elasticsearch.hosts: ["http://localhost:9200"] + +# If your Elasticsearch is protected with basic authentication, these settings provide +# the username and password that the Kibana server uses to perform maintenance on the Kibana +# index at startup. Your Kibana users still need to authenticate with Elasticsearch, which +# is proxied through the Kibana server. +#elasticsearch.username: "kibana_system" +#elasticsearch.password: "pass" + +# Kibana can also authenticate to Elasticsearch via "service account tokens". +# Service account tokens are Bearer style tokens that replace the traditional username/password based configuration. +# Use this token instead of a username/password. +# elasticsearch.serviceAccountToken: "my_token" + +# Time in milliseconds to wait for Elasticsearch to respond to pings. Defaults to the value of +# the elasticsearch.requestTimeout setting. +#elasticsearch.pingTimeout: 1500 + +# Time in milliseconds to wait for responses from the back end or Elasticsearch. This value +# must be a positive integer. +#elasticsearch.requestTimeout: 30000 + +# The maximum number of sockets that can be used for communications with elasticsearch. +# Defaults to `Infinity`. +#elasticsearch.maxSockets: 1024 + +# Specifies whether Kibana should use compression for communications with elasticsearch +# Defaults to `false`. +#elasticsearch.compression: false + +# List of Kibana client-side headers to send to Elasticsearch. To send *no* client-side +# headers, set this value to [] (an empty list). +#elasticsearch.requestHeadersWhitelist: [ authorization ] + +# Header names and values that are sent to Elasticsearch. Any custom headers cannot be overwritten +# by client-side headers, regardless of the elasticsearch.requestHeadersWhitelist configuration. +#elasticsearch.customHeaders: {} + +# Time in milliseconds for Elasticsearch to wait for responses from shards. Set to 0 to disable. +#elasticsearch.shardTimeout: 30000 + +# =================== System: Elasticsearch (Optional) =================== +# These files are used to verify the identity of Kibana to Elasticsearch and are required when +# xpack.security.http.ssl.client_authentication in Elasticsearch is set to required. +#elasticsearch.ssl.certificate: /path/to/your/client.crt +#elasticsearch.ssl.key: /path/to/your/client.key + +# Enables you to specify a path to the PEM file for the certificate +# authority for your Elasticsearch instance. +#elasticsearch.ssl.certificateAuthorities: [ "/path/to/your/CA.pem" ] + +# To disregard the validity of SSL certificates, change this setting's value to 'none'. +#elasticsearch.ssl.verificationMode: full + +# =================== System: Logging =================== +# Set the value of this setting to off to suppress all logging output, or to debug to log everything. 
Defaults to 'info' +#logging.root.level: debug + +# Enables you to specify a file where Kibana stores log output. +logging: + appenders: + file: + type: file + fileName: /var/log/kibana/kibana.log + layout: + type: json + root: + appenders: + - default + - file +# layout: +# type: json + +# Logs queries sent to Elasticsearch. +#logging.loggers: +# - name: elasticsearch.query +# level: debug + +# Logs http responses. +#logging.loggers: +# - name: http.server.response +# level: debug + +# Logs system usage information. +#logging.loggers: +# - name: metrics.ops +# level: debug + +# =================== System: Other =================== +# The path where Kibana stores persistent data not saved in Elasticsearch. Defaults to data +#path.data: data + +# Specifies the path where Kibana creates the process ID file. +pid.file: /run/kibana/kibana.pid + +# Set the interval in milliseconds to sample system and process performance +# metrics. Minimum is 100ms. Defaults to 5000ms. +#ops.interval: 5000 + +# Specifies locale to be used for all localizable strings, dates and number formats. +# Supported languages are the following: English (default) "en", Chinese "zh-CN", Japanese "ja-JP", French "fr-FR". +#i18n.locale: "en" + +# =================== Frequently used (Optional)=================== + +# =================== Saved Objects: Migrations =================== +# Saved object migrations run at startup. If you run into migration-related issues, you might need to adjust these settings. + +# The number of documents migrated at a time. +# If Kibana can't start up or upgrade due to an Elasticsearch `circuit_breaking_exception`, +# use a smaller batchSize value to reduce the memory pressure. Defaults to 1000 objects per batch. +#migrations.batchSize: 1000 + +# The maximum payload size for indexing batches of upgraded saved objects. +# To avoid migrations failing due to a 413 Request Entity Too Large response from Elasticsearch. +# This value should be lower than or equal to your Elasticsearch cluster’s `http.max_content_length` +# configuration option. Default: 100mb +#migrations.maxBatchSizeBytes: 100mb + +# The number of times to retry temporary migration failures. Increase the setting +# if migrations fail frequently with a message such as `Unable to complete the [...] step after +# 15 attempts, terminating`. Defaults to 15 +#migrations.retryAttempts: 15 + +# =================== Search Autocomplete =================== +# Time in milliseconds to wait for autocomplete suggestions from Elasticsearch. +# This value must be a whole number greater than zero. Defaults to 1000ms +#unifiedSearch.autocomplete.valueSuggestions.timeout: 1000 + +# Maximum number of documents loaded by each shard to generate autocomplete suggestions. +# This value must be a whole number greater than zero. 
Defaults to 100_000
+#unifiedSearch.autocomplete.valueSuggestions.terminateAfter: 100000
diff --git a/irods_audit_elk_stack/not-logstash.service b/irods_audit_elk_stack/not-logstash.service
deleted file mode 100644
index 972e820..0000000
--- a/irods_audit_elk_stack/not-logstash.service
+++ /dev/null
@@ -1,18 +0,0 @@
-[Unit]
-Description=fake logstash
-After=rabbitmq-server.service
-After=elasticsearch.service
-Requires=rabbitmq-server.service
-Requires=elasticsearch.service
-
-[Service]
-Type=simple
-User=elasticsearch
-Group=elasticsearch
-ExecStart=/var/lib/irods-elk/not-logstash.py
-Restart=always
-WorkingDirectory=/var/lib/irods-elk
-LimitNOFILE=16384
-
-[Install]
-WantedBy=multi-user.target
diff --git a/irods_audit_elk_stack/not-logstash/not-logstash.init b/irods_audit_elk_stack/not-logstash/not-logstash.init
new file mode 100755
index 0000000..e97c177
--- /dev/null
+++ b/irods_audit_elk_stack/not-logstash/not-logstash.init
@@ -0,0 +1,130 @@
+#!/bin/bash
+#
+# /etc/init.d/not-logstash -- startup script for fake Logstash
+#
+### BEGIN INIT INFO
+# Provides:          not-logstash
+# Required-Start:    $network $remote_fs $named rabbitmq-server elasticsearch
+# Required-Stop:     $network $remote_fs $named
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: Starts not-logstash
+# Description:       Starts not-logstash using start-stop-daemon
+### END INIT INFO
+
+PATH=/bin:/usr/bin:/sbin:/usr/sbin
+NAME=not-logstash
+DESC="Fake Logstash"
+DEFAULT=/etc/default/$NAME
+
+if [ `id -u` -ne 0 ]; then
+    echo "You need root privileges to run this script"
+    exit 1
+fi
+
+
+. /lib/lsb/init-functions
+
+if [ -r /etc/default/rcS ]; then
+    . /etc/default/rcS
+fi
+
+
+# The following variables can be overwritten in $DEFAULT
+
+# How many seconds to wait for not-logstash to start
+LS_STARTUP_TIMEOUT=75
+
+# Run not-logstash as this user ID and group ID
+LS_USER=elasticsearch
+LS_GROUP=elasticsearch
+
+# Directory where the not-logstash binary distribution resides
+LS_HOME=/var/lib/irods-elk
+
+# not-logstash PID file directory
+PID_DIR="/var/run/$NAME"
+
+# End of variables that can be overwritten in $DEFAULT
+
+# overwrite settings from default file
+if [ -f "$DEFAULT" ]; then
+    . "$DEFAULT"
+fi
+
+# Define other required variables
+PID_FILE="$PID_DIR/$NAME.pid"
+DAEMON=$LS_HOME/bin/not-logstash
+DAEMON_OPTS=""
+
+if [ ! -x "$DAEMON" ]; then
+    echo "The not-logstash script does not exist or is not executable, tried: $DAEMON"
+    exit 1
+fi
+
+case "$1" in
+  start)
+    log_daemon_msg "Starting $DESC"
+
+    pid=`pidofproc -p $PID_FILE not-logstash`
+    if [ -n "$pid" ] ; then
+        log_begin_msg "Already running."
+        log_end_msg 0
+        exit 0
+    fi
+
+    # Ensure that the PID_DIR exists (it is cleaned at OS startup time)
+    if [ -n "$PID_DIR" ] && [ ! -e "$PID_DIR" ]; then
+        mkdir -p "$PID_DIR" && chown "$LS_USER":"$LS_GROUP" "$PID_DIR"
+    fi
+
+    # Start Daemon
+    start-stop-daemon --start \
+        --chdir "$LS_HOME" \
+        --user "$LS_USER" --group "$LS_GROUP" -c "$LS_USER":"$LS_GROUP" \
+        --pidfile "$PID_FILE" --make-pidfile \
+        --background \
+        --exec /usr/bin/env $DAEMON_ENV_VARS $DAEMON -- $DAEMON_OPTS
+    return=$?
+    log_end_msg $return
+    exit $return
+    ;;
+  stop)
+    log_daemon_msg "Stopping $DESC"
+
+    if [ -f "$PID_FILE" ]; then
+        start-stop-daemon --stop \
+            --pidfile "$PID_FILE" \
+            --user "$LS_USER" --group "$LS_GROUP" \
+            --quiet \
+            --retry TERM/60/KILL/5 > /dev/null
+        stop_status=$?
+        if [ $stop_status -eq 1 ]; then
+            log_progress_msg "$DESC is not running but pid file exists, cleaning up"
+        elif [ $stop_status -eq 3 ]; then
+            PID="`cat $PID_FILE`"
+            log_failure_msg "Failed to stop $DESC (pid $PID)"
+            exit 1
+        fi
+        rm -f "$PID_FILE"
+    else
+        log_progress_msg "(not running)"
+    fi
+    log_end_msg 0
+    ;;
+  status)
+    status_of_proc -p $PID_FILE not-logstash not-logstash && exit 0 || exit $?
+    ;;
+  restart)
+    if [ -f "$PID_FILE" ]; then
+        $0 stop
+    fi
+    $0 start
+    ;;
+  *)
+    log_success_msg "Usage: $0 {start|stop|restart|status}"
+    exit 1
+    ;;
+esac
+
+exit 0
diff --git a/irods_audit_elk_stack/not-logstash.py b/irods_audit_elk_stack/not-logstash/not-logstash.py
similarity index 100%
rename from irods_audit_elk_stack/not-logstash.py
rename to irods_audit_elk_stack/not-logstash/not-logstash.py
diff --git a/irods_audit_elk_stack/startup-script.sh b/irods_audit_elk_stack/startup-script.sh
index 16a0cff..4062c0b 100755
--- a/irods_audit_elk_stack/startup-script.sh
+++ b/irods_audit_elk_stack/startup-script.sh
@@ -1,5 +1,69 @@
 #!/bin/bash
 
+es_java_heap_size="512m"
+
+usage() {
+cat << EOF
+Options:
+  -m, --es-java-heap-size=<size>  Elasticsearch Java heap size (default: '${es_java_heap_size}')
+                                  '<size>[g|G|m|M|k|K]': Run Elasticsearch with the given heap size
+                                  'auto': Let Elasticsearch/Java decide on a heap size
+  --help                          Print usage
+EOF
+}
+
+die_usage() {
+    printf '%s\n' "$1" >&2
+    usage
+    exit 64
+}
+
+while [[ "$#" -gt "0" ]]; do
+    case $1 in
+        -m|--es-java-heap-size)
+            if [ -z "$2" ]; then
+                die_usage "ERROR: \"$1\" requires a non-empty option argument."
+            fi
+            es_java_heap_size="$2"
+            shift
+            ;;
+        --es-java-heap-size=?*)
+            es_java_heap_size="${1#*=}" # Delete everything up to "="
+            ;;
+        --es-java-heap-size=)
+            die_usage 'ERROR: "--es-java-heap-size" requires a non-empty option argument.'
+            ;;
+        --help)
+            usage
+            exit 0
+            ;;
+        *)
+            printf 'WARN: Unknown option (ignored): %s\n' "$1" >&2
+            ;;
+    esac
+    shift
+done
+
+es_java_heap_size_option_file="/etc/elasticsearch/jvm.options.d/heap_size.options"
+
+# Set Elasticsearch Java heap size
+if [[ "$es_java_heap_size" == "auto" ]]; then
+    # Let Elasticsearch/Java handle it
+    rm -f "${es_java_heap_size_option_file}"
+else
+    echo "-Xms${es_java_heap_size}" > "${es_java_heap_size_option_file}"
+    echo "-Xmx${es_java_heap_size}" >> "${es_java_heap_size_option_file}"
+    chown root:elasticsearch "${es_java_heap_size_option_file}"
+fi
+
+# Start services
+/etc/init.d/rabbitmq-server start
+/etc/init.d/elasticsearch start
+/etc/init.d/not-logstash start
+/etc/init.d/kibana start
+
+# Print IP addresses
 ip addr
 
-exec /sbin/init
+# keep alive
+exec tail -f /dev/null
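
Example usage: a minimal sketch of building the image and running the stack through the new startup-script.sh entrypoint. The image tag and published ports here are illustrative assumptions; --es-java-heap-size is the option added above, and 9200, 5601, and 5672 are the stock Elasticsearch, Kibana, and RabbitMQ (AMQP) ports.

    # Build from the repository root (directory containing irods_audit_elk_stack/)
    docker build -t irods-audit-elk irods_audit_elk_stack/

    # Run with a 1 GiB Elasticsearch heap; arguments after the image name are passed to the ENTRYPOINT script
    docker run -d --name irods-audit-elk \
        -p 9200:9200 -p 5601:5601 -p 5672:5672 \
        irods-audit-elk --es-java-heap-size=1g

    # Or let Elasticsearch/Java choose the heap size itself
    docker run -d --name irods-audit-elk-auto irods-audit-elk --es-java-heap-size=auto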