diff --git a/.github/workflows/build-mid.yml b/.github/workflows/build-mid.yml new file mode 100644 index 0000000..8659052 --- /dev/null +++ b/.github/workflows/build-mid.yml @@ -0,0 +1,41 @@ +# via https://docs.github.com/en/actions/publishing-packages/publishing-docker-images + +name: Publish MID server image +on: workflow_dispatch + +env: + REGISTRY: ghcr.io + IMAGE_NAME: lightstep/sn-collector/mid-experimental + +jobs: + build-and-push-image: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Log in to the Container registry + uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract metadata (tags, labels) for Docker + id: meta + uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + + - name: Build and push Docker image + id: push + uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4 + with: + context: mid + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} \ No newline at end of file diff --git a/docs/monitor-kubernetes.md b/docs/monitor-kubernetes.md index 4bfb635..d45f13c 100644 --- a/docs/monitor-kubernetes.md +++ b/docs/monitor-kubernetes.md @@ -22,12 +22,13 @@ To monitor the cluster, make sure you have the following before proceeding: * ability to pull from the public Docker image repository `ghcr.io/lightstep/sn-collector` * `ClusterRole` -#### 1. Add OpenTelemetry helm repository +#### 1. Add OpenTelemetry and ServiceNow helm repository We use the OpenTelemetry Helm charts to configure collectors for Kubernetes monitoring. Helm charts make it easy to deploy and configure Kubernetes manifests. ```sh helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts +helm repo add servicenow https://install.service-now.com/glide/distribution/builds/package/informer/informer-helm/repo helm repo update ``` @@ -57,6 +58,12 @@ kubectl create configmap servicenow-events-url \ -n servicenow --from-literal=url=$SERVICENOW_EVENTS_URL ``` +(__Optional__) Set username and password for CNO with a user that has the `discovery_admin` role, replacing INSTANCE_NAME with your instance name. +```sh +kubectl create secret generic k8s-informer-cred-INSTANCE_NAME -n servicenow \ + --from-literal=.user=USERNAME --from-literal=.password=PASSWORD +``` + (__Optional__) Set username for Event Manangement: ```sh export SERVICENOW_EVENTS_USERNAME='your-mid-username' @@ -71,7 +78,7 @@ kubectl create secret generic servicenow-events-password \ -n servicenow --from-literal="password=$SERVICENOW_EVENTS_PASSWORD" ``` -#### 4. Deploy ServiceNow Collector for Cluster Monitoring +#### 4. Deploy ServiceNow Collector for Cluster Monitoring and CNO for Visibility You're now ready to deploy a collector to your cluster to collect cluster-level metrics and events. To preview the generated manifest before deploying, add the `--dry-run` option to the below command: @@ -81,6 +88,14 @@ helm upgrade otel-collector-cluster open-telemetry/opentelemetry-collector \ --values https://raw.githubusercontent.com/lightstep/sn-collector/main/collector/config-k8s/values-cluster.yaml ``` +Next, install CNO for visibility. 
Additional install instructions for CNO are on the ServiceNow documentation [portal](https://docs.servicenow.com/bundle/washingtondc-it-operations-management/page/product/cloud-native-operations-visibility/task/cnov-deploy-install.html). By sending `Y` you accept the terms and conditions of ServiceNow CNO. + +```sh +helm upgrade k8s-informer servicenow/k8s-informer-chart \ + --set acceptEula=Y --set instance.name=INSTANCE_NAME --set clusterName="CLUSTER_NAME" \ + --install --namespace servicenow +``` + The pod will deploy after a few seconds, to check status and for errors, run: ```sh diff --git a/mid/Dockerfile b/mid/Dockerfile new file mode 100644 index 0000000..6fc537a --- /dev/null +++ b/mid/Dockerfile @@ -0,0 +1,114 @@ +# ################ +# 1st Stage: Use openjdk 8 to verify signature w/ jarsigner +# https://developers.redhat.com/articles/2022/09/16/updating-docker-hubs-openjdk-image#openjdk_and_java_se_updates +# ################ +FROM eclipse-temurin:8-jdk-alpine AS download_verification + +RUN apk -q update && \ + apk add bash && \ + apk add -q wget && \ + rm -rf /tmp/* + +ARG MID_INSTALLATION_URL=https://install.service-now.com/glide/distribution/builds/package/app-signed/mid/2024/03/06/mid.vancouver-07-06-2023__patch7-hotfix1-03-05-2024_03-06-2024_1225.linux.x86-64.zip +ARG MID_INSTALLATION_FILE +ARG MID_SIGNATURE_VERIFICATION="TRUE" + +WORKDIR /opt/snc_mid_server/ + +COPY asset/* /opt/snc_mid_server/ + +# download.sh and validate_signature.sh +RUN chmod 6750 /opt/snc_mid_server/*.sh + +RUN echo "Check MID installer URL: ${MID_INSTALLATION_URL} or Local installer: ${MID_INSTALLATION_FILE}" + +# Download the installation ZIP file or using the local one +RUN if [ -z "$MID_INSTALLATION_FILE" ] ; \ + then /opt/snc_mid_server/download.sh $MID_INSTALLATION_URL ; \ + else echo "Use local file: $MID_INSTALLATION_FILE" && ls -alF /opt/snc_mid_server/ && mv /opt/snc_mid_server/$MID_INSTALLATION_FILE /tmp/mid.zip ; fi + +# Verify mid.zip signature +RUN if [ "$MID_SIGNATURE_VERIFICATION" = "TRUE" ] || [ "$MID_SIGNATURE_VERIFICATION" = "true" ] ; \ + then echo "Verify the signature of the installation file" && /opt/snc_mid_server/validate_signature.sh /tmp/mid.zip; \ + else echo "Skip signature validation of the installation file "; fi + +RUN unzip -d /opt/snc_mid_server/ /tmp/mid.zip && rm -f /tmp/mid.zip + +# ################ +# Final Stage (using the downloaded ZIP file from previous stage) +# ################ +FROM almalinux:9.1 + +RUN dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm + +RUN dnf update -y && \ + dnf install -y --allowerasing bind-utils \ + xmlstarlet \ + curl \ + procps \ + net-tools \ + iputils &&\ + dnf clean packages -y && \ + rm -rf /tmp/* + +# ########################## +# Build argument definition +# ########################## + + +ARG MID_USERNAME=mid + +ARG GROUP_ID=1001 + +ARG USER_ID=1001 + + +# ############################ +# Runtime Env Var Definition +# ############################ + +# Ensure UTF-8 Encoding +ENV LANG en_US.UTF-8 + +# Mandatory Env Var +ENV MID_INSTANCE_URL "" \ + MID_INSTANCE_USERNAME "" \ + MID_INSTANCE_PASSWORD "" \ + MID_SERVER_NAME "" \ +# Optional Env Var + MID_PROXY_HOST "" \ + MID_PROXY_PORT "" \ + MID_PROXY_USERNAME "" \ + MID_PROXY_PASSWORD "" \ + MID_SECRETS_FILE "" \ + MID_MUTUAL_AUTH_PEM_FILE "" \ + MID_SSL_BOOTSTRAP_CERT_REVOCATION_CHECK "" \ + MID_SSL_USE_INSTANCE_SECURITY_POLICY "" + + +RUN if [[ -z "${GROUP_ID}" ]]; then GROUP_ID=1001; fi && \ + if [[ -z "${USER_ID}" ]]; then USER_ID=1001; 
fi && \ + echo "Add GROUP id: ${GROUP_ID}, USER id: ${USER_ID} for username: ${MID_USERNAME}" + + +RUN groupadd -g $GROUP_ID $MID_USERNAME && \ + useradd -c "MID container user" -r -m -u $USER_ID -g $MID_USERNAME $MID_USERNAME + +# only copy needed scripts and .container +COPY asset/init asset/.container asset/check_health.sh asset/post_start.sh asset/pre_stop.sh /opt/snc_mid_server/ + +# 6:setuid + setgid, 750: a:rwx, g:rx, o: +RUN chmod 6750 /opt/snc_mid_server/* && chown -R $MID_USERNAME:$MID_USERNAME /opt/snc_mid_server/ + +# Copy agent/ from download_verification +COPY --chown=$MID_USERNAME:$MID_USERNAME --from=download_verification /opt/snc_mid_server/agent/ /opt/snc_mid_server/agent/ + +# Check if the wrapper PID file exists and a HeartBeat is processed in the last 30 minutes +HEALTHCHECK --interval=5m --start-period=3m --retries=3 --timeout=15s \ + CMD bash check_health.sh || exit 1 + +WORKDIR /opt/snc_mid_server/ + +USER $MID_USERNAME + +ENTRYPOINT ["/opt/snc_mid_server/init", "start"] diff --git a/mid/asset/.container b/mid/asset/.container new file mode 100644 index 0000000..a7558a6 --- /dev/null +++ b/mid/asset/.container @@ -0,0 +1,5 @@ +# MID Server Container, ServiceNow Inc. +StartAsProcess=true +ContainerDeploymentName= +DeploymentMidId= +ContainerDeploymentNamespace= diff --git a/mid/asset/check_health.sh b/mid/asset/check_health.sh new file mode 100644 index 0000000..f14b9ca --- /dev/null +++ b/mid/asset/check_health.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# + +if [ ! -f agent/work/mid.pid ] +then + echo "agent/work/mid.pid doesn't exist" + exit 1 +fi + +if [ ! -f agent/.healthcheck ] +then + echo "agent/.healthcheck doesn't exist" + exit 1 +fi + +# check if currentTime - lastModifiedTime of .healthcheck is >= 30 min (1800 sec) \ +currentTime=`date '+%s'` +lastModifiedTime=`date -r agent/.healthcheck '+%s'` + +if [ $(($currentTime-$lastModifiedTime)) -gt 1800 ] +then + exit 1 +fi + +exit 0 \ No newline at end of file diff --git a/mid/asset/download.sh b/mid/asset/download.sh new file mode 100755 index 0000000..82f628e --- /dev/null +++ b/mid/asset/download.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Sample variable +# mid_installation_url = + +set -e + +mid_installation_url=$1 + +if [[ ! -z "$mid_installation_url" ]] +then + echo "Downloading $mid_installation_url" + wget $mid_installation_url -O /tmp/mid.zip +else + echo "ERROR: Downloading [$mid_installation_url] failed!" 
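+    # Reached only when no installation URL argument was supplied, so the URL shown
+    # in the message above will be empty.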
+ exit 1 +fi diff --git a/mid/asset/init b/mid/asset/init new file mode 100644 index 0000000..69d208b --- /dev/null +++ b/mid/asset/init @@ -0,0 +1,450 @@ +#!/bin/bash +set -e + +MID_HOME="/opt/snc_mid_server/agent" +CONF_FILE="${MID_HOME}/config.xml" +WRAPPER_CONF_FILE="${MID_HOME}/conf/wrapper-override.conf" + +# Mandatory ENV +MID_INSTANCE_URL=${MID_INSTANCE_URL:-} +MID_INSTANCE_USERNAME=${MID_INSTANCE_USERNAME:-} +MID_INSTANCE_PASSWORD=${MID_INSTANCE_PASSWORD:-} +MID_SERVER_NAME=${MID_SERVER_NAME:-} + +# Secret +MID_SECRETS_FILE=${MID_SECRETS_FILE:-} + +# Container Deploymane Name +MID_CONTAINER_DEPLOYMENT_NAME=${MID_CONTAINER_DEPLOYMENT_NAME:-} + +# Container Deploymane Namespace +MID_CONTAINER_DEPLOYMENT_NAMESPACE=${MID_CONTAINER_DEPLOYMENT_NAMESPACE:-} + +# Deployment Mid SysId +DEPLOYMENT_MID_ID=${DEPLOYMENT_MID_ID:-} + +# Operational ENV +MID_PROXY_HOST=${MID_PROXY_HOST:-} +MID_PROXY_PORT=${MID_PROXY_PORT:-} +MID_PROXY_USERNAME=${MID_PROXY_USERNAME:-} +MID_PROXY_PASSWORD=${MID_PROXY_PASSWORD:-} +MID_MUTUAL_AUTH_PEM_FILE=${MID_MUTUAL_AUTH_PEM_FILE:-} + +MID_SSL_BOOTSTRAP_CERT_REVOCATION_CHECK=${MID_SSL_BOOTSTRAP_CERT_REVOCATION_CHECK:-} +MID_SSL_USE_INSTANCE_SECURITY_POLICY=${MID_SSL_USE_INSTANCE_SECURITY_POLICY:-} + +EXIT_CODE_NORMAL_SHUTDOWN=0 +EXIT_CODE_DOWNLOAD_FAILURE=1 +EXIT_CODE_MISSING_CONFIG_PARAM=2 +EXIT_CODE_MUTUAL_AUTH_SETUP_FAILURE=3 + +# Flag to indicate if mutual auth is used +IS_MUTUAL_AUTH=0 + +# File to track setup completion +MID_INITIALIZED_FILE="${MID_HOME}/.initialized" + +# Set up log file to the persistent volume if available +MID_CONTAINER_DIR="/opt/snc_mid_server/mid_container" +LOG_FILE="/opt/snc_mid_server/mid-container.log" +if [[ -d $MID_CONTAINER_DIR ]] +then + LOG_FILE="${MID_CONTAINER_DIR}/mid-container.log"; +fi + +logInfo () { + msg="$(date '+%Y-%m-%dT%T.%3N') ${1}" + echo "$msg" | tee -a ${LOG_FILE} +} + +generateConfigXml () { + logInfo "DOCKER: setting up config.xml" + + # For Rome compatibility we add the following parameters as defaults. + # Mandatory config + replaceConfigParameter 1 url ${MID_INSTANCE_URL} + replaceConfigParameter 1 name ${MID_SERVER_NAME} + + # Optional config + # Proxy related + # These parameters require a value because they are bootstrapped + replaceConfigParameter 1 mid.proxy.host ${MID_PROXY_HOST} + replaceConfigParameter 1 mid.proxy.port ${MID_PROXY_PORT} + replaceConfigParameter 1 mid.proxy.username ${MID_PROXY_USERNAME} + replaceConfigParameter 1 mid.proxy.password ${MID_PROXY_PASSWORD} + + # Cert revocation check and whether to use instance security policy + # These parameters require a value because they are bootstrapped + # mid.ssl.bootstrap.default.check_cert_revocation: default val is False + # mid.ssl.use.instance.security.policy: default val is False + replaceConfigParameter 1 mid.ssl.bootstrap.default.check_cert_revocation ${MID_SSL_BOOTSTRAP_CERT_REVOCATION_CHECK} + replaceConfigParameter 1 mid.ssl.use.instance.security.policy ${MID_SSL_USE_INSTANCE_SECURITY_POLICY} + + # Handle generic config parameters from MID server profile + envConfigRegex="MID_CONFIG_(.*)=(.*)" + declare -A envVars + getEnvVars $envConfigRegex $envVars + + for param in "${!envVars[@]}" + do + # not sure why the associative array contains an element with the key "0". Remove it for now. + # I'll try to figure it out and clean this up. 
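+        # Likely cause of the stray "0" key: getEnvVars does a plain "envVars=$2" assignment,
+        # which bash applies to the caller's associative array as envVars[0]=... (the array
+        # itself cannot be passed positionally).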
+ if [[ $param == "0" ]] + then + continue; + fi + if [[ -n $param ]] + then + logInfo "DOCKER: updating profile parameter: $param" + replaceConfigParameter 0 $param ${envVars[$param]} + fi + done + + # Fill in $MID_CONTAINER_DEPLOYMENT_NAME in .container + sed -i "s/ContainerDeploymentName=.*/ContainerDeploymentName=${MID_CONTAINER_DEPLOYMENT_NAME}/g" .container + + # Fill in $MID_CONTAINER_DEPLOYMENT_NAMESPACE in .container + sed -i "s/ContainerDeploymentNamespace=.*/ContainerDeploymentNamespace=${MID_CONTAINER_DEPLOYMENT_NAMESPACE}/g" .container + + # Fill in $DEPLOYMENT_MID_ID in .container + sed -i "s/DeploymentMidId=.*/DeploymentMidId=${DEPLOYMENT_MID_ID}/g" .container + + + # Handle secrets file. Secrets file has priority over default and generic prarams + if [[ ! -z "$MID_SECRETS_FILE" && -f "$MID_SECRETS_FILE" ]] + then + logInfo "DOCKER: processing secrets file" + propRegex="^(.*)=(.*)$" + while read line + do + if [[ $line =~ $propRegex ]] + then + name=${BASH_REMATCH[1]} + value=${BASH_REMATCH[2]} + replaceConfigParameter 0 $name $value + fi + done < $MID_SECRETS_FILE + fi + + if [[ ! -z "$MID_MUTUAL_AUTH_PEM_FILE" && -f "$MID_MUTUAL_AUTH_PEM_FILE" ]] + then + # If Cert (PEM) file is set and exists, proceed with mutual auth + logInfo "DOCKER: mutual auth cert file found: $MID_MUTUAL_AUTH_PEM_FILE" + + # Ensure log file exists for logger in java classes + touch /home/mid/java0.log + + cd /opt/snc_mid_server/agent && \ + sh bin/scripts/manage-certificates.sh -a "DefaultSecurityKeyPairHandle" $MID_MUTUAL_AUTH_PEM_FILE + + if [[ $? != 0 ]] + then + logInfo "DOCKER: Error adding certificate. Please ensure PEM file is valid. Abandoning setup." + exit $EXIT_CODE_MUTUAL_AUTH_SETUP_FAILURE + fi + + cd /opt/snc_mid_server/agent && \ + sh bin/scripts/manage-certificates.sh -m + + if [[ $? != 0 ]] + then + logInfo "DOCKER: Error enabling mutual auth. Please ensure config.xml can be modified. Abandoning setup." 
+ exit $EXIT_CODE_MUTUAL_AUTH_SETUP_FAILURE + fi + + IS_MUTUAL_AUTH=1 + logInfo "DOCKER: mutual auth enabled on MID" + else + # mutual auth is not set, proceed with basic authentication + logInfo "DOCKER: mutual auth cert file not found: $MID_MUTUAL_AUTH_PEM_FILE" + replaceConfigParameter 1 mid.instance.username ${MID_INSTANCE_USERNAME} + replaceConfigParameter 1 mid.instance.password ${MID_INSTANCE_PASSWORD} + fi + + # Verify that mandatory parameters have been added + validateMandatoryParameters + + logInfo "DOCKER: Update configuration DONE" +} + +updateWrapperConfFromEnvVars() { + + logInfo "DOCKER: setting up wrapper-override.conf" + + envVarWrapperConfRegex="^MID_WRAPPER_(.*)=(.*)$" + wrapperConfRegex="^#?(.*)=(.*)$" + + declare -A envVars + getEnvVars $envVarWrapperConfRegex $envVars + + updatedFileContent="" + + # Iterate each line of wrapper file, concat updated lines to memory, then write final string back to file + while read line + do + # If file line is a property attempt to update it with env variable value + if [[ $line =~ $wrapperConfRegex ]] + then + name=${BASH_REMATCH[1]} + value=${BASH_REMATCH[2]} + newValue=${envVars[$name]} + newLine="" + + # If property exists in env vars then update with new value + if [[ -n $newValue ]] + then + newLine="${name}=${newValue}" + # remove so envvars contains only variables that still need to be written to file" + unset -v 'envVars['$name']' + else + newLine=$line + fi + updatedFileContent="${updatedFileContent}$newLine\n" + + # If line isn't a property just write it back to file without modification + else + updatedFileContent="${updatedFileContent}$line\n" + fi + done < ${WRAPPER_CONF_FILE} + + # envVars may contain additional variables whose name isn't already contained in wrapper-override.conf + # For example, java.wrapper.additional.1 is a name found in wrapper-override.conf and would have + # already been updated in the previous loop. + # java.wrapper.additional.10 is not found in wrapper-override.conf and so would be added here. + for prop in "${!envVars[@]}" + do + if [[ $prop == "0" ]] + then + continue + fi + if [[ -n $prop ]] + then + newLine="${prop}=${envVars[$prop]}" + updatedFileContent="${updatedFileContent}$newLine\n" + fi + done + + # Write back to file + printf "$updatedFileContent" > ${WRAPPER_CONF_FILE} +} + +getEnvVars() { + regex=$1 + envVars=$2 + + for var in $(printenv) + do + if [[ $var =~ $regex ]] + then + name=${BASH_REMATCH[1]} + value=${BASH_REMATCH[2]} + # Bash environment variable names can only contain alpha-numeric characeters and the underscore + # and as such, the period (.) is an offending character and has been remapped to + # two consecutive underscores (__). 
+ # example: mid.log.level is stored as mid__log__level + # The following line restores the original name + name=${name//"__"/"."} + envVars[$name]=$value + fi + done +} + +replaceConfigParameter() { + requiresValue=$1 + name=$2 + val=$3 + + # process the mid server name template + if [[ "${name}" == "name" ]] + then + if [[ "${val}" == *"_AUTO_GENERATED_UUID_" ]] + then + _replacement=$(uuidgen) + val=${val/_AUTO_GENERATED_UUID_/${_replacement}} + logInfo "DOCKER: UUID ${_replacement} is generated and the new value ${val} is assigned to the ${name} parameter" + elif [[ "${val}" == *"_NAMESPACE_HOSTNAME_" ]] + then + [[ "${MID_CONTAINER_DEPLOYMENT_NAMESPACE}" == "default" ]] && _replacement="$(hostname)" || _replacement="${MID_CONTAINER_DEPLOYMENT_NAMESPACE}_$(hostname)" + val=${val/_NAMESPACE_HOSTNAME_/${_replacement}} + logInfo "DOCKER: _NAMESPACE_HOSTNAME_ ${_replacement} is generated and the new value ${val} is assigned to the ${name} parameter" + elif [[ "${val}" == *"_HOSTNAME_NAMESPACE_" ]] + then + [[ "${MID_CONTAINER_DEPLOYMENT_NAMESPACE}" == "default" ]] && _replacement="$(hostname)" || _replacement="$(hostname)_${MID_CONTAINER_DEPLOYMENT_NAMESPACE}" + val=${val/_HOSTNAME_NAMESPACE_/${_replacement}} + logInfo "DOCKER: _HOSTNAME_NAMESPACE_ ${_replacement} is generated and the new value ${val} is assigned to the ${name} parameter" + fi + fi + + # disallow any empty value for bootstrap parameters, e.g. url, proxy params + if [[ $requiresValue == 1 && -z $val ]] + then + logInfo "DOCKER: Parameter $name has no value and will not be added to $CONF_FILE" + return 0 + fi + + #Handle existing parameters + if [[ `xmlstarlet sel -t -v "/parameters/parameter/@name='$name'" $CONF_FILE` == "true" ]] + then + logInfo "DOCKER: Updating parameter $name in file: $CONF_FILE" + /bin/xmlstarlet edit --inplace --update "/parameters/parameter[@name='$name']/@value" --value "$val" $CONF_FILE + else + # Handle optional parameters + logInfo "DOCKER: Adding parameter $name to file: $CONF_FILE" + sed -i "s|| \n\n|g" $CONF_FILE + fi +} + +validateMandatoryParameters() { + logInfo "DOCKER: validating mandatory parameters" + + # These default values are taken from the config.xml ootb. + # If these values change in the future, those changes should be made here as well. + validateMandatoryParameter "url" "https://YOUR_INSTANCE.service-now.com/" 0 + validateMandatoryParameter "name" "YOUR_MIDSERVER_NAME_GOES_HERE" 0 + validateMandatoryParameter "mid.instance.username" "YOUR_INSTANCE_USER_NAME_HERE" $IS_MUTUAL_AUTH + validateMandatoryParameter "mid.instance.password" "YOUR_INSTANCE_PASSWORD_HERE" $IS_MUTUAL_AUTH +} + +validateMandatoryParameter() { + name=$1 + defaultValue=$2 + checkNotPresent=$3 + + logInfo "DOCKER: Validating parameter: '$name'" + + validationPath="//parameters/parameter/@name='$name'" + elementPath="//parameters/parameter[@name='$name']" + + if [[ `xmlstarlet sel -t -v $validationPath $CONF_FILE` == "true" ]] + then + xmlParam=`xmlstarlet sel -t -c ${elementPath} $CONF_FILE` + # If mutual auth is enabled we shouldn't have a username or pw in config.xml + if [[ $checkNotPresent == 1 ]] + then + logInfo "DOCKER: Found parameter '$name' in config.xml but not expected." + logInfo " This could be caused by mutual authentication setup failure. Abandoning setup." 
+ exit $EXIT_CODE_MUTUAL_AUTH_SETUP_FAILURE + fi + valueRegex="^.* value=\"(.*)\".*$" + if [[ $xmlParam =~ $valueRegex && ( ${BASH_REMATCH[1]} == $defaultValue || ${BASH_REMATCH[1]} == "" ) ]] + then + logInfo "DOCKER: Parameter '$name' failed to update in config.xml." + logInfo " Please verify that the '$name' parameter has been supplied. Abandoning setup." + exit $EXIT_CODE_MISSING_CONFIG_PARAM + fi + elif [[ $checkNotPresent == 0 ]] + then + logInfo "DOCKER: Mandatory parameter '$name' expected but not found in config.xml." + logInfo " Please verify that a value has been given for '$name'. Abandoning setup." + exit $EXIT_CODE_MISSING_CONFIG_PARAM + fi +} + +# max wait time for initialization +MAX_INIT_WAIT_TIME=60 + +midStart () { + + touch /opt/snc_mid_server/agent/logs/agent0.log.0 + + logInfo "DOCKER: starting mid server" + + /opt/snc_mid_server/agent/bin/mid.sh start & + + # CLI does not fully spawned and need a wait until the process generated. Not having the wait would make container + # to exit immediately. + waitTime=0 + while [ ! -f /opt/snc_mid_server/agent/work/mid.pid ] && [ "$waitTime" -lt "$MAX_INIT_WAIT_TIME" ] + do + sleep 5 + waitTime=$(( $waitTime + 5 )) + logInfo "DOCKER: Mid wrapper init taking $waitTime sec .." + done + + wrapper_pid=$(cat /opt/snc_mid_server/agent/work/mid.pid) + + # In addition, we rely on CLI execution to restart MID service which should not exit the container either. + # We would need a way to keep container alive + while true + do + tail -F /opt/snc_mid_server/agent/logs/agent0.log.0 & wait ${!} + done + +} + +_handleSignal_SIGTERM() { + logInfo "DOCKER: Received signal SIGTERM, gracefully shut down mid before exiting..." + /opt/snc_mid_server/agent/bin/mid.sh stop + + exit $EXIT_CODE_NORMAL_SHUTDOWN +} + +midStop () { + logInfo "DOCKER: stopping mid server" + + /opt/snc_mid_server/agent/bin/mid.sh stop +} + +midRestart () { + logInfo "DOCKER: restarting mid server" + + /opt/snc_mid_server/agent/bin/mid.sh restart +} + +midSetup () { + flag=$1 + + # restore the config, wrapper config and other metadata files + if [[ -f "${MID_CONTAINER_DIR}/config.xml" ]] + then + logInfo "Restore the config and other metadata files from the persistent volume" + \cp -fp "${MID_CONTAINER_DIR}/config.xml" $CONF_FILE + \cp -fp "${MID_CONTAINER_DIR}/wrapper-override.conf" $WRAPPER_CONF_FILE + \cp -fp "${MID_CONTAINER_DIR}/.initialized" $MID_INITIALIZED_FILE + \cp -fp "${MID_CONTAINER_DIR}/.container" /opt/snc_mid_server/.container + \cp -fp "${MID_CONTAINER_DIR}/glide.properties" /opt/snc_mid_server/agent/properties/ + fi + + if [[ ! -f $MID_INITIALIZED_FILE || $flag == "-f" ]] + then + generateConfigXml + updateWrapperConfFromEnvVars + touch $MID_INITIALIZED_FILE + date > $MID_INITIALIZED_FILE + fi +} + +midHelp () { + echo "Available options:" + echo " start - Starts the mid server (default)" + echo " restart - Restarts the mid server" + echo " setup - Generate config.xml and java wrapper overrides" + echo " help - Displays the help" + echo " [command] - Execute the specified linux command eg. bash." 
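+    # NOTE: with the case statement at the bottom of this script, unrecognized arguments
+    # currently fall through to this help text; they are not executed as commands.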
+} + +# SIGTERM (15) : software termination signal - typically sent docker stop or by kill by default +trap '_handleSignal_SIGTERM' SIGTERM + +case "$1" in + start) + midSetup + midStart + ;; + setup) + midSetup -f + ;; + stop) + midStop + ;; + restart) + midRestart + ;; + help) + midHelp + ;; + *) + midHelp + ;; +esac + diff --git a/mid/asset/post_start.sh b/mid/asset/post_start.sh new file mode 100644 index 0000000..5e274b2 --- /dev/null +++ b/mid/asset/post_start.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +MID_CONTAINER_DIR="/opt/snc_mid_server/mid_container" +LOG_FILE="/opt/snc_mid_server/mid-container.log" + +logInfo () { + msg="$(date '+%Y-%m-%dT%T.%3N') ${1}" + echo "$msg" | tee -a ${LOG_FILE} +} + +if [[ -d $MID_CONTAINER_DIR ]] +then + LOG_FILE="${MID_CONTAINER_DIR}/mid-container.log"; +fi + +DRAIN_MARKER_FILE="/opt/snc_mid_server/.drain_before_termination" +if [[ -f "$DRAIN_MARKER_FILE" ]]; then + logInfo "Remove the drain marker file: ${DRAIN_MARKER_FILE}" + rm -f $DRAIN_MARKER_FILE +fi diff --git a/mid/asset/pre_stop.sh b/mid/asset/pre_stop.sh new file mode 100644 index 0000000..fa7ae46 --- /dev/null +++ b/mid/asset/pre_stop.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +MID_CONTAINER_DIR="/opt/snc_mid_server/mid_container" +LOG_FILE="/opt/snc_mid_server/mid-container.log" + +logInfo () { + msg="$(date '+%Y-%m-%dT%T.%3N') ${1}" + echo "$msg" | tee -a ${LOG_FILE} +} + +# Copy the config, wrapper config and other metadata files to the persistent volume +if [[ -d $MID_CONTAINER_DIR ]] +then + LOG_FILE="${MID_CONTAINER_DIR}/mid-container.log"; + logInfo "Backup the config and other metadata files to the persistent volume" + \cp -fp /opt/snc_mid_server/agent/config.xml \ + /opt/snc_mid_server/agent/conf/wrapper-override.conf \ + /opt/snc_mid_server/agent/.initialized \ + /opt/snc_mid_server/.container \ + /opt/snc_mid_server/agent/properties/glide.properties \ + ${MID_CONTAINER_DIR}/ +else + logInfo "The directory $MID_CONTAINER_DIR does not exist!" +fi + +# Create the drain marker file +DRAIN_MARKER_FILE="/opt/snc_mid_server/.drain_before_termination" +if [[ ! -f "$DRAIN_MARKER_FILE" ]]; then + logInfo "Create the drain marker file: ${DRAIN_MARKER_FILE}" + touch $DRAIN_MARKER_FILE +fi + +# Tell the wrapper to stop the MID server. Before stop, the MID server will drain if it sees +# the drain marker file and if mid.drain.run_before_container_termination = true +logInfo "Stop the MID server" +/opt/snc_mid_server/agent/bin/mid.sh stop + +# Remove the drain marker file +logInfo "Remove the drain marker file: ${DRAIN_MARKER_FILE}" +rm -f $DRAIN_MARKER_FILE +if [[ -f $DRAIN_MARKER_FILE ]] +then + logInfo "Failed to delete ${DRAIN_MARKER_FILE}" +fi diff --git a/mid/asset/validate_signature.sh b/mid/asset/validate_signature.sh new file mode 100644 index 0000000..ecb62d9 --- /dev/null +++ b/mid/asset/validate_signature.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +usage() { + echo "Usage: $0 " + echo " # The script will verify the digital signature of the specified ZIP file" + echo " # It has the exit code 0 if the signature is verified; otherwise, the exit code is 1" + exit 1 +} + +error_exit() { + echo "$1" 1>&2 + exit 1 +} + +# validate arguments +if [ $# -ne 1 ]; then + usage +fi + +if [ ! 
-f "$1" ]; then + echo "Error: $1 doesn't exist" + usage +fi + +zip_file=$1 +echo "DOCKER: Validating digital signature of $zip_file" +validation_result=`jarsigner -verify -strict -verbose "$zip_file"` + +# Turn on a case-insensitive matching +shopt -s nocasematch + +if [[ "$validation_result" == *"- Signed by "*"O=ServiceNow"*"jar verified."* ]]; then + echo "DOCKER: Successfully verified digital signature of $zip_file" +else + echo "ERROR: Digital signature of $zip_file cannot be verified" + echo "DOCKER: >>Validation result: \"$validation_result\"<<<" + exit 1 +fi + +# Turn off a case-insensitive matching +shopt -u nocasematch + +exit 0 diff --git a/mid/readme.md b/mid/readme.md new file mode 100644 index 0000000..4fa1d5b --- /dev/null +++ b/mid/readme.md @@ -0,0 +1,3 @@ +### ServiceNow MID Server + +For building your own MID server image, please follow the latest instructions in the ServiceNow documentation [here](https://docs.servicenow.com/bundle/washingtondc-servicenow-platform/page/product/mid-server/task/mid-build-docker-linux.html). The assets in this directory are for example purposes only and guarunteed to be kept up-to-date.