(HP-1244): update cedar job to get parameters from g3auto secrets
commit 142df06 (1 parent: d6f7b9a)
Showing 1 changed file with 78 additions and 138 deletions.
@@ -1,19 +1,22 @@
 #
 # run with:
-# gen3 job run cedar-ingestion \
-#     SUBMISSION_USER $submission_user \
-#     CEDAR_DIRECTORY_ID $cedar_directory_id \
-#
-# SUBMISSION_USER(optional)
-#   e-mail of user-account to submit the data to MDS, must have MDS admin and CEDAR policies granted. Default: "[email protected]"
+# gen3 job run cedar-ingestion [CEDAR_DIRECTORY_ID $cedar_directory_id]
 #
 # CEDAR_DIRECTORY_ID
 #   ID of CEDAR directory where instances will be pulled from, only needs its UUID part. For example: "123e4567-e89b-12d3-a456-426614174000"
+#   The directory id will be read from 'directory_id.txt' in the
+#   'cedar-g3auto' secret.
+#   You can override the secret value with an optional command line argument.
 #
 # The deployed CEDAR wrapper services must be able to read from this directory.
 #
-# Example
-# gen3 job run cedar-ingestion CEDAR_DIRECTORY_ID 123e4567-e89b-12d3-a456-426614174000 SUBMISSION_USER [email protected]
+# ACCESS TOKENS
+#   Access tokens will be generated for an existing fence-client, cedar_ingest_client.
+#   The client_id and client_secret will be read from
+#   'cedar_client_credentials.json' in the 'cedar-g3auto' secret.
+#
+#   The fence-client must have MDS admin and CEDAR policies granted.
+#
 apiVersion: batch/v1
 kind: Job
 metadata:
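For context: after this change the job reads both of its inputs from a single 'cedar-g3auto' secret. A minimal sketch of populating that secret, assuming the usual g3auto convention (files under Gen3Secrets/g3auto/cedar become the kube secret cedar-g3auto via `gen3 secrets sync`); the file names come from the header comments, and the UUID is the placeholder example above, not a real directory:

# populate the g3auto folder that backs the cedar-g3auto secret
mkdir -p ~/Gen3Secrets/g3auto/cedar
echo "123e4567-e89b-12d3-a456-426614174000" > ~/Gen3Secrets/g3auto/cedar/directory_id.txt
# client_id/client_secret of the existing fence client, cedar_ingest_client
cat > ~/Gen3Secrets/g3auto/cedar/cedar_client_credentials.json <<'EOF'
{
  "client_id": "<cedar_ingest_client id>",
  "client_secret": "<cedar_ingest_client secret>"
}
EOF
gen3 secrets sync "add cedar-g3auto for the cedar-ingestion job"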
@@ -44,36 +47,13 @@ spec:
             - ONDEMAND
       serviceAccountName: useryaml-job
       volumes:
-        - name: yaml-merge
-          configMap:
-            name: "fence-yaml-merge"
         - name: shared-data
          emptyDir: {}
-# -----------------------------------------------------------------------------
-#   DEPRECATED! Remove when all commons are no longer using local_settings.py
-#   for fence.
-# -----------------------------------------------------------------------------
-        - name: old-config-volume
-          secret:
-            secretName: "fence-secret"
-        - name: creds-volume
-          secret:
-            secretName: "fence-creds"
-        - name: config-helper
-          configMap:
-            name: config-helper
-        - name: json-secret-volume
+        - name: cedar-client-volume-g3auto
           secret:
-            secretName: "fence-json-secret"
-# -----------------------------------------------------------------------------
-        - name: config-volume
-          secret:
-            secretName: "fence-config"
-        - name: fence-jwt-keys
-          secret:
-            secretName: "fence-jwt-keys"
-      containers:
-        - name: awshelper
+            secretName: cedar-g3auto # the secret name in kube
+      initContainers:
+        - name: cedar
           image: quay.io/cdis/awshelper:master
           imagePullPolicy: Always
           ports:
@@ -84,128 +64,88 @@ spec:
               configMapKeyRef:
                 name: global
                 key: hostname
-            - name: SUBMISSION_USER
-              GEN3_SUBMISSION_USER|-value: "[email protected]"-|
             - name: CEDAR_DIRECTORY_ID
               GEN3_CEDAR_DIRECTORY_ID|-value: ""-|
+            - name: CEDAR_DIRECTORY_ID_SECRET
+              valueFrom:
+                secretKeyRef:
+                  name: cedar-g3auto
+                  key: "directory_id.txt"
           volumeMounts:
+            - name: cedar-client-volume-g3auto
+              readOnly: true
+              mountPath: /tmp/cedar_client_credentials.json
+              subPath: cedar_client_credentials.json
             - name: shared-data
               mountPath: /mnt/shared
+          resources:
+            limits:
+              cpu: 1
+              memory: 5Gi
           command: ["/bin/bash" ]
           args:
             - "-c"
             - |
               if [[ -z "$CEDAR_DIRECTORY_ID" ]]; then
-                echo -e "CEDAR_DIRECTORY_ID is required" 1>&2
-                exit 1
+                if [[ ! -z "$CEDAR_DIRECTORY_ID_SECRET" ]]; then
+                  echo "CEDAR_DIRECTORY_ID is from g3auto secret"
+                  export CEDAR_DIRECTORY_ID=$CEDAR_DIRECTORY_ID_SECRET
+                else
+                  echo -e "ERROR: CEDAR_DIRECTORY_ID must be in secret or on command line" 1>&2
+                  exit 0
+                fi
+              else
+                echo "CEDAR_DIRECTORY_ID is from command line parameter"
               fi
-              let count=0
-              while [[ ! -f /mnt/shared/access_token.txt && $count -lt 50 ]]; do
-                echo "Waiting for /mnt/shared/access_token.txt";
-                sleep 2
-                let count=$count+1
-              done
+              if [[ -f /tmp/cedar_client_credentials.json ]]; then
+                export CEDAR_CLIENT_ID=$(cat /tmp/cedar_client_credentials.json | jq -r .client_id)
+                export CEDAR_CLIENT_SECRET=$(cat /tmp/cedar_client_credentials.json | jq -r .client_secret)
+              else
+                echo -e "Could not read cedar-client credentials" 1>&2
+                exit 0
+              fi
               pip install pydash
               export GEN3_HOME="$HOME/cloud-automation"
-              export ACCESS_TOKEN="$(cat /mnt/shared/access_token.txt)"
-              python ${GEN3_HOME}/files/scripts/healdata/heal-cedar-data-ingest.py --access_token $ACCESS_TOKEN --directory $CEDAR_DIRECTORY_ID --hostname $HOSTNAME
+              python ${GEN3_HOME}/files/scripts/healdata/heal-cedar-data-ingest.py --directory $CEDAR_DIRECTORY_ID --client_id $CEDAR_CLIENT_ID --client_secret $CEDAR_CLIENT_SECRET --hostname $HOSTNAME
               echo "All done - exit status $?"
-        - name: fence
-          GEN3_FENCE_IMAGE
-          imagePullPolicy: Always
-          env:
-            - name: PYTHONPATH
-              value: /var/www/fence
-            - name: SUBMISSION_USER
-              GEN3_SUBMISSION_USER|-value: "[email protected]"-|
-            - name: TOKEN_EXPIRATION
-              value: "3600"
-            - name: FENCE_PUBLIC_CONFIG
-              valueFrom:
-                configMapKeyRef:
-                  name: manifest-fence
-                  key: fence-config-public.yaml
-                  optional: true
-          volumeMounts:
-# -----------------------------------------------------------------------------
-#   DEPRECATED! Remove when all commons are no longer using local_settings.py
-#   for fence.
-# -----------------------------------------------------------------------------
-            - name: "old-config-volume"
-              readOnly: true
-              mountPath: "/var/www/fence/local_settings.py"
-              subPath: local_settings.py
-            - name: "creds-volume"
-              readOnly: true
-              mountPath: "/var/www/fence/creds.json"
-              subPath: creds.json
-            - name: "config-helper"
-              readOnly: true
-              mountPath: "/var/www/fence/config_helper.py"
-              subPath: config_helper.py
-            - name: "json-secret-volume"
-              readOnly: true
-              mountPath: "/var/www/fence/fence_credentials.json"
-              subPath: fence_credentials.json
-# -----------------------------------------------------------------------------
-            - name: "config-volume"
-              readOnly: true
-              mountPath: "/var/www/fence/fence-config-secret.yaml"
-              subPath: fence-config.yaml
-            - name: "yaml-merge"
-              readOnly: true
-              mountPath: "/var/www/fence/yaml_merge.py"
-              subPath: yaml_merge.py
-            - name: "fence-jwt-keys"
-              readOnly: true
-              mountPath: "/fence/jwt-keys.tar"
-              subPath: "jwt-keys.tar"
-            - name: shared-data
-              mountPath: /mnt/shared
-          command: ["/bin/bash" ]
-          args:
+              touch /mnt/shared/success
+      containers:
+        - name: awshelper
+          env:
+            - name: slackWebHook
+              valueFrom:
+                configMapKeyRef:
+                  name: global
+                  key: slack_webhook
+            - name: gen3Env
+              valueFrom:
+                configMapKeyRef:
+                  name: manifest-global
+                  key: hostname
+          GEN3_AWSHELPER_IMAGE|-image: quay.io/cdis/awshelper:master-|
+          volumeMounts:
+            - name: shared-data
+              mountPath: /mnt/shared
+          command: ["/bin/bash"]
+          args:
             - "-c"
             - |
-              echo "${FENCE_PUBLIC_CONFIG:-""}" > "/var/www/fence/fence-config-public.yaml"
-              python /var/www/fence/yaml_merge.py /var/www/fence/fence-config-public.yaml /var/www/fence/fence-config-secret.yaml > /var/www/fence/fence-config.yaml
-              if [ -f /fence/jwt-keys.tar ]; then
-                cd /fence
-                tar xvf jwt-keys.tar
-                if [ -d jwt-keys ]; then
-                  mkdir -p keys
-                  mv jwt-keys/* keys/
-                fi
-              fi
-              echo "generate access token"
-              echo "fence-create --path fence token-create --type access_token --username $SUBMISSION_USER --scopes openid,user,test-client --exp $TOKEN_EXPIRATION"
-              tempFile="$(mktemp -p /tmp token.txt_XXXXXX)"
-              success=false
-              count=0
-              sleepTime=10
-              # retry loop
-              while [[ $count -lt 3 && $success == false ]]; do
-                if fence-create --path fence token-create --type access_token --username $SUBMISSION_USER --scopes openid,user,test-client --exp $TOKEN_EXPIRATION > "$tempFile"; then
-                  echo "fence-create success!"
-                  tail -1 "$tempFile" > /mnt/shared/access_token.txt
-                  # base64 --decode complains about invalid characters - don't know why
-                  awk -F . '{ print $2 }' /mnt/shared/access_token.txt | base64 --decode 2> /dev/null
-                  success=true
-                else
-                  echo "fence-create failed!"
-                  cat "$tempFile"
-                  echo "sleep for $sleepTime, then retry"
-                  sleep "$sleepTime"
-                  let sleepTime=$sleepTime+$sleepTime
-                fi
-                let count=$count+1
-              done
-              if [[ $success != true ]]; then
-                echo "Giving up on fence-create after $count retries - failed to create valid access token"
-              fi
+              if [[ ! "$slackWebHook" =~ ^http ]]; then
+                echo "Slack webhook not set"
+                exit 0
+              fi
+              if ! [ -f /mnt/shared/success ]; then
+                success="FAILED"
+                color="ff0000"
+              else
+                success="SUCCESS"
+                color="2EB67D"
+              fi
               echo ""
               echo "All Done - always succeed to avoid k8s retries"
+              echo "Sending ${success} message to slack..."
+              payload="{\"attachments\": [{\"fallback\": \"JOB ${success}: cedar-ingest cronjob on ${gen3Env}\",\"color\": \"#${color}\",\"title\": \"JOB ${success}: cedar-ingest cronjob on ${gen3Env}\",\"text\": \"Pod name: ${HOSTNAME}\",\"ts\": \"$(date +%s)\"}]}"
+              echo "Payload=${payload}"
+              curl -X POST --data-urlencode "payload=${payload}" "${slackWebHook}"
       restartPolicy: Never
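With the secret in place, the invocations from the header comments reduce to the sketch below; the CEDAR_DIRECTORY_ID argument is only needed to override the secret's directory_id.txt, and `gen3 job logs` is assumed here as the usual way to watch the run, not something this commit touches:

gen3 job run cedar-ingestion
gen3 job run cedar-ingestion CEDAR_DIRECTORY_ID 123e4567-e89b-12d3-a456-426614174000
gen3 job logs cedar-ingestion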