Skip to content
This repository has been archived by the owner on Dec 11, 2020. It is now read-only.

Commit

Permalink
Merge pull request #104 from RhodiumGroup/pangeify_octave
Browse files Browse the repository at this point in the history
WIP: Pangeify+octave
  • Loading branch information
delgadom authored Dec 16, 2019
2 parents c0a8c86 + ff5a13a commit b5a4b0c
Show file tree
Hide file tree
Showing 23 changed files with 1,316 additions and 425 deletions.
7 changes: 6 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -7,4 +7,9 @@ jupyter-config.yml
secret-config.yml
storageclass.yml
values.yml
worker-config.yml
worker-config.yml

# tests
.coverage
__pycache__
.pytest_cache
131 changes: 104 additions & 27 deletions .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -4,36 +4,113 @@ services:
- docker

env:
matrix:
- IMAGE_NAME=notebook
- IMAGE_NAME=worker
- IMAGE_NAME=notebook
- IMAGE_NAME=worker

install:
- "if [[ \"$TRAVIS_TAG\" == \"\" ]]; then sed -i.bak 's/image: rhodium\\/worker.*/image: rhodium\\/worker:'\"$TRAVIS_COMMIT\"'/' notebook/worker-template.yml; else sed -i.bak 's/image: rhodium\\/worker:.*/image: rhodium\\/worker:'\"$TRAVIS_TAG\"'/' notebook/worker-template.yml; fi"
- "rm notebook/worker-template.yml.bak"
- "cat notebook/worker-template.yml | grep image:"
- "cd $IMAGE_NAME"
- "docker pull rhodium/$IMAGE_NAME:dev"
- "docker build --pull --cache-from rhodium/$IMAGE_NAME:dev -t rhodium/$IMAGE_NAME:$TRAVIS_COMMIT ."
- "if [[ \"$TRAVIS_TAG\" == \"\" ]]; then sed -i.bak \
's/image: rhodium\\/worker.*/image: rhodium\\/worker:'\"$TRAVIS_COMMIT\"'/' \
notebook/worker-template.yml; else sed -i.bak \
's/image: rhodium\\/worker:.*/image: rhodium\\/worker:'\"$TRAVIS_TAG\"'/' \
notebook/worker-template.yml; fi"
- "rm notebook/worker-template.yml.bak"
- "cat notebook/worker-template.yml | grep image:"
- "cp base_environment.yml $IMAGE_NAME/base_environment.yml"
- "cp common.sh $IMAGE_NAME/common.sh && chmod +x $IMAGE_NAME/common.sh"
- "cd $IMAGE_NAME"


script:
- docker images rhodium/$IMAGE_NAME:$TRAVIS_COMMIT
- docker build -t rhodium/$IMAGE_NAME:$TRAVIS_COMMIT .

deploy:
- provider: script
script: docker tag rhodium/$IMAGE_NAME:$TRAVIS_COMMIT rhodium/$IMAGE_NAME:dev && docker login -u "$DOCKER_USERNAME" -p "$DOCKER_PASSWORD" && docker push "rhodium/$IMAGE_NAME:$TRAVIS_COMMIT" && docker push "rhodium/$IMAGE_NAME:dev"
skip_cleanup: true
on:
branch: dev

- provider: script
script: docker tag rhodium/$IMAGE_NAME:$TRAVIS_COMMIT rhodium/$IMAGE_NAME:dev && docker tag rhodium/$IMAGE_NAME:$TRAVIS_COMMIT rhodium/$IMAGE_NAME:latest && docker login -u "$DOCKER_USERNAME" -p "$DOCKER_PASSWORD" && docker push "rhodium/$IMAGE_NAME:$TRAVIS_COMMIT" && docker push "rhodium/$IMAGE_NAME:dev" && docker push "rhodium/$IMAGE_NAME:latest"
skip_cleanup: true
on:
branch: master

- provider: script
script: docker tag rhodium/$IMAGE_NAME:$TRAVIS_COMMIT rhodium/$IMAGE_NAME:$TRAVIS_TAG && docker login -u "$DOCKER_USERNAME" -p "$DOCKER_PASSWORD" && docker push "rhodium/$IMAGE_NAME:$TRAVIS_TAG"
skip_cleanup: true
on:
tags: true
- provider: script
script: >-
docker tag rhodium/$IMAGE_NAME:$TRAVIS_COMMIT
rhodium/$IMAGE_NAME:$TRAVIS_BRANCH &&
docker login -u "$DOCKER_USERNAME" -p "$DOCKER_PASSWORD" &&
docker push "rhodium/$IMAGE_NAME:$TRAVIS_COMMIT" &&
docker push "rhodium/$IMAGE_NAME:$TRAVIS_BRANCH"
skip_cleanup: true
on:
all_branches: true
condition: $TRAVIS_BRANCH =~ ^dev

- provider: script
script: >-
docker tag rhodium/$IMAGE_NAME:$TRAVIS_COMMIT rhodium/$IMAGE_NAME:dev &&
docker tag rhodium/$IMAGE_NAME:$TRAVIS_COMMIT rhodium/$IMAGE_NAME:latest &&
docker login -u "$DOCKER_USERNAME" -p "$DOCKER_PASSWORD" &&
docker push "rhodium/$IMAGE_NAME:$TRAVIS_COMMIT" &&
docker push "rhodium/$IMAGE_NAME:dev" &&
docker push "rhodium/$IMAGE_NAME:latest"
skip_cleanup: true
on:
branch: master

- provider: script
script: >-
docker tag rhodium/$IMAGE_NAME:$TRAVIS_COMMIT
rhodium/$IMAGE_NAME:$TRAVIS_TAG &&
docker login -u "$DOCKER_USERNAME" -p "$DOCKER_PASSWORD" &&
docker push "rhodium/$IMAGE_NAME:$TRAVIS_TAG"
skip_cleanup: true
on:
tags: true

# octave-worker builds: built here (rather than via the install step) because
# octave-worker piggybacks on the worker matrix entry's build context.
- provider: script
  script: >-
    docker build -t rhodium/octave-worker:$TRAVIS_COMMIT
    --build-arg TRAVIS_COMMIT=$TRAVIS_COMMIT ../octave-worker &&
    docker tag rhodium/octave-worker:$TRAVIS_COMMIT
    rhodium/octave-worker:$TRAVIS_BRANCH &&
    docker login -u "$DOCKER_USERNAME" -p "$DOCKER_PASSWORD" &&
    docker push "rhodium/octave-worker:$TRAVIS_COMMIT" &&
    docker push "rhodium/octave-worker:$TRAVIS_BRANCH"
  skip_cleanup: true
  on:
    all_branches: true
    # BUG FIX: the original listed `condition:` twice under `on:`. YAML
    # forbids duplicate keys and most parsers silently keep only the last
    # value, so the `$TRAVIS_BRANCH =~ ^dev` guard was being dropped and
    # this deploy ran for every IMAGE_NAME=worker branch build. Travis
    # accepts a single bash boolean expression, so combine the two guards.
    condition: $TRAVIS_BRANCH =~ ^dev && $IMAGE_NAME = worker

- provider: script
  # BUG FIX: the second `docker tag` source was written as
  # "rhodium/octave-worker$TRAVIS_COMMIT" — missing the ':' tag separator —
  # which fuses the commit SHA into the repository name, references an image
  # that does not exist, and fails the tag step (so :latest never pushed).
  script: >-
    docker tag rhodium/octave-worker:$TRAVIS_COMMIT
    rhodium/octave-worker:dev &&
    docker tag rhodium/octave-worker:$TRAVIS_COMMIT
    rhodium/octave-worker:latest &&
    docker login -u "$DOCKER_USERNAME" -p "$DOCKER_PASSWORD" &&
    docker push "rhodium/octave-worker:$TRAVIS_COMMIT" &&
    docker push "rhodium/octave-worker:dev" &&
    docker push "rhodium/octave-worker:latest"
  skip_cleanup: true
  on:
    branch: master
    condition: $IMAGE_NAME = worker

# Tagged releases: retag the commit-SHA octave-worker image with the git tag
# name and push only that tag. Runs only for the worker matrix entry.
# NOTE(review): unlike the dev/master providers above, this assumes the
# octave-worker:$TRAVIS_COMMIT image already exists locally (it is built by
# the branch provider, not here) — confirm tag builds also build the image.
- provider: script
script: >-
docker tag rhodium/octave-worker:$TRAVIS_COMMIT
rhodium/octave-worker:$TRAVIS_TAG &&
docker login -u "$DOCKER_USERNAME" -p "$DOCKER_PASSWORD" &&
docker push "rhodium/octave-worker:$TRAVIS_TAG"
skip_cleanup: true
on:
tags: true
condition: $IMAGE_NAME = worker

# - stage: alignment
# language: python
# python:
# - 3.6
# script:
# - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh;
# - bash miniconda.sh -b -p $HOME/miniconda
# - export PATH="$HOME/miniconda/bin:$PATH"
# - hash -r
# - conda config --set always_yes yes --set changeps1 no
# - conda update -q conda
# - conda info -a
# - conda install pytest pytest-cov pyyaml
# - pytest
16 changes: 14 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -11,9 +11,21 @@ To update this file
4. Commit your changes
5. Tag your image with `python bump.py`
6. Push to github and make a pull request to master
7. If your build passes on Travis, we'll merge it and it will deploy to dockerhub
7. If your build passes on Travis, we'll merge it and it will deploy to dockerhub

Any questions please email jsimcock@rhg.com
Any questions please email mdelgado@rhg.com

# Cluster overview

* compute.rhg.com: flagship Rhodium compute cluster
* impactlab.rhg.org: flagship Climate Impact Lab compute cluster

Preemptable clusters:

* coastal.rhg.com: Pods in this cluster are cheaper but can disappear at any time. Expect less stability, more bugs, pop-up errors — and lower bills.

Testing clusters:

* compute-test.rhg.com: staging deployment with stable users & user directories. This cluster should be used to beta-test deployments scheduled for the production servers in an environment similar to production. users should not expect their data here to be safe, but admins should make an effort to simulate production roll-outs and to ensure data/user safety in upgrading the cluster. admins should encourage production users to test their workflows on this cluster before a major production upgrade.
* testing.climate-kube.com: bleeding-edge test cluster. absolutely no guarantee of data/user/environment preservation. users should expect the entire cluster to be deleted at any point.
* test2.climate-kube.com: same purpose as testing.climate-kube.com, but another one to parallelize the madness.
110 changes: 110 additions & 0 deletions base_environment.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,110 @@
# Shared conda environment for the cluster images. The Travis install step
# copies this file into each image's build context
# (`cp base_environment.yml $IMAGE_NAME/base_environment.yml`), so notebook
# and worker stay in sync. Builds are `=version=build` pinned for
# reproducibility; `# pinkeep:` markers flag pins that tooling must not bump.
name: base
channels:
- conda-forge
dependencies:
- beautifulsoup4=4.8.1=py37_0
- boto3=1.10.34=py_0
- bqplot=0.12.1=py_0
- bumpversion=0.5.3=py_1001
- cachecontrol=0.12.5=py_0
- cartopy=0.17.0=py37h423102d_1009
- cftime=1.0.4.2=py37hc1659b7_0
- click=7.0=py_0
- compilers=1.0.4=0
- dask=2.8.1=py_0
- dask-glm=0.2.0=py_1
- dask-ml=1.1.1=py_0
- datashader=0.8.0=py_0
- distributed=2.8.1=py_0
- dropbox=9.4.0=py_0
# need to make sure we get esmpy compiled with mpi otherwise xesmf regridding
# won't work
- esmpy=8.0.0=mpi_mpich_py37ha9b28fa_101 # pinkeep: esmpy=8.0.0=mpi_mpich_py37ha9b28fa_101
- fastparquet=0.3.2=py37hc1659b7_0
- fiona=1.8.13=py37h900e953_0
- fusepy=3.0.1=py_0
# this gcc pin is necessary b/c of a weird feature in the h553295d_15 build
# which makes it hard to build numpy-based cython extensions (like pyclaw).
# we should try removing it whenever we next do an update and see if Clawpack
# can still be built
- gcc_linux-64=7.3.0=h553295d_14 # pinkeep: gcc_linux-64=7.3.0=h553295d_14
- gcsfs=0.5.3=py_0
- gdal=3.0.2=py37hbb6b9fb_5
- geoalchemy2=0.6.3=py_0
- geopandas=0.6.2=py_0
- geopy=1.20.0=py_0
- geotiff=1.5.1=hbd99317_7
- geoviews=1.6.6=py_0
- git=2.24.0=pl526hce37bd2_1
- gitpython=3.0.5=py_0
- google-cloud-container=0.3.0=py37_0
- google-cloud-storage=1.23.0=py37_0
- holoviews=1.12.7=py_0
- h5netcdf=0.7.4=py_0
- icu=64.2=he1b5a44_1
- iris=2.2.0=py37_1003
- jedi=0.15.1=py37_0
# need server proxy on workers if using remote scheduler
- jupyter-server-proxy=1.2.0=py_0
# NOTE(review): `kubernetes` is the only unpinned conda dependency here —
# confirm whether it should carry a version/build pin like the others.
- kubernetes
- lapack=3.6.1=ha44fe06_2
- lz4=2.2.1=py37hd79334b_0
- make=4.2.1=h14c3975_2004
- matplotlib=3.1.2=py37_1
- nc-time-axis=1.2.0=py_0
- ncurses=6.1=hf484d3e_1002
- netcdf-fortran=4.5.2=mpi_mpich_ha8580a0_2
- netcdf4=1.5.3=mpi_mpich_py37h01ee55b_1
- numba=0.46.0=py37hb3f55d8_1
- numcodecs=0.6.4=py37he1b5a44_0
- pandas=0.25.3=py37hb3f55d8_0
# for geoviews
- phantomjs=2.1.1=1
- pip=19.3.1=py37_0
- plotly=4.3.0=py_0
- polyline=1.4.0=py_0
- pygeos=0.5=py37h5d51c17_1
- pyinterp=0.0.7=py37h97f2665_0
- pyshp=2.1.0=py_0
- python=3.7.3=h357f687_2 # pinkeep: python=3.7
- python-blosc=1.8.1=py37hf484d3e_0
- python-snappy=0.5.4=py37hee44bf9_1
- pyviz_comms=0.7.2=py_0
- pyyaml=5.2=py37h516909a_0
- rasterio=1.1.1=py37h900e953_0
- regionmask=0.4.0=py_0
- rtree=0.8.3=py37h7b0cdae_1003
- scikit-image=0.16.2=py37hb3f55d8_0
- scikit-learn=0.22=py37hcdab131_0
- scipy=1.3.2=py37h921218d_0
- seaborn=0.9.0=py_2
# for geoviews
- selenium=3.141.0=py37h516909a_1000
- shapely=1.6.4=py37h5d51c17_1007
- sparse=0.8.0=py_0
- statsmodels=0.10.2=py37hc1659b7_0
- tini=0.18.0=h14c3975_1001
- unzip=6.0=h516909a_0
- uritemplate=3.0.0=py_1
- xarray=0.14.1=py_0
- xesmf=0.2.1=py_0
- xgcm=0.2.0=py_0
- xhistogram=0.1.1=py_0
- xlrd=1.2.0=py_0
- xrft=0.2.0=py_0
- zarr=2.3.2=py37_0
- zeromq=4.3.2=he1b5a44_2
- zict=1.0.0=py_0
# pip-only packages, including VCS installs (not reproducible builds:
# the git+ URLs below track branches/master rather than fixed revisions).
- pip:
- mapbox==0.18.0
- py-noaa==1.0
- sidecar==0.3.0
- climate-toolbox==0.1.5
- impactlab-tools==0.4.0
- parameterize-jobs==0.1.1
- git+https://github.com/rhodiumgroup/rhg_compute_tools.git@allow-remote-scheduler#egg=rhg_compute_tools
- git+https://github.com/NCAR/intake-esm.git#egg=intake_esm
# need to install from master until 0.10.1
# due to handling of remote scheduler
# (we also should at some point switch to dask-gateway instead of dask-kubernetes)
- git+https://github.com/dask/dask-kubernetes.git#egg=dask_kubernetes
44 changes: 44 additions & 0 deletions common.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
#!/bin/sh
# Shared provisioning script for the cluster Docker images. The Travis
# install step copies it into each image's build context
# (`cp common.sh $IMAGE_NAME/common.sh && chmod +x ...`), so both notebook
# and worker run the same base system setup.
# NOTE(review): no `set -e` — a failed apt-get/curl/wget here does not stop
# the script; confirm whether failures should abort the image build.

# install apt-get packages
# (curl, lsb-release, gnupg2 are needed by the Google repo setup below)
apt-get update -y --no-install-recommends
apt-get install -yq --no-install-recommends \
apt-utils \
bzip2 \
ca-certificates \
curl \
lsb-release \
gnupg2 \
sudo \
libgl1-mesa-glx \
wget

# install gcsfuse, google cloud sdk, kubectl
# (need curl to be installed earlier)
export GCSFUSE_REPO=gcsfuse-`lsb_release -c -s`
echo "deb http://packages.cloud.google.com/apt $GCSFUSE_REPO main" | \
tee /etc/apt/sources.list.d/gcsfuse.list
echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] \
https://packages.cloud.google.com/apt cloud-sdk main" | \
tee -a /etc/apt/sources.list.d/google-cloud-sdk.list
# the key is added twice on purpose: once to the default apt keyring (used by
# gcsfuse.list, which has no signed-by) and once to the dedicated keyring the
# google-cloud-sdk.list entry references via signed-by
curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | \
apt-key --keyring /usr/share/keyrings/cloud.google.gpg add -
apt-get update -y
apt-get install -yq --no-install-recommends gcsfuse google-cloud-sdk kubectl
# NOTE(review): aliases defined in a non-interactive /bin/sh script do not
# persist into later shells/containers — confirm this alias has any effect,
# or whether a symlink was intended
alias googlefuse=/usr/bin/gcsfuse

apt-get clean

# get cloud sql proxy
wget https://dl.google.com/cloudsql/cloud_sql_proxy.linux.amd64 -O /usr/bin/cloud_sql_proxy
chmod +x /usr/bin/cloud_sql_proxy

# filepath curating
# NOTE(review): /usr/bin/prepare.sh is not created by this script — presumably
# COPY'd by each image's Dockerfile before this runs; verify
chmod +x /usr/bin/prepare.sh
mkdir /gcs
mkdir /opt/app

# super sketchy hack to get around our need for compiler_compat binaries and some
# other things that cause problems together?
# see https://github.com/ContinuumIO/anaconda-issues/issues/11152
rm -rf /opt/conda/compiler_compat/ld
57 changes: 0 additions & 57 deletions conda_environment.yml

This file was deleted.

Empty file.
Loading

0 comments on commit b5a4b0c

Please sign in to comment.