diff --git a/.github/workflows/run_use_cases_examples.yaml b/.github/workflows/run_use_cases_examples.yaml
new file mode 100644
index 0000000000..7e0904943c
--- /dev/null
+++ b/.github/workflows/run_use_cases_examples.yaml
@@ -0,0 +1,126 @@
+name: Run Use Case Examples
+on:
+  push:
+  # workflow_dispatch:
+  # schedule:
+  #   - cron: '0 0 * * MON' # Scheduled trigger every Monday at midnight
+
+concurrency:
+  group: ${{ github.ref }}
+  cancel-in-progress: true
+
+env:
+  ACTION_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
+
+jobs:
+  start-runner-linux:
+    name: Start EC2 runner
+    runs-on: ubuntu-20.04
+    outputs:
+      label-38: ${{ steps.start-ec2-runner-38.outputs.label }}
+      ec2-instance-id-38: ${{ steps.start-ec2-runner-38.outputs.ec2-instance-id || '' }}
+    steps:
+      - name: Checkout Code
+        uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
+
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@8c3f20df09ac63af7b3ae3d7c91f105f857d8497
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-region: ${{ secrets.AWS_REGION }}
+
+      - name: Start EC2 runner python 38
+        id: start-ec2-runner-38
+        uses: machulav/ec2-github-runner@4e0303de215db88e1c489e07a15ca4d867f488ea
+        with:
+          mode: start
+          github-token: ${{ secrets.EC2_RUNNER_BOT_TOKEN }}
+          ec2-image-id: ${{ secrets.AWS_EC2_AMI }}
+          ec2-instance-type: "m6i.metal"
+          subnet-id: ${{ secrets.AWS_EC2_SUBNET_ID }}
+          security-group-id: ${{ secrets.AWS_EC2_SECURITY_GROUP_ID }}
+
+  run-use-case-examples:
+    needs: [start-runner-linux]
+    runs-on: ${{ needs.start-runner-linux.outputs.label-38 }}
+    container:
+      image: ubuntu:20.04
+    defaults:
+      run:
+        shell: bash
+    steps:
+      - name: Checkout Code
+        uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
+
+      - name: Set up Environment
+        run: |
+          # Install the system packages needed to check out the repository and run the example scripts
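+          # git and git-lfs are not shipped in the bare ubuntu:20.04 container image;
+          # the example runner script calls git (e.g. git ls-files) and some use cases
+          # may fetch LFS-tracked assets.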
+          apt-get update && apt-get install -y python3-venv make git git-lfs
+
+      - name: Run Use Case Examples Script
+        run: |
+          chmod +x ./script/make_utils/run_use_case_examples.sh
+          ./script/make_utils/run_use_case_examples.sh
+
+  stop-runner-linux:
+    name: Stop EC2 runner
+    needs: [run-use-case-examples, start-runner-linux]
+    runs-on: ubuntu-20.04
+    if: ${{ always() && (needs.start-runner-linux.result != 'skipped') }}
+    steps:
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@8c3f20df09ac63af7b3ae3d7c91f105f857d8497
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-region: ${{ secrets.AWS_REGION }}
+
+      - name: Stop EC2 runner python 38
+        uses: machulav/ec2-github-runner@4e0303de215db88e1c489e07a15ca4d867f488ea
+        if: ${{ always() && needs.start-runner-linux.outputs.ec2-instance-id-38 }}
+        with:
+          github-token: ${{ secrets.EC2_RUNNER_BOT_TOKEN }}
+          label: ${{ needs.start-runner-linux.outputs.label-38 }}
+          ec2-instance-id: ${{ needs.start-runner-linux.outputs.ec2-instance-id-38 }}
+          mode: stop
+
+  send-report:
+    if: ${{ always() }}
+    needs:
+      [
+        start-runner-linux,
+        run-use-case-examples,
+        stop-runner-linux,
+      ]
+    name: Send Slack notification
+    runs-on: ubuntu-20.04
+    steps:
+      - uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
+
+      - name: Prepare whole job status
+        if: ${{ always() }}
+        continue-on-error: true
+        env:
+          NEEDS_JSON: ${{ toJSON(needs) }}
+        run: |
+          echo "${NEEDS_JSON}" > /tmp/needs_context.json
+          JOB_STATUS=$(python3 ./script/actions_utils/actions_combine_status.py \
+            --needs_context_json /tmp/needs_context.json)
+          echo "JOB_STATUS=${JOB_STATUS}" >> "$GITHUB_ENV"
+
+      - name: Slack Notification
+        if: ${{ always() }}
+        continue-on-error: true
+        uses: rtCamp/action-slack-notify@b24d75fe0e728a4bf9fc42ee217caa686d141ee8
+        env:
+          SLACK_CHANNEL: ${{ secrets.SLACK_CHANNEL }}
+          SLACK_ICON: https://pbs.twimg.com/profile_images/1274014582265298945/OjBKP9kn_400x400.png
+          SLACK_COLOR: ${{ env.JOB_STATUS || 'failure' }}
+          SLACK_MESSAGE: "Full run of use case examples finished with status ${{ env.JOB_STATUS || 'failure' }} \
+            (${{ env.ACTION_RUN_URL }})\n\
+            - start-runner-linux: ${{ needs.start-runner-linux.result || 'Did not run.'}}\n\n\
+            - run-use-case-examples: ${{ needs.run-use-case-examples.result || 'Did not run.' }}\n\n\
+            - stop-runner-linux: ${{ needs.stop-runner-linux.result || 'Did not run.'}}"
+          SLACK_USERNAME: ${{ secrets.BOT_USERNAME }}
+          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
\ No newline at end of file
diff --git a/script/make_utils/run_use_case_examples.sh b/script/make_utils/run_use_case_examples.sh
index 6461466f2c..e0a9cb0463 100755
--- a/script/make_utils/run_use_case_examples.sh
+++ b/script/make_utils/run_use_case_examples.sh
@@ -3,15 +3,6 @@ set -e
 
 DIR=$(dirname "$0")
 
-# shellcheck disable=SC1090,SC1091
-source "${DIR}/detect_docker.sh"
-
-if isDocker; then
-    echo "Can not run in docker -> this script needs to install new virtualenvs"
-    exit 1
-fi
-
-
 CML_DIR=$(pwd)
 USE_CASE_REL_DIR="use_case_examples"
 USE_CASE_DIR="${CML_DIR}/${USE_CASE_REL_DIR}"
@@ -21,9 +12,8 @@ if [ ! -d "$USE_CASE_DIR" ]; then
-d "$USE_CASE_DIR" ]; then exit 1 fi -echo "Refreshing notebooks with PIP installed Concrete ML" +echo "Running notebooks with PIP installed Concrete ML" -# shellcheck disable=SC2143 if [[ $(git ls-files --others --exclude-standard | grep ${USE_CASE_REL_DIR}) ]]; then echo "This script must be run in a clean clone of the Concrete ML repo" echo "This directory has untracked files in ${USE_CASE_REL_DIR}" @@ -51,79 +41,93 @@ else fi fi -if [ ! "$(docker images -q zamafhe/concrete-ml:latest 2> /dev/null)" ]; then - # BUILD THE DOCKER IMAGE - echo "Building docker image" - poetry build && mkdir -p pkg && cp dist/* pkg/ && make release_docker - docker tag concrete-ml-release:latest zamafhe/concrete-ml:latest -fi +declare -a success_examples +declare -a failed_examples -# shellcheck disable=SC2068 -for EXAMPLE in ${LIST_OF_USE_CASES[@]} +for EXAMPLE in "${LIST_OF_USE_CASES[@]}" do EXAMPLE_NAME=$(basename "${EXAMPLE}") - if [ -f "${EXAMPLE}/Makefile" ]; then - echo "*** Processing example ${EXAMPLE_NAME}" - else + if [ ! -f "${EXAMPLE}/Makefile" ]; then continue fi + echo "*** Processing example ${EXAMPLE_NAME}" + # Setup a new venv VENV_PATH="/tmp/virtualenv_${EXAMPLE_NAME}" if [ -d "$VENV_PATH" ]; then echo " - VirtualEnv already exists, deleting the old one" rm -rf "$VENV_PATH" fi - virtualenv -q "$VENV_PATH" + python3 -m venv "$VENV_PATH" echo " - VirtualEnv created at $VENV_PATH" # shellcheck disable=SC1090,SC1091 source "${VENV_PATH}/bin/activate" # Install Concrete ML - set +e cd "$CML_DIR" - pip install -e . &> "/tmp/log_cml_pip_${EXAMPLE_NAME}" + pip install -U pip setuptools wheel + pip install -e . hresult=$? if [ $hresult -ne 0 ]; then echo "Could not install Concrete ML in the virtualenv, see /tmp/log_cml_pip_${EXAMPLE_NAME}" rm -rf "$VENV_PATH" + failed_examples+=("$EXAMPLE_NAME") continue fi - set -e echo " - Concrete ML installed in $VENV_PATH" # Install example requirements cd "$EXAMPLE" if [ -f "requirements.txt" ]; then - set +e - pip install -r requirements.txt &> "/tmp/log_reqs_${EXAMPLE_NAME}" + pip install -r requirements.txt hresult=$? - set -e if [ $hresult -ne 0 ]; then - echo "Could not install Concrete ML in the virtualenv, see /tmp/log_reqs_${EXAMPLE_NAME}" + echo "Could not install example requirements in the virtualenv, see /tmp/log_reqs_${EXAMPLE_NAME}" rm -rf "$VENV_PATH" + failed_examples+=("$EXAMPLE_NAME") continue fi echo " - Requirements installed in $VENV_PATH" fi set +e - # Strip colors from the error output before piping to the log files +# Strip colors from the error output before piping to the log files # Swap stderr and stdout, all output of jupyter execution is in stderr # The information about time spent running the notebook is in stdout # The following will pipe the stderr to the regex so that it # ends up in the log file. 
     # The timing shows in the terminal
-    USE_CASE_DIR=$USE_CASE_DIR make 3>&2 2>&1 1>&3- | perl -pe 's/\e([^\[\]]|\[.*?[a-zA-Z]|\].*?\a)//g' > "/tmp/log_${EXAMPLE_NAME}"
+    USE_CASE_DIR=$USE_CASE_DIR make 3>&2 2>&1 1>&3- | perl -pe 's/\e([^\[\]]|\[.*?[a-zA-Z]|\].*?\a)//g'
 
     # Need to check the result of the make command (ignore the results
     # of the other commands in the pipe)
     hresult="${PIPESTATUS[0]}"
     if [ "$hresult" -ne 0 ]; then
-        echo "Error while running example ${EXAMPLE_NAME} see /tmp/log_${EXAMPLE_NAME}"
+        echo "Error while running example ${EXAMPLE_NAME}"
+        failed_examples+=("$EXAMPLE_NAME")
+    else
+        success_examples+=("$EXAMPLE_NAME")
     fi
     set -e
 
     # Remove the virtualenv
     rm -rf "$VENV_PATH"
 done
+
+# Print summary
+echo
+echo "Summary:"
+echo "Successes: ${#success_examples[@]} examples"
+for example in "${success_examples[@]}"; do
+    echo " - $example"
+done
+echo "Failures: ${#failed_examples[@]} examples"
+for example in "${failed_examples[@]}"; do
+    echo " - $example"
+done
+
+# Exit with a failure status if there are any failures
+if [[ ${#failed_examples[@]} -gt 0 ]]; then
+    exit 1
+fi
diff --git a/use_case_examples/cifar/cifar_brevitas_training/Makefile b/use_case_examples/cifar/cifar_brevitas_training/Makefile
index d222b84d0e..840146c7a1 100644
--- a/use_case_examples/cifar/cifar_brevitas_training/Makefile
+++ b/use_case_examples/cifar/cifar_brevitas_training/Makefile
@@ -2,7 +2,7 @@
 export LC_ALL=en_US.UTF-8
 export LANG=en_US.UTF-8
 
-EXAMPLE_NAME=cifar_brevitas_finetuning
+EXAMPLE_NAME=cifar_brevitas_training
 JUPYTER_RUN=jupyter nbconvert --to notebook --inplace --execute
 TIME_NB="${USE_CASE_DIR}/time_notebook_execution.sh"
 
diff --git a/use_case_examples/cifar/cifar_brevitas_with_model_splitting/Makefile b/use_case_examples/cifar/cifar_brevitas_with_model_splitting/Makefile
index 53d36d2e7d..e90c848b40 100644
--- a/use_case_examples/cifar/cifar_brevitas_with_model_splitting/Makefile
+++ b/use_case_examples/cifar/cifar_brevitas_with_model_splitting/Makefile
@@ -2,7 +2,7 @@
 export LC_ALL=en_US.UTF-8
 export LANG=en_US.UTF-8
 
-EXAMPLE_NAME=cifar_brevitas_finetuning
+EXAMPLE_NAME=cifar_brevitas_with_model_splitting
 JUPYTER_RUN=jupyter nbconvert --to notebook --inplace --execute
 TIME_NB="${USE_CASE_DIR}/time_notebook_execution.sh"
 
diff --git a/use_case_examples/credit_scoring/Makefile b/use_case_examples/credit_scoring/Makefile
new file mode 100644
index 0000000000..60bc03f90b
--- /dev/null
+++ b/use_case_examples/credit_scoring/Makefile
@@ -0,0 +1,12 @@
+# Useful for jupyter notebooks
+export LC_ALL=en_US.UTF-8
+export LANG=en_US.UTF-8
+
+EXAMPLE_NAME=credit_scoring
+JUPYTER_RUN=jupyter nbconvert --to notebook --inplace --execute
+TIME_NB="${USE_CASE_DIR}/time_notebook_execution.sh"
+
+run_example: one
+
+one:
+	@$(TIME_NB) CreditScoring.ipynb
diff --git a/use_case_examples/deployment/breast_cancer_builtin/Makefile b/use_case_examples/deployment/breast_cancer_builtin/Makefile
deleted file mode 100644
index 4a22f850f0..0000000000
--- a/use_case_examples/deployment/breast_cancer_builtin/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-# Useful for jupyter notebooks
-export LC_ALL=en_US.UTF-8
-export LANG=en_US.UTF-8
-
-EXAMPLE_NAME=cifar_brevitas_finetuning
-JUPYTER_RUN=jupyter nbconvert --to notebook --inplace --execute
-
-run_example: three
-
-one:
-	@./train_with_docker.sh
-
-two: one
-	@python -m concrete.ml.deployment.deploy_to_docker --only-build
-
-three: two
-	@python build_docker_client_image.py
diff --git a/use_case_examples/deployment/cifar_8_bit/Makefile b/use_case_examples/deployment/cifar_8_bit/Makefile
deleted file mode 100644
index 5043905b0f..0000000000
--- a/use_case_examples/deployment/cifar_8_bit/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-# Useful for jupyter notebooks
-export LC_ALL=en_US.UTF-8
-export LANG=en_US.UTF-8
-
-EXAMPLE_NAME=cifar_brevitas_finetuning
-JUPYTER_RUN=jupyter nbconvert --to notebook --inplace --execute
-
-run_example: three
-
-one:
-	@python compile_with_docker.py
-
-two: one
-	@python -m concrete.ml.deployment.deploy_to_docker --only-build
-
-three: two
-	@python build_docker_client_image.py
diff --git a/use_case_examples/deployment/sentiment_analysis/Makefile b/use_case_examples/deployment/sentiment_analysis/Makefile
deleted file mode 100644
index 4a22f850f0..0000000000
--- a/use_case_examples/deployment/sentiment_analysis/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-# Useful for jupyter notebooks
-export LC_ALL=en_US.UTF-8
-export LANG=en_US.UTF-8
-
-EXAMPLE_NAME=cifar_brevitas_finetuning
-JUPYTER_RUN=jupyter nbconvert --to notebook --inplace --execute
-
-run_example: three
-
-one:
-	@./train_with_docker.sh
-
-two: one
-	@python -m concrete.ml.deployment.deploy_to_docker --only-build
-
-three: two
-	@python build_docker_client_image.py
diff --git a/use_case_examples/disease_prediction/Makefile b/use_case_examples/disease_prediction/Makefile
new file mode 100644
index 0000000000..5b8102b60a
--- /dev/null
+++ b/use_case_examples/disease_prediction/Makefile
@@ -0,0 +1,12 @@
+# Useful for jupyter notebooks
+export LC_ALL=en_US.UTF-8
+export LANG=en_US.UTF-8
+
+EXAMPLE_NAME=disease_prediction
+JUPYTER_RUN=jupyter nbconvert --to notebook --inplace --execute
+TIME_NB="${USE_CASE_DIR}/time_notebook_execution.sh"
+
+run_example: one
+
+one:
+	@$(TIME_NB) HealthCarePrediction.ipynb
diff --git a/use_case_examples/disease_prediction/requirements.txt b/use_case_examples/disease_prediction/requirements.txt
index 56b78ae9f5..1709ea8a17 100644
--- a/use_case_examples/disease_prediction/requirements.txt
+++ b/use_case_examples/disease_prediction/requirements.txt
@@ -1,3 +1,4 @@
 concrete-ml
 jupyter
 pandas
+matplotlib
diff --git a/use_case_examples/sentiment_analysis_with_transformer/Makefile b/use_case_examples/sentiment_analysis_with_transformer/Makefile
index 3340332bc8..dde11d706e 100644
--- a/use_case_examples/sentiment_analysis_with_transformer/Makefile
+++ b/use_case_examples/sentiment_analysis_with_transformer/Makefile
@@ -2,7 +2,7 @@
 export LC_ALL=en_US.UTF-8
 export LANG=en_US.UTF-8
 
-EXAMPLE_NAME=cifar_brevitas_finetuning
+EXAMPLE_NAME=sentiment_analysis_with_transformer
 JUPYTER_RUN=jupyter nbconvert --to notebook --inplace --execute
 TIME_NB="${USE_CASE_DIR}/time_notebook_execution.sh"
 
diff --git a/use_case_examples/sentiment_analysis_with_transformer/SentimentClassification.ipynb b/use_case_examples/sentiment_analysis_with_transformer/SentimentClassification.ipynb
index dc9774336b..6abbc10636 100644
--- a/use_case_examples/sentiment_analysis_with_transformer/SentimentClassification.ipynb
+++ b/use_case_examples/sentiment_analysis_with_transformer/SentimentClassification.ipynb
@@ -39,6 +39,7 @@
     "\n",
     "import numpy\n",
     "import pandas as pd\n",
+    "import requests\n",
     "from sklearn.metrics import average_precision_score\n",
     "from sklearn.model_selection import GridSearchCV, train_test_split\n",
     "\n",
@@ -49,32 +50,15 @@
    "cell_type": "code",
    "execution_count": 2,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "--2023-04-06 09:49:38-- 
https://huggingface.co/datasets/osanseviero/twitter-airline-sentiment/resolve/main/Tweets.csv\r\n", - "Resolving huggingface.co (huggingface.co)... 13.32.145.73, 13.32.145.5, 13.32.145.11, ...\r\n", - "Connecting to huggingface.co (huggingface.co)|13.32.145.73|:443... connected.\r\n", - "HTTP request sent, awaiting response... 302 Found\r\n", - "Location: https://cdn-lfs.huggingface.co/repos/ce/37/ce37f07d6007921b5a0a814bd1cb03df4a2fa91f9631a025317f3a3e4acbe83c/ea94b23f41892b290dec3330bb8cf9cb6b8bc669eaae5f3a84c40f7b0de8f15e?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27Tweets.csv%3B+filename%3D%22Tweets.csv%22%3B&response-content-type=text%2Fcsv&Expires=1681033779&Policy=eyJTdGF0ZW1lbnQiOlt7IlJlc291cmNlIjoiaHR0cHM6Ly9jZG4tbGZzLmh1Z2dpbmdmYWNlLmNvL3JlcG9zL2NlLzM3L2NlMzdmMDdkNjAwNzkyMWI1YTBhODE0YmQxY2IwM2RmNGEyZmE5MWY5NjMxYTAyNTMxN2YzYTNlNGFjYmU4M2MvZWE5NGIyM2Y0MTg5MmIyOTBkZWMzMzMwYmI4Y2Y5Y2I2YjhiYzY2OWVhYWU1ZjNhODRjNDBmN2IwZGU4ZjE1ZT9yZXNwb25zZS1jb250ZW50LWRpc3Bvc2l0aW9uPSomcmVzcG9uc2UtY29udGVudC10eXBlPSoiLCJDb25kaXRpb24iOnsiRGF0ZUxlc3NUaGFuIjp7IkFXUzpFcG9jaFRpbWUiOjE2ODEwMzM3Nzl9fX1dfQ__&Signature=PvnA1Xq05ghV1ztqtUf4EEQ4HtueYlPV9xuL6bydyg%7EC2IAAq1DcepkTws%7EsMKD8xiSQWxGQBWq7QbrOr0bYwKSD4ACUYBRJ6I4iEeiI3%7EjljnlGjqDKB7khTyrkU3s8oGy6wMKRSWHORpeoiXgBHf21QVghWo5Fl-mA0NhejTQ8gDsH4rowJHzC4k77zUxY0aMuhpHQnbJlTFlYODxjHy%7ELUNL2xUJQkQymsbiCV2dmYkslpIJUdKNj5d4r7jbvfpvZ63In340WF34Ym69%7E5XHNe8v6t1Qy4fOwxg62Qe3CbNlh0Sp9ZNS48%7EZ23az9qevO2CRoSGAsBE3mmS2vCA__&Key-Pair-Id=KVTP0A1DKRTAX [following]\r\n", - "--2023-04-06 09:49:38-- https://cdn-lfs.huggingface.co/repos/ce/37/ce37f07d6007921b5a0a814bd1cb03df4a2fa91f9631a025317f3a3e4acbe83c/ea94b23f41892b290dec3330bb8cf9cb6b8bc669eaae5f3a84c40f7b0de8f15e?response-content-disposition=attachment%3B+filename*%3DUTF-8%27%27Tweets.csv%3B+filename%3D%22Tweets.csv%22%3B&response-content-type=text%2Fcsv&Expires=1681033779&Policy=eyJTdGF0ZW1lbnQiOlt7IlJlc291cmNlIjoiaHR0cHM6Ly9jZG4tbGZzLmh1Z2dpbmdmYWNlLmNvL3JlcG9zL2NlLzM3L2NlMzdmMDdkNjAwNzkyMWI1YTBhODE0YmQxY2IwM2RmNGEyZmE5MWY5NjMxYTAyNTMxN2YzYTNlNGFjYmU4M2MvZWE5NGIyM2Y0MTg5MmIyOTBkZWMzMzMwYmI4Y2Y5Y2I2YjhiYzY2OWVhYWU1ZjNhODRjNDBmN2IwZGU4ZjE1ZT9yZXNwb25zZS1jb250ZW50LWRpc3Bvc2l0aW9uPSomcmVzcG9uc2UtY29udGVudC10eXBlPSoiLCJDb25kaXRpb24iOnsiRGF0ZUxlc3NUaGFuIjp7IkFXUzpFcG9jaFRpbWUiOjE2ODEwMzM3Nzl9fX1dfQ__&Signature=PvnA1Xq05ghV1ztqtUf4EEQ4HtueYlPV9xuL6bydyg%7EC2IAAq1DcepkTws%7EsMKD8xiSQWxGQBWq7QbrOr0bYwKSD4ACUYBRJ6I4iEeiI3%7EjljnlGjqDKB7khTyrkU3s8oGy6wMKRSWHORpeoiXgBHf21QVghWo5Fl-mA0NhejTQ8gDsH4rowJHzC4k77zUxY0aMuhpHQnbJlTFlYODxjHy%7ELUNL2xUJQkQymsbiCV2dmYkslpIJUdKNj5d4r7jbvfpvZ63In340WF34Ym69%7E5XHNe8v6t1Qy4fOwxg62Qe3CbNlh0Sp9ZNS48%7EZ23az9qevO2CRoSGAsBE3mmS2vCA__&Key-Pair-Id=KVTP0A1DKRTAX\r\n", - "Resolving cdn-lfs.huggingface.co (cdn-lfs.huggingface.co)... 52.222.174.26, 52.222.174.3, 52.222.174.32, ...\r\n", - "Connecting to cdn-lfs.huggingface.co (cdn-lfs.huggingface.co)|52.222.174.26|:443... connected.\r\n", - "HTTP request sent, awaiting response... 
200 OK\r\n",
-      "Length: 3421431 (3.3M) [text/csv]\r\n",
-      "Saving to: ‘Tweets.csv’\r\n",
-      "\r\n",
-      "Tweets.csv          100%[===================>]   3.26M  --.-KB/s    in 0.02s   \r\n",
-      "\r\n",
-      "2023-04-06 09:49:39 (146 MB/s) - ‘Tweets.csv’ saved [3421431/3421431]\r\n",
-      "\r\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
-    "!wget -O Tweets.csv https://huggingface.co/datasets/osanseviero/twitter-airline-sentiment/resolve/main/Tweets.csv"
+    "url = (\n",
+    "    \"https://huggingface.co/datasets/osanseviero/twitter-airline-sentiment/resolve/main/Tweets.csv\"\n",
+    ")\n",
+    "response = requests.get(url)\n",
+    "\n",
+    "with open(\"Tweets.csv\", \"wb\") as file:\n",
+    "    file.write(response.content)"
    ]
   },
   {
@@ -93,10 +77,6 @@
     }
    ],
    "source": [
-    "# Download the data-sets\n",
-    "if not os.path.isfile(\"Tweets.csv\"):\n",
-    "    raise ValueError(\"Please launch the `download_data.sh` script in order to get the data-sets.\")\n",
-    "\n",
     "train = pd.read_csv(\"Tweets.csv\", index_col=0)\n",
     "text_X = train[\"text\"]\n",
     "y = train[\"airline_sentiment\"]\n",
diff --git a/use_case_examples/titanic/Makefile b/use_case_examples/titanic/Makefile
index 9753f08800..51224b012c 100644
--- a/use_case_examples/titanic/Makefile
+++ b/use_case_examples/titanic/Makefile
@@ -2,11 +2,12 @@
 export LC_ALL=en_US.UTF-8
 export LANG=en_US.UTF-8
 
-EXAMPLE_NAME=cifar_brevitas_finetuning
+EXAMPLE_NAME=titanic
 JUPYTER_RUN=jupyter nbconvert --to notebook --inplace --execute
 TIME_NB="${USE_CASE_DIR}/time_notebook_execution.sh"
 
 run_example: one
 
 one:
+	@bash download_data.sh
 	@$(TIME_NB) KaggleTitanic.ipynb