diff --git a/scripts/build-site.sh b/scripts/build-site.sh
index d52e2a88..7d8b4bc9 100755
--- a/scripts/build-site.sh
+++ b/scripts/build-site.sh
@@ -11,21 +11,19 @@ IFS=$'\n\t'
 
 TIMESTAMP=`date`
 
+# RKI Abwasser ----------------------------------------
 svn checkout --username $SVN_USER --password $SVN_PASSWORD --no-auth-cache --depth infinity \
 https://svn.vsp.tu-berlin.de/repos/public-svn/matsim/scenarios/countries/de/episim/original-data/Abwasser/
 
-pip install html_to_json
-pip install bs2json
+# fetch rki sewage data from GitHub into Abwasser/amelag_einzelstandorte.tsv
+wget https://raw.githubusercontent.com/robert-koch-institut/Abwassersurveillance_AMELAG/main/amelag_einzelstandorte.tsv -O Abwasser/amelag_einzelstandorte.tsv
+
+# add additional Abwasser cities here:
 python3 scripts/sewage_data_parser.py -c Köln
-python3 scripts/sewage_data_parser.py -a True
-python3 scripts/sewage_data_parser.py -g True
-# ls
-# pwd
-# cp ./Abwasser/*sewage_data* Abwasser/
-# cp ./Abwasser/sewage_combined_data.csv* Abwasser/
 
 svn commit --username $SVN_USER --password $SVN_PASSWORD --no-auth-cache -m "autobuild: $TIMESTAMP" Abwasser
 
+# RKI Infection Data ---------------------------------
 echo BUILD: Getting RKI_FILE
 RKI_FILE=https://media.githubusercontent.com/media/robert-koch-institut/SARS-CoV-2-Infektionen_in_Deutschland/main/Aktuell_Deutschland_SarsCov2_Infektionen.csv
 
diff --git a/scripts/sewage_data_parser.py b/scripts/sewage_data_parser.py
index e188e973..a76e9d15 100644
--- a/scripts/sewage_data_parser.py
+++ b/scripts/sewage_data_parser.py
@@ -2,39 +2,11 @@
 import json
 import os
 import shutil
-from matplotlib import pyplot as plt
 import pandas as pd
-import requests
-from bs4 import BeautifulSoup
-from bs2json import BS2Json
+import numpy as np
 import csv
 
-# import pandas as pd
-# import matplotlib.pyplot as plt
-
-def fetch_data_from_website(url):
-    # Send an HTTP request to the website
-    response = requests.get(url)
-    response.raise_for_status()  # Check for any HTTP errors
-    return response.text
-
-def parse_html(html_content):
-    # Parse HTML content using BeautifulSoup
-    soup = BeautifulSoup(html_content, 'html.parser')
-    return soup
-
-def find_matching_scripts(soup, text_to_find):
-    # Find all
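
For context, this change replaces the old HTML-scraping path (requests/BeautifulSoup/bs2json) with a direct TSV download: build-site.sh now fetches amelag_einzelstandorte.tsv from the AMELAG repository, and sewage_data_parser.py only needs pandas/numpy/csv. Below is a minimal sketch of what the simplified per-city step could look like; the "standort" column name and the output filename are assumptions for illustration, not taken from this diff.

# Hypothetical sketch only -- the real sewage_data_parser.py is not fully shown in this diff.
import argparse

import pandas as pd


def extract_city(city: str, tsv_path: str = "Abwasser/amelag_einzelstandorte.tsv") -> None:
    # The AMELAG file is tab-separated; "standort" (assumed column name) holds the site/city.
    df = pd.read_csv(tsv_path, sep="\t")
    city_df = df[df["standort"] == city]
    # Output path is illustrative, not taken from the diff.
    city_df.to_csv(f"Abwasser/{city}_sewage_data.csv", index=False)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--city", required=True, help="Abwasser city to extract, e.g. Köln")
    args = parser.parse_args()
    extract_city(args.city)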