diff --git a/.github/ISSUE_TEMPLATE/user-story.md b/.github/ISSUE_TEMPLATE/user-story.md
index 98ef72cc..91938c94 100644
--- a/.github/ISSUE_TEMPLATE/user-story.md
+++ b/.github/ISSUE_TEMPLATE/user-story.md
@@ -10,6 +10,7 @@ assignees: ''
**Title:**
**Description:**
+- [ ] As a [type of user], I want [an action or feature], so that [benefit or value]
**Wireframe:**
diff --git a/.github/workflows/build-release.yaml b/.github/workflows/build-release.yaml
deleted file mode 100644
index 3e232aaf..00000000
--- a/.github/workflows/build-release.yaml
+++ /dev/null
@@ -1,150 +0,0 @@
-## For each release, the value of workflow name, branches, PR_NUMBER and RELEASE_NAME need to be adjusted accordingly
-## Also change the .pipelin/lib/config.js version number
-name: CTHUB v0.2.0
-
-on:
- # push:
- # branches: [ release-0.2.0 ]
- workflow_dispatch:
- workflow_call:
-
-env:
- ## The pull request number of the Tracking pull request to merge the release branch to main
- PR_NUMBER: 73
- RELEASE_NAME: release-0.2.0
-
-jobs:
-
- ## This is the CI job
- build:
-
- name: Build CTHUB on Openshift
- runs-on: ubuntu-latest
- timeout-minutes: 60
-
- steps:
-
- ## it will checkout to /home/runner/work/cthub/cthub
- - name: Check out repository
- uses: actions/checkout@v3
-
- # open it when cthub updated the python packages
- #- name: Run django tests
- # uses: kuanfandevops/django-test-action@cthub-django-test
- # with:
- # settings-dir-path: "backend/cthub"
- # requirements-file: "backend/requirements.txt"
- # managepy-dir: backend
-
- ## Log in to Openshift with a token of service account
- - name: Log in to Openshift
- uses: redhat-actions/oc-login@v1.3
- with:
- openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }}
- openshift_token: ${{ secrets.OPENSHIFT_TOKEN }}
- insecure_skip_tls_verify: true
- namespace: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools
-
- ## Run build on Openshift
- - name: Run build
- run: |
- cd .pipeline
- npm install
- npm run build -- --pr=${{ env.PR_NUMBER }} --env=build
-
- # deploy-on-dev:
-
- # name: Deploy CTHUB on Dev Environment
- # runs-on: ubuntu-latest
- # timeout-minutes: 60
- # needs: build
-
- # steps:
-
- # - name: Check out repository
- # uses: actions/checkout@v3
-
- # - name: Log in to Openshift
- # uses: redhat-actions/oc-login@v1.3
- # with:
- # openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }}
- # openshift_token: ${{ secrets.OPENSHIFT_TOKEN }}
- # insecure_skip_tls_verify: true
- # namespace: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev
-
- # - name: Run BCDK deployment on CTHUB Dev environment
- # run: |
- # cd .pipeline
- # echo "Deploying CTHUB ${{ env.RELEASE_NAME }} on Dev"
- # npm install
- # npm run deploy -- --pr=${{ env.PR_NUMBER }} --env=dev
-
- # deploy-on-test:
-
- # name: Deploy CTHUB on Test Environment
- # runs-on: ubuntu-latest
- # timeout-minutes: 60
- # needs: deploy-on-dev
-
- # steps:
-
- # - name: Check out repository
- # uses: actions/checkout@v3
-
- # - name: Log in to Openshift
- # uses: redhat-actions/oc-login@v1.3
- # with:
- # openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }}
- # openshift_token: ${{ secrets.OPENSHIFT_TOKEN }}
- # insecure_skip_tls_verify: true
- # namespace: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-test
-
- # - name: Ask for approval for CTHUB Test deployment
- # uses: trstringer/manual-approval@v1.6.0
- # with:
- # secret: ${{ github.TOKEN }}
- # approvers: emi-hi,tim738745,kuanfandevops,JulianForeman
- # minimum-approvals: 1
- # issue-title: "CTHUB ${{ env.RELEASE_NAME }} Test Deployment"
-
- # - name: Run BCDK deployment on CTHUB Test environment
- # run: |
- # cd .pipeline
- # echo "Deploying CTHUB ${{ env.RELEASE_NAME }} on Test"
- # npm install
- # npm run deploy -- --pr=${{ env.PR_NUMBER }} --env=test
-
- deploy-on-prod:
-
- name: Deploy CTHUB on Prod Environment
- runs-on: ubuntu-latest
- timeout-minutes: 60
- # needs: deploy-on-test
-
- steps:
-
- - name: Check out repository
- uses: actions/checkout@v3
-
- - name: Log in to Openshift
- uses: redhat-actions/oc-login@v1.3
- with:
- openshift_server_url: ${{ secrets.OPENSHIFT_SERVER }}
- openshift_token: ${{ secrets.OPENSHIFT_TOKEN }}
- insecure_skip_tls_verify: true
- namespace: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-prod
-
- - name: Ask for approval for CTHUB Prod deployment
- uses: trstringer/manual-approval@v1.6.0
- with:
- secret: ${{ github.TOKEN }}
- approvers: emi-hi,tim738745,kuanfandevops,JulianForeman
- minimum-approvals: 2
- issue-title: "CTHUB ${{ env.RELEASE_NAME }} Prod Deployment"
-
- - name: Run BCDK deployment on CTHUB Prod environment
- run: |
- cd .pipeline
- echo "Deploying CTHUB ${{ env.RELEASE_NAME }} on Prod"
- npm install
- npm run deploy -- --pr=${{ env.PR_NUMBER }} --env=prod
diff --git a/.github/workflows/dev-ci.yaml b/.github/workflows/dev-ci.yaml
index 42437304..9c846cb1 100644
--- a/.github/workflows/dev-ci.yaml
+++ b/.github/workflows/dev-ci.yaml
@@ -1,17 +1,17 @@
## For each release, the value of workflow name, branches and VERSION need to be adjusted accordingly
-name: CTHUB 0.2.0 Dev CI
+name: CTHUB 0.3.0 Dev CI
on:
push:
- branches: [ release-0.2.0 ]
- paths:
- - frontend/**
- - backend/**
+ branches: [ release-0.3.0 ]
+ # paths:
+ # - frontend/**
+ # - backend/**
workflow_dispatch:
env:
- VERSION: 0.2.0
+ VERSION: 0.3.0
GIT_URL: https://github.com/bcgov/cthub.git
TOOLS_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-tools
DEV_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev
@@ -23,9 +23,36 @@ concurrency:
jobs:
+ install-oc:
+ runs-on: ubuntu-latest
+ outputs:
+ cache-hit: ${{ steps.cache.outputs.cache-hit }}
+ steps:
+ - name: Check out repository
+ uses: actions/checkout@v4.1.1
+
+ - name: Set up cache for OpenShift CLI
+ id: cache
+ uses: actions/cache@v4.2.0
+ with:
+ path: /usr/local/bin/oc # Path where the `oc` binary will be installed
+ key: oc-cli-${{ runner.os }}
+
+ - name: Install OpenShift CLI (if not cached)
+ if: steps.cache.outputs.cache-hit != 'true'
+ run: |
+ curl -LO https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable/openshift-client-linux.tar.gz
+ tar -xvf openshift-client-linux.tar.gz
+ sudo mv oc /usr/local/bin/
+ oc version --client
+
+ - name: Confirm OpenShift CLI is Available
+ run: oc version --client
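+ # Later jobs restore the binary using the same oc-cli-${{ runner.os }} cache key,
+ # so the client is not re-downloaded in every job.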
+
set-pre-release:
name: Calculate pre-release number
runs-on: ubuntu-latest
+ needs: [install-oc]
outputs:
output1: ${{ steps.set-pre-release.outputs.PRE_RELEASE }}
@@ -49,6 +76,12 @@ jobs:
- name: Check out repository
uses: actions/checkout@v4.1.1
+ - name: Restore oc command from Cache
+ uses: actions/cache@v4.2.0
+ with:
+ path: /usr/local/bin/oc
+ key: oc-cli-${{ runner.os }}
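+ # actions/cache restores here and re-saves on job completion; the restore-only
+ # sub-action (actions/cache/restore) would also suffice for read-only use.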
+
- name: Log in to Openshift
uses: redhat-actions/oc-login@v1.3
with:
@@ -81,6 +114,14 @@ jobs:
oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-task-queue-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1
oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-task-queue:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.DEV_NAMESPACE }}/cthub-task-queue:${{ env.VERSION }}-${{ env.PRE_RELEASE }}
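+ # vinpower appears to be the new VIN-decoder image introduced in this release;
+ # it is built and tagged alongside the backend, frontend, and task-queue images.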
+ - name: Build CTHUB Vinpower
+ run: |
+ cd openshift/templates/vinpower
+ oc process -f ./vinpower-bc.yaml NAME=cthub SUFFIX=-${{ env.VERSION }}-${{ env.PRE_RELEASE }} VERSION=${{ env.VERSION }}-${{ env.PRE_RELEASE }} GIT_URL=${{ env.GIT_URL }} GIT_REF=release-${{ env.VERSION }} | oc apply --wait=true -f - -n ${{ env.TOOLS_NAMESPACE }}
+ sleep 5s
+ oc -n ${{ env.TOOLS_NAMESPACE }} wait --for=condition=Complete --timeout=900s build/cthub-vinpower-${{ env.VERSION }}-${{ env.PRE_RELEASE }}-1
+ oc tag ${{ env.TOOLS_NAMESPACE }}/cthub-vinpower:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.DEV_NAMESPACE }}/cthub-vinpower:${{ env.VERSION }}-${{ env.PRE_RELEASE }}
+
deploy:
name: Deploy CTHUB on Dev
@@ -100,20 +141,24 @@ jobs:
ref: main
ssh-key: ${{ secrets.MANIFEST_REPO_DEPLOY_KEY }}
- - name: Update frontend tag
- uses: mikefarah/yq@v4.40.5
- with:
- cmd: yq -i '.frontend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml
-
- - name: Update backend tag
- uses: mikefarah/yq@v4.40.5
- with:
- cmd: yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml
-
- - name: Update task-queue tag
+ - name: Update tags
uses: mikefarah/yq@v4.40.5
with:
- cmd: yq -i '.task-queue.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml
+ cmd: |
+ yq -i '.frontend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml
+ yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml
+ yq -i '.task-queue.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml
+ yq -i '.vinpower.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml
+
+ # - name: Update backend tag
+ # uses: mikefarah/yq@v4.40.5
+ # with:
+ # cmd: yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml
+
+ # - name: Update task-queue tag
+ # uses: mikefarah/yq@v4.40.5
+ # with:
+ # cmd: yq -i '.task-queue.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-dev.yaml
- name: GitHub Commit & Push
run: |
diff --git a/.github/workflows/prod-ci.yaml b/.github/workflows/prod-ci.yaml
index a561b435..590fa91e 100644
--- a/.github/workflows/prod-ci.yaml
+++ b/.github/workflows/prod-ci.yaml
@@ -1,11 +1,11 @@
## For each release, the value of workflow name, branches and VERSION need to be adjusted accordingly
-name: CTHUB 0.2.0 Prod CI
+name: CTHUB 0.3.0 Prod CI
on:
workflow_dispatch:
env:
- VERSION: 0.2.0
+ VERSION: 0.3.0
GIT_URL: https://github.com/bcgov/cthub.git
TEST_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-test
PROD_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-prod
@@ -16,15 +16,46 @@ concurrency:
cancel-in-progress: true
jobs:
+ install-oc:
+ runs-on: ubuntu-latest
+ outputs:
+ cache-hit: ${{ steps.cache.outputs.cache-hit }}
+ steps:
+ - name: Check out repository
+ uses: actions/checkout@v4.1.1
+
+ - name: Set up cache for OpenShift CLI
+ id: cache
+ uses: actions/cache@v4.2.0
+ with:
+ path: /usr/local/bin/oc # Path where the `oc` binary will be installed
+ key: oc-cli-${{ runner.os }}
+
+ - name: Install OpenShift CLI (if not cached)
+ if: steps.cache.outputs.cache-hit != 'true'
+ run: |
+ curl -LO https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable/openshift-client-linux.tar.gz
+ tar -xvf openshift-client-linux.tar.gz
+ sudo mv oc /usr/local/bin/
+ oc version --client
+
+ - name: Confirm OpenShift CLI is Available
+ run: oc version --client
set-pre-release:
name: Find Test deployment pre-release number
runs-on: ubuntu-latest
+ needs: [install-oc]
outputs:
output1: ${{ steps.set-pre-release.outputs.PRE_RELEASE }}
steps:
+ - name: Restore oc command from Cache
+ uses: actions/cache@v4.2.0
+ with:
+ path: /usr/local/bin/oc
+ key: oc-cli-${{ runner.os }}
- name: Log in to Openshift
uses: redhat-actions/oc-login@v1.3
@@ -55,7 +86,13 @@ jobs:
secret: ${{ github.TOKEN }}
approvers: emi-hi,kuanfandevops,tim738745,JulianForeman
minimum-approvals: 2
- issue-title: "CTHUB release-${{ env.VERSION }}-${{ env.PRE_RELEASE }} PRODUCTION Deployment"
+ issue-title: "CTHUB release-${{ env.VERSION }}-${{ env.PRE_RELEASE }} PRODUCTION Deployment"
+
+ - name: Restore oc command from Cache
+ uses: actions/cache@v4.2.0
+ with:
+ path: /usr/local/bin/oc
+ key: oc-cli-${{ runner.os }}
- name: Log in to Openshift
uses: redhat-actions/oc-login@v1.3
@@ -70,6 +107,7 @@ jobs:
oc tag ${{ env.TEST_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.PROD_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }}
oc tag ${{ env.TEST_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.PROD_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }}
oc tag ${{ env.TEST_NAMESPACE }}/cthub-task-queue:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.PROD_NAMESPACE }}/cthub-task-queue:${{ env.VERSION }}-${{ env.PRE_RELEASE }}
+ oc tag ${{ env.TEST_NAMESPACE }}/cthub-vinpower:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.PROD_NAMESPACE }}/cthub-vinpower:${{ env.VERSION }}-${{ env.PRE_RELEASE }}
- name: Checkout Manifest repository
uses: actions/checkout@v4.1.1
@@ -81,17 +119,11 @@ jobs:
- name: Update frontend tag
uses: mikefarah/yq@v4.40.5
with:
- cmd: yq -i '.frontend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-prod.yaml
-
- - name: Update backend tag
- uses: mikefarah/yq@v4.40.5
- with:
- cmd: yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-prod.yaml
-
- - name: Update backend tag
- uses: mikefarah/yq@v4.40.5
- with:
- cmd: yq -i '.task-queue.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-prod.yaml
+ cmd: |
+ yq -i '.frontend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-prod.yaml
+ yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-prod.yaml
+ yq -i '.task-queue.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-prod.yaml
+ yq -i '.vinpower.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-prod.yaml
- name: GitHub Commit & Push
run: |
diff --git a/.github/workflows/test-ci.yaml b/.github/workflows/test-ci.yaml
index 6a8da706..29161e48 100644
--- a/.github/workflows/test-ci.yaml
+++ b/.github/workflows/test-ci.yaml
@@ -1,11 +1,11 @@
## For each release, the value of workflow name, branches and VERSION need to be adjusted accordingly
-name: CTHUB 0.2.0 Test CI
+name: CTHUB 0.3.0 Test CI
on:
workflow_dispatch:
env:
- VERSION: 0.2.0
+ VERSION: 0.3.0
GIT_URL: https://github.com/bcgov/cthub.git
DEV_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-dev
TEST_NAMESPACE: ${{ secrets.OPENSHIFT_NAMESPACE_PLATE }}-test
@@ -16,15 +16,46 @@ concurrency:
cancel-in-progress: true
jobs:
+ install-oc:
+ runs-on: ubuntu-latest
+ outputs:
+ cache-hit: ${{ steps.cache.outputs.cache-hit }}
+ steps:
+ - name: Check out repository
+ uses: actions/checkout@v4.1.1
+
+ - name: Set up cache for OpenShift CLI
+ id: cache
+ uses: actions/cache@v4.2.0
+ with:
+ path: /usr/local/bin/oc # Path where the `oc` binary will be installed
+ key: oc-cli-${{ runner.os }}
+
+ - name: Install OpenShift CLI (if not cached)
+ if: steps.cache.outputs.cache-hit != 'true'
+ run: |
+ curl -LO https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable/openshift-client-linux.tar.gz
+ tar -xvf openshift-client-linux.tar.gz
+ sudo mv oc /usr/local/bin/
+ oc version --client
+
+ - name: Confirm OpenShift CLI is Available
+ run: oc version --client
set-pre-release:
name: Find Dev deployment pre-release number
runs-on: ubuntu-latest
+ needs: [install-oc]
outputs:
output1: ${{ steps.set-pre-release.outputs.PRE_RELEASE }}
steps:
+ - name: Restore oc command from Cache
+ uses: actions/cache@v4.2.0
+ with:
+ path: /usr/local/bin/oc
+ key: oc-cli-${{ runner.os }}
- name: Log in to Openshift
uses: redhat-actions/oc-login@v1.3
@@ -56,6 +87,12 @@ jobs:
approvers: emi-hi,kuanfandevops,tim738745,JulianForeman
minimum-approvals: 1
issue-title: "CTHUB release-${{ env.VERSION }}-${{ env.PRE_RELEASE }} Test Deployment"
+
+ - name: Restore oc command from Cache
+ uses: actions/cache@v4.2.0
+ with:
+ path: /usr/local/bin/oc
+ key: oc-cli-${{ runner.os }}
- name: Log in to Openshift
uses: redhat-actions/oc-login@v1.3
@@ -70,6 +107,7 @@ jobs:
oc tag ${{ env.DEV_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.TEST_NAMESPACE }}/cthub-backend:${{ env.VERSION }}-${{ env.PRE_RELEASE }}
oc tag ${{ env.DEV_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.TEST_NAMESPACE }}/cthub-frontend:${{ env.VERSION }}-${{ env.PRE_RELEASE }}
oc tag ${{ env.DEV_NAMESPACE }}/cthub-task-queue:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.TEST_NAMESPACE }}/cthub-task-queue:${{ env.VERSION }}-${{ env.PRE_RELEASE }}
+ oc tag ${{ env.DEV_NAMESPACE }}/cthub-vinpower:${{ env.VERSION }}-${{ env.PRE_RELEASE }} ${{ env.TEST_NAMESPACE }}/cthub-vinpower:${{ env.VERSION }}-${{ env.PRE_RELEASE }}
- name: Checkout Manifest repository
uses: actions/checkout@v4.1.1
@@ -81,17 +119,11 @@ jobs:
- name: Update frontend tag
uses: mikefarah/yq@v4.40.5
with:
- cmd: yq -i '.frontend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml
-
- - name: Update backend tag
- uses: mikefarah/yq@v4.40.5
- with:
- cmd: yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml
-
- - name: Update task-queue tag
- uses: mikefarah/yq@v4.40.5
- with:
- cmd: yq -i '.task-queue.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml
+ cmd: |
+ yq -i '.frontend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml
+ yq -i '.backend.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml
+ yq -i '.task-queue.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml
+ yq -i '.vinpower.image.tag = "${{ env.VERSION }}-${{ env.PRE_RELEASE }}"' cthub/values-test.yaml
- name: GitHub Commit & Push
run: |
diff --git a/django/README.md b/django/README.md
index f2da1074..6bb2b6a2 100644
--- a/django/README.md
+++ b/django/README.md
@@ -46,8 +46,8 @@ use the same as above to load fixtures
docker-compose exec api bash
python manage.py loaddata api/fixtures/0001_add_ldv_rebates_datasets.json
-
-etc
+or
+python manage.py loaddata api/fixtures/00*
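+(the shell expands the glob in filename order, so the numbered fixtures load sequentially)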
## Creating User Account
After running all the fixtures to create the dataset dropdown list and the user_permissions table.
diff --git a/django/api/constants.py b/django/api/constants/constants.py
similarity index 69%
rename from django/api/constants.py
rename to django/api/constants/constants.py
index f613d2bf..ffe20391 100644
--- a/django/api/constants.py
+++ b/django/api/constants/constants.py
@@ -2,8 +2,6 @@
from decimal import Decimal
from enum import Enum
-import pandas as pd
-
from api.models.arc_project_tracking import ARCProjectTracking
from api.models.charger_rebates import ChargerRebates
from api.models.data_fleets import DataFleets
@@ -13,6 +11,7 @@
from api.models.public_charging import PublicCharging
from api.models.scrap_it import ScrapIt
from api.models.go_electric_rebates import GoElectricRebates
+from api.models.cvp_data import CVPData
from api.services.spreadsheet_uploader_prep import (
prepare_arc_project_tracking,
prepare_hydrogen_fleets,
@@ -21,44 +20,66 @@
prepare_public_charging,
prepare_scrap_it,
prepare_go_electric_rebates,
+ prepare_cvp_data,
+ validate_phone_numbers,
+ typo_checker,
+ location_checker,
+ email_validator,
+ validate_field_values,
+ region_checker,
+ format_postal_codes
)
+from api.services.resolvers import get_google_resolver
+from api.constants.misc import GER_VALID_FIELD_VALUES, ARC_VALID_FIELD_VALUES, LOCALITY_FEATURES_MAP, CVP_DATA_VALID_FIELD_VALUES
+
class ARCProjectTrackingColumns(Enum):
- FUNDING_CALL = "Funding Call"
+ REFERENCE_NUMBER = "Ref #"
PROPONENT = "Proponent"
- REF_NUMBER = "Ref #"
+ STATUS = "Status"
+ FUNDING_CALL = "Funding Call"
PROJECT_TITLE = "Project Title"
+ VEHICLE_CATEGORY = "Vehicle Category"
+ ZEV_SUB_SECTOR = "ZEV Sub-Section"
+ FUEL_TYPE = "Fuel Type"
+ RETROFIT = "Retrofit"
PRIMARY_LOCATION = "Primary Location"
- STATUS = "Status"
- ARC_FUNDING = "ARC Funding"
- FUNDS_ISSUED = "Funds Issued"
+ ECONOMIC_REGION = "Economic Region"
+ JOBS = "Jobs (FTEs)"
+ FUNDS_COMMITED = "Funds Committed"
+ FUNDS_DISBURSED = "Funds Disbursed"
+ REMAINING_DISBURSED = "Remaining To Disburse"
+ TOTAL_PROJECT_VALUE = "Total Project Value"
START_DATE = "Start Date"
COMPLETION_DATE = "Completion Date"
- TOTAL_PROJECT_VALUE = "Total Project Value"
- ZEV_SUB_SECTOR = "ZEV Sub-Sector"
- ON_ROAD_OFF_ROAD = "On-Road/Off-Road"
- FUEL_TYPE = "Fuel Type"
+ COMPLETE_OR_TERMINATION_DATE = "Complete or Termination date"
PUBLICLY_ANNOUNCED = "Publicly Announced"
-
+ NOTES = "Notes"
class ArcProjectTrackingColumnMapping(Enum):
- funding_call = "Funding Call"
- proponent = "Proponent"
reference_number = "Ref #"
+ proponent = "Proponent"
+ status = "Status"
+ funding_call = "Funding Call"
project_title = "Project Title"
+ vehicle_category = "Vehicle Category"
+ zev_sub_sector = "ZEV Sub-Section"
+ fuel_type = "Fuel Type"
+ retrofit = "Retrofit"
primary_location = "Primary Location"
- status = "Status"
- arc_funding = "ARC Funding"
- funds_issued = "Funds Issued"
+ economic_region = "Economic Region"
+ jobs = "Jobs (FTEs)"
+ funds_commited = "Funds Committed"
+ funds_disbursed = "Funds Disbursed"
+ remaining_disbursed = "Remaining To Disburse"
+ total_project_value = "Total Project Value"
start_date = "Start Date"
completion_date = "Completion Date"
- total_project_value = "Total Project Value"
- zev_sub_sector = "ZEV Sub-Sector"
- on_road_off_road = "On-Road/Off-Road"
- fuel_type = "Fuel Type"
+ complete_or_termination_date = "Complete or Termination date"
publicly_announced = "Publicly Announced"
-
+ notes = "Notes"
class EVChargingRebatesColumns(Enum):
ORGANIZATION = "Organization"
@@ -376,11 +397,13 @@ class GoElectricRebatesColumns(Enum):
MANUFACTURER = "Manufacturer"
MODEL = "Model"
CITY = "City"
- POSTAL_CODE = "Postal Code"
- PHONE = "Phone"
+ POSTAL_CODE = "Postal code"
+ PHONE = "Phone Number"
EMAIL = "Email"
- VIN = "VIN"
+ VIN = "VIN Number"
VEHICLE_CLASS = "Class"
+ REBATE_ADJUSTMENT = "Rebate adjustment (discount)"
+ NOTES = "Notes"
class GoElectricRebatesColumnMapping(Enum):
@@ -395,31 +418,134 @@ class GoElectricRebatesColumnMapping(Enum):
manufacturer = "Manufacturer"
model = "Model"
city = "City"
- postal_code = "Postal Code"
- phone = "Phone"
+ postal_code = "Postal code"
+ phone = "Phone Number"
email = "Email"
- vin = "VIN"
+ vin = "VIN Number"
vehicle_class = "Class"
+ rebate_adjustment = "Rebate adjustment (discount)"
+ notes = "Notes"
+class CVPDataColumns(Enum):
+ FUNDING_CALL = "FC"
+ PROJECT_IDENTIFIER = "Project Identifier"
+ APPLICANT_NAME = "Name of Applicant"
+ RANK = "Rank"
+ STATUS = "Status"
+ SCORE = "Score"
+ VEHICLE_DEPLOYED = "Vehicle Deployed"
+ VEHICLE_CATEGORY = "Vehicle Category"
+ DRIVE_TYPE = "Drive Type"
+ VEHICLE_TYPE = "Vehicle Type"
+ ROAD_CLASS = "Class"
+ USE_CASE = "Use Case"
+ MAKE_AND_MODEL = "Vehicle Make and Model"
+ ECONOMIC_REGION = "Economic Region"
+ START_DATE = "Start Date"
+ COMPLETION_DATE = "Completion Date"
+ PROJECT_TYPE = "Project Type"
+ CLASS_3 = "Class 3"
+ CLASS_4 = "Class 4"
+ CLASS_5 = "Class 5"
+ CLASS_6 = "Class 6"
+ CLASS_7 = "Class 7"
+ CLASS_8 = "Class 8"
+ ON_ROAD_TOTAL = "On Road Total"
+ OFF_ROAD = "Off-Road"
+ LEVEL_2_CHARGER = "Level 2 Charger (3.3 kW to 19.2 kW)"
+ LEVEL_3_CHARGER = "Level 3 Charger (20 kW to 49 kW)"
+ HIGH_LEVEL_3_CHARGER = "Level 3 Charger (50 kW to 99kW)"
+ LEVEL_CHARGER = "Level Charger (100 kW and above)"
+ OTHER_CHARGER = "Other Charger"
+ H2_FUELING_STATION = "H2 Fueling Station"
+ CHARGER_BRAND = "Charger Brand"
+ H2_FUELLING_STATION_DESCRIPTION = "H2 Fuelling Station Description"
+ GHG_EMISSION_REDUCTION = "Proponent's GHG Emission Reduction (tCO2e/yr)"
+ ESTIMATED_GHG_EMISSION_REDUCTION = "Le-ef Estimated GHG Reduction (tCO2e/yr)"
+ FUNDING_EFFICIENCY = "Funding Efficiency for Emmision Abatment ($/tCO2e)"
+ MARKET_EMISSION_REDUCTIONS = "Market Emission Reductions (tCO2e by 2030)"
+ CVP_FUNDING_REQUEST = "CVP Program Funding Request (Initial)"
+ CVP_FUNDING_CONTRIBUTION = "CVP Funding (approved - Contribution Agreement)"
+ EXTERNAL_FUNDING = "External Funding"
+ PROPONENT_FUNDING = "Proponent funding"
+ PROJECT_COST_INITIAL = "Total project cost (initial)"
+ PROJECT_COST_REVISED = "Total Project Cost (revised)"
+ FUNDING_SOURCE = "Funding Source"
+ NOTES = "Notes"
+ IMHZEV = "iMHZEV"
+
+class CVPDataColumnMapping(Enum):
+ funding_call = "FC"
+ project_identifier = "Project Identifier"
+ applicant_name = "Name of Applicant"
+ rank = "Rank"
+ status = "Status"
+ score = "Score"
+ vehicle_deployed = "Vehicle Deployed"
+ vehicle_category = "Vehicle Category"
+ drive_type = "Drive Type"
+ vehicle_type = "Vehicle Type"
+ road_class = "Class"
+ use_case = "Use Case"
+ make_and_model = "Vehicle Make and Model"
+ economic_region = "Economic Region"
+ start_date = "Start Date"
+ completion_date = "Completion Date"
+ project_type = "Project Type"
+ class_3 = "Class 3"
+ class_4 = "Class 4"
+ class_5 = "Class 5"
+ class_6 = "Class 6"
+ class_7 = "Class 7"
+ class_8 = "Class 8"
+ on_road_total = "On Road Total"
+ off_road = "Off-Road"
+ level_2_charger = "Level 2 Charger (3.3 kW to 19.2 kW)"
+ level_3_charger = "Level 3 Charger (20 kW to 49 kW)"
+ high_level_3_charger = "Level 3 Charger (50 kW to 99kW)"
+ level_charger = "Level Charger (100 kW and above)"
+ other_charger = "Other Charger"
+ h2_fuelling_station = "H2 Fueling Station"
+ charger_brand = "Charger Brand"
+ h2_fuelling_station_description = "H2 Fuelling Station Description"
+ ghg_emission_reduction = "Proponent's GHG Emission Reduction (tCO2e/yr)"
+ estimated_ghg_emission_reduction = "Le-ef Estimated GHG Reduction (tCO2e/yr)"
+ funding_efficiency = "Funding Efficiency for Emmision Abatment ($/tCO2e)"
+ market_emission_reductions = "Market Emission Reductions (tCO2e by 2030)"
+ cvp_funding_request = "CVP Program Funding Request (Initial)"
+ cvp_funding_contribution = "CVP Funding (approved - Contribution Agreement)"
+ external_funding = "External Funding"
+ proponent_funding = "Proponent funding"
+ project_cost_initial = "Total project cost (initial)"
+ project_cost_revised = "Total Project Cost (revised)"
+ funding_source = "Funding Source"
+ notes = "Notes"
+ imhzev = "iMHZEV"
FIELD_TYPES = {
"ARC Project Tracking": {
- "funding_call": str,
- "proponent": str,
"reference_number": str,
- "project_title": str,
- "primary_location": str,
+ "proponent": str,
"status": str,
- "arc_funding": int,
- "funds_issued": int,
- "start_date": str,
- "completion_date": str,
- "total_project_value": int,
+ "funding_call": str,
+ "project_title": str,
+ "vehicle_category": str,
"zev_sub_sector": str,
- "on_road_off_road": str,
"fuel_type": str,
- "publicly_announced": bool,
+ "retrofit": str,
+ "primary_location": str,
+ "economic_region": str,
+ "jobs": int,
+ "funds_commited": int,
+ "funds_disbursed": int,
+ "remaining_disbursed": int,
+ "total_project_value": int,
+ "start_date": datetime.date,
+ "completion_date": datetime.date,
+ "complete_or_termination_date": datetime.date,
+ "publicly_announced": str,
+ "notes": str,
},
"EV Charging Rebates": {
"organization": str,
@@ -576,8 +702,59 @@ class GoElectricRebatesColumnMapping(Enum):
"email": str,
"vin": str,
"vehicle_class": str,
+ "rebate_adjustment": str,
+ "notes": str,
+ },
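+ # Expected Python type for each CVP Data column; presumably used by the
+ # spreadsheet uploader to coerce parsed cells before rows are created.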
+ "CVP Data": {
+ "funding_call": int,
+ "project_identifier": int,
+ "applicant_name": str,
+ "rank": int,
+ "status": str,
+ "score": int,
+ "vehicle_deployed": str,
+ "vehicle_category": str,
+ "drive_type": str,
+ "vehicle_type": str,
+ "road_class": str,
+ "use_case": str,
+ "make_and_model": str,
+ "economic_region": str,
+ "start_date": datetime.date,
+ "completion_date": datetime.date,
+ "project_type": str,
+ "class_3": int,
+ "class_4": int,
+ "class_5": int,
+ "class_6": int,
+ "class_7": int,
+ "class_8": int,
+ "on_road_total": int,
+ "off_road": int,
+ "level_2_charger": int,
+ "level_3_charger": int,
+ "high_level_3_charger": int,
+ "level_charger": int,
+ "other_charger": int,
+ "h2_fuelling_station": int,
+ "charger_brand": str,
+ "h2_fuelling_station_description": str,
+ "ghg_emission_reduction": int,
+ "estimated_ghg_emission_reduction": int,
+ "funding_efficiency": int,
+ "market_emission_reductions": int,
+ "cvp_funding_request": int,
+ "cvp_funding_contribution": int,
+ "external_funding": int,
+ "proponent_funding": int,
+ "project_cost_initial": int,
+ "project_cost_revised": int,
+ "funding_source": str,
+ "notes": str,
+ "imhzev": str,
},
}
DATASET_CONFIG = {
@@ -585,8 +762,12 @@ class GoElectricRebatesColumnMapping(Enum):
"model": ARCProjectTracking,
"columns": ARCProjectTrackingColumns,
"column_mapping": ArcProjectTrackingColumnMapping,
- "sheet_name": "Project_Tracking",
+ "sheet_name": "ARC Data",
"preparation_functions": [prepare_arc_project_tracking],
+ "validation_functions": [
+ {'function': validate_field_values, "columns": [], "kwargs": {"indices_offset":2, "fields_and_values": ARC_VALID_FIELD_VALUES}},
+ {"function": region_checker, "columns": ['Economic Region'], "kwargs": {"indices_offset":2}},
+ ]
},
"EV Charging Rebates": {
"model": ChargerRebates,
@@ -641,7 +822,24 @@ class GoElectricRebatesColumnMapping(Enum):
"model": GoElectricRebates,
"columns": GoElectricRebatesColumns,
"column_mapping": GoElectricRebatesColumnMapping,
- "sheet_name": "Main list",
+ "sheet_name": "Distribution List - Master",
"preparation_functions": [prepare_go_electric_rebates],
+ "validation_functions": [
+ {"function": validate_phone_numbers, "columns": ["Phone Number"], "kwargs": {"indices_offset": 2}},
+ {"function": typo_checker, "columns": ["Applicant Name"], "kwargs": {"cutoff": 0.8, "indices_offset": 2}},
+ {"function": location_checker, "columns": ["City"], "kwargs": {"columns_to_features_map": {"City": LOCALITY_FEATURES_MAP}, "indices_offset":2}},
+ {"function": email_validator, "columns": ["Email"], "kwargs": {"indices_offset":2, "get_resolver": get_google_resolver}},
+ {"function": validate_field_values, "columns": [], "kwargs": {"indices_offset":2, "fields_and_values": GER_VALID_FIELD_VALUES}},
+ {"function": format_postal_codes, "columns": ["Postal code"], "kwargs": {"indices_offset":2, "validate": True}}
+ ]
+ },
+ "CVP Data": {
+ "model": CVPData,
+ "columns": CVPDataColumns,
+ "column_mapping": CVPDataColumnMapping,
+ "sheet_name": "Data",
+ "preparation_functions": [prepare_cvp_data],
+ "validation_functions": [{"function": validate_field_values, "columns": [], "kwargs": {"indices_offset":2, "fields_and_values": CVP_DATA_VALID_FIELD_VALUES, "delimiter": ","}},]
},
}
diff --git a/django/api/decoder_constants.py b/django/api/constants/decoder.py
similarity index 99%
rename from django/api/decoder_constants.py
rename to django/api/constants/decoder.py
index dc6eca6f..8915e498 100644
--- a/django/api/decoder_constants.py
+++ b/django/api/constants/decoder.py
@@ -1,4 +1,3 @@
-import os
from enum import Enum
from functools import partial
from api.models.decoded_vin_record import VpicDecodedVinRecord, VinpowerDecodedVinRecord
diff --git a/django/api/constants/misc.py b/django/api/constants/misc.py
new file mode 100644
index 00000000..5ede204a
--- /dev/null
+++ b/django/api/constants/misc.py
@@ -0,0 +1,109 @@
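+# Canadian telephone area codes, presumably consumed by validate_phone_numbers;
+# a few codes (782, 867, 902) are listed more than once, which is harmless for
+# membership checks.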
+AREA_CODES = [
+ 587, 368, 403, 825, 780, 236, 672, 604, 778, 250,
+ 584, 431, 204, 506, 709, 867, 782, 902, 867, 365,
+ 226, 647, 519, 289, 742, 807, 548, 753, 249, 683,
+ 437, 905, 343, 613, 705, 416, 782, 902, 450, 418,
+ 873, 468, 367, 819, 579, 581, 438, 354, 514, 263,
+ 306, 474, 639, 867,
+]
+
+# map of feature category codes to feature types for locality features:
+LOCALITY_FEATURES_MAP = {
+ 1: ["City", "District Municipality (1)", "Resort Municipality", "Village (1)", "Town"],
+ 2: ["Community", "First Nation Village", "Former Locality", "Locality", "Recreational Community"],
+ 3: ["Urban Community"],
+ 5: ["Indian Government District", "Indian Government District : Land Unit"],
+ 6: ["Indian Reserve-Réserve indienne", "Region", "Regional District"],
+ 28: ["Canadian Forces Base", "Canadian Forces Station", "Recreation Facility"],
+}
+
+GER_VALID_FIELD_VALUES = {
+ 'Approvals': ['Approved', 'Approved Fraudulent'],
+ 'Category': [
+ 'Forklift', 'Low Speed', 'Motorcycle', 'Medium & Heavy Duty',
+ 'Airport & Port Specialty Vehicle', 'Cargo E-Bike', 'Utility Vehicle'
+ ],
+ 'Fleet/Individuals': ['Fleet', 'Individual'],
+ 'Rebate adjustment (discount)': ['Yes'],
+ 'Class': ['2B', '3', '4', '5', '6', '7', '8']
+ }
+
+ARC_VALID_FIELD_VALUES = {
+ 'Funding Call': ['ARC-2018-1', 'ARC-2020-2'],
+ 'Status': ['Approved', 'Completed', 'Terminated'],
+ 'Vehicle Category': ['On-Road', 'On/Off Road', 'Marine', 'Aviation', 'Off-Road'],
+ 'Zev Sub-Section': [
+ 'Testing and certification services', 'Battery components',
+ 'Vehicle components', 'Fuelling Infrastructure', 'Vehicles',
+ 'Transferable Technologies'
+ ],
+ 'Fuel Type': ['H2', 'Electric'],
+ 'Retrofit': ['BEV Retrofit', 'Hybrid Retrofit', 'H2 Retrofit', 'N/A']
+}
+
+CVP_DATA_VALID_FIELD_VALUES = {
+ 'Funding Call': ['1', '2', '3', '4', '5', '6', '7', '8', '10'],
+ 'Status': ['Approved', 'Completed', 'Terminated', 'Not Approved', 'Application Withdrawn'],
+ 'Vehicles Deployed': ['Yes', 'No'],
+ 'Vehicle Category': ['On-Road', 'Off-Road', 'Marine', 'Rail', 'Aviation'],
+ 'Class': [
+ 'Road - 3', 'Road - 4', 'Road - 5', 'Road - 6', 'Road - 7',
+ 'Road - 8', 'Road - 8C'
+ ],
+ 'Economic Region': [
+ 'Nechako', 'Northeast', 'North Coast', 'Cariboo', 'Vancouver Island/Coast',
+ 'Mainland/Southwest', 'Thompson/Okanagan', 'Kootenay', 'Across BC'
+ ],
+ 'Drive Type': ['BEV', 'FC', 'PHEV'],
+ 'Project Type': [
+ 'Procurement', 'New Design', 'Hybrid Retrofit', 'BEV Retrofit', 'H2 Retrofit'
+ ]
+}
diff --git a/django/api/fixtures/0001_add_ldv_rebates_datasets.json b/django/api/fixtures/0001_add_ldv_rebates_datasets.json
index e6198625..5c69e20e 100644
--- a/django/api/fixtures/0001_add_ldv_rebates_datasets.json
+++ b/django/api/fixtures/0001_add_ldv_rebates_datasets.json
@@ -1 +1 @@
-[{"model": "api.datasets", "pk": 1, "fields": {"create_timestamp": "2021-11-20T00:00:00Z", "create_user": "user", "update_timestamp": null, "update_user": null, "name": "LDV Rebates"}}, {"model": "api.datasets", "pk": 2, "fields": {"create_timestamp": "2021-12-01T00:00:00Z", "create_user": "user", "update_timestamp": null, "update_user": null, "name": "Specialty Use Vehicle Incentive Program"}}]
+[{"model": "api.datasets", "pk": 1, "fields": {"create_timestamp": "2021-11-20T00:00:00Z", "create_user": "user", "update_timestamp": null, "update_user": null, "name": "LDV Rebates"}}]
diff --git a/django/api/fixtures/0010_add_go_electric_rebates.json b/django/api/fixtures/0010_add_go_electric_rebates.json
new file mode 100644
index 00000000..14f4bf0a
--- /dev/null
+++ b/django/api/fixtures/0010_add_go_electric_rebates.json
@@ -0,0 +1 @@
+[{"model": "api.datasets", "pk": 10, "fields": {"create_timestamp": "2022-06-11T00:00:00Z", "create_user": "user", "update_timestamp": null, "update_user": null, "name": "Go Electric Rebates Program"}}]
diff --git a/django/api/migrations/0027_goelectricrebates_rebate_adjustment.py b/django/api/migrations/0027_goelectricrebates_rebate_adjustment.py
new file mode 100644
index 00000000..a376303c
--- /dev/null
+++ b/django/api/migrations/0027_goelectricrebates_rebate_adjustment.py
@@ -0,0 +1,18 @@
+# Generated by Django 3.2.25 on 2024-06-05 21:59
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('api', '0026_alter_uploadedvinsfile_chunk_size'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='goelectricrebates',
+ name='rebate_adjustment',
+ field=models.CharField(blank=True, max_length=50, null=True),
+ ),
+ ]
diff --git a/django/api/migrations/0028_auto_20240611_0251.py b/django/api/migrations/0028_auto_20240611_0251.py
new file mode 100644
index 00000000..3ffde2ad
--- /dev/null
+++ b/django/api/migrations/0028_auto_20240611_0251.py
@@ -0,0 +1,27 @@
+# Generated by Django 3.2.25 on 2024-06-11 02:51
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('api', '0027_goelectricrebates_rebate_adjustment'),
+ ]
+
+ operations = [
+ migrations.RemoveField(
+ model_name='uploadedvinsfile',
+ name='chunks_per_run',
+ ),
+ migrations.AddField(
+ model_name='uploadedvinrecord',
+ name='timestamp',
+ field=models.DateTimeField(blank=True, null=True),
+ ),
+ migrations.AlterField(
+ model_name='uploadedvinrecord',
+ name='postal_code',
+ field=models.CharField(max_length=7),
+ ),
+ ]
diff --git a/django/api/migrations/0029_alter_uploadedvinrecord_timestamp.py b/django/api/migrations/0029_alter_uploadedvinrecord_timestamp.py
new file mode 100644
index 00000000..d59c32a0
--- /dev/null
+++ b/django/api/migrations/0029_alter_uploadedvinrecord_timestamp.py
@@ -0,0 +1,18 @@
+# Generated by Django 3.2.25 on 2024-06-11 02:51
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('api', '0028_auto_20240611_0251'),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name='uploadedvinrecord',
+ name='timestamp',
+ field=models.DateTimeField(),
+ ),
+ ]
diff --git a/django/api/migrations/0030_goelectricrebates_notes.py b/django/api/migrations/0030_goelectricrebates_notes.py
new file mode 100644
index 00000000..dbb27341
--- /dev/null
+++ b/django/api/migrations/0030_goelectricrebates_notes.py
@@ -0,0 +1,18 @@
+# Generated by Django 3.2.25 on 2024-06-18 20:24
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('api', '0029_alter_uploadedvinrecord_timestamp'),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name='goelectricrebates',
+ name='notes',
+ field=models.CharField(blank=True, max_length=250, null=True),
+ ),
+ ]
diff --git a/django/api/migrations/0031_auto_20240712_2036.py b/django/api/migrations/0031_auto_20240712_2036.py
new file mode 100644
index 00000000..d186b5a0
--- /dev/null
+++ b/django/api/migrations/0031_auto_20240712_2036.py
@@ -0,0 +1,28 @@
+# Generated by Django 3.2.25 on 2024-07-12 20:36
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('api', '0030_goelectricrebates_notes'),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name='goelectricrebates',
+ name='email',
+ field=models.CharField(blank=True, max_length=50, null=True),
+ ),
+ migrations.AlterField(
+ model_name='goelectricrebates',
+ name='phone',
+ field=models.CharField(blank=True, max_length=20, null=True),
+ ),
+ migrations.AlterField(
+ model_name='goelectricrebates',
+ name='total_purchase_price',
+ field=models.IntegerField(blank=True, null=True),
+ ),
+ ]
diff --git a/django/api/migrations/0032_auto_20240726_2118.py b/django/api/migrations/0032_auto_20240726_2118.py
new file mode 100644
index 00000000..1331feee
--- /dev/null
+++ b/django/api/migrations/0032_auto_20240726_2118.py
@@ -0,0 +1,112 @@
+# Generated by Django 3.2.25 on 2024-07-26 21:18
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('api', '0031_auto_20240712_2036'),
+ ]
+
+ operations = [
+ migrations.RenameField(
+ model_name='arcprojecttracking',
+ old_name='arc_funding',
+ new_name='funds_disbursed',
+ ),
+ migrations.RenameField(
+ model_name='arcprojecttracking',
+ old_name='funds_issued',
+ new_name='jobs',
+ ),
+ migrations.RenameField(
+ model_name='arcprojecttracking',
+ old_name='on_road_off_road',
+ new_name='notes',
+ ),
+ migrations.AddField(
+ model_name='arcprojecttracking',
+ name='complete_or_termination_date',
+ field=models.DateField(blank=True, null=True),
+ ),
+ migrations.AddField(
+ model_name='arcprojecttracking',
+ name='economic_region',
+ field=models.CharField(default='Across BC', max_length=250),
+ preserve_default=False,
+ ),
+ migrations.AddField(
+ model_name='arcprojecttracking',
+ name='funds_commited',
+ field=models.IntegerField(default=0),
+ preserve_default=False,
+ ),
+ migrations.AddField(
+ model_name='arcprojecttracking',
+ name='remaining_disbursed',
+ field=models.IntegerField(blank=True, null=True),
+ ),
+ migrations.AddField(
+ model_name='arcprojecttracking',
+ name='retrofit',
+ field=models.CharField(blank=True, max_length=250, null=True),
+ ),
+ migrations.AddField(
+ model_name='arcprojecttracking',
+ name='vehicle_category',
+ field=models.CharField(default='A', max_length=250),
+ preserve_default=False,
+ ),
+ migrations.AlterField(
+ model_name='arcprojecttracking',
+ name='completion_date',
+ field=models.DateField(blank=True, null=True),
+ ),
+ migrations.AlterField(
+ model_name='arcprojecttracking',
+ name='funding_call',
+ field=models.CharField(default='a', max_length=50),
+ preserve_default=False,
+ ),
+ migrations.AlterField(
+ model_name='arcprojecttracking',
+ name='primary_location',
+ field=models.CharField(default='BC', max_length=250),
+ preserve_default=False,
+ ),
+ migrations.AlterField(
+ model_name='arcprojecttracking',
+ name='project_title',
+ field=models.CharField(default='Title', max_length=500),
+ preserve_default=False,
+ ),
+ migrations.AlterField(
+ model_name='arcprojecttracking',
+ name='proponent',
+ field=models.CharField(default='test', max_length=500),
+ preserve_default=False,
+ ),
+ migrations.AlterField(
+ model_name='arcprojecttracking',
+ name='publicly_announced',
+ field=models.CharField(blank=True, max_length=250, null=True),
+ ),
+ migrations.AlterField(
+ model_name='arcprojecttracking',
+ name='reference_number',
+ field=models.CharField(default=0, max_length=50),
+ preserve_default=False,
+ ),
+ migrations.AlterField(
+ model_name='arcprojecttracking',
+ name='start_date',
+ field=models.DateField(blank=True, null=True),
+ ),
+ migrations.AlterField(
+ model_name='arcprojecttracking',
+ name='status',
+ field=models.CharField(default='Valid', max_length=250),
+ preserve_default=False,
+ ),
+ ]
diff --git a/django/api/migrations/0033_regions.py b/django/api/migrations/0033_regions.py
new file mode 100644
index 00000000..65e60d74
--- /dev/null
+++ b/django/api/migrations/0033_regions.py
@@ -0,0 +1,27 @@
+# Generated by Django 3.2.25 on 2024-07-31 17:42
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('api', '0032_auto_20240726_2118'),
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='Regions',
+ fields=[
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)),
+ ('create_user', models.CharField(default='SYSTEM', max_length=130)),
+ ('update_timestamp', models.DateTimeField(auto_now=True, null=True)),
+ ('update_user', models.CharField(max_length=130, null=True)),
+ ('name', models.CharField(max_length=250)),
+ ],
+ options={
+ 'db_table': 'regions',
+ },
+ ),
+ ]
diff --git a/django/api/migrations/0034_cvpdata.py b/django/api/migrations/0034_cvpdata.py
new file mode 100644
index 00000000..d8218f3c
--- /dev/null
+++ b/django/api/migrations/0034_cvpdata.py
@@ -0,0 +1,71 @@
+# Generated by Django 3.2.25 on 2024-09-10 21:38
+
+from django.db import migrations, models
+
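+# Note: Django ignores max_length on IntegerField (it only emits a system check
+# warning); migration 0035 drops these arguments again via AlterField.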
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('api', '0033_regions'),
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='CVPData',
+ fields=[
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)),
+ ('create_user', models.CharField(default='SYSTEM', max_length=130)),
+ ('update_timestamp', models.DateTimeField(auto_now=True, null=True)),
+ ('update_user', models.CharField(max_length=130, null=True)),
+ ('funding_call', models.IntegerField(max_length=5)),
+ ('project_identifier', models.IntegerField(max_length=5)),
+ ('applicant_name', models.CharField(max_length=50)),
+ ('rank', models.IntegerField(blank=True, max_length=3, null=True)),
+ ('status', models.CharField(max_length=50)),
+ ('score', models.IntegerField(blank=True, max_length=5, null=True)),
+ ('vehicle_deployed', models.CharField(max_length=50)),
+ ('vehicle_category', models.CharField(max_length=50)),
+ ('drive_type', models.CharField(max_length=50)),
+ ('vehicle_type', models.CharField(max_length=50)),
+ ('portfolio', models.CharField(max_length=50)),
+ ('make_and_model', models.CharField(max_length=50)),
+ ('economic_region', models.CharField(max_length=150)),
+ ('start_date', models.DateField(blank=True, null=True)),
+ ('completion_date', models.DateField(blank=True, null=True)),
+ ('project_type', models.CharField(max_length=50)),
+ ('class_3', models.IntegerField(blank=True, max_length=3, null=True)),
+ ('class_4', models.IntegerField(blank=True, max_length=3, null=True)),
+ ('class_5', models.IntegerField(blank=True, max_length=3, null=True)),
+ ('class_6', models.IntegerField(blank=True, max_length=3, null=True)),
+ ('class_7', models.IntegerField(blank=True, max_length=3, null=True)),
+ ('class_8', models.IntegerField(blank=True, max_length=3, null=True)),
+ ('on_road_total', models.IntegerField(blank=True, max_length=5, null=True)),
+ ('off_road', models.IntegerField(blank=True, max_length=5, null=True)),
+ ('level_2_charger', models.IntegerField(blank=True, max_length=5, null=True)),
+ ('level_3_charger', models.IntegerField(blank=True, max_length=5, null=True)),
+ ('high_level_3_charger', models.IntegerField(blank=True, max_length=5, null=True)),
+ ('level_charger', models.IntegerField(blank=True, max_length=5, null=True)),
+ ('other_charger', models.IntegerField(blank=True, max_length=5, null=True)),
+ ('h2_fuelling_station', models.IntegerField(blank=True, max_length=5, null=True)),
+ ('charger_brand', models.CharField(blank=True, max_length=50, null=True)),
+ ('h2_fuelling_station_description', models.CharField(blank=True, max_length=500, null=True)),
+ ('ghg_emission_reduction', models.IntegerField(blank=True, max_length=5, null=True)),
+ ('estimated_ghg_emission_reduction', models.IntegerField(blank=True, max_length=5, null=True)),
+ ('funding_efficiency', models.IntegerField(blank=True, max_length=5, null=True)),
+ ('market_emission_reductions', models.IntegerField(blank=True, max_length=5, null=True)),
+ ('cvp_funding_request', models.IntegerField(max_length=10)),
+ ('cvp_funding_contribution', models.IntegerField(max_length=10)),
+ ('external_funding', models.IntegerField(blank=True, max_length=10, null=True)),
+ ('proponent_funding', models.IntegerField(blank=True, max_length=10, null=True)),
+ ('project_cost_initial', models.IntegerField(max_length=10)),
+ ('project_cost_revised', models.IntegerField(max_length=10)),
+ ('funding_source', models.CharField(blank=True, max_length=50, null=True)),
+ ('notes', models.CharField(blank=True, max_length=500, null=True)),
+ ('imhzev', models.CharField(blank=True, max_length=50, null=True)),
+ ],
+ options={
+ 'db_table': 'cvp_data',
+ },
+ ),
+ ]
diff --git a/django/api/migrations/0035_auto_20240910_2143.py b/django/api/migrations/0035_auto_20240910_2143.py
new file mode 100644
index 00000000..1fb18028
--- /dev/null
+++ b/django/api/migrations/0035_auto_20240910_2143.py
@@ -0,0 +1,165 @@
+# Generated by Django 3.2.25 on 2024-09-10 21:43
+
+from django.db import migrations, models
+
+def add_cvp_data_to_datasets(apps, schema_editor):
+
+ Datasets = apps.get_model('api', 'Datasets')
+
+ Datasets.objects.get_or_create(
+ create_timestamp="2024-09-10 00:00:00+00",
+ create_user="user",
+ name="CVP Data"
+ )
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('api', '0034_cvpdata'),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='class_3',
+ field=models.IntegerField(blank=True, null=True),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='class_4',
+ field=models.IntegerField(blank=True, null=True),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='class_5',
+ field=models.IntegerField(blank=True, null=True),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='class_6',
+ field=models.IntegerField(blank=True, null=True),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='class_7',
+ field=models.IntegerField(blank=True, null=True),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='class_8',
+ field=models.IntegerField(blank=True, null=True),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='cvp_funding_contribution',
+ field=models.IntegerField(),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='cvp_funding_request',
+ field=models.IntegerField(),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='estimated_ghg_emission_reduction',
+ field=models.IntegerField(blank=True, null=True),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='external_funding',
+ field=models.IntegerField(blank=True, null=True),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='funding_call',
+ field=models.IntegerField(),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='funding_efficiency',
+ field=models.IntegerField(blank=True, null=True),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='ghg_emission_reduction',
+ field=models.IntegerField(blank=True, null=True),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='h2_fuelling_station',
+ field=models.IntegerField(blank=True, null=True),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='high_level_3_charger',
+ field=models.IntegerField(blank=True, null=True),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='level_2_charger',
+ field=models.IntegerField(blank=True, null=True),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='level_3_charger',
+ field=models.IntegerField(blank=True, null=True),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='level_charger',
+ field=models.IntegerField(blank=True, null=True),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='market_emission_reductions',
+ field=models.IntegerField(blank=True, null=True),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='off_road',
+ field=models.IntegerField(blank=True, null=True),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='on_road_total',
+ field=models.IntegerField(blank=True, null=True),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='other_charger',
+ field=models.IntegerField(blank=True, null=True),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='project_cost_initial',
+ field=models.IntegerField(),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='project_cost_revised',
+ field=models.IntegerField(),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='project_identifier',
+ field=models.IntegerField(),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='proponent_funding',
+ field=models.IntegerField(blank=True, null=True),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='rank',
+ field=models.IntegerField(blank=True, null=True),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='score',
+ field=models.IntegerField(blank=True, null=True),
+ ),
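+ # No reverse function is given, so this data migration is irreversible;
+ # get_or_create keeps the forward run idempotent if it is re-applied.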
+ migrations.RunPython(add_cvp_data_to_datasets),
+ ]
diff --git a/django/api/migrations/0036_auto_20240911_1758.py b/django/api/migrations/0036_auto_20240911_1758.py
new file mode 100644
index 00000000..8441653b
--- /dev/null
+++ b/django/api/migrations/0036_auto_20240911_1758.py
@@ -0,0 +1,73 @@
+# Generated by Django 3.2.25 on 2024-09-11 17:58
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('api', '0035_auto_20240910_2143'),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='applicant_name',
+ field=models.CharField(max_length=500),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='charger_brand',
+ field=models.CharField(blank=True, max_length=100, null=True),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='drive_type',
+ field=models.CharField(max_length=100),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='funding_source',
+ field=models.CharField(blank=True, max_length=100, null=True),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='imhzev',
+ field=models.CharField(blank=True, max_length=100, null=True),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='make_and_model',
+ field=models.CharField(max_length=100),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='portfolio',
+ field=models.CharField(max_length=100),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='project_type',
+ field=models.CharField(max_length=100),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='status',
+ field=models.CharField(max_length=500),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='vehicle_category',
+ field=models.CharField(max_length=100),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='vehicle_deployed',
+ field=models.CharField(max_length=100),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='vehicle_type',
+ field=models.CharField(max_length=100),
+ ),
+ ]
diff --git a/django/api/migrations/0037_auto_20240911_1800.py b/django/api/migrations/0037_auto_20240911_1800.py
new file mode 100644
index 00000000..b432e335
--- /dev/null
+++ b/django/api/migrations/0037_auto_20240911_1800.py
@@ -0,0 +1,23 @@
+# Generated by Django 3.2.25 on 2024-09-11 18:00
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('api', '0036_auto_20240911_1758'),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='funding_source',
+ field=models.CharField(blank=True, max_length=500, null=True),
+ ),
+ migrations.AlterField(
+ model_name='cvpdata',
+ name='imhzev',
+ field=models.CharField(blank=True, max_length=500, null=True),
+ ),
+ ]
diff --git a/django/api/migrations/0038_addregions.py b/django/api/migrations/0038_addregions.py
new file mode 100644
index 00000000..ffffcbd5
--- /dev/null
+++ b/django/api/migrations/0038_addregions.py
@@ -0,0 +1,45 @@
+from django.db import migrations, models
+from datetime import datetime
+
+def add_region_data(apps, schema_editor):
+ Regions = apps.get_model('api', 'Regions')
+
+ current_timestamp = datetime.now()
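+ # Note: datetime.now() returns a naive datetime; with USE_TZ enabled, Django
+ # warns when saving it (timezone.now() would avoid the warning).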
+
+ regions_data = [
+ {"name": "Nechako"},
+ {"name": "Northeast"},
+ {"name": "Cariboo"},
+ {"name": "North Coast"},
+ {"name": "Vancouver Island/Coast"},
+ {"name": "Mainland/Southwest"},
+ {"name": "Thompson/Okanagan"},
+ {"name": "Kootenay"},
+ {"name": "Across BC"},
+ ]
+
+ for region in regions_data:
+ Regions.objects.get_or_create(
+ name=region["name"],
+ defaults={
+ "create_timestamp": current_timestamp,
+ "create_user": "SYSTEM",
+ "update_timestamp": current_timestamp,
+ "update_user": "SYSTEM",
+ }
+ )
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('api', '0037_auto_20240911_1800'),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name='regions',
+ name='name',
+ field=models.CharField(max_length=250, null=False, unique=True)
+ ),
+ migrations.RunPython(add_region_data),
+ ]
diff --git a/django/api/migrations/0039_auto_20241031_2123.py b/django/api/migrations/0039_auto_20241031_2123.py
new file mode 100644
index 00000000..eb120c7f
--- /dev/null
+++ b/django/api/migrations/0039_auto_20241031_2123.py
@@ -0,0 +1,27 @@
+# Generated by Django 3.2.25 on 2024-10-31 21:23
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('api', '0038_addregions'),
+ ]
+
+ operations = [
+ migrations.RemoveField(
+ model_name='cvpdata',
+ name='portfolio',
+ ),
+ migrations.AddField(
+ model_name='cvpdata',
+ name='road_class',
+ field=models.CharField(blank=True, max_length=100, null=True),
+ ),
+ migrations.AddField(
+ model_name='cvpdata',
+ name='use_case',
+ field=models.CharField(blank=True, max_length=100, null=True),
+ ),
+ ]
diff --git a/django/api/migrations/0040_filerequirements.py b/django/api/migrations/0040_filerequirements.py
new file mode 100644
index 00000000..72601506
--- /dev/null
+++ b/django/api/migrations/0040_filerequirements.py
@@ -0,0 +1,31 @@
+# Generated by Django 3.2.25 on 2024-11-07 22:15
+
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ('api', '0039_auto_20241031_2123'),
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name='FileRequirements',
+ fields=[
+ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+ ('create_timestamp', models.DateTimeField(auto_now_add=True, null=True)),
+ ('create_user', models.CharField(default='SYSTEM', max_length=130)),
+ ('update_timestamp', models.DateTimeField(auto_now=True, null=True)),
+ ('update_user', models.CharField(max_length=130, null=True)),
+ ('sheet', models.TextField(blank=True, null=True)),
+ ('columns', models.TextField(blank=True, null=True)),
+ ('formats', models.TextField(blank=True, null=True)),
+ ('dataset', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='file_requirements', to='api.datasets')),
+ ],
+ options={
+ 'db_table': 'file_requirements',
+ },
+ ),
+ ]
diff --git a/django/api/migrations/0041_add_datasets_and_file_requirements.py b/django/api/migrations/0041_add_datasets_and_file_requirements.py
new file mode 100644
index 00000000..6b71d6ac
--- /dev/null
+++ b/django/api/migrations/0041_add_datasets_and_file_requirements.py
@@ -0,0 +1,63 @@
+# Generated by Django 3.2.25 on 2024-11-07 22:17
+
+from django.db import migrations
+
+
+def add_datasets_and_file_requirements(apps, schema_editor):
+ Datasets = apps.get_model("api", "Datasets")
+ FileRequirements = apps.get_model("api", "FileRequirements")
+ columns_default_text = "All required columns must match the column names exactly as they appear in the provided template"
+    formats_default_text = "File format must be .xlsx or .xlsm only"
+
+ ger_obj, ger_created = Datasets.objects.get_or_create(
+ name="Go Electric Rebates Program", defaults={"update_user": "SYSTEM"}
+ )
+ FileRequirements.objects.get_or_create(
+ dataset=ger_obj,
+ defaults={
+ "sheet": """
+ The sheet name must be "Distribution List - Master"
+ """,
+ "columns": columns_default_text,
+ "formats": formats_default_text,
+ },
+ )
+
+ arc_obj, arc_created = Datasets.objects.get_or_create(
+ name="ARC Project Tracking", defaults={"update_user": "SYSTEM"}
+ )
+ FileRequirements.objects.get_or_create(
+ dataset=arc_obj,
+ defaults={
+ "sheet": """
+ The sheet name must be "ARC Data"
+ """,
+ "columns": columns_default_text,
+ "formats": formats_default_text,
+ },
+ )
+
+ cvp_obj, cvp_created = Datasets.objects.get_or_create(
+ name="CVP Data", defaults={"update_user": "SYSTEM"}
+ )
+ FileRequirements.objects.get_or_create(
+ dataset=cvp_obj,
+ defaults={
+ "sheet": """
+ The sheet name must be "Data"
+ """,
+ "columns": columns_default_text,
+ "formats": formats_default_text,
+ },
+ )
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ("api", "0040_filerequirements"),
+ ]
+
+ operations = [
+ migrations.RunPython(add_datasets_and_file_requirements),
+ ]
diff --git a/django/api/models/__init__.py b/django/api/models/__init__.py
index 79c55108..c163f3f8 100644
--- a/django/api/models/__init__.py
+++ b/django/api/models/__init__.py
@@ -25,3 +25,6 @@
from . import uploaded_vins_file
from . import uploaded_vin_record
from . import decoded_vin_record
+from . import regions
+from . import cvp_data
+from . import file_requirements
diff --git a/django/api/models/arc_project_tracking.py b/django/api/models/arc_project_tracking.py
index 8c66bc92..69579122 100644
--- a/django/api/models/arc_project_tracking.py
+++ b/django/api/models/arc_project_tracking.py
@@ -4,47 +4,63 @@
class ARCProjectTracking(Auditable):
- funding_call = models.CharField(blank=True, null=True, max_length=50, unique=False)
-
- proponent = models.CharField(blank=True, null=True, max_length=500, unique=False)
-
reference_number = models.CharField(
- blank=True, null=True, max_length=50, unique=False
+ blank=False, null=False, max_length=50, unique=False
)
+ proponent = models.CharField(blank=False, null=False, max_length=500, unique=False)
+
+ status = models.CharField(blank=False, null=False, max_length=250, unique=False)
+
+ funding_call = models.CharField(blank=False, null=False, max_length=50, unique=False)
+
project_title = models.CharField(
- blank=True, null=True, max_length=500, unique=False
+ blank=False, null=False, max_length=500, unique=False
)
- primary_location = models.CharField(
- blank=True, null=True, max_length=250, unique=False
+ vehicle_category = models.CharField(
+ blank=False, null=False, max_length=250
)
- status = models.CharField(blank=True, null=True, max_length=250, unique=False)
+ zev_sub_sector = models.CharField(
+ blank=True, null=True, max_length=250, unique=False
+ )
- arc_funding = models.IntegerField(blank=True, null=True)
+ fuel_type = models.CharField(blank=True, null=True, max_length=250, unique=False)
- funds_issued = models.IntegerField(blank=True, null=True)
+ retrofit = models.CharField(blank=True, null=True, max_length=250)
- start_date = models.CharField(blank=True, null=True, max_length=250, unique=False)
+ primary_location = models.CharField(
+ blank=False, null=False, max_length=250, unique=False
+ )
- completion_date = models.CharField(
- blank=True, null=True, max_length=250, unique=False
+ economic_region = models.CharField(
+ blank=False, null=False, max_length=250
)
+ jobs = models.IntegerField(blank=True, null=True)
+
+ funds_commited = models.IntegerField(blank=False, null=False)
+
+ funds_disbursed = models.IntegerField(blank=True, null=True)
+
+ remaining_disbursed = models.IntegerField(blank=True, null=True)
+
total_project_value = models.IntegerField(blank=True, null=True)
- zev_sub_sector = models.CharField(
- blank=True, null=True, max_length=250, unique=False
+ start_date = models.DateField(blank=True, null=True, unique=False)
+
+ completion_date = models.DateField(
+ blank=True, null=True, unique=False
)
- on_road_off_road = models.CharField(
- blank=True, null=True, max_length=250, unique=False
+ complete_or_termination_date = models.DateField(
+ blank=True, null=True, unique=False
)
- fuel_type = models.CharField(blank=True, null=True, max_length=250, unique=False)
+ publicly_announced = models.CharField(blank=True, null=True, max_length=250)
- publicly_announced = models.BooleanField(default=False)
+ notes = models.CharField(blank=True, null=True, max_length=250)
class Meta:
db_table = "arc_project_tracking"
diff --git a/django/api/models/cvp_data.py b/django/api/models/cvp_data.py
new file mode 100644
index 00000000..9f5c8770
--- /dev/null
+++ b/django/api/models/cvp_data.py
@@ -0,0 +1,192 @@
+from auditable.models import Auditable
+from django.db import models
+
+
+class CVPData(Auditable):
+
+ funding_call = models.IntegerField(
+ blank=False, null=False
+ )
+
+ project_identifier = models.IntegerField(
+ blank=False, null=False
+ )
+
+ applicant_name = models.CharField(
+ blank=False, null=False, max_length=500
+ )
+
+ rank = models.IntegerField(
+ blank=True, null=True
+ )
+
+ status = models.CharField(
+ blank=False, null=False, max_length=500
+ )
+
+ score = models.IntegerField(
+ blank=True, null=True
+ )
+
+ vehicle_deployed = models.CharField(
+ blank=False, null=False, max_length=100
+ )
+
+ vehicle_category = models.CharField(
+ blank=False, null=False, max_length=100
+ )
+
+ drive_type = models.CharField(
+ blank=False, null=False, max_length=100
+ )
+
+ vehicle_type = models.CharField(
+ blank=False, null=False, max_length=100
+ )
+
+ road_class = models.CharField(
+ blank=True, null=True, max_length=100
+ )
+
+ use_case = models.CharField(
+ blank=True, null=True, max_length=100
+ )
+
+ make_and_model = models.CharField(
+ blank=False, null=False, max_length=100
+ )
+
+ economic_region = models.CharField(
+ blank=False, null=False, max_length=150
+ )
+
+ start_date = models.DateField(
+ blank=True, null=True
+ )
+
+ completion_date = models.DateField(
+ blank=True, null=True
+ )
+
+ project_type = models.CharField(
+ blank=False, null=False, max_length=100
+ )
+
+ class_3 = models.IntegerField(
+ blank=True, null=True
+ )
+
+ class_4 = models.IntegerField(
+ blank=True, null=True
+ )
+
+ class_5 = models.IntegerField(
+ blank=True, null=True
+ )
+
+ class_6 = models.IntegerField(
+ blank=True, null=True
+ )
+
+ class_7 = models.IntegerField(
+ blank=True, null=True
+ )
+
+ class_8 = models.IntegerField(
+ blank=True, null=True
+ )
+
+ on_road_total = models.IntegerField(
+ blank=True, null=True
+ )
+
+ off_road = models.IntegerField(
+ blank=True, null=True
+ )
+
+ level_2_charger = models.IntegerField(
+ blank=True, null=True
+ )
+
+ level_3_charger = models.IntegerField(
+ blank=True, null=True
+ )
+
+ high_level_3_charger = models.IntegerField(
+ blank=True, null=True
+ )
+
+ level_charger = models.IntegerField(
+ blank=True, null=True
+ )
+
+ other_charger = models.IntegerField(
+ blank=True, null=True
+ )
+
+ h2_fuelling_station = models.IntegerField(
+ blank=True, null=True
+ )
+
+ charger_brand = models.CharField(
+ blank=True, null=True, max_length=100
+ )
+
+ h2_fuelling_station_description = models.CharField(
+ blank=True, null=True, max_length=500
+ )
+
+ ghg_emission_reduction = models.IntegerField(
+ blank=True, null=True
+ )
+
+ estimated_ghg_emission_reduction = models.IntegerField(
+ blank=True, null=True
+ )
+
+ funding_efficiency = models.IntegerField(
+ blank=True, null=True
+ )
+
+ market_emission_reductions = models.IntegerField(
+ blank=True, null=True
+ )
+
+ cvp_funding_request = models.IntegerField(
+ blank=False, null=False
+ )
+
+ cvp_funding_contribution = models.IntegerField(
+ blank=False, null=False
+ )
+
+ external_funding = models.IntegerField(
+ blank=True, null=True
+ )
+
+ proponent_funding = models.IntegerField(
+ blank=True, null=True
+ )
+
+ project_cost_initial = models.IntegerField(
+ blank=False, null=False
+ )
+
+ project_cost_revised = models.IntegerField(
+ blank=False, null=False
+ )
+
+ funding_source = models.CharField(
+ blank=True, null=True, max_length=500
+ )
+
+ notes = models.CharField(
+ blank=True, null=True, max_length=500
+ )
+
+ imhzev = models.CharField(
+ blank=True, null=True, max_length=500
+ )
+
+ class Meta:
+ db_table = "cvp_data"
diff --git a/django/api/models/file_requirements.py b/django/api/models/file_requirements.py
new file mode 100644
index 00000000..d9b3cbe5
--- /dev/null
+++ b/django/api/models/file_requirements.py
@@ -0,0 +1,20 @@
+from django.db import models
+from auditable.models import Auditable
+from api.models.datasets import Datasets
+
+
+class FileRequirements(Auditable):
+ dataset = models.OneToOneField(
+ Datasets,
+ related_name="file_requirements",
+ on_delete=models.CASCADE,
+ )
+
+ sheet = models.TextField(blank=True, null=True)
+
+ columns = models.TextField(blank=True, null=True)
+
+ formats = models.TextField(blank=True, null=True)
+
+ class Meta:
+ db_table = "file_requirements"
diff --git a/django/api/models/go_electric_rebates.py b/django/api/models/go_electric_rebates.py
index b266e11d..3b3a8e75 100644
--- a/django/api/models/go_electric_rebates.py
+++ b/django/api/models/go_electric_rebates.py
@@ -18,17 +18,18 @@ class GoElectricRebates(Auditable):
blank=False,
)
total_purchase_price = models.IntegerField(
- null=False,
- blank=False,
+ null=True,
+ blank=True,
)
manufacturer = models.CharField(blank=False, max_length=250, null=False)
model = models.CharField(blank=False, max_length=250, null=False)
city = models.CharField(blank=False, max_length=250, null=False)
postal_code = models.CharField(blank=True, max_length=250, null=True)
- phone = models.CharField(blank=False, max_length=20, null=False)
- email = models.CharField(blank=False, max_length=50, null=False)
+ phone = models.CharField(blank=True, max_length=20, null=True)
+ email = models.CharField(blank=True, max_length=50, null=True)
vin = models.CharField(blank=True, max_length=100, null=True)
vehicle_class = models.CharField(blank=True, null=True, max_length=50)
-
+ rebate_adjustment = models.CharField(blank=True, null=True, max_length=50)
+ notes = models.CharField(blank=True, null=True, max_length=250)
class Meta:
db_table = "go_electric_rebates"
diff --git a/django/api/models/regions.py b/django/api/models/regions.py
new file mode 100644
index 00000000..854cf98d
--- /dev/null
+++ b/django/api/models/regions.py
@@ -0,0 +1,10 @@
+from auditable.models import Auditable
+from django.db import models
+
+
+class Regions(Auditable):
+
+ name = models.CharField(blank=False, null=False, max_length=250, unique=True)
+
+ class Meta:
+ db_table = "regions"
diff --git a/django/api/models/uploaded_vin_record.py b/django/api/models/uploaded_vin_record.py
index 371b6b38..1d6d306b 100644
--- a/django/api/models/uploaded_vin_record.py
+++ b/django/api/models/uploaded_vin_record.py
@@ -5,7 +5,9 @@
class UploadedVinRecord(Auditable):
vin = models.CharField(max_length=17)
- postal_code = models.CharField(max_length=7, null=True, blank=True)
+ postal_code = models.CharField(max_length=7)
+
+ timestamp = models.DateTimeField()
data = models.JSONField()
diff --git a/django/api/models/uploaded_vins_file.py b/django/api/models/uploaded_vins_file.py
index a958dd28..853397f7 100644
--- a/django/api/models/uploaded_vins_file.py
+++ b/django/api/models/uploaded_vins_file.py
@@ -7,8 +7,6 @@ class UploadedVinsFile(Auditable):
chunk_size = models.IntegerField(default=5000)
- chunks_per_run = models.IntegerField(default=4)
-
start_index = models.IntegerField(default=0)
processed = models.BooleanField(default=False)
diff --git a/django/api/serializers/file_requirements.py b/django/api/serializers/file_requirements.py
new file mode 100644
index 00000000..f53b9718
--- /dev/null
+++ b/django/api/serializers/file_requirements.py
@@ -0,0 +1,9 @@
+from rest_framework.serializers import ModelSerializer
+from api.models.file_requirements import FileRequirements
+
+
+class FileRequirementsSerializer(ModelSerializer):
+
+ class Meta:
+ model = FileRequirements
+ fields = ("sheet", "columns", "formats")
diff --git a/django/api/services/bcngws.py b/django/api/services/bcngws.py
new file mode 100644
index 00000000..55ce2aec
--- /dev/null
+++ b/django/api/services/bcngws.py
@@ -0,0 +1,43 @@
+import requests
+from django.conf import settings
+
+
+# names should be a list of location names, feature_category should be an integer or *,
+# feature_types should be a list or *, page_size should be an integer >=1, <=200,
+# start_index should be an integer, result should be a set
+def get_placename_matches(
+ names, feature_category, feature_types, page_size, start_index, result
+):
+ names_string = " ".join(names)
+
+ query = {
+ "outputFormat": "json",
+ "name": names_string,
+ "itemsPerPage": page_size,
+ "startIndex": start_index,
+ "featureCategory": feature_category,
+ }
+
+ try:
+ response = requests.get(settings.PLACENAMES_ENDPOINT, params=query)
+ response.raise_for_status()
+ response = response.json()
+
+ for feature in response["features"]:
+ name = feature["properties"]["name"]
+            feature_type = feature["properties"]["featureType"]
+            if feature_types == "*" or feature_type in feature_types:
+ result.add(name)
+
+ if response["properties"]["totalResults"] >= start_index + page_size:
+ get_placename_matches(
+ names,
+ feature_category,
+ feature_types,
+ page_size,
+ start_index + page_size,
+ result,
+ )
+
+ except requests.RequestException as e:
+ print(f"Error fetching data: {e}")
diff --git a/django/api/services/datasheet_template_generator.py b/django/api/services/datasheet_template_generator.py
index 5ea132f8..3949ddb3 100644
--- a/django/api/services/datasheet_template_generator.py
+++ b/django/api/services/datasheet_template_generator.py
@@ -1,6 +1,6 @@
import pandas as pd
from io import BytesIO
-from api.constants import *
+from api.constants.constants import *
def generate_template(dataset_name):
diff --git a/django/api/services/decoded_vin_record.py b/django/api/services/decoded_vin_record.py
index f8846d3c..5ec6108e 100644
--- a/django/api/services/decoded_vin_record.py
+++ b/django/api/services/decoded_vin_record.py
@@ -1,5 +1,5 @@
from api.models.uploaded_vin_record import UploadedVinRecord
-from api.decoder_constants import get_service
+from api.constants.decoder import get_service
from api.services.uploaded_vin_record import (
set_decode_successful,
get_number_of_decode_attempts,
@@ -13,7 +13,7 @@
def save_decoded_data(
uploaded_vin_records,
vins_to_insert,
- decoded_records_to_update_map,
+ vins_to_decoded_record_ids_map,
service_name,
decoded_data,
):
@@ -34,10 +34,12 @@ def save_decoded_data(
decoded_records_to_insert.append(
decoded_vin_model(vin=vin, data=decoded_datum)
)
- elif vin in decoded_records_to_update_map:
- decoded_record_to_update = decoded_records_to_update_map.get(vin)
- decoded_record_to_update.update_timestamp = timezone.now()
- decoded_record_to_update.data = decoded_datum
+ elif vin in vins_to_decoded_record_ids_map:
+ decoded_record_to_update = decoded_vin_model(
+ id=vins_to_decoded_record_ids_map[vin],
+ update_timestamp=timezone.now(),
+ data=decoded_datum,
+ )
decoded_records_to_update.append(decoded_record_to_update)
elif vin in failed_vins:
set_decode_successful(service_name, uploaded_record, False)
diff --git a/django/api/services/file_requirements.py b/django/api/services/file_requirements.py
new file mode 100644
index 00000000..06b029ae
--- /dev/null
+++ b/django/api/services/file_requirements.py
@@ -0,0 +1,5 @@
+from api.models.file_requirements import FileRequirements
+
+
+def get_file_requirements(dataset_name):
+ return FileRequirements.objects.filter(dataset__name=dataset_name).first()
diff --git a/django/api/services/resolvers.py b/django/api/services/resolvers.py
new file mode 100644
index 00000000..65b6785a
--- /dev/null
+++ b/django/api/services/resolvers.py
@@ -0,0 +1,8 @@
+from dns.resolver import Resolver
+from email_validator import caching_resolver
+
+
+def get_google_resolver():
+ resolver = Resolver()
+ resolver.nameservers = ["8.8.8.8"]
+ return caching_resolver(dns_resolver=resolver)
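The resolver above is intended to be handed to `email_validator`, so repeated deliverability checks share one cache of DNS answers; a minimal sketch (the address is hypothetical):

```python
from email_validator import validate_email, EmailNotValidError
from api.services.resolvers import get_google_resolver

resolver = get_google_resolver()
try:
    validate_email("someone@example.com", dns_resolver=resolver)
except EmailNotValidError as exc:
    print(f"invalid address: {exc}")
```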
diff --git a/django/api/services/spreadsheet_uploader.py b/django/api/services/spreadsheet_uploader.py
index 30dbaedb..c76059e5 100644
--- a/django/api/services/spreadsheet_uploader.py
+++ b/django/api/services/spreadsheet_uploader.py
@@ -1,8 +1,9 @@
from decimal import Decimal, ROUND_HALF_UP
import pandas as pd
import traceback
+import numpy as np
from django.db import transaction
-
+from datetime import datetime
def get_field_default(model, field):
field = model._meta.get_field(field)
@@ -13,11 +14,11 @@ def get_field_default(model, field):
def get_nullable_fields(model):
- nullable_fields = {}
+ nullable_fields = []
for field in model._meta.get_fields():
if hasattr(field, "null") and field.null:
- nullable_fields[field.name] = True
+ nullable_fields.append(field.name)
return nullable_fields
@@ -29,18 +30,18 @@ def trim_all_columns(df):
def extract_data(excel_file, sheet_name, header_row):
try:
df = pd.read_excel(excel_file, sheet_name, header=header_row)
- df = df.fillna('TEMP_NULL')
df = trim_all_columns(df)
return df
except Exception as e:
- traceback.print_exc()
- raise
+ return None
def transform_data(
df,
dataset_columns,
column_mapping_enum,
+ field_types,
+ model,
preparation_functions=[],
validation_functions=[],
):
@@ -48,112 +49,157 @@ def transform_data(
df = df[[col for col in df.columns if col in required_columns]]
+ errors_and_warnings = {}
+
missing_columns = [col for col in required_columns if col not in df.columns]
- if missing_columns:
- raise ValueError(f"Missing columns: {', '.join(missing_columns)}")
+    if missing_columns:
+ errors_and_warnings['Headers'] = {}
+ errors_and_warnings['Headers']['Missing Headers'] = {
+ "Expected Type": "missing one or more required columns",
+ "Rows": missing_columns,
+ "Severity": "Critical"
+ }
+ return df, errors_and_warnings
for prep_func in preparation_functions:
df = prep_func(df)
- for validate in validation_functions:
- df = validate(df)
+ nullable_fields = get_nullable_fields(model)
+
+ column_mapping = {e.value: e.name for e in column_mapping_enum}
+
+ type_to_string = {
+ int: "Integer",
+ float: "Float",
+ Decimal: "Decimal",
+ str: "String",
+ datetime: "Date (YYYY-MM-DD)"
+ }
+
+ df = df.replace({np.nan: None})
+
+ for index, row in df.iterrows():
+ row_dict = row.to_dict()
+
+ for column, value in row_dict.items():
+ db_field_name = column_mapping.get(column)
+
+ if db_field_name:
+ is_nullable = db_field_name in nullable_fields
+ expected_type = field_types.get(column)
+
+ if pd.isna(value) or value == "" or value is None:
+ if is_nullable:
+ row_dict[column] = None
+ else:
+ if column not in errors_and_warnings:
+ errors_and_warnings[column] = {}
+ if "Empty Value" not in errors_and_warnings[column]:
+ errors_and_warnings[column]["Empty Value"] = {
+ "Expected Type": "Cells in this column cannot be blank.",
+ "Rows": [],
+ "Severity": "Error"
+ }
+ errors_and_warnings[column]["Empty Value"]["Rows"].append(index + 1)
+
+                    if expected_type == datetime and isinstance(value, str) and value != '':
+ try:
+ datetime.strptime(value, "%Y-%m-%d")
+ except ValueError:
+ if column not in errors_and_warnings:
+ errors_and_warnings[column] = {}
+ if "Incorrect Date Format" not in errors_and_warnings[column]:
+ errors_and_warnings[column]["Incorrect Date Format"] = {
+ "Expected Type": "The following rows contained an incorrect date format. Expected YYYY-MM-DD.",
+ "Rows": [],
+ "Severity": "Error"
+ }
+ errors_and_warnings[column]["Incorrect Date Format"]["Rows"].append(index + 1)
+
+ if expected_type in [int, float, Decimal] and value is not None and pd.notna(value) and value != '':
+ value = str(value).replace(',', '').strip()
+ try:
+ if expected_type == int:
+ row_dict[column] = int(float(value))
+ elif expected_type == Decimal:
+ row_dict[column] = Decimal(value).quantize(Decimal("0.01"), rounding=ROUND_HALF_UP)
+ else:
+ row_dict[column] = float(value)
+ except ValueError:
+ if column not in errors_and_warnings:
+ errors_and_warnings[column] = {}
+ if "Incorrect Type" not in errors_and_warnings[column]:
+ errors_and_warnings[column]["Incorrect Type"] = {
+ "Expected Type": f"The following rows contained types for the column {column}. Expected {type_to_string.get(expected_type, str(expected_type))}",
+ "Rows": [],
+ "Severity": "Error"
+ }
+ errors_and_warnings[column]["Incorrect Type"]["Rows"].append(index + 1)
+
+ # Check if expected_type is valid before using isinstance
+ elif expected_type is not None and isinstance(expected_type, type) and not isinstance(row_dict[column], expected_type) and value != "":
+ if column not in errors_and_warnings:
+ errors_and_warnings[column] = {}
+ if "Incorrect Type" not in errors_and_warnings[column]:
+ errors_and_warnings[column]["Incorrect Type"] = {
+ "Expected Type": f"The following rows contained types for the column {column}. Expected {type_to_string.get(expected_type, str(expected_type))}",
+ "Rows": [],
+ "Severity": "Error"
+ }
+ errors_and_warnings[column]["Incorrect Type"]["Rows"].append(index + 1)
+
+ for x in validation_functions:
+ validate = x["function"]
+ columns = x["columns"]
+ kwargs = x["kwargs"]
+ warnings = validate(df, *columns, **kwargs)
+
+ if warnings:
+ for column, issues in warnings.items():
+ if column not in errors_and_warnings:
+ errors_and_warnings[column] = {}
+ for issue, details in issues.items():
+ if issue not in errors_and_warnings[column]:
+                        if details.get("Severity", "Error") == "Warning":
+ errors_and_warnings[column][issue] = {
+ "Expected Type": details.get("Expected Type", "Unknown"),
+ "Groups": details.get("Groups", []),
+ "Severity": details.get("Severity", "Error")
+ }
+ else:
+ errors_and_warnings[column][issue] = {
+ "Expected Type": details.get("Expected Type", "Unknown"),
+ "Rows": details.get("Rows", []),
+ "Severity": details.get("Severity", "Error")
+ }
+ else:
+ errors_and_warnings[column][issue]["Groups"].extend(details.get("Groups", []))
column_mapping = {col.name: col.value for col in column_mapping_enum}
- # Need to use the inverse (keys) for mapping the columns to what the database expects in order to use enums
inverse_column_mapping = {v: k for k, v in column_mapping.items()}
df.rename(columns=inverse_column_mapping, inplace=True)
- return df
+ return df, errors_and_warnings
@transaction.atomic
-def load_data(df, model, field_types, replace_data, user):
- row_count = 0
+def load_data(df, model, replace_data, user):
records_inserted = 0
- errors = []
- nullable_fields = get_nullable_fields(model)
if replace_data:
model.objects.all().delete()
-
+
for index, row in df.iterrows():
row_dict = row.to_dict()
- valid_row = True
+ row_dict["update_user"] = user
- for column, value in row_dict.items():
-
- expected_type = field_types.get(column)
- is_nullable = column in nullable_fields
-
- if pd.isna(value) or value == "" or value == 'TEMP_NULL':
- if is_nullable:
- row_dict[column] = None
- else:
- row_dict[column] = get_field_default(model, column)
- elif expected_type == float:
- if isinstance(value, int):
- row_dict[column] = float(value)
- elif isinstance(value, float):
- row_dict[column] = round(value, 2)
- elif isinstance(value, str) and value.strip() != "":
- try:
- float_value = float(value)
- row_dict[column] = round(float_value, 2)
- except ValueError:
- errors.append(
- f"Row {index + 1}: Unable to convert value to float for '{column}'. Value was '{value}'."
- )
- valid_row = False
- continue
- elif expected_type == int and (
- (isinstance(value, str) and value.strip() != "")
- or isinstance(value, float)
- ):
- try:
- row_dict[column] = int(value)
- except ValueError:
- errors.append(
- f"Row {index + 1}: Unable to convert value to int for '{column}'. Value was '{value}'."
- )
- valid_row = False
- continue
- elif expected_type == Decimal and (
- (isinstance(value, int) or isinstance(value, float))
- ):
- try:
- decimal_value = Decimal(value).quantize(
- Decimal("0.01"), rounding=ROUND_HALF_UP
- )
- row_dict[column] = decimal_value
- except ValueError:
- errors.append(
- f"Row {index + 1}: Unable to convert value to int for '{column}'. Value was '{value}'."
- )
- valid_row = False
- continue
- elif not isinstance(value, expected_type) and value != "":
- errors.append(
- f"Row {index + 1}: Incorrect type for '{column}'. Expected {expected_type.__name__}, got {type(value).__name__}."
- )
- valid_row = False
- continue
-
- if valid_row:
- try:
- row_dict["update_user"] = user
- model_instance = model(**row_dict)
- model_instance.full_clean()
- model_instance.save()
- records_inserted += 1
- except Exception as e:
- errors.append(f"Row {index + 1}: {e}")
-
- row_count += 1
+ model_instance = model(**row_dict)
+ model_instance.save()
+ records_inserted += 1
return {
- "row_count": row_count,
+ "row_count": len(df),
"records_inserted": records_inserted,
- "errors": errors,
}
@@ -169,41 +215,54 @@ def import_from_xls(
user,
preparation_functions=[],
validation_functions=[],
+ check_for_warnings=True,
):
+ errors_and_warnings = {}
try:
df = extract_data(excel_file, sheet_name, header_row)
- df = transform_data(
- df,
- dataset_columns,
- column_mapping_enum,
- preparation_functions,
- validation_functions,
- )
- result = load_data(df, model, field_types, replace_data, user)
+ if df is not None:
+ df, errors_and_warnings = transform_data(
+ df,
+ dataset_columns,
+ column_mapping_enum,
+ field_types,
+ model,
+ preparation_functions,
+ validation_functions,
+ )
+
+ else:
+ errors_and_warnings['Spreadsheet'] = {}
+ errors_and_warnings['Spreadsheet']['Missing Worksheet'] = {
+ 'Expected Type': 'The worksheet is missing or incorrectly named',
+ 'Rows': [sheet_name],
+ 'Severity': 'Critical'
+ }
+
+        if check_for_warnings and errors_and_warnings:
+            # Surface the issues before inserting anything; the caller can
+            # re-run the upload with checkForWarnings=false to proceed anyway.
+            return {
+                "success": True,
+                "message": "We encountered some potential errors in your data. Please choose whether to ignore them and continue inserting the data, or cancel the upload and edit the data before re-uploading.",
+                "warning": True,
+                "errors_and_warnings": errors_and_warnings,
+            }
+
+ result = load_data(df, model, replace_data, user)
total_rows = result["row_count"]
inserted_rows = result["records_inserted"]
- if result["errors"] and result["records_inserted"] > 0:
- return {
- "success": True,
- "message": f"{inserted_rows} out of {total_rows} rows successfully inserted with some errors encountered.",
- "errors": result["errors"],
- "rows_processed": result["row_count"],
- }
- elif len(result["errors"]) > 0:
- return {
- "success": False,
- "message": "Errors encountered with no successful insertions.",
- "errors": result["errors"],
- "rows_processed": result["row_count"],
- }
- else:
- return {
- "success": True,
- "message": f"All {inserted_rows} records successfully inserted out of {total_rows}.",
- "rows_processed": result["row_count"],
+ return {
+ "success": True,
+ "message": f"All {inserted_rows} records successfully inserted out of {total_rows}.",
+ "rows_processed": result["row_count"],
}
+
except Exception as error:
traceback.print_exc()
error_msg = f"Unexpected error: {str(error)}"
diff --git a/django/api/services/spreadsheet_uploader_prep.py b/django/api/services/spreadsheet_uploader_prep.py
index 9accc46d..7e4db9a6 100644
--- a/django/api/services/spreadsheet_uploader_prep.py
+++ b/django/api/services/spreadsheet_uploader_prep.py
@@ -1,7 +1,11 @@
from decimal import Decimal
-import numpy as np
import pandas as pd
-
+import difflib as dl
+from api.services.bcngws import get_placename_matches
+from api.models.regions import Regions
+from email_validator import validate_email, EmailNotValidError
+from api.utilities.series import get_map_of_values_to_indices
+from api.constants.misc import AREA_CODES
def prepare_arc_project_tracking(df):
df["Publicly Announced"] = df["Publicly Announced"].replace(
@@ -15,7 +19,6 @@ def prepare_hydrogen_fleets(df):
df.apply(lambda x: x.fillna(0) if x.dtype.kind in "biufc" else x.fillna(""))
return df
-
def prepare_hydrogen_fueling(df):
decimal_columns = ["Capital Funding Awarded", "O&M Funding Potential"]
@@ -79,10 +82,369 @@ def prepare_go_electric_rebates(df):
df = df.applymap(lambda s: s.upper() if type(s) == str else s)
- num_columns = df.select_dtypes(include=["number"]).columns.tolist()
- df[num_columns] = df[num_columns].fillna(0)
-
- non_num_columns = df.columns.difference(num_columns)
+ non_num_columns = df.select_dtypes(exclude=["number"]).columns.tolist()
df[non_num_columns] = df[non_num_columns].fillna("")
+    format_dict = {
+        'title': ['Approvals', 'Applicant Name', 'Category',
+                  'Fleet/Individuals', 'Rebate adjustment (discount)',
+                  'Manufacturer', 'City'],
+        'upper': ['Model', 'Postal code', 'VIN Number'],
+        'lower': ['Email'],
+        'skip': ['Phone Number'],
+        'sentence': ['Notes'],
+    }
+    for key in format_dict:
+        df[format_dict[key]] = df[format_dict[key]].apply(format_case, case=key)
+
+ make_names_consistent(df)
+ make_prepositions_consistent(df)
+ adjust_ger_manufacturer_names(df)
+ return df
+
+def prepare_cvp_data(df):
+ df = df.applymap(lambda s: s.upper() if type(s) == str else s)
+ df = df.apply(lambda x: x.fillna(0) if x.dtype.kind in "biufc" else x.fillna(""))
return df
+
+def format_case(s, case='skip'):
+    # Apply transformations to non-NaN values only; touching NaN values would
+    # convert them to the string "nan", which is harder to work with later.
+    mask = s.notna()
+
+    s.loc[mask] = (
+        s.loc[mask]
+        .astype(str)   # convert to string
+        .str.strip()   # strip the extra tabs, newlines, etc. this dataset suffers from
+    )
+
+ if case == 'title':
+ s.loc[mask] = s.loc[mask].str.title()
+ elif case == 'upper':
+ s.loc[mask] = s.loc[mask].str.upper()
+ elif case == 'lower':
+ s.loc[mask] = s.loc[mask].str.lower()
+ elif case == 'sentence':
+ s.loc[mask] = s.loc[mask].str.capitalize()
+
+ return s
+
+
+def make_names_consistent(df):
+ """
+ This step is done after formatting because people use all kinds of cases (`LTD`, `ltd', 'LIMITED'`, etc.).
+
+ To `Ltd.` from:
+ - `Ltd`
+ - `Limited`
+ - `Limited.`
+
+ To `Inc.` from:
+ - `Inc`
+ - `Incorporated`
+
+ - From `Dba` to `DBA` i.e. "doing business as"
+
+ """
+ consistent_name_dict = (
+ dict.fromkeys([
+ '\\bLtd(?!\\.)\\b', # Matches word "Ltd" not followed by "."
+ 'Limited$', # Matches "Limited" at the end of the string
+ 'Limited\\.$', # Matches "Limited." at the end of the string
+ ', Ltd.'
+ ], 'Ltd.') |
+ dict.fromkeys([
+ '\\bInc(?!\\.)\\b', # Matches "Inc" not followed by "."
+ 'Incorporated'], 'Inc.') |
+ {', Inc.': ' Inc.',
+ '(?i)\\bdba\\b': 'DBA'} # Matches word "dba" regardless of case
+    )
+ df[['Applicant Name', 'Manufacturer']] = df[['Applicant Name', 'Manufacturer']].replace(
+ consistent_name_dict,
+ regex=True)
+
+def make_prepositions_consistent(df):
+ df[['Applicant Name', 'Manufacturer']] = df[['Applicant Name', 'Manufacturer']].replace(
+ dict.fromkeys(
+ ['(?i)\\bbc(?=\\W)', # Matches word "bc" regardless of case
+ '(?i)\\bb\\.c\\.(?=\\W)'], 'BC'), # Matches word "b.c." regardless of case
+ regex=True
+ ).replace(
+ {'BC Ltd.': 'B.C. Ltd.',
+ '\\bOf(?=\\W)': 'of',
+ '\\bAnd(?=\\W)': 'and', # Matches word "And"
+ '\\bThe(?=\\W)': 'the',
+ '\\bA(?=\\W)': 'a',
+ '\\bAn(?=\\W)': 'an'},
+ regex=True
+ )
+    # The first letter should be capitalized
+    df[['Applicant Name', 'Manufacturer']] = df[['Applicant Name', 'Manufacturer']].applymap(
+        lambda x: x[0].upper() + x[1:] if isinstance(x, str) and len(x) > 1
+        else x.upper() if isinstance(x, str) and len(x) == 1
+        else x
+    )
+
+
+def adjust_ger_manufacturer_names(df):
+ """""
+ This function is currently GER specific updating the manufacturer names to have casing that makes more sense
+ since currently all manufacturer column entries are set to sentence casing.
+
+ """""
+
+ name_replacements = {
+ 'International Ic Bus': 'International IC Bus',
+ 'Lightning Emotors': 'Lightning eMotors',
+ 'Avro Gse': 'Avro GSE',
+ 'Bmw': 'BMW',
+ 'Ego': 'EGO',
+ 'Sc Carts': 'SC Carts'
+ }
+
+ df[['Manufacturer']] = df[['Manufacturer']].replace(name_replacements, regex=False)
+
+
+def typo_checker(df, *columns, **kwargs):
+ result = {}
+
+ for column in columns:
+ series = df[column]
+ unique_vals = set(series)
+
+ map_of_values_to_indices = get_map_of_values_to_indices(series, kwargs.get("indices_offset", 0))
+
+ typo_groups = []
+ processed_values = set()
+
+ for value in unique_vals:
+ if value in processed_values:
+ continue
+
+ matches = dl.get_close_matches(value, unique_vals.difference({value}), cutoff=kwargs.get("cutoff", 0.8))
+
+ if matches:
+ current_group = {
+ "Typo Group": [value] + matches,
+ "Rows": []
+ }
+
+ current_group["Rows"].extend(map_of_values_to_indices[value])
+
+ for match in matches:
+ current_group["Rows"].extend(map_of_values_to_indices[match])
+
+ processed_values.add(value)
+ processed_values.update(matches)
+
+ typo_groups.append(current_group)
+
+ if typo_groups:
+ result[column] = {
+ "Similar Values Detected": {
+ "Expected Type": "We detected applicant names that sound very similar. If these names refer to the same person/entity, please replace the applicant names in your dataset to the preferred spelling to ensure consistency",
+ "Groups": typo_groups,
+ "Severity": "Warning"
+ }
+ }
+
+ return result
+
+
+def validate_phone_numbers(df, *columns, **kwargs):
+ result = {}
+ for column in columns:
+ series = df[column]
+ map_of_values_to_indices = get_map_of_values_to_indices(series, kwargs.get("indices_offset", 0))
+ invalid_groups = []
+
+        for phone_number, indices in map_of_values_to_indices.items():
+            if pd.isna(phone_number):
+                continue
+            formatted_number = str(phone_number).strip().replace('-', '')
+            if formatted_number == '':
+                continue
+            if (
+                len(formatted_number) != 10
+                or not formatted_number.isdigit()
+                or int(formatted_number[:3]) not in AREA_CODES
+            ):
+                invalid_groups.append({
+                    "Invalid Phone Number": phone_number,
+                    "Rows": indices
+                })
+
+ if invalid_groups:
+ result[column] = {
+ "Phone Number Appears Incorrect": {
+ "Expected Type": "Ensure phone numbers match the Canadian format (XXX-XXX-XXXX)",
+ "Groups": invalid_groups,
+ "Severity": "Warning"
+ }
+ }
+ return result
+
+
+
+def location_checker(df, *columns, columns_to_features_map={}, **kwargs):
+ result = {}
+
+ for column in columns:
+ series = df[column]
+ unique_values = set(series)
+ map_of_values_to_indices = get_map_of_values_to_indices(series, kwargs.get("indices_offset", 0))
+
+ communities = set()
+ features_map = columns_to_features_map.get(column, {})
+
+ for category_code, feature_types in features_map.items():
+ get_placename_matches(
+ list(unique_values), category_code, feature_types,
+ 200, 1, communities
+ )
+
+ names_without_match = unique_values.difference(communities)
+ unrecognized_groups = []
+
+ for name in names_without_match:
+ group = {
+ "Unrecognized Name": name,
+ "Rows": map_of_values_to_indices[name]
+ }
+ unrecognized_groups.append(group)
+
+ if unrecognized_groups:
+ result[column] = {
+ "Unrecognized City Names": {
+ "Expected Type": (
+ "The following city names are not in the list of geographic names. "
+ "Please double-check that these places exist or have correct spelling "
+ "and adjust your dataset accordingly."
+ ),
+ "Groups": unrecognized_groups,
+ "Severity": "Warning"
+ }
+ }
+
+ return result
+
+
+
+def email_validator(df, *columns, **kwargs):
+ resolver = kwargs.get("get_resolver", None)
+ if resolver:
+ resolver = resolver()
+
+ result = {}
+ for column in columns:
+ series = df[column]
+ map_of_values_to_indices = get_map_of_values_to_indices(series, kwargs.get("indices_offset", 0))
+ invalid_groups = []
+
+        for email, indices in map_of_values_to_indices.items():
+            # Skip blanks up front; validate_email raises TypeError (not
+            # EmailNotValidError) on NaN, so the old in-except check never ran.
+            if pd.isna(email) or email == '':
+                continue
+            try:
+                validate_email(email, dns_resolver=resolver)
+            except EmailNotValidError:
+                invalid_groups.append({
+                    "Invalid Email": email,
+                    "Rows": indices
+                })
+
+ if invalid_groups:
+ result[column] = {
+ "Possible Errors in Email Addresses": {
+ "Expected Type": "Verify email addresses are valid",
+ "Groups": invalid_groups,
+ "Severity": "Warning"
+ }
+ }
+ return result
+
+def validate_field_values(df, *columns, **kwargs):
+    allowed_values = kwargs.get("fields_and_values")
+    delimiter = kwargs.get("delimiter")
+    indices_offset = kwargs.get("indices_offset", 0)
+
+    result = {}
+    for column in df.columns:
+        if column in allowed_values:
+            # Scope the collected values per column so one column's invalid
+            # entries do not leak into another column's message.
+            indices = []
+            invalid_values = []
+            allowed_upper = {valid.upper() for valid in allowed_values[column]}
+            series = df[column]
+            for index, value in series.items():
+                if value is not None and pd.notna(value):
+                    str_value = str(value)
+                    items = [str_value.strip()]
+                    if delimiter is not None:
+                        items = [item.strip() for item in str_value.split(delimiter)]
+
+                    for item in items:
+                        if item != '' and item.upper() not in allowed_upper:
+                            if index + indices_offset not in indices:
+                                indices.append(index + indices_offset)
+                            if item not in invalid_values:
+                                invalid_values.append(item)
+
+            if indices:
+                result[column] = {
+                    ', '.join(invalid_values) + " - is not in the list of expected values": {
+                        "Expected Type": ', '.join(allowed_values[column]),
+                        "Rows": indices,
+                        "Severity": "Error"
+                    }
+                }
+
+ return result
+
+def region_checker(df, *columns, **kwargs):
+ valid_regions = set(Regions.objects.values_list('name', flat=True))
+
+    result = {}
+    for column in columns:
+        indices = []
+        for index, value in df[column].items():
+            values_list = [item.strip() for item in str(value).split(',')]
+            if not all(item in valid_regions for item in values_list):
+                indices.append(index + kwargs.get('indices_offset', 0))
+
+        if indices:
+            result[column] = {
+                "Invalid Region": {
+                    "Expected Type": ", ".join(valid_regions),
+                    "Rows": indices,
+                    "Severity": "Error"
+                }
+            }
+
+ return result
+
+def format_postal_codes(df, *columns, **kwargs):
+ validate = kwargs.get('validate', False)
+ indices_offset = kwargs.get("indices_offset", 0)
+
+ result = {}
+
+ for column in columns:
+ series = df[column]
+ map_of_values_to_indices = get_map_of_values_to_indices(series, indices_offset)
+ invalid_groups = []
+
+ for value, indices in map_of_values_to_indices.items():
+ clean_value = value.replace(" ", "") if isinstance(value, str) else ""
+
+ if len(clean_value) == 6:
+ formatted_value = clean_value[:3] + " " + clean_value[3:]
+ for index in indices:
+ df.at[index - indices_offset, column] = formatted_value
+ elif validate:
+ if pd.isna(value) or value == "":
+ value = "Empty"
+ invalid_groups.append({
+ "Invalid Postal Code": value,
+ "Rows": indices
+ })
+
+ if validate and invalid_groups:
+ result[column] = {
+ "Invalid Postal Codes": {
+ "Expected Type": "Postal Code should be formatted as (XXX XXX)",
+ "Groups": invalid_groups,
+ "Severity": "Warning"
+ }
+ }
+
+ return result if validate else None
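A quick usage sketch of `typo_checker` on a toy frame; the cutoff mirrors the default above, and the offset models a spreadsheet whose data starts on row 2:

```python
import pandas as pd
from api.services.spreadsheet_uploader_prep import typo_checker

df = pd.DataFrame({"Applicant Name": ["Acme Ltd.", "Acme Ltd", "Other Co."]})
warnings = typo_checker(df, "Applicant Name", cutoff=0.8, indices_offset=2)
# -> {"Applicant Name": {"Similar Values Detected": {
#        ..., "Groups": [{"Typo Group": ["Acme Ltd.", "Acme Ltd"], "Rows": [2, 3]}],
#        "Severity": "Warning"}}}  (group order may vary)
```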
diff --git a/django/api/services/uploaded_vin_record.py b/django/api/services/uploaded_vin_record.py
index ae1bea36..711ad58e 100644
--- a/django/api/services/uploaded_vin_record.py
+++ b/django/api/services/uploaded_vin_record.py
@@ -1,53 +1,120 @@
+from datetime import datetime
import pandas as pd
+from django.utils import timezone
from api.models.uploaded_vin_record import UploadedVinRecord
-from api.decoder_constants import get_service
+from api.constants.decoder import get_service
def parse_and_save(uploaded_vins_file, file_response):
processed = True
- number_of_chunks_processed = 0
- number_of_chunks_to_process = uploaded_vins_file.chunks_per_run
- chunksize = uploaded_vins_file.chunk_size
start_index = uploaded_vins_file.start_index
- chunks = pd.read_csv(file_response, sep="|", chunksize=chunksize)
-
- for idx, chunk in enumerate(chunks):
- if (
- idx >= start_index
- and number_of_chunks_processed < number_of_chunks_to_process
- ):
- vin_records_to_insert = get_vin_records_to_insert(chunk)
- UploadedVinRecord.objects.bulk_create(
- vin_records_to_insert,
- ignore_conflicts=True,
+ chunks = pd.read_csv(
+ file_response, sep="|", chunksize=uploaded_vins_file.chunk_size
+ )
+
+ for idx, df in enumerate(chunks):
+ if idx == start_index:
+ df.fillna("", inplace=True)
+ vins = []
+ for _, row in df.iterrows():
+ if row["vin"] != "":
+ vins.append(row["vin"])
+ df_records_map = get_df_records_map(df)
+ existing_records_map = get_existing_records_map(vins)
+ records_to_insert = get_records_to_insert(
+ df_records_map, existing_records_map
+ )
+ UploadedVinRecord.objects.bulk_create(records_to_insert)
+ records_to_update = get_records_to_update(
+ df_records_map, existing_records_map
)
- number_of_chunks_processed = number_of_chunks_processed + 1
- elif idx >= start_index + number_of_chunks_processed:
+ UploadedVinRecord.objects.bulk_update(
+ records_to_update, ["data", "timestamp", "update_timestamp"]
+ )
+ elif idx > start_index:
processed = False
break
- new_start_index = start_index + number_of_chunks_processed
uploaded_vins_file.processed = processed
- uploaded_vins_file.start_index = new_start_index
+ uploaded_vins_file.start_index = start_index + 1
uploaded_vins_file.save()
-def get_vin_records_to_insert(df):
- result = []
- df.fillna("", inplace=True)
+# returns a dict of (vin, postal_code) -> {timestamp, data}
+def get_df_records_map(df):
+ result = {}
for _, row in df.iterrows():
- if row["vin"] != "":
- vin = row["vin"]
- postal_code = row["postal_code"]
- data = row.to_dict()
+ vin = row["vin"]
+ postal_code = row["postal_code"]
+ df_timestamp = row["snapshot_date"]
+ if vin and postal_code and df_timestamp:
+ key = (vin, postal_code)
+ timestamp = timezone.make_aware(
+ datetime.strptime(df_timestamp, "%Y-%m-%d %H:%M:%S.%f")
+ )
+ df_data = row.to_dict()
+ data = df_data if df_data else {}
del data["vin"]
del data["postal_code"]
+ del data["snapshot_date"]
+ if key in result:
+ most_recent_ts = result[key]["timestamp"]
+ if most_recent_ts < timestamp:
+ result[key] = {"timestamp": timestamp, "data": data}
+ else:
+ result[key] = {"timestamp": timestamp, "data": data}
+ return result
+
+
+# returns a dict of (vin, postal_code) -> {id, timestamp}
+def get_existing_records_map(vins):
+ result = {}
+ records = UploadedVinRecord.objects.only(
+ "id", "vin", "postal_code", "timestamp"
+ ).filter(vin__in=vins)
+ for record in records:
+ key = (record.vin, record.postal_code)
+ result[key] = {"id": record.id, "timestamp": record.timestamp}
+ return result
+
+
+# df_records_map should be dict of (vin, postal_code) -> {timestamp, data}
+# existing_records_map should be dict of (vin, postal_code) -> {id, timestamp}
+def get_records_to_insert(df_records_map, existing_records_map):
+ result = []
+ for key, value in df_records_map.items():
+ if key not in existing_records_map:
result.append(
- UploadedVinRecord(vin=vin, postal_code=postal_code, data=data)
+ UploadedVinRecord(
+ vin=key[0],
+ postal_code=key[1],
+ timestamp=value["timestamp"],
+ data=value["data"],
+ )
)
return result
+# df_records_map should be dict of (vin, postal_code) -> {timestamp, data}
+# existing_records_map should be dict of (vin, postal_code) -> {id, timestamp}
+def get_records_to_update(df_records_map, existing_records_map):
+ result = []
+ for key, value in df_records_map.items():
+ if key in existing_records_map:
+ existing_record = existing_records_map[key]
+ timestamp = value["timestamp"]
+ if existing_record["timestamp"] < timestamp:
+ result.append(
+ UploadedVinRecord(
+ id=existing_record["id"],
+ timestamp=timestamp,
+ data=value["data"],
+ update_timestamp=timezone.now(),
+ )
+ )
+ return result
+
+
def get_decode_successful(service_name, uploaded_record):
service = get_service(service_name)
if service:
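The two map helpers above split each `(vin, postal_code)` key by whether it already exists, and update only when the incoming snapshot is newer; schematically (assuming a configured Django environment):

```python
from datetime import datetime, timezone as tz
from api.services.uploaded_vin_record import get_records_to_insert, get_records_to_update

older = datetime(2024, 1, 1, tzinfo=tz.utc)
newer = datetime(2024, 6, 1, tzinfo=tz.utc)

df_records_map = {("VIN1", "V1A1A1"): {"timestamp": newer, "data": {"make": "X"}}}
existing_records_map = {("VIN1", "V1A1A1"): {"id": 7, "timestamp": older}}

# The key already exists and the snapshot is newer, so it becomes an update:
assert get_records_to_insert(df_records_map, existing_records_map) == []
updates = get_records_to_update(df_records_map, existing_records_map)
assert updates[0].id == 7 and updates[0].timestamp == newer
```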
diff --git a/django/api/settings.py b/django/api/settings.py
index dbf9125a..80592f15 100644
--- a/django/api/settings.py
+++ b/django/api/settings.py
@@ -188,7 +188,7 @@
"name": "CTHUB",
"workers": 4,
"timeout": 90,
- "retry": 120,
+ "retry": 1800,
"queue_limit": 50,
"bulk": 10,
"orm": "default",
@@ -206,6 +206,8 @@
VPIC_ERROR_CODE_NAME = os.getenv("VPIC_ERROR_CODE_NAME", "ErrorCode")
VPIC_SUCCESS_ERROR_CODE = os.getenv("VPIC_SUCCESS_ERROR_CODE", "0")
+VINPOWER_ENDPOINT = os.getenv("VINPOWER_ENDPOINT", "http://spring:8080")
+
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
@@ -226,3 +228,5 @@
},
},
}
+
+PLACENAMES_ENDPOINT = os.getenv("PLACENAMES_ENDPOINT", "https://apps.gov.bc.ca/pub/bcgnws/names/search")
diff --git a/django/api/tests/test_spreadsheet_uploader.py b/django/api/tests/test_spreadsheet_uploader.py
index f151678f..db23cbc0 100644
--- a/django/api/tests/test_spreadsheet_uploader.py
+++ b/django/api/tests/test_spreadsheet_uploader.py
@@ -4,7 +4,7 @@
import pandas as pd
from api.models.scrap_it import ScrapIt
from api.services.spreadsheet_uploader import import_from_xls
-from api.constants import ScrapItColumnMapping, ScrapItColumns
+from api.constants.constants import ScrapItColumnMapping, ScrapItColumns
from api.services.spreadsheet_uploader_prep import prepare_scrap_it
class UploadTests(TestCase):
diff --git a/django/api/utilities/generic.py b/django/api/utilities/generic.py
index 20d5a8d7..98307551 100644
--- a/django/api/utilities/generic.py
+++ b/django/api/utilities/generic.py
@@ -4,3 +4,12 @@ def get_map(key_name, objects):
key = getattr(object, key_name)
result[key] = object
return result
+
+
+def get_unified_map(key_name, value_name, maps):
+ result = {}
+    for mapping in maps:
+        key = mapping.get(key_name)
+        value = mapping.get(value_name)
+        result[key] = value
+ return result
diff --git a/django/api/utilities/series.py b/django/api/utilities/series.py
new file mode 100644
index 00000000..0aa585a5
--- /dev/null
+++ b/django/api/utilities/series.py
@@ -0,0 +1,8 @@
+# expects a Pandas series and returns a map f: value -> [indices]
+def get_map_of_values_to_indices(series, index_offset=0):
+ result = {}
+ for index, value in series.items():
+ if result.get(value) is None:
+ result[value] = []
+ result[value].append(index + index_offset)
+ return result
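For example:

```python
import pandas as pd
from api.utilities.series import get_map_of_values_to_indices

s = pd.Series(["a", "b", "a"])
print(get_map_of_values_to_indices(s, index_offset=2))
# -> {'a': [2, 4], 'b': [3]}
```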
diff --git a/django/api/viewsets/upload.py b/django/api/viewsets/upload.py
index 13704861..16132204 100644
--- a/django/api/viewsets/upload.py
+++ b/django/api/viewsets/upload.py
@@ -15,9 +15,11 @@
from api.services.minio import minio_get_object, minio_remove_object
from api.services.datasheet_template_generator import generate_template
from api.services.spreadsheet_uploader import import_from_xls
-import api.constants as constants
+import api.constants.constants as constants
from api.services.spreadsheet_uploader_prep import *
from api.services.uploaded_vins_file import create_vins_file
+from api.services.file_requirements import get_file_requirements
+from api.serializers.file_requirements import FileRequirementsSerializer
class UploadViewset(GenericViewSet):
@@ -33,7 +35,6 @@ def datasets_list(self, request):
"EV Charging Rebates",
"Hydrogen Fueling",
"Hydrogen Fleets",
- "ARC Project Tracking",
"Data Fleets",
"Scrap It",
]
@@ -47,11 +48,14 @@ def datasets_list(self, request):
@action(detail=False, methods=["post"])
@method_decorator(check_upload_permission())
def import_data(self, request):
-
filename = request.data.get("filename")
dataset_selected = request.data.get("datasetSelected")
- replace_data = request.data.get("replace", False)
+ replace_data = request.data.get("replaceData", False)
filepath = request.data.get("filepath")
+ check_for_warnings = request.data.get("checkForWarnings")
+        # boolean: if true, any warnings are returned before data is inserted;
+        # the request can then be re-sent with checkForWarnings set to false
+        # to insert the data despite the warnings.
if dataset_selected == "ICBC Vins":
file_extension = pathlib.Path(filepath).suffix
@@ -95,6 +99,7 @@ def import_data(self, request):
field_types=constants.FIELD_TYPES.get(dataset_selected),
replace_data=replace_data,
user=request.user,
+ check_for_warnings=check_for_warnings
)
if not result["success"]:
@@ -128,3 +133,12 @@ def download_dataset(self, request):
return response
except ValueError as e:
return HttpResponse(str(e), status=400)
+
+ @action(detail=False, methods=["get"])
+ def file_requirements(self, request):
+ dataset_name = request.query_params.get("dataset")
+ file_requirements = get_file_requirements(dataset_name)
+ if file_requirements is None:
+ return Response({})
+ serializer = FileRequirementsSerializer(file_requirements)
+ return Response(serializer.data)
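A hedged sketch of calling the new endpoint; the URL prefix depends on how `UploadViewset` is registered with the router, so treat the path below as an assumption:

```python
import requests

# Path prefix is assumed; adjust to the router registration for UploadViewset.
resp = requests.get(
    "http://localhost:8000/api/uploads/file_requirements",
    params={"dataset": "CVP Data"},
)
print(resp.json())  # {"sheet": ..., "columns": ..., "formats": ...}, or {} if none exist
```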
diff --git a/django/requirements.txt b/django/requirements.txt
index b8a19ac5..1ece7e27 100644
--- a/django/requirements.txt
+++ b/django/requirements.txt
@@ -16,9 +16,13 @@ whitenoise==5.2.0
dj-database-url==0.5.0
django-heroku==0.3.1
gunicorn==20.1.0
+numpy==1.26.4
python-keycloak==0.26.1
python-dotenv==0.19.0
pandas==1.3.4
openpyxl==3.0.9
minio==7.1.1
xlsxwriter==3.2.0
+xmltodict==0.13.0
+dnspython==2.6.1
+email-validator==2.2.0
diff --git a/django/workers/apps.py b/django/workers/apps.py
index c05d62f6..81e58fbb 100644
--- a/django/workers/apps.py
+++ b/django/workers/apps.py
@@ -9,10 +9,12 @@ def ready(self):
from workers.scheduled_jobs import (
schedule_create_minio_bucket,
schedule_read_uploaded_vins_file,
- schedule_batch_decode_vins,
+ schedule_batch_decode_vins_vpic,
+ schedule_batch_decode_vins_vinpower,
)
if "qcluster" in sys.argv:
schedule_create_minio_bucket()
schedule_read_uploaded_vins_file()
- schedule_batch_decode_vins()
+ schedule_batch_decode_vins_vpic()
+ schedule_batch_decode_vins_vinpower()
diff --git a/django/workers/decorators/tasks.py b/django/workers/decorators/tasks.py
new file mode 100644
index 00000000..ccba8e6c
--- /dev/null
+++ b/django/workers/decorators/tasks.py
@@ -0,0 +1,31 @@
+import ctypes
+import threading
+
+
+class TaskTimeoutException(Exception):
+ pass
+
+
+def timeout(time):
+ def wrapper(func):
+ def wrapped(*args, **kwargs):
+ current_thread_id = threading.current_thread().ident
+
+ def throw_timeout():
+ ctypes.pythonapi.PyThreadState_SetAsyncExc(
+ ctypes.c_ulong(current_thread_id),
+ ctypes.py_object(TaskTimeoutException),
+ )
+
+ t = threading.Timer(time, throw_timeout)
+ t.start()
+ try:
+ func(*args, **kwargs)
+ t.cancel()
+ except Exception as ex:
+ t.cancel()
+ raise ex
+
+ return wrapped
+
+ return wrapper
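A usage note on the decorator above: `PyThreadState_SetAsyncExc` only delivers the exception at the next Python bytecode boundary, so a task blocked inside a long C call (for example a socket read) will not be interrupted until control returns to Python code. A minimal sketch:

```python
import time
from workers.decorators.tasks import timeout, TaskTimeoutException

@timeout(2)
def slow_task():
    for _ in range(10):
        time.sleep(1)  # pure-Python loop, so the async exception can land between iterations

try:
    slow_task()
except TaskTimeoutException:
    print("task exceeded its 2s budget")
```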
diff --git a/django/workers/external_apis/vinpower.py b/django/workers/external_apis/vinpower.py
index 735c918d..6c1adf86 100644
--- a/django/workers/external_apis/vinpower.py
+++ b/django/workers/external_apis/vinpower.py
@@ -1,2 +1,32 @@
+import requests
+from django.conf import settings
+import json
+import xmltodict
+
+
def batch_decode(uploaded_vin_records):
- return {"successful_records": [], "failed_vins": []}
+ successful_records = {}
+ failed_vins = set()
+ url = settings.VINPOWER_ENDPOINT + "/decode"
+
+ vins = []
+ for record in uploaded_vin_records:
+ vins.append(record.vin)
+ headers = {"content-type": "application/json"}
+ response = requests.get(url, data=json.dumps(vins), headers=headers)
+ response.raise_for_status()
+
+ data = response.json()
+ for vin in vins:
+ decoded_xml = data.get(vin)
+ if decoded_xml is not None:
+            parsed = xmltodict.parse(decoded_xml)
+            atts = parsed["VINPOWER"]["VIN"]["DECODED"]["ITEM"]
+            # xmltodict yields a single dict (not a list) when there is only one ITEM
+            if isinstance(atts, dict):
+                atts = [atts]
+            decoded_data = {}
+ for att in atts:
+ decoded_data[att["@name"]] = att["@value"]
+ successful_records[vin] = decoded_data
+ else:
+ failed_vins.add(vin)
+
+ return {"successful_records": successful_records, "failed_vins": failed_vins}
diff --git a/django/workers/external_apis/vpic.py b/django/workers/external_apis/vpic.py
index fa941a56..706df439 100644
--- a/django/workers/external_apis/vpic.py
+++ b/django/workers/external_apis/vpic.py
@@ -17,7 +17,7 @@ def batch_decode(uploaded_vin_records):
body = {"format": "json", "data": request_data}
response = requests.post(url, data=body)
- response.raise_for_status
+ response.raise_for_status()
data = response.json()["Results"]
decoded_vins_map = {}
for record in data:
diff --git a/django/workers/scheduled_jobs.py b/django/workers/scheduled_jobs.py
index 28d878c7..09775486 100644
--- a/django/workers/scheduled_jobs.py
+++ b/django/workers/scheduled_jobs.py
@@ -20,23 +20,38 @@ def schedule_read_uploaded_vins_file():
"workers.tasks.read_uploaded_vins_file",
name="read_uploaded_vins_file",
schedule_type="C",
- cron="*/15 * * * *",
- q_options={"timeout": 660, "ack_failure": True},
+ cron="*/3 * * * *",
+ q_options={"timeout": 165, "ack_failure": True},
)
except IntegrityError:
pass
-def schedule_batch_decode_vins():
+def schedule_batch_decode_vins_vpic():
try:
schedule(
"workers.tasks.batch_decode_vins",
"vpic",
50,
- name="batch_decode_vins",
+ name="vpic_batch_decode_vins",
schedule_type="C",
- cron="* * * * *",
- q_options={"timeout": 60, "ack_failure": True},
+ cron="*/2 * * * *",
+ q_options={"timeout": 105, "ack_failure": True},
+ )
+ except IntegrityError:
+ pass
+
+
+def schedule_batch_decode_vins_vinpower():
+ try:
+ schedule(
+ "workers.tasks.batch_decode_vins",
+ "vinpower",
+ 500,
+ name="vinpower_batch_decode_vins",
+ schedule_type="C",
+ cron="*/2 * * * *",
+ q_options={"timeout": 105, "ack_failure": True},
)
except IntegrityError:
pass
diff --git a/django/workers/tasks.py b/django/workers/tasks.py
index 280a6c8a..a14b451e 100644
--- a/django/workers/tasks.py
+++ b/django/workers/tasks.py
@@ -1,13 +1,13 @@
from django.conf import settings
from api.services.minio import get_minio_client, get_minio_object
-from func_timeout import func_timeout, FunctionTimedOut
from api.models.uploaded_vins_file import UploadedVinsFile
from api.models.uploaded_vin_record import UploadedVinRecord
-from api.decoder_constants import get_service
-from api.utilities.generic import get_map
+from api.constants.decoder import get_service
+from api.utilities.generic import get_unified_map
from api.services.decoded_vin_record import save_decoded_data
from api.services.uploaded_vin_record import parse_and_save
from django.db import transaction
+from workers.decorators.tasks import timeout
def create_minio_bucket():
@@ -18,25 +18,9 @@ def create_minio_bucket():
client.make_bucket(bucket_name)
+@transaction.atomic
+@timeout(150)
def read_uploaded_vins_file():
- # TODO: this job will probably have to become more involved; it currently just uploads whatever is in the file while skipping records
- # that encounter uniqueness conflicts.
- # we'll probably have to do an initial, chunked read from the
- # file in order to build a map of (vin, postal_code) -> (record chunk index, record index within chunk) of unique records (based on snapshot_date?),
- # then we'll have to compare the (vin, postal_code) keys to existing records in the database, and
- # determine which ones need to get bulk-inserted, and which ones bulk-updated.
- # also have to keep in mind the memory used by any data structures we use
- def close_file_response(file_response):
- if file_response is not None:
- file_response.close()
- file_response.release_conn()
-
- @transaction.atomic
- def inner(vins_file, file_response):
- if vins_file is not None and file_response is not None:
- parse_and_save(vins_file, file_response)
-
- file_response = None
vins_file = (
UploadedVinsFile.objects.filter(processed=False)
.order_by("create_timestamp")
@@ -44,60 +28,57 @@ def inner(vins_file, file_response):
)
if vins_file is not None:
file_response = get_minio_object(vins_file.filename)
- try:
- func_timeout(600, inner, args=(vins_file, file_response))
- close_file_response(file_response)
- except FunctionTimedOut:
- print("reading vins file job timed out")
- close_file_response(file_response)
- raise Exception
- except Exception:
- close_file_response(file_response)
- raise Exception
+ if file_response is not None:
+ parse_and_save(vins_file, file_response)
+ try:
+ file_response.close()
+ file_response.release_conn()
+ except Exception:
+ pass
+@timeout(90)
def batch_decode_vins(service_name, batch_size=50):
- def inner():
- max_decode_attempts = settings.MAX_DECODE_ATTEMPTS
- service = get_service(service_name)
- if service:
- decoded_vin_model = service.MODEL.value
- filters = {
- service.CURRENT_DECODE_SUCCESSFUL.value: False,
- service.NUMBER_OF_CURRENT_DECODE_ATTEMPTS.value
- + "__lt": max_decode_attempts,
- }
- order_by = [
- service.NUMBER_OF_CURRENT_DECODE_ATTEMPTS.value,
- "create_timestamp",
- ]
- uploaded_vin_records = UploadedVinRecord.objects.filter(**filters).order_by(
- *order_by
- )[:batch_size]
- uploaded_vins = set()
- for uploaded_record in uploaded_vin_records:
- uploaded_vins.add(uploaded_record.vin)
- vins_to_update = set()
- decoded_records_to_update_map = get_map(
- "vin", decoded_vin_model.objects.filter(vin__in=uploaded_vins)
- )
- for decoded_vin in decoded_records_to_update_map:
- vins_to_update.add(decoded_vin)
- vins_to_insert = uploaded_vins.difference(vins_to_update)
-
- decoder = service.BATCH_DECODER.value
- decoded_data = decoder(uploaded_vin_records)
+ max_decode_attempts = settings.MAX_DECODE_ATTEMPTS
+ service = get_service(service_name)
+ if service:
+ decoded_vin_model = service.MODEL.value
+ filters = {
+ service.CURRENT_DECODE_SUCCESSFUL.value: False,
+ service.NUMBER_OF_CURRENT_DECODE_ATTEMPTS.value
+ + "__lt": max_decode_attempts,
+ }
+ order_by = [
+ service.NUMBER_OF_CURRENT_DECODE_ATTEMPTS.value,
+ "create_timestamp",
+ ]
+ uploaded_vin_records = (
+ UploadedVinRecord.objects.defer("data")
+ .filter(**filters)
+ .order_by(*order_by)[:batch_size]
+ )
+ uploaded_vins = set()
+ for uploaded_record in uploaded_vin_records:
+ uploaded_vins.add(uploaded_record.vin)
+ vins_to_update = set()
+ vins_to_decoded_record_ids_map = get_unified_map(
+ "vin",
+ "id",
+ decoded_vin_model.objects.only("id", "vin")
+ .filter(vin__in=uploaded_vins)
+ .values(),
+ )
+ for decoded_vin in vins_to_decoded_record_ids_map:
+ vins_to_update.add(decoded_vin)
+ vins_to_insert = uploaded_vins.difference(vins_to_update)
- save_decoded_data(
- uploaded_vin_records,
- vins_to_insert,
- decoded_records_to_update_map,
- service_name,
- decoded_data,
- )
+ decoder = service.BATCH_DECODER.value
+ decoded_data = decoder(uploaded_vin_records)
- try:
- func_timeout(45, inner)
- except FunctionTimedOut:
- print("batch decode vins job timed out")
- raise Exception
+ save_decoded_data(
+ uploaded_vin_records,
+ vins_to_insert,
+ vins_to_decoded_record_ids_map,
+ service_name,
+ decoded_data,
+ )
diff --git a/docker-compose.yml b/docker-compose.yml
index 52add316..117c935d 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -103,6 +103,14 @@ services:
- /web/node_modules
ports:
- 3000:3000
+ spring:
+ build: ./spring
+ ports:
+ - "8080:8080"
+ environment:
+ - SERVER_PORT=8080
+ volumes:
+ - ./spring:/app
volumes:
superset_home:
diff --git a/frontend/package.json b/frontend/package.json
index d528a73f..868ba3bc 100644
--- a/frontend/package.json
+++ b/frontend/package.json
@@ -1,6 +1,6 @@
{
"name": "frontend",
- "version": "0.2.0",
+ "version": "0.3.0",
"private": true,
"dependencies": {
"@emotion/react": "^11.6.0",
diff --git a/frontend/src/app/components/AlertDialog.js b/frontend/src/app/components/AlertDialog.js
index fe9977b0..c592bd27 100644
--- a/frontend/src/app/components/AlertDialog.js
+++ b/frontend/src/app/components/AlertDialog.js
@@ -6,7 +6,7 @@ import DialogActions from "@mui/material/DialogActions";
import DialogContent from "@mui/material/DialogContent";
import DialogContentText from "@mui/material/DialogContentText";
import DialogTitle from "@mui/material/DialogTitle";
-
+import WarningAmberIcon from '@mui/icons-material/WarningAmber';
const AlertDialog = (props) => {
const {
open,
@@ -31,11 +31,13 @@ const AlertDialog = (props) => {
aria-labelledby="alert-dialog-title"
aria-describedby="alert-dialog-description"
>
-