merging edge
aaron-kulkarni committed Jul 26, 2024
1 parent bb66627 commit 5718edb
Showing 737 changed files with 34,710 additions and 5,460 deletions.
148 changes: 95 additions & 53 deletions .github/workflows/analyses-snapshot-test.yaml
@@ -11,15 +11,30 @@ on:
description: 'Branch or tag that provides the snapshot and test code at test runtime'
required: true
default: 'edge'
OPEN_PR_ON_FAILURE:
description: 'If the test fails, open a PR to update the snapshots'
type: boolean
required: true
default: false
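# Manual dispatch example (a sketch; assumes the GitHub CLI and this workflow's file name):
#   gh workflow run analyses-snapshot-test.yaml -f ANALYSIS_REF=edge -f SNAPSHOT_REF=edge -f OPEN_PR_ON_FAILURE=true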
schedule:
- cron: '26 7 * * *' # 7:26 AM UTC
pull_request:
paths:
- 'api/**'
- '!api/tests/**'
- '!api/docs/**'
- '!api/release-notes-internal.md'
- '!api/release-notes.md'
- 'shared-data/**/*'
- '!shared-data/js/**'
- '.github/workflows/analyses-snapshot-test.yaml'
- 'analyses-snapshot-testing/**'

types:
- opened #default
- synchronize #default
- reopened #default
- labeled

concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
@@ -32,66 +32,93 @@ jobs:
env:
ANALYSIS_REF: ${{ github.event.inputs.ANALYSIS_REF || github.head_ref || 'edge' }}
SNAPSHOT_REF: ${{ github.event.inputs.SNAPSHOT_REF || github.head_ref || 'edge' }}

# If we're running because of workflow_dispatch, use the user input to decide
# whether to open a PR on failure. Otherwise, open a PR on failure only when
# the pull request carries the 'gen-analyses-snapshot-pr' label.
OPEN_PR_ON_FAILURE: ${{ (github.event_name == 'workflow_dispatch' && github.event.inputs.OPEN_PR_ON_FAILURE) || ((github.event_name != 'workflow_dispatch') && (contains(github.event.pull_request.labels.*.name, 'gen-analyses-snapshot-pr'))) }}
PR_TARGET_BRANCH: ${{ github.event.pull_request.base.ref || 'not a pr'}}
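# How OPEN_PR_ON_FAILURE resolves per trigger (a sketch of the expression above):
#   workflow_dispatch -> whatever the OPEN_PR_ON_FAILURE input was set to
#   pull_request      -> true only when the 'gen-analyses-snapshot-pr' label is present
#   schedule          -> false here; the schedule path is gated separately on event_name below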
steps:
- name: Checkout Repository
uses: actions/checkout@v4
with:
ref: ${{ env.SNAPSHOT_REF }}

- name: Are the analyses snapshots in my PR branch in sync with the target branch?
if: github.event_name == 'pull_request'
run: |
git fetch origin ${{ env.PR_TARGET_BRANCH }}
DIFF_OUTPUT=$(git diff HEAD origin/${{ env.PR_TARGET_BRANCH }} -- analyses-snapshot-testing/tests/__snapshots__/analyses_snapshot_test)
if [ -n "$DIFF_OUTPUT" ]; then
echo "Analyses snapshots do NOT match ${{ env.PR_TARGET_BRANCH }} snapshots."
echo "Is this becasue you have not pulled and merged ${{ env.PR_TARGET_BRANCH }}?"
echo "Or is this because you have already updated your snapshots and are all good 😊?"
else
echo "Analyses snapshots match ${{ env.PR_TARGET_BRANCH }} snapshots."
fi
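# Local reproduction of this check (a sketch; assumes an 'origin' remote and 'edge' as the target branch):
#   git fetch origin edge
#   git diff HEAD origin/edge -- analyses-snapshot-testing/tests/__snapshots__/analyses_snapshot_test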
- name: Docker Build
working-directory: analyses-snapshot-testing
run: make build-opentrons-analysis

- name: Set up Python 3.12
uses: actions/setup-python@v5
with:
python-version: '3.12'
cache: 'pipenv'
cache-dependency-path: analyses-snapshot-testing/Pipfile.lock

- name: Setup Python Dependencies
working-directory: analyses-snapshot-testing
run: make setup

- name: Run Test
id: run_test
working-directory: analyses-snapshot-testing
run: make snapshot-test

- name: Upload Report
if: '!cancelled()'
uses: actions/upload-artifact@v4
with:
name: test-report
path: analyses-snapshot-testing/results/

- name: Handle Test Failure
id: handle_failure
if: always() && steps.run_test.outcome == 'failure'
working-directory: analyses-snapshot-testing
run: make snapshot-test-update
- name: Handle Test Failure
id: handle_failure
if: always() && steps.run_test.outcome == 'failure' && (env.OPEN_PR_ON_FAILURE == 'true' || github.event_name == 'schedule')
working-directory: analyses-snapshot-testing
run: make snapshot-test-update

- name: Create Snapshot update Request
id: create_pull_request
if: always() && steps.handle_failure.outcome == 'success'
uses: peter-evans/create-pull-request@v6
with:
commit-message: 'fix(analyses-snapshot-testing): snapshot failure capture'
title: 'fix(analyses-snapshot-testing): ${{ env.ANALYSIS_REF }} snapshot failure capture'
body: 'This PR is an automated snapshot update request. Please review the changes and merge if they are acceptable or find your bug and fix it.'
- name: Create Snapshot update Request
id: create_pull_request
if: always() && steps.handle_failure.outcome == 'success' && env.OPEN_PR_ON_FAILURE == 'true' && github.event_name == 'pull_request'
uses: peter-evans/create-pull-request@v6
with:
commit-message: 'fix(analyses-snapshot-testing): heal analyses snapshots'
title: 'fix(analyses-snapshot-testing): heal ${{ env.ANALYSIS_REF }} snapshots'
body: 'This PR was requested on the PR https://github.com/${{ github.repository }}/pull/${{ github.event.pull_request.number }}'
branch: 'analyses-snapshot-testing/${{ env.ANALYSIS_REF }}-from-${{ env.SNAPSHOT_REF}}'
base: ${{ env.SNAPSHOT_REF}}
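# Example (assumed values): with ANALYSIS_REF=my-feature and SNAPSHOT_REF=my-feature, the healing PR
# comes from branch 'analyses-snapshot-testing/my-feature-from-my-feature' and targets 'my-feature'.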

- name: Comment on PR
if: always() && steps.create_pull_request.outcome == 'success' && github.event_name == 'pull_request'
uses: actions/github-script@v7
with:
script: |
const message = 'A PR has been opened to address analyses snapshot changes. Please review the changes here: https://github.com/${{ github.repository }}/pull/${{ steps.create-pull-request.outputs.pull-request-number }}';
github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: message
});
- name: Comment on feature PR
if: always() && steps.create_pull_request.outcome == 'success' && github.event_name == 'pull_request'
uses: actions/github-script@v7
with:
script: |
const message = 'A PR has been opened to address analyses snapshot changes. Please review the changes here: https://github.com/${{ github.repository }}/pull/${{ steps.create_pull_request.outputs.pull-request-number }}';
github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: message
});
- name: Create Snapshot update Request on edge overnight failure
if: always() && steps.handle_failure.outcome == 'success' && github.event_name == 'schedule'
uses: peter-evans/create-pull-request@v6
with: # the scheduled run uses the default values for ANALYSIS_REF and SNAPSHOT_REF, which are 'edge'
commit-message: 'fix(analyses-snapshot-testing): heal ${{ env.ANALYSIS_REF }} snapshots'
title: 'fix(analyses-snapshot-testing): heal ${{ env.ANALYSIS_REF }} snapshots'
body: 'The ${{ env.ANALYSIS_REF }} overnight analyses snapshot test is failing. This PR was opened to alert us to the failure.'
branch: 'analyses-snapshot-testing/${{ env.ANALYSIS_REF }}-from-${{ env.SNAPSHOT_REF}}'
base: ${{ env.SNAPSHOT_REF}}
78 changes: 67 additions & 11 deletions .github/workflows/app-test-build-deploy.yaml
@@ -185,28 +185,50 @@ jobs:
echo "both develop builds for edge"
echo 'variants=["release", "internal-release"]' >> $GITHUB_OUTPUT
echo 'type=develop' >> $GITHUB_OUTPUT
elif [ "${{ format('{0}', endsWith(github.ref, 'app-build-internal')) }}" = "true" ] ; then
echo "internal-release builds for app-build-internal suffixes"
elif [ "${{ format('{0}', contains(github.ref, 'app-build-internal')) }}" = "true" ] ; then
echo 'variants=["internal-release"]' >> $GITHUB_OUTPUT
echo 'type=develop' >> $GITHUB_OUTPUT
elif [ "${{ format('{0}', endsWith(github.ref, 'app-build')) }}" = "true" ] ; then
echo "release develop builds for app-build suffixes"
if [ "${{ format('{0}', contains(github.ref, 'as-release')) }}" = "true" ] ; then
echo "internal-release as-release builds for app-build-internal + as-release suffixes"
echo 'type=as-release' >> $GITHUB_OUTPUT
else
echo "internal-release develop builds for app-build-internal suffixes"
echo 'type=develop' >> $GITHUB_OUTPUT
fi
elif [ "${{ format('{0}', contains(github.ref, 'app-build')) }}" = "true" ] ; then
echo 'variants=["release"]' >> $GITHUB_OUTPUT
echo 'type=develop' >> $GITHUB_OUTPUT
elif [ "${{ format('{0}', endsWith(github.ref, 'app-build-both')) }}" = "true" ] ; then
echo "Both develop builds for app-build-both suffixes"
if [ "${{ format('{0}', contains(github.ref, 'as-release')) }}" = "true" ] ; then
echo "release as-release builds for app-build + as-release suffixes"
echo 'type=as-release' >> $GITHUB_OUTPUT
else
echo "release develop builds for app-build suffixes"
echo 'type=develop' >> $GITHUB_OUTPUT
fi
elif [ "${{ format('{0}', contains(github.ref, 'app-build-both')) }}" = "true" ] ; then
echo 'variants=["release", "internal-release"]' >> $GITHUB_OUTPUT
echo 'type=develop' >> $GITHUB_OUTPUT
if [ "${{ format('{0}', contains(github.ref, 'as-release')) }}" = "true" ] ; then
echo "Both as-release builds for app-build-both + as-release suffixes"
echo 'type=as-release' >> $GITHUB_OUTPUT
else
echo "Both develop builds for app-build-both + as-release suffixes"
echo 'type=develop' >> $GITHUB_OUTPUT
fi
else
echo "No build for ref ${{github.ref}} and event ${{github.event_type}}"
echo 'variants=[]' >> $GITHUB_OUTPUT
echo 'type=develop' >> $GITHUB_OUTPUT
fi
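# Branch-name examples (a sketch with assumed names; note that ordering matters with contains():
# a ref containing 'app-build-both' also matches the earlier 'app-build' check):
#   my-branch-app-build-internal            -> variants=["internal-release"], type=develop
#   my-branch-app-build-internal-as-release -> variants=["internal-release"], type=as-release
#   my-branch-app-build                     -> variants=["release"],          type=develop
#   my-branch-app-build-as-release          -> variants=["release"],          type=as-release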
- name: set summary
run: |
echo 'Type: ${{steps.determine-build-type.outputs.type}} Variants: ${{steps.determine-build-type.outputs.variants}}' >> $GITHUB_STEP_SUMMARY
build-app:
needs: [determine-build-type]
if: needs.determine-build-type.outputs.variants != '[]'
strategy:
fail-fast: false
matrix:
os: ['windows-2022', 'ubuntu-22.04', 'macos-latest']
variant: ${{fromJSON(needs.determine-build-type.outputs.variants)}}
@@ -277,15 +277,49 @@ jobs:
npm config set cache ${{ github.workspace }}/.npm-cache
yarn config set cache-folder ${{ github.workspace }}/.yarn-cache
make setup-js
- name: 'Configure Windows code signing environment'
if: startsWith(matrix.os, 'windows') && contains(needs.determine-build-type.outputs.type, 'release')
shell: bash
run: |
echo "${{ secrets.SM_CLIENT_CERT_FILE_B64 }}" | base64 --decode > /d/Certificate_pkcs12.p12
echo "${{ secrets.WINDOWS_CSC_B64}}" | base64 --decode > /d/opentrons_labworks_inc.crt
echo "C:\Program Files (x86)\Windows Kits\10\App Certification Kit" >> $GITHUB_PATH
echo "C:\Program Files (x86)\Microsoft SDKs\Windows\v10.0A\bin\NETFX 4.8 Tools" >> $GITHUB_PATH
echo "C:\Program Files\DigiCert\DigiCert Keylocker Tools" >> $GITHUB_PATH
- name: 'Setup Windows code signing helpers'
if: startsWith(matrix.os, 'windows') && contains(needs.determine-build-type.outputs.type, 'release')
shell: cmd
env:
SM_HOST: ${{ secrets.SM_HOST }}
SM_CLIENT_CERT_FILE: "D:\\Certificate_pkcs12.p12"
SM_CLIENT_CERT_PASSWORD: ${{secrets.SM_CLIENT_CERT_PASSWORD}}
SM_API_KEY: ${{secrets.SM_API_KEY}}
run: |
curl -X GET https://one.digicert.com/signingmanager/api-ui/v1/releases/Keylockertools-windows-x64.msi/download -H "x-api-key:${{secrets.SM_API_KEY}}" -o Keylockertools-windows-x64.msi
msiexec /i Keylockertools-windows-x64.msi /quiet /qn
smksp_registrar.exe list
smctl.exe keypair ls
C:\Windows\System32\certutil.exe -csp "DigiCert Signing Manager KSP" -key -user
smksp_cert_sync.exe
smctl.exe healthcheck --all
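REM The sequence above (as understood from DigiCert KeyLocker conventions): install the KeyLocker
REM tools MSI quietly, list the registered KSP and keypairs, sync signing certificates into the
REM Windows certificate store, then verify end-to-end connectivity with a healthcheck.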
# build the desktop app and deploy it
- name: 'build ${{matrix.variant}} app for ${{ matrix.os }}'
if: matrix.target == 'desktop'
timeout-minutes: 60
env:
OT_APP_MIXPANEL_ID: ${{ secrets.OT_APP_MIXPANEL_ID }}
OT_APP_INTERCOM_ID: ${{ secrets.OT_APP_INTERCOM_ID }}
WIN_CSC_LINK: ${{ secrets.OT_APP_CSC_WINDOWS }}
WIN_CSC_KEY_PASSWORD: ${{ secrets.OT_APP_CSC_KEY_WINDOWS }}
WINDOWS_SIGN: ${{ format('{0}', contains(needs.determine-build-type.outputs.type, 'release')) }}
SM_HOST: ${{secrets.SM_HOST}}
SM_CLIENT_CERT_FILE: "D:\\Certificate_pkcs12.p12"
SM_CLIENT_CERT_PASSWORD: ${{secrets.SM_CLIENT_CERT_PASSWORD}}
SM_API_KEY: ${{secrets.SM_API_KEY}}
SM_CODE_SIGNING_CERT_SHA1_HASH: ${{secrets.SM_CODE_SIGNING_CERT_SHA1_HASH}}
SM_KEYPAIR_ALIAS: ${{secrets.SM_KEYPAIR_ALIAS}}
WINDOWS_CSC_FILEPATH: "D:\\opentrons_labworks_inc.crt"
CSC_LINK: ${{ secrets.OT_APP_CSC_MACOS }}
CSC_KEY_PASSWORD: ${{ secrets.OT_APP_CSC_KEY_MACOS }}
APPLE_ID: ${{ secrets.OT_APP_APPLE_ID }}
6 changes: 4 additions & 2 deletions abr-testing/abr_testing/automation/google_sheets_tool.py
@@ -136,7 +136,6 @@ def column_letter_to_index(column_letter: str) -> int:

for col_offset, col_values in enumerate(data):
column_index = start_column_index + col_offset
# column_letter = index_to_column_letter(column_index)
for row_offset, value in enumerate(col_values):
row_index = start_row + row_offset
try:
@@ -163,7 +162,10 @@ def column_letter_to_index(column_letter: str) -> int:
)

body = {"requests": requests}
self.spread_sheet.batch_update(body=body)
try:
self.spread_sheet.batch_update(body=body)
except gspread.exceptions.APIError as e:
print(f"ERROR MESSAGE: {e}")

def update_cell(
self, sheet_title: str, row: int, column: int, single_data: Any
20 changes: 15 additions & 5 deletions abr-testing/abr_testing/data_collection/abr_google_drive.py
@@ -39,6 +39,8 @@ def create_data_dictionary(
"""Pull data from run files and format into a dictionary."""
runs_and_robots: List[Any] = []
runs_and_lpc: List[Dict[str, Any]] = []
headers: List[str] = []
headers_lpc: List[str] = []
for filename in os.listdir(storage_directory):
file_path = os.path.join(storage_directory, filename)
if file_path.endswith(".json"):
@@ -49,7 +51,14 @@ def create_data_dictionary(
if not isinstance(file_results, dict):
continue
run_id = file_results.get("run_id", "NaN")
try:
start_time_test = file_results["startedAt"]
completed_time_test = file_results["completedAt"]
except KeyError:
print(f"Run {run_id} is incomplete. Skipping run.")
continue
if run_id in runs_to_save:
print("started reading run.")
robot = file_results.get("robot_name")
protocol_name = file_results["protocol"]["metadata"].get("protocolName", "")
software_version = file_results.get("API_Version", "")
@@ -74,13 +83,13 @@ def create_data_dictionary(
)
try:
start_time = datetime.strptime(
file_results.get("startedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
start_time_test, "%Y-%m-%dT%H:%M:%S.%f%z"
)
adjusted_start_time = start_time - timedelta(hours=4)
start_date = str(adjusted_start_time.date())
start_time_str = str(adjusted_start_time).split("+")[0]
complete_time = datetime.strptime(
file_results.get("completedAt", ""), "%Y-%m-%dT%H:%M:%S.%f%z"
completed_time_test, "%Y-%m-%dT%H:%M:%S.%f%z"
)
adjusted_complete_time = complete_time - timedelta(hours=4)
complete_time_str = str(adjusted_complete_time).split("+")[0]
@@ -130,15 +139,16 @@ def create_data_dictionary(
**pipette_dict,
**plate_measure,
}
headers: List[str] = list(row_2.keys())
# runs_and_robots[run_id] = row_2
headers = list(row_2.keys())
runs_and_robots.append(list(row_2.values()))
# LPC Data Recording
runs_and_lpc, headers_lpc = read_robot_logs.lpc_data(
file_results, row_for_lpc, runs_and_lpc
)
else:
continue
num_of_runs_read = len(runs_and_robots)
print(f"Number of runs read: {num_of_runs_read}")
transposed_runs_and_robots = list(map(list, zip(*runs_and_robots)))
transposed_runs_and_lpc = list(map(list, zip(*runs_and_lpc)))
return transposed_runs_and_robots, headers, transposed_runs_and_lpc, headers_lpc
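# Illustration of the zip(*) transpose used above (toy data, not part of this commit):
#   rows = [["robotA", "run1"], ["robotB", "run2"]]
#   list(map(list, zip(*rows)))  ->  [["robotA", "robotB"], ["run1", "run2"]]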
@@ -207,7 +217,6 @@ def create_data_dictionary(
start_row = google_sheet.get_index_row() + 1
print(start_row)
google_sheet.batch_update_cells(transposed_runs_and_robots, "A", start_row, "0")
# Calculate Robot Lifetimes

# Add LPC to google sheet
google_sheet_lpc = google_sheets_tool.google_sheet(credentials_path, "ABR-LPC", 0)
@@ -216,4 +225,5 @@ def create_data_dictionary(
transposed_runs_and_lpc, "A", start_row_lpc, "0"
)
robots = list(set(google_sheet.get_column(1)))
# Calculate Robot Lifetimes
sync_abr_sheet.determine_lifetime(google_sheet)