diff --git a/.github/workflows/test-ui.yml b/.github/workflows/test-ui.yml
index 5478460877c..bc1a5a9ecc2 100644
--- a/.github/workflows/test-ui.yml
+++ b/.github/workflows/test-ui.yml
@@ -36,6 +36,7 @@ jobs:
- pre-test
runs-on: ${{ endsWith(github.repository, '-enterprise') && fromJSON('["self-hosted", "ondemand", "linux", "type=m7a.2xlarge;m6a.2xlarge"]') || 'ubuntu-latest' }}
timeout-minutes: 30
+ continue-on-error: true
defaults:
run:
working-directory: ui
@@ -61,17 +62,25 @@ jobs:
secrets: |-
kv/data/teams/nomad/ui PERCY_TOKEN ;
- name: ember exam
+ id: exam
env:
PERCY_TOKEN: ${{ env.PERCY_TOKEN || secrets.PERCY_TOKEN }}
PERCY_PARALLEL_NONCE: ${{ needs.pre-test.outputs.nonce }}
run: |
yarn exam:parallel --split=${{ matrix.split }} --partition=${{ matrix.partition }} --json-report=test-results/test-results.json
- continue-on-error: true
+ # continue-on-error is set at the job level, but we still want to alert the author when
+ # tests fail or time out. Without the checks below, errors would appear in the output
+ # while the workflow still succeeds / shows a green checkmark.
- name: Express timeout failure
if: ${{ failure() }}
run: exit 1
+ - name: Check test status
+ if: ${{ always() && steps.exam.outcome != 'success' }}
+ run: |
+ echo "Tests failed or timed out in partition ${{ matrix.partition }}"
+ exit 1
- name: Upload partition test results
- if: github.event_name == 'push' && github.ref == 'refs/heads/main'
+ if: (github.event_name == 'push' && github.ref == 'refs/heads/main') || github.event_name == 'pull_request'
uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
with:
name: test-results-${{ matrix.partition }}
@@ -101,15 +110,16 @@ jobs:
secrets: |-
kv/data/teams/nomad/ui PERCY_TOKEN ;
- name: Download all test results
- if: github.event_name == 'push' && github.ref == 'refs/heads/main'
+ if: (github.event_name == 'push' && github.ref == 'refs/heads/main') || github.event_name == 'pull_request'
uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
pattern: test-results-*
path: test-results
- name: Combine test results for comparison
- if: github.event_name == 'push' && github.ref == 'refs/heads/main'
+ if: (github.event_name == 'push' && github.ref == 'refs/heads/main') || github.event_name == 'pull_request'
run: node ../scripts/combine-ui-test-results.js
+
- name: Upload combined results for comparison
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
@@ -117,12 +127,212 @@ jobs:
name: test-results-${{ github.sha }}
path: ui/combined-test-results.json
retention-days: 90
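+ # The per-partition results were merged into combined-test-results.json above, so the partition artifacts are no longer needed.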
+ - name: Delete partition test results
+ if: (github.event_name == 'push' && github.ref == 'refs/heads/main') || github.event_name == 'pull_request'
+ uses: geekyeggo/delete-artifact@f275313e70c08f6120db482d7a6b98377786765b # v5.1.0
+ with:
+ name: test-results-*
+
+ - name: Upload Current PR results
+ if: github.event_name == 'pull_request'
+ uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
+ with:
+ name: pr-test-results-${{ github.sha }} # Prefix with "pr-" to avoid comparing with main during analyze step
+ path: ui/combined-test-results.json
+ retention-days: 1
- name: finalize
env:
PERCY_TOKEN: ${{ env.PERCY_TOKEN || secrets.PERCY_TOKEN }}
PERCY_PARALLEL_NONCE: ${{ needs.pre-test.outputs.nonce }}
run: yarn percy build:finalize
+
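+ # Compare this PR's per-test timings against historical results and post a summary comment on the PR.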
+ analyze-times:
+ # TODO: temporarily commented out; using a hardcoded SHA and run ID for testing
+ # needs: [tests, finalize]
+ if: github.event_name == 'pull_request'
+ runs-on: ubuntu-latest
+ defaults:
+ run:
+ working-directory: ui
+
+ steps:
+ - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+
+ # Debug step to show environment
+ - name: Debug environment
+ run: |
+ echo "GITHUB_SHA: ${{ github.sha }}"
+ echo "GITHUB_EVENT_NAME: ${{ github.event_name }}"
+ echo "GITHUB_REF: ${{ github.ref }}"
+ echo "RUN_ID: ${{ github.run_id }}"
+
+ # Try to list available artifacts first
+ - name: List artifacts
+ uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
+ with:
+ script: |
+ const artifacts = await github.rest.actions.listWorkflowRunArtifacts({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ // run_id: context.runId
+ run_id: 12163157778
+ });
+ console.log('Available artifacts:');
+ console.log(JSON.stringify(artifacts.data, null, 2));
+
+ - name: Download current PR results
+ uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
+ with:
+ # name: test-results-${{ github.sha }}
+ name: pr-test-results-fe7ca11e9afc42bc98d79fe521155a37634bd232 # TODO: temporary hardcoded sha from previous run
+ path: ui
+ run-id: 12163157778
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+
+ # - name: Download historical results
+ # uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
+ # with:
+ # pattern: test-results-*
+ # path: historical-results
+ # merge-multiple: true
+
+ # Download historical results from previous main branch runs
+ - name: Download historical results
+ uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
+ env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ script: |
+ const fs = require('fs');
+ const path = require('path');
+
+ const historicalDir = path.join('ui', 'historical-results');
+ // Clean up any existing directory
+ if (fs.existsSync(historicalDir)) {
+ fs.rmSync(historicalDir, { recursive: true, force: true });
+ }
+ fs.mkdirSync(historicalDir, { recursive: true });
+
+ const artifacts = await github.rest.actions.listArtifactsForRepo({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ per_page: 100
+ });
+
+ // Log out the names of each artifact
+ console.log('Available artifacts:');
+ artifacts.data.artifacts.forEach(artifact => {
+ console.log(`- ${artifact.name}`);
+ });
+
+ const testArtifacts = artifacts.data.artifacts.filter(artifact =>
+ artifact.name.startsWith('pr-test-results-')
+ );
+
+ console.log(`Found ${testArtifacts.length} test result artifacts`);
+
+ for (const artifact of testArtifacts) {
+ try {
+ console.log(`Downloading ${artifact.name}`);
+
+ // Create a temporary directory for this artifact
+ const tempDir = path.join(historicalDir, `temp-${artifact.id}`);
+ fs.mkdirSync(tempDir, { recursive: true });
+
+ try {
+ // Download to temp directory
+ await exec.exec('gh', [
+ 'run',
+ 'download',
+ '-n',
+ artifact.name,
+ '--repo',
+ `${context.repo.owner}/${context.repo.repo}`,
+ '--dir',
+ tempDir,
+ artifact.workflow_run.id.toString()
+ ]);
+
+ // Move the JSON file to the historical directory with a unique name
+ const jsonFile = path.join(tempDir, 'combined-test-results.json');
+ if (fs.existsSync(jsonFile)) {
+ fs.renameSync(
+ jsonFile,
+ path.join(historicalDir, `${artifact.name}-${artifact.id}.json`)
+ );
+ console.log(`Successfully processed ${artifact.name}`);
+ } else {
+ const files = fs.readdirSync(tempDir);
+ console.log(`Warning: no combined-test-results.json found in ${artifact.name}. Files present:`, files);
+ }
+ } finally {
+ // Always clean up temp directory
+ if (fs.existsSync(tempDir)) {
+ fs.rmSync(tempDir, { recursive: true, force: true });
+ }
+ }
+ } catch (error) {
+ console.log(`Error processing ${artifact.name}:`, error.message);
+ // Continue with next artifact
+ }
+ }
+
+ # Debug what we got
+ - name: Debug directories
+ run: |
+ echo "Current directory structure:"
+ ls -la
+ printf "\nHistorical results directory:\n"
+ ls -la historical-results || echo "historical-results directory not found"
+
+ cd historical-results
+ echo -e "\nContents of each file (first 10 lines):"
+ for file in *.json; do
+ if [ -f "$file" ]; then
+ echo -e "\n=== $file ==="
+ head -n 10 "$file"
+ fi
+ done
+
+ - name: Analyze test times
+ run: node ../scripts/analyze-ui-test-times.js
+
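+ # Post the analysis summary as a PR comment (relies on the pull-requests: write permission granted below).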
+ - name: Comment PR
+ uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
+ with:
+ script: |
+ const fs = require('fs');
+ const analysis = JSON.parse(fs.readFileSync('ui/test-time-analysis.json'));
+
+ let body = `### Test Time Analysis\n\n`;
+ body += `- Total Tests: ${analysis.summary.totalTests}\n`;
+ body += `- Significantly Slower: ${analysis.summary.significantlySlower}\n`;
+ body += `- Significantly Faster: ${analysis.summary.significantlyFaster}\n\n`;
+
+ if (analysis.testComparisons.length > 0) {
+ body += `#### Most Significant Changes:\n\n`;
+ analysis.testComparisons
+ .filter(comp => comp.percentDiff != null) // Skip invalid comparisons
+ .slice(0, 5)
+ .forEach(comp => {
+ body += `**${comp.name}**\n`;
+ body += `- Current: ${comp.currentDuration}ms\n`;
+ body += `- Historical Avg: ${comp.historicalAverage}ms\n`;
+ body += `- Change: ${comp.percentDiff?.toFixed(1) || 'N/A'}%\n`;
+ body += `- Trend: ${comp.trend}\n\n`;
+ });
+ }
+
+ await github.rest.issues.createComment({
+ issue_number: context.issue.number,
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ body
+ });
+
permissions:
contents: read
id-token: write
+ pull-requests: write
diff --git a/scripts/analyze-ui-test-times.js b/scripts/analyze-ui-test-times.js
new file mode 100644
index 00000000000..86643859b4d
--- /dev/null
+++ b/scripts/analyze-ui-test-times.js
@@ -0,0 +1,249 @@
+#!/usr/bin/env node
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: BUSL-1.1
+ */
+
+'use strict';
+const fs = require('fs');
+const path = require('path');
+
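+// Reads combined-test-results.json from the current run plus the JSON files in
+// historical-results/, compares per-test durations, and writes test-time-analysis.json.
+// Each results file is assumed to look like (shape inferred from the fields read below):
+//   { summary: { total, failed }, tests: [{ name, duration }, ...] }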
+async function analyzeTestTimes() {
+ const currentResults = JSON.parse(
+ fs.readFileSync('combined-test-results.json')
+ );
+
+ // Create a map of test names to their durations
+ const currentTestTimes = new Map();
+ currentResults.tests.forEach(test => {
+ currentTestTimes.set(test.name, test.duration);
+ });
+
+// Load and process historical results
+console.log('[analyze-test-times] Processing historical results...\n');
+const historicalAverages = new Map();
+const historicalCounts = new Map();
+const variablesTimings = new Set();
+const jobACLDisabledTimings = new Set();
+
+// Read each historical result file
+console.log('[analyze-test-times] Reading historical results directory...\n');
+const historicalDir = 'historical-results';
+const historicalFiles = fs.readdirSync(historicalDir)
+ .filter(file => file.endsWith('.json'));
+
+console.log(`[analyze-test-times] Found ${historicalFiles.length} JSON files`);
+
+ // Debug: Show content of first file
+ if (historicalFiles.length > 0) {
+ const firstFile = fs.readFileSync(path.join(historicalDir, historicalFiles[0]), 'utf8');
+ console.log('\n[analyze-test-times] First file content sample:');
+ console.log(firstFile.substring(0, 500) + '...');
+ console.log('\n[analyze-test-times] First file parsed:');
+ const parsed = JSON.parse(firstFile);
+ console.log(JSON.stringify(parsed, null, 2).substring(0, 500) + '...');
+ }
+
+historicalFiles.forEach((file, index) => {
+ console.log(`[analyze-test-times] Reading ${file} (${index + 1} of ${historicalFiles.length})...`);
+ try {
+ const historical = JSON.parse(fs.readFileSync(path.join(historicalDir, file), 'utf8'));
+
+ // Debug output
+ console.log(`[analyze-test-times] File ${file}:`);
+ console.log(` - Has summary: ${!!historical.summary}`);
+ if (historical.summary) {
+ console.log(` - Failed tests: ${historical.summary.failed}`);
+ console.log(` - Total tests: ${historical.summary.total}`);
+ }
+ console.log(` - Has tests array: ${!!historical.tests}`);
+ if (historical.tests) {
+ console.log(` - Number of tests: ${historical.tests.length}`);
+ }
+
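+ // Only aggregate timings from runs where every test passed; files with failures or an unexpected format are skipped.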
+ if (historical.summary && historical.summary.failed === 0) {
+ historical.tests.forEach(test => {
+ const current = historicalAverages.get(test.name) || 0;
+ const count = historicalCounts.get(test.name) || 0;
+ historicalAverages.set(test.name, current + test.duration);
+ historicalCounts.set(test.name, count + 1);
+ // Log out all timings for "Acceptance | variables > Job Variables Page: If the user has variable read access, but no variables, the subnav exists but contains only a message"
+ if (test.name === "Acceptance | variables > Job Variables Page: If the user has variable read access, but no variables, the subnav exists but contains only a message") {
+ console.log(`[analyze-test-times] Timings for ${test.name}: ${test.duration}`);
+ variablesTimings.add(test.duration);
+ }
+ if (test.name === "Unit | Ability | job: it permits job run when ACLs are disabled") {
+ console.log(`[analyze-test-times] Timings for ${test.name}: ${test.duration}`);
+ jobACLDisabledTimings.add(test.duration);
+ }
+ });
+ } else {
+ console.log(`[analyze-test-times] Skipping ${file} because it has failed tests or invalid format`);
+ }
+ } catch (error) {
+ console.log(`[analyze-test-times] Error processing ${file}:`, error.message);
+ }
+});
+
+// Debug output after processing
+console.log('\n[analyze-test-times] Processing complete');
+console.log(`Total unique tests found: ${historicalAverages.size}`);
+if (historicalAverages.size > 0) {
+ console.log('Sample of processed tests:');
+ let i = 0;
+ for (const [name, total] of historicalAverages) {
+ if (i++ >= 3) break;
+ const count = historicalCounts.get(name);
+ console.log(`- ${name}: ${total}ms total, ${count} samples`);
+ }
+}
+// Log out variablesTimings
+console.log(`[analyze-test-times] Variables timings: ${Array.from(variablesTimings).join(', ')}`);
+console.log(`[analyze-test-times] Job ACL disabled timings: ${Array.from(jobACLDisabledTimings).join(', ')}`);
+
+// After processing all files, show statistics
+console.log('\n[analyze-test-times] Sample count analysis:');
+console.log(`Total unique tests found: ${historicalAverages.size}`);
+
+
+// Sort tests by sample count to see which ones might be missing data
+const testStats = Array.from(historicalCounts.entries())
+ .sort((a, b) => b[1] - a[1]); // Sort by count, descending
+
+console.log('\nSample counts per test:');
+console.log('Format: Test name (count/total files)');
+
+// Create a Map to store timings for each test
+const testTimings = new Map();
+
+// First pass: collect all timings
+historicalFiles.forEach(file => {
+ try {
+ const historical = JSON.parse(fs.readFileSync(path.join(historicalDir, file), 'utf8'));
+ if (historical.tests) {
+ historical.tests.forEach(test => {
+ if (!testTimings.has(test.name)) {
+ testTimings.set(test.name, []);
+ }
+ testTimings.get(test.name).push(test.duration);
+ });
+ }
+ } catch (error) {
+ console.log(`Error reading file ${file}:`, error.message);
+ }
+});
+
+// Second pass: display results
+testStats.forEach(([testName, count]) => {
+  const percentage = ((count / historicalFiles.length) * 100).toFixed(1);
+  const timingsStr = (testTimings.get(testName) || []).join(', ');
+  // Flag tests that are missing from some of the historical result files
+  const marker = count < historicalFiles.length ? '⚠️' : '✓';
+
+  console.log(`${marker} ${testName}: ${count}/${historicalFiles.length} (${percentage}%)`);
+  console.log(`  Timings: [${timingsStr}]`);
+});
+
+// Calculate averages and compare
+ const analysis = {
+ timestamp: new Date().toISOString(),
+ sha: process.env.GITHUB_SHA,
+ summary: {
+ totalTests: currentResults.tests.length,
+ significantlySlower: 0,
+ significantlyFaster: 0
+ },
+ testComparisons: []
+ };
+
+  console.log('[analyze-test-times] Comparing current test times with historical averages...');
+  currentTestTimes.forEach((currentDuration, testName) => {
+    const totalHistorical = historicalAverages.get(testName) || 0;
+    const count = historicalCounts.get(testName);
+
+    // Skip tests with no historical data
+    if (!count) {
+      console.log(`[analyze-test-times] Skipping ${testName} - no historical data`);
+      return;
+    }
+
+    const historicalAverage = totalHistorical / count;
+
+ // Consider a test significantly different if it's 25% slower/faster
+ const percentDiff = ((currentDuration - historicalAverage) / historicalAverage) * 100;
+
+ if (Math.abs(percentDiff) >= 25) {
+ console.log(`[analyze-test-times] Found significant difference in ${testName}: ${percentDiff.toFixed(1)}% change`);
+ analysis.testComparisons.push({
+ name: testName,
+ currentDuration,
+ historicalAverage,
+ percentDiff, // This will now always be a number
+ samples: count,
+ historicalTimings: testTimings.get(testName) || [],
+ trend: createSparkline(testTimings.get(testName) || [], currentDuration)
+ });
+
+ if (percentDiff > 0) {
+ analysis.summary.significantlySlower++;
+ } else {
+ analysis.summary.significantlyFaster++;
+ }
+ }
+ });
+
+ // Sort by most significant differences first
+ analysis.testComparisons.sort((a, b) => Math.abs(b.percentDiff) - Math.abs(a.percentDiff));
+
+ // Write analysis results
+ fs.writeFileSync(
+ 'test-time-analysis.json',
+ JSON.stringify(analysis, null, 2)
+ );
+
+ // Output summary to console for GitHub Actions
+ console.log('\nTest Time Analysis Summary:');
+ console.log(`Total Tests: ${analysis.summary.totalTests}`);
+ console.log(`Significantly Slower: ${analysis.summary.significantlySlower}`);
+ console.log(`Significantly Faster: ${analysis.summary.significantlyFaster}`);
+
+ if (analysis.testComparisons.length > 0) {
+ console.log('\nMost Significant Changes:');
+ analysis.testComparisons.slice(0, 5).forEach(comp => {
+ console.log(`${comp.name}:`);
+ console.log(` Current: ${comp.currentDuration}ms`);
+ console.log(` Historical Avg: ${comp.historicalAverage}ms`);
+ console.log(` Change: ${comp.percentDiff.toFixed(1)}%`);
+ });
+ }
+}
+
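+// Render the historical timings as a unicode sparkline (each value normalized between the min
+// and max of the historical timings), then append an arrow and colored dot indicating whether
+// the current duration is above, below, or equal to the historical average.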
+function createSparkline(timings, currentValue) {
+ const blocks = ['▁', '▂', '▃', '▄', '▅', '▆', '▇', '█'];
+ const min = Math.min(...timings);
+ const max = Math.max(...timings);
+ const range = max - min;
+
+ const sparkline = timings.map(value => {
+ const normalized = range === 0 ? 0 : (value - min) / range;
+ const blockIndex = Math.floor(normalized * (blocks.length - 1));
+ return blocks[blockIndex];
+ }).join('');
+
+ const avgHistorical = timings.reduce((a, b) => a + b, 0) / timings.length;
+ const trend = currentValue > avgHistorical ? '↑' : currentValue < avgHistorical ? '↓' : '→';
+ const trendColor = currentValue > avgHistorical ? '🔴' : currentValue < avgHistorical ? '🟢' : '⚪';
+
+ return `${sparkline} ${trend} ${trendColor}`;
+}
+
+if (require.main === module) {
+  analyzeTestTimes().catch((error) => {
+    console.error(error);
+    process.exit(1);
+  });
+}
+
+module.exports = analyzeTestTimes;
diff --git a/ui/app/index.html b/ui/app/index.html
index df5eb739390..9d49fe8a579 100644
--- a/ui/app/index.html
+++ b/ui/app/index.html
@@ -24,7 +24,6 @@
{{content-for "body"}}
-
{{content-for "body-footer"}}