diff --git a/scripts/analyze-ui-test-times.js b/scripts/analyze-ui-test-times.js
index 42b0364b0ce..ffbf9f91bbb 100644
--- a/scripts/analyze-ui-test-times.js
+++ b/scripts/analyze-ui-test-times.js
@@ -23,6 +23,8 @@ async function analyzeTestTimes() {
   console.log('[analyze-test-times] Processing historical results...\n');
   const historicalAverages = new Map();
   const historicalCounts = new Map();
+  const variablesTimings = new Set(); // note: a Set keeps unique durations only
+  const jobACLDisabledTimings = new Set();
 
   // Read each historical result file
   console.log('[analyze-test-times] Reading historical results directory...\n');
@@ -65,6 +67,15 @@ historicalFiles.forEach((file, index) => {
         const count = historicalCounts.get(test.name) || 0;
         historicalAverages.set(test.name, current + test.duration);
         historicalCounts.set(test.name, count + 1);
+        // Log and collect every timing sample for two specific tests
+        if (test.name === 'Acceptance | variables > Job Variables Page: If the user has variable read access, but no variables, the subnav exists but contains only a message') {
+          console.log(`[analyze-test-times] Timings for ${test.name}: ${test.duration}`);
+          variablesTimings.add(test.duration);
+        }
+        if (test.name === 'Unit | Ability | job: it permits job run when ACLs are disabled') {
+          console.log(`[analyze-test-times] Timings for ${test.name}: ${test.duration}`);
+          jobACLDisabledTimings.add(test.duration);
+        }
       });
     } else {
       console.log(`[analyze-test-times] Skipping ${file} because it has failed tests or invalid format`);
@@ -86,7 +97,45 @@ if (historicalAverages.size > 0) {
       console.log(`- ${name}: ${total}ms total, ${count} samples`);
     }
   }
-  // Calculate averages and compare
+  // Log the collected timings for both tracked tests
+  console.log(`[analyze-test-times] Variables timings: ${Array.from(variablesTimings).join(', ')}`);
+  console.log(`[analyze-test-times] Job ACL disabled timings: ${Array.from(jobACLDisabledTimings).join(', ')}`);
+
+  // After processing all files, show statistics
+  console.log('\n[analyze-test-times] Sample count analysis:');
+  console.log(`Total unique tests found: ${historicalAverages.size}`);
+
+
+  // Sort tests by sample count to see which ones might be missing data
+  const testStats = Array.from(historicalCounts.entries())
+    .sort((a, b) => b[1] - a[1]); // Sort by count, descending
+
+  console.log('\nSample counts per test:');
+  console.log('Format: Test name (count/total files)');
+  testStats.forEach(([testName, count]) => {
+    const percentage = ((count / historicalFiles.length) * 100).toFixed(1);
+    if (count < historicalFiles.length) {
+      console.log(`⚠️ ${testName}: ${count}/${historicalFiles.length} (${percentage}%)`);
+    } else {
+      console.log(`✓ ${testName}: ${count}/${historicalFiles.length} (${percentage}%)`);
+    }
+  });
+
+  // Show tests with lowest coverage
+  console.log('\nTests with fewer than expected samples:');
+  const lowCoverageCutoff = historicalFiles.length * 0.9; // 90% of expected samples
+  const lowCoverageTests = testStats.filter(([_, count]) => count < lowCoverageCutoff);
+  if (lowCoverageTests.length > 0) {
+    console.log(`Found ${lowCoverageTests.length} tests with < 90% coverage`);
+    lowCoverageTests.forEach(([testName, count]) => {
+      const percentage = ((count / historicalFiles.length) * 100).toFixed(1);
+      console.log(`- ${testName}: ${count}/${historicalFiles.length} (${percentage}%)`);
+    });
+  } else {
+    console.log('All tests have good sample coverage!');
+  }
+
+  // Calculate averages and compare
   const analysis = {
     timestamp: new Date().toISOString(),
     sha: process.env.GITHUB_SHA,
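
For reference, a minimal standalone sketch of the sample-coverage check the last hunk adds. The file names and counts below are hypothetical; in the script itself, historicalFiles and historicalCounts are built while reading the JSON result files:

// Standalone sketch of the coverage check above (hypothetical data).
const historicalFiles = ['run-1.json', 'run-2.json', 'run-3.json'];
const historicalCounts = new Map([
  ['Unit | Ability | job: it permits job run when ACLs are disabled', 3],
  ['Acceptance | variables > hypothetical flaky test', 1],
]);

const lowCoverageCutoff = historicalFiles.length * 0.9; // 90% of runs
for (const [testName, count] of historicalCounts) {
  const percentage = ((count / historicalFiles.length) * 100).toFixed(1);
  const marker = count < lowCoverageCutoff ? '⚠️' : '✓';
  console.log(`${marker} ${testName}: ${count}/${historicalFiles.length} (${percentage}%)`);
}

Run under node, this prints one line per test and flags anything sampled in fewer than 90% of runs with the warning marker.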