Skip to content

Commit 2d56413

Browse files
authored
Merge pull request #11674 from getsentry/develop
Gitflow: Sync develop into master
2 parents 6c8aeff + 3e614ba commit 2d56413

File tree

37 files changed

+629
-40
lines changed

37 files changed

+629
-40
lines changed

.github/ISSUE_TEMPLATE/flaky.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ body:
1818
id: job-name
1919
attributes:
2020
label: Name of Job
21-
placeholder: Build & Test / Nextjs (Node 14) Tests
21+
placeholder: "CI: Build & Test / Nextjs (Node 14) Tests"
2222
description: name of job as reported in the status report
2323
validations:
2424
required: true

.github/workflows/auto-release.yml

+2-2
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
name: Gitflow - Auto prepare release
1+
name: "Gitflow: Auto prepare release"
22
on:
33
pull_request:
44
types:
@@ -25,7 +25,7 @@ jobs:
2525
# Parse version from head branch
2626
text: ${{ github.head_ref }}
2727
# match: prepare-release/xx.xx.xx
28-
regex: '^prepare-release\/(\d+\.\d+\.\d+)$'
28+
regex: '^prepare-release\/(\d+\.\d+\.\d+)(?:-(alpha|beta)\.\d+)?$'
2929

3030
- name: Prepare release
3131
uses: getsentry/action-prepare-release@v1

.github/workflows/build.yml

+4-1
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,13 @@
1-
name: 'Build & Test'
1+
name: 'CI: Build & Test'
22
on:
33
push:
44
branches:
55
- develop
66
- master
77
- release/**
88
pull_request:
9+
merge_group:
10+
types: [checks_requested]
911
workflow_dispatch:
1012
inputs:
1113
commit:
@@ -1051,6 +1053,7 @@ jobs:
10511053
'node-nestjs-app',
10521054
'node-exports-test-app',
10531055
'node-koa-app',
1056+
'node-connect-app',
10541057
'vue-3',
10551058
'webpack-4',
10561059
'webpack-5'

.github/workflows/canary.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
name: 'Canary Tests'
1+
name: 'CI: Canary Tests'
22
on:
33
schedule:
44
# Run every day at midnight

.github/workflows/clear-cache.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
name: Clear all GHA caches
1+
name: "Action: Clear all GHA caches"
22
on:
33
workflow_dispatch:
44

.github/workflows/codeql-analysis.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@
99
# the `language` matrix defined below to confirm you have the correct set of
1010
# supported CodeQL languages.
1111
#
12-
name: 'CodeQL'
12+
name: 'CI: CodeQL'
1313

1414
on:
1515
push:

.github/workflows/enforce-license-compliance.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
name: Enforce License Compliance
1+
name: "CI: Enforce License Compliance"
22

33
on:
44
push:

.github/workflows/flaky-test-detector.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
name: 'Detect flaky tests'
1+
name: 'CI: Detect flaky tests'
22
on:
33
workflow_dispatch:
44
pull_request:

.github/workflows/gitflow-sync-develop.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
name: Gitflow - Sync master into develop
1+
name: "Gitflow: Sync master into develop"
22
on:
33
push:
44
branches:

.github/workflows/issue-package-label.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
name: 'Tag issue with package label'
1+
name: 'Automation: Tag issue with package label'
22

33
on:
44
issues:

.github/workflows/release-size-info.yml

+2-2
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
name: Add size info to release
1+
name: "Automation: Add size info to release"
22
on:
33
release:
44
types:
@@ -27,4 +27,4 @@ jobs:
2727
with:
2828
github_token: ${{ secrets.GITHUB_TOKEN }}
2929
version: ${{ steps.get_version.outputs.version }}
30-
workflow_name: 'Build & Test'
30+
workflow_name: 'CI: Build & Test'

.github/workflows/release.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
name: Prepare Release
1+
name: "Action: Prepare Release"
22
on:
33
workflow_dispatch:
44
inputs:

README.md

+2-2
Original file line numberDiff line numberDiff line change
@@ -8,8 +8,8 @@ _Bad software is everywhere, and we're tired of it. Sentry is on a mission to he
88
faster, so we can get back to enjoying technology. If you want to join us
99
[<kbd>**Check out our open positions**</kbd>](https://sentry.io/careers/)_
1010

11-
![Build & Test](https://github.com/getsentry/sentry-javascript/workflows/Build%20&%20Test/badge.svg)
12-
[![codecov](https://codecov.io/gh/getsentry/sentry-javascript/branch/master/graph/badge.svg)](https://codecov.io/gh/getsentry/sentry-javascript)
11+
![Build & Test](https://github.com/getsentry/sentry-javascript/workflows/CI:%20Build%20&%20Test/badge.svg)
12+
[![codecov](https://codecov.io/gh/getsentry/sentry-javascript/branch/develop/graph/badge.svg)](https://codecov.io/gh/getsentry/sentry-javascript)
1313
[![npm version](https://img.shields.io/npm/v/@sentry/core.svg)](https://www.npmjs.com/package/@sentry/core)
1414
[![Discord](https://img.shields.io/discord/621778831602221064)](https://discord.gg/Ww9hbqr)
1515

dev-packages/browser-integration-tests/scripts/detectFlakyTests.ts

+71-18
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,30 @@
11
import * as childProcess from 'child_process';
2+
import * as fs from 'fs';
23
import * as path from 'path';
34
import * as glob from 'glob';
45

6+
/**
7+
* The number of browsers we run the tests in.
8+
*/
9+
const NUM_BROWSERS = 3;
10+
11+
/**
12+
* Assume that each test runs for 3s.
13+
*/
14+
const ASSUMED_TEST_DURATION_SECONDS = 3;
15+
16+
/**
17+
* If possible, we keep the total runtime of the detector under 30 minutes.
18+
*/
19+
const MAX_TARGET_TEST_RUNTIME_SECONDS = 30 * 60;
20+
21+
/**
22+
* Running one test 50x is what we consider enough to detect flakiness.
23+
* Running one test 5x is the bare minimum.
24+
*/
25+
const MAX_PER_TEST_RUN_COUNT = 50;
26+
const MIN_PER_TEST_RUN_COUNT = 5;
27+
528
async function run(): Promise<void> {
629
let testPaths: string[] = [];
730

@@ -20,23 +43,8 @@ ${changedPaths.join('\n')}
2043
}
2144
}
2245

23-
let runCount: number;
24-
if (process.env.TEST_RUN_COUNT === 'AUTO') {
25-
// No test paths detected: run everything 5x
26-
runCount = 5;
27-
28-
if (testPaths.length > 0) {
29-
// Run everything up to 100x, assuming that total runtime is less than 60min.
30-
// We assume an average runtime of 3s per test, times 4 (for different browsers) = 12s per detected testPaths
31-
// We want to keep overall runtime under 30min
32-
const testCount = testPaths.length * 4;
33-
const expectedRuntimePerTestPath = testCount * 3;
34-
const expectedRuntime = Math.floor((30 * 60) / expectedRuntimePerTestPath);
35-
runCount = Math.min(50, Math.max(expectedRuntime, 5));
36-
}
37-
} else {
38-
runCount = parseInt(process.env.TEST_RUN_COUNT || '10');
39-
}
46+
const repeatEachCount = getPerTestRunCount(testPaths);
47+
console.log(`Running tests ${repeatEachCount} times each.`);
4048

4149
const cwd = path.join(__dirname, '../');
4250

@@ -45,7 +53,7 @@ ${changedPaths.join('\n')}
4553
const cp = childProcess.spawn(
4654
`npx playwright test ${
4755
testPaths.length ? testPaths.join(' ') : './suites'
48-
} --reporter='line' --repeat-each ${runCount}`,
56+
} --reporter='line' --repeat-each ${repeatEachCount}`,
4957
{ shell: true, cwd },
5058
);
5159

@@ -88,6 +96,33 @@ ${changedPaths.join('\n')}
8896
console.log(`☑️ All tests passed.`);
8997
}
9098

99+
/**
100+
* Returns how many time one test should run based on the chosen mode and a bunch of heuristics
101+
*/
102+
function getPerTestRunCount(testPaths: string[]) {
103+
if (process.env.TEST_RUN_COUNT === 'AUTO' && testPaths.length > 0) {
104+
// Run everything up to 50x, assuming that total runtime stays under 30min.
105+
// We assume an average runtime of 3s per test, times 3 (for the different browsers) = 9s per detected test path
106+
// We want to keep overall runtime under 30min
107+
const estimatedNumberOfTests = testPaths.map(getApproximateNumberOfTests).reduce((a, b) => a + b);
108+
console.log(`Estimated number of tests: ${estimatedNumberOfTests}`);
109+
110+
// tests are usually run against all browsers we test with, so let's assume this
111+
const testRunCount = estimatedNumberOfTests * NUM_BROWSERS;
112+
console.log(`Estimated test runs for one round: ${testRunCount}`);
113+
114+
const estimatedTestRuntime = testRunCount * ASSUMED_TEST_DURATION_SECONDS;
115+
console.log(`Estimated test runtime: ${estimatedTestRuntime}s`);
116+
117+
const expectedPerTestRunCount = Math.floor(MAX_TARGET_TEST_RUNTIME_SECONDS / estimatedTestRuntime);
118+
console.log(`Expected per-test run count: ${expectedPerTestRunCount}`);
119+
120+
return Math.min(MAX_PER_TEST_RUN_COUNT, Math.max(expectedPerTestRunCount, MIN_PER_TEST_RUN_COUNT));
121+
}
122+
123+
return parseInt(process.env.TEST_RUN_COUNT || '5');
124+
}
125+
91126
function getTestPaths(): string[] {
92127
const paths = glob.sync('suites/**/test.{ts,js}', {
93128
cwd: path.join(__dirname, '../'),
@@ -111,4 +146,22 @@ function logError(error: unknown) {
111146
}
112147
}
113148

149+
/**
150+
* Definitely not bulletproof way of getting the number of tests in a file :D
151+
* We simply match on `it(`, `test(`, etc. and count the matches.
152+
*
153+
* Note: This estimate completely disregards parameterized tests (`it.each`, etc.) and
154+
* skipped/disabled tests and other edge cases. It's just a rough estimate.
155+
*/
156+
function getApproximateNumberOfTests(testPath: string): number {
157+
try {
158+
const content = fs.readFileSync(path.join(process.cwd(), testPath, 'test.ts'), 'utf-8');
159+
const matches = content.match(/it\(|test\(|sentryTest\(/g);
160+
return Math.max(matches ? matches.length : 1, 1);
161+
} catch (e) {
162+
console.error(`Could not read file ${testPath}`);
163+
return 1;
164+
}
165+
}
166+
114167
run();
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
@sentry:registry=http://127.0.0.1:4873
2+
@sentry-internal:registry=http://127.0.0.1:4873
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,31 @@
1+
{
2+
"name": "node-connect-app",
3+
"version": "1.0.0",
4+
"private": true,
5+
"scripts": {
6+
"start": "ts-node src/app.ts",
7+
"test": "playwright test",
8+
"clean": "npx rimraf node_modules pnpm-lock.yaml",
9+
"typecheck": "tsc",
10+
"test:build": "pnpm install && pnpm run typecheck",
11+
"test:assert": "pnpm test"
12+
},
13+
"dependencies": {
14+
"@sentry/node": "latest || *",
15+
"@sentry/types": "latest || *",
16+
"@sentry/core": "latest || *",
17+
"@sentry/utils": "latest || *",
18+
"@sentry/opentelemetry": "latest || *",
19+
"@types/node": "18.15.1",
20+
"connect": "3.7.0",
21+
"typescript": "4.9.5",
22+
"ts-node": "10.9.1"
23+
},
24+
"devDependencies": {
25+
"@sentry-internal/event-proxy-server": "link:../../../event-proxy-server",
26+
"@playwright/test": "^1.38.1"
27+
},
28+
"volta": {
29+
"extends": "../../package.json"
30+
}
31+
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,62 @@
1+
import type { PlaywrightTestConfig } from '@playwright/test';
2+
import { devices } from '@playwright/test';
3+
4+
const connectPort = 3030;
5+
const eventProxyPort = 3031;
6+
7+
/**
8+
* See https://playwright.dev/docs/test-configuration.
9+
*/
10+
const config: PlaywrightTestConfig = {
11+
testDir: './tests',
12+
/* Maximum time one test can run for. */
13+
timeout: 150_000,
14+
expect: {
15+
/**
16+
* Maximum time expect() should wait for the condition to be met.
17+
* For example in `await expect(locator).toHaveText();`
18+
*/
19+
timeout: 10000,
20+
},
21+
/* Run tests in files in parallel */
22+
fullyParallel: true,
23+
/* Fail the build on CI if you accidentally left test.only in the source code. */
24+
forbidOnly: !!process.env.CI,
25+
retries: 0,
26+
/* Reporter to use. See https://playwright.dev/docs/test-reporters */
27+
reporter: 'list',
28+
/* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */
29+
use: {
30+
/* Maximum time each action such as `click()` can take. Defaults to 0 (no limit). */
31+
actionTimeout: 0,
32+
/* Base URL to use in actions like `await page.goto('/')`. */
33+
baseURL: `http://localhost:${connectPort}`,
34+
35+
/* Collect trace when retrying the failed test. See https://playwright.dev/docs/trace-viewer */
36+
trace: 'on-first-retry',
37+
},
38+
39+
/* Configure projects for major browsers */
40+
projects: [
41+
{
42+
name: 'chromium',
43+
use: {
44+
...devices['Desktop Chrome'],
45+
},
46+
},
47+
],
48+
49+
/* Run your local dev server before starting the tests */
50+
webServer: [
51+
{
52+
command: 'pnpm ts-node-script start-event-proxy.ts',
53+
port: eventProxyPort,
54+
},
55+
{
56+
command: 'pnpm start',
57+
port: connectPort,
58+
},
59+
],
60+
};
61+
62+
export default config;

0 commit comments

Comments
 (0)