fix: adding emergency garbage collection at 90% for chromium-based browsers #25521

Merged · 3 commits · Jan 19, 2023
Changes from all commits
20 changes: 10 additions & 10 deletions .circleci/workflows.yml
@@ -1357,7 +1357,7 @@ jobs:
path: /tmp/cypress
- store-npm-logs

memory-driver-tests:
driver-integration-memory-tests:
<<: *defaults
parameters:
<<: *defaultsParameters
@@ -2456,7 +2456,7 @@ linux-x64-workflow: &linux-x64-workflow
context: test-runner:cypress-record-key
requires:
- build
- memory-driver-tests:
- driver-integration-memory-tests:
requires:
- build
- run-frontend-shared-component-tests-chrome:
@@ -2700,8 +2700,8 @@ linux-arm64-workflow: &linux-arm64-workflow
resource_class: arm.medium
requires:
- linux-arm64-build
- memory-driver-tests:
name: linux-arm64-memory-driver-tests
- driver-integration-memory-tests:
name: linux-arm64-driver-integration-memory-tests
executor: linux-arm64
resource_class: arm.medium
requires:
@@ -2746,8 +2746,8 @@ darwin-x64-workflow: &darwin-x64-workflow
resource_class: macos.x86.medium.gen2
requires:
- darwin-x64-build
- memory-driver-tests:
name: darwin-x64-memory-driver-tests
- driver-integration-memory-tests:
name: darwin-x64-driver-integration-memory-tests
executor: mac
resource_class: macos.x86.medium.gen2
requires:
@@ -2785,8 +2785,8 @@ darwin-arm64-workflow: &darwin-arm64-workflow
resource_class: cypress-io/latest_m1
requires:
- darwin-arm64-build
- memory-driver-tests:
name: darwin-arm64-memory-driver-tests
- driver-integration-memory-tests:
name: darwin-arm64-driver-integration-memory-tests
executor: darwin-arm64
resource_class: cypress-io/latest_m1
requires:
@@ -2853,8 +2853,8 @@ windows-workflow: &windows-workflow
resource_class: windows.large
requires:
- windows-build
- memory-driver-tests:
name: windows-memory-driver-tests
- driver-integration-memory-tests:
name: windows-driver-integration-memory-tests
executor: windows
resource_class: windows.large
requires:
7 changes: 7 additions & 0 deletions packages/driver/cypress/e2e/memory/memory_large_test.cy.js
@@ -0,0 +1,7 @@
describe('memory spec', { browser: { family: 'chromium' } }, () => {
it('passes when loading page a 100 times', () => {
for (let index = 0; index < 100; index++) {
cy.visit('http://localhost:3500/memory')
}
})
})
41 changes: 29 additions & 12 deletions packages/server/lib/browsers/memory/index.ts
@@ -16,6 +16,7 @@ const debug = debugModule('cypress:server:browsers:memory')
const debugVerbose = debugModule('cypress-verbose:server:browsers:memory')

const MEMORY_THRESHOLD_PERCENTAGE = Number(process.env.CYPRESS_INTERNAL_MEMORY_THRESHOLD_PERCENTAGE) || 50
const EMERGENCY_MEMORY_THRESHOLD_PERCENTAGE = Number(process.env.CYPRESS_INTERNAL_EMERGENCY_MEMORY_THRESHOLD_PERCENTAGE) || 90
const MEMORY_PROFILER_INTERVAL = Number(process.env.CYPRESS_INTERNAL_MEMORY_PROFILER_INTERVAL) || 1000
const MEMORY_FOLDER = process.env.CYPRESS_INTERNAL_MEMORY_FOLDER_PATH || path.join('cypress', 'logs', 'memory')
const SAVE_MEMORY_STATS = ['1', 'true'].includes(process.env.CYPRESS_INTERNAL_MEMORY_SAVE_STATS?.toLowerCase() as string)
@@ -44,17 +45,23 @@ export type MemoryHandler = {
/**
* Algorithm:
*
* When the test runs starts:
* When the spec run starts:
* 1. set total mem limit for the container/host by reading off cgroup memory limits (if available) otherwise use os.totalmem()
* 2. set js heap size limit by reading off the browser
* 3. turn on memory profiler
*
* On a defined interval (e.g. 1s):
* 1. set current mem available for the container/host by reading off cgroup memory usage (if available) otherwise use si.mem().available
* 2. set current renderer mem usage
* 3. set max avail render mem to minimum of v8 heap size limit and total available mem (current available mem + current renderer mem usage)
* 4. calc % of memory used, current renderer mem usage / max avail render mem
* 5. if % of memory used exceeds the emergency memory threshold percentage (e.g. 90%) do a GC
*
* Before each test:
* 1. if that exceeds the defined memory threshold percentage (e.g. 50%) do a GC
* 1. if any interval exceeded the defined memory threshold (e.g. 50%), do a GC
*
* After the spec run ends:
* 1. turn off memory profiler
*/

/**
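To make the two thresholds described in the algorithm comment above concrete, here is a minimal sketch, not part of the diff, of the per-interval decision. The `onInterval` and `collectGarbage` names are made up for illustration; the actual logic lives in `calculateMemoryStats` and `checkMemoryPressure` further down.

```ts
// Sketch of the per-interval decision described in the algorithm comment above.
const MEMORY_THRESHOLD_PERCENTAGE = 50
const EMERGENCY_MEMORY_THRESHOLD_PERCENTAGE = 90

let collectGarbageOnNextTest = false

async function onInterval (rendererUsagePercentage: number, collectGarbage: () => Promise<void>) {
  // above the 50% threshold: defer garbage collection until the next test starts
  if (rendererUsagePercentage >= MEMORY_THRESHOLD_PERCENTAGE) {
    collectGarbageOnNextTest = true
  }

  // above the 90% emergency threshold: don't wait, collect garbage immediately
  if (rendererUsagePercentage >= EMERGENCY_MEMORY_THRESHOLD_PERCENTAGE) {
    await collectGarbage()
  }
}
```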
@@ -203,7 +210,7 @@ export const getAvailableMemory: () => Promise<number> = measure(() => {
/**
* Calculates the memory stats used to determine if garbage collection should be run before the next test starts.
*/
export const calculateMemoryStats: () => Promise<void> = measure(async () => {
export const calculateMemoryStats: (automation: Automation) => Promise<void> = measure(async (automation: Automation) => {
// retrieve the available memory and the renderer process memory usage
const [currentAvailableMemory, rendererProcessMemRss] = await Promise.all([
getAvailableMemory(),
@@ -221,12 +228,20 @@ export const calculateMemoryStats: () => Promise<void> = measure(async () => {
const maxAvailableRendererMemory = Math.min(jsHeapSizeLimit, currentAvailableMemory + rendererProcessMemRss)

const rendererUsagePercentage = (rendererProcessMemRss / maxAvailableRendererMemory) * 100
// if we're using more than MEMORY_THRESHOLD_PERCENTAGE of the available memory,
// if the renderer's memory is above the MEMORY_THRESHOLD_PERCENTAGE, we should collect garbage on the next test
const shouldCollectGarbage = rendererUsagePercentage >= MEMORY_THRESHOLD_PERCENTAGE && !SKIP_GC

// if we should collect garbage, set the flag to true so we can collect garbage on the next test
collectGarbageOnNextTest = collectGarbageOnNextTest || shouldCollectGarbage

// if the renderer's memory is above the EMERGENCY_MEMORY_THRESHOLD_PERCENTAGE, we should perform an emergency garbage collection now
const shouldEmergencyCollectGarbage = rendererUsagePercentage >= EMERGENCY_MEMORY_THRESHOLD_PERCENTAGE && !SKIP_GC

if (shouldEmergencyCollectGarbage) {
debug('emergency garbage collection triggered')
await checkMemoryPressure(automation, shouldEmergencyCollectGarbage)
}

// set all the memory stats on the stats log
statsLog.jsHeapSizeLimit = jsHeapSizeLimit
statsLog.totalMemoryLimit = totalMemoryLimit
@@ -236,6 +251,8 @@ export const calculateMemoryStats: () => Promise<void> = measure(async () => {
statsLog.currentAvailableMemory = currentAvailableMemory
statsLog.maxAvailableRendererMemory = maxAvailableRendererMemory
statsLog.shouldCollectGarbage = shouldCollectGarbage
statsLog.emergencyGarbageCollected = shouldEmergencyCollectGarbage
statsLog.emergencyRendererMemoryThreshold = maxAvailableRendererMemory * (EMERGENCY_MEMORY_THRESHOLD_PERCENTAGE / 100)
statsLog.timestamp = Date.now()
}, { name: 'calculateMemoryStats', save: true })
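As a worked example of the calculation above (the numbers are illustrative, not taken from the PR): with a 4 GB heap limit, 2 GB of memory still available, and a renderer RSS of 1.5 GB, the renderer sits at roughly 43% of its usable memory, so neither the 50% nor the 90% threshold is crossed.

```ts
// Illustrative numbers; real values come from the browser, cgroups, or si.mem().
const GB = 1024 ** 3

const jsHeapSizeLimit = 4 * GB          // reported by the browser
const currentAvailableMemory = 2 * GB   // container/host memory still available
const rendererProcessMemRss = 1.5 * GB  // renderer process RSS

// the renderer can use at most the smaller of the heap limit and
// what is still free plus what it already holds
const maxAvailableRendererMemory = Math.min(jsHeapSizeLimit, currentAvailableMemory + rendererProcessMemRss) // 3.5 GB

const rendererUsagePercentage = (rendererProcessMemRss / maxAvailableRendererMemory) * 100 // ~42.9%

// 42.9% < 50 -> no GC scheduled before the next test
// 42.9% < 90 -> no emergency GC on this interval
```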

@@ -264,8 +281,8 @@ const checkMemoryPressureAndLog = async ({ automation, test }: { automation: Aut
* Collects the browser's garbage if it previously exceeded the threshold when it was measured.
* @param automation the automation client used to collect garbage
*/
const checkMemoryPressure: (automation: Automation) => Promise<void> = measure(async (automation: Automation) => {
if (collectGarbageOnNextTest) {
const checkMemoryPressure: (automation: Automation, emergencyCollectGarbage?: boolean) => Promise<void> = measure(async (automation: Automation, emergencyCollectGarbage: boolean = false) => {
if (collectGarbageOnNextTest || emergencyCollectGarbage) {
debug('forcing garbage collection')
try {
await automation.request('collect:garbage', null, null)
@@ -292,24 +309,24 @@ const addCumulativeStats = (stats: { [key: string]: any }) => {
/**
* Gathers the memory stats and schedules the next check.
*/
const gatherMemoryStats = async () => {
const gatherMemoryStats = async (automation: Automation) => {
try {
await calculateMemoryStats()
await calculateMemoryStats(automation)
addCumulativeStats(statsLog)
statsLog = {}
} catch (err) {
debug('error gathering memory stats: %o', err)
}
scheduleMemoryCheck()
scheduleMemoryCheck(automation)
}

/**
* Schedules the next gathering of memory stats based on the MEMORY_PROFILER_INTERVAL.
*/
const scheduleMemoryCheck = () => {
const scheduleMemoryCheck = (automation: Automation) => {
if (started) {
// not setInterval, since gatherMemoryStats is asynchronous
timer = setTimeout(gatherMemoryStats, MEMORY_PROFILER_INTERVAL)
timer = setTimeout(() => gatherMemoryStats(automation), MEMORY_PROFILER_INTERVAL)
}
}
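The scheduling above uses a recursive setTimeout rather than setInterval so that a slow asynchronous measurement can never overlap the next one. A generic sketch of that pattern (the `poll` name is hypothetical):

```ts
// Generic recursive-setTimeout polling: wait for the async work, then reschedule.
async function poll (work: () => Promise<void>, intervalMs: number): Promise<void> {
  await work() // finish the current measurement first
  setTimeout(() => poll(work, intervalMs), intervalMs) // only then schedule the next one
}
```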

@@ -348,7 +365,7 @@ async function startProfiling (automation: Automation, spec: { fileName: string
totalMemoryLimit = await handler.getTotalMemoryLimit(),
])

await gatherMemoryStats()
await gatherMemoryStats(automation)
} catch (err) {
debug('error starting memory profiler: %o', err)
}