import { ActionsUnion } from "./types"
import { ActivityStatuses } from "../constants"
import { calcElapsedTime } from "../../util/calc-elapsed-time"
import { isActivityInProgress } from "./utils"
import type { Reporter } from "../reporter"
import type { GatsbyCLIStore } from "./"

function calculateTimeoutDelay(
  envVarValue: string | undefined,
  defaultValue: number,
  min: number
): number {
  if (!envVarValue) {
    return 0
  } else if (envVarValue === `1`) {
    // Toggling env vars with "1" is quite common. Because the env var can also
    // carry a specific timeout value, this special case is needed (a 1ms
    // timeout makes little sense - that's too low a value to be used as-is).
    return defaultValue
  }

  const parsedToNumber = parseInt(envVarValue, 10)
  if (isNaN(parsedToNumber)) {
    // It's truthy, but not a number - enable the timer with the default value.
    return defaultValue
  }

  // Allow a custom timeout value, but enforce a minimal bound, as there is
  // little use in setting it to less than a few seconds.
  return Math.max(parsedToNumber, min)
}
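
// Illustrative examples (not from the original source) of how the env var value
// maps to a delay, assuming defaultValue = FIVE_MINUTES and min = FIVE_SECONDS:
//   undefined  -> 0      (timer disabled)
//   "1"        -> 300000 (default)
//   "verbose"  -> 300000 (truthy but NaN, falls back to the default)
//   "1000"     -> 5000   (clamped up to the minimum)
//   "60000"    -> 60000  (used as-is)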

type DiagnosticsMiddleware = (action: ActionsUnion) => void

const FIVE_MINUTES = 1000 * 60 * 5
const FIVE_SECONDS = 1000 * 5
const TEN_MINUTES = 1000 * 60 * 10
const TEN_SECONDS = 1000 * 10

export function createStructuredLoggingDiagnosticsMiddleware(
  store: GatsbyCLIStore
): DiagnosticsMiddleware {
  const stuckStatusDiagnosticTimeoutDelay = calculateTimeoutDelay(
    process.env.GATSBY_DIAGNOSTIC_STUCK_STATUS_TIMEOUT,
    FIVE_MINUTES, // default timeout
    FIVE_SECONDS // minimal timeout (this is mostly useful for debugging diagnostic code itself)
  )

  const stuckStatusWatchdogTimeoutDelay = calculateTimeoutDelay(
    process.env.GATSBY_WATCHDOG_STUCK_STATUS_TIMEOUT,
    TEN_MINUTES, // default timeout
    TEN_SECONDS // minimal timeout (this is mostly useful for debugging diagnostic code itself)
  )

  if (!stuckStatusDiagnosticTimeoutDelay && !stuckStatusWatchdogTimeoutDelay) {
    // Neither timer is enabled, so this is a no-op middleware.
    return (): void => {}
  }

  let displayedStuckStatusDiagnosticWarning = false
  let displayingStuckStatusDiagnosticWarning = false
  let stuckStatusDiagnosticTimer: NodeJS.Timeout | null = null
  let stuckStatusWatchdogTimer: NodeJS.Timeout | null = null
  let reporter: Reporter

  function generateStuckStatusDiagnosticMessage(): string {
    const { activities } = store.getState().logs

    return JSON.stringify(
      Object.values(activities)
        .filter(activity => isActivityInProgress(activity.status))
        .map(activity => {
          if (!activity.startTime) {
            return activity
          }

          return {
            ...activity,
            diagnostics_elapsed_seconds: calcElapsedTime(activity.startTime),
          }
        }),
      null,
      2
    )
  }
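
  // Example of the message this produces (illustrative only; the exact activity
  // fields depend on the shape of the logs reducer state): a pretty-printed JSON
  // array of in-progress activities, each annotated with elapsed seconds, e.g.
  //   [
  //     {
  //       "status": "IN_PROGRESS",
  //       "startTime": ...,
  //       "diagnostics_elapsed_seconds": 42.123,
  //       ...
  //     }
  //   ]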

  return function diagnosticMiddleware(action: ActionsUnion): void {
    // Ignore diagnostic logs, otherwise the diagnostic message itself would
    // reset the timers.
    if (!displayingStuckStatusDiagnosticWarning) {
      const currentStatus = store.getState().logs.status

      if (!reporter) {
        // Yuck - there's a circular dependency here, so the `reporter` import
        // is delayed until it's actually needed.
        reporter = require(`../reporter`).reporter
      }

      if (stuckStatusDiagnosticTimeoutDelay) {
        if (stuckStatusDiagnosticTimer) {
          clearTimeout(stuckStatusDiagnosticTimer)
          stuckStatusDiagnosticTimer = null
        }

        if (displayedStuckStatusDiagnosticWarning) {
          // Using nextTick here to prevent infinite recursion (reporter.warn
          // would result in another call of this function and so on).
          process.nextTick(() => {
            const activitiesDiagnosticsMessage = generateStuckStatusDiagnosticMessage()
            reporter.warn(
              `This is just diagnostic information (enabled by GATSBY_DIAGNOSTIC_STUCK_STATUS_TIMEOUT):\n\nThere was activity since the last diagnostic message. Log action:\n\n${JSON.stringify(
                action,
                null,
                2
              )}\n\nCurrently Gatsby is in: "${
                store.getState().logs.status
              }" state.${
                activitiesDiagnosticsMessage.length > 0
                  ? `\n\nActivities preventing Gatsby from transitioning to idle state:\n\n${activitiesDiagnosticsMessage}`
                  : ``
              }`
            )
          })
          displayedStuckStatusDiagnosticWarning = false
        }

        if (currentStatus === ActivityStatuses.InProgress) {
          stuckStatusDiagnosticTimer = setTimeout(
            function logStuckStatusDiagnostic() {
              displayingStuckStatusDiagnosticWarning = true
              reporter.warn(
                `This is just diagnostic information (enabled by GATSBY_DIAGNOSTIC_STUCK_STATUS_TIMEOUT):\n\nGatsby is in "${
                  store.getState().logs.status
                }" state without any updates for ${(
                  stuckStatusDiagnosticTimeoutDelay / 1000
                ).toFixed(
                  3
                )} seconds. Activities preventing Gatsby from transitioning to idle state:\n\n${generateStuckStatusDiagnosticMessage()}${
                  stuckStatusWatchdogTimeoutDelay
                    ? `\n\nProcess will be terminated in ${(
                        (stuckStatusWatchdogTimeoutDelay -
                          stuckStatusDiagnosticTimeoutDelay) /
                        1000
                      ).toFixed(3)} seconds if nothing changes.`
                    : ``
                }`
              )
              displayingStuckStatusDiagnosticWarning = false
              displayedStuckStatusDiagnosticWarning = true
            },
            stuckStatusDiagnosticTimeoutDelay
          )
        }
      }

      if (stuckStatusWatchdogTimeoutDelay) {
        if (stuckStatusWatchdogTimer) {
          clearTimeout(stuckStatusWatchdogTimer)
          stuckStatusWatchdogTimer = null
        }

        if (currentStatus === ActivityStatuses.InProgress) {
          stuckStatusWatchdogTimer = setTimeout(
            function fatalStuckStatusHandler() {
              reporter.panic(
                `Terminating the process (due to GATSBY_WATCHDOG_STUCK_STATUS_TIMEOUT):\n\nGatsby is in "${
                  store.getState().logs.status
                }" state without any updates for ${(
                  stuckStatusWatchdogTimeoutDelay / 1000
                ).toFixed(
                  3
                )} seconds. Activities preventing Gatsby from transitioning to idle state:\n\n${generateStuckStatusDiagnosticMessage()}`
              )
            },
            stuckStatusWatchdogTimeoutDelay
          )
        }
      }
    }
  }
}
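
// A minimal usage sketch (hypothetical wiring, not part of this file): the
// caller creates the middleware once with the structured-logging store and then
// invokes it for every dispatched log action, e.g.:
//
//   const diagnosticsMiddleware = createStructuredLoggingDiagnosticsMiddleware(store)
//   // ...wherever log actions are dispatched:
//   diagnosticsMiddleware(action)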