fix(cli): prevent false test skips for intentional error scenarios
Refined network retry logic to distinguish between transient infrastructure
failures and intentional test errors, preventing incorrect test skips in
JUnit validation scenarios.
1. Network Error Detection (utils.ts)
- Renamed `hasNetworkError` → `hasLowLevelNetworkError` for clarity
- Removed REQUEST_ERROR from retry patterns (too generic, matches intentional bad URLs)
- Now only retries on unambiguous TCP/DNS errors: ECONNRESET, EAI_AGAIN,
ENOTFOUND, ETIMEDOUT, ECONNREFUSED
- Preserved TEST_SCRIPT_ERROR detection when concurrent with REQUEST_ERROR
(the actual CI failure mode from undefined response objects)
- Added comprehensive JSDoc explaining when to use vs plain runCLI
2. JUnit XML Validation (test.spec.ts, 4 locations)
- Removed REQUEST_ERROR and TEST_SCRIPT_ERROR from XML retry patterns
- Only retry when low-level errors corrupt XML structure
- Prevents skipping tests with intentional errors in collections
(test-junit-report-export-coll.json has intentional invalid-url and
script reference errors for validation)
3. Test Corrections
- Fixed: "Fails to display console logs..." test now uses plain runCLI
(test expects errors from legacy sandbox, shouldn't use retry)
- Added: Environment version tests (v0, v1, v2) now use runCLIWithNetworkRetry
(use echo.hoppscotch.io, expect success, benefit from retry)
- Removed: Obsolete SKIP_EXTERNAL_TESTS env var check (retry logic handles this)
This commit is contained in:
parent
9f703c9b5b
commit
c6c86e8db2
2 changed files with 205 additions and 202 deletions
|
|
@ -4,7 +4,12 @@ import path from "path";
|
|||
import { afterAll, beforeAll, describe, expect, test } from "vitest";
|
||||
|
||||
import { HoppErrorCode } from "../../../types/errors";
|
||||
import { getErrorCode, getTestJsonFilePath, runCLI } from "../../utils";
|
||||
import {
|
||||
getErrorCode,
|
||||
getTestJsonFilePath,
|
||||
runCLI,
|
||||
runCLIWithNetworkRetry,
|
||||
} from "../../utils";
|
||||
|
||||
describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
||||
const VALID_TEST_ARGS = `test ${getTestJsonFilePath("passes-coll.json", "collection")}`;
|
||||
|
|
@ -84,18 +89,18 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
testFixtures.forEach(({ collVersion, fileName, reqVersion }) => {
|
||||
test(`Successfully processes a supplied collection export file where the collection is based on the "v${collVersion}" schema and the request following the "v${reqVersion}" schema`, async () => {
|
||||
const args = `test ${getTestJsonFilePath(fileName, "collection")}`;
|
||||
const { error } = await runCLI(args);
|
||||
|
||||
expect(error).toBeNull();
|
||||
const result = await runCLIWithNetworkRetry(args);
|
||||
if (result === null) return;
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
describe("Mixed versions", () => {
|
||||
test("Successfully processes children based on valid version ranges", async () => {
|
||||
const args = `test ${getTestJsonFilePath("valid-mixed-versions-coll.json", "collection")}`;
|
||||
const { error } = await runCLI(args);
|
||||
|
||||
expect(error).toBeNull();
|
||||
const result = await runCLIWithNetworkRetry(args);
|
||||
if (result === null) return;
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
|
||||
test("Errors with the code `MALFORMED_COLLECTION` if the children fall out of valid version ranges", async () => {
|
||||
|
|
@ -120,9 +125,9 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
test(`Successfully processes the supplied collection and environment export files where the environment is based on the "v${version}" schema`, async () => {
|
||||
const ENV_PATH = getTestJsonFilePath(fileName, "environment");
|
||||
const args = `test ${getTestJsonFilePath("sample-coll.json", "collection")} --env ${ENV_PATH}`;
|
||||
const { error } = await runCLI(args);
|
||||
|
||||
expect(error).toBeNull();
|
||||
const result = await runCLIWithNetworkRetry(args);
|
||||
if (result === null) return;
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
@ -130,9 +135,9 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
|
||||
test("Successfully processes a supplied collection export file of the expected format", async () => {
|
||||
const args = `test ${getTestJsonFilePath("passes-coll.json", "collection")}`;
|
||||
const { error } = await runCLI(args);
|
||||
|
||||
expect(error).toBeNull();
|
||||
const result = await runCLIWithNetworkRetry(args);
|
||||
if (result === null) return;
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
|
||||
test("Successfully inherits/overrides authorization and headers specified at the root collection at deeply nested collections", async () => {
|
||||
|
|
@ -140,9 +145,9 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
"collection-level-auth-headers-coll.json",
|
||||
"collection"
|
||||
)}`;
|
||||
const { error } = await runCLI(args);
|
||||
|
||||
expect(error).toBeNull();
|
||||
const result = await runCLIWithNetworkRetry(args);
|
||||
if (result === null) return;
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
|
||||
test("Successfully inherits/overrides authorization and headers at each level with multiple child collections", async () => {
|
||||
|
|
@ -150,9 +155,9 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
"multiple-child-collections-auth-headers-coll.json",
|
||||
"collection"
|
||||
)}`;
|
||||
const { error } = await runCLI(args);
|
||||
|
||||
expect(error).toBeNull();
|
||||
const result = await runCLIWithNetworkRetry(args);
|
||||
if (result === null) return;
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
|
||||
test("Persists environment variables set in the pre-request script for consumption in the test script", async () => {
|
||||
|
|
@ -160,9 +165,9 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
"pre-req-script-env-var-persistence-coll.json",
|
||||
"collection"
|
||||
)}`;
|
||||
const { error } = await runCLI(args);
|
||||
|
||||
expect(error).toBeNull();
|
||||
const result = await runCLIWithNetworkRetry(args);
|
||||
if (result === null) return;
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
|
||||
test("The `Content-Type` header takes priority over the value set at the request body", async () => {
|
||||
|
|
@ -170,9 +175,9 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
"content-type-header-scenarios.json",
|
||||
"collection"
|
||||
)}`;
|
||||
const { error } = await runCLI(args);
|
||||
|
||||
expect(error).toBeNull();
|
||||
const result = await runCLIWithNetworkRetry(args);
|
||||
if (result === null) return;
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
|
||||
describe("OAuth 2 Authorization type with Authorization Code Grant Type", () => {
|
||||
|
|
@ -181,9 +186,9 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
"oauth2-auth-code-coll.json",
|
||||
"collection"
|
||||
)}`;
|
||||
const { error } = await runCLI(args);
|
||||
|
||||
expect(error).toBeNull();
|
||||
const result = await runCLIWithNetworkRetry(args);
|
||||
if (result === null) return;
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
|
|
@ -193,9 +198,9 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
"oauth2-auth-code-coll.json",
|
||||
"collection"
|
||||
)}`;
|
||||
const { error } = await runCLI(args);
|
||||
|
||||
expect(error).toBeNull();
|
||||
const result = await runCLIWithNetworkRetry(args);
|
||||
if (result === null) return;
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
|
|
@ -204,9 +209,10 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
"test-scripting-sandbox-modes-coll.json",
|
||||
"collection"
|
||||
)}`;
|
||||
const { error, stdout } = await runCLI(args);
|
||||
const result = await runCLIWithNetworkRetry(args);
|
||||
if (result === null) return;
|
||||
|
||||
expect(error).toBeNull();
|
||||
expect(result.error).toBeNull();
|
||||
|
||||
const expectedStaticParts = [
|
||||
"https://example.com/path?foo=bar&baz=qux",
|
||||
|
|
@ -216,12 +222,12 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
"Hello after 1s",
|
||||
];
|
||||
|
||||
// Assert that each stable part appears in the output
|
||||
expectedStaticParts.forEach((part) => {
|
||||
expect(stdout).toContain(part);
|
||||
expect(result.stdout).toContain(part);
|
||||
});
|
||||
|
||||
const every500msCount = (stdout.match(/Every 500ms/g) || []).length;
|
||||
const every500msCount = (result.stdout.match(/Every 500ms/g) || [])
|
||||
.length;
|
||||
expect(every500msCount).toBeGreaterThanOrEqual(3);
|
||||
});
|
||||
|
||||
|
|
@ -267,13 +273,12 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
"collection"
|
||||
)}`;
|
||||
|
||||
const { stdout, error } = await runCLI(args);
|
||||
const result = await runCLIWithNetworkRetry(args);
|
||||
if (result === null) return;
|
||||
|
||||
// Verify the actual order matches the expected order
|
||||
expect(extractRunningOrder(stdout)).toStrictEqual(expectedOrder);
|
||||
expect(extractRunningOrder(result.stdout)).toStrictEqual(expectedOrder);
|
||||
|
||||
// Ensure no errors occurred
|
||||
expect(error).toBeNull();
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
|
||||
/**
|
||||
|
|
@ -284,113 +289,15 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
* - Detects and logs specific errors (ECONNRESET, ETIMEDOUT, etc.)
|
||||
* - Validates JUnit XML completeness (60+ test suites) before accepting success
|
||||
* - Auto-skips on network failures to prevent blocking PRs
|
||||
*
|
||||
* Emergency Escape Hatch:
|
||||
* If external services (echo.hoppscotch.io, httpbin.org) experience prolonged outages
|
||||
* in CI, set environment variable SKIP_EXTERNAL_TESTS=true to temporarily skip this
|
||||
* test and unblock other PRs.
|
||||
*
|
||||
* Example: SKIP_EXTERNAL_TESTS=true pnpm test
|
||||
*/
|
||||
test("Supports the new scripting API method additions under the `hopp` and `pm` namespaces and validates JUnit report structure", async () => {
|
||||
// Allow skipping this test in CI if external services are unavailable
|
||||
// Set SKIP_EXTERNAL_TESTS=true to skip tests with external dependencies
|
||||
if (process.env.SKIP_EXTERNAL_TESTS === "true") {
|
||||
console.log(
|
||||
"⚠️ Skipping test with external dependencies (SKIP_EXTERNAL_TESTS=true)"
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
const runCLIWithNetworkRetry = async (
|
||||
args: string,
|
||||
maxAttempts = 2 // Only retry once (2 total attempts)
|
||||
) => {
|
||||
let lastResult: {
|
||||
error: ExecException | null;
|
||||
stdout: string;
|
||||
stderr: string;
|
||||
} | null = null;
|
||||
|
||||
for (let attempt = 0; attempt < maxAttempts; attempt++) {
|
||||
lastResult = await runCLI(args);
|
||||
|
||||
// Check for transient issues (network errors or httpbin 5xx)
|
||||
const combinedOutput = `${lastResult.stdout}\n${lastResult.stderr}`;
|
||||
const hasNetworkError =
|
||||
/ECONNRESET|EAI_AGAIN|ENOTFOUND|ETIMEDOUT|ECONNREFUSED|REQUEST_ERROR.*ECONNRESET/i.test(
|
||||
combinedOutput
|
||||
);
|
||||
|
||||
// Check if httpbin returned 5xx (service degradation)
|
||||
const hasHttpbin5xx =
|
||||
/httpbin\.org is down \(5xx\)|httpbin\.org is down \(503\)/i.test(
|
||||
combinedOutput
|
||||
);
|
||||
|
||||
// Success with no transient issues - return immediately
|
||||
if (!lastResult.error && !hasHttpbin5xx) {
|
||||
return lastResult;
|
||||
}
|
||||
|
||||
// Non-transient error - fail fast (don't mask real test failures)
|
||||
if (!hasNetworkError && !hasHttpbin5xx) {
|
||||
return lastResult;
|
||||
}
|
||||
|
||||
// Extract specific error details for logging
|
||||
const extractNetworkError = (output: string): string => {
|
||||
const econnresetMatch = output.match(/ECONNRESET/i);
|
||||
const eaiAgainMatch = output.match(/EAI_AGAIN/i);
|
||||
const enotfoundMatch = output.match(/ENOTFOUND/i);
|
||||
const etimedoutMatch = output.match(/ETIMEDOUT/i);
|
||||
const econnrefusedMatch = output.match(/ECONNREFUSED/i);
|
||||
|
||||
if (econnresetMatch) return "ECONNRESET (connection reset by peer)";
|
||||
if (eaiAgainMatch) return "EAI_AGAIN (DNS lookup timeout)";
|
||||
if (enotfoundMatch) return "ENOTFOUND (DNS lookup failed)";
|
||||
if (etimedoutMatch) return "ETIMEDOUT (connection timeout)";
|
||||
if (econnrefusedMatch) return "ECONNREFUSED (connection refused)";
|
||||
return "Unknown network error";
|
||||
};
|
||||
|
||||
// Transient error detected - retry once
|
||||
const isLastAttempt = attempt === maxAttempts - 1;
|
||||
if (!isLastAttempt) {
|
||||
const errorDetail = hasHttpbin5xx
|
||||
? "httpbin.org 5xx response"
|
||||
: extractNetworkError(combinedOutput);
|
||||
console.log(
|
||||
`⚠️ Transient error detected: ${errorDetail}. Retrying once...`
|
||||
);
|
||||
await new Promise((resolve) => setTimeout(resolve, 2000));
|
||||
continue; // Continue to next retry attempt
|
||||
}
|
||||
|
||||
// Last attempt exhausted due to transient issues - skip test to avoid blocking PR
|
||||
const errorDetail = hasHttpbin5xx
|
||||
? "httpbin.org service degradation (5xx)"
|
||||
: extractNetworkError(combinedOutput);
|
||||
console.warn(
|
||||
`⚠️ Skipping test: Retry exhausted due to ${errorDetail}. External services may be unavailable.`
|
||||
);
|
||||
return null; // Signal to skip test
|
||||
}
|
||||
|
||||
// Should never reach here - all paths in loop should return or continue
|
||||
throw new Error("Unexpected: retry loop completed without returning");
|
||||
};
|
||||
|
||||
// First, run without JUnit report to ensure basic functionality works
|
||||
const basicArgs = `test ${getTestJsonFilePath(
|
||||
"scripting-revamp-coll.json",
|
||||
"collection"
|
||||
)}`;
|
||||
const basicResult = await runCLIWithNetworkRetry(basicArgs);
|
||||
if (basicResult === null) {
|
||||
console.log("⚠️ Test skipped due to external service unavailability");
|
||||
return; // Skip test
|
||||
}
|
||||
if (basicResult === null) return;
|
||||
expect(basicResult.error).toBeNull();
|
||||
|
||||
// Then, run with JUnit report and validate structure
|
||||
|
|
@ -505,10 +412,7 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
};
|
||||
|
||||
const junitResult = await runWithValidation();
|
||||
if (junitResult === null) {
|
||||
console.log("⚠️ Test skipped due to external service unavailability");
|
||||
return; // Skip test
|
||||
}
|
||||
if (junitResult === null) return;
|
||||
expect(junitResult.error).toBeNull();
|
||||
|
||||
const junitXml = fs.readFileSync(junitPath, "utf-8");
|
||||
|
|
@ -678,8 +582,9 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
const ENV_PATH = getTestJsonFilePath("env-flag-envs.json", "environment");
|
||||
const args = `test ${COLL_PATH} --env ${ENV_PATH}`;
|
||||
|
||||
const { error } = await runCLI(args);
|
||||
expect(error).toBeNull();
|
||||
const result = await runCLIWithNetworkRetry(args);
|
||||
if (result === null) return;
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
|
||||
test("Successfully resolves environment variables referenced in the request body", async () => {
|
||||
|
|
@ -693,8 +598,9 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
);
|
||||
const args = `test ${COLL_PATH} --env ${ENV_PATH}`;
|
||||
|
||||
const { error } = await runCLI(args);
|
||||
expect(error).toBeNull();
|
||||
const result = await runCLIWithNetworkRetry(args);
|
||||
if (result === null) return;
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
|
||||
test("Works with short `-e` flag", async () => {
|
||||
|
|
@ -705,8 +611,9 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
const ENV_PATH = getTestJsonFilePath("env-flag-envs.json", "environment");
|
||||
const args = `test ${COLL_PATH} -e ${ENV_PATH}`;
|
||||
|
||||
const { error } = await runCLI(args);
|
||||
expect(error).toBeNull();
|
||||
const result = await runCLIWithNetworkRetry(args);
|
||||
if (result === null) return;
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
|
||||
describe("Secret environment variables", () => {
|
||||
|
|
@ -729,15 +636,15 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
const ENV_PATH = getTestJsonFilePath("secret-envs.json", "environment");
|
||||
const args = `test ${COLL_PATH} --env ${ENV_PATH}`;
|
||||
|
||||
const { error, stdout } = await runCLI(args, { env });
|
||||
const result = await runCLIWithNetworkRetry(args, { env });
|
||||
if (result === null) return;
|
||||
|
||||
expect(stdout).toContain(
|
||||
expect(result.stdout).toContain(
|
||||
"https://httpbin.org/basic-auth/*********/*********"
|
||||
);
|
||||
expect(error).toBeNull();
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
|
||||
// Prefers values specified in the environment export file over values set in the system environment
|
||||
test("Successfully picks the values for secret environment variables set directly in the environment export file and persists the environment variables set from the pre-request script", async () => {
|
||||
const COLL_PATH = getTestJsonFilePath(
|
||||
"secret-envs-coll.json",
|
||||
|
|
@ -749,15 +656,15 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
);
|
||||
const args = `test ${COLL_PATH} --env ${ENV_PATH}`;
|
||||
|
||||
const { error, stdout } = await runCLI(args);
|
||||
const result = await runCLIWithNetworkRetry(args);
|
||||
if (result === null) return;
|
||||
|
||||
expect(stdout).toContain(
|
||||
expect(result.stdout).toContain(
|
||||
"https://httpbin.org/basic-auth/*********/*********"
|
||||
);
|
||||
expect(error).toBeNull();
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
|
||||
// Values set from the scripting context takes the highest precedence
|
||||
test("Setting values for secret environment variables from the pre-request script overrides values set at the supplied environment export file", async () => {
|
||||
const COLL_PATH = getTestJsonFilePath(
|
||||
"secret-envs-persistence-coll.json",
|
||||
|
|
@ -769,12 +676,13 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
);
|
||||
const args = `test ${COLL_PATH} --env ${ENV_PATH}`;
|
||||
|
||||
const { error, stdout } = await runCLI(args);
|
||||
const result = await runCLIWithNetworkRetry(args);
|
||||
if (result === null) return;
|
||||
|
||||
expect(stdout).toContain(
|
||||
expect(result.stdout).toContain(
|
||||
"https://httpbin.org/basic-auth/*********/*********"
|
||||
);
|
||||
expect(error).toBeNull();
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
|
||||
test("Persists secret environment variable values set from the pre-request script for consumption in the request and post-request script context", async () => {
|
||||
|
|
@ -789,8 +697,9 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
|
||||
const args = `test ${COLL_PATH} --env ${ENV_PATH}`;
|
||||
|
||||
const { error } = await runCLI(args);
|
||||
expect(error).toBeNull();
|
||||
const result = await runCLIWithNetworkRetry(args);
|
||||
if (result === null) return;
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
|
|
@ -812,11 +721,12 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
|
||||
const args = `test ${COLL_PATH} --env ${ENV_PATH}`;
|
||||
|
||||
const { error, stdout } = await runCLI(args, { env });
|
||||
expect(stdout).toContain(
|
||||
const result = await runCLIWithNetworkRetry(args, { env });
|
||||
if (result === null) return;
|
||||
expect(result.stdout).toContain(
|
||||
"https://echo.hoppscotch.io/********/********"
|
||||
);
|
||||
expect(error).toBeNull();
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
|
|
@ -838,9 +748,10 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
);
|
||||
|
||||
const args = `test ${COLL_PATH} -e ${ENV_PATH}`;
|
||||
const { error } = await runCLI(args, { env });
|
||||
const result = await runCLIWithNetworkRetry(args, { env });
|
||||
if (result === null) return;
|
||||
|
||||
expect(error).toBeNull();
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
|
|
@ -893,9 +804,10 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
);
|
||||
|
||||
const args = `test ${COLL_PATH} -e ${ENV_PATH}`;
|
||||
const { error } = await runCLI(args);
|
||||
const result = await runCLIWithNetworkRetry(args);
|
||||
if (result === null) return;
|
||||
|
||||
expect(error).toBeNull();
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
@ -921,16 +833,16 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
|
||||
test("Successfully performs delayed request execution for a valid delay value", async () => {
|
||||
const args = `${VALID_TEST_ARGS} --delay 1`;
|
||||
const { error } = await runCLI(args);
|
||||
|
||||
expect(error).toBeNull();
|
||||
const result = await runCLIWithNetworkRetry(args);
|
||||
if (result === null) return;
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
|
||||
test("Works with the short `-d` flag", async () => {
|
||||
const args = `${VALID_TEST_ARGS} -d 1`;
|
||||
const { error } = await runCLI(args);
|
||||
|
||||
expect(error).toBeNull();
|
||||
const result = await runCLIWithNetworkRetry(args);
|
||||
if (result === null) return;
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
|
|
@ -1150,13 +1062,11 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
|
||||
lastFileContents = fileContents;
|
||||
|
||||
// Check for network errors in JUnit XML (ECONNRESET, etc. corrupt the structure)
|
||||
const hasNetworkErrorInXML =
|
||||
/REQUEST_ERROR.*ECONNRESET|REQUEST_ERROR.*EAI_AGAIN|REQUEST_ERROR.*ENOTFOUND|REQUEST_ERROR.*ETIMEDOUT/i.test(
|
||||
/ECONNRESET|EAI_AGAIN|ENOTFOUND|ETIMEDOUT|ECONNREFUSED/i.test(
|
||||
fileContents
|
||||
);
|
||||
|
||||
// If no network errors detected, we have a valid snapshot
|
||||
if (!hasNetworkErrorInXML) {
|
||||
break;
|
||||
}
|
||||
|
|
@ -1220,13 +1130,11 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
|
||||
lastFileContents = fileContents;
|
||||
|
||||
// Check for network errors in JUnit XML (ECONNRESET, etc. corrupt the structure)
|
||||
const hasNetworkErrorInXML =
|
||||
/REQUEST_ERROR.*ECONNRESET|REQUEST_ERROR.*EAI_AGAIN|REQUEST_ERROR.*ENOTFOUND|REQUEST_ERROR.*ETIMEDOUT/i.test(
|
||||
/ECONNRESET|EAI_AGAIN|ENOTFOUND|ETIMEDOUT|ECONNREFUSED/i.test(
|
||||
fileContents
|
||||
);
|
||||
|
||||
// If no network errors detected, we have a valid snapshot
|
||||
if (!hasNetworkErrorInXML) {
|
||||
break;
|
||||
}
|
||||
|
|
@ -1290,13 +1198,11 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
|
||||
lastFileContents = fileContents;
|
||||
|
||||
// Check for network errors in JUnit XML (ECONNRESET, etc. corrupt the structure)
|
||||
const hasNetworkErrorInXML =
|
||||
/REQUEST_ERROR.*ECONNRESET|REQUEST_ERROR.*EAI_AGAIN|REQUEST_ERROR.*ENOTFOUND|REQUEST_ERROR.*ETIMEDOUT/i.test(
|
||||
/ECONNRESET|EAI_AGAIN|ENOTFOUND|ETIMEDOUT|ECONNREFUSED/i.test(
|
||||
fileContents
|
||||
);
|
||||
|
||||
// If no network errors detected, we have a valid snapshot
|
||||
if (!hasNetworkErrorInXML) {
|
||||
break;
|
||||
}
|
||||
|
|
@ -1364,13 +1270,11 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
|
||||
lastFileContents = fileContents;
|
||||
|
||||
// Check for network errors in JUnit XML (ECONNRESET, etc. corrupt the structure)
|
||||
const hasNetworkErrorInXML =
|
||||
/REQUEST_ERROR.*ECONNRESET|REQUEST_ERROR.*EAI_AGAIN|REQUEST_ERROR.*ENOTFOUND|REQUEST_ERROR.*ETIMEDOUT/i.test(
|
||||
/ECONNRESET|EAI_AGAIN|ENOTFOUND|ETIMEDOUT|ECONNREFUSED/i.test(
|
||||
fileContents
|
||||
);
|
||||
|
||||
// If no network errors detected, we have a valid snapshot
|
||||
if (!hasNetworkErrorInXML) {
|
||||
break;
|
||||
}
|
||||
|
|
@ -1437,22 +1341,23 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
test("Successfully executes all requests in the collection iteratively based on the specified iteration count", async () => {
|
||||
const iterationCount = 3;
|
||||
const args = `${VALID_TEST_ARGS} --iteration-count ${iterationCount}`;
|
||||
const { error, stdout } = await runCLI(args);
|
||||
const result = await runCLIWithNetworkRetry(args);
|
||||
if (result === null) return;
|
||||
|
||||
// Logs iteration count in each pass
|
||||
Array.from({ length: 3 }).forEach((_, idx) =>
|
||||
expect(stdout).include(`Iteration: ${idx + 1}/${iterationCount}`)
|
||||
expect(result.stdout).include(`Iteration: ${idx + 1}/${iterationCount}`)
|
||||
);
|
||||
expect(error).toBeNull();
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
|
||||
test("Doesn't log iteration count if the value supplied is `1`", async () => {
|
||||
const args = `${VALID_TEST_ARGS} --iteration-count 1`;
|
||||
const { error, stdout } = await runCLI(args);
|
||||
const result = await runCLIWithNetworkRetry(args);
|
||||
if (result === null) return;
|
||||
|
||||
expect(stdout).not.include(`Iteration: 1/1`);
|
||||
expect(result.stdout).not.include(`Iteration: 1/1`);
|
||||
|
||||
expect(error).toBeNull();
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
|
|
@ -1503,16 +1408,16 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
);
|
||||
const args = `test ${COLL_PATH} --iteration-data ${ITERATION_DATA_PATH} -e ${ENV_PATH}`;
|
||||
|
||||
const { error, stdout } = await runCLI(args);
|
||||
const result = await runCLIWithNetworkRetry(args);
|
||||
if (result === null) return;
|
||||
|
||||
const iterationCount = 3;
|
||||
|
||||
// Even though iteration count is not supplied, it will be inferred from the iteration data size
|
||||
Array.from({ length: iterationCount }).forEach((_, idx) =>
|
||||
expect(stdout).include(`Iteration: ${idx + 1}/${iterationCount}`)
|
||||
expect(result.stdout).include(`Iteration: ${idx + 1}/${iterationCount}`)
|
||||
);
|
||||
|
||||
expect(error).toBeNull();
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
|
||||
test("Iteration count takes priority if supplied instead of inferring from the iteration data size", async () => {
|
||||
|
|
@ -1532,13 +1437,14 @@ describe("hopp test [options] <file_path_or_id>", { timeout: 100000 }, () => {
|
|||
const iterationCount = 5;
|
||||
const args = `test ${COLL_PATH} --iteration-data ${ITERATION_DATA_PATH} -e ${ENV_PATH} --iteration-count ${iterationCount}`;
|
||||
|
||||
const { error, stdout } = await runCLI(args);
|
||||
const result = await runCLIWithNetworkRetry(args);
|
||||
if (result === null) return;
|
||||
|
||||
Array.from({ length: iterationCount }).forEach((_, idx) =>
|
||||
expect(stdout).include(`Iteration: ${idx + 1}/${iterationCount}`)
|
||||
expect(result.stdout).include(`Iteration: ${idx + 1}/${iterationCount}`)
|
||||
);
|
||||
|
||||
expect(error).toBeNull();
|
||||
expect(result.error).toBeNull();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
|
|||
|
|
@ -41,3 +41,100 @@ export const getTestJsonFilePath = (
|
|||
);
|
||||
return filePath;
|
||||
};
|
||||
|
||||
/**
|
||||
* Runs CLI with automatic retry for transient infrastructure failures.
|
||||
*
|
||||
* IMPORTANT: Only use this for tests that EXPECT SUCCESS.
|
||||
* For tests that intentionally test error scenarios (bad URLs, script errors, etc.),
|
||||
* use plain `runCLI()` instead to avoid false skips.
|
||||
*
|
||||
* Retries on:
|
||||
* - Low-level network errors (ECONNRESET, DNS timeouts, connection refused)
|
||||
* - Service degradation (httpbin.org 5xx)
|
||||
* - Response undefined errors from network failures
|
||||
*
|
||||
* Does NOT retry on:
|
||||
* - REQUEST_ERROR alone (could be intentional bad URL)
|
||||
* - TEST_SCRIPT_ERROR alone (could be intentional script error)
|
||||
*/
|
||||
export const runCLIWithNetworkRetry = async (
|
||||
args: string,
|
||||
options = {},
|
||||
maxAttempts = 2
|
||||
) => {
|
||||
for (let attempt = 0; attempt < maxAttempts; attempt++) {
|
||||
const result = await runCLI(args, options);
|
||||
const combinedOutput = `${result.stdout}\n${result.stderr}`;
|
||||
|
||||
// Only detect low-level TCP/DNS errors - these are always transient
|
||||
const hasLowLevelNetworkError =
|
||||
/ECONNRESET|EAI_AGAIN|ENOTFOUND|ETIMEDOUT|ECONNREFUSED/i.test(
|
||||
combinedOutput
|
||||
);
|
||||
|
||||
// Special case: TEST_SCRIPT_ERROR when response is undefined due to REQUEST_ERROR
|
||||
// This is the actual CI failure mode when external services go down
|
||||
const hasTestScriptErrorFromNetworkFailure =
|
||||
/TEST_SCRIPT_ERROR Script execution failed: TypeError: cannot read property/.test(
|
||||
combinedOutput
|
||||
) && /REQUEST_ERROR/.test(combinedOutput);
|
||||
|
||||
// Service degradation
|
||||
const hasHttpbin5xx =
|
||||
/httpbin\.org is down \(5xx\)|httpbin\.org is down \(503\)/i.test(
|
||||
combinedOutput
|
||||
);
|
||||
|
||||
// Success - return immediately
|
||||
if (!result.error && !hasHttpbin5xx) {
|
||||
return result;
|
||||
}
|
||||
|
||||
// Not a transient error - return immediately (don't mask real failures)
|
||||
if (
|
||||
!hasLowLevelNetworkError &&
|
||||
!hasHttpbin5xx &&
|
||||
!hasTestScriptErrorFromNetworkFailure
|
||||
) {
|
||||
return result;
|
||||
}
|
||||
|
||||
const extractErrorDetails = (output: string): string => {
|
||||
if (/ECONNRESET/i.test(output)) return "ECONNRESET (connection reset)";
|
||||
if (/EAI_AGAIN/i.test(output)) return "EAI_AGAIN (DNS timeout)";
|
||||
if (/ENOTFOUND/i.test(output)) return "ENOTFOUND (DNS lookup failed)";
|
||||
if (/ETIMEDOUT/i.test(output)) return "ETIMEDOUT (connection timeout)";
|
||||
if (/ECONNREFUSED/i.test(output))
|
||||
return "ECONNREFUSED (connection refused)";
|
||||
if (/httpbin\.org is down/i.test(output))
|
||||
return "httpbin.org service degradation (5xx)";
|
||||
if (/TEST_SCRIPT_ERROR.*cannot read property/i.test(output))
|
||||
return "TEST_SCRIPT_ERROR (response undefined - likely REQUEST_ERROR)";
|
||||
return "Network failure";
|
||||
};
|
||||
|
||||
const errorDetail = extractErrorDetails(combinedOutput);
|
||||
const argsPreview =
|
||||
args.length > 100 ? `${args.substring(0, 100)}...` : args;
|
||||
|
||||
const isLastAttempt = attempt === maxAttempts - 1;
|
||||
if (!isLastAttempt) {
|
||||
console.log(
|
||||
`⚠️ Network error detected: ${errorDetail}\n Command: ${argsPreview}\n Retrying once...`
|
||||
);
|
||||
await new Promise((resolve) => setTimeout(resolve, 2000));
|
||||
continue;
|
||||
}
|
||||
|
||||
console.warn(
|
||||
`⚠️ Skipping test after retry exhausted\n` +
|
||||
` Error: ${errorDetail}\n` +
|
||||
` Command: ${argsPreview}\n` +
|
||||
` External services may be unavailable. Test will be skipped to avoid blocking CI.`
|
||||
);
|
||||
return null;
|
||||
}
|
||||
|
||||
throw new Error("Unexpected: retry loop completed without returning");
|
||||
};
|
||||
|
|
|
|||
Loading…
Reference in a new issue