test.ts
/**
 * Custom testing script which runs the generator on all the schemas in the "tests" directory, and
 * validates that the output matches the expected outputs (in the "expected" directory within the
 * test folders). If a test is expected to fail, the expected error message should be put in an
 * "expected-error.txt" file in the relevant test folder. Optionally, a test can include a
 * "test-types.ts" file, which will be type-checked during the test, and will fail the test if there
 * are any errors.
 *
 * You can run specific tests by passing them as arguments to this script:
 *   npm run test -- options-behavior validation-errors
 *
 * You can use the "--keep-output" argument to keep the __TEST_TMP__ directories after the tests
 * have run, even if they are successful:
 *   npm run test -- --keep-output
 */
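// Illustrative layout of a single test folder, based on the conventions described above
// (the test name "options-behavior" is just an example):
//
//   tests/
//     options-behavior/
//       schema.prisma        - input schema (required)
//       expected/            - expected generator output, compared file-by-file
//       expected-error.txt   - expected stderr for tests that should fail (optional)
//       test-types.ts        - type-checked with tsc during the test (optional)
//       __TEST_TMP__/        - created by this script; removed unless --keep-output is passed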
import { exec } from "node:child_process";
import { promisify } from "node:util";
import { rimraf } from "rimraf";
import fs from "node:fs/promises";
import path from "node:path";
const TEMP_TEST_DIRNAME = "__TEST_TMP__";
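// ANSI escape sequences used to render the PASS/FAIL badges in the test output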
const RED = "\x1b[1;97;41m";
const GREEN = "\x1b[1;102;30m";
const RESET = "\x1b[0m";
const execAsync = promisify(exec);
const trimMultiLine = (s: string) =>
  s
    .trim()
    .split("\n")
    .map((l) => l.trim())
    .join("\n");
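// fs wrappers that treat a missing file/directory (ENOENT) as an expected case instead of an error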
async function validPath(path: string) {
  try {
    await fs.access(path);
    return path; // Return the path if it is accessible
  } catch (e) {
    if (e instanceof Error && "code" in e && e.code === "ENOENT") {
      return null; // Return null when file is not found
    }
    throw e;
  }
}
async function readFile(path: string) {
  try {
    return await fs.readFile(path, { encoding: "utf-8" });
  } catch (e) {
    if (e instanceof Error && "code" in e && e.code === "ENOENT") {
      return null; // Return null when file is not found
    }
    throw e;
  }
}
async function readDir(path: string) {
  try {
    return await fs.readdir(path, { withFileTypes: true });
  } catch (e) {
    if (e instanceof Error && "code" in e && e.code === "ENOENT") {
      return []; // Return empty array when dir not found
    }
    throw e;
  }
}
const testFilters = process.argv.slice(2);
const keepOutputIdx = testFilters.indexOf("--keep-output");
const keepOutput = keepOutputIdx !== -1;
if (keepOutput) {
  testFilters.splice(keepOutputIdx, 1);
}
const tests = (await readDir("tests"))
  .filter((d) => d.isDirectory() && (!testFilters.length || testFilters.includes(d.name)))
  .map((d) => path.join(d.path, d.name));
if (!tests.length) {
  console.error("No tests found!");
  process.exit(1);
}
// Get the length of the longest test name, so we can pad the output
const longestName = Math.max(...tests.map((t) => t.length));
console.log("\nRunning tests...");
let hasErrors = false;
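// Per-test workflow: copy the schema into a fresh __TEST_TMP__ directory, run `prisma generate`
// against it, compare the generated files with the contents of "expected", and (if present)
// type-check "test-types.ts". Any mismatch, unexpected or missing file, or unexpected error
// fails the test.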
for (const test of tests) {
  try {
    process.stdout.write(` ${test}${" ".repeat(longestName - test.length + 2)}`);
    const schema = await readFile(path.join(test, "schema.prisma"));
    if (!schema) {
      throw new Error(`Test ${test} has no schema.prisma!`);
    }
    let expectedError = await readFile(path.join(test, "expected-error.txt"));
    const typeTester = await validPath(path.join(test, "test-types.ts"));
    const expectedFiles: Map<string, string | null> = new Map();
    for (const entry of await readDir(path.join(test, "expected"))) {
      if (entry.isFile()) {
        expectedFiles.set(entry.name, await readFile(path.join(test, "expected", entry.name)));
      }
    }
    if (expectedFiles.size === 0 && !expectedError && !typeTester) {
      throw new Error(`Test ${test} has no expected files or errors!`);
    }
    const testDir = path.join(test, TEMP_TEST_DIRNAME);
    await rimraf(testDir); // Ensure test dir is clean before running
    await fs.mkdir(testDir, { recursive: true });
    await fs.writeFile(path.join(testDir, "schema.prisma"), schema);
    try {
      await execAsync(`prisma generate --schema=${path.join(testDir, "schema.prisma")}`);
    } catch (e) {
      const error = e as { code: number; stdout: string; stderr: string };
      if (expectedError && trimMultiLine(error.stderr) === trimMultiLine(expectedError)) {
        // Expected error occurred, set expectedError to null so we don't throw later
        expectedError = null;
      } else if (expectedError) {
        throw new Error("Stderr does not match expected error! Stderr:\n\n" + error.stderr);
      } else {
        throw new Error("Error running Prisma! Stderr:\n\n" + error.stderr);
      }
    }
    if (expectedError) {
      throw new Error("Expected error did not occur!");
    }
    if (typeTester) {
      try {
        await execAsync(`tsc --pretty --noEmit --strict --module NodeNext ${typeTester}`);
      } catch (e) {
        const error = e as { code: number; stdout: string; stderr: string };
        throw new Error(
          `Error validating types! tsc exited with code ${error.code}:\n\n${error.stdout}`,
        );
      }
    }
    const errors: string[] = [];
    const uncheckedFileNames = new Set(expectedFiles.keys());
    for (const entry of await readDir(testDir)) {
      if (!entry.isFile() || entry.name === "schema.prisma") {
        continue;
      }
      uncheckedFileNames.delete(entry.name);
      const filePath = path.join(testDir, entry.name);
      const fileContents = await readFile(filePath);
      const expectedContents = expectedFiles.get(entry.name);
      if (!expectedContents) {
        errors.push(`Unexpected file ${entry.name} in test output! See ${filePath}`);
        continue;
      }
      if (fileContents !== expectedContents) {
        errors.push(
          `Generated ${entry.name} does not match expected contents! Check the output in ${filePath}`,
        );
      }
    }
    for (const file of uncheckedFileNames) {
      errors.push(`Expected file ${file} was not generated!`);
    }
    if (errors.length) {
      throw new Error("Errors:\n" + errors.map((e) => ` - ${e}`).join("\n"));
    }
    process.stdout.write(GREEN + " PASS " + RESET + "\n");
    if (!keepOutput) {
      await rimraf(testDir); // Clean up test dir on success
    }
  } catch (e) {
    process.stdout.write(RED + " FAIL " + RESET + "\n\n");
    console.error((e as Error).message, "\n");
    hasErrors = true;
  }
}
if (hasErrors) {
  console.error("\nSome tests failed!");
  process.exit(1);
} else {
  console.log("\nAll tests passed!");
}