fix(core): changes to the daemon in order to support windows (#7211)
commit 0d84a61b72 (parent 83a6ffb47a)
@@ -1,8 +1,8 @@
-import { logger } from '@nrwl/devkit';
+import { logger, normalizePath } from '@nrwl/devkit';
 import { appRootPath } from '@nrwl/tao/src/utils/app-root';
 import { spawn, spawnSync } from 'child_process';
 import { ensureFileSync } from 'fs-extra';
-import { join, sep } from 'path';
+import { join } from 'path';
 import { dirSync } from 'tmp';
 import {
   DaemonJson,
@@ -17,9 +17,9 @@ export async function startInBackground(): Promise<void> {
  * starting the server, as well as providing a reference to where any subsequent
  * log files can be found.
  */
-const tmpDirPrefix = `nx-daemon--${appRootPath.replace(
-  // Replace the occurrences of / on unix systems, the \ on windows, with a -
-  new RegExp(escapeRegExp(sep), 'g'),
+const tmpDirPrefix = `nx-daemon--${normalizePath(appRootPath).replace(
+  // Replace the occurrences of / in the unix-style normalized path with a -
+  new RegExp(escapeRegExp('/'), 'g'),
   '-'
 )}`;
 const serverLogOutputDir = dirSync({
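The normalization matters because appRootPath is an OS-specific absolute path. A minimal sketch of the resulting behaviour (not part of the commit; escapeRegExp here is a stand-in for the workspace's own utility, and the example paths are hypothetical):

import { normalizePath } from '@nrwl/devkit';

// Stand-in for the workspace's escapeRegExp utility referenced in the diff.
function escapeRegExp(value: string): string {
  return value.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
}

// normalizePath() converts Windows back-slashes to forward slashes, so a single
// '/'-based replacement is enough to build a stable, cross-platform prefix.
function tmpDirPrefixFor(appRootPath: string): string {
  return `nx-daemon--${normalizePath(appRootPath).replace(
    new RegExp(escapeRegExp('/'), 'g'),
    '-'
  )}`;
}

// tmpDirPrefixFor('C:\\work\\my-repo') === 'nx-daemon--C:-work-my-repo'
// tmpDirPrefixFor('/home/dev/my-repo') === 'nx-daemon---home-dev-my-repo'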
@@ -43,7 +43,7 @@ export async function startInBackground(): Promise<void> {

   try {
     const backgroundProcess = spawn(
-      'node',
+      process.execPath,
       ['./start.js', serverLogOutputFile],
       {
         cwd: __dirname,
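Using process.execPath instead of the bare string 'node' avoids relying on a node binary being resolvable on the PATH of the spawned environment, a common failure mode on Windows where spawn does not go through a shell by default; it is the absolute path of the exact Node binary already running Nx. A minimal sketch of the pattern (the script name and the spawn options here are illustrative, not taken from the commit):

import { spawn } from 'child_process';

// Re-use the Node binary that is executing this process; './worker.js' and the
// options below are hypothetical, for illustration only.
const child = spawn(process.execPath, ['./worker.js'], {
  cwd: __dirname,
  stdio: 'ignore',
  detached: true,
});
child.unref(); // let the parent exit independently of the background child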
@@ -64,8 +64,8 @@ export async function startInBackground(): Promise<void> {
    * Ensure the server is actually available to connect to via IPC before resolving
    */
   return new Promise((resolve) => {
-    const id = setInterval(() => {
-      if (isServerAvailable()) {
+    const id = setInterval(async () => {
+      if (await isServerAvailable()) {
         clearInterval(id);
         resolve();
       }
@@ -80,7 +80,7 @@ export async function startInBackground(): Promise<void> {
 export function startInCurrentProcess(): void {
   logger.info(`NX Daemon Server - Starting in the current process...`);

-  spawnSync('node', ['./start.js'], {
+  spawnSync(process.execPath, ['./start.js'], {
     cwd: __dirname,
     stdio: 'inherit',
   });
@@ -89,7 +89,7 @@ export function startInCurrentProcess(): void {
 export function stop(): void {
   logger.info(`NX Daemon Server - Stopping...`);

-  spawnSync('node', ['./stop.js'], {
+  spawnSync(process.execPath, ['./stop.js'], {
     cwd: __dirname,
     stdio: 'inherit',
   });
@@ -59,13 +59,13 @@ describe('Daemon Server', () => {

   describe('isServerAvailable()', () => {
     it('should return true if the daemon server is available for connections', async () => {
-      expect(isServerAvailable()).toBe(false);
+      expect(await isServerAvailable()).toBe(false);

       await startServer({});
-      expect(isServerAvailable()).toBe(true);
+      expect(await isServerAvailable()).toBe(true);

       await stopServer();
-      expect(isServerAvailable()).toBe(false);
+      expect(await isServerAvailable()).toBe(false);
     });
   });

@@ -118,10 +118,38 @@ function createAndSerializeProjectGraph(): string {
 }

 /**
- * For now we just invoke the existing `createProjectGraph()` utility and return the project
- * graph upon connection to the server
+ * We need to make sure that we instantiate the PerformanceObserver only once, otherwise
+ * we will end up with duplicate entries in the server logs.
  */
 let performanceObserver: PerformanceObserver | undefined;

+/**
+ * Create the server and register a connection callback.
+ *
+ * NOTE: It is important that we do not eagerly perform any work directly upon connection
+ * of a client.
+ *
+ * This is because there are two different scenarios for connection that we need to support:
+ * 1) Checking if the server is fundamentally available
+ * 2) Requesting the project graph to be built
+ *
+ * For case (1) we want the operation to be as fast as possible - we just need to know if the
+ * server exists to connect to, and so this is why we do not perform any work directly upon
+ * connection.
+ *
+ * For case (2) we have a simple known data payload which the client will send to the server
+ * in order to trigger the more expensive graph construction logic.
+ *
+ * The real reason we need to have these two separate steps, in which we decouple connection from
+ * the request to actually build the project graph, is down to the fact that on Windows, the named
+ * pipe behaves subtly differently from Unix domain sockets in that when you check if it exists
+ * as if it were a file (e.g. using fs.existsSync() or fs.open() or fs.statSync() etc), the native
+ * code which runs behind the scenes on the OS will actually trigger a connection to the server.
+ * Therefore if we were to simply perform the graph creation upon connection we would end up
+ * repeating work and throwing `EPIPE` errors.
+ */
+const REQUEST_PROJECT_GRAPH_PAYLOAD = 'REQUEST_PROJECT_GRAPH_PAYLOAD';
+
 const server = createServer((socket) => {
   if (!performanceObserver) {
     performanceObserver = new PerformanceObserver((list) => {
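The new comment block and the REQUEST_PROJECT_GRAPH_PAYLOAD constant above are effectively the whole client/server protocol: a bare connection is treated as a liveness probe, and only the known payload triggers graph construction. A heavily condensed sketch of that shape (not the real implementation; the socket path and the response body are placeholders):

import { createServer } from 'net';

const SOCKET_PATH = '/tmp/nx-daemon-example.sock'; // placeholder path
const REQUEST_PROJECT_GRAPH_PAYLOAD = 'REQUEST_PROJECT_GRAPH_PAYLOAD';

const server = createServer((socket) => {
  // Deliberately no work here: a bare connect is how clients (and Windows named
  // pipe existence checks) probe availability.
  socket.on('data', (data) => {
    if (data.toString() !== REQUEST_PROJECT_GRAPH_PAYLOAD) {
      throw new Error(`Unsupported payload: ${data.toString()}`);
    }
    // Only an explicit request pays for the expensive work.
    const serializedGraph = JSON.stringify({ nodes: {}, dependencies: {} });
    socket.write(serializedGraph, () => socket.end());
  });
});

server.listen(SOCKET_PATH);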
@@ -129,105 +157,116 @@ const server = createServer((socket) => {
       // Slight indentation to improve readability of the overall log file
       serverLog(`  Time taken for '${entry.name}'`, `${entry.duration}ms`);
     });
+    performanceObserver.observe({ entryTypes: ['measure'], buffered: false });
   }
-  performanceObserver.observe({ entryTypes: ['measure'], buffered: false });

-  performance.mark('server-connection');
-  serverLog('Connection Received');
-
-  const currentGitHead = gitRevParseHead(appRootPath);
-
-  let serializedProjectGraph: string | undefined;
-
-  /**
-   * Cached HEAD has changed, we must perform full file-hashing initialization work and
-   * recompute the project graph
-   */
-  if (currentGitHead !== cachedGitHead) {
-    serverLog(
-      `  [SERVER STATE]: Cached HEAD does not match current (${currentGitHead}), performing full file hash init and recomputing project graph...`
-    );
-    /**
-     * Update the cached values for the HEAD and untracked and uncommitted state which was computed
-     * as part of full init()
-     */
-    const untrackedAndUncommittedFileHashes = defaultFileHasher.init();
-    hashAndCacheUntrackedUncommittedState(untrackedAndUncommittedFileHashes);
-    cachedGitHead = currentGitHead;
-    serializedProjectGraph = createAndSerializeProjectGraph();
-  } else {
-    /**
-     * We know at this point that the cached HEAD has not changed but we must still always use git
-     * to check for untracked and uncommitted changes (and we then create and cache a hash which
-     * represents their overall state).
-     *
-     * We cannot ever skip this particular git operation, but we can compare its result to our
-     * previously cached hash which represents the overall state for untracked and uncommitted changes
-     * and then potentially skip project graph creation altogether if it is unchanged and we have an
-     * existing cached graph.
-     */
-    const previousUntrackedUncommittedState = cachedUntrackedUncommittedState;
-    const untrackedAndUncommittedFileHashes =
-      defaultFileHasher.incrementalUpdate();
-    hashAndCacheUntrackedUncommittedState(untrackedAndUncommittedFileHashes);
-
-    /**
-     * Skip project graph creation if the untracked and uncommitted state is unchanged and we have
-     * a cached version of the graph available in memory.
-     */
-    if (
-      previousUntrackedUncommittedState === cachedUntrackedUncommittedState &&
-      cachedSerializedProjectGraph
-    ) {
-      serverLog(
-        `  [SERVER STATE]: State unchanged since last request, resolving in-memory cached project graph...`
-      );
-      serializedProjectGraph = cachedSerializedProjectGraph;
-    } else {
-      serverLog(
-        `  [SERVER STATE]: Hashed untracked/uncommitted file state changed (now ${cachedUntrackedUncommittedState}), recomputing project graph...`
-      );
-      serializedProjectGraph = createAndSerializeProjectGraph();
-    }
-  }
-
-  /**
-   * Cache the latest version of the project graph in memory so that we can potentially skip a lot
-   * of expensive work on the next client request.
-   *
-   * For reference, on the very large test repo https://github.com/vsavkin/interstellar the project
-   * graph nxdeps.json file is about 24MB, so memory utilization should not be a huge concern.
-   */
-  cachedSerializedProjectGraph = serializedProjectGraph;
-
-  performance.mark('serialized-project-graph-ready');
-  performance.measure(
-    'total for creating and serializing project graph',
-    'server-connection',
-    'serialized-project-graph-ready'
-  );
-
-  socket.write(serializedProjectGraph, () => {
-    performance.mark('serialized-project-graph-written-to-client');
-    performance.measure(
-      'write project graph to socket',
-      'serialized-project-graph-ready',
-      'serialized-project-graph-written-to-client'
-    );
-    /**
-     * Close the connection once all data has been written to the socket so that the client
-     * knows when to read it.
-     */
-    socket.end();
-    performance.measure(
-      'total for server response',
-      'server-connection',
-      'serialized-project-graph-written-to-client'
-    );
-    const bytesWritten = Buffer.byteLength(serializedProjectGraph, 'utf-8');
-    serverLog(
-      `Closed Connection to Client (${bytesWritten} bytes transferred)`
-    );
+  socket.on('data', (data) => {
+    /**
+     * If anything other than the known project graph creation request payload is sent to
+     * the server, we throw an error.
+     */
+    const payload = data.toString();
+    if (payload !== REQUEST_PROJECT_GRAPH_PAYLOAD) {
+      throw new Error(`Unsupported payload sent to daemon server: ${payload}`);
+    }
+
+    performance.mark('server-connection');
+    serverLog('Client Request for Project Graph Received');
+
+    const currentGitHead = gitRevParseHead(appRootPath);
+
+    let serializedProjectGraph: string | undefined;
+
+    /**
+     * Cached HEAD has changed, we must perform full file-hashing initialization work and
+     * recompute the project graph
+     */
+    if (currentGitHead !== cachedGitHead) {
+      serverLog(
+        `  [SERVER STATE]: Cached HEAD does not match current (${currentGitHead}), performing full file hash init and recomputing project graph...`
+      );
+      /**
+       * Update the cached values for the HEAD and untracked and uncommitted state which was computed
+       * as part of full init()
+       */
+      const untrackedAndUncommittedFileHashes = defaultFileHasher.init();
+      hashAndCacheUntrackedUncommittedState(untrackedAndUncommittedFileHashes);
+      cachedGitHead = currentGitHead;
+      serializedProjectGraph = createAndSerializeProjectGraph();
+    } else {
+      /**
+       * We know at this point that the cached HEAD has not changed but we must still always use git
+       * to check for untracked and uncommitted changes (and we then create and cache a hash which
+       * represents their overall state).
+       *
+       * We cannot ever skip this particular git operation, but we can compare its result to our
+       * previously cached hash which represents the overall state for untracked and uncommitted changes
+       * and then potentially skip project graph creation altogether if it is unchanged and we have an
+       * existing cached graph.
+       */
+      const previousUntrackedUncommittedState = cachedUntrackedUncommittedState;
+      const untrackedAndUncommittedFileHashes =
+        defaultFileHasher.incrementalUpdate();
+      hashAndCacheUntrackedUncommittedState(untrackedAndUncommittedFileHashes);
+
+      /**
+       * Skip project graph creation if the untracked and uncommitted state is unchanged and we have
+       * a cached version of the graph available in memory.
+       */
+      if (
+        previousUntrackedUncommittedState === cachedUntrackedUncommittedState &&
+        cachedSerializedProjectGraph
+      ) {
+        serverLog(
+          `  [SERVER STATE]: State unchanged since last request, resolving in-memory cached project graph...`
+        );
+        serializedProjectGraph = cachedSerializedProjectGraph;
+      } else {
+        serverLog(
+          `  [SERVER STATE]: Hashed untracked/uncommitted file state changed (now ${cachedUntrackedUncommittedState}), recomputing project graph...`
+        );
+        serializedProjectGraph = createAndSerializeProjectGraph();
+      }
+    }
+
+    /**
+     * Cache the latest version of the project graph in memory so that we can potentially skip a lot
+     * of expensive work on the next client request.
+     *
+     * For reference, on the very large test repo https://github.com/vsavkin/interstellar the project
+     * graph nxdeps.json file is about 32MB, so memory utilization should not be a huge concern.
+     */
+    cachedSerializedProjectGraph = serializedProjectGraph;
+
+    performance.mark('serialized-project-graph-ready');
+    performance.measure(
+      'total for creating and serializing project graph',
+      'server-connection',
+      'serialized-project-graph-ready'
+    );
+
+    socket.write(serializedProjectGraph, () => {
+      performance.mark('serialized-project-graph-written-to-client');
+      performance.measure(
+        'write project graph to socket',
+        'serialized-project-graph-ready',
+        'serialized-project-graph-written-to-client'
+      );
+      /**
+       * Close the connection once all data has been written to the socket so that the client
+       * knows when to read it.
+       */
+      socket.end();
+      performance.measure(
+        'total for server response',
+        'server-connection',
+        'serialized-project-graph-written-to-client'
+      );
+      const bytesWritten = Buffer.byteLength(serializedProjectGraph, 'utf-8');
+      serverLog(
+        `Closed Connection to Client (${bytesWritten} bytes transferred)`
+      );
+    });
   });
 });

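The handler above keeps three pieces of state between requests: the git HEAD it last saw, a hash of the untracked/uncommitted files, and the last serialized graph. A self-contained restatement of just the recompute decision, for clarity (the real code mutates module-level caches rather than passing state around):

type GraphCacheState = {
  cachedGitHead: string | undefined;
  cachedUntrackedUncommittedState: string | undefined;
  cachedSerializedProjectGraph: string | undefined;
};

function shouldRecomputeGraph(
  state: GraphCacheState,
  currentGitHead: string,
  currentUntrackedUncommittedState: string
): boolean {
  if (currentGitHead !== state.cachedGitHead) {
    return true; // HEAD moved: full file-hash init and a fresh graph
  }
  if (
    currentUntrackedUncommittedState === state.cachedUntrackedUncommittedState &&
    state.cachedSerializedProjectGraph
  ) {
    return false; // nothing changed: serve the in-memory graph
  }
  return true; // dirty files changed (or no cached graph yet): recompute
}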
@@ -246,7 +285,7 @@ export async function startServer({
   }
   return new Promise((resolve) => {
     server.listen(fullOSSocketPath, () => {
-      serverLog(`Started`);
+      serverLog(`Started listening on: ${fullOSSocketPath}`);
       return resolve(server);
     });
   });
@@ -283,12 +322,28 @@ export function killSocketOrPath(): void {
   } catch {}
 }

-export function isServerAvailable(): boolean {
+/**
+ * As noted in the comments above the createServer() call, in order to reliably (meaning it works
+ * cross-platform) check whether or not the server is available to request a project graph from,
+ * we need to actually attempt connecting to it.
+ *
+ * Because of the behavior of named pipes on Windows, we cannot simply treat them as a file and
+ * check for their existence on disk (unlike with Unix Sockets).
+ */
+export async function isServerAvailable(): Promise<boolean> {
   try {
-    statSync(fullOSSocketPath);
-    return true;
+    const socket = connect(fullOSSocketPath);
+    return new Promise((resolve) => {
+      socket.on('connect', () => {
+        socket.destroy();
+        resolve(true);
+      });
+      socket.on('error', () => {
+        resolve(false);
+      });
+    });
   } catch {
-    return false;
+    return Promise.resolve(false);
   }
 }

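fullOSSocketPath is defined elsewhere in the daemon code; the relevant platform detail (from Node's net documentation) is that Windows IPC endpoints must live under the \\.\pipe\ namespace, whereas Unix domain sockets are ordinary filesystem paths. A hypothetical helper showing that distinction, not the commit's actual implementation:

import { platform } from 'os';

// Hypothetical: map a Unix-style socket path onto the OS-specific IPC endpoint.
// On Windows, net.createServer() / net.connect() require the '\\.\pipe\' prefix.
function toOSSocketPath(socketPath: string): string {
  return platform() === 'win32'
    ? '\\\\.\\pipe\\' + socketPath
    : socketPath;
}

// toOSSocketPath('/tmp/nx/d.sock') on Windows -> '\\.\pipe\/tmp/nx/d.sock'
// toOSSocketPath('/tmp/nx/d.sock') on Linux   -> '/tmp/nx/d.sock'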
@@ -319,22 +374,33 @@ export async function getProjectGraphFromServer(): Promise<ProjectGraph> {
       return reject(new Error(errorMessage) || err);
     });

-    let serializedProjectGraph = '';
-    socket.on('data', (data) => {
-      serializedProjectGraph += data.toString();
-    });
-
-    socket.on('end', () => {
-      try {
-        const projectGraph = JSON.parse(serializedProjectGraph) as ProjectGraph;
-        logger.info('NX Daemon Client - Resolved ProjectGraph');
-        return resolve(projectGraph);
-      } catch {
-        logger.error(
-          'NX Daemon Client - Error: Could not deserialize the ProjectGraph'
-        );
-        return reject();
-      }
+    /**
+     * Immediately after connecting to the server we send it the known project graph creation
+     * request payload. See the notes above createServer() for more context as to why we explicitly
+     * request the graph from the client like this.
+     */
+    socket.on('connect', () => {
+      socket.write(REQUEST_PROJECT_GRAPH_PAYLOAD);
+
+      let serializedProjectGraph = '';
+      socket.on('data', (data) => {
+        serializedProjectGraph += data.toString();
+      });
+
+      socket.on('end', () => {
+        try {
+          const projectGraph = JSON.parse(
+            serializedProjectGraph
+          ) as ProjectGraph;
+          logger.info('NX Daemon Client - Resolved ProjectGraph');
+          return resolve(projectGraph);
+        } catch {
+          logger.error(
+            'NX Daemon Client - Error: Could not deserialize the ProjectGraph'
+          );
+          return reject();
+        }
+      });
     });
   });
 }
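On the client side the graph arrives as an arbitrary number of 'data' chunks, so it is buffered and only parsed once the server calls socket.end(). A generic, self-contained version of that read pattern (the function name, generic type, and error handling are illustrative, not from the commit):

import { connect } from 'net';

// Connect to an IPC endpoint, send a request payload, buffer the response until
// the server ends the socket, then parse it as JSON.
function readJsonFromSocket<T>(socketPath: string, requestPayload: string): Promise<T> {
  return new Promise((resolve, reject) => {
    const socket = connect(socketPath);
    socket.on('error', reject);
    socket.on('connect', () => {
      socket.write(requestPayload);
      let buffered = '';
      socket.on('data', (chunk) => (buffered += chunk.toString()));
      socket.on('end', () => {
        try {
          resolve(JSON.parse(buffered) as T);
        } catch (e) {
          reject(e);
        }
      });
    });
  });
}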
@@ -175,7 +175,7 @@ export async function createProjectGraphAsync(
     );
   }

-  if (!isServerAvailable()) {
+  if (!(await isServerAvailable())) {
     logger.warn(
       '\nWARNING: You set NX_DAEMON=true but the Daemon Server is not running. Starting Daemon Server in the background...'
     );