fix(core): handle unique constraint errors when adding duplicate hashes to the cache db (#28310)


## Current Behavior
Storing a hash that already exists in the cache database fails with a SQLite unique constraint error, because `record_to_cache` uses a plain `INSERT INTO cache_outputs`.

## Expected Behavior
Storing a hash that already exists in the cache database succeeds: `record_to_cache` now uses `INSERT OR REPLACE`, so the existing row for that hash is overwritten instead of raising a unique constraint error.
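
For context, here is a minimal standalone sketch of the upsert behaviour, assuming `rusqlite` (which matches the API visible in the diff); the table definition below is illustrative, not the actual Nx schema:

```rust
use rusqlite::{params, Connection};

fn main() -> rusqlite::Result<()> {
    // Stand-in for the cache db: a table keyed by the task hash.
    let db = Connection::open_in_memory()?;
    db.execute_batch("CREATE TABLE cache_outputs (hash TEXT PRIMARY KEY, code INTEGER)")?;

    // A plain `INSERT` would hit `UNIQUE constraint failed: cache_outputs.hash`
    // on the second iteration; `INSERT OR REPLACE` overwrites the row instead.
    for _ in 0..2 {
        db.execute(
            "INSERT OR REPLACE INTO cache_outputs (hash, code) VALUES (?1, ?2)",
            params!["123", 0],
        )?;
    }

    // Exactly one row remains for the hash.
    let rows: i64 = db.query_row("SELECT COUNT(*) FROM cache_outputs", [], |row| row.get(0))?;
    assert_eq!(rows, 1);
    Ok(())
}
```

`INSERT OR REPLACE` is SQLite shorthand for deleting the conflicting row and inserting the new one, so repeated writes for the same hash become idempotent instead of erroring.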

## Related Issue(s)

Fixes #
Author: Jonathan Cammisuli, 2024-10-04 15:16:03 -04:00 (committed by GitHub)
Commit: d714099501 (parent: acb19a6439)
2 changed files with 16 additions and 20 deletions


@@ -48,7 +48,7 @@ impl NxCache {
             workspace_root: PathBuf::from(workspace_root),
             cache_directory: cache_path.to_normalized_string(),
             cache_path,
-            link_task_details: link_task_details.unwrap_or(true)
+            link_task_details: link_task_details.unwrap_or(true),
         };
         r.setup()?;
@@ -80,11 +80,7 @@ impl NxCache {
             "
         };
-        self.db
-            .execute_batch(
-                query,
-            )
-            .map_err(anyhow::Error::from)
+        self.db.execute_batch(query).map_err(anyhow::Error::from)
     }
     #[napi]
@@ -144,7 +140,7 @@ impl NxCache {
         create_dir_all(&task_dir)?;
         // Write the terminal outputs into a file
-        let task_outputs_path: _ = self.get_task_outputs_path_internal(&hash);
+        let task_outputs_path = self.get_task_outputs_path_internal(&hash);
         trace!("Writing terminal outputs to: {:?}", &task_outputs_path);
         write(task_outputs_path, terminal_output)?;
@@ -192,9 +188,7 @@ impl NxCache {
     fn record_to_cache(&self, hash: String, code: i16) -> anyhow::Result<()> {
         self.db.execute(
-            "INSERT INTO cache_outputs
-                (hash, code)
-                VALUES (?1, ?2)",
+            "INSERT OR REPLACE INTO cache_outputs (hash, code) VALUES (?1, ?2)",
             params![hash, code],
         )?;
         Ok(())
@@ -258,19 +252,16 @@ impl NxCache {
         // Checks that the number of cache records in the database
         // matches the number of cache directories on the filesystem.
         // If they don't match, it means that the cache is out of sync.
-        let cache_records_exist = self.db.query_row(
-            "SELECT EXISTS (SELECT 1 FROM cache_outputs)",
-            [],
-            |row| {
+        let cache_records_exist =
+            self.db
+                .query_row("SELECT EXISTS (SELECT 1 FROM cache_outputs)", [], |row| {
                     let exists: bool = row.get(0)?;
                     Ok(exists)
-            },
-        )?;
+                })?;
         if !cache_records_exist {
             let hash_regex = Regex::new(r"^\d+$").expect("Hash regex is invalid");
-            let fs_entries = std::fs::read_dir(&self.cache_path)
-                .map_err(anyhow::Error::from)?;
+            let fs_entries = std::fs::read_dir(&self.cache_path).map_err(anyhow::Error::from)?;
             for entry in fs_entries {
                 let entry = entry?;
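
The hunk above mostly reflows the out-of-sync check described in its comments. As a rough sketch of the shape of that check, assuming `rusqlite` and `regex`; the function name and boolean return are hypothetical, not the actual Nx code:

```rust
use std::path::Path;

use regex::Regex;
use rusqlite::Connection;

/// Rough sketch: with no rows in `cache_outputs`, any hash-named directory
/// under the cache path is an entry the database does not know about.
fn cache_is_out_of_sync(db: &Connection, cache_path: &Path) -> anyhow::Result<bool> {
    // Any row at all means the database already tracks cache entries.
    let cache_records_exist: bool =
        db.query_row("SELECT EXISTS (SELECT 1 FROM cache_outputs)", [], |row| {
            row.get(0)
        })?;
    if cache_records_exist {
        return Ok(false);
    }

    // Cache entries are directories whose names are numeric hashes.
    let hash_regex = Regex::new(r"^\d+$").expect("Hash regex is invalid");
    for entry in std::fs::read_dir(cache_path)? {
        let name = entry?.file_name();
        if hash_regex.is_match(&name.to_string_lossy()) {
            return Ok(true);
        }
    }
    Ok(false)
}
```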


@@ -59,4 +59,9 @@ describe('Cache', () => {
       'output contents 123'
     );
   });
+
+  it('should handle storing hashes that already exist in the cache', async () => {
+    cache.put('123', 'output 123', ['dist'], 0);
+    expect(() => cache.put('123', 'output 123', ['dist'], 0)).not.toThrow();
+  });
 });