fix(core): handle unique constraint errors when adding duplicate hashes to the cache db (#28310)
## Current Behavior

`record_to_cache` writes to the cache db with a plain `INSERT INTO cache_outputs`, so putting a hash that already exists in the cache db fails with a SQLite UNIQUE constraint error.

## Expected Behavior

Putting a hash that already exists in the cache db succeeds: the insert uses `INSERT OR REPLACE`, so a duplicate hash overwrites the existing row instead of throwing.

## Related Issue(s)

Fixes #
This commit is contained in: d714099501 (parent acb19a6439)

packages/nx/src/native/cache/cache.rs (31 changed lines)
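The core change below swaps the plain `INSERT` in `record_to_cache` for `INSERT OR REPLACE`. As context, here is a minimal, self-contained rusqlite sketch of the failure mode and the fix. The schema and values are hypothetical, not taken from the Nx codebase; the only assumption is that `hash` carries the PRIMARY KEY/UNIQUE constraint that produces the original error.

```rust
use rusqlite::{params, Connection};

fn main() -> anyhow::Result<()> {
    // Hypothetical, simplified stand-in for the cache_outputs table: `hash` is the
    // primary key, which is what raises the UNIQUE constraint on duplicate inserts.
    let db = Connection::open_in_memory()?;
    db.execute_batch("CREATE TABLE cache_outputs (hash TEXT PRIMARY KEY, code INTEGER);")?;

    // The first insert for a hash succeeds.
    db.execute(
        "INSERT INTO cache_outputs (hash, code) VALUES (?1, ?2)",
        params!["123", 0],
    )?;

    // A second plain INSERT for the same hash fails with
    // "UNIQUE constraint failed: cache_outputs.hash".
    let err = db
        .execute(
            "INSERT INTO cache_outputs (hash, code) VALUES (?1, ?2)",
            params!["123", 0],
        )
        .unwrap_err();
    println!("plain INSERT on duplicate hash: {err}");

    // INSERT OR REPLACE overwrites the existing row instead of erroring,
    // which is the behavior this fix relies on.
    db.execute(
        "INSERT OR REPLACE INTO cache_outputs (hash, code) VALUES (?1, ?2)",
        params!["123", 1],
    )?;
    Ok(())
}
```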
```diff
@@ -48,7 +48,7 @@ impl NxCache {
             workspace_root: PathBuf::from(workspace_root),
             cache_directory: cache_path.to_normalized_string(),
             cache_path,
-            link_task_details: link_task_details.unwrap_or(true)
+            link_task_details: link_task_details.unwrap_or(true),
         };

         r.setup()?;
```
```diff
@@ -80,11 +80,7 @@ impl NxCache {
         "
         };

-        self.db
-            .execute_batch(
-                query,
-            )
-            .map_err(anyhow::Error::from)
+        self.db.execute_batch(query).map_err(anyhow::Error::from)
     }

     #[napi]
```
```diff
@@ -144,7 +140,7 @@ impl NxCache {
         create_dir_all(&task_dir)?;

         // Write the terminal outputs into a file
-        let task_outputs_path: _ = self.get_task_outputs_path_internal(&hash);
+        let task_outputs_path = self.get_task_outputs_path_internal(&hash);
         trace!("Writing terminal outputs to: {:?}", &task_outputs_path);
         write(task_outputs_path, terminal_output)?;

```
```diff
@@ -192,9 +188,7 @@ impl NxCache {

     fn record_to_cache(&self, hash: String, code: i16) -> anyhow::Result<()> {
         self.db.execute(
-            "INSERT INTO cache_outputs
-                (hash, code)
-            VALUES (?1, ?2)",
+            "INSERT OR REPLACE INTO cache_outputs (hash, code) VALUES (?1, ?2)",
             params![hash, code],
         )?;
         Ok(())
```
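A note on the design choice: `INSERT OR REPLACE` deletes the conflicting row and re-inserts it, which is harmless for this two-column table. A hypothetical alternative (not what this PR does) is SQLite's explicit upsert clause, which updates the existing row in place. A sketch, assuming `hash` is the table's primary key and using the same rusqlite calls as the surrounding code:

```rust
use rusqlite::{params, Connection, Result};

// Hypothetical alternative to the INSERT OR REPLACE above (not part of this PR):
// an explicit upsert that keeps the existing row and only updates its exit code.
fn record_to_cache_upsert(db: &Connection, hash: &str, code: i16) -> Result<()> {
    db.execute(
        "INSERT INTO cache_outputs (hash, code) VALUES (?1, ?2)
         ON CONFLICT(hash) DO UPDATE SET code = excluded.code",
        params![hash, code],
    )?;
    Ok(())
}
```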
```diff
@@ -258,19 +252,16 @@ impl NxCache {
         // Checks that the number of cache records in the database
         // matches the number of cache directories on the filesystem.
         // If they don't match, it means that the cache is out of sync.
-        let cache_records_exist = self.db.query_row(
-            "SELECT EXISTS (SELECT 1 FROM cache_outputs)",
-            [],
-            |row| {
-                let exists: bool = row.get(0)?;
-                Ok(exists)
-            },
-        )?;
+        let cache_records_exist =
+            self.db
+                .query_row("SELECT EXISTS (SELECT 1 FROM cache_outputs)", [], |row| {
+                    let exists: bool = row.get(0)?;
+                    Ok(exists)
+                })?;

         if !cache_records_exist {
             let hash_regex = Regex::new(r"^\d+$").expect("Hash regex is invalid");
-            let fs_entries = std::fs::read_dir(&self.cache_path)
-                .map_err(anyhow::Error::from)?;
+            let fs_entries = std::fs::read_dir(&self.cache_path).map_err(anyhow::Error::from)?;

             for entry in fs_entries {
                 let entry = entry?;
```
In the `Cache` spec, a test covers putting a hash that already exists:

```diff
@@ -59,4 +59,9 @@ describe('Cache', () => {
       'output contents 123'
     );
   });
+
+  it('should handle storing hashes that already exist in the cache', async () => {
+    cache.put('123', 'output 123', ['dist'], 0);
+    expect(() => cache.put('123', 'output 123', ['dist'], 0)).not.toThrow();
+  });
 });
```