diff --git a/src/aspire/commands/cov3d.py b/src/aspire/commands/cov3d.py index 3a0752740..61fca0696 100644 --- a/src/aspire/commands/cov3d.py +++ b/src/aspire/commands/cov3d.py @@ -52,10 +52,10 @@ def cov3d( source = source.whiten() basis = FBBasis3D((max_resolution, max_resolution, max_resolution)) - mean_estimator = MeanEstimator(source, basis, batch_size=8192) + mean_estimator = MeanEstimator(source, basis, batch_size=512) mean_est = mean_estimator.estimate() - noise_estimator = WhiteNoiseEstimator(source, batch_size=500) + noise_estimator = WhiteNoiseEstimator(source, batch_size=512) # Estimate the noise variance. This is needed for the covariance estimation step below. noise_variance = noise_estimator.estimate() logger.info(f"Noise Variance = {noise_variance}") diff --git a/src/aspire/covariance/covar2d.py b/src/aspire/covariance/covar2d.py index 30106909a..d0bb73b57 100644 --- a/src/aspire/covariance/covar2d.py +++ b/src/aspire/covariance/covar2d.py @@ -513,10 +513,14 @@ class BatchedRotCov2D(RotCov2D): be extracted. :param basis: The `FBBasis2D` object used to decompose the images. By default, this is set to `FFBBasis2D((src.L, src.L))`. - :param batch_size: The number of images to process at a time (default 8192). + :param batch_size: The number of images to process at a time (default 512). + 512 is a good starting point for large images with a GPU where + memory is a concern. If the GPU runs out of memory, try + scaling down `batch_size`. For high-memory CPU applications, + scaling up to a larger value such as 8192 may yield better performance. """ - def __init__(self, src, basis=None, batch_size=8192): + def __init__(self, src, basis=None, batch_size=512): self.src = src self.basis = basis self.batch_size = batch_size