Commit e49758e

Merge pull request #931 from ComputationalCryoEM/workaround_patches: Workaround patches

2 parents: 249164b + e64b81c

File tree: 5 files changed, +13 −3 lines

.github/workflows/workflow.yml

Lines changed: 3 additions & 1 deletion

@@ -64,14 +64,16 @@ jobs:
       shell: bash -el {0}
     strategy:
       matrix:
-        os: [ubuntu-latest, ubuntu-20.04, macOS-latest, macOS-11, windows-2019]
+        os: [ubuntu-latest, ubuntu-20.04, macOS-latest, macOS-11]
         backend: [default, openblas]
         python-version: ['3.8']
         include:
           - os: ubuntu-latest
             backend: intel
           - os: macOS-latest
             backend: accelerate
+          - os: windows-2019
+            backend: default

     steps:
       - uses: actions/checkout@v3
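
Net effect of the matrix change: windows-2019 leaves the full os × backend product and re-enters through include, so Windows now runs only the default backend (the openblas job it previously expanded to is dropped). A rough sketch of the expansion, assuming the usual GitHub Actions rule that include entries whose values fall outside the base matrix are added as standalone jobs:

from itertools import product

os_list = ["ubuntu-latest", "ubuntu-20.04", "macOS-latest", "macOS-11"]
backends = ["default", "openblas"]
include = [
    {"os": "ubuntu-latest", "backend": "intel"},
    {"os": "macOS-latest", "backend": "accelerate"},
    {"os": "windows-2019", "backend": "default"},  # the new entry from this patch
]

# The base matrix expands to the cartesian product; include adds standalone jobs.
jobs = [{"os": o, "backend": b} for o, b in product(os_list, backends)] + include
print(len(jobs))  # 11 jobs (was 12: 5 OSes x 2 backends + 2 includes)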

gallery/experiments/experimental_abinitio_pipeline_10028.py

Lines changed: 5 additions & 0 deletions

@@ -98,6 +98,9 @@
 # logger.info("Invert the global density contrast")
 # src = src.invert_contrast()

+# Caching is used for speeding up large datasets on high memory machines.
+src = src.cache()
+
 # %%
 # Optional: CWF Denoising
 # -----------------------

@@ -119,6 +122,8 @@
 cwf_denoiser = DenoiserCov2D(src)
 # Use denoised src for classification
 classification_src = cwf_denoiser.denoise()
+# Cache for speedup. Avoids recomputing.
+classification_src = classification_src.cache()
 # Peek, what do the denoised images look like...
 if interactive:
     classification_src.images[:10].show()
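
The two cache() calls trade memory for time: images are materialized once, so classification and later stages slice an in-memory stack instead of re-reading and re-transforming from disk on every access. A minimal sketch of the pattern, using a hypothetical LazySource stand-in rather than ASPIRE's actual ImageSource API:

import numpy as np

class LazySource:
    """Hypothetical stand-in for an image source; not ASPIRE's real API."""

    def __init__(self, n, resolution):
        self.n = n
        self.resolution = resolution
        self._stack = None  # filled by cache()

    def images(self, start, num):
        if self._stack is not None:
            return self._stack[start : start + num]  # cheap in-memory slice
        # Simulate an expensive read/recompute on every access.
        rng = np.random.default_rng(start)
        return rng.normal(size=(num, self.resolution, self.resolution))

    def cache(self):
        # Materialize all images once; subsequent accesses hit memory.
        self._stack = self.images(0, self.n)
        return self

src = LazySource(n=1000, resolution=32)
src = src.cache()
print(src.images(0, 10).shape)  # (10, 32, 32), served from the cached stack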

gallery/experiments/experimental_abinitio_pipeline_10081.py

Lines changed: 3 additions & 0 deletions

@@ -75,6 +75,9 @@
 aiso_noise_estimator = AnisotropicNoiseEstimator(src)
 src.whiten(aiso_noise_estimator.filter)

+# Caching is used for speeding up large datasets on high memory machines.
+src = src.cache()
+
 # %%
 # Class Averaging
 # ----------------------

src/aspire/source/coordinates.py

Lines changed: 1 addition & 1 deletion

@@ -366,7 +366,7 @@ def _extract_ctf(self, data_block):

         # get unique ctfs from the data block
         # i'th entry of `indices` contains the index of `filter_params` with corresponding CTF params
-        ctf_data = np.stack(data_block[c] for c in CTF_params).astype(self.dtype).T
+        ctf_data = np.stack([data_block[c] for c in CTF_params]).astype(self.dtype).T
         filter_params, indices = np.unique(
             ctf_data,
             return_inverse=True,
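
The fix above wraps a generator expression in a list: np.stack requires a sequence of arrays, and support for non-sequence iterables such as generators was deprecated in NumPy 1.16 and raises a TypeError on recent releases. A minimal reproduction, with toy stand-ins for data_block and CTF_params:

import numpy as np

# Toy stand-ins for the real data block and CTF parameter names.
CTF_params = ["defocus_u", "defocus_v"]
data_block = {"defocus_u": np.arange(3.0), "defocus_v": np.arange(3.0, 6.0)}

# np.stack(data_block[c] for c in CTF_params)  # TypeError on recent NumPy
ctf_data = np.stack([data_block[c] for c in CTF_params]).T  # works everywhere
print(ctf_data.shape)  # (3, 2): one row per particle, one column per param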

src/aspire/source/relion.py

Lines changed: 1 addition & 1 deletion

@@ -107,7 +107,7 @@ def __init__(
         # If these all exist in the STAR file, we may create CTF filters for the source
         if set(CTF_params).issubset(metadata.keys()):
             # partition particles according to unique CTF parameters
-            ctf_data = np.stack(metadata[k] for k in CTF_params).T
+            ctf_data = np.stack([metadata[k] for k in CTF_params]).T
             filter_params, filter_indices = np.unique(
                 ctf_data,
                 return_inverse=True,
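
Both patched call sites feed the stacked per-particle parameters into np.unique with return_inverse=True to partition particles by identical CTF parameters. A toy sketch of that partitioning, assuming row-wise uniqueness via axis=0 (the call is truncated here):

import numpy as np

# One row of CTF parameters per particle; particles 0 and 2 share a CTF.
ctf_data = np.array([[1.4, 300.0],
                     [2.0, 300.0],
                     [1.4, 300.0]])

filter_params, filter_indices = np.unique(ctf_data, axis=0, return_inverse=True)
print(filter_params)           # the distinct parameter rows
print(filter_indices.ravel())  # [0 1 0]: each particle's row in filter_params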
