
Commit

Merge pull request #979 from flatironinstitute/dev
Merge dev to master for release
pgunn authored May 25, 2022
2 parents 428d2b2 + a27587c commit a8e7127
Showing 12 changed files with 48 additions and 27 deletions.
9 changes: 9 additions & 0 deletions CHANGELOG.txt
@@ -1,5 +1,14 @@
Please check this file after new releases; it will cover (only) major changes or changes that will impact how CaImAn runs, and will not generally cover new features or minor changes (see the version history on Github for that). Most recent changes are at the top.

1.9.10
------
Python 3.8-3.10 are supported versions of Caiman. 3.7 is no longer supported.

1.9.9
-----
Dependencies have been updated; internal hdf5 file semantics have changed, and both tensorflow and h5py
usage is now compatible with modern versions of those libraries (tensorflow 2.4.x+ is now required)

1.9.0
-----
This implements a storage layer that, if enabled, will try not to write files
8 changes: 6 additions & 2 deletions caiman/base/movies.py
@@ -1657,9 +1657,13 @@ def rgb2gray(rgb):
return movie(**f).astype(outtype)

elif extension in ('.hdf5', '.h5', '.nwb'):
# TODO: Merge logic here with utilities.py:get_file_size()
with h5py.File(file_name, "r") as f:
fkeys = list(f.keys())
if len(fkeys) == 1: # If the hdf5 file we're parsing has only one dataset inside it, ignore the arg and pick that dataset
ignore_keys = ['__DATA_TYPES__'] # Known metadata that tools provide; add to this as needed. Sync with utilities.py:get_file_size() !!
fkeys = list(filter(lambda x: x not in ignore_keys, f.keys()))
if len(fkeys) == 1 and 'Dataset' in str(type(f[fkeys[0]])): # If the hdf5 file we're parsing has only one dataset inside it,
# ignore the arg and pick that dataset
# TODO: Consider recursing into a group to find a dataset
var_name_hdf5 = fkeys[0]

if extension == '.nwb': # Apparently nwb files are specially-formatted hdf5 files
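As context for this hunk, here is a minimal standalone sketch of the new key-filtering behaviour, assuming only h5py; pick_hdf5_dataset and the file path are illustrative names, not part of the codebase, and the sketch uses isinstance where the real code checks 'Dataset' in str(type(...)).

import h5py

def pick_hdf5_dataset(file_name: str, var_name_hdf5: str = 'mov') -> str:
    # Drop known metadata keys; if exactly one key remains and it refers to a
    # Dataset (not a Group), use it instead of the caller-supplied name.
    ignore_keys = ['__DATA_TYPES__']  # metadata written by some tools
    with h5py.File(file_name, 'r') as f:
        fkeys = [k for k in f.keys() if k not in ignore_keys]
        if len(fkeys) == 1 and isinstance(f[fkeys[0]], h5py.Dataset):
            return fkeys[0]
    return var_name_hdf5

The same filter-and-check pattern is mirrored in utilities.py:get_file_size() later in this diff.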
4 changes: 2 additions & 2 deletions caiman/cluster.py
@@ -223,9 +223,9 @@ def start_server(slurm_script: str = None, ipcluster: str = "ipcluster", ncpus:
if slurm_script is None:

if ipcluster == "ipcluster":
subprocess.Popen("ipcluster start -n {0}".format(ncpus), shell=True, close_fds=(os.name != 'nt'))
subprocess.Popen(f"ipcluster start -n {ncpus}", shell=True, close_fds=(os.name != 'nt'))
else:
subprocess.Popen(shlex.split("{0} start -n {1}".format(ipcluster, ncpus)),
subprocess.Popen(shlex.split(f"{ipcluster} start -n {ncpus}"),
shell=True,
close_fds=(os.name != 'nt'))
time.sleep(1.5)
4 changes: 2 additions & 2 deletions caiman/components_evaluation.py
@@ -299,7 +299,7 @@ def evaluate_components_CNN(A,
model_file = model_name + ".json"
model_weights = model_name + ".h5"
else:
raise FileNotFoundError("File for requested model {} not found".format(model_name))
raise FileNotFoundError(f"File for requested model {model_name} not found")
with open(model_file, 'r') as json_file:
print('USING MODEL:' + model_file)
loaded_model_json = json_file.read()
@@ -313,7 +313,7 @@ def evaluate_components_CNN(A,
elif os.path.isfile(model_name + ".h5.pb"):
model_file = model_name + ".h5.pb"
else:
raise FileNotFoundError("File for requested model {} not found".format(model_name))
raise FileNotFoundError(f"File for requested model {model_name} not found")
loaded_model = load_graph(model_file)

logging.debug("Loaded model from disk")
14 changes: 8 additions & 6 deletions caiman/motion_correction.py
@@ -438,8 +438,8 @@ def apply_shifts_movie(self, fname, rigid_shifts:bool=None, save_memmap:bool=Fal
if rigid_shifts is not None:
logging.warning('The rigid_shifts flag is deprecated and it is ' +
'being ignored. The value is read directly from' +
' mc.pw_rigid and is current set to the opposite' +
' of {}'.format(self.pw_rigid))
' mc.pw_rigid and is currently set to the opposite' +
f' of {self.pw_rigid}')

if self.pw_rigid is False:
if self.is3D:
@@ -3097,7 +3097,7 @@ def motion_correction_piecewise(fname, splits, strides, overlaps, add_to_movie=0
dims, T = cm.source_extraction.cnmf.utilities.get_file_size(fname, var_name_hdf5=var_name_hdf5)
z = np.zeros(dims)
dims = z[indices].shape
logging.debug('Number of Splits: {}'.format(splits))
logging.debug(f'Number of Splits: {splits}')
if isinstance(splits, int):
if subidx is None:
rng = range(T)
@@ -3110,7 +3110,7 @@ def motion_correction_piecewise(fname, splits, strides, overlaps, add_to_movie=0
idxs = splits
save_movie = False
if template is None:
raise Exception('Not implemented')
raise Exception('motion_correction_piecewise(): Templateless not implemented')

shape_mov = (np.prod(dims), T)
# if is3D:
@@ -3125,6 +3125,8 @@ def motion_correction_piecewise(fname, splits, strides, overlaps, add_to_movie=0
if save_movie:
if base_name is None:
base_name = os.path.split(fname)[1][:-4]
base_name = caiman.paths.fn_relocated(base_name)

fname_tot:Optional[str] = caiman.paths.memmap_frames_filename(base_name, dims, T, order)
if isinstance(fname, tuple):
fname_tot = os.path.join(os.path.split(fname[0])[0], fname_tot)
@@ -3133,13 +3135,13 @@ def motion_correction_piecewise(fname, splits, strides, overlaps, add_to_movie=0

np.memmap(fname_tot, mode='w+', dtype=np.float32,
shape=prepare_shape(shape_mov), order=order)
logging.info('Saving file as {}'.format(fname_tot))
logging.info(f'Saving file as {fname_tot}')
else:
fname_tot = None

pars = []
for idx in idxs:
logging.debug('Processing: frames: {}'.format(idx))
logging.debug(f'Processing: frames: {idx}')
pars.append([fname, fname_tot, idx, shape_mov, template, strides, overlaps, max_shifts, np.array(
add_to_movie, dtype=np.float32), max_deviation_rigid, upsample_factor_grid,
newoverlaps, newstrides, shifts_opencv, nonneg_movie, gSig_filt, is_fiji,
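For readers unfamiliar with the memmap naming that base_name feeds into, a rough illustration follows; demo_memmap_name is a hypothetical stand-in for caiman.paths.memmap_frames_filename, written only to show the idea of encoding shape, order and frame count into the filename.

import numpy as np

def demo_memmap_name(base_name: str, dims, T: int, order: str = 'F') -> str:
    # Hypothetical approximation: encode dimensions, storage order and frame
    # count into the filename so the memmap can be re-opened without metadata.
    d1, d2 = dims[0], dims[1]
    d3 = dims[2] if len(dims) == 3 else 1
    return f"{base_name}_d1_{d1}_d2_{d2}_d3_{d3}_order_{order}_frames_{T}_.mmap"

dims, T = (64, 64), 100
fname_tot = demo_memmap_name('example_movie', dims, T)
# Mirrors the np.memmap(...) call in the hunk above: one column per frame.
big_mov = np.memmap(fname_tot, mode='w+', dtype=np.float32,
                    shape=(int(np.prod(dims)), T), order='F')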
16 changes: 8 additions & 8 deletions caiman/source_extraction/cnmf/estimates.py
@@ -381,7 +381,7 @@ def nb_view_components(self, Yr=None, img=None, idx=None,

plt.ion()
nr, T = self.C.shape
if self.R is None:
if self.R is None or self.R == b'NoneType':
self.R = self.YrA
if self.R.shape != [nr, T]:
if self.YrA is None:
@@ -433,7 +433,7 @@ def hv_view_components(self, Yr=None, img=None, idx=None,

plt.ion()
nr, T = self.C.shape
if self.R is None:
if self.R is None or self.R == b'NoneType':
self.R = self.YrA
if self.R.shape != [nr, T]:
if self.YrA is None:
@@ -500,7 +500,7 @@ def nb_view_components_3d(self, Yr=None, image_type='mean', dims=None,
dims = self.dims
plt.ion()
nr, T = self.C.shape
if self.R is None:
if self.R is None or self.R == b'NoneType':
self.R = self.YrA
if self.R.shape != [nr, T]:
if self.YrA is None:
@@ -1421,7 +1421,7 @@ def remove_small_large_neurons(self, min_size_neuro, max_size_neuro,
indeces of components with size within the acceptable range
'''
if self.A_thr is None:
raise Exception('You need to compute thresolded components before calling remove_duplicates: use the threshold_components method')
raise Exception('You need to compute thresholded components before calling remove_duplicates: use the threshold_components method')

A_gt_thr_bin = self.A_thr.toarray() > 0
size_neurons_gt = A_gt_thr_bin.sum(0)
@@ -1451,7 +1451,7 @@ def remove_duplicates(self, predictions=None, r_values=None, dist_thr=0.1,
plot_duplicates
'''
if self.A_thr is None:
raise Exception('You need to compute thresolded components before calling remove_duplicates: use the threshold_components method')
raise Exception('You need to compute thresholded components before calling remove_duplicates: use the threshold_components method')

A_gt_thr_bin = (self.A_thr.toarray() > 0).reshape([self.dims[0], self.dims[1], -1], order='F').transpose([2, 0, 1]) * 1.

@@ -1492,7 +1492,7 @@ def masks_2_neurofinder(self, dataset_name):
"""
if self.A_thr is None:
raise Exception(
'You need to compute thresolded components before calling this method: use the threshold_components method')
'You need to compute thresholded components before calling this method: use the threshold_components method')
bin_masks = self.A_thr.reshape([self.dims[0], self.dims[1], -1], order='F').transpose([2, 0, 1])
return nf_masks_to_neurof_dict(bin_masks, dataset_name)

@@ -1711,10 +1711,10 @@ def compare_components(estimate_gt, estimate_cmp, Cn=None, thresh_cost=.8, min_
labels=['GT', 'CMP'], plot_results=False):
if estimate_gt.A_thr is None:
raise Exception(
'You need to compute thresolded components for first argument before calling remove_duplicates: use the threshold_components method')
'You need to compute thresholded components for first argument before calling remove_duplicates: use the threshold_components method')
if estimate_cmp.A_thr is None:
raise Exception(
'You need to compute thresolded components for second argument before calling remove_duplicates: use the threshold_components method')
'You need to compute thresholded components for second argument before calling remove_duplicates: use the threshold_components method')

if plot_results:
plt.figure(figsize=(20, 10))
2 changes: 1 addition & 1 deletion caiman/source_extraction/cnmf/params.py
@@ -1007,7 +1007,7 @@ def get_group(self, group):

def __eq__(self, other):

if not instance(other, CNMFParams):
if not isinstance(other, CNMFParams):
return False

parent_dict1 = self.to_dict()
2 changes: 1 addition & 1 deletion caiman/source_extraction/cnmf/spatial.py
@@ -455,7 +455,7 @@ def construct_ellipse_parallel(pars):
return np.sqrt(np.sum([old_div((dist_cm * V[:, k]) ** 2, dkk[k]) for k in range(len(dkk))], 0)) <= dist

def threshold_components(A, dims, medw=None, thr_method='max', maxthr=0.1, nrgthr=0.9999, extract_cc=True,
se=None, ss=None, dview=None):
se=None, ss=None, dview=None) -> np.ndarray:
"""
Post-processing of spatial components which includes the following steps
5 changes: 3 additions & 2 deletions caiman/source_extraction/cnmf/utilities.py
@@ -1014,8 +1014,9 @@ def get_file_size(file_name, var_name_hdf5='mov'):
# FIXME this doesn't match the logic in movies.py:load()
# Consider pulling a lot of the "data source" code out into one place
with h5py.File(file_name, "r") as f:
kk = list(f.keys())
if len(kk) == 1:
ignore_keys = ['__DATA_TYPES__'] # Known metadata that tools provide; add to this as needed. Sync with movies.py:load() !!
kk = list(filter(lambda x: x not in ignore_keys, f.keys()))
if len(kk) == 1 and 'Dataset' in str(type(f[kk[0]])): # TODO: Consider recursing into a group to find a dataset
siz = f[kk[0]].shape
elif var_name_hdf5 in f:
if extension == '.nwb':
7 changes: 6 additions & 1 deletion caiman/utils/utils.py
@@ -544,6 +544,10 @@ def recursively_load_dict_contents_from_group(h5file:h5py.File, path:str) -> Dic
Starting with Caiman 1.9.9 we started saving strings as attributes rather than independent datasets,
which gets us a better syntax and less damage to the strings, at the cost of scanning properly for them
being a little more involved. In future versions of Caiman we may store all scalars as attributes.
There's some special casing here that should be solved in a more general way; anything serialised into
hdf5 and then deserialised should probably go back through the class constructor and be revalidated,
so that all the fields end up with appropriate data types.
'''

ans:Dict = {}
@@ -560,7 +564,6 @@ def recursively_load_dict_contents_from_group(h5file:h5py.File, path:str) -> Dic
ans[key] = item[()]

elif key in ['dims', 'medw', 'sigma_smooth_snmf', 'dxy', 'max_shifts', 'strides', 'overlaps']:

if isinstance(item[()], np.ndarray):
ans[key] = tuple(item[()])
else:
@@ -570,6 +573,8 @@ def recursively_load_dict_contents_from_group(h5file:h5py.File, path:str) -> Dic
ans[key] = bool(item[()])
else:
ans[key] = item[()]
if isinstance(ans[key], bytes) and ans[key] == b'NoneType':
ans[key] = None

elif isinstance(item, h5py._hl.group.Group):
if key in ('A', 'W', 'Ab', 'downscale_matrix', 'upscale_matrix'):
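The b'NoneType' mapping added here pairs with the self.R == b'NoneType' checks in estimates.py earlier in this diff. A small self-contained sketch of the round trip (demo_roundtrip.h5 is a throwaway file name used only for illustration):

import h5py

# h5py cannot store Python None directly, so a None field can come back from
# an hdf5 file as the byte string b'NoneType'; on load it is mapped to None.
with h5py.File('demo_roundtrip.h5', 'w') as f:
    f['R'] = b'NoneType'            # stand-in for a field that was None

with h5py.File('demo_roundtrip.h5', 'r') as f:
    value = f['R'][()]              # read back as bytes: b'NoneType'
    if isinstance(value, bytes) and value == b'NoneType':
        value = None                # mirrors the added check above

print(value)  # -> None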
2 changes: 1 addition & 1 deletion environment-minimal.yml
@@ -1,7 +1,7 @@
channels:
- conda-forge
dependencies:
- python >=3.9
- python >=3.10
- cython
- future
- h5py
2 changes: 1 addition & 1 deletion environment.yml
@@ -1,7 +1,7 @@
channels:
- conda-forge
dependencies:
- python >=3.9
- python >=3.10
- bokeh
- coverage
- cython
