Skip to content

Commit

Permalink
Merge branch 'development' into timecourse_regression
Browse files Browse the repository at this point in the history
  • Loading branch information
namsaraeva committed Apr 22, 2024
2 parents 1d58a0c + b716a47 commit 312d6f8
Show file tree
Hide file tree
Showing 25 changed files with 74 additions and 180 deletions.
18 changes: 18 additions & 0 deletions example_data/example_2/config_example2.yml
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,24 @@ CytosolSegmentationCellpose:
model: "cyto2"
chunk_size: 50
filtering_threshold: 0.95
ShardedCytosolSegmentationCellpose:
input_channels: 2
output_masks: 2
shard_size: 120000000 # maximum number of pixels per tile
overlap_px: 100
nGPUs: 1
chunk_size: 50 # chunk size for chunked HDF5 storage. is needed for correct caching and high performance reading. should be left at 50.
threads: 2 # number of shards / tiles segmented at the same time. should be adapted to the maximum amount allowed by memory.
cache: "."
lower_quantile_normalization: 0.001
upper_quantile_normalization: 0.999
median_filter_size: 6 # Size in pixels
nucleus_segmentation:
model: "nuclei"
cytosol_segmentation:
model: "cyto2"
chunk_size: 50
filtering_threshold: 0.95
HDF5CellExtraction:
compression: True
threads: 80 # threads used in multithreading
Expand Down
43 changes: 43 additions & 0 deletions example_data/example_5/config_example5.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
---
name: "Example Timecourse Project"
input_channels: 3
output_channels: 5
Cytosol_Cellpose_TimecourseSegmentation:
input_channels: 3
output_masks: 2
shard_size: 4000000 # Average number of pixels per tile. 10,000 * 10,000 pixels are recommended. Can be adapted to memory and computation needs.
chunk_size: 50 # chunk size for chunked HDF5 storage. is needed for correct caching and high performance reading. should be left at 50.
cache: "."
lower_quantile_normalization: 0.001
upper_quantile_normalization: 0.999
median_filter_size: 4 # Size in pixels
nucleus_segmentation:
model: "nuclei"
cytosol_segmentation:
model: "cyto2"
chunk_size: 50
filtering_threshold: 0.95
Multithreaded_Cytosol_Cellpose_TimecourseSegmentation:
input_channels: 3
output_masks: 2
shard_size: 4000000 # Average number of pixels per tile. 10,000 * 10,000 pixels are recommended. Can be adapted to memory and computation needs.
chunk_size: 50 # chunk size for chunked HDF5 storage. is needed for correct caching and high performance reading. should be left at 50.
threads: 5 # number of shards / tiles segmented at the same time. should be adapted to the maximum amount allowed by memory.
cache: "."
lower_quantile_normalization: 0.001
upper_quantile_normalization: 0.999
median_filter_size: 4 # Size in pixels
nucleus_segmentation:
model: "nuclei"
cytosol_segmentation:
model: "cyto2"
chunk_size: 50
filtering_threshold: 0.95
TimecourseHDF5CellExtraction:
compression: True
threads: 80 # threads used in multithreading
image_size: 128 # image size in pixel
cache: "."
hdf5_rdcc_nbytes: 5242880000 # 5gb 1024 * 1024 * 5000
hdf5_rdcc_w0: 1
hdf5_rdcc_nslots: 50000
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
3 changes: 3 additions & 0 deletions example_data/example_5/plate_layout.tsv
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
Well Genotype Treatment
Row02_Well02 Genotype1 Unstimulated
Row02_Well04 Genotype2 Stimulated
92 changes: 0 additions & 92 deletions example_data/example_config_files/config_screen 2.yml

This file was deleted.

74 changes: 0 additions & 74 deletions example_data/example_config_files/config_training 2.yml

This file was deleted.

2 changes: 1 addition & 1 deletion src/sparcscore/ml/transforms.py
Original file line number Diff line number Diff line change
Expand Up @@ -102,4 +102,4 @@ def __init__(self, channels=[0,1,2,3,4], num_channels=5):

def __call__(self, tensor):
    """Restrict *tensor* to the configured channel subset.

    Indexes the first (channel) axis with ``self.channels`` while
    keeping both spatial axes untouched.
    """
    selected = self.channels
    return tensor[selected, :, :]


22 changes: 9 additions & 13 deletions src/sparcscore/pipeline/segmentation.py
Original file line number Diff line number Diff line change
Expand Up @@ -802,7 +802,10 @@ def resolve_sharding(self, sharding_plan):
shutil.rmtree(self.shard_directory, ignore_errors=True)

gc.collect()


def initializer_function(self, gpu_id_list):
    """Worker-pool initializer: attach the shared GPU id list to the worker.

    Runs once in each spawned worker process; the worker later reads
    ``current_process().gpu_id_list`` to pick the GPU it should use.
    """
    worker = current_process()
    worker.gpu_id_list = gpu_id_list

def process(self, input_image):
self.save_zarr = False
self.save_input_image(input_image)
Expand Down Expand Up @@ -866,13 +869,10 @@ def process(self, input_image):
for gpu_ids in range(available_GPUs):
for _ in range(processes_per_GPU):
gpu_id_list.append(gpu_ids)

def initializer_function(gpu_id_list):
current_process().gpu_id_list = gpu_id_list

self.log(f"Beginning segmentation on {available_GPUs} GPUs.")

with Pool(processes=n_processes, initializer=initializer_function, initargs=[gpu_id_list]) as pool:
with Pool(processes=n_processes, initializer=self.initializer_function, initargs=[gpu_id_list]) as pool:
results = list(
tqdm(
pool.imap(self.method.call_as_shard, shard_list),
Expand Down Expand Up @@ -983,13 +983,10 @@ def complete_segmentation(self, input_image):
for gpu_ids in range(available_GPUs):
for _ in range(processes_per_GPU):
gpu_id_list.append(gpu_ids)

def initializer_function(gpu_id_list):
current_process().gpu_id_list = gpu_id_list

self.log(f"Beginning segmentation on {available_GPUs}.")

with Pool(processes=n_processes, initializer=initializer_function, initargs=[gpu_id_list]) as pool:
with Pool(processes=n_processes, initializer=self.initializer_function, initargs=[gpu_id_list]) as pool:
results = list(
tqdm(
pool.imap(self.method.call_as_shard, shard_list),
Expand Down Expand Up @@ -1323,6 +1320,8 @@ def __init__(self, *args, **kwargs):
raise AttributeError(
"No Segmentation method defined, please set attribute ``method``"
)
def initializer_function(self, gpu_id_list):
    """Pool initializer that stores the GPU id assignment on the worker.

    Each worker process receives the full ``gpu_id_list`` and can read it
    back via ``current_process().gpu_id_list`` when selecting a device.
    """
    proc = current_process()
    proc.gpu_id_list = gpu_id_list

def process(self):
# global _tmp_seg
Expand Down Expand Up @@ -1374,13 +1373,10 @@ def process(self):
for gpu_ids in range(available_GPUs):
for _ in range(processes_per_GPU):
gpu_id_list.append(gpu_ids)

def initializer_function(gpu_id_list):
current_process().gpu_id_list = gpu_id_list

self.log(f"Beginning segmentation on {available_GPUs}.")

with Pool(processes=n_processes, initializer=initializer_function, initargs=[gpu_id_list]) as pool:
with Pool(processes=n_processes, initializer=self.initializer_function, initargs=[gpu_id_list]) as pool:
results = list(
tqdm(
pool.imap(self.method.call_as_shard, segmentation_list),
Expand Down

0 comments on commit 312d6f8

Please sign in to comment.