This repository has been archived by the owner on Feb 11, 2023. It is now read-only.

Refactor: more deepsource suggestions (#62)
* Unnecessary comprehension
* Consider using a set comprehension
* Unnecessary `elif` / `else` block after `continue`
* Useless `return` detected
* Assigning result of a function call, where the function returns `None`
* Function contains unused argument
* Unused variable found
* Use `items()` to iterate over a dictionary
* Re-defined variable from outer scope
* Consider using `$HOME` instead of tilde `~` to expand in quotes
* Use double quote to prevent globbing and word splitting
* Unnecessary `delete` statement in a local scope
* Unnecessary `None` provided as default
* Chained comparison detected
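
For illustration, a minimal sketch of a few of the patterns listed above (hypothetical names and values, not code taken from the diff below):

import os.path

paths = ['/data/a.png', '/data/b.png', '/data/a.png']
row = {'name': 'pair-01'}
items = [1, 0, 2]
low, value, high = 0, 5, 10

# "Consider using a set comprehension" (also removes the unnecessary comprehension):
names = {os.path.basename(p) for p in paths}  # instead of set([os.path.basename(p) for p in paths])

# "Unnecessary `None` provided as default" -- dict.get already defaults to None:
status = row.get('status')  # instead of row.get('status', None)

# "Unnecessary `elif` / `else` block after `continue`":
for item in items:
    if not item:
        continue
    print('processing', item)  # no need to nest this under an `else:`

# "Chained comparison detected":
assert low <= value <= high  # instead of (low <= value) and (value <= high)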

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Borda and pre-commit-ci[bot] authored Jul 15, 2021
1 parent 4013b29 commit 15ecc5c
Showing 26 changed files with 99 additions and 114 deletions.
54 changes: 26 additions & 28 deletions birl/benchmark.py
@@ -39,8 +39,8 @@
create_basic_parser,
exec_commands,
Experiment,
+get_nb_workers,
iterate_mproc_map,
-nb_workers,
parse_arg_params,
string_dict,
)
@@ -93,7 +93,6 @@ class ImRegBenchmark(Experiment):
>>> benchmark = ImRegBenchmark(params)
>>> benchmark.run()
True
->>> del benchmark
>>> shutil.rmtree(path_out, ignore_errors=True)
>>> # Running in multiple parallel threads:
@@ -108,14 +107,13 @@ class ImRegBenchmark(Experiment):
>>> benchmark = ImRegBenchmark(params)
>>> benchmark.run()
True
->>> del benchmark
>>> shutil.rmtree(path_out, ignore_errors=True)
"""

#: timeout for executing a single image registration; NOTE: does not work on Py2
EXECUTE_TIMEOUT = 60 * 60 # default = 1 hour
#: default number of threads used by benchmarks
-NB_WORKERS_USED = nb_workers(0.8)
+NB_WORKERS_USED = get_nb_workers(0.8)
#: some needed files
NAME_CSV_REGISTRATION_PAIRS = 'registration-results.csv'
#: default file for exporting results in table format
@@ -190,10 +188,10 @@ def __init__(self, params):
logging.info(self.__doc__)
self._df_overview = None
self._df_experiments = None
-self.nb_workers = params.get('nb_workers', nb_workers(0.25))
+self.nb_workers = params.get('nb_workers', get_nb_workers(0.25))
self._path_csv_regist = os.path.join(self.params['path_exp'], self.NAME_CSV_REGISTRATION_PAIRS)

-def _absolute_path(self, path, destination='data', base_path=None):
+def _absolute_path(self, path, destination='data', base_path=''):
""" update the path to the dataset or output
:param str path: original path
@@ -206,6 +204,8 @@ def _absolute_path(self, path, destination='data', base_path=None):
path = os.path.join(self.params['path_dataset'], path)
elif destination and destination == 'expt' and 'path_exp' in self.params:
path = os.path.join(self.params['path_exp'], path)
+elif base_path:
+path = os.path.join(base_path, path)
path = update_path(path, absolute=True)
return path

@@ -260,7 +260,7 @@ def _get_paths(self, item, prefer_pproc=True):
"""

def __path_img(col):
-is_temp = isinstance(item.get(col + self.COL_IMAGE_EXT_TEMP, None), str)
+is_temp = isinstance(item.get(col + self.COL_IMAGE_EXT_TEMP), str)
if prefer_pproc and is_temp:
path = self._absolute_path(item[col + self.COL_IMAGE_EXT_TEMP], destination='expt')
else:
@@ -375,7 +375,7 @@ def __path_img(path_img, pproc):

def __save_img(col, path_img_new, img):
col_temp = col + self.COL_IMAGE_EXT_TEMP
-if isinstance(item.get(col_temp, None), str):
+if isinstance(item.get(col_temp), str):
path_img = self._absolute_path(item[col_temp], destination='expt')
os.remove(path_img)
save_image(path_img_new, img)
@@ -422,12 +422,12 @@ def __remove_pproc_images(self, item):
for col_in, col_warp in [(self.COL_IMAGE_REF, self.COL_IMAGE_REF_WARP),
(self.COL_IMAGE_MOVE, self.COL_IMAGE_MOVE_WARP)]:
col_temp = col_in + self.COL_IMAGE_EXT_TEMP
-is_temp = isinstance(item.get(col_temp, None), str)
+is_temp = isinstance(item.get(col_temp), str)
# skip if the field is empty
if not is_temp:
continue
# remove only if the warped image differs from the pre-processed image
-elif item.get(col_warp, None) != item.get(col_temp, None):
+if item.get(col_warp) != item.get(col_temp):
# update the path to the pre-process image in experiment folder
path_img = self._absolute_path(item[col_temp], destination='expt')
# remove the image and clear the field
@@ -448,7 +448,7 @@ def _perform_registration(self, df_row):
path_dir_reg = self._get_path_reg_dir(row)
# check whether the particular experiment already exists and has results
if self.__check_exist_regist(idx, path_dir_reg):
-return None
+return
create_folder(path_dir_reg)

time_start = time.time()
@@ -458,14 +458,14 @@
row = self._prepare_img_registration(row)
# if the pre-processing failed, return None
if not row:
-return None
+return

# measure execution time
time_start = time.time()
row = self._execute_img_registration(row)
# if the experiment failed, return None
if not row:
-return None
+return
# compute the registration time in minutes
row[self.COL_TIME] = (time.time() - time_start) / 60.
# remove some temporary images
@@ -474,15 +474,15 @@
row = self._parse_regist_results(row)
# if the post-processing failed, return None
if not row:
-return None
+return
row = self._clear_after_registration(row)

if self.params.get('visual', False):
logging.debug('-> visualise results of experiment: %r', idx)
self.visualise_registration(
(idx, row),
-path_dataset=self.params.get('path_dataset', None),
-path_experiment=self.params.get('path_exp', None),
+path_dataset=self.params.get('path_dataset'),
+path_experiment=self.params.get('path_exp'),
)

return row
@@ -494,8 +494,8 @@ def _evaluate(self):
_compute_landmarks_statistic = partial(
self.compute_registration_statistic,
df_experiments=self._df_experiments,
-path_dataset=self.params.get('path_dataset', None),
-path_experiment=self.params.get('path_exp', None),
+path_dataset=self.params.get('path_dataset'),
+path_experiment=self.params.get('path_exp'),
)
self.__execute_method(_compute_landmarks_statistic, self._df_experiments, desc='compute TRE', nb_workers=1)

@@ -588,7 +588,7 @@ def _extract_execution_time(self, item):
:return float|None: time in minutes
"""
_ = self._get_path_reg_dir(item)
-return None
+return 0

def _parse_regist_results(self, item):
""" evaluate rests of the experiment and identity the registered image
@@ -659,7 +659,7 @@ def _image_diag(cls, item, path_img_ref=None):
:param str path_img_ref: optional path to the reference image
:return float|None: image diagonal
"""
-img_diag = dict(item).get(cls.COL_IMAGE_DIAGONAL, None)
+img_diag = dict(item).get(cls.COL_IMAGE_DIAGONAL)
if not img_diag and path_img_ref and os.path.isfile(path_img_ref):
_, img_diag = image_sizes(path_img_ref)
return img_diag
@@ -703,7 +703,7 @@ def compute_registration_statistic(
)

# define the target and initial state according to the experiment results
-use_move_warp = isinstance(row.get(cls.COL_POINTS_MOVE_WARP, None), str)
+use_move_warp = isinstance(row.get(cls.COL_POINTS_MOVE_WARP), str)
if use_move_warp:
points_init, points_target = points_move, points_ref
col_source, col_target = cls.COL_POINTS_MOVE, cls.COL_POINTS_REF
@@ -811,7 +811,7 @@ def _load_warped_image(cls, item, path_experiment=None):
:param str|None path_experiment: path to the experiment folder
:return ndarray:
"""
-name_img = item.get(cls.COL_IMAGE_MOVE_WARP, None)
+name_img = item.get(cls.COL_IMAGE_MOVE_WARP)
if not isinstance(name_img, str):
logging.warning('Missing registered image in "%s"', cls.COL_IMAGE_MOVE_WARP)
image_warp = None
@@ -834,7 +834,7 @@ def _visual_image_move_warp_lnds_move_warp(cls, item, path_dataset=None, path_ex
:param str|None path_experiment: path to the experiment folder
:return obj|None:
"""
-if not isinstance(item.get(cls.COL_POINTS_MOVE_WARP, None), str):
+if not isinstance(item.get(cls.COL_POINTS_MOVE_WARP), str):
raise ValueError('Missing registered points in "%s"' % cls.COL_POINTS_MOVE_WARP)
path_points_warp = update_path(item[cls.COL_POINTS_MOVE_WARP], pre_path=path_experiment)
if not os.path.isfile(path_points_warp):
@@ -856,7 +856,6 @@
# visualise how the landmarks moved during registration
image_ref = load_image(path_img_ref)
fig = draw_images_warped_landmarks(image_ref, image_warp, points_move, points_ref, points_warp)
-del image_ref, image_warp
return fig

@classmethod
@@ -868,7 +867,7 @@ def _visual_image_move_warp_lnds_ref_warp(cls, item, path_dataset=None, path_exp
:param str|None path_experiment: path to the experiment folder
:return obj|None:
"""
-if not isinstance(item.get(cls.COL_POINTS_REF_WARP, None), str):
+if not isinstance(item.get(cls.COL_POINTS_REF_WARP), str):
raise ValueError('Missing registered points in "%s"' % cls.COL_POINTS_REF_WARP)
path_points_warp = update_path(item[cls.COL_POINTS_REF_WARP], pre_path=path_experiment)
if not os.path.isfile(path_points_warp):
@@ -896,7 +895,6 @@

# visualise how the landmarks moved during registration
fig = draw_images_warped_landmarks(image_ref, image_move, points_ref, points_move, points_warp)
-del image_ref, image_move
return fig

@classmethod
@@ -912,9 +910,9 @@ def visualise_registration(cls, idx_row, path_dataset=None, path_experiment=None
row = dict(row) # convert even series to dictionary
fig, path_fig = None, None
# visualise particular experiment by idx
-if isinstance(row.get(cls.COL_POINTS_MOVE_WARP, None), str):
+if isinstance(row.get(cls.COL_POINTS_MOVE_WARP), str):
fig = cls._visual_image_move_warp_lnds_move_warp(row, path_dataset, path_experiment)
-elif isinstance(row.get(cls.COL_POINTS_REF_WARP, None), str):
+elif isinstance(row.get(cls.COL_POINTS_REF_WARP), str):
fig = cls._visual_image_move_warp_lnds_ref_warp(row, path_dataset, path_experiment)
else:
logging.error('Visualisation: no output image or landmarks')
2 changes: 1 addition & 1 deletion birl/utilities/data_io.py
@@ -78,7 +78,7 @@ def load_landmarks(path_file):
"""
if not os.path.isfile(path_file):
logging.warning('missing landmarks "%s"', path_file)
-return None
+return
_, ext = os.path.splitext(path_file)
if ext == '.csv':
return load_landmarks_csv(path_file)
13 changes: 6 additions & 7 deletions birl/utilities/dataset.py
@@ -356,7 +356,7 @@ def is_point_inside_perpendicular(point_begin, point_end, point_test):
angle_a = norm_angle(line_angle_2d(point_end, point_test, deg=True) - angle_line, deg=True)
# compute the angle of begin-test compared to begin-end
angle_b = norm_angle(line_angle_2d(point_begin, point_test, deg=True) - angle_line, deg=True)
-if (angle_a >= 90) and (angle_b <= 90):
+if angle_a >= 90 >= angle_b:
state = 1
elif (angle_a <= -90) and (angle_b >= -90):
state = -1
@@ -448,7 +448,7 @@ def compute_half_polygon(landmarks, idx_start=0, idx_end=-1):
poly = [pt_begin]

def _in(pt0, pts):
-return any([np.array_equal(pt, pt0) for pt in pts])
+return any(np.array_equal(pt, pt0) for pt in pts)

def _disturbed(poly, pt_new, pt_test):
last = is_point_in_quadrant_left(poly[-1], pt_new, pt_test) == 1
@@ -465,9 +465,8 @@ def _disturbed(poly, pt_new, pt_test):
# find a point which does not have any point on the left perpendic
if any(_disturbed(poly, pt0, pt) for pt in points if not _in(pt, poly + [pt0])):
continue
-else:
-poly.append(pt0)
-break
+poly.append(pt0)
+break
poly = np.array(poly).tolist()
return poly

@@ -746,7 +745,7 @@ def convert_landmarks_to_itk(lnds, image_size):
[150, 50],
[100, 150]])
"""
-height, width = image_size
+height, _ = image_size
# swap rows-columns to X-Y
lnds = np.array(lnds)[:, [1, 0]]
# revert the Y by height
@@ -773,7 +772,7 @@ def convert_landmarks_from_itk(lnds, image_size):
>>> np.array_equal(lnds, lnds2)
True
"""
-height, width = image_size
+height, _ = image_size
# swap rows-columns to X-Y
lnds = np.array(lnds)[:, [1, 0]]
# revert the Y by height
2 changes: 1 addition & 1 deletion birl/utilities/experiments.py
@@ -39,7 +39,7 @@
ITERABLE_TYPES = (list, tuple, types.GeneratorType)


-def nb_workers(ratio):
+def get_nb_workers(ratio):
"""Given usage `ratio` return nb of cpu to use."""
return max(1, int(CPU_COUNT * ratio))

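
As a self-contained sketch of the renamed helper (the CPU_COUNT definition below is our assumption for illustration; the module defines its own constant):

import multiprocessing

CPU_COUNT = multiprocessing.cpu_count()  # assumed here; birl.utilities.experiments defines its own

def get_nb_workers(ratio):
    """Given usage `ratio`, return the number of CPUs to use (at least one)."""
    return max(1, int(CPU_COUNT * ratio))

# On an 8-core machine:
#   get_nb_workers(0.8)  -> 6  (NB_WORKERS_USED for benchmarks)
#   get_nb_workers(0.25) -> 2  (per-instance default in ImRegBenchmark)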
12 changes: 6 additions & 6 deletions bm_ANHIR/evaluate_submission.py
@@ -76,9 +76,9 @@
from birl.benchmark import _df_drop_unnamed, COL_PAIRED_LANDMARKS, filter_paired_landmarks, ImRegBenchmark
from birl.utilities.data_io import create_folder, load_landmarks, save_landmarks, update_path
from birl.utilities.dataset import parse_path_scale
-from birl.utilities.experiments import FORMAT_DATE_TIME, iterate_mproc_map, nb_workers, parse_arg_params
+from birl.utilities.experiments import FORMAT_DATE_TIME, get_nb_workers, iterate_mproc_map, parse_arg_params

-NB_WORKERS = nb_workers(0.9)
+NB_WORKERS = get_nb_workers(0.9)
NAME_CSV_RESULTS = 'registration-results.csv'
NAME_JSON_COMPUTER = 'computer-performances.json'
NAME_JSON_RESULTS = 'metrics.json'
@@ -220,15 +220,15 @@ def parse_landmarks(idx_row):
item = {
'name-tissue': os.path.basename(os.path.dirname(path_dir)),
'scale-tissue': parse_path_scale(os.path.basename(path_dir)),
-'type-tissue': row.get(COL_TISSUE, None),
+'type-tissue': row.get(COL_TISSUE),
'name-reference': os.path.splitext(os.path.basename(row[ImRegBenchmark.COL_POINTS_REF]))[0],
'name-source': os.path.splitext(os.path.basename(row[ImRegBenchmark.COL_POINTS_MOVE]))[0],
# 'reference landmarks': np.round(lnds_ref, 1).tolist(),
# 'warped landmarks': np.round(lnds_warp, 1).tolist(),
'matched-landmarks': match_lnds,
'Robustness': np.round(row.get(ImRegBenchmark.COL_ROBUSTNESS, 0), 3),
-'Norm-Time_minutes': np.round(row.get(COL_NORM_TIME, None), 5),
-'Status': row.get(ImRegBenchmark.COL_STATUS, None),
+'Norm-Time_minutes': np.round(row.get(COL_NORM_TIME), 5),
+'Status': row.get(ImRegBenchmark.COL_STATUS),
}

def _round_val(row, col):
@@ -431,7 +431,7 @@ def replicate_missing_warped_landmarks(df_experiments, path_dataset, path_experi
# iterate over whole table and check if the path is valid
for idx, row in df_experiments.iterrows():
# select reference/moving warped landmarks
-use_move_warp = isinstance(row.get(ImRegBenchmark.COL_POINTS_MOVE_WARP, None), str)
+use_move_warp = isinstance(row.get(ImRegBenchmark.COL_POINTS_MOVE_WARP), str)
col_lnds_warp = ImRegBenchmark.COL_POINTS_MOVE_WARP \
if use_move_warp else ImRegBenchmark.COL_POINTS_REF_WARP
# extract the CSV path
2 changes: 1 addition & 1 deletion bm_ANHIR/generate_regist_pairs.py
@@ -166,7 +166,7 @@ def create_dataset_cover(name, dataset, path_images, path_landmarks, path_out, s
if os.path.isdir(p)]

reg_pairs = []
-logging.debug('found: %r', sorted(set([os.path.basename(tp[1]) for tp in tissues])))
+logging.debug('found: %r', sorted({os.path.basename(tp[1]) for tp in tissues}))
for tissue, p_tissue in tqdm.tqdm(sorted(tissues)):
sc = dataset[tissue][name]
rp_lnds, rp_imgs = list_landmarks_images(p_tissue, sc, path_landmarks, path_images)
14 changes: 7 additions & 7 deletions bm_CIMA/run-SOTA-experiments.sh
@@ -13,49 +13,49 @@ apps="~/TEMP/Applications"
python ./bm_experiments/bm_bUnwarpJ.py \
-t $table -d $dataset -o $results \
--run_comp_benchmark \
--Fiji $apps/Fiji.app/ImageJ-linux64 \
+-Fiji "$apps/Fiji.app/ImageJ-linux64" \
-cfg ./configs/ImageJ_bUnwarpJ_histol.yaml \
--nb_workers $jobs --unique

python ./bm_experiments/bm_bUnwarpJ.py \
-t $table -d $dataset -o $results \
--run_comp_benchmark \
--Fiji $apps/Fiji.app/ImageJ-linux64 \
+-Fiji "$apps/Fiji.app/ImageJ-linux64" \
-cfg ./configs/ImageJ_bUnwarpJ-SIFT_histol.yaml \
--nb_workers $jobs --unique

python ./bm_experiments/bm_DROP2.py \
-t $table -d $dataset -o $results \
--run_comp_benchmark \
--DROP $apps/DROP2/dropreg \
+-DROP "$apps/DROP2/dropreg" \
-cfg ./configs/DROP2.txt \
--nb_workers $jobs --unique

export LD_LIBRARY_PATH=$apps/elastix/bin:$LD_LIBRARY_PATH
python ./bm_experiments/bm_elastix.py \
-t $table -d $dataset -o $results \
--run_comp_benchmark \
--elastix $apps/elastix/bin \
+-elastix "$apps/elastix/bin" \
-cfg ./configs/elastix_bspline.txt \
--nb_workers $jobs --unique

python ./bm_experiments/bm_rNiftyReg.py \
-t $table -d $dataset -o $results \
--run_comp_benchmark \
--R $apps/R-3.5.3/bin/Rscript \
+-R "$apps/R-3.5.3/bin/Rscript" \
-script ./scripts/Rscript/RNiftyReg_linear.r \
--nb_workers $jobs --unique

python ./bm_experiments/bm_RVSS.py \
-t $table -d $dataset -o $results \
--run_comp_benchmark \
--Fiji $apps/Fiji.app/ImageJ-linux64 \
+-Fiji "$apps/Fiji.app/ImageJ-linux64" \
-cfg ./configs/ImageJ_RVSS_histol.yaml \
--nb_workers $jobs --unique

python ./bm_experiments/bm_ANTs.py \
-t $table -d $dataset -o $results \
--run_comp_benchmark \
--ANTs $apps/antsbin/bin \
+-ANTs "$apps/antsbin/bin" \
-cfg ./configs/ANTs_SyN.txt \
--nb_workers $jobs --unique
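
The quoting fixes above matter whenever $apps may contain spaces or glob characters; unquoted, the shell would word-split the expanded path. As an illustrative aside (not code from this repository), shlex.quote gives the same protection when composing such commands from Python:

import shlex

app_path = '/home/user/My Apps/Fiji.app/ImageJ-linux64'  # hypothetical path containing a space

# without quoting, the shell would split this argument into two words:
arg_bad = '-Fiji %s' % app_path

# shlex.quote plays the role of the double quotes in the script above:
arg_good = '-Fiji %s' % shlex.quote(app_path)
print(arg_good)  # -Fiji '/home/user/My Apps/Fiji.app/ImageJ-linux64'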