Commit

Data transformations for Greyscale 1D channel images with uint16 encoding.
GMW99 committed May 13, 2024
1 parent 24c1ad7 commit 685831c
Showing 3 changed files with 194 additions and 2 deletions.
4 changes: 2 additions & 2 deletions src/kompressor/__init__.py
@@ -21,6 +21,6 @@
# SOFTWARE.


-from kompressor import image, volume, mapping, utils, dataloaders
+from kompressor import image, volume, mapping, utils, dataloaders, greyscale

-VERSION = 'v1.0a'
+VERSION = "v1.0a"
25 changes: 25 additions & 0 deletions src/kompressor/greyscale/__init__.py
@@ -0,0 +1,25 @@
# MIT License
#
# Copyright (c) 2020 Joss Whittle
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.


# Import the data transforms for single-channel (greyscale) images
from . import data
167 changes: 167 additions & 0 deletions src/kompressor/greyscale/data.py
@@ -0,0 +1,167 @@
# MIT License
#
# Copyright (c) 2020 Gabryel Mason-Williams
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.


from typing import Callable

import tensorflow as tf

from kompressor.image.utils import validate_chunk

# Normalisation divisor for uint16 data: 2**16 (one above the uint16 maximum of 65535),
# mapping encoded values into the range [0, 1)
UINT16_MAX_VALUE = 65536.0


def transform_extract_level_from_highres(
level: int,
) -> Callable[[tf.Tensor], tf.Tensor]:
"""Return Method to Skip Sample High Resolution Tensor to Skip Level.
Args:
level: Skip level: 2 ** level so a level of 2 would result in a skip size of 4. Which would quarter the size of an image.
Returns:
A function that takes a tensor and returns a tensor reduced by the skip level with neccesary padding.
"""
assert level >= 0
skip = 2**level

def fn(highres: tf.Tensor) -> tf.Tensor:
"""
Skip Sample Image
Args:
highres: High Resolution Tensor to be skip sampled
Returns:
Skipped Tensor
"""
# Downsample by skip sampling
highres = highres[::skip, ::skip]

# Determine the size of the input and the padding to apply
padding_height, padding_width = (tf.shape(highres)[0] + 1) % 2, (
tf.shape(highres)[1] + 1
) % 2

# Pad highres using reflect to match lowres padded with symmetric
return tf.pad(
highres, ((0, padding_height), (0, padding_width), (0, 0)), mode="REFLECT"
)

return fn


def transform_random_crop_highres(chunk):
    """Return a function that takes a random single-channel crop of the validated chunk size."""
    # Assert chunk size is valid
    ch, cw = validate_chunk(chunk)

def fn(highres):
return tf.image.random_crop(highres, (ch, cw, 1))

return fn


def transform_lowres_and_targets_from_highres(padding):
    """Return a function that maps a uint16 high-resolution tensor to a dict of
    normalised float32 'lowres' inputs and stacked LRUDC 'targets'."""
    assert padding >= 0

def fn(highres):
highres = tf.expand_dims(highres, axis=0)

# Downsample by skip sampling
lowres = highres[:, ::2, ::2]

# Pad only the 2 spatial dimensions
lowres = tf.pad(
lowres,
((0, 0), (padding, padding), (padding, padding), (0, 0)),
mode="SYMMETRIC",
)

# Slice out each value of the pluses
lmap = highres[:, 1::2, :-1:2]
rmap = highres[:, 1::2, 2::2]
umap = highres[:, :-1:2, 1::2]
dmap = highres[:, 2::2, 1::2]
cmap = highres[:, 1::2, 1::2]

# Stack the vectors LRUDC order with dim [B,H,W,5,...]
targets = tf.stack([lmap, rmap, umap, dmap, cmap], axis=3)

return dict(
lowres=tf.cast(lowres[0], tf.float32) / UINT16_MAX_VALUE,
targets=tf.cast(targets[0], tf.float32) / UINT16_MAX_VALUE,
)

return fn


def random_chunk_dataset(
    dataset,
    padding=0,
    chunk=64,
    chunks_per_sample=1,
    chunks_shuffle_buffer=None,
    levels=1,
):
    """Build a tf.data pipeline that draws random high-resolution chunks from `levels`
    skip-sampled levels of each image and maps them to normalised lowres inputs and
    LRUDC prediction targets."""
    assert padding >= 0
    assert levels > 0

# Construct separate datasets for each level requested, each dataset outputs the same chunk size
ds_levels = list()
for level in range(levels):
# From the input dataset of consistently sized highres images for level=0
ds = dataset

# Extract the skip-sampled highres image for the given level and apply padding if needed
ds = ds.map(
transform_extract_level_from_highres(level),
num_parallel_calls=tf.data.AUTOTUNE,
)

# Repeat the same image multiple times to allow different random chunks to be sampled from it
if chunks_per_sample > 1:
ds = ds.flat_map(
lambda highres: tf.data.Dataset.from_tensors(highres).repeat(
chunks_per_sample
)
)

# Extract a random chunk from the highres image
ds = ds.map(
transform_random_crop_highres(chunk), num_parallel_calls=tf.data.AUTOTUNE
)

ds_levels.append(ds)

# Interleave the streams of skip-sampled, padded, and cropped highres images from each level
ds = tf.data.Dataset.sample_from_datasets(ds_levels)

# Shuffle the stream of highres images from each level
if (chunks_shuffle_buffer is not None) and (chunks_shuffle_buffer > 0):
ds = ds.shuffle(chunks_shuffle_buffer, reshuffle_each_iteration=True)

# From the highres images extract the lowres inputs and prediction targets
ds = ds.map(
transform_lowres_and_targets_from_highres(padding),
num_parallel_calls=tf.data.AUTOTUNE,
)

return ds
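
For context, here is a minimal usage sketch of the new greyscale pipeline. It is illustrative only and not part of the commit: the synthetic 257x257x1 uint16 images, the odd 65-pixel chunk size (assumed to be accepted by validate_chunk), and the output shapes noted in the comments are assumptions made for the example.

import tensorflow as tf

from kompressor import greyscale

# Synthetic single-channel uint16 images: 8 samples of shape (257, 257, 1)
images = tf.cast(
    tf.random.uniform((8, 257, 257, 1), maxval=65536, dtype=tf.int32), tf.uint16
)
ds = tf.data.Dataset.from_tensor_slices(images)

# Two skip-sampled levels, 65x65 chunks, four random chunks drawn per image
ds = greyscale.data.random_chunk_dataset(
    ds,
    padding=1,
    chunk=65,
    chunks_per_sample=4,
    chunks_shuffle_buffer=32,
    levels=2,
)

for example in ds.take(1):
    # lowres: (33 + 2 * padding, 33 + 2 * padding, 1), float32 in [0, 1)
    print(example["lowres"].shape)
    # targets: (32, 32, 5, 1), float32 in [0, 1), stacked in LRUDC order
    print(example["targets"].shape)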
