Transformation
Transformation acts as preprocessing and augmentation layers for model training. For 3D transformation, the expected format is: depth, height, width, channel
. The transformations are implemented for a single sample. All the transformations are implemented using tensorflow
in order to be able to run with tf.data
API with keras
multi-backend library.
img_path = 'images/coronacases_001.nii.gz'
mask_path = 'masks/coronacases_001.nii.gz'
nib_x = nib.load(img_path) # (512, 512, 301)
nib_y = nib.load(mask_path) # (512, 512, 301)
image = nib_x.get_fdata().transpose(2, 0, 1)[...,None] # (301, 512, 512, 1)
label = nib_y.get_fdata().transpose(2, 0, 1)[...,None] # (301, 512, 512, 1)
Preprocessing
Resize
Resize the input image to a given spatial size. Implemented using tf.image.resize
and depth_interpolate
.
from medicai.transforms import (
TensorBundle,
Resize,
)
inputs = TensorBundle({"image": image, "label": label})
transform = Resize(
keys=["image", "label"],
spatial_shape=(96, 96, 96)
)
output = transform(inputs)
transform_image = output.data["image"]
transform_label = output.data["label"]
transform_image.shape, transform_label.shape
(TensorShape([96, 96, 96, 1]), TensorShape([96, 96, 96, 1]))
ScaleIntensityRange
Scale the intensity of the entire array from the range [a_min, a_max]
to [b_min, b_max]
, with an optional clipping feature.
from medicai.transforms import (
TensorBundle,
ScaleIntensityRange,
)
inputs = TensorBundle({"image": image, "label": label})
transform = ScaleIntensityRange(
keys=["image"],
a_min=-175,
a_max=250,
b_min=0.0,
b_max=1.0,
clip=True
)
output = transform(inputs)
transform_image = output.data["image"]
transform_label = output.data["label"]
transform_image.shape, transform_label.shape
transform_image.numpy().min(), transform_image.numpy().max()
(0.0, 1.0)
np.unique(transform_label)
array([0., 1., 2., 3.])
CropForeground
Crop an image using a bounding box, where the bounding box is generated by selecting the foreground through the select_fn
function at the specified channel_indices. A margin is added to each spatial dimension of the bounding box.
from medicai.transforms import (
CropForeground,
TensorBundle,
)
inputs = TensorBundle({"image": image, "label": label})
transform = CropForeground(
keys=("image", "label"),
source_key="image"
)
output = transform(inputs)
transform_image = output.data["image"]
transform_label = output.data["label"]
transform_image.shape, transform_label.shape
((301, 512, 415, 1), (301, 512, 415, 1))
Spacing
Resample the input image into the specified pixdim. It will require affine
meta information.
from medicai.transforms import (
TensorBundle,
Spacing,
)
affine = nib_x.affine
affine[:, [0, 1, 2]] = affine[:, [2, 0, 1]] # (H, W, D) -> (D, H, W)
trans_affine = affine.astype(np.float32)
inputs = TensorBundle(
{
"image": image,
"label": label
},
meta={
'affine': trans_affine
}
)
transform = Spacing(
keys=["image", "label"],
pixdim=[2.0, 1.5, 1.5]
)
output = transform(inputs)
transform_image = output.data["image"]
transform_label = output.data["label"]
transform_image.shape, transform_label.shape
(TensorShape([121, 276, 341, 1]), TensorShape([121, 276, 341, 1]))
Orientation
Change the orientation of the input image to the specified one based on the provided axcodes
. It will require affine
meta information.
from medicai.transforms import (
TensorBundle,
Orientation,
)
affine = nib_x.affine
affine[:, [0, 1, 2]] = affine[:, [2, 0, 1]] # (H, W, D) -> (D, H, W)
trans_affine = affine.astype(np.float32)
inputs = TensorBundle(
{
"image": image,
"label": label
},
meta={
'affine': trans_affine
}
)
transform = Orientation(
keys=["image", "label"],
axcodes="RAS"
)
output = transform(inputs)
transform_image = output.data["image"]
transform_label = output.data["label"]
transform_image.shape, transform_label.shape
(TensorShape([301, 512, 512, 1]), TensorShape([301, 512, 512, 1]))
Random Preprocessing
RandRotate90
Rotate the input sample by 90 degrees a random number of times.
from medicai.transforms import (
TensorBundle,
RandRotate90,
)
inputs = TensorBundle({"image": image, "label": label})
transform = RandRotate90(
keys=["image", "label"],
prob=1.0,
max_k=3
)
output = transform(inputs)
transform_image = output.data["image"]
transform_label = output.data["label"]
transform_image.shape, transform_label.shape
(TensorShape([301, 512, 512, 1]), TensorShape([301, 512, 512, 1]))
RandShiftIntensity
Randomly shift the intensity of the image by applying a randomly selected offset.
from medicai.transforms import (
TensorBundle,
RandShiftIntensity,
)
inputs = TensorBundle({"image": image, "label": label})
transform = RandShiftIntensity(
keys=["image", "label"],
offsets=(-0.2, 0.8),
prob=1.0
)
output = transform(inputs)
transform_image = output.data["image"]
transform_label = output.data["label"]
transform_image.shape, transform_label.shape
(TensorShape([301, 512, 512, 1]), TensorShape([301, 512, 512, 1]))
RandCropByPosNegLabel
Randomly crop fixed-sized regions from the image, with the center of each crop being either a foreground or background voxel based on the specified Positive-Negative Ratio. The function will return a list of arrays for all the cropped images.
from medicai.transforms import (
TensorBundle,
RandCropByPosNegLabel,
)
inputs = TensorBundle({"image": image, "label": label})
transform = RandCropByPosNegLabel(
keys=["image", "label"],
spatial_size=(96, 96, 96),
pos=1,
neg=1,
num_samples=1
)
output = transform(inputs)
transform_image = output.data["image"]
transform_label = output.data["label"]
transform_image.shape, transform_label.shape
(TensorShape([96, 96, 96, 1]), TensorShape([96, 96, 96, 1]))
Compose
The Compose
API allows chaining a series of callables in sequential order. Each transform in the sequence must accept a single argument and return a single value, enabling the creation of a pipeline of transformations that are applied one after another. A sample example is shown below.
from medicai.transforms import (
Compose,
Orientation,
Spacing,
)
transform = Compose(
[
Orientation(keys=["image", "label"], axcodes="RAS"),
Spacing(keys=["image", "label"], pixdim=[1.0, 1.2, 1.2])
]
)
inputs = {"image": image, "label": label}
meta = {'affine': trans_affine}
output = transform(inputs, meta)
transform_image = output.data["image"]
transform_label = output.data["label"]
transform_image.shape, transform_label.shape
(TensorShape([243, 345, 426, 1]), TensorShape([243, 345, 426, 1]))