diff --git a/config.yaml b/config.yaml
index 0a03c362..b6d78115 100644
--- a/config.yaml
+++ b/config.yaml
@@ -12,7 +12,7 @@
carpentry: 'IDEAS'
# Overall title for pages.
-title: 'Medical-Image-Registration-Short-Course'
+title: 'MPHY0025 IPMI Registration Practical Exercises Instructions'
# Date the lesson was created (YYYY-MM-DD, this is empty by default)
created: '2024-05-13'
diff --git a/episodes/fig/CT-PET-VI-02.svg b/episodes/fig/CT-PET-VI-02.svg
deleted file mode 100644
index ed484498..00000000
--- a/episodes/fig/CT-PET-VI-02.svg
+++ /dev/null
@@ -1,18051 +0,0 @@
-
-
diff --git a/episodes/fig/Figure_1.png b/episodes/fig/Figure_1.png
deleted file mode 100644
index 40363516..00000000
Binary files a/episodes/fig/Figure_1.png and /dev/null differ
diff --git a/episodes/fig/colour-map-editor.png b/episodes/fig/colour-map-editor.png
deleted file mode 100644
index d8d48a67..00000000
Binary files a/episodes/fig/colour-map-editor.png and /dev/null differ
diff --git a/episodes/fig/final_output.png b/episodes/fig/final_output.png
deleted file mode 100644
index 319da063..00000000
Binary files a/episodes/fig/final_output.png and /dev/null differ
diff --git a/episodes/fig/final_results_vid.mp4 b/episodes/fig/final_results_vid.mp4
deleted file mode 100644
index 7603c893..00000000
Binary files a/episodes/fig/final_results_vid.mp4 and /dev/null differ
diff --git a/episodes/fig/image-info.png b/episodes/fig/image-info.png
deleted file mode 100644
index 688f560f..00000000
Binary files a/episodes/fig/image-info.png and /dev/null differ
diff --git a/episodes/fig/itk-snap-16-32-bits.svg b/episodes/fig/itk-snap-16-32-bits.svg
deleted file mode 100644
index 961779e6..00000000
--- a/episodes/fig/itk-snap-16-32-bits.svg
+++ /dev/null
@@ -1,6817 +0,0 @@
-
-
diff --git a/episodes/fig/itk-snap-additional-image.png b/episodes/fig/itk-snap-additional-image.png
deleted file mode 100644
index 1ee08f11..00000000
Binary files a/episodes/fig/itk-snap-additional-image.png and /dev/null differ
diff --git a/episodes/fig/itk-snap-calculating-registrations.png b/episodes/fig/itk-snap-calculating-registrations.png
deleted file mode 100644
index f6b7ce24..00000000
Binary files a/episodes/fig/itk-snap-calculating-registrations.png and /dev/null differ
diff --git a/episodes/fig/itk-snap-colour-overlay-as-semi-transparent-image.svg b/episodes/fig/itk-snap-colour-overlay-as-semi-transparent-image.svg
deleted file mode 100644
index 418fc235..00000000
--- a/episodes/fig/itk-snap-colour-overlay-as-semi-transparent-image.svg
+++ /dev/null
@@ -1,8173 +0,0 @@
-
-
diff --git a/episodes/fig/itk-snap-colour-overlay-as-separate-image.svg b/episodes/fig/itk-snap-colour-overlay-as-separate-image.svg
deleted file mode 100644
index fdc8c124..00000000
--- a/episodes/fig/itk-snap-colour-overlay-as-separate-image.svg
+++ /dev/null
@@ -1,4964 +0,0 @@
-
-
diff --git a/episodes/fig/itk-snap-image-manual-registration.png b/episodes/fig/itk-snap-image-manual-registration.png
deleted file mode 100644
index 0a5939ab..00000000
Binary files a/episodes/fig/itk-snap-image-manual-registration.png and /dev/null differ
diff --git a/episodes/fig/itk-snap-layer-inspector-color-map-tab.png b/episodes/fig/itk-snap-layer-inspector-color-map-tab.png
deleted file mode 100644
index 687348c2..00000000
Binary files a/episodes/fig/itk-snap-layer-inspector-color-map-tab.png and /dev/null differ
diff --git a/episodes/fig/itk-snap-layer-inspector-contrast-tab.png b/episodes/fig/itk-snap-layer-inspector-contrast-tab.png
deleted file mode 100644
index 0796d5cb..00000000
Binary files a/episodes/fig/itk-snap-layer-inspector-contrast-tab.png and /dev/null differ
diff --git a/episodes/fig/itk-snap-layer-inspector-general-tab.png b/episodes/fig/itk-snap-layer-inspector-general-tab.png
deleted file mode 100644
index 01d41910..00000000
Binary files a/episodes/fig/itk-snap-layer-inspector-general-tab.png and /dev/null differ
diff --git a/episodes/fig/itk-snap-layer-inspector-info-tab.png b/episodes/fig/itk-snap-layer-inspector-info-tab.png
deleted file mode 100644
index 78c209ca..00000000
Binary files a/episodes/fig/itk-snap-layer-inspector-info-tab.png and /dev/null differ
diff --git a/episodes/fig/itk-snap-layer-inspector-metadata-tab.png b/episodes/fig/itk-snap-layer-inspector-metadata-tab.png
deleted file mode 100644
index b03f6bc3..00000000
Binary files a/episodes/fig/itk-snap-layer-inspector-metadata-tab.png and /dev/null differ
diff --git a/episodes/fig/itk-snap-main-window.svg b/episodes/fig/itk-snap-main-window.svg
deleted file mode 100644
index ea5386cf..00000000
--- a/episodes/fig/itk-snap-main-window.svg
+++ /dev/null
@@ -1,372 +0,0 @@
-
-
diff --git a/episodes/fig/itk-snap-opening-dicoms.svg b/episodes/fig/itk-snap-opening-dicoms.svg
deleted file mode 100644
index ddfa7f88..00000000
--- a/episodes/fig/itk-snap-opening-dicoms.svg
+++ /dev/null
@@ -1,133 +0,0 @@
-
-
diff --git a/episodes/fig/itk-snap-overlay-semi-transparent-cropped-affine.png b/episodes/fig/itk-snap-overlay-semi-transparent-cropped-affine.png
deleted file mode 100644
index 542fcbed..00000000
Binary files a/episodes/fig/itk-snap-overlay-semi-transparent-cropped-affine.png and /dev/null differ
diff --git a/episodes/fig/itk-snap-save-ct-for-pet.png b/episodes/fig/itk-snap-save-ct-for-pet.png
deleted file mode 100644
index 415616e3..00000000
Binary files a/episodes/fig/itk-snap-save-ct-for-pet.png and /dev/null differ
diff --git a/episodes/fig/itk-snap-thumbnails.png b/episodes/fig/itk-snap-thumbnails.png
deleted file mode 100644
index 2665e465..00000000
Binary files a/episodes/fig/itk-snap-thumbnails.png and /dev/null differ
diff --git a/episodes/fig/itk-snap-tools-reorient-image.png b/episodes/fig/itk-snap-tools-reorient-image.png
deleted file mode 100644
index c05a3934..00000000
Binary files a/episodes/fig/itk-snap-tools-reorient-image.png and /dev/null differ
diff --git a/episodes/fig/itk-snap-viewing-dicoms.svg b/episodes/fig/itk-snap-viewing-dicoms.svg
deleted file mode 100644
index 66383053..00000000
--- a/episodes/fig/itk-snap-viewing-dicoms.svg
+++ /dev/null
@@ -1,4593 +0,0 @@
-
-
diff --git a/episodes/fig/itk-snap_ct_for_pet_cropped_nii_gz.png b/episodes/fig/itk-snap_ct_for_pet_cropped_nii_gz.png
deleted file mode 100644
index 4551a949..00000000
Binary files a/episodes/fig/itk-snap_ct_for_pet_cropped_nii_gz.png and /dev/null differ
diff --git a/episodes/fig/layers-panel.png b/episodes/fig/layers-panel.png
deleted file mode 100644
index 5c7c7ee4..00000000
Binary files a/episodes/fig/layers-panel.png and /dev/null differ
diff --git a/episodes/fig/level1.png b/episodes/fig/level1.png
deleted file mode 100644
index a673dfe3..00000000
Binary files a/episodes/fig/level1.png and /dev/null differ
diff --git a/episodes/fig/level2.png b/episodes/fig/level2.png
deleted file mode 100644
index 36a6476c..00000000
Binary files a/episodes/fig/level2.png and /dev/null differ
diff --git a/episodes/fig/level3.png b/episodes/fig/level3.png
deleted file mode 100644
index aa5ceaf4..00000000
Binary files a/episodes/fig/level3.png and /dev/null differ
diff --git a/episodes/fig/new-itk-snap-window.png b/episodes/fig/new-itk-snap-window.png
deleted file mode 100644
index b4bec624..00000000
Binary files a/episodes/fig/new-itk-snap-window.png and /dev/null differ
diff --git a/episodes/fig/open-additional-image.png b/episodes/fig/open-additional-image.png
deleted file mode 100644
index dc9444cd..00000000
Binary files a/episodes/fig/open-additional-image.png and /dev/null differ
diff --git a/episodes/fig/overlay-wizard.png b/episodes/fig/overlay-wizard.png
deleted file mode 100644
index 1488d2f9..00000000
Binary files a/episodes/fig/overlay-wizard.png and /dev/null differ
diff --git a/episodes/fig/p4-comp-jac.png b/episodes/fig/p4-comp-jac.png
deleted file mode 100644
index 664f9bb6..00000000
Binary files a/episodes/fig/p4-comp-jac.png and /dev/null differ
diff --git a/episodes/fig/p4-def-field-comp.png b/episodes/fig/p4-def-field-comp.png
deleted file mode 100644
index 412b2678..00000000
Binary files a/episodes/fig/p4-def-field-comp.png and /dev/null differ
diff --git a/episodes/fig/p4-def-field-zoom.png b/episodes/fig/p4-def-field-zoom.png
deleted file mode 100644
index 8a78024f..00000000
Binary files a/episodes/fig/p4-def-field-zoom.png and /dev/null differ
diff --git a/episodes/fig/p4-reg-output-numlev1.png b/episodes/fig/p4-reg-output-numlev1.png
deleted file mode 100644
index 6ff54299..00000000
Binary files a/episodes/fig/p4-reg-output-numlev1.png and /dev/null differ
diff --git a/episodes/fig/p4-reg-output-sigelastic0.5.png b/episodes/fig/p4-reg-output-sigelastic0.5.png
deleted file mode 100644
index 33dc26d6..00000000
Binary files a/episodes/fig/p4-reg-output-sigelastic0.5.png and /dev/null differ
diff --git a/episodes/fig/p4-reg-output-sigelastic0.png b/episodes/fig/p4-reg-output-sigelastic0.png
deleted file mode 100644
index 66e11eda..00000000
Binary files a/episodes/fig/p4-reg-output-sigelastic0.png and /dev/null differ
diff --git a/episodes/fig/p4-reg-output-sigfluid0.png b/episodes/fig/p4-reg-output-sigfluid0.png
deleted file mode 100644
index 300b6f17..00000000
Binary files a/episodes/fig/p4-reg-output-sigfluid0.png and /dev/null differ
diff --git a/episodes/fig/p4-reg-output.png b/episodes/fig/p4-reg-output.png
deleted file mode 100644
index 81e52cf5..00000000
Binary files a/episodes/fig/p4-reg-output.png and /dev/null differ
diff --git a/episodes/fig/p4-reg-output2-numlev1.png b/episodes/fig/p4-reg-output2-numlev1.png
deleted file mode 100644
index e539fe18..00000000
Binary files a/episodes/fig/p4-reg-output2-numlev1.png and /dev/null differ
diff --git a/episodes/fig/p4-reg-output2-sigelastic0.5.png b/episodes/fig/p4-reg-output2-sigelastic0.5.png
deleted file mode 100644
index 7c8df108..00000000
Binary files a/episodes/fig/p4-reg-output2-sigelastic0.5.png and /dev/null differ
diff --git a/episodes/fig/p4-reg-output2-sigelastic0.png b/episodes/fig/p4-reg-output2-sigelastic0.png
deleted file mode 100644
index d28011cd..00000000
Binary files a/episodes/fig/p4-reg-output2-sigelastic0.png and /dev/null differ
diff --git a/episodes/fig/p4-reg-output2-sigfluid0.png b/episodes/fig/p4-reg-output2-sigfluid0.png
deleted file mode 100644
index 13d155c3..00000000
Binary files a/episodes/fig/p4-reg-output2-sigfluid0.png and /dev/null differ
diff --git a/episodes/fig/p4-reg-output2.png b/episodes/fig/p4-reg-output2.png
deleted file mode 100644
index 8b7bf789..00000000
Binary files a/episodes/fig/p4-reg-output2.png and /dev/null differ
diff --git a/episodes/fig/itk-snap-additional-image-1.png b/episodes/fig/practical1-additional-image-1.png
similarity index 100%
rename from episodes/fig/itk-snap-additional-image-1.png
rename to episodes/fig/practical1-additional-image-1.png
diff --git a/episodes/fig/itk-snap-additional-image-2.png b/episodes/fig/practical1-additional-image-2.png
similarity index 100%
rename from episodes/fig/itk-snap-additional-image-2.png
rename to episodes/fig/practical1-additional-image-2.png
diff --git a/episodes/fig/itk-snap-additional-image-3.png b/episodes/fig/practical1-additional-image-3.png
similarity index 100%
rename from episodes/fig/itk-snap-additional-image-3.png
rename to episodes/fig/practical1-additional-image-3.png
diff --git a/episodes/fig/itk-snap-additional-image-4.png b/episodes/fig/practical1-additional-image-4.png
similarity index 100%
rename from episodes/fig/itk-snap-additional-image-4.png
rename to episodes/fig/practical1-additional-image-4.png
diff --git a/episodes/fig/itk-snap-additional-image-5.png b/episodes/fig/practical1-additional-image-5.png
similarity index 100%
rename from episodes/fig/itk-snap-additional-image-5.png
rename to episodes/fig/practical1-additional-image-5.png
diff --git a/episodes/fig/itk-snap-additional-image-6.png b/episodes/fig/practical1-additional-image-6.png
similarity index 100%
rename from episodes/fig/itk-snap-additional-image-6.png
rename to episodes/fig/practical1-additional-image-6.png
diff --git a/episodes/fig/itk-snap-aligned-images-1.png b/episodes/fig/practical1-aligned-images-1.png
similarity index 100%
rename from episodes/fig/itk-snap-aligned-images-1.png
rename to episodes/fig/practical1-aligned-images-1.png
diff --git a/episodes/fig/itk-snap-aligned-images-2.png b/episodes/fig/practical1-aligned-images-2.png
similarity index 100%
rename from episodes/fig/itk-snap-aligned-images-2.png
rename to episodes/fig/practical1-aligned-images-2.png
diff --git a/episodes/fig/itk-snap-cropped-ct-aligned-1.png b/episodes/fig/practical1-cropped-ct-aligned-1.png
similarity index 100%
rename from episodes/fig/itk-snap-cropped-ct-aligned-1.png
rename to episodes/fig/practical1-cropped-ct-aligned-1.png
diff --git a/episodes/fig/itk-snap-cropped-ct-aligned-2.png b/episodes/fig/practical1-cropped-ct-aligned-2.png
similarity index 100%
rename from episodes/fig/itk-snap-cropped-ct-aligned-2.png
rename to episodes/fig/practical1-cropped-ct-aligned-2.png
diff --git a/episodes/fig/itk-snap-cropped-ct.png b/episodes/fig/practical1-cropped-ct.png
similarity index 100%
rename from episodes/fig/itk-snap-cropped-ct.png
rename to episodes/fig/practical1-cropped-ct.png
diff --git a/episodes/fig/itk-snap-ct-pet-overlay.png b/episodes/fig/practical1-ct-pet-overlay.png
similarity index 100%
rename from episodes/fig/itk-snap-ct-pet-overlay.png
rename to episodes/fig/practical1-ct-pet-overlay.png
diff --git a/episodes/fig/itk-snap-cursor-inspector.png b/episodes/fig/practical1-cursor-inspector.png
similarity index 100%
rename from episodes/fig/itk-snap-cursor-inspector.png
rename to episodes/fig/practical1-cursor-inspector.png
diff --git a/episodes/fig/itk-snap-image-layer-inspector-color-map.png b/episodes/fig/practical1-image-layer-inspector-color-map.png
similarity index 100%
rename from episodes/fig/itk-snap-image-layer-inspector-color-map.png
rename to episodes/fig/practical1-image-layer-inspector-color-map.png
diff --git a/episodes/fig/itk-snap-image-layer-inspector-contrast.png b/episodes/fig/practical1-image-layer-inspector-contrast.png
similarity index 100%
rename from episodes/fig/itk-snap-image-layer-inspector-contrast.png
rename to episodes/fig/practical1-image-layer-inspector-contrast.png
diff --git a/episodes/fig/itk-snap-image-layer-inspector-general.png b/episodes/fig/practical1-image-layer-inspector-general.png
similarity index 100%
rename from episodes/fig/itk-snap-image-layer-inspector-general.png
rename to episodes/fig/practical1-image-layer-inspector-general.png
diff --git a/episodes/fig/itk-snap-image-layer-inspector-info.png b/episodes/fig/practical1-image-layer-inspector-info.png
similarity index 100%
rename from episodes/fig/itk-snap-image-layer-inspector-info.png
rename to episodes/fig/practical1-image-layer-inspector-info.png
diff --git a/episodes/fig/itk-snap-image-layer-inspector-metadata.png b/episodes/fig/practical1-image-layer-inspector-metadata.png
similarity index 100%
rename from episodes/fig/itk-snap-image-layer-inspector-metadata.png
rename to episodes/fig/practical1-image-layer-inspector-metadata.png
diff --git a/episodes/fig/itk-snap-info-16.png b/episodes/fig/practical1-info-16.png
similarity index 100%
rename from episodes/fig/itk-snap-info-16.png
rename to episodes/fig/practical1-info-16.png
diff --git a/episodes/fig/itk-snap-info-32.png b/episodes/fig/practical1-info-32.png
similarity index 100%
rename from episodes/fig/itk-snap-info-32.png
rename to episodes/fig/practical1-info-32.png
diff --git a/episodes/fig/itk-snap-inhale-and-exhale-scans.png b/episodes/fig/practical1-inhale-and-exhale-scans.png
similarity index 100%
rename from episodes/fig/itk-snap-inhale-and-exhale-scans.png
rename to episodes/fig/practical1-inhale-and-exhale-scans.png
diff --git a/episodes/fig/itk-snap-inhale-scan.png b/episodes/fig/practical1-inhale-scan.png
similarity index 100%
rename from episodes/fig/itk-snap-inhale-scan.png
rename to episodes/fig/practical1-inhale-scan.png
diff --git a/episodes/fig/itk-snap-layout-pref-reminder.png b/episodes/fig/practical1-layout-pref-reminder.png
similarity index 100%
rename from episodes/fig/itk-snap-layout-pref-reminder.png
rename to episodes/fig/practical1-layout-pref-reminder.png
diff --git a/episodes/fig/itk-snap-metadata-16.png b/episodes/fig/practical1-metadata-16.png
similarity index 100%
rename from episodes/fig/itk-snap-metadata-16.png
rename to episodes/fig/practical1-metadata-16.png
diff --git a/episodes/fig/itk-snap-metadata-32.png b/episodes/fig/practical1-metadata-32.png
similarity index 100%
rename from episodes/fig/itk-snap-metadata-32.png
rename to episodes/fig/practical1-metadata-32.png
diff --git a/episodes/fig/itk-snap-misaligned-images.png b/episodes/fig/practical1-misaligned-images.png
similarity index 100%
rename from episodes/fig/itk-snap-misaligned-images.png
rename to episodes/fig/practical1-misaligned-images.png
diff --git a/episodes/fig/itk-snap-nifti-header.png b/episodes/fig/practical1-nifti-header.png
similarity index 100%
rename from episodes/fig/itk-snap-nifti-header.png
rename to episodes/fig/practical1-nifti-header.png
diff --git a/episodes/fig/itk-snap-open-image-1.png b/episodes/fig/practical1-open-image-1.png
similarity index 100%
rename from episodes/fig/itk-snap-open-image-1.png
rename to episodes/fig/practical1-open-image-1.png
diff --git a/episodes/fig/itk-snap-open-image-2.png b/episodes/fig/practical1-open-image-2.png
similarity index 100%
rename from episodes/fig/itk-snap-open-image-2.png
rename to episodes/fig/practical1-open-image-2.png
diff --git a/episodes/fig/itk-snap-open-image-3.png b/episodes/fig/practical1-open-image-3.png
similarity index 100%
rename from episodes/fig/itk-snap-open-image-3.png
rename to episodes/fig/practical1-open-image-3.png
diff --git a/episodes/fig/itk-snap-recent-images.png b/episodes/fig/practical1-recent-images.png
similarity index 100%
rename from episodes/fig/itk-snap-recent-images.png
rename to episodes/fig/practical1-recent-images.png
diff --git a/episodes/fig/itk-snap-save-image-1.png b/episodes/fig/practical1-save-image-1.png
similarity index 100%
rename from episodes/fig/itk-snap-save-image-1.png
rename to episodes/fig/practical1-save-image-1.png
diff --git a/episodes/fig/trans_rot_final_image_pad0_nn.gif b/episodes/fig/practical2-NN-anim.gif
similarity index 100%
rename from episodes/fig/trans_rot_final_image_pad0_nn.gif
rename to episodes/fig/practical2-NN-anim.gif
diff --git a/episodes/fig/trans_rot_image_nn_spline.png b/episodes/fig/practical2-NN-spline-final.png
similarity index 100%
rename from episodes/fig/trans_rot_image_nn_spline.png
rename to episodes/fig/practical2-NN-spline-final.png
diff --git a/episodes/fig/trans_rot_final_image_compose_nearest.gif b/episodes/fig/practical2-comp-NN-anim.gif
similarity index 100%
rename from episodes/fig/trans_rot_final_image_compose_nearest.gif
rename to episodes/fig/practical2-comp-NN-anim.gif
diff --git a/episodes/fig/trans_rot_final_image_compose_linear.gif b/episodes/fig/practical2-comp-linear-anim.gif
similarity index 100%
rename from episodes/fig/trans_rot_final_image_compose_linear.gif
rename to episodes/fig/practical2-comp-linear-anim.gif
diff --git a/episodes/fig/trans_rot_final_image_compose_splinef2d.gif b/episodes/fig/practical2-comp-spline-anim.gif
similarity index 100%
rename from episodes/fig/trans_rot_final_image_compose_splinef2d.gif
rename to episodes/fig/practical2-comp-spline-anim.gif
diff --git a/episodes/fig/trans_diff_image_intlims.png b/episodes/fig/practical2-diff-image-half-int-lims.png
similarity index 100%
rename from episodes/fig/trans_diff_image_intlims.png
rename to episodes/fig/practical2-diff-image-half-int-lims.png
diff --git a/episodes/fig/trans_diff_image_10.5.png b/episodes/fig/practical2-diff-image-half.png
similarity index 100%
rename from episodes/fig/trans_diff_image_10.5.png
rename to episodes/fig/practical2-diff-image-half.png
diff --git a/episodes/fig/trans_diff_image.png b/episodes/fig/practical2-diff-image-whole.png
similarity index 100%
rename from episodes/fig/trans_diff_image.png
rename to episodes/fig/practical2-diff-image-whole.png
diff --git a/episodes/fig/trans_rot_final_image_pad0.gif b/episodes/fig/practical2-pad0-anim.gif
similarity index 100%
rename from episodes/fig/trans_rot_final_image_pad0.gif
rename to episodes/fig/practical2-pad0-anim.gif
diff --git a/episodes/fig/trans_rot_final_image_pad0.png b/episodes/fig/practical2-pad0-final.png
similarity index 100%
rename from episodes/fig/trans_rot_final_image_pad0.png
rename to episodes/fig/practical2-pad0-final.png
diff --git a/episodes/fig/trans_rot_final_image.gif b/episodes/fig/practical2-padnan-anim.gif
similarity index 100%
rename from episodes/fig/trans_rot_final_image.gif
rename to episodes/fig/practical2-padnan-anim.gif
diff --git a/episodes/fig/trans_rot_final_image.png b/episodes/fig/practical2-padnan-final.png
similarity index 100%
rename from episodes/fig/trans_rot_final_image.png
rename to episodes/fig/practical2-padnan-final.png
diff --git a/episodes/fig/trans_rot_final_image_push.gif b/episodes/fig/practical2-push-anim.gif
similarity index 100%
rename from episodes/fig/trans_rot_final_image_push.gif
rename to episodes/fig/practical2-push-anim.gif
diff --git a/episodes/fig/trans_disp_image.png b/episodes/fig/practical2-reorientated-image.png
similarity index 100%
rename from episodes/fig/trans_disp_image.png
rename to episodes/fig/practical2-reorientated-image.png
diff --git a/episodes/fig/trans_rot_image.png b/episodes/fig/practical2-rotated-image.png
similarity index 100%
rename from episodes/fig/trans_rot_image.png
rename to episodes/fig/practical2-rotated-image.png
diff --git a/episodes/fig/trans_rot_final_image_pad0_spline.gif b/episodes/fig/practical2-spline-anim.gif
similarity index 100%
rename from episodes/fig/trans_rot_final_image_pad0_spline.gif
rename to episodes/fig/practical2-spline-anim.gif
diff --git a/episodes/fig/trans_disp_image_translated.png b/episodes/fig/practical2-translated-image.png
similarity index 100%
rename from episodes/fig/trans_disp_image_translated.png
rename to episodes/fig/practical2-translated-image.png
diff --git a/episodes/fig/p3-entropies-values.png b/episodes/fig/practical3-entropies-values.png
similarity index 100%
rename from episodes/fig/p3-entropies-values.png
rename to episodes/fig/practical3-entropies-values.png
diff --git a/episodes/fig/p3-images.png b/episodes/fig/practical3-images.png
similarity index 100%
rename from episodes/fig/p3-images.png
rename to episodes/fig/practical3-images.png
diff --git a/episodes/fig/practical3-in-out-alignment-anim.gif b/episodes/fig/practical3-in-out-alignment-anim.gif
new file mode 100644
index 00000000..57b0bb82
Binary files /dev/null and b/episodes/fig/practical3-in-out-alignment-anim.gif differ
diff --git a/episodes/fig/p3-int8-intensities.png b/episodes/fig/practical3-int8-intensities.png
similarity index 100%
rename from episodes/fig/p3-int8-intensities.png
rename to episodes/fig/practical3-int8-intensities.png
diff --git a/episodes/fig/p3-mi-values.png b/episodes/fig/practical3-mi-values.png
similarity index 100%
rename from episodes/fig/p3-mi-values.png
rename to episodes/fig/practical3-mi-values.png
diff --git a/episodes/fig/p3-msd-values.png b/episodes/fig/practical3-msd-values.png
similarity index 100%
rename from episodes/fig/p3-msd-values.png
rename to episodes/fig/practical3-msd-values.png
diff --git a/episodes/fig/p3-ncc-values.png b/episodes/fig/practical3-ncc-values.png
similarity index 100%
rename from episodes/fig/p3-ncc-values.png
rename to episodes/fig/practical3-ncc-values.png
diff --git a/episodes/fig/p3-nmi-values.png b/episodes/fig/practical3-nmi-values.png
similarity index 100%
rename from episodes/fig/p3-nmi-values.png
rename to episodes/fig/practical3-nmi-values.png
diff --git a/episodes/fig/p3-rotated-image.png b/episodes/fig/practical3-rotated-image.png
similarity index 100%
rename from episodes/fig/p3-rotated-image.png
rename to episodes/fig/practical3-rotated-image.png
diff --git a/episodes/fig/p3-ssd-values.png b/episodes/fig/practical3-ssd-values.png
similarity index 100%
rename from episodes/fig/p3-ssd-values.png
rename to episodes/fig/practical3-ssd-values.png
diff --git a/episodes/fig/practical4-comp-def-field-jac.png b/episodes/fig/practical4-comp-def-field-jac.png
new file mode 100644
index 00000000..7b6b0d8a
Binary files /dev/null and b/episodes/fig/practical4-comp-def-field-jac.png differ
diff --git a/episodes/fig/practical4-comp-final.png b/episodes/fig/practical4-comp-final.png
new file mode 100644
index 00000000..ada84e16
Binary files /dev/null and b/episodes/fig/practical4-comp-final.png differ
diff --git a/episodes/fig/practical4-def-field-zoom.png b/episodes/fig/practical4-def-field-zoom.png
new file mode 100644
index 00000000..128867d2
Binary files /dev/null and b/episodes/fig/practical4-def-field-zoom.png differ
diff --git a/episodes/fig/practical4-defparams-end.png b/episodes/fig/practical4-defparams-end.png
new file mode 100644
index 00000000..7201710a
Binary files /dev/null and b/episodes/fig/practical4-defparams-end.png differ
diff --git a/episodes/fig/practical4-defparams-final.png b/episodes/fig/practical4-defparams-final.png
new file mode 100644
index 00000000..36f31739
Binary files /dev/null and b/episodes/fig/practical4-defparams-final.png differ
diff --git a/episodes/fig/p4-reg-first-step.png b/episodes/fig/practical4-defparams-start.png
similarity index 100%
rename from episodes/fig/p4-reg-first-step.png
rename to episodes/fig/practical4-defparams-start.png
diff --git a/episodes/fig/p4-diff-images.png b/episodes/fig/practical4-diff-images.png
similarity index 100%
rename from episodes/fig/p4-diff-images.png
rename to episodes/fig/practical4-diff-images.png
diff --git a/episodes/fig/practical4-elastic0-final.png b/episodes/fig/practical4-elastic0-final.png
new file mode 100644
index 00000000..3d3c798f
Binary files /dev/null and b/episodes/fig/practical4-elastic0-final.png differ
diff --git a/episodes/fig/practical4-elastic05-final-jac.png b/episodes/fig/practical4-elastic05-final-jac.png
new file mode 100644
index 00000000..1a609865
Binary files /dev/null and b/episodes/fig/practical4-elastic05-final-jac.png differ
diff --git a/episodes/fig/practical4-elastic05-final.png b/episodes/fig/practical4-elastic05-final.png
new file mode 100644
index 00000000..f2f8bd53
Binary files /dev/null and b/episodes/fig/practical4-elastic05-final.png differ
diff --git a/episodes/fig/practical4-fig-controls.png b/episodes/fig/practical4-fig-controls.png
new file mode 100644
index 00000000..7d9ef6a6
Binary files /dev/null and b/episodes/fig/practical4-fig-controls.png differ
diff --git a/episodes/fig/practical4-fluid0-end.png b/episodes/fig/practical4-fluid0-end.png
new file mode 100644
index 00000000..c2c7a331
Binary files /dev/null and b/episodes/fig/practical4-fluid0-end.png differ
diff --git a/episodes/fig/practical4-fluid0-final.png b/episodes/fig/practical4-fluid0-final.png
new file mode 100644
index 00000000..0e4ea13a
Binary files /dev/null and b/episodes/fig/practical4-fluid0-final.png differ
diff --git a/episodes/fig/p4-displayed-images.png b/episodes/fig/practical4-images.png
similarity index 100%
rename from episodes/fig/p4-displayed-images.png
rename to episodes/fig/practical4-images.png
diff --git a/episodes/fig/p4-jac-binarymask.png b/episodes/fig/practical4-jac-binarymask.png
similarity index 100%
rename from episodes/fig/p4-jac-binarymask.png
rename to episodes/fig/practical4-jac-binarymask.png
diff --git a/episodes/fig/practical4-numlev1-final.png b/episodes/fig/practical4-numlev1-final.png
new file mode 100644
index 00000000..ee5d595e
Binary files /dev/null and b/episodes/fig/practical4-numlev1-final.png differ
diff --git a/episodes/fig/registration_animation.gif b/episodes/fig/registration_animation.gif
deleted file mode 100644
index 5481ed5e..00000000
Binary files a/episodes/fig/registration_animation.gif and /dev/null differ
diff --git a/episodes/fig/registration_animation_comp.gif b/episodes/fig/registration_animation_comp.gif
deleted file mode 100644
index 95708e63..00000000
Binary files a/episodes/fig/registration_animation_comp.gif and /dev/null differ
diff --git a/episodes/fig/registration_animation_elastic0.5fluid1.gif b/episodes/fig/registration_animation_elastic0.5fluid1.gif
deleted file mode 100644
index 3ffce01e..00000000
Binary files a/episodes/fig/registration_animation_elastic0.5fluid1.gif and /dev/null differ
diff --git a/episodes/fig/registration_animation_elastic0.gif b/episodes/fig/registration_animation_elastic0.gif
deleted file mode 100644
index d5c7ae7f..00000000
Binary files a/episodes/fig/registration_animation_elastic0.gif and /dev/null differ
diff --git a/episodes/fig/registration_animation_fluid0.gif b/episodes/fig/registration_animation_fluid0.gif
deleted file mode 100644
index 222a7fb7..00000000
Binary files a/episodes/fig/registration_animation_fluid0.gif and /dev/null differ
diff --git a/episodes/fig/registration_animation_level1.gif b/episodes/fig/registration_animation_level1.gif
deleted file mode 100644
index 1957520c..00000000
Binary files a/episodes/fig/registration_animation_level1.gif and /dev/null differ
diff --git a/episodes/fig/rotated_images.gif b/episodes/fig/rotated_images.gif
deleted file mode 100644
index 19ae5322..00000000
Binary files a/episodes/fig/rotated_images.gif and /dev/null differ
diff --git a/episodes/fig/itk-snap-install-1.png b/episodes/fig/setup-itk-snap-install-1.png
similarity index 100%
rename from episodes/fig/itk-snap-install-1.png
rename to episodes/fig/setup-itk-snap-install-1.png
diff --git a/episodes/fig/itk-snap-install-2.png b/episodes/fig/setup-itk-snap-install-2.png
similarity index 100%
rename from episodes/fig/itk-snap-install-2.png
rename to episodes/fig/setup-itk-snap-install-2.png
diff --git a/episodes/fig/setup-vscode-ipmi-reg-folder.png b/episodes/fig/setup-vscode-ipmi-reg-folder.png
new file mode 100644
index 00000000..0ebe7ad9
Binary files /dev/null and b/episodes/fig/setup-vscode-ipmi-reg-folder.png differ
diff --git a/episodes/fig/setup-vscode-select-kernel-install-packages.png b/episodes/fig/setup-vscode-select-kernel-install-packages.png
new file mode 100644
index 00000000..e0e0f479
Binary files /dev/null and b/episodes/fig/setup-vscode-select-kernel-install-packages.png differ
diff --git a/episodes/fig/setup-vscode-select-kernel-ipmi-reg.png b/episodes/fig/setup-vscode-select-kernel-ipmi-reg.png
new file mode 100644
index 00000000..447e0f33
Binary files /dev/null and b/episodes/fig/setup-vscode-select-kernel-ipmi-reg.png differ
diff --git a/episodes/fig/vscode-ideas-reg.png b/episodes/fig/vscode-ideas-reg.png
deleted file mode 100644
index d6e1106a..00000000
Binary files a/episodes/fig/vscode-ideas-reg.png and /dev/null differ
diff --git a/episodes/fig/vscode-python-env.png b/episodes/fig/vscode-python-env.png
deleted file mode 100644
index f448826a..00000000
Binary files a/episodes/fig/vscode-python-env.png and /dev/null differ
diff --git a/episodes/fig/vscode-select-kernel.png b/episodes/fig/vscode-select-kernel.png
deleted file mode 100644
index 0bda1512..00000000
Binary files a/episodes/fig/vscode-select-kernel.png and /dev/null differ
diff --git a/episodes/fig/vscode.png b/episodes/fig/vscode.png
deleted file mode 100644
index 623b9e83..00000000
Binary files a/episodes/fig/vscode.png and /dev/null differ
diff --git a/episodes/files/demonsReg.py b/episodes/files/demonsReg.py
new file mode 100644
index 00000000..36aa7d11
--- /dev/null
+++ b/episodes/files/demonsReg.py
@@ -0,0 +1,427 @@
+"""
+Function to perform a registration between two 2D images using the demons algorithm.
+
+Author: Jamie McClelland, UCL
+Contributors:
+- Clea Dronne, UCL
+- Zakaria Senousy, UCL-ARC
+- Miguel Xochicale, UCL-ARC
+"""
+
+import matplotlib.pyplot as plt
+import numpy as np
+from skimage.transform import rescale, resize
+from scipy.ndimage import gaussian_filter
+from utils import dispImage, resampImageWithDefField, calcMSD, dispDefField, calcJacobian
+
+def demonsReg(source, target, sigma_elastic=1, sigma_fluid=1, num_lev=3, use_composition=False,
+ use_target_grad=False, max_it=1000, check_MSD=True, disp_freq=5, disp_spacing=2,
+ scale_update_for_display=10, disp_method_df='grid', disp_method_up='arrows'):
+ """
+ SYNTAX:
+ demonsReg(source, target)
+ demonsReg(source, target, ..., variable=value, ...)
+ warped_image = demonsReg(...)
+ warped_image, def_field = demonsReg(...)
+
+ DESCRIPTION:
+ Perform a registration between the 2D source image and the 2D target
+ image using the demons algorithm. The source image is warped (resampled)
+ into the space of the target image.
+
+ The final warped image and deformation field can be returned as outputs
+ from the function.
+
+    There are a number of optional parameters which affect the registration
+    or how the results are displayed; these are explained below and can be
+    specified using variable=value inputs.
+    The default values are given after each parameter name.
+ sigma_elastic = 1
+ sigma_fluid = 1
+            the amount of elastic and fluid regularisation to apply. These values
+            specify the standard deviation of the Gaussian used to smooth the
+            update (fluid) or the displacement field (elastic). A value of 0 means
+            no smoothing is applied.
+ num_lev = 3
+ the number of levels to use in the multi-resolution scheme
+ use_composition = false
+ specifies whether the registration is performed using the classical
+ demons algorithm, where the updates are added to the current
+ transformation, or using the diffeomorphic demons algorithm, where
+ the updates are composed with the current transformation. Set
+ use_composition to true to compose the updates, or to false to add
+ the updates.
+ use_target_grad = false
+ logical (true/false) value indicating whether the target image
+ gradient or warped image gradient is used when calculating the
+ demons forces.
+ max_it = 1000
+ the maximum number of iterations to perform.
+ check_MSD = true
+ logical value indicating if the Mean Squared Difference (MSD)
+ should be checked for improvement at each iteration. If true, the
+ MSD will be evaluated at each iteration, and if there is no
+ improvement since the previous iteration the registration will move
+ to the next resolution level or finish if it is on the final level.
+        disp_freq = 5
+            the frequency with which to update the displayed images. The images
+            will be updated every disp_freq iterations. If disp_freq is set to
+            0 the images will not be updated during the registration.
+ disp_spacing = 2
+ the spacing between the grid lines or arrows when displaying the
+ deformation field and update.
+ scale_update_for_display = 10
+ the factor used to scale the update field for displaying
+ disp_method_df = 'grid'
+ the display method for the deformation field.
+ can be 'grid' or 'arrows'
+ disp_method_up = 'arrows'
+ the display method for the update. can be 'grid' or 'arrows'
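+
+    EXAMPLE (illustrative call only; the parameter values shown are arbitrary):
+        warped_image, def_field = demonsReg(source, target, sigma_elastic=0.5,
+                                            sigma_fluid=0.5, num_lev=2)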
+ """
+
+ # Make copies of full resolution images
+ source_full = source
+ target_full = target
+
+ # Prepare the figure for live update during registration process
+ fig, axs = plt.subplots(1, 3, figsize=(12, 6))
+ iteration_text = fig.text(0.5, 0.92, '', ha='center', va='top', fontsize=10, color='black')
+ fig.suptitle('Live display')
+
+ # Loop over resolution levels
+ for lev in range(1, num_lev + 1):
+ # if not final level, resample images
+ if lev != num_lev:
+ resamp_factor = np.power(2, num_lev - lev)
+ target = rescale(target_full, 1.0 / resamp_factor, mode='edge', order=3, anti_aliasing=True)
+ source = rescale(source_full, 1.0 / resamp_factor, mode='edge', order=3, anti_aliasing=True)
+ else: # if final level, we don't need to resample
+ target = target_full
+ source = source_full
+
+ X, Y = np.mgrid[0:target.shape[0], 0:target.shape[1]]
+
+ # if first level, initialise deformation and displacement fields
+ if lev == 1:
+ def_field = np.zeros((X.shape[0], X.shape[1], 2))
+ def_field[:, :, 0] = X
+ def_field[:, :, 1] = Y
+ disp_field_x = np.zeros(target.shape)
+ disp_field_y = np.zeros(target.shape)
+ else:
+ # otherwise upsample disp_field from previous level
+ disp_field_x = 2 * resize(disp_field_x, (target.shape[0], target.shape[1]), mode='edge', order=3)
+ disp_field_y = 2 * resize(disp_field_y, (target.shape[0], target.shape[1]), mode='edge', order=3)
+ # recalculate def_field for this level from disp_field
+ def_field = np.zeros((X.shape[0], X.shape[1], 2)) # clear def_field from previous level
+ def_field[:, :, 0] = X + disp_field_x
+ def_field[:, :, 1] = Y + disp_field_y
+
+        # initialise updates
+ update_x = np.zeros(target.shape)
+ update_y = np.zeros(target.shape)
+ update_def_field = np.zeros(def_field.shape)
+
+ # calculate the transformed image at the start of this level
+ warped_image = resampImageWithDefField(source, def_field)
+
+ # store the current def_field and MSD value to check for improvements at
+ # end of iteration
+ def_field_prev = def_field.copy()
+ prev_MSD = calcMSD(target, warped_image)
+
+ # if target image gradient is being used this can be calculated now as it will
+ # not change during the registration
+ if use_target_grad:
+ [img_grad_x, img_grad_y] = np.gradient(target)
+
+ # main iterative loop - repeat until max number of iterations reached
+ for it in range(max_it):
+ # plot if first iteration
+ if it == 0 and lev == 1:
+ update_live_display(axs, fig, iteration_text, source, def_field, update_x, update_y, lev, it, prev_MSD, X, Y, disp_spacing, scale_update_for_display, disp_method_df, disp_method_up)
+
+ # if the warped image gradient is used (instead of the target image gradient) this needs to be calculated
+ if not use_target_grad:
+ [img_grad_x, img_grad_y] = np.gradient(warped_image)
+
+ # calculate difference image
+ diff = target - warped_image
+ # calculate denominator of demons forces
+ denom = np.power(img_grad_x, 2) + np.power(img_grad_y, 2) + np.power(diff, 2)
+ # calculate x and y components of numerator of demons forces
+ numer_x = diff * img_grad_x
+ numer_y = diff * img_grad_y
+ # calculate the x and y components of the update
+ update_x = numer_x / denom
+ update_y = numer_y / denom
+
+ # set nan values to 0
+ update_x[np.isnan(update_x)] = 0
+ update_y[np.isnan(update_y)] = 0
+
+            # if fluid-like regularisation is used, smooth the update
+ if sigma_fluid > 0:
+ update_x = gaussian_filter(update_x, sigma_fluid, mode='nearest')
+ update_y = gaussian_filter(update_y, sigma_fluid, mode='nearest')
+
+ # update displacement field using addition (original demons) or composition (diffeomorphic demons)
+ if use_composition:
+ # compose update with current transformation
+ # this can be done by treating the deformation field as an image, and resampling it with the update using resampImageWithDefField
+ # to do this, we first need to calculate a deformation field from the update (which is stored as a displacement field)
+ update_def_field[:, :, 0] = update_x + X
+ update_def_field[:, :, 1] = update_y + Y
+                # resample the deformation field with the update deformation field, saving the result over the previous deformation field
+                def_field = resampImageWithDefField(def_field, update_def_field)
+                # the result is a deformation field representing the update composed with the original deformation field
+ # however, to apply elastic smoothing (below) we need to convert this back to a displacement field
+ disp_field_x = def_field[:, :, 0] - X
+ disp_field_y = def_field[:, :, 1] - Y
+ # replace nans in disp field with 0s
+ disp_field_x[np.isnan(disp_field_x)] = 0
+ disp_field_y[np.isnan(disp_field_y)] = 0
+ else:
+ # add the update to the current displacement field
+ disp_field_x = disp_field_x + update_x
+ disp_field_y = disp_field_y + update_y
+
+            # if elastic-like regularisation is used, smooth the displacement field
+ if sigma_elastic > 0:
+ disp_field_x = gaussian_filter(disp_field_x, sigma_elastic, mode='nearest')
+ disp_field_y = gaussian_filter(disp_field_y, sigma_elastic, mode='nearest')
+
+ # update deformation field from disp field
+ def_field[:, :, 0] = disp_field_x + X
+ def_field[:, :, 1] = disp_field_y + Y
+
+ # transform the image using the updated deformation field
+ warped_image = resampImageWithDefField(source, def_field)
+
+ # calculate MSD between target and warped image and print results
+ MSD = calcMSD(target, warped_image)
+ print('Level {0:d}, Iteration {1:d}: MSD = {2:f}\n'.format(lev, it, MSD))
+
+ # update images if required for this iteration
+ if disp_freq > 0 and it % disp_freq == 0:
+ update_live_display(axs, fig, iteration_text, warped_image, def_field, update_x, update_y, lev, it, MSD, X, Y, disp_spacing, scale_update_for_display, disp_method_df, disp_method_up)
+
+ # check for improvement in MSD if required
+ if check_MSD and MSD >= prev_MSD:
+ # restore previous results and finish level
+ def_field = def_field_prev
+ warped_image = resampImageWithDefField(source, def_field)
+ print('No improvement in MSD')
+ break
+
+ # update previous values of def_field and MSD
+ def_field_prev, prev_MSD = def_field.copy(), MSD.copy()
+
+ # update the live display with the final results
+ update_live_display(axs, fig, iteration_text, warped_image, def_field, update_x, update_y, lev, it, MSD, X, Y, disp_spacing, scale_update_for_display, disp_method_df, disp_method_up)
+ # and make the final display appear
+ final_display(source_full, target_full, warped_image, def_field, disp_spacing, disp_method_df)
+
+ # return the transformed image and the deformation field
+ return warped_image, def_field
+
+
+def update_live_display(axs, fig, iteration_text, warped_image, def_field, update_x, update_y, lev, it, MSD, X, Y, disp_spacing, scale_update_for_display, disp_method_df, disp_method_up):
+ """
+ Updates the live display of the registration process during each iteration.
+
+ SYNTAX:
+ update_live_display(axs, fig, iteration_text, warped_image, def_field,
+                        update_x, update_y, lev, it, MSD, X, Y,
+ disp_spacing, scale_update_for_display,
+ disp_method_df, disp_method_up)
+
+ DESCRIPTION:
+ This function updates the display of three visualizations:
+ 1. The current warped image.
+ 2. The deformation field.
+ 3. The update field for the current iteration.
+
+ The function also updates the iteration text that shows the current
+    multi-resolution level and iteration, along with the Mean Squared
+    Difference (MSD) between the warped and target images.
+
+ axs: list of Matplotlib axes
+ A list of Matplotlib axes objects to display the warped image,
+ deformation field, and update field.
+ fig: Matplotlib figure object
+ The figure object used to update the canvas.
+ iteration_text: Matplotlib Text object
+ The text object to display the current iteration and MSD.
+ warped_image: 2D array
+ The current warped image being displayed.
+ def_field: 3D array
+ The current deformation field (displacement vectors) being displayed.
+ update_x, update_y: 2D arrays
+ The update field's x and y components used for the current iteration.
+ lev: int
+ The current level of the multi-resolution registration scheme.
+ it: int
+ The current iteration number.
+ MSD: float
+ The current Mean Squared Difference (MSD) value.
+ X, Y: 2D arrays
+ The grid coordinates of the original image.
+ disp_spacing: int
+ The spacing between grid lines or arrows in the deformation and update fields.
+ scale_update_for_display: float
+ Scaling factor applied to the update field for visualization purposes.
+ disp_method_df: str
+ Method used to display the deformation field. Options are 'grid' or 'arrows'.
+ disp_method_up: str
+ Method used to display the update field. Options are 'grid' or 'arrows'.
+ OUTPUT:
+ Updates the plots and text on the figure in real-time to reflect the current state
+ of the registration process. The function does not return any value.
+ """
+
+ for ax in axs:
+ ax.clear()
+
+ plt.sca(axs[0])
+ dispImage(warped_image, title='Warped Image')
+ x_lims, y_lims = plt.xlim(), plt.ylim()
+
+ plt.sca(axs[1])
+ dispDefField(def_field, spacing=disp_spacing, plot_type=disp_method_df)
+ axs[1].set_xlim(x_lims)
+ axs[1].set_ylim(y_lims)
+ axs[1].set_title('Deformation Field')
+
+ plt.sca(axs[2])
+ up_field_to_display = scale_update_for_display * np.dstack((update_x, update_y))
+ up_field_to_display += np.dstack((X, Y))
+ dispDefField(up_field_to_display, spacing=disp_spacing, plot_type=disp_method_up)
+ axs[2].set_xlim(x_lims)
+ axs[2].set_ylim(y_lims)
+ axs[2].set_title('Update Field')
+
+ iteration_text.set_text(f'Level {lev}, Iteration {it}: MSD = {MSD:.6f}')
+ plt.tight_layout()
+ fig.canvas.draw()
+ fig.canvas.flush_events()
+
+
+def final_display(source, target, warped_image, def_field, disp_spacing, disp_method_df):
+ """
+ Displays the final results of image registration, allowing interactive navigation
+ between the source, target, and warped images, as well as between the deformation
+ field and the Jacobian.
+
+ SYNTAX:
+ final_display(source, target, warped_image, def_field,
+ disp_spacing, disp_method_df)
+
+ DESCRIPTION:
+ This function creates an interactive display with three subplots:
+ 1. A viewer for the source, target, and warped images.
+ 2. A visualization of either the deformation field or the Jacobian, depending on user input.
+ 3. A difference image, showing the difference between the currently selected image
+ (source, target, or warped) and the target image.
+
+ The display allows the user to:
+ - Use the left and right arrow keys to switch between the source, target, and warped images.
+ - Use the up and down arrow keys to toggle between displaying the deformation field
+ and the Jacobian in the second subplot.
+
+ The deformation field is visualized using the provided spacing and display method.
+
+ PARAMETERS:
+ source: 2D array
+ The source image used in the registration.
+ target: 2D array
+ The target image used in the registration.
+ warped_image: 2D array
+ The final warped image after the registration process.
+ def_field: 3D array
+ The deformation field (displacement vectors) resulting from the registration process.
+ disp_spacing: int
+ The spacing between grid lines or arrows when displaying the deformation field.
+ disp_method_df: str
+ The display method for the deformation field. Options are 'grid' or 'arrows'.
+
+ OUTPUT:
+ Displays the source, target, and warped images, deformation field or Jacobian,
+ and a difference image in an interactive figure. No values are returned.
+
+ INTERACTIVITY:
+ - Left/Right Arrow Keys: Switch between source, target, and warped images.
+ - Up/Down Arrow Keys: Switch between the deformation field and the Jacobian.
+ """
+
+    # Initialise index trackers as single-element lists so the nested functions can update them
+ current_image_index = [2]
+ current_mode_index = [0]
+
+ # Define the images and titles
+ images = [source, target, warped_image]
+ image_titles = ['Source Image', 'Target Image', 'Warped Image']
+ modes = ['Deformation Field', 'Jacobian']
+
+ def on_key(event):
+ if event.key == 'right':
+ current_image_index[0] = (current_image_index[0] + 1) % len(images)
+ elif event.key == 'left':
+ current_image_index[0] = (current_image_index[0] - 1) % len(images)
+ elif event.key == 'up' or event.key == 'down':
+ current_mode_index[0] = (current_mode_index[0] + 1) % len(modes)
+ update_display()
+
+ def update_display():
+ axs_combined[0].clear()
+ plt.sca(axs_combined[0])
+ dispImage(images[current_image_index[0]], title=image_titles[current_image_index[0]])
+ x_lims, y_lims = plt.xlim(), plt.ylim()
+
+ axs_combined[1].clear()
+ plt.sca(axs_combined[1])
+ if modes[current_mode_index[0]] == 'Deformation Field':
+ dispDefField(def_field, spacing=disp_spacing, plot_type=disp_method_df)
+ axs_combined[1].set_xlim(x_lims)
+ axs_combined[1].set_ylim(y_lims)
+ axs_combined[1].set_title('Deformation Field')
+
+ else:
+ [jacobian, _] = calcJacobian(def_field)
+ dispImage(jacobian, title='Jacobian')
+ plt.set_cmap('jet')
+
+ axs_combined[2].clear()
+ plt.sca(axs_combined[2])
+ diff_image = images[current_image_index[0]] - target
+ dispImage(diff_image, title='Difference Image')
+
+ fig_combined.canvas.draw()
+
+ # Create a single figure with 3 subplots
+ fig_combined, axs_combined = plt.subplots(1, 3, figsize=(12, 6))
+ fig_combined.suptitle('Final display')
+
+ # Display initial images
+ plt.sca(axs_combined[0])
+ dispImage(images[current_image_index[0]], title=image_titles[current_image_index[0]])
+ x_lims, y_lims = plt.xlim(), plt.ylim()
+
+ plt.sca(axs_combined[1])
+ dispDefField(def_field, spacing=disp_spacing, plot_type=disp_method_df)
+    axs_combined[1].set_title('Deformation Field')
+    axs_combined[1].set_xlim(x_lims)
+    axs_combined[1].set_ylim(y_lims)
+
+ plt.sca(axs_combined[2])
+ diff_image = images[current_image_index[0]] - target
+ dispImage(diff_image, title='Difference Image')
+
+ # Add instructions for navigating images
+    fig_combined.text(0.5, 0.1, 'Press <- or -> to navigate between the source, target and warped images; press Up or Down to switch between the deformation field and the Jacobian', ha='center', va='top', fontsize=12, color='black')
+
+ # Connect the key event handler to the figure
+ fig_combined.canvas.mpl_connect('key_press_event', on_key)
+
+ plt.tight_layout()
\ No newline at end of file
diff --git a/episodes/files/ipmi_reg_python_env.yml b/episodes/files/ipmi_reg_python_env.yml
new file mode 100644
index 00000000..2124c4cb
--- /dev/null
+++ b/episodes/files/ipmi_reg_python_env.yml
@@ -0,0 +1,11 @@
+name: ideas-reg
+channels:
+ - conda-forge
+ - defaults
+dependencies:
+ - ipykernel=6.29.5
+ - matplotlib=3.9.2
+ - nibabel=5.3.2
+ - numpy=2.1.3
+ - scikit-image=0.24.0
+ - scipy=1.14.1
\ No newline at end of file
diff --git a/episodes/files/practical1_notebook.ipynb b/episodes/files/practical1_notebook.ipynb
new file mode 100644
index 00000000..4768cb39
--- /dev/null
+++ b/episodes/files/practical1_notebook.ipynb
@@ -0,0 +1,272 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Practical 1: An introduction to medical imaging data"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 1. Visualising images with ITK-SNAP\n",
+ "\n",
+ "#### This section is completed using ITK-SNAP.\n",
+ "\n",
+ "## 2. Viewing and understanding the NIfTI header with NiBabel\n",
+ "\n",
+ "### 2.1. Reading and displaying the NIfTI header"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# import packages\n",
+ "import numpy as np\n",
+ "import nibabel as nib"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# load the nifti file saved in section 1\n",
+ "ct_for_pet_nii = nib.load(\"data/practical1/CT_for_PET.nii.gz\")\n",
+ "\n",
+    "# this returns a Nifti1Image object\n",
+ "print(type(ct_for_pet_nii))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# display the size/shape of the image\n",
+ "print(ct_for_pet_nii.shape)\n",
+ "\n",
+ "# display the data type of the image\n",
+ "print(ct_for_pet_nii.get_data_dtype())\n",
+ "\n",
+ "# display the affine matrix for the image\n",
+ "print(ct_for_pet_nii.affine)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# print the header\n",
+ "print(ct_for_pet_nii.header)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### 2.2 Specifying the affine transform"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# print the matrix represented by the qform\n",
+ "print(ct_for_pet_nii.get_qform())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 3. Modifying NIfTI images and headers with NiBabel\n",
+ "\n",
+ "### 3.1. Changing the data type and qform code"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# read the image data of the Nifti1Image object\n",
+    "# it gets loaded as a numpy array and cast to float64\n",
+ "ct_for_pet_img = ct_for_pet_nii.get_fdata()\n",
+ "\n",
+ "# check the type of the array\n",
+ "print(type(ct_for_pet_img))\n",
+ "\n",
+ "# check the type of the array elements\n",
+ "print(ct_for_pet_img.dtype)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# change the data type to float32\n",
+ "ct_for_pet_nii.set_data_dtype(np.float32)\n",
+ "\n",
+    "# set the qform code to unknown (0)\n",
+ "ct_for_pet_nii.set_qform(None, code='unknown')\n",
+ "\n",
+ "# check the header has been updated\n",
+ "print(ct_for_pet_nii.header)\n",
+ "\n",
+ "# save the float32 image to disk\n",
+ "nib.save(ct_for_pet_nii, 'data/practical1/CT_for_PET_float32.nii.gz')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Check data types with NiBabel"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# load the new image and check the data type is float32\n",
+ "ct_for_pet_float32_nii = nib.load(\"data/practical1/CT_for_PET_float32.nii.gz\")\n",
+ "print(ct_for_pet_float32_nii.get_data_dtype())\n",
+ "\n",
+ "# load the original image and check the data type is still int16\n",
+ "ct_for_pet_orig_nii = nib.load(\"data/practical1/CT_for_PET.nii.gz\")\n",
+ "print(ct_for_pet_orig_nii.get_data_dtype())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### 3.2. Cropping images"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# create a new array containing a copy of the desired slices\n",
+ "x_first = 91\n",
+ "x_last = 390\n",
+ "y_first = 131\n",
+ "y_last = 375\n",
+ "z_first = 21\n",
+ "z_last = 155\n",
+ "ct_for_pet_cropped_img = ct_for_pet_img[x_first:x_last+1, y_first:y_last+1, z_first:z_last+1].copy()\n",
+ "\n",
+ "# check the size of the arrays containing the original image and the cropped image\n",
+ "print(ct_for_pet_img.shape)\n",
+ "print(ct_for_pet_cropped_img.shape)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# create a new Nifti1Image object using the header and affine from the uncropped image\n",
+ "ct_for_pet_cropped_nii = nib.nifti1.Nifti1Image(ct_for_pet_cropped_img, ct_for_pet_nii.affine, ct_for_pet_nii.header)\n",
+ "\n",
+ "# check the shape and header of the new Nifti1Image object\n",
+ "print(ct_for_pet_cropped_nii.shape)\n",
+ "print(ct_for_pet_cropped_nii.header)\n",
+ "\n",
+ "# save the cropped image\n",
+ "nib.save(ct_for_pet_cropped_nii, \"data/practical1/CT_for_PET_cropped.nii.gz\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# calculate the world coordinates of first voxel in the cropped image\n",
+ "cropped_origin = ct_for_pet_nii.affine @ np.array([x_first, y_first, z_first, 1])\n",
+ "print(cropped_origin)\n",
+ "\n",
+ "# use these to update the corresponding values in the affine matrix for the cropped image\n",
+ "ct_for_pet_cropped_nii.affine[0,3] = cropped_origin[0]\n",
+ "ct_for_pet_cropped_nii.affine[1,3] = cropped_origin[1]\n",
+ "ct_for_pet_cropped_nii.affine[2,3] = cropped_origin[2]\n",
+ "print(ct_for_pet_cropped_nii.affine)\n",
+ "\n",
+ "# save the cropped image - this also updates the sform values in the header using the updated affine\n",
+ "print(ct_for_pet_cropped_nii.header.get_sform())\n",
+ "nib.save(ct_for_pet_cropped_nii, \"data/practical1/CT_for_PET_cropped_aligned.nii.gz\")\n",
+ "print(ct_for_pet_cropped_nii.header.get_sform())\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# we can do the same as above using nibabel's slicer attribute\n",
+ "ct_for_pet_slicer_nii = ct_for_pet_nii.slicer[x_first:x_last+1, y_first:y_last+1, z_first:z_last+1]\n",
+ "print(ct_for_pet_slicer_nii.header)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### 3.3. Aligning data"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 61,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "# TODO: Add your code here to align the centre of the inhale_BH_CT with the centre of CT_for_PET\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "ideas-reg",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.13.1"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/episodes/files/practical2_notebook.ipynb b/episodes/files/practical2_notebook.ipynb
new file mode 100644
index 00000000..f2bc4db2
--- /dev/null
+++ b/episodes/files/practical2_notebook.ipynb
@@ -0,0 +1,327 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Practical 2: Affine transformations and resampling images\n",
+ "\n",
+ "## 1. Loading and displaying images"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# import the required packages\n",
+ "import numpy as np\n",
+ "import skimage.io\n",
+ "import matplotlib\n",
+ "import matplotlib.pyplot as plt\n",
+ "import utils as ut\n",
+ "\n",
+ "# the next lines make figures display in separate windows instead of in the notebook\n",
+ "# this is required for the animations to work correctly\n",
+ "matplotlib.use('TkAgg')\n",
+ "plt.ion()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Load the 2D lung MRI image using the imread function from the scikit-image python library\n",
+ "img = skimage.io.imread('data/practical2/lung_MRI_slice.png')\n",
+ "\n",
+ "# Check the data type of the image\n",
+ "print(img.dtype)\n",
+ "\n",
+ "# convert data type to double to avoid errors when processing integers\n",
+ "img = np.double(img)\n",
+ "\n",
+ "# check new data type\n",
+ "print(img.dtype)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: Add your own code here to reorientate the image into 'standard orientation'\n",
+ "\n",
+ "\n",
+ "# display the image using the dispImage function, it should open in a separate window\n",
+ "ut.dispImage(img)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 2. Translating and resampling images"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Create a 2D array representing a 2D affine matrix for a translation by 10 pixels in x and 20 pixels in y\n",
+ "# Note: numpy has a matrix class, but the numpy documentation recommends using standard arrays instead, so arrays should be used in these exercises\n",
+ "# Matrix multiplication can be performed between two arrays using the @ operator or the numpy.matmul function\n",
+ "\n",
+ "# TODO: edit the array to represent a 2D affine matrix for a translation by 10 pixels in x and 20 pixels in y\n",
+ "T = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n",
+ "print(T)"
+ ]
+ },
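+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "*Illustrative example (not part of the exercise template):* the cell below sketches how a translation can be written as a 3 x 3 affine matrix in homogeneous coordinates and applied to a 2D point with the `@` operator. The translation values used here (3 and 7) are arbitrary and are not the answer to the exercise above."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# illustrative sketch only - an arbitrary translation of 3 pixels in x and 7 pixels in y\n",
+ "T_example = np.array([[1, 0, 3], [0, 1, 7], [0, 0, 1]])\n",
+ "\n",
+ "# a 2D point (x, y) = (5, 5) written in homogeneous coordinates as (x, y, 1)\n",
+ "p = np.array([5, 5, 1])\n",
+ "\n",
+ "# apply the affine matrix with the @ operator (np.matmul(T_example, p) is equivalent)\n",
+ "print(T_example @ p)"
+ ]
+ },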
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: edit the code below to create a deformation field from the affine matrix\n",
+ "# and then use the deformation field to resample the image\n",
+ "# The function definitions in utils.py explain what the inputs and outputs of the functions should be\n",
+ "\n",
+ "def_field = ut.defFieldFromAffineMatrix()\n",
+ "img_resampled = ut.resampImageWithDefField()\n",
+ "\n",
+ "# display the resampled image\n",
+ "ut.dispImage(img_resampled)"
+ ]
+ },
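+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "*For reference only:* the docstrings in `utils.py` describe the inputs these utility functions expect. The cell below is a minimal sketch of complete calls, using the matrix `T` defined earlier; the variable names ending in `_example` are just placeholders."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# sketch only - complete calls based on the docstrings in utils.py\n",
+ "num_pix_x, num_pix_y = img.shape\n",
+ "def_field_example = ut.defFieldFromAffineMatrix(T, num_pix_x, num_pix_y)\n",
+ "img_resampled_example = ut.resampImageWithDefField(img, def_field_example)\n",
+ "ut.dispImage(img_resampled_example)"
+ ]
+ },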
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# check the value of pixel 255,255 in the resampled image\n",
+ "print(img_resampled[255,255])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: add code to resample the image using nearest neighbour and splinef2d interpolation\n",
+ "img_resampled_nn = \n",
+ "img_resampled_sp = \n",
+ "\n",
+ "# TODO: display the results in separate windows\n",
+ "plt.figure()\n",
+ "\n",
+ "plt.figure()\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: display the difference images between the new results and the original image in separate windows\n",
+ "plt.figure()\n",
+ "\n",
+ "plt.figure()\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: repeat the above steps for a translation by 10.5 pixels in x and 20.5 pixels in y\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: display the difference images with intensity limits of [-20, 20]\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 3. Rotating images"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# define a function to calculate the affine matrix for a rotation about a point\n",
+ "def affineMatrixForRotationAboutPoint(theta, p_coords):\n",
+ " \"\"\"\n",
+ " function to calculate the affine matrix corresponding to an anticlockwise\n",
+ " rotation about a point\n",
+ "\n",
+ " SYNTAX:\n",
+ " aff_mat = affineMatrixForRotationAboutPoint(theta, p_coords)\n",
+ " \n",
+ " INPUTS:\n",
+ " theta - the angle of the rotation, specified in degrees\n",
+ " p_coords - the 2D coordinates of the point that is the centre of rotation.\n",
+ " p_coords[0] is the x coordinate,\n",
+ " p_coords[1] is the y coordinate\n",
+ " \n",
+ " OUTPUTS:\n",
+ " aff_mat - a numpy array representing the 3 x 3 affine matrix\n",
+ " \"\"\"\n",
+ " \n",
+ " # TODO: implement the function\n",
+ " \n",
+ " # return the affine matrix\n",
+ " return aff_mat"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Close any open figures before continuing\n",
+ "plt.close('all')\n",
+ "\n",
+ "# TODO: Use the above function to calculate the affine matrix representing an anticlockwise rotation\n",
+ "# of 5 degrees about the centre of the image\n",
+ "R = affineMatrixForRotationAboutPoint()\n",
+ "print(R)\n",
+ "\n",
+ "# TODO: use this rotation to transform the original image\n",
+ "\n",
+ "\n",
+ "# TODO: display the resampled image using the intensity limits of the original image\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: apply the same transformation again to the resampled image and display the result. \n",
+ "# repeat this 71 times so that the image appears to rotate a full 360 degrees.\n",
+ "for n in range(71):\n",
+ " \n",
+ " plt.pause(0.05) # add a short pause so the figure display updates"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 31,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: repeat above code with 0 padding value\n",
+ "# need to first resample the original image and display it\n",
+ "\n",
+ "plt.pause(0.05) # add a short pause so the figure display updates\n",
+ "for n in range(71):\n",
+ " \n",
+ " plt.pause(0.05) # add a short pause so the figure display updates"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 32,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: repeat above code using nearest neighbour interpolation\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 34,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: repeat above code using splinef2d interpolation\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 4. Composing transformations"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: write code below that makes an animation of the rotating image as above (using linear interpolation)\n",
+ "# but composes the transformations to avoid multiple resamplings of the image\n"
+ ]
+ },
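+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "*One possible sketch (with assumptions):* instead of resampling the already-resampled image on every iteration, the affine matrices can be composed by matrix multiplication and the original image resampled once per frame. The sketch below uses `ut.affineMatrixForRotationAboutPoint` from `utils.py` and assumes `img` is the original (reorientated) lung MRI slice, with the centre of rotation taken to be the centre of the image."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# sketch of a composition loop - assumes img is the original (reorientated) image\n",
+ "num_pix_x, num_pix_y = img.shape\n",
+ "centre = [(num_pix_x - 1) / 2, (num_pix_y - 1) / 2]\n",
+ "R_step = ut.affineMatrixForRotationAboutPoint(5, centre)\n",
+ "\n",
+ "R_total = np.eye(3)\n",
+ "for n in range(72):\n",
+ "    # compose the accumulated transformation with one more 5 degree rotation\n",
+ "    R_total = R_step @ R_total\n",
+ "    def_field = ut.defFieldFromAffineMatrix(R_total, num_pix_x, num_pix_y)\n",
+ "    # always resample the ORIGINAL image so interpolation errors do not accumulate\n",
+ "    img_rotated = ut.resampImageWithDefField(img, def_field)\n",
+ "    ut.dispImage(img_rotated)\n",
+ "    plt.pause(0.05)"
+ ]
+ },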
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: repeat the animation using nearest neighbour and splinef2d interpolation\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 5. Push interpolation\n",
+ "Use push interpolation instead of pull interpolation"
+ ]
+ },
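+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "*For reference only:* `utils.py` also provides `resampImageWithDefFieldPushInterp`, which takes the same image and deformation field as `resampImageWithDefField` but performs push interpolation (it has no pad value argument). The cell below sketches a minimal call, reusing the translation matrix `T` from section 2."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# sketch only - push interpolation using the utility function from utils.py\n",
+ "num_pix_x, num_pix_y = img.shape\n",
+ "def_field = ut.defFieldFromAffineMatrix(T, num_pix_x, num_pix_y)\n",
+ "img_pushed = ut.resampImageWithDefFieldPushInterp(img, def_field)\n",
+ "ut.dispImage(img_pushed)"
+ ]
+ },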
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: copy and modify the above code to use push interpolation instead of pull interpolation\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "ideas-reg",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.13.1"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/episodes/files/practical3_notebook.ipynb b/episodes/files/practical3_notebook.ipynb
new file mode 100644
index 00000000..11ac2a1f
--- /dev/null
+++ b/episodes/files/practical3_notebook.ipynb
@@ -0,0 +1,380 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Practical 3: Exploring similarity measures for cost functions"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# import the required packages\n",
+ "import numpy as np\n",
+ "import skimage.io\n",
+ "import matplotlib\n",
+ "import matplotlib.pyplot as plt\n",
+ "import utils as ut\n",
+ "\n",
+ "# the next lines make figures display in separate windows instead of in the notebook\n",
+ "# this is required for the animations to work correctly\n",
+ "matplotlib.use('TkAgg')\n",
+ "plt.ion()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 1. Inspect the images"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Load in the three 2D images required for these exercises using the imread function from the scikit-image python library\n",
+ "ct_img_int8 = skimage.io.imread('data/practical3/ct_slice_int8.png')\n",
+ "ct_img_int16 = skimage.io.imread('data/practical3/ct_slice_int16.png')\n",
+ "mr_img_int16 = skimage.io.imread('data/practical3/mr_slice_int16.png')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: Display the data type of each image\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: Convert all the images to double data type and reorient them into cartesian orientation\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: Display each image in a separate figure\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 2. Moving images in and out of alignment"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Write code so that on each iteration of the loop the images are all rotated by the current value of theta about the point 10,10 \n",
+ "# and the resampled 8 bit CT image is displayed\n",
+ "theta = np.arange(-90, 91, 1)\n",
+ "num_pix_x, num_pix_y = ct_img_int8.shape\n",
+ "\n",
+ "for n in range(theta.size): \n",
+ " # create affine matrix and corresponding deformation field\n",
+ " aff_mat = #TODO: add here\n",
+ " def_field = #TODO: add here\n",
+ "\n",
+ " # resample the images\n",
+ " ct_img_int8_resamp = #TODO: add here\n",
+ " ct_img_int16_resamp = #TODO: add here\n",
+ " mr_img_int16_resamp = #TODO: add here\n",
+ " \n",
+ " # TODO: display the transformed 8 bit CT image \n",
+ " \n",
+ " # add a short pause so the figure display updates\n",
+ " plt.pause(0.05)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 3. Sum of Squared Differences (SSD) and Mean of Squared Differences (MSD)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# do the same as above but this time calculate the SSD between the original and resampled images\n",
+ "SSDs = np.zeros((theta.size, 4))\n",
+ "\n",
+ "for n in range(theta.size): \n",
+ " #TODO: copy your code from the previous cell to create the affine matrix and corresponding deformation field, and to resample the images.\n",
+ " \n",
+ " \n",
+ " # Calculate the SSD values between the original and resampled images\n",
+ " # 1) The original 16-bit CT image and the resampled 16-bit CT image\n",
+ " # 2) The original 16-bit CT image and the resampled 8-bit CT image\n",
+ " # 3) The original 16-bit CT image and the resampled MR image\n",
+ " # 4) The original 8-bit CT image and the resampled 8-bit CT image\n",
+ " SSDs[n, 0] = #TODO: add here\n",
+ " SSDs[n, 1] = #TODO: add here\n",
+ " SSDs[n, 2] = #TODO: add here\n",
+ " SSDs[n, 3] = #TODO: add here"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# plot the SSD values (y axis) against the angle theta (x axis)\n",
+ "plt.figure()\n",
+ "plt.subplot(2,2,1)\n",
+ "plt.plot(theta, SSDs[:, 0])\n",
+ "plt.title('SSD: 16-bit CT and 16-bit CT')\n",
+ "plt.subplot(2,2,2)\n",
+ "plt.plot(theta, SSDs[:, 1])\n",
+ "plt.title('SSD: 16-bit CT and 8-bit CT')\n",
+ "plt.subplot(2,2,3)\n",
+ "plt.plot(theta, SSDs[:, 2])\n",
+ "plt.title('SSD: 16-bit CT and 16-bit MR')\n",
+ "plt.subplot(2,2,4)\n",
+ "plt.plot(theta, SSDs[:, 3])\n",
+ "plt.title('SSD: 8-bit CT and 8-bit CT')\n",
+ "plt.tight_layout()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# do the same as above but this time calculate the MSD between the original and transformed images\n",
+ "MSDs = np.zeros((theta.size, 4))\n",
+ "\n",
+ "for n in range(theta.size):\n",
+ " #TODO: copy your code from the previous cell to create the affine matrix and corresponding deformation field, and to resample the images.\n",
+ " \n",
+ " \n",
+ " # Calculate the MSD values between the original and resampled images\n",
+ " # 1) The original 16-bit CT image and the resampled 16-bit CT image\n",
+ " # 2) The original 16-bit CT image and the resampled 8-bit CT image\n",
+ " # 3) The original 16-bit CT image and the resampled MR image\n",
+ " # 4) The original 8-bit CT image and the resampled 8-bit CT image\n",
+ " MSDs[n, 0] = #TODO: add here\n",
+ " MSDs[n, 1] = #TODO: add here\n",
+ " MSDs[n, 2] = #TODO: add here\n",
+ " MSDs[n, 3] = #TODO: add here\n",
+ " "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# plot the MSD values (y axis) against the angle theta (x axis)\n",
+ "plt.figure()\n",
+ "plt.subplot(2,2,1)\n",
+ "plt.plot(theta, MSDs[:, 0])\n",
+ "plt.title('MSD: 16-bit CT and 16-bit CT')\n",
+ "plt.subplot(2,2,2)\n",
+ "plt.plot(theta, MSDs[:, 1])\n",
+ "plt.title('MSD: 16-bit CT and 8-bit CT')\n",
+ "plt.subplot(2,2,3)\n",
+ "plt.plot(theta, MSDs[:, 2])\n",
+ "plt.title('MSD: 16-bit CT and 16-bit MR')\n",
+ "plt.subplot(2,2,4)\n",
+ "plt.plot(theta, MSDs[:, 3])\n",
+ "plt.title('MSD: 8-bit CT and 8-bit CT')\n",
+ "plt.tight_layout()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 4. Normalized Cross Correlation (NCC) and Normalized Mutual Information (NMI)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Define a function to calculate the NCC between two images\n",
+ "def calcNCC(A, B):\n",
+ " \"\"\"\n",
+ " function to calculate the normalised cross correlation between two images\n",
+ "\n",
+ " SYNTAX:\n",
+ " NCC = calcNCC(A, B)\n",
+ "\n",
+ " INPUTS:\n",
+ " A - an image stored as a 2D array\n",
+ "    B - an image stored as a 2D array. B must be the same size as A\n",
+ "\n",
+ " OUTPUTS:\n",
+ " NCC - the value of the normalised cross correlation\n",
+ "\n",
+ " NOTE\n",
+ " if either of the images contain NaN values these pixels should be\n",
+ " ignored when calculating the NCC.\n",
+ " \"\"\"\n",
+ "\n",
+ " # TODO: Remove pixels that contain NaN in either image\n",
+ " \n",
+ " # TODO: Calculate the mean and std of each image\n",
+ " \n",
+ " # TODO: Calculate and return the NCC\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# do the same as above but this time calculate the NCC, joint and marginal entropies between the original and transformed images\n",
+ "NCCs = np.zeros((theta.size, 4))\n",
+ "H_ABs = np.zeros((theta.size, 4))\n",
+ "H_As = np.zeros((theta.size, 4))\n",
+ "H_Bs = np.zeros((theta.size, 4))\n",
+ "\n",
+ "for n in range(theta.size): \n",
+ " #TODO: copy your code from the previous cell to create the affine matrix and corresponding deformation field, and to resample the images.\n",
+ " \n",
+ " # Calculate the NCC values\n",
+ " NCCs[n, 0] = #TODO: add here\n",
+ " NCCs[n, 1] = #TODO: add here\n",
+ " NCCs[n, 2] = #TODO: add here\n",
+ " NCCs[n, 3] = #TODO: add here\n",
+ " \n",
+ " # Calculate the joint and marginal entropies\n",
+ " H_ABs[n, 0], H_As[n, 0], H_Bs[n, 0] = #TODO: add here\n",
+ " H_ABs[n, 1], H_As[n, 1], H_Bs[n, 1] = #TODO: add here\n",
+ " H_ABs[n, 2], H_As[n, 2], H_Bs[n, 2] = #TODO: add here\n",
+ " H_ABs[n, 3], H_As[n, 3], H_Bs[n, 3] = #TODO: add here\n",
+ " \n",
+ "# Calculate the mutual information and normalised mutual information\n",
+ "MIs = #TODO: add here\n",
+ "NMIs = #TODO: add here"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# plot the results for NCC, H_AB, MI, and NMI in separate figures\n",
+ "plt.figure()\n",
+ "plt.subplot(2,2,1)\n",
+ "plt.plot(theta, NCCs[:, 0])\n",
+ "plt.title('NCC: 16-bit CT and 16-bit CT')\n",
+ "plt.subplot(2,2,2)\n",
+ "plt.plot(theta, NCCs[:, 1])\n",
+ "plt.title('NCC: 16-bit CT and 8-bit CT')\n",
+ "plt.subplot(2,2,3)\n",
+ "plt.plot(theta, NCCs[:, 2])\n",
+ "plt.title('NCC: 16-bit CT and 16-bit MR')\n",
+ "plt.subplot(2,2,4)\n",
+ "plt.plot(theta, NCCs[:, 3])\n",
+ "plt.title('NCC: 8-bit CT and 8-bit CT')\n",
+ "plt.tight_layout()\n",
+ "\n",
+ "plt.figure()\n",
+ "plt.subplot(2,2,1)\n",
+ "plt.plot(theta, H_ABs[:, 0])\n",
+ "plt.title('H_AB: 16-bit CT and 16-bit CT')\n",
+ "plt.subplot(2,2,2)\n",
+ "plt.plot(theta, H_ABs[:, 1])\n",
+ "plt.title('H_AB: 16-bit CT and 8-bit CT')\n",
+ "plt.subplot(2,2,3)\n",
+ "plt.plot(theta, H_ABs[:, 2])\n",
+ "plt.title('H_AB: 16-bit CT and 16-bit MR')\n",
+ "plt.subplot(2,2,4)\n",
+ "plt.plot(theta, H_ABs[:, 3])\n",
+ "plt.title('H_AB: 8-bit CT and 8-bit CT')\n",
+ "plt.tight_layout()\n",
+ "\n",
+ "plt.figure()\n",
+ "plt.subplot(2,2,1)\n",
+ "plt.plot(theta, MIs[:, 0])\n",
+ "plt.title('MI: 16-bit CT and 16-bit CT')\n",
+ "plt.subplot(2,2,2)\n",
+ "plt.plot(theta, MIs[:, 1])\n",
+ "plt.title('MI: 16-bit CT and 8-bit CT')\n",
+ "plt.subplot(2,2,3)\n",
+ "plt.plot(theta, MIs[:, 2])\n",
+ "plt.title('MI: 16-bit CT and 16-bit MR')\n",
+ "plt.subplot(2,2,4)\n",
+ "plt.plot(theta, MIs[:, 3])\n",
+ "plt.title('MI: 8-bit CT and 8-bit CT')\n",
+ "plt.tight_layout()\n",
+ "\n",
+ "plt.figure()\n",
+ "plt.subplot(2,2,1)\n",
+ "plt.plot(theta, NMIs[:, 0])\n",
+ "plt.title('NMI: 16-bit CT and 16-bit CT')\n",
+ "plt.subplot(2,2,2)\n",
+ "plt.plot(theta, NMIs[:, 1])\n",
+ "plt.title('NMI: 16-bit CT and 8-bit CT')\n",
+ "plt.subplot(2,2,3)\n",
+ "plt.plot(theta, NMIs[:, 2])\n",
+ "plt.title('NMI: 16-bit CT and 16-bit MR')\n",
+ "plt.subplot(2,2,4)\n",
+ "plt.plot(theta, NMIs[:, 3])\n",
+ "plt.title('NMI: 8-bit CT and 8-bit CT')\n",
+ "plt.tight_layout()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "ipmi_reg",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.13.1"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/episodes/files/practical4_notebook.ipynb b/episodes/files/practical4_notebook.ipynb
new file mode 100644
index 00000000..ab9501f8
--- /dev/null
+++ b/episodes/files/practical4_notebook.ipynb
@@ -0,0 +1,306 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Practical 4: Exploring the Demons registration algorithm"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# import the required packages\n",
+ "import numpy as np\n",
+ "import skimage.io\n",
+ "import matplotlib\n",
+ "import matplotlib.pyplot as plt\n",
+ "import utils as ut\n",
+ "\n",
+ "# import the demonsReg function from the demonsReg.py file, which is separate from the usual utils.py file\n",
+ "from demonsReg import demonsReg\n",
+ "\n",
+ "# the next lines make figures display in separate windows instead of in the notebook\n",
+ "# this is required for the animations to work correctly\n",
+ "matplotlib.use('TkAgg')\n",
+ "plt.ion()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 1. Compare the images"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# load in the three 2D images required for these exercises using the imread function from the scikit-image python library\n",
+ "cine_MR_1 = skimage.io.imread('data/practical4/cine_MR_1.png') \n",
+ "cine_MR_2 = skimage.io.imread('data/practical4/cine_MR_2.png')\n",
+ "cine_MR_3 = skimage.io.imread('data/practical4/cine_MR_3.png')\n",
+ "\n",
+ "# TODO: Convert all the images to double data type and reorient them into cartesian orientation\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: display each image in a separate figure\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: display difference images\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: use the dispImageFlip function to compare cine_MR_1 with cine_MR_2 and compare cine_MR_1 with cine_MR_3\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 2. The `demonsReg` function"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run the demonsReg function on the cine_MR_1 and cine_MR_2 images\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 3. Regularisation"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg with sigma_elastic set to 0\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg with sigma_fluid set to 0\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg with several different values for sigma_elastic and sigma_fluid\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 4. Resolution levels"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg with the number of resolution levels set to 1\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg with the number of resolution levels set to 6\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg varying both the number of levels and the regularisation weights\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 5. Folding and the determinant of the Jacobian matrix"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg with sigma_elastic set to 0.5\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: calculate the Jacobian map from the deformation field and display it, setting the colour map and adding a colour bar\n"
+ ]
+ },
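+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "*One possible sketch (with assumptions):* it assumes the deformation field returned by `demonsReg` has been stored in a variable called `def_field` (check the `demonsReg` docstring for the exact outputs it returns). `calcJacobian` and `dispImage` come from `utils.py`, and the choice of the 'jet' colour map is arbitrary."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# sketch only - assumes def_field holds the deformation field returned by demonsReg above\n",
+ "J, J_mat = ut.calcJacobian(def_field)\n",
+ "\n",
+ "# display the Jacobian determinant map, change the colour map, and add a colour bar\n",
+ "plt.figure()\n",
+ "ut.dispImage(J)\n",
+ "plt.set_cmap('jet')\n",
+ "plt.colorbar()"
+ ]
+ },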
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: display a binary image showing the pixels where folding has occurred\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 6. Composing the updates"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg with sigma_elastic set to 0.5 and use_composition = True\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg with parameters you previously found gave a good result, but now with use_composition=True\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 7. Other parameters"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg with check_MSE set to False and the display frequency set to 50\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg with the same parameters as above but with max_it set to 500\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg using the spatial gradients of the target image instead of the source image\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 8. Registering the deep-breath image"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg with cine_MR_1 as source and cine_MR_3 as target\n",
+ "# TODO: experiment with different parameters to try and get a good result\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg with cine_MR_3 as source and cine_MR_1 as target\n",
+ "# TODO: experiment with different parameters to try and get a good result\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "ipmi_reg",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.13.1"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/episodes/files/utils.py b/episodes/files/utils.py
new file mode 100644
index 00000000..4cef311f
--- /dev/null
+++ b/episodes/files/utils.py
@@ -0,0 +1,361 @@
+"""
+utility functions for use in the image registration exercises
+
+updated Oct 24
+
+Author: Jamie McClelland, UCL
+Contributor: Clea Dronne, UCL
+"""
+
+import numpy as np
+import matplotlib.pyplot as plt
+import scipy.interpolate as scii
+
+def dispImage(img, int_lims=[], title=''):
+ """
+ Function to display a grey-scale image that is stored in 'standard
+ orientation' with y-axis on the 2nd dimension and 0 at the bottom.
+ SYNTAX:
+ dispImage(img)
+ dispImage(img, int_lims)
+
+ INPUTS:
+ img - image to be displayed
+ int_lims - the intensity limits to use when displaying the image
+               int_lims[0] = min intensity to display
+               int_lims[1] = max intensity to display
+ default = [np.nanmin(img), np.nanmax(img)]
+ title - the title to display above the image
+ """
+ if not int_lims:
+ int_lims = [np.nanmin(img), np.nanmax(img)]
+ if int_lims[0] == int_lims[1]:
+ int_lims[0] -= 1
+ int_lims[1] += 1
+
+ img = img.T
+ plt.gca().clear()
+ plt.imshow(img, cmap='gray', vmin=int_lims[0], vmax=int_lims[1], origin='lower')
+ plt.title(title)
+ plt.axis('image')
+ plt.tight_layout()
+
+def defFieldFromAffineMatrix(aff_mat, num_pix_x, num_pix_y):
+ """
+ Function to create a 2D deformation field from an affine matrix.
+
+ SYNTAX:
+ def_field = defFieldFromAffineMatrix(aff_mat, num_pix_x, num_pix_y)
+
+ INPUTS:
+ aff_mat - a 3 x 3 numpy array representing the 2D affine transformation
+ in homogeneous coordinates
+ num_pix_x - number of pixels in the deformation field along the x
+ dimension
+ num_pix_y - number of pixels in the deformation field along the y
+ dimension
+
+ OUTPUTS:
+ def_field - the 2D deformation field as a 3D numpy array
+
+ NOTES:
+ the function calculates the pixel coordinates of the deformation field
+ as:
+    x = 0, 1, ..., num_pix_x - 1
+    y = 0, 1, ..., num_pix_y - 1
+ """
+ [X, Y] = np.mgrid[0:num_pix_x, 0:num_pix_y]
+ total_pix = num_pix_x * num_pix_y
+ pix_coords = np.array([np.reshape(X, -1), np.reshape(Y, -1), np.ones(total_pix)])
+
+ trans_coords = aff_mat @ pix_coords
+
+ def_field = np.zeros((num_pix_x, num_pix_y, 2))
+ def_field[:, :, 0] = np.reshape(trans_coords[0, :], (num_pix_x, num_pix_y))
+ def_field[:, :, 1] = np.reshape(trans_coords[1, :], (num_pix_x, num_pix_y))
+
+ return def_field
+
+def resampImageWithDefField(source_img, def_field, interp_method='linear', pad_value=np.nan):
+ """
+ Function to resample a 2D image with a 2D deformation field.
+
+ SYNTAX:
+ resamp_img = resampImageWithDefField(source_img, def_field)
+ resamp_img = resampImageWithDefField(source_img, def_field, interp_method)
+ resamp_img = resampImageWithDefField(source_img, def_field, interp_method, pad_value)
+
+ INPUTS:
+ source_img - the source image to be resampled, as a 2D matrix
+ def_field - the deformation field, as a 3D array
+ interp_method - interpolation method accepted by interpn function
+ default = 'linear'
+ pad_value - the value to assign to pixels that are outside the source image
+ default = NaN
+
+ OUTPUTS:
+ resamp_img - the resampled image
+ """
+ x_coords = np.arange(source_img.shape[0], dtype='float')
+ y_coords = np.arange(source_img.shape[1], dtype='float')
+
+ return scii.interpn((x_coords, y_coords), source_img, def_field, bounds_error=False, fill_value=pad_value, method=interp_method)
+
+def resampImageWithDefFieldPushInterp(source_img, def_field, interp_method='linear'):
+ """
+ Function to resample a 2D image with a 2D deformation field using push interpolation.
+
+ SYNTAX:
+ resamp_img = resampImageWithDefFieldPushInterp(source_img, def_field)
+ resamp_img = resampImageWithDefFieldPushInterp(source_img, def_field, interp_method)
+
+ INPUTS:
+ source_img - the source image to be resampled, as a 2D matrix
+ def_field - the deformation field, as a 3D matrix
+ interp_method - interpolation method accepted by griddata function ('linear', 'nearest', or 'cubic')
+ default = 'linear'
+
+ OUTPUTS:
+ resamp_img - the resampled image
+ """
+ [X, Y] = np.mgrid[0:source_img.shape[0], 0:source_img.shape[1]]
+ pix_coords = np.array([np.reshape(X, -1), np.reshape(Y, -1)]).T
+
+ def_field_x = def_field[:, :, 0]
+ def_field_y = def_field[:, :, 1]
+ def_field_reformed = np.array([np.reshape(def_field_x, -1), np.reshape(def_field_y, -1)]).T
+ resamp_img = scii.griddata(def_field_reformed, np.reshape(source_img, -1), pix_coords, method=interp_method)
+
+ return np.reshape(resamp_img, source_img.shape)
+
+def affineMatrixForRotationAboutPoint(theta, p_coords):
+ """
+ Function to calculate the affine matrix corresponding to an anticlockwise rotation about a point.
+
+ SYNTAX:
+ aff_mat = affineMatrixForRotationAboutPoint(theta, p_coords)
+
+ INPUTS:
+ theta - the angle of the rotation, specified in degrees
+    p_coords - the 2D coordinates of the point that is the centre of rotation.
+ p_coords[0] is the x coordinate,
+ p_coords[1] is the y coordinate
+
+ OUTPUTS:
+ aff_mat - a numpy array representing the 3 x 3 affine matrix
+ """
+ theta = np.pi * theta / 180
+
+ T1 = np.array([[1, 0, -p_coords[0]], [0, 1, -p_coords[1]], [0, 0, 1]])
+ T2 = np.array([[1, 0, p_coords[0]], [0, 1, p_coords[1]], [0, 0, 1]])
+ R = np.array([[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]])
+
+ aff_mat = T2 @ R @ T1
+ return aff_mat
+
+def calcSSD(A, B):
+ """
+ Function to calculate the sum of squared differences between two images.
+
+ SYNTAX:
+ SSD = calcSSD(A, B)
+
+ INPUTS:
+ A - an image stored as a 2D array
+    B - an image stored as a 2D array. B must be the same size as A
+
+ OUTPUTS:
+ SSD - the value of the sum of squared differences
+ """
+ return np.nansum((A - B) ** 2)
+
+def calcMSD(A, B):
+ """
+ Function to calculate the mean of squared differences between two images.
+
+ SYNTAX:
+ MSD = calcMSD(A, B)
+
+ INPUTS:
+ A - an image stored as a 2D array
+ B - an image stored as a 2D array. B must be the same size as A
+
+ OUTPUTS:
+ MSD - the value of the mean of squared differences
+ """
+ return np.nanmean((A - B) ** 2)
+
+def calcEntropies(A, B, num_bins=[32, 32]):
+ """
+ Function to calculate the joint and marginal entropies for two images.
+
+ SYNTAX:
+ [H_AB, H_A, H_B] = calcEntropies(A, B)
+ [H_AB, H_A, H_B] = calcEntropies(A, B, num_bins)
+
+ INPUTS:
+ A - an image stored as a 2D array
+ B - an image stored as a 2D array. B must be the same size as A
+ num_bins - a 2 element vector specifying the number of bins to use in the joint histogram for each image
+ default = [32, 32]
+
+ OUTPUTS:
+ H_AB - the joint entropy between A and B
+ H_A - the marginal entropy in A
+ H_B - the marginal entropy in B
+ """
+ nan_inds = np.logical_or(np.isnan(A), np.isnan(B))
+ A = A[np.logical_not(nan_inds)]
+ B = B[np.logical_not(nan_inds)]
+
+ joint_hist, _, _ = np.histogram2d(A, B, bins=num_bins)
+ probs_AB = joint_hist / np.sum(joint_hist)
+
+ probs_A = np.sum(probs_AB, axis=1)
+ probs_B = np.sum(probs_AB, axis=0)
+
+ eps = np.finfo(float).eps
+ H_AB = -np.nansum(probs_AB * np.log(probs_AB + eps))
+ H_A = -np.nansum(probs_A * np.log(probs_A + eps))
+ H_B = -np.nansum(probs_B * np.log(probs_B + eps))
+
+ return H_AB, H_A, H_B
+
+def dispImageFlip(image1, image2, int_lims=[]):
+ """
+ Function to display two grey-scale images that are stored in 'standard
+ orientation' with y-axis on the 2nd dimension and 0 at the bottom. It is
+ possible to flip between the two images using the left and right arrow keys.
+ SYNTAX:
+    dispImageFlip(image1, image2)
+    dispImageFlip(image1, image2, int_lims)
+
+ INPUTS:
+ image1 - first image to be displayed
+ image2 - second image to be displayed
+ int_lims - the intensity limits to use when displaying the image
+               int_lims[0] = min intensity to display
+               int_lims[1] = max intensity to display
+ default = [min(np.nanmin(image1), np.nanmin(image2)), max(np.nanmax(image1), np.nanmax(image2))]
+ """
+
+ # Calculate the intensity limits if not provided
+ if not int_lims:
+ int_lims = [min(np.nanmin(image1), np.nanmin(image2)), max(np.nanmax(image1), np.nanmax(image2))]
+ if int_lims[0] == int_lims[1]:
+ int_lims[0] -= 1
+ int_lims[1] += 1
+
+ # Initialize a figure and axis
+ fig, ax = plt.subplots()
+
+ # Set up a list of images
+ images = [image1.T, image2.T]
+ current_image_index = [0] # Use a list to hold the index, which is mutable
+
+ # Display the first image
+ img_display = ax.imshow(images[current_image_index[0]], cmap='gray', vmin=int_lims[0], vmax=int_lims[1], origin='lower')
+ img_display.set_array(images[current_image_index[0]])
+ ax.set_aspect('equal')
+ ax.set_title(f"Image {current_image_index[0] + 1}")
+
+ # Key press event handler
+ def on_key(event):
+ if event.key == 'right':
+ current_image_index[0] = (current_image_index[0] + 1) % len(images)
+ elif event.key == 'left':
+ current_image_index[0] = (current_image_index[0] - 1) % len(images)
+
+ # Update the displayed image
+ img_display.set_array(images[current_image_index[0]])
+ ax.set_title(f"Image {current_image_index[0] + 1}")
+ plt.draw()
+
+ # Connect the event handler to the figure
+ fig.canvas.mpl_connect('key_press_event', on_key)
+ fig.text(0.5, 0.02, 'Press <- or -> to navigate between the two images.', ha='center', va='top', fontsize=8, color='black')
+
+ # Display the figure
+ plt.tight_layout()
+
+def calcJacobian(def_field):
+ """
+ A function to calculate the Jacobian from a deformation field.
+
+ SYNTAX:
+ [J, J_Mat] = calcJacobian(def_field)
+
+ INPUTS:
+ def_field - the deformation field as a 3D array
+
+ OUTPUTS:
+ J - the Jacobian determinant for each pixel in the deformation field
+ J_Mat - the full Jacobian matrix for each pixel in the deformation
+ field as a 4D array (last 2 dimensions contain 2 x 2 matrix for
+ each pixel)
+ """
+ # Calculate gradient of x component of deformation field
+ [grad_x_x, grad_x_y] = np.gradient(def_field[:, :, 0])
+ # Calculate gradient of y component of deformation field
+ [grad_y_x, grad_y_y] = np.gradient(def_field[:, :, 1])
+
+ # Initialize outputs as zeros
+ J = np.zeros_like(grad_x_x)
+ if grad_x_x.ndim == 2:
+ J_Mat = np.zeros((grad_x_x.shape[0], grad_x_x.shape[1], 2, 2))
+ elif grad_x_x.ndim == 3:
+ J_Mat = np.zeros((grad_x_x.shape[0], grad_x_x.shape[1], grad_x_x.shape[2], 2, 2))
+
+ # Loop over pixels in the deformation field
+ for x in range(grad_x_x.shape[0]):
+ for y in range(grad_x_x.shape[1]):
+ # Form the Jacobian matrix for this pixel
+ J_Mat_this_pix = np.array([[grad_x_x[x, y], grad_x_y[x, y]], [grad_y_x[x, y], grad_y_y[x, y]]])
+ # Calculate and store the determinant
+ J[x, y] = np.linalg.det(J_Mat_this_pix)
+ # Store the full matrix
+ J_Mat[x, y, :, :] = J_Mat_this_pix
+            # Note: calculating the determinant from the temporary variable above is more efficient than indexing into the full J_Mat array
+ return J, J_Mat
+
+def dispDefField(def_field, spacing=5, plot_type='grid'):
+ """
+ Function to display a deformation field.
+
+ SYNTAX:
+ dispDefField(def_field)
+ dispDefField(def_field, spacing)
+ dispDefField(def_field, spacing, plot_type)
+
+ INPUTS:
+ def_field - the deformation field as a 3D array
+ spacing - the spacing of the grids/arrows in pixels
+ default = 5
+ plot_type - the type of plot to use, 'grid' or 'arrows'
+ default = 'grid'
+ """
+ # Calculate coordinates for plotting grid-lines/arrows
+ x_inds = np.arange(0, def_field.shape[0], spacing)
+ y_inds = np.arange(0, def_field.shape[1], spacing)
+
+ # Check if plotting grids
+ if plot_type == 'grid':
+ # Plot vertical lines
+ plt.plot(def_field[x_inds, :, 0].T, def_field[x_inds, :, 1].T, 'k', linewidth=0.5)
+ # Plot horizontal lines
+ plt.plot(def_field[:, y_inds, 0], def_field[:, y_inds, 1], 'k', linewidth=0.5)
+
+ elif plot_type == 'arrows':
+ # Calculate grids of coordinates for plotting
+ [Xs, Ys] = np.meshgrid(x_inds, y_inds, indexing='ij')
+ # Calculate displacement field for plotting
+ disp_field_x = def_field[Xs, Ys, 0] - Xs
+ disp_field_y = def_field[Xs, Ys, 1] - Ys
+
+ # Plot displacements using quiver function
+ plt.quiver(Xs, Ys, disp_field_x, disp_field_y, angles='xy', scale_units='xy', scale=1)
+
+ else:
+ print('Display type must be grid or arrows')
+
+ plt.axis('image')
diff --git a/episodes/practical1.Rmd b/episodes/practical1.Rmd
index d97770e0..9a7d1c9c 100644
--- a/episodes/practical1.Rmd
+++ b/episodes/practical1.Rmd
@@ -1,41 +1,41 @@
---
-title: "An introduction to medical imaging data"
+title: "An introduction to viewing and manipulating medical imaging data"
teaching: 10
exercises: 2
-output: pdf_document
---
:::::::::::::::::::::::::::::::::::::: questions
-- How is medical imaging data typically stored?
-- What tools and methods can be used to visualise medical imaging data?
-- How can medical imaging data be manipulated and modified consistently?
+* What file formats are commonly used to store medical image data?
+* How can we view and compare medical images?
+* How can we display and modify image header information?
+* How can we modify the imaging data and the header data consistently?
::::::::::::::::::::::::::::::::::::::::::::::::
::::::::::::::::::::::::::::::::::::: objectives
-- Explore medical imaging data formats, focusing on DICOM and NIfTI.
-- Learn to use ITK-SNAP software to display medical imaging data.
-- Understand the structure and content of the NIfTI header.
-- Modify and manipulate NIfTI images and headers using Python.
+* Understand some of the key differences between the DICOM and NIfTI file formats.
+* Learn to use ITK-SNAP software to view and compare medical imaging data.
+* Explore how the NiBabel library can be used to display and modify NIfTI header and image data.
::::::::::::::::::::::::::::::::::::::::::::::::
:::::::::::::::::::::::::::::::::::::: prereq
-You should have followed the instructions from [the Setup page](../learners/setup.md) to download the course material, setup Python, and install ITK-SNAP.
+You should have followed the instructions from [the Setup page](../learners/setup.md) to access the data and Jupyter notebook for these exercises, set up Python, and install ITK-SNAP.
::::::::::::::::::::::::::::::::::::::
-The first part of this tutorial makes use of the ITK-SNAP software. The second part is based in Python, and uses the Jupyter notebook `practical1-exercises.ipynb` downloaded with the course material.
+The first part of this practical makes use of the ITK-SNAP software. The second part is based in Python, and uses this Jupyter notebook: [`practical1_notebook.ipynb`](files/practical1_notebook.ipynb).
+
### Data
In these exercises, we will be working with real-world medical imaging data from The Cancer Imaging Archive [(TCIA)](https://www.cancerimagingarchive.net/collection/ct-vs-pet-ventilation-imaging/).
TCIA is a resource of public datasets that hosts a large archive of medical images of cancer accessible for public download.
We are going to use data from the CT Ventilation as a Functional Imaging Modality for Lung Cancer Radiotherapy dataset (also known as CT-vs-PET-Ventilation-Imaging dataset).
-It contains 20 lung cancer patients who underwent exhale/inhale breath-hold CT (BHCT), free-breathing four-dimensional CT (4DCT) and Galligas PET ventilation scans in a single session on a combined 4DPET/CT scanner. We won't make use of the 4DCT.
+It contains data from 20 lung cancer patients who underwent exhale/inhale breath-hold CT (BHCT), free-breathing four-dimensional CT (4DCT), and Galligas PET ventilation scans in a single session on a combined 4DPET/CT scanner. We won't make use of the 4DCT in these exercises.
We used the scans from the patient CT-PET-VI-02. In the unzipped `data` folder, you can find them in the `practical1` folder. You should see four folders:
@@ -49,8 +49,6 @@ We used the scans from the patient CT-PET-VI-02. In the unzipped `data` folder,
#### Ethical approval for using open datasets
When using open datasets that contain images of humans, such as those from TCIA, for your own research, you will still need to get ethical approval as you do for any non-open datasets you use. You should contact your local Research Ethics Committee at your institution for details on how to obtain ethical approval for your research.
-Ethical approval has been obtained from UCL for using these datasets as part of this teaching course.
-
::::::::::::::::::::::::::::::::::::::
### Medical imaging formats
@@ -98,7 +96,7 @@ For more information, please see Anderson Winkler's blog on the [NIfTI file form
::::::::::::::::::::::::::::::::::::::
-## 1. Visualising images with ITK-SNAP
+## 1. Viewing images with ITK-SNAP
### 1.1. Getting started with ITK-SNAP
@@ -113,14 +111,14 @@ ITK-SNAP is a free, open-source, multi-platform software application used to seg
* Open the ITK-SNAP application.
* When it opens it may display a 'Layout Preference Reminder' popout window:
- 
+ 
* If this popout appears and you have not changed the settings in ITK-SNAP it should say the same as above, i.e. that radiological convention is used. In this case you can check 'Don't remind me again' and click 'No'.
* When ITK-SNAP opens you can select between the 'Getting Started' pane, the 'Recent Images' pane, or the 'Recent Workspaces' pane using the tabs near the top of the window. You should select the 'Recent Images' pane if it is not already showing.
* If you have used ITK-SNAP before it will display recent images which you can select to open. Note, for DICOM images the name displayed is the name of the file containing the first DICOM slice. This can be confusing when the folder names rather than the file names specify which scan is which, as is the case in this practical.
-
+
### 1.2. Opening and viewing DICOM images
@@ -130,15 +128,15 @@ ITK-SNAP is a free, open-source, multi-platform software application used to seg
* :warning: **Warning!** some versions of ITK-SNAP incorrectly set the 'File Format:' for the images used in this practical to '4D CTA DICOM Series'
* If this happens use the drop-down menu to set the 'File Format:' to 'DICOM Image Series' instead before clicking 'Next >'
- 
+ 
* You will then be asked to select the DICOM series to open, but as this folder only includes one series, you can click 'Next >'.
- 
+ 
* Once the image has loaded a small window will display some summary information about the image. This can be reviewed and then the window can be closed by clicking 'Finish'.
- 
+ 
* The inhale scan will now be displayed in ITK-SNAP, as shown below. On the left you will see the 'Main Toolbar', 'Cursor Inspector', 'Segmentation Labels', and '3D Toolbar'. In the rest of the window you will see four panes, three of which show orthogonal slices through the 3D volume:
* Top-left shows an axial slice.
@@ -146,7 +144,7 @@ ITK-SNAP is a free, open-source, multi-platform software application used to seg
* Bottom-right shows a coronal slice.
* Bottom-left is currently empty (but would show a 3D reconstruction of the structures that have been segmented in the image when using ITK-SNAP for segmentation)
-
+
### 1.3. Navigating around 3D images
* To navigate around the image, you can left-click in any of the three orthogonal slices to reposition the cursor to where you clicked. This will also change the slices displayed in the other two panes so that they show the slices that pass through the cursor (as indicated by the two dotted blue lines).
@@ -154,7 +152,7 @@ ITK-SNAP is a free, open-source, multi-platform software application used to seg
* If you use the mouse wheel (or two fingers on the touchpad) you will scroll through the slices in the pane where the mouse pointer currently is. You can also use the slider next to the displayed slice.
* As you move the cursor around or scroll through the slices you should notice that the information displayed in the 'Cursor Inspector' is updated accordingly. Note, the cursor position displayed here is in voxel coordinates.
- 
+ 
* To zoom in , you can use the right click and drag the cursor upwards (to zoom out, drag the cursor downwards).
* You can pan, or move the entire image, by clicking and dragging the mouse centre button (or alt + left click on Windows, option key + left click on Mac OS).
@@ -167,13 +165,13 @@ If you open a new main image in ITK-SNAP it will replace the current image, maki
* This will open a similar window to the one for opening the main image, where you can enter the name of the file you want to open or use the 'Browse...' button to locate it.
* This time select the first slice from the `exhale_BH_CT` folder, which is also called `1-001.dcm`. As before, make sure the 'File Format:' is set to 'DICOM Image Series' and click 'Next >'.
- 
+ 
* As before you will be asked to select the DICOM series to open, but there is only one series in this folder, so just click 'Next >'.
* A window will then be displayed asking 'How should the image be displayed?'
* Select 'As a separate image (shown beside other images)' and then click 'Next >'.
- 
+ 
* Once the second image is loaded, you will see two thumbnails in the top corner of each slice display.
* Click on these to swap the displayed slices from one image to the other, allowing you to easily see the similarities and differences between the images.
@@ -182,21 +180,21 @@ If you open a new main image in ITK-SNAP it will replace the current image, maki
* You can also change the displayed image by clicking on it in the 'Cursor Inspector'.
* If you compare the inhale and exhale image you will see they are very similar, although the patient's chest is further out in the inhale scan, and the rest of the anatomy has shifted accordingly.
- 
+ 
* Load an additional image as a colour overlay
* To demonstrate the use of colour overlays we are going to load the PET scan as a colour overlay on top of the corresponding CT scan. You should first load the CT scan as the main image (which will close any other images that are currently open, e.g. the inhale and exhale scans).
* Click 'File -> Open Main Image...'.
* Select the first slice from the `CT_for_PET` folder (also called `1-001.dcm`) and again make sure the 'File Format:' is set to 'DICOM Image Series' before clicking 'Next >'.
- 
+ 
* As before you will be asked to select the DICOM series to open, but there is only one series in this folder, so just click 'Next >'.
* And then the summary information about the image will be displayed, which can be reviewed before closing the window by clicking 'Finish'.
* Now click 'File -> Add Another Image...'
* Select the first slice from the `PET` folder (also called `1-001.dcm`) and check the 'File Format:' is set to 'DICOM Image Series' before clicking 'Next >'.
- 
+ 
* Once again there is only one DICOM series in the folder so just click 'Next >'.
* The window will now be displayed asking 'How should the image be displayed?'
@@ -204,16 +202,16 @@ If you open a new main image in ITK-SNAP it will replace the current image, maki
* Use the dropdown menu to set the 'Overlay color map:' to 'Hot'.
* Click 'Next >'.
- 
+ 
* The 'Image Summary' will now be displayed. This time it will include a warning about loss of precision, but this can be ignored. Click 'Finish' to close the window.
- 
+ 
* The PET scan will now be displayed in ITK-SNAP using the hot colourmap (so the lungs mostly appear in red). Depending on the version of ITK-SNAP used, the opacity of the PET scan may initially be set to 0% (so all you will see is the CT scan), 100% (so all you will see is the PET scan), or some other value (so you will see both the CT scan and the PET scan overlaid on it).
* The opacity of the PET scan can be adjusted by right-clicking on the PET scan in the 'Cursor Inspector' (where it is called *Galligas Lung*) and dragging the 'Opacity:' slider.
- 
+ 
### 1.5. Image Layer Inspector tool
The 'Image Layer Inspector' tool displays further information about the images and allows you to modify and control how they are displayed. We will now briefly look at some of the functionality provided by the 'Image Layer Inspector'.
@@ -225,7 +223,7 @@ You will see the open images listed on the left, and five tabs across the top: '
#### General tab
The 'General' tab displays the filename and the nickname for the selected image. For any 'Additional Images' the 'General' tab also enables you to select if the image is displayed as a 'Separate image' or a 'Semi-transparent overlay', and to change the 'Overlay opacity' if the image is being displayed as an overlay.
-
+
#### Contrast tab
The 'Contrast' tab enables you to adjust how the image intensities are displayed for the different images. You can use the text boxes in the 'Linear Image Contrast Adjustment:' panel to modify the range of intensity values displayed, e.g.:
@@ -236,13 +234,13 @@ The 'Contrast' tab enables you to adjust how the image intensities are displayed
The 'Reset' button can be used to return the intensity range to the original values. The 'Auto' button can be used to automatically set the intensity range based on the distribution of the intensity values in the image.
The 'Curve-Based Image Contrast Adjustment:' panel can also be used to provide more fine grained control of how the image intensities are mapped to displayed intensities. The details of how this works are beyond the scope of this practical, but feel free to have a play with it in your own time (and remember the 'Reset' button resets the curve as well).
-
+
#### Color Map tab
The 'Color Map' tab can be used to select and manipulate the colour map used for each image. The colour map can be changed for any of the images, not just those displayed as semi-transparent overlays. You can select from predefined colour maps using the dropdown menu in the 'Presets:' panel.
The 'Color Map Editor:' panel can be used to modify the colour maps, which can then be saved as new presets (using the '+' button in the 'Presets:' panel). The details of how this works are beyond the scope of this practical, but feel free to have a play with it in your own time. You can always restore the original colour map by reselecting it from the dropdown menu in the 'Presets:' panel.
-
+
#### Info tab
The 'Info' tab displays some of the important information about the selected image under 'Image Header'. It also displays the 'Cursor Coordinates' in both voxel and world units (ITK-SNAP uses the same world coordinate system as NIfTI images - see section 4.3 below for more information on NIfTI and DICOM world coordinate systems) as well as the intensity at the cursor for the selected image.
@@ -254,12 +252,12 @@ The 'Info' tab displays some of the important information about the selected ima
In ITK-SNAP the cursor can be located at the centre of any voxel in the main image. However, the voxel locations in the additional images may not be aligned with the voxels in the main image, e.g. if they have a different resolution, as for the CT and PET images here. In this case the cursor will not be located at the centre of a voxel in the additional images, hence the non-integer voxel coorindates.
-
+
#### Metadata tab
The 'Metadata' tab contains information stored in the headers of the image files, so in this case (some of) the DICOM header information for these images. As can be seen, a lot of the information stored in the DICOM headers is not required or useful for processing the images.
-
+
### 1.6 Converting DICOM images to NIfTI
As mentioned earlier, converting DICOM images to another format, such as NIfTI, is often one of the first steps when processing and analysing medical image data. ITK-SNAP can save images in NIfTI format (and a few others), so can be used to convert the DICOM images to NIfTI. To save the images in NIfTI format:
@@ -270,13 +268,13 @@ As mentioned earlier, converting DICOM images to another format, such as NIfTI,
* Note, using the `.nii.gz` extension saves the image as a compressed NIfTI file. Many softwares and libraries, including ITK-SNAP, NiBabel, and NiftyReg, can work directly with the compressed `.nii.gz` files, so these are often used to save storage space, and will be used in these exercises.
* Check the 'File Format:' is set to 'NiFTI' and click 'Finish'.
- 
+ 
* If you look in the `practical1` folder you should now see a file called `CT_for_PET.nii.gz`.
* If you load this file as an additional image in ITK-SNAP you should see that the new NIfTI image looks exactly the same as the original DICOM image, and the 'Info' tab in the 'Image Layer Inspector' has exactly the same information for both images, as we would expect.
* However, the information in the 'Metadata' tab for the two images is different, with the information from the NIfTI file header being displayed for the NIfTI image and the DICOM header information being displayed for the DICOM image.
-
+
:::::::::::::::::::::::::::::::::::::: spoiler
@@ -431,7 +429,7 @@ On the other hand, the original DICOM images assume the world coordinate system
```output
[[ 0.9765625 0. 0. -249.51171875]
[ 0. 0.9765625 0. -466.01171875]
- [ 0. 0. 3. -205.5 ]
+ [ 0. 0. 2. -553.5 ]
[ 0. 0. 0. 1. ]]
```
As you can see, the values in the first two rows corresponding to the X and Y dimensions are the negative of those in the NIfTI header.
@@ -478,14 +476,14 @@ The new file `CT_for_PET_float32.nii.gz` is larger than `CT_for_PET.nii.gz` (62
* Load `CT_for_PET_float32.nii.gz` as an additional image in ITK-SNAP. You should see that the new NIfTI image looks exactly the same as the previous NIfTI image (and the original DICOM image) and has the same intensity values.
* However, if you look at the 'Info' tab in the 'Image Layer Inspector' you will see that the 'Pixel Format:' for the new image is float32, but is int16 for the previous image.
-
+
-
+
* And if you look at he 'Metadata' tab you will see that the 'bitpix' value is 32 for the new image and 16 for the previous image, and the 'datatype' is 16 for the new image and 4 for the previous image.
-
-
+
+
### 3.2. Cropping images
@@ -508,7 +506,7 @@ When creating a new Nifti1Image object, you can provide an affine transform and/
* Load the uncropped CT image (`CT_for_PET.nii.gz`) into ITK-SNAP as the main image. Now load the cropped image as an additional image.
You will see that the cropped image has been cropped in all three dimensions so that it just contains the lungs, but it is shifted relative to the uncropped image.
-
+
:::::::::::::::::::::::::::::::::::::: callout
@@ -544,14 +542,14 @@ But you wouldn’t have learnt so much if we’d simply done that to start with!
If you load the new image into ITK-SNAP you will see that it is now aligned with the original image.
-
+
-
+
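+A minimal sketch of how the crop and the affine update might be done with NiBabel is shown below; the crop indices are arbitrary placeholders, not the values used to create the provided image:
+
+```python
+import nibabel as nib
+
+# Crop the CT image to a sub-volume and update the affine so the cropped
+# image stays aligned with the original in world space.
+nii = nib.load('CT_for_PET.nii.gz')
+data = nii.get_fdata()
+
+x0, y0, z0 = 100, 120, 30    # hypothetical crop start indices (voxels)
+x1, y1, z1 = 400, 380, 90    # hypothetical crop end indices (voxels)
+cropped = data[x0:x1, y0:y1, z0:z1]
+
+# The new origin is the world position of the first voxel of the crop.
+new_affine = nii.affine.copy()
+new_affine[:3, 3] = (nii.affine @ [x0, y0, z0, 1])[:3]
+
+nib.save(nib.Nifti1Image(cropped, new_affine), 'CT_for_PET_cropped.nii.gz')
+```
+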
### 3.3. Aligning images
If you load `CT_for_PET.nii.gz` and the DICOM image in the `inhale_BH_CT` folder into ITK-SNAP, you can see that there is a large misalignment between the images.
-
+
If we perform a registration between these images as they are, it may have difficulties as the images are so far out of alignment to start with. One way to roughly align the images before performing a registration is to align the centres of the images.
@@ -586,8 +584,8 @@ The translation should not replace the image origin values in the affine header
Use ITK-SNAP to verify that you have correctly aligned the centres of the images.
-
-
+
+
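+If you are unsure where to start, here is a minimal sketch of the idea. The moving image file name is a placeholder - you would first need to save the `inhale_BH_CT` DICOM image as a NIfTI file, as described earlier:
+
+```python
+import numpy as np
+import nibabel as nib
+
+fixed = nib.load('CT_for_PET.nii.gz')
+moving = nib.load('inhale_BH_CT.nii.gz')   # placeholder NIfTI version of the DICOM image
+
+def world_centre(img):
+    # voxel index of the volume centre, mapped to world coordinates
+    centre_vox = (np.array(img.shape[:3]) - 1) / 2.0
+    return (img.affine @ np.append(centre_vox, 1))[:3]
+
+translation = world_centre(fixed) - world_centre(moving)
+
+# Add the translation to the existing origin rather than replacing it.
+new_affine = moving.affine.copy()
+new_affine[:3, 3] += translation
+nib.save(nib.Nifti1Image(moving.get_fdata(), new_affine), 'inhale_BH_CT_centred.nii.gz')
+```
+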
## 4. References
#### Data
diff --git a/episodes/practical2.Rmd b/episodes/practical2.Rmd
index e4cec1c6..9a6d09f0 100644
--- a/episodes/practical2.Rmd
+++ b/episodes/practical2.Rmd
@@ -1,28 +1,36 @@
---
-title: 'Image transformations in medical imaging'
+title: 'Affine transformations and resampling images'
teaching: 10
exercises: 2
---
:::::::::::::::::::::::::::::::::::::: questions
-- What are the steps for applying transformations to medical images?
-- How can multiple transformations be combined and applied to the same image?
-- What are the advantages of pull interpolation over push interpolation?
+* What are the steps for applying affine transformations to medical images?
+* What is the effect of using different interpolation methods when resampling images?
+* How can multiple affine transformations be combined and applied to the same image?
+* What are the differences between pull and push interpolation?
::::::::::::::::::::::::::::::::::::::::::::::::
::::::::::::::::::::::::::::::::::::: objectives
-- Learn how to apply various transformations, such as translations and rotations, to medical images.
-- Understand the process of composing multiple transformations.
-- Differentiate between push and pull interpolations.
+* Learn how to apply affine transformations, such as translations and rotations, to medical images.
+* Observe how different interpolation methods, such as nearest neighbour and linear, give different results when resampling images.
+* Use the process of composing multiple transformations to avoid artefacts caused by multiple resamplings.
+* Understand the differences between push and pull interpolations.
::::::::::::::::::::::::::::::::::::::::::::::::
-In these exercises, we will use a 2D slice from a lung MRI image, *lung_MRI_slice.png*, which you can find in the zipped data folder under `practical2`.
+:::::::::::::::::::::::::::::::::::::: prereq
-This tutorial uses Python. There is a template Jupyter notebook for you to work on `practical2-exercises.ipynb`. You have also been provided with some utility functions in `utils.py`. When using a function from this file, you should read it and make sure you understand what it does.
+You should have followed the instructions from [the Setup page](../learners/setup.md) to access the data and code for these exercises, and set up Python.
+
+::::::::::::::::::::::::::::::::::::::
+
+This practical is based in Python, and uses this template Jupyter notebook: [`practical2_notebook.ipynb`](files/practical2_notebook.ipynb). You have also been provided with some utility functions in [`utils.py`](files/utils.py). When using a function from this file, you should read it and make sure you understand what it does.
+
+In these exercises, we will use a 2D slice from a lung MRI image, `lung_MRI_slice.png`, which you can find in the unzipped data folder under `practical2`.
* Run the first cell of the Jupyter notebook. This imports the required libraries, and sets the *matplotlib* library to display figures in separate windows. This is required for the animations in these exercises to run correctly.
@@ -39,7 +47,7 @@ Therefore, before proceeding you should reorientate the image into ‘standard o
* Write your own code in the next cell of the Jupyter notebook to reorientate the image. It will then display the image using the `dispImage` function from *utils.py*.
* If you have done this correctly, the image should look like this:
-
+
## 2. Translating and resampling images
* Edit the code in the next cell of the notebook to create an affine matrix representing a translation by 10 pixels in the x direction and 20 pixels in the y direction. The cell already contains the code to create a 3 x 3 identity matrix, which can be edited as required.
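+
+For example, one way to fill in the provided identity matrix might look like the sketch below. This assumes the convention that the first row/column of the matrix corresponds to the x direction - check how the matrices are used in *utils.py*:
+
+```python
+import numpy as np
+
+# 3 x 3 homogeneous affine for a 2D translation of 10 pixels in x and
+# 20 pixels in y, built by editing an identity matrix.
+T = np.eye(3)
+T[0, 2] = 10   # translation in x
+T[1, 2] = 20   # translation in y
+print(T)
+```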
@@ -68,7 +76,7 @@ Matrix multiplication can be performed between two arrays using the @ operator o
* Now run the next cell to display the transformed image.
* If you have done this correctly it should appear like this:
-
+
:::::::::::: discussion
@@ -93,7 +101,7 @@ The results should look the same as the result from using *linear* interpolation
* Write the code to display the difference images.
* They should appear like this:
-
+
(the difference image for *nearest neighbour* is on the left, and *splinef2d* on the right)
:::::::::::: discussion
@@ -108,14 +116,14 @@ The values in the difference image for *splinef2d* are very small, in the order
* Now write code that repeats the steps above using a translation of 10.5 pixels in the x direction and 20.5 pixels in the y direction.
* The difference images should look like this:
-
+
(the difference image for nearest neighbour is on the left, and splinef2d on the right)
By default, the `dispImage` function displays images by scaling the values so that they use the full intensity range, i.e. the lowest value in the image is set to black and the highest to white. However, this can be misleading when comparing images, as the grey values seen in the images will not correspond to the same intensity values. Therefore, it is often a good idea to ensure exactly the same intensity range is used when displaying and comparing different images by using the `int_lims` input to the `dispImage` function.
* Write code to redisplay the difference images, in both cases using intensity limits of [-20, 20].
* The difference images should now appear like this:
-
+
(the difference image for nearest neighbour is on the left, and splinef2d on the right)
::::::::::::::::::: discussion
@@ -148,22 +156,22 @@ You will then need to compose the transformations (using matrix multiplication)
* Now write code that uses the function to calculate the affine matrix representing an anticlockwise rotation of 5 degrees about the centre of the image. Then transforms the original image using the rotation you just created and display the result.
* Linear interpolation should be used when resampling the image, and the intensity limits from the original image used when displaying the results.
* The result should look like this:
-
+
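+
+For reference, a rotation about a point is typically built by composing a translation of the point to the origin, a rotation, and a translation back. A minimal sketch of such a helper is shown below; the actual `affineMatrixFromRotationAboutPoint` function in *utils.py* may differ in its signature and sign conventions, which depend on the image orientation used in these exercises:
+
+```python
+import numpy as np
+
+# Affine for a rotation of theta degrees about the point (px, py),
+# composed as: translate (px, py) to the origin, rotate, translate back.
+def rotation_about_point(theta_deg, px, py):
+    t = np.deg2rad(theta_deg)
+    R = np.array([[np.cos(t), -np.sin(t), 0.0],
+                  [np.sin(t),  np.cos(t), 0.0],
+                  [0.0,        0.0,       1.0]])
+    T_to_origin = np.array([[1.0, 0.0, -px],
+                            [0.0, 1.0, -py],
+                            [0.0, 0.0, 1.0]])
+    T_back = np.array([[1.0, 0.0, px],
+                       [0.0, 1.0, py],
+                       [0.0, 0.0, 1.0]])
+    return T_back @ R @ T_to_origin
+```
+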
* Apply the same transformation again to the resampled image and display the result. This should be repeated 71 times, so that the image appears to rotate a full 360 degrees.
* It is necessary to add a short pause (i.e. 0.05 seconds) using `plt.pause(0.05)` so that the figure updates each time the new image is displayed.
* The final image should look like this:
-
+
* Here is the animation:
-
+
You will notice that the image gets smaller and smaller as it rotates. This is because of the NaN padding values – when a pixel value is interpolated from one or more NaN values it also gets set to NaN, so the pixels at the edge of the image keep getting set to NaN, and the image gets smaller after each rotation.
* To prevent this, repeat your code so that it uses a padding value of 0 rather than NaN and run your new code.
* Before the for loop, you will need to first resample the original image (also using a padding value of 0), display it, and add a short pause so that the displayed image is updated.
* This time the final image should look like this:
-
+
* Here is the animation:
-
+
::::::::::::::::::: discussion
@@ -173,15 +181,17 @@ You will notice that the corners of the image still get ‘rounded off’ as it
* Do you understand why this happens?
+* How else does the final image differ from the original image?
+
:::::::::::::::::::
* Now add code to the template script to repeat the above, but first using *nearest neighbour* interpolation, and then using *splinef2d*.
* The final images should look like this:
-
+
* Here is the animation for *nearest neighbour* interpolation:
-
+
* And for *splinef2d*:
-
+
The blurring artefacts and the ‘rounding off’ of the images seen above are caused by multiple resamplings of the image. Resampling essentially makes a copy of the image, and the interpolation slightly degrades that image. Multiple resamplings are like copies of copies of copies - you start to notice the cumulative degradation of the images. This can be prevented by composing the rotations into a single transformation and then applying the resulting composed transformation to the original image instead of resampling the result image each time.
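+
+A minimal sketch of this idea is shown below. It reuses the function names from these exercises, but the exact signatures, the composition order, and the variable names are assumptions - check them against *utils.py* and your own code:
+
+```python
+import numpy as np
+import matplotlib.pyplot as plt
+from utils import defFieldFromAffineMatrix, resampImageWithDefField, dispImage
+
+# img is the original (reorientated) image, R is the 5-degree rotation affine
+# about the image centre, and int_lims are the intensity limits of the original
+# image - all assumed to have been defined in earlier cells.
+R_current = np.eye(3)
+for _ in range(72):
+    R_current = R @ R_current                       # compose, don't resample the result
+    def_field = defFieldFromAffineMatrix(R_current, img.shape)
+    warped = resampImageWithDefField(img, def_field, pad_value=0)
+    dispImage(warped, int_lims=int_lims)
+    plt.pause(0.05)                                 # let the figure update
+```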
@@ -202,11 +212,11 @@ At each iteration of the loop `R_current` will be updated by composing it with `
* Repeat the animation using *nearest neighbour* and *splinef2d* interpolation.
* The resulting animation should look like this for *linear*:
-
+
* And this for *nearest neighbour*:
-
+
* And this for *splinef2d*:
-
+
You should notice that the corners of the images do not get ‘rounded off’ during these animations. Furthermore, you should notice that the intermediate images are different for the different interpolation methods (although this is only really noticeable for nearest neighbour interpolation), but the blurring artefacts do not get worse as the animation progresses, and the final image should be the same as the original image.
@@ -216,7 +226,7 @@ As discussed in the lectures, it is possible to resample an image using push-int
* Copy your code from above (that composes the transformations and uses *linear* interpolation) and modify it to perform push interpolation instead of pull interpolation by using the `resampImageWithDefFieldPushInterp` function instead of the `resampImageWithDefField` function.
* Note, you cannot provide a `pad_value` for `resampImageWithDefFieldPushInterp`
* The resulting animation should look like this:
-
+
::::::::::::::::::: discussion
diff --git a/episodes/practical3.Rmd b/episodes/practical3.Rmd
index a8b0ba4f..65706aa0 100644
--- a/episodes/practical3.Rmd
+++ b/episodes/practical3.Rmd
@@ -6,111 +6,157 @@ exercises: 2
:::::::::::::::::::::::::::::::::::::: questions
-- What measures are used to calculate similarity between two images?
-- What are some commonly used similarity measures and how do they differ in implementation and functionality?
+* How do different similarity measures change as images move into and out of alignment?
+* How do different similarity measures perform with different images (same/different modality)?
+* How are the different similarity measures implemented?
::::::::::::::::::::::::::::::::::::::::::::::::
::::::::::::::::::::::::::::::::::::: objectives
-- Understand how common similarity measures for image registration are calculated and used.
-- Learn about the advantages and disadvantages of these metrics.
+* Understand how different similarity measures are implemented and used.
+* Learn about the advantages and disadvantages of the different similarity measures.
::::::::::::::::::::::::::::::::::::::::::::::::
+:::::::::::::::::::::::::::::::::::::: prereq
+
+You should have followed the instructions from [the Setup page](../learners/setup.md) to access the data and code for these exercises, and set up Python.
+
+::::::::::::::::::::::::::::::::::::::
+
+This practical is based in Python, and uses this template Jupyter notebook: [`practical3_notebook.ipynb`](files/practical3_notebook.ipynb). You have also been provided with some utility functions in [`utils.py`](files/utils.py). When using a function from this file, you should read it and make sure you understand what it does.
+
In these exercises, we use three 2D images:
* `ct_slice_int8.png`: an axial CT slice of a head, pixels are stored as unsigned 8-bit integers.
* `ct_slice_int16.png`: the same axial CT slice, pixels are stored as unsigned 16-bit integers.
* `mr_slice_int16.png`: the corresponding axial MR slice, pixels are stored as unsigned 16-bit integers.
-You can find in the zipped data folder under `practical3`.
+You can find them in the unzipped data folder under `practical3`.
-This tutorial uses Python. There is a template Jupyter notebook for you to work on *practical3-exercises.ipynb*. You have also been provided with some utility functions in `utils.py`. When using a function from this file, you should read it and make sure you understand what it does.
+* Run the first cell of the Jupyter notebook. This imports the required libraries, and sets the *matplotlib* library to display figures in separate windows. This is required for the animations in these exercises to run correctly.
-## 1. Loading and displaying the images
-Load the three 2D images. Display their data type and check these match the expected data types.
+## 1. Inspect the images
-Convert these images to double, reorient them into 'standard orientation' and display each image in a separate figure using the `dispImage` function from *utils2.py*.
+* Run the next cell of the notebook to load the 3 images.
+* Add code to the next cell to display the data type for each of the images, run it, and check they match the expected data types.
+* Add code to the next cells to convert all the images to double data type, reorient them into Cartesian orientation, and display each image in a separate figure using the `dispImage` function from *utils.py*.
The images should appear like this:
-
+
The left and middle images are the CT images, while the image on the right is an MRI.
-You should notice that the two CT images appear exactly the same, but if you examine the actual values in the images you will see that the images use different intensity ranges. If you move your cursor over the image the intensity values can be seen next to the figure:
+You should notice that the two CT images appear exactly the same, but if you examine the actual values in the images you will see that the images use different intensity ranges. If you move your cursor over the image the intensity values can be seen next to the figure (it is the value in [], so 19.0 in the figure below):
-
+
In the 16-bit version, the intensity values correspond to Hounsfield Units (or rather Hounsfield Units + 1000, since air has a value of -1000, but the image cannot contain negative values as it is stored as unsigned integers), but in the 8-bit image the values have been scaled as the image can only contain values from 0 to 255.
-## 2. Rotations
+## 2. Moving images in and out of alignment
+
+In order to explore the performance of the different similarity measures we are going to write some code to rotate the images from -90 degrees to 90 degrees so they first move into, and then out of, alignment with the original (unrotated) images.
+The Jupyter notebook contains the outline of some code to rotate each of the images between -90 degrees and 90 degrees, in steps of 1 degree, but you need to complete the code.
-The template script contains some code to rotate each of the images between -90 degrees and 90 degrees, in steps of 1 degree. Edit the code so that on each iteration of the loop it uses the `affineMatrixFromRotationAboutPoint` function from *utils2.py* to create an affine matrix representing an anti-clockwise rotation by theta degrees about the point 10,10, and uses the `defFieldFromAffineMatrix` function to create the corresponding deformation field. Then resample each of the 3 images using the `resampImageWithDefField` function and display the transformed 8-bit CT image using the `dispImage` function.
+* Edit the code so that on each iteration of the loop it:
+ * uses the `affineMatrixFromRotationAboutPoint` function from *utils.py* to create an affine matrix representing an anti-clockwise rotation by theta degrees about the point (10,10),
+ * uses the `defFieldFromAffineMatrix` function to create the corresponding deformation field,
+ * then resamples each of the 3 images using the `resampImageWithDefField` function with the default interpolation (linear) and padding value (NaN),
+ * and uses the `dispImage` function to display the resampled 8-bit CT image.
-If this has been implemented correctly the image should appear to rotate clockwise, starting with a rotation of -90 degrees (so mostly being ‘off to the left’ of the original image) and finishing with a rotation of +90 degrees (so mostly being ‘off the bottom’ of the original image), as shown in the intermediate images below:
+If this has been implemented correctly the image should appear to rotate clockwise, starting with a rotation of -90 degrees (so mostly being ‘off to the left’ of the original image) and finishing with a rotation of +90 degrees (so mostly being ‘off the bottom’ of the original image), as shown in the animation below:
-
+
-
+:::::::::::: discussion
-Make sure you understand why the image appears to rotate clockwise when the function produces an affine matrix representing an anti-clockwise rotation.
+#### Question
-Note, the default padding value of NaN should be used when resampling the image so that pixels from outside the original images are ignored when calculating the similarity measures
-below.
+* Why does the image appear to rotate clockwise when the function produces an affine matrix representing an anti-clockwise rotation?
-### 2.1. Sum of Squared DIfferences (SSD)
+* Why should the default padding value of NaN be used when resampling the images?
-In this section, we are going to observe how Sum of Squared Differences (SSD) varies between our original image and our rotated image. Edit the code so that on each iteration of the loop it calculates and stores the SSD between:
+::::::::::::
-* 1) The original 16-bit CT image and the transformed 16-bit CT image
-* 2) The original 16-bit CT image and the transformed 8-bit CT image
-* 3) The original 16-bit CT image and the transformed MR image
-* 4) The original 8-bit CT image and the transformed 8-bit CT image
-Now rerun the cell with the for loop (you may want to comment out the lines that display the image and pause so that the code runs faster).
+## 3. Sum of Squared Differences (SSD) and Mean of Squared Differences (MSD)
-Plot the SSD values on the y-axis against the theta on the x-axis for each of the four cases above.
+We are now going to observe how the Sum of Squared Differences (SSD) and Mean of Squared Differences (MSD) similarity measures change as the rotated images and original images come in and out of alignment.
+
+* Copy your code from the previous cell to create the affine matrix and corresponding deformation field, and to resample the images.
+ * You do not need to copy the code to display the rotated 8-bit CT image as this was just to check the code was working as expected, and it makes the loop a lot slower to run.
+* Add new code so that on each iteration of the loop it calculates and stores the SSD between:
+ * 1) The original 16-bit CT image and the resampled 16-bit CT image
+ * 2) The original 16-bit CT image and the resampled 8-bit CT image
+ * 3) The original 16-bit CT image and the resampled MR image
+ * 4) The original 8-bit CT image and the resampled 8-bit CT image
+* Run the cell. Then run the following cell so that it plots the SSD values on the y-axis against theta on the x-axis for each of the four cases above.
The plots should look like this:
-
+
+
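+If you are unsure how to handle the NaN padding values when calculating the SSD (and the MSD in the next section), here is a minimal sketch of one possible approach - your own implementation may differ:
+
+```python
+import numpy as np
+
+# SSD and MSD between two images, ignoring any pixel that is NaN in either
+# image (a NaN in the squared difference is skipped by the nan-aware
+# numpy reductions).
+def calc_ssd(img_a, img_b):
+    return np.nansum((img_a - img_b) ** 2)
+
+def calc_msd(img_a, img_b):
+    return np.nanmean((img_a - img_b) ** 2)
+```
+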
+:::::::::::: discussion
-Note that:
+#### Question
-* The SSD reaches a minimum (of 0) for cases 1 and 4 when the images are in alignment.
-* For cases 2 and 3 SSD does not have a minimum when the images are aligned.
-* For all cases the SSD decreases as the overlap between the images decreases.
-* Although the shape of the SSD curve for cases 1 and 4 is the same, the values of the SSD are different by 2 orders of magnitude.
+* Why does the SSD reach a minimum (of 0) for cases 1 and 4 when the images are in alignment?
+* Why does the SSD not reach a minimum value when the images are aligned for cases 2 and 3?
+* Why does the SSD decrease as the overlap between the images decreases?
+* Why is the shape of the SSD curve for cases 1 and 4 the same, but the values of the SSD are different by 2 orders of magnitude?
-Make sure you understand why you get these results.
+::::::::::::
-### 2.2. Mean of Squared Differences (MSD)
-Now edit the code so that it rotates the images as above but calculates the MSD instead of the SSD at each iteration, and then plots the MSD values.
+* Now edit and run the next two cells so that they do the same as above but calculate and plot the MSD values instead of the SSD values.
The plots should look like this:
-
+
+
+
+:::::::::::: discussion
+
+#### Question
-Note that:
+Note that for cases 1 and 4, unlike the SSD, the MSD does not decrease as the amount of overlap decreases. The shape of the MSD curves for cases 1 and 4 is the same, but the values are larger for case 1. Make sure you understand why you get these results.
-* The MSD for cases 1 and 4 does not decrease as the amount of overlap decreases.
-* The shape of the MSD curves for cases 1 and 4 are the same but the values are larger for case 1.
-* The MSD values for cases 2 and 3 are lower for negative values of theta and higher for positive values (this one is a bit tricky!).
+* Why are the MSD values for cases 2 and 3 lower for negative values of theta and higher for positive values (this one is a bit tricky!)?
-Make sure you understand why you get these results.
+::::::::::::
-### 2.3. Normalized Cross Correlation (NCC) and Normalized Mutual Information (NMI)
-Implement the `calcNCC` function so that it calculates the Normalised Cross Correlation (NCC) between two images.
+## 4. Normalized Cross Correlation (NCC) and Normalized Mutual Information (NMI)
-Edit the code so that it uses your `calcNCC` function and the `calcEntropies` function from `utils.py` to calculate the (NCC) and the joint and marginal entropies ($H_{AB}$, $H_A$, and $H_B$) instead of the MSD/SSD at each iteration. Also calculate the Mutual Information (MI) and Normalised Mutual Information (NMI) from the entropy values, and plot the results for NCC, $H_{AB}$, MI, and NMI.
+* Edit the next cell of the notebook to implement the `calcNCC` function so that it calculates the Normalised Cross Correlation (NCC) between two images.
+
+::::::::::::::::::: spoiler
+
+#### Hints
+
+When calculating the NCC you need to ignore any pixels that are NaN in either of the input images. The *numpy* `nanmean` and `nanstd` functions can calculate the mean and standard deviation of an array (image) while ignoring the NaN values, but they will only ignore pixels that are NaN in the images passed as input to the function, not the pixels that are NaN in the other image.
+
+Therefore, you will first need to remove any pixels that contain NaN in either of the input images. The *numpy* `isnan` function can find the NaN pixels in each individual image, and logical operations and boolean indexing can then be used to keep just the pixels that are not NaN in both images. If you are not sure how to do this ask me or one of the PGTAs to help you work it out.
+
+You can then calculate the mean and standard deviation of each image using the *numpy* `mean` and `std` functions. And then calculate the NCC using the equation from the lectures.
+
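+If you get stuck, here is a minimal sketch of one possible implementation, written directly from the description above (the version you write in the notebook may be structured differently):
+
+```python
+import numpy as np
+
+def calcNCC(img_a, img_b):
+    # keep only pixels that are not NaN in either image
+    valid = ~np.isnan(img_a) & ~np.isnan(img_b)
+    a = img_a[valid]
+    b = img_b[valid]
+    # NCC: mean product of the zero-mean images, divided by the product
+    # of their standard deviations
+    return np.mean((a - a.mean()) * (b - b.mean())) / (a.std() * b.std())
+```
+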
+:::::::::::::::::::
+
+* Edit the code in the next cell so that it uses your `calcNCC` function and the `calcEntropies` function from `utils.py` to calculate the NCC and the joint and marginal entropies ($H_{AB}$, $H_A$, and $H_B$) instead of the MSD/SSD at each iteration.
+* Also add code after the loop to calculate the Mutual Information (MI) and Normalised Mutual Information (NMI) from the entropy values.
+* Now run the cell and the next cell to plot the results for the NCC, the joint entropy ($H_{AB}$), the MI and the NMI.
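+
+As a reminder, the MI and NMI are calculated from the entropies as follows (these should match the definitions given in the lectures):
+
+$MI = H_A + H_B - H_{AB}$
+
+$NMI = \frac{H_A + H_B}{H_{AB}}$
+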
The plots should look like this:
-
-
-
-
+
+
+
+
+
+:::::::::::: discussion
+
+#### Question
-Do the different measures perform as expected for the different cases? Based on these results, which measures are suitable for registering the different pairs of images? Does this agree with what you were taught in the lecture?
+* Do the different measures perform as expected for the different cases?
+* Based on these results, which measures are suitable for registering the different pairs of images?
+* Does this agree with what you were taught in the lecture?
-Make sure you understand all the results you get.
+::::::::::::
diff --git a/episodes/practical4.Rmd b/episodes/practical4.Rmd
index c243935e..73867cf2 100644
--- a/episodes/practical4.Rmd
+++ b/episodes/practical4.Rmd
@@ -1,146 +1,226 @@
---
-title: 'Understanding the Demons registration algorithm'
+title: 'Exploring the Demons registration algorithm'
teaching: 10
exercises: 2
---
:::::::::::::::::::::::::::::::::::::: questions
-- How does the Demons registration algorithm achieve image registration?
-- What parameters should be adjusted to optimise the performance of the Demons registration algorithm?
-- What are the key strengths and potential limitations of using the Demons registration algorithm?
+* How does the Demons registration algorithm attempt to align images?
+* How do the different parameters affect the performance of the Demons registration algorithm?
::::::::::::::::::::::::::::::::::::::::::::::::
::::::::::::::::::::::::::::::::::::: objectives
-- Explore the underlying principles and methods of the Demons registration algorithm.
-- Understand the key parameters that influence the performance of the algorithm and how to tune them effectively.
-- Visualise and interpret the results by generating plots of deformation fields and Jacobian determinant maps.
+* Understand how the demons registration algorithm works and how it attempts to align images.
+* Investigate the influence of the different parameters on the performance of the registration.
+* Visualise the execution of the algorithm by displaying the images and deformation fields while it is running.
+* Interpret the results by inspecting the images, deformation fields, and Jacobian determinant maps.
::::::::::::::::::::::::::::::::::::::::::::::::
-In these exercises, we use three 2D sagittal MR slices `cine_MR_1.png`, `cine_MR_2.png`, and `cine_MR_3.png` showing the lung, liver, and surrounding anatomy, at different points in time during free-breathing. Image 1 is at end-exhalation, image 2 at the end-inhalation for a normal breath, and image 3 at end-inhalation for a deep breath. You can find them in the zipped data folder under `practical4`.
-This tutorial uses Python. There is a template Jupyter notebook for you to work on `practical4-exercises.ipynb`. You have also been provided with some utility functions in `utils.py`. When using a function from this file, you should read it and make sure you understand what it does.
-There is also a function that implements the Demons registration algorithm in `demonsReg.py`. You should look at the code in `demonsReg.py` and check you can follow what it is doing. You should be able to understand what each section of code is doing, even if you don’t fully understand every line.
+:::::::::::::::::::::::::::::::::::::: prereq
-## 1. Loading and displaying images
-Load and display the three images. Convert the images to doubles and reorientate them as in the previous exercises.
+You should have followed the instructions from [the Setup page](../learners/setup.md) to access the data and code for these exercises, and set up Python.
+
+::::::::::::::::::::::::::::::::::::::
+
+This practical is based in Python, and uses this template Jupyter notebook: [`practical4_notebook.ipynb`](files/practical4_notebook.ipynb). You have been provided with some utility functions in [`utils.py`](files/utils.py). When using a function from this file, you should read it and make sure you understand what it does. There is also a function that implements the Demons registration algorithm in [`demonsReg.py`](files/demonsReg.py). You should look at the `demonsReg` function and check you can follow what it is doing. You should be able to understand what each section of code is doing, even if you don’t fully understand every line.
+
+In these exercises, we use three 2D sagittal MR slices showing the lung, liver, and surrounding anatomy, at different points in time during free-breathing:
+
+* `cine_MR_1.png` is at end-exhalation,
+* `cine_MR_2.png` is at the end-inhalation for a normal breath,
+* `cine_MR_3.png` is at end-inhalation for a deep breath.
+
+You can find them in the unzipped data folder under `practical4`.
+
+* Run the first cell of the Jupyter notebook. This imports the required libraries, and sets the *matplotlib* library to display figures in separate windows. This is required for the animations in these exercises to run correctly.
+
+## 1. Compare the images
+
+* Edit and run the next cell so that it loads the three images and then converts them to doubles and reorients them as in the previous exercises.
+* Add code to the following cell to display the images in separate figures.
The images should look like this:
-
+
+
+When displaying the images next to each other you should be able to see that the images are different and some of the structures have moved (especially in `cine_MR_3`), but it can be difficult to see exactly what has moved, and by how much. Difference images (one image minus another, e.g. as shown below) are a good way of seeing where there are differences between the images, but it can still be difficult to tell exactly what has moved where from the difference images.
-Compare the images to each other and observe the motion that occurs between the images. Difference images (one image minus another, e.g. as shown below) are a good way seeing where there are differences between the images, but it can be difficult to tell how things have moved between the images from the difference image.
+* Add code to the next cell to display the difference image between `cine_MR_1` and `cine_MR_2` in one figure, and the difference image between `cine_MR_1` and `cine_MR_3` in another figure.
-
+The difference images should look like this:
-The image on the left is the difference image between the first and second images, and the one on the right is the difference image between the first and third images.
+
-It is often easier to appreciate how things have moved/changed between images if you 'flip' between the images. To achieve this, we can use the `dispImageFlip` function. This function takes in the two images you want to look at as arguments, and you can flip between the two using the arrow keys on your keyboard.
+It is often easier to appreciate how things have moved/changed between images if you 'flip' between the images. The `dispImageFlip` function in `utils.py` makes it easy to do this by taking two images as inputs, and allowing you to flip between the two using the arrow keys on your keyboard.
-See how the images differ from each other. Pay particular attention to motion that you think could be challenging for the demons algorithm to recover, such as very large motion and deformation, or sliding motion that can occur between the lung/liver and surrounding anatomy.
+* Add code to the next cell to use the `dispImageFlip` function to compare `cine_MR_1` and `cine_MR_2` and to compare `cine_MR_1` and `cine_MR_3`. Observe how the images differ from each other. Pay particular attention to motion that you think could be challenging for the demons algorithm to recover, such as very large motion and deformation, or sliding motion that can occur between the lung/liver and surrounding anatomy.
+
+## 2. The `demonsReg` function
-## 2. Demons algorithm
-### 2.1. `demonsReg` function
Have a look at the comments at the start of `demonsReg` function in `demonsReg.py` (which are also displayed as the help info for the function). There are two required input parameters: the **source** and **target** images for the registration. There are also a number of optional input parameters, which have default values assigned to them that will be used if an alternative value is not specified. We will explore the effects of these parameters as we go through these exercises. The outputs of the function are the final warped image and the corresponding deformation field.
-Start by running a registration with `cine_MR_1` as the source image and `cine_MR_2` as the target image and using default values for all the other parameters.
-When you start the registration, a figure appears with 3 subplots. The first subplot on the left shows the warped image (transformed source image), the second subplot in the middle shows the current deformation field and the third subplot on the right shows the update to the deformation field for the current iteration. The deformation field (middle plot) is displayed as a deformed grid whereas the update is displayed as a vector field. All three images update during the registration.
+When you start the registration, a 'live display' figure appears with 3 subplots. The first subplot on the left shows the warped image (resampled source image), the second subplot in the middle shows the current deformation field and the third subplot on the right shows the update to the deformation field for the current iteration. The deformation field (middle plot) is displayed as a deformed grid whereas the update is displayed as a vector field. All three images update during the registration.
+
+
+
+:::::::::::: callout
+
+#### Note
+
+Most other registration software packages (i.e. all that I am aware of!) do not display the intermediate results while the registration is running, and just output the final result. I think it can be very informative to visualise the intermediate results, and watch how the registration algorithm gets to its final answer - which is why we have implemented it for these exercises! :smirk:
+
+Although, to be fair, this is much easier in 2D than in 3D!
+
+::::::::::::
+
+* Run `demonsReg` with `cine_MR_1` as the source image and `cine_MR_2` as the target image and using default values for all the other parameters.
+
+While the registration is running, you will notice that the figure is updated and that text is outputted in the notebook stating the current resolution level, iteration number, and value of the Mean Squared Difference (MSD) between the target image and the current warped image. At the end of the registration, the figure should look like this:
+
+
+
+Once the registration has finished, another figure appears. This 'final display' figure allows us to flip between the source, target and warped image in the left subplot. The middle subplot shows the deformation field (or Jacobian determinant map, more details later), and the right subplot shows a difference image between the target image and the image currently shown in the left subplot.
+
+
+
+If you look at the text output you will see that the final MSD value is 80.20 (rounded to 2 decimal places – more are shown in the figure and notebook), and that 74 iterations were performed at the final (3rd) resolution level. If you scroll up in the notebook (you may need to click on 'View as scrollable element' at the bottom of the text output if the output has been truncated) you will see that 24 iterations were also performed at the 2nd resolution level, and 17 iterations at the 1st level.
+
+:::::::::::: discussion
+
+#### Questions
+
+Within each resolution level, the MSD value decreases with each iteration. However, the MSD increases between resolution levels.
+
+* Do you know why it does this?
+
+Hint: it’s not just because there are more pixels in the higher resolution images, as using the mean rather than sum of squared differences should account for different numbers of pixels in the images.
-
+Compare the final warped image with the target and source images.
-While the registration is running, you will notice that the figures are updated and that text is outputted in the notebook stating the current resolution level, iteration number, and value of the Mean Squared Difference (MSD) between the target image and the current warped image. The MSD is the similarity metric used by this demons registration. At the end of the registration, the figure should look like this:
+* How well do you think the registration did at aligning the images?
+* Which parts of the images are well aligned, and which parts are less well aligned?
-
+If you look at the deformation field you should see that it corresponds to the deformation that would be applied to the warped image to get back to the source image (rather than the deformation that would be applied to the source image to get to the warped image). Make sure you understand why this is the case.
-Here is a summary of the registration:
-
+::::::::::::
-At the end of the registration, a second figure appears. The figure allows us to flip between the source, target and warped image and shows the deformation field and difference image. We can also display the Jacobian (more details later).
+## 3. Regularisation
-
+You can specify the values of the optional input parameters when calling the function, e.g.
-If you look at the text output you will see that the final MSD value is 80.20 (rounded to 2 decimal places – more are shown in the console), and that 74 iterations were performed at the final (3rd) resolution level. If you scroll up in the console you will see that 24 iterations were also performed at the 2nd resolution level, and 17 iterations at the 1st level.
-:::::::::::::::challenge
-Within each resolution level, the MSD value decreases with each iteration. However, the MSD increases between resolution levels. Do you know why it does this?
-::::::::::::::::hint
-It’s not just because there are more pixels in the higher resolution images, as using the mean rather than sum of squared differences should account for different numbers of pixels in the images.
-::::::::::::::::::::
-::::::::::::::::::::
+`warped_image, def_field = demonsReg(cine_MR_1, cine_MR_2, sigma_elastic=0)`
-Compare the final warped image with the target and source images. How well do you think the registration did at aligning the images? Which parts of the images are well aligned, and which parts are less well aligned?
+will run the registration with sigma_elastic set to 0, i.e. with no elastic-like regularisation applied.
-If you look at the deformation field you will see that it corresponds to the deformation that would be applied to the warped image to get back to the source image (rather than the deformation that would be applied to the source image to get to the warped image). Make sure you understand why this is the case (hint – pull-interpolation).
+* Try running this registration.
-### 2.2. Regularisation
+At the end of the registration, the 'final display' figure should look like this:
-You can specify the values of the optional input parameters when calling the function, e.g. `demonsReg(cine_MR_1, cine_MR_2, sigma_elastic=0)` will run the registration with sigma_elastic set to 0, i.e. with no elastic like regularisation applied. Try running this registration. At the end of the registration, the figures should look like this:
+
-
-
-Here is a summary of the registration:
-
+You will see that parts of the final warped image look very similar to the target image, and the final MSD is 79.01, which is a little lower than for the default parameters. However, you can see that the final deformation field is very ‘crumpled’ and does not look very physically plausible. Therefore, for most applications this would not be considered a good registration result. The reason for this is that, without any elastic-like regularisation applied, the registration is free to deform each part of the image independently in any way that better aligns the intensities. This results in structures being overly-deformed to resemble different structures, rather than being aligned with the correct corresponding structure in the other image, and highlights the need for using suitable regularisation.
-You will see that parts of the final warped image looks very similar to the target image, and the final MSD is 79.01, which is a little lower than for the default parameters. However, you can see that the final deformation field is very ‘crumpled’ and does not look very physically plausible. Therefore, for most applications this would not be considered a good registration result. The reason that the result is like this is that without any elastic-like regularisation applied to the deformation field it is free to ‘evolve’ in any way that better aligns the intensities, even if this results in implausible transformations, and highlights the need for using suitable regularisation.
+* Now rerun the registration with sigma_elastic at its default value again but this time setting sigma_fluid to 0.
-Now rerun the registration with sigma_elastic at its default value again but this time setting sigma_fluid to 0. At the end of the registration, the figures should look like this:
+At the end of the registration, the figures should look like this:
-
-
+
-Here is a summary of the registration:
-
+
-You will now see that the updates are much larger and noisier as no regularisation is being applied to them, but the deformation field is still relatively smooth as it does have regularisation applied. If you compare the result from this registration to the result obtained with the default parameters (remember, you can store the outputs of the function so that you can directly compare the results from different registrations) you will see that the results are very similar. However, the final MSD value is lower (72.73), but some regions of the deformation field are slightly less smooth. Which of the results should be considered ‘best’ will depend on exactly what the application is. But it should be clear that the elastic-like regularisation usually has a larger effect on the result than the fluid-like regularisation does. Now try running several more registrations with different levels of elastic-like and fluid-like regularisation to get a good feel for how the combination of these parameters affects the results, and which ranges of values produce good/bad results.
+You will now see that the updates are much larger and noisier as no regularisation is being applied to them, but the deformation field is still relatively smooth as it does have regularisation applied. If you compare the result from this registration to the result obtained with the default parameters you will see that the results are very similar. However, the final MSD value is lower (72.73), but some regions of the deformation field are slightly less smooth. Which of the results should be considered ‘best’ will depend on exactly what the application is. But it should be clear that the elastic-like regularisation usually has a larger effect on the result than the fluid-like regularisation does.
-:::::::::::::::::::::::callout
-You can control how often the plots are updated by setting the value of the `disp_freq` input parameter. The registrations themselves run relatively quickly, but updating the images takes some time. This makes the registrations run considerably slower but allows you to see how the deformation field and warped image change during the course of the registration, which can be very useful for understanding why the registration produces the result it does. If you set `disp_freq` to 0 then the images will only update at the start of each level and to show the final result. If you do this you will notice that the registration runs much quicker, but you miss all the details of how the registration produces the result. For most of these exercises, keep the `disp_freq` value relatively low, e.g. at the default value of 5 iterations or less, so that you can see how the deformation field and warped image change as the registration is progressing.
-:::::::::::::::::::::::::::
+* Now try running several more registrations with different values for the elastic-like and fluid-like regularisation to get a good feel for how the combination of these parameters affects the results, and which ranges of values produce good/bad results.
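+
+A minimal sketch of how such a sweep might be organised is shown below. The particular values are arbitrary examples, the import assumes the function is brought in from `demonsReg.py` as in the notebook, and `disp_freq=0` is used only so the sweep runs quickly (see the notes on this parameter in the 'Other parameters' section later):
+
+```python
+from demonsReg import demonsReg
+
+# Run the registration for a few combinations of regularisation values and
+# store the outputs so the results can be compared afterwards. disp_freq=0
+# updates the display only at the start of each level and at the end.
+results = {}
+for sigma_elastic in [0.5, 1.0, 2.0]:
+    for sigma_fluid in [0.0, 1.0, 2.0]:
+        warped, def_field = demonsReg(cine_MR_1, cine_MR_2,
+                                      sigma_elastic=sigma_elastic,
+                                      sigma_fluid=sigma_fluid,
+                                      disp_freq=0)
+        results[(sigma_elastic, sigma_fluid)] = (warped, def_field)
+```
+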
-### 2.3. Resolution levels
-You should have noticed that the registration is using a multi-resolution approach, with 3 resolution levels by default. You can specify the number of resolution levels to use with the `num_lev` input parameter. Try running a registration with `num_lev` set to 1, i.e. so that only a single resolution level is used, and the multi-resolution approach is not applied. At the end of the registration, the figures should look like this:
-
-
+## 4. Resolution levels
+You should have noticed that the registration is using a multi-resolution approach, with 3 resolution levels by default. You can specify the number of resolution levels to use with the `num_lev` input parameter.
-Here is a summary of the registration:
-
+* Try running a registration with `num_lev` set to 1, i.e. so that only a single resolution level is used, and the multi-resolution approach is not applied.
-::::::::::::::::::::discussion
-Do you think using a multi-resolution approach helps? Why?
-:::::::::::::::::::::
+At the end of the registration, the final display should look like this:
-Now try running the registration with 6 resolution levels (if you use 7 levels then the first level only contains 4 pixels, so there is not very much to change and improve. Anything greater than 7 levels will result in an error, since no further down-sampling can be done). The final MSD with six levels is worse (MSD = 86.24 ) then with the default 3 resolution levels (MSD = 80.20). However, visually the results from six resolution levels appears to align the structures within the lung better than the result from using three levels. It is the structures outside the lung, e.g. the ribs, that are not aligned as well. Again, assessing the "best" registration will be dependent on the exact application.
+
-Now experiment with varying the values for the regularisation parameters as well as the number of resolution levels, and see how the results are affected.
+:::::::::::::::: discussion
-### 2.4. Jacobian determinant
+#### Questions
-When investigating the effects of the regularisation parameters you should have noticed that some parameters will result in deformation fields do not look very plausible. In particular, sometimes the deformation field will appear to fold over itself, indicating that folding has occurred, which is often undesirable when performing registrations.
+* Do you think using a multi-resolution approach helps?
+* Why?
-For example, if you perform a registration with the following parameters: `sigma_elastic = 0.5, sigma_fluid = 1, num_lev = 3`, the result is the deformation field shown below, and as can be seen in the zoomed-in image on the right, the grid has folded over itself in some regions.
+::::::::::::::::
-
-You have been provided with a function, `calcJacobian`, to calculate the Jacobian determinant (and optionally the full Jacobian matrix) at each pixel in the deformation field, and return a map (image) of the Jacobian determinants. Have a look at this function and make sure you understand how it works.
+* Now try running the registration with 6 resolution levels (if you use 7 levels then the first level only contains 4 pixels, so there is not very much to change and improve. Anything greater than 7 levels will result in an error, since no further down-sampling can be done).
-You can display the Jacobian by using the up and down arrows on the second output figure at the end of the registration.
+The final MSD with six levels is worse (MSD = 86.24) than with the default 3 resolution levels (MSD = 80.20). However, visually the result from six resolution levels appears to align the structures within the lung better than the result from using three levels. It is the structures outside the lung, e.g. the ribs, that are not aligned as well. Again, assessing the "best" registration will be dependent on the exact application.
-
+* Now experiment with varying the values for the regularisation parameters as well as the number of resolution levels, and see how the results are affected.
-You can also plot the Jacobian yourself by calling the `calcJacobian` and `dispImage` functions. It is useful to add a colourbar and to use the `jet` colourmap. You can also plot a binary mask of the Jacobian by setting all values <= 0 to 1s.
+## 5. Folding and the determinant of the Jacobian matrix
-
+When investigating the effects of the regularisation parameters you should have noticed that some parameters will result in deformation fields that do not look physically plausible. In particular, sometimes the deformation field will appear to fold over itself, indicating that folding has occurred, which is generally undesirable when performing registrations. For example:
-You can notice that the image is very ‘patchy’, and does not show smooth expansions/contractions as would be expected during breathing. The colourbar shows us that the Jacobian contains some very large values (>4) as well as negative values, indicating that folding has occurred.
+* Run a registration with `sigma_elastic=0.5` (and all other parameters set to default values)
-If you compare the location of the pixels with folding in the binary image to the deformation field you will notice that they do not align with where you can see folding occurring in the grid. This is because the grid shows the deformation in the source image space whereas the binary image corresponds to the pixels in the warped image space.
+At the end of the registration, the final display should look like this:
-### 2.5. Compositions
-With the `demonsReg` function it is possible to compose the updates rather than add them, so that the deformation field will always represent a diffeomorphic transformation (as the updates are themselves always diffeomorphic), and hence will not contain any folding. If you look at lines 150-157 of the `demonsReg` function and you will see that the composition of two deformation fields can be implemented by treating the current deformation field as an image, and resampling it with (a deformation field derived from) the update to the transformation. To see why, we can write the equation for resampling an image as:
+
+
+You can use the magnifying glass icon at the bottom-left of the figure to zoom in on different regions of the deformation field. To zoom back out you can click on the left-facing (back) arrow icon, which will go back to the previous view/zoom (you can also right-click and drag a rectangle to zoom out, but I found it very difficult to get this to behave how I wanted it to).
+
+
+* Zoom in on the deformation field to the region shown in red in the figure on the left, below. You should see that the deformation field has folded in on itself, as shown in the figure on the right, below.
+
+
+
+The final display can also display a map of the determinant of the Jacobian matrix (often called the Jacobian determinant, or simply the Jacobian) instead of the deformation field.
+
+* Use the up or down arrows to switch from displaying the deformation field to the Jacobian map, and back again.
+
+
+
+:::::::::::::::: discussion
+
+#### Question
+
+You should notice that the regions where the deformation field expands correspond to the yellow and red regions of the Jacobian determinant map, and the regions where the deformation field compresses are blue in the Jacobian determinant map, with the regions where there is folding the darkest blue. However, the deformation field and Jacobian map are not well aligned with each other in some regions.
+
+* Why?
+
+::::::::::::::::
+
+It can be useful to display a colour bar with the Jacobian map so you can see which values the different colours correspond to. Unfortunately, adding a colour bar to the final display disrupts the layout of the figure, and is not recommended. However, if you have stored the deformation field output by the registration in a variable, e.g.:
+
+`warped_image, def_field = demonsReg(cine_MR_1, cine_MR_2, sigma_elastic=0.5)`
+
+you can use the `calcJacobian` function from `utils.py` to calculate the Jacobian map (and optionally the full Jacobian matrix at each pixel) from the deformation field. Have a look at this function and make sure you understand how it works.
+
+* Calculate the Jacobian map using `calcJacobian` and display it in a separate figure.
+ * You can use the `dispImage` function to display the Jacobian map with the correct orientation.
+ * You should change the colourmap to better visualise the Jacobian map. We have used 'Jet' but you can try different colourmaps if you prefer.
+ * You should then add a colourbar to the figure.
+
+The result should look like the left side of the figure below.
+
+With the colourbar you can see which colours correspond to values <= 0, i.e. where folding has occurred. However, due to the continuous nature of most colourmaps it can be difficult to see exactly which pixels have Jacobian values <= 0, and which are just above 0.
+
+* Create a binary image (boolean array) of the pixels where folding has occurred and display it in a separate figure.
+
+The result should look like the right side of the figure below.
+
+
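+
+A minimal sketch of these steps is shown below, assuming you have stored the deformation field in `def_field` as above. The exact `calcJacobian` and `dispImage` signatures are in *utils.py*, so check them first; the calls below assume the simplest usage:
+
+```python
+import matplotlib.pyplot as plt
+from utils import calcJacobian, dispImage
+
+# calculate the Jacobian determinant map from the deformation field
+jac_map = calcJacobian(def_field)
+
+# display the Jacobian map with the jet colourmap and a colourbar
+plt.figure()
+dispImage(jac_map)
+plt.set_cmap('jet')
+plt.colorbar()
+
+# binary image of the pixels where folding has occurred (Jacobian <= 0)
+folding = (jac_map <= 0).astype(float)
+plt.figure()
+dispImage(folding)
+```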
+
+## 6. Composing the updates
+
+With the `demonsReg` function it is possible to compose the updates rather than add them, so that the deformation field will always represent a diffeomorphic transformation (as the updates are themselves always diffeomorphic), and hence will not contain any folding. If you look at lines 170-183 of the `demonsReg` function you will see that the composition of two deformation fields can be implemented by treating the current deformation field as an image, and resampling it with (a deformation field derived from) the update to the transformation. To see why, we can write the equation for resampling an image as:
$I_{resamp}(\vec{x})=I_{orig}(T(\vec{x}))$
@@ -154,41 +234,46 @@ i.e. to calculate the composed transformation, $T_{t+1}$, we first apply the upd
You can see that the equation for composing the transformation is the same as the equation for resampling the image, with the current transformation, $T_t$, replacing the image, $I_{orig}$, and $T_{up}$ used as the transformation to resample the image/transformation.
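+
+As a purely illustrative sketch of this idea (not the actual code in `demonsReg.py`, which stores and composes its deformation fields in its own way), composing two 2D deformation fields by resampling could look something like this:
+
+```python
+import numpy as np
+from scipy.ndimage import map_coordinates
+
+# Each deformation field is assumed to be an (H, W, 2) array giving, for every
+# pixel, the (row, col) coordinates to pull values from. The composed field is
+# obtained by resampling each component of the current field with the update.
+def compose_def_fields(def_current, def_update):
+    coords = [def_update[..., 0], def_update[..., 1]]
+    composed = np.empty_like(def_current)
+    for c in range(2):
+        composed[..., c] = map_coordinates(def_current[..., c], coords, order=1)
+    return composed
+```
+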
-Now perform a registration using the same parameters as above but use composition rather than addition to update the transformation, i.e. use the following parameters: `sigma_elastic = 0.5, sigma_fluid = 1, num_lev = 3, use_composition = True`.
-
-Here is a summary of the registration:
-
+* Now perform a registration using the same parameters as above (i.e. `sigma_elastic=0.5`) but use composition rather than addition to update the transformation by setting `use_composition=True`.
-You will notice that the deformation field no longer contains any folding:
+The final display should look like:
+
-
+If you zoom in on the deformation field you will notice that the deformation field no longer contains any folding, but the Jacobian determinant map is still very patchy (note, the intensity-colour scale is different in the image below compared to the previous image as the min and max values are different):
-but the Jacobian determinant map is still very patchy
+
-
+This indicates that the deformation is still not very physically plausible. And if you look at the warped image you will see that some of the features in the lungs are better aligned when addition was used for updating the transformation, although the MSD value is lower when composition is used (63.01) than when addition is used (64.07). But there is no folding when using composition and there is folding when using addition. Which registration should be considered the 'best' will depend on the specific application (although neither would be considered very good for most applications!).
-and the max value is larger (note – a different intensity scale is used compared to the Jacobian determinant image on the previous page to show there is a different range of values).
+* Run another registration using a set of parameters that you previously found gave a good result when adding the updates, but now use `use_composition=True`. How do the results compare to the previous ones?
-This indicates that the deformation is still not very physically plausible. And if you look at the warped image you will see that some of the features in the lungs are better aligned when addition was used for updating the transformation, although the MSD value is lower when composition is used (63.10) than when addition is used (64.07). But there is no folding when using composition and there is folding when using addition.
+## 7. Other parameters
-So which registration do you think is best?
+As you will have seen, there are a number of other optional parameters that can be set. The parameters `disp_spacing`, `scale_update_for_display`, `disp_method_df`, and `disp_method_up` all affect how the deformation field and update are displayed but have no effect on the actual registration itself. The comments/help should explain what these parameters do.
-### 2.6. Other parameters
-As you will have seen, there are a number of other optional parameters that can be set. The parameters `disp_spacing`, `scale_update_for_display`, `disp_method_df`, and `disp_method_up` all effect how the deformation field and update are displayed but have no effect on the actual registration itself. The comments/help should explain how these parameters work.
+The `disp_freq` parameter controls how often the plots are updated. The registrations themselves run relatively quickly, but updating the images takes some time, so displaying them frequently makes the registrations run considerably slower. However, it allows you to see how the deformation field and warped image change during the course of the registration, which can be very useful for understanding why the registration produces the result it does. If you set `disp_freq` to 0 then the images will only be updated at the start of each level and to show the final result. If you do this you will notice that the registration runs much quicker, but you miss all the details of how the registration arrives at the result.
The `max_it` parameter sets the maximum number of iterations to be performed. The default value is 1000, but you should have noticed that usually the registration stops well before reaching this number of iterations. This is because the registration checks for an improvement in the MSD at each iteration, and if there has not been an improvement it will move on to the next resolution level or finish if it is already on the last level. This behaviour can be disabled by setting the `check_MSD` parameter to false. In this case the registration will run for the full max_it number of iterations at each level.
-Try running a registration with the default parameters and `check_MSD = False`. It is also recommended to set `disp_freq = 50` so that the registration does not take too long to run. By looking at the MSD values output to the console you will notice that some iterations now cause (small) increases to the MSD. You will also notice that the registration reaches a stage at each level where the result starts to ‘oscillate’, with a few iterations making minor increases to the MSD and then a few making minor decreases to the MSD, and barely any noticeable changes to the warped image or deformation field. The final MSD value is lower than when `check_MSD` was set to true (74.53 compared to 80.20), but at the expense of running many more iterations. We get a similar result (MSD = 75.21) by setting `max_it` to 500 and only running half as many iterations, indicating that for at least the last 500 iterations the result was mostly oscillating and not improving, but the problem is it is difficult to know how many iterations will be required before the results start to ‘oscillate’ as this will depend on both the images being registered and the other parameters.
+* Try running a registration with the default parameters and `check_MSD = False`. It is also recommended to set `disp_freq = 50` so that the registration does not take too long to run.
+
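+Assuming the images are loaded as before, this run would look something like:
+
+```python
+from demonsReg import demonsReg
+
+# disable the MSD-improvement check and only update the display every 50 iterations
+warped_image, def_field = demonsReg(cine_MR_1, cine_MR_2, check_MSD=False, disp_freq=50)
+```
+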
+By looking at the MSD values output to the notebook you will notice that some iterations now cause (small) increases to the MSD. You will also notice that the registration reaches a stage at each level where the result starts to ‘oscillate’, with a few iterations making minor increases to the MSD and then a few making minor decreases to the MSD, and barely any noticeable changes to the warped image or deformation field. The final MSD value is lower than when `check_MSD` was set to true (74.52 compared to 80.20), but at the expense of running many more iterations.
+
+* Now try running the same registration but with `max_it` set to 500.
+
+We get a similar result (MSD = 75.21) with `max_it` set to 500, i.e. running only half as many iterations, indicating that for at least the last 500 iterations the result was mostly oscillating and not improving. The problem is that it is difficult to know how many iterations will be required before the results start to ‘oscillate’, as this will depend on both the images being registered and the other parameters.
-The last parameter that can be set is `use_target_grad`. This is false by default, and setting it to true will cause the registration to use the spatial gradient of the target image when calculating the demons forces rather than the spatial gradient of the source image, i.e. it will use the original demons forces rather than the common demons forces. If you run a registration with the default parameters and `use_target_grad` set to true and compare it to the results when `use_target_grad` is false, you will see there are differences in the results. However, each result aligns some regions/structures better than the other, and neither of the results are clearly the best overall (the final MSC is lower when `use_target_grad` is false, but this is not always the case when different parameters are used).
+The last parameter that can be set is `use_target_grad`. This is false by default, and setting it to true will cause the registration to use the spatial gradient of the target image when calculating the demons forces rather than the spatial gradient of the source image, i.e. it will use the original demons forces rather than the common demons forces.
-### 2.7. Deeper breath image
+* Run a registration with the default parameters and `use_target_grad` set to true.
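+
+Assuming the images are loaded as before, this would look something like:
+
+```python
+from demonsReg import demonsReg
+
+# use the spatial gradient of the target image for the demons forces (the 'original' demons forces)
+warped_image, def_field = demonsReg(cine_MR_1, cine_MR_2, use_target_grad=True)
+```
+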
-Now try registering `cine_MR_1.png` to `cine_MR_3.png`, which is a more challenging registration due to the deep breath taken when `cine_MR_3.png` was acquired.
+If you compare these results to those obtained when `use_target_grad` is false, you will see there are differences. However, each result aligns some regions/structures better than the other, and neither is clearly the best overall (the final MSD is lower when `use_target_grad` is false, but this is not always the case when different parameters are used).
-Experiment with different parameters – can you find some that give a good result?
+## 8. Registering the deep-breath image
-Try swapping the source and target image over. Do you think you get better results with `cine_MR_3.png` as the source or target image (or are they equally bad for both!)?
+* Now try registering `cine_MR_1.png` to `cine_MR_3.png`.
+ * It is much more difficult to get a good result for this registration due to the deep breath taken when `cine_MR_3.png` was acquired. Experiment with different parameters – can you find some that give a good result?
+ * Try swapping the source and target image over. Do you think you get better results with `cine_MR_3.png` as the source or target image (or are they equally bad for both!)?
diff --git a/example_solutions/practical1_notebook_solution.ipynb b/example_solutions/practical1_notebook_solution.ipynb
new file mode 100644
index 00000000..1041ebc8
--- /dev/null
+++ b/example_solutions/practical1_notebook_solution.ipynb
@@ -0,0 +1,302 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Practical 1:An introduction to viewing and manipulating medical imaging data"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 1. Viewing images with ITK-SNAP\n",
+ "\n",
+ "#### This section is completed using ITK-SNAP.\n",
+ "\n",
+ "## 2. Viewing and understanding the NIfTI header with NiBabel\n",
+ "\n",
+ "### 2.1. Reading and displaying the NIfTI header"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# import packages\n",
+ "import numpy as np\n",
+ "import nibabel as nib"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# load the nifti file saved in section 1\n",
+ "ct_for_pet_nii = nib.load(\"data/practical1/CT_for_PET.nii.gz\")\n",
+ "\n",
+ "#this returns a Nifti1Image object\n",
+ "print(type(ct_for_pet_nii))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# display the size/shape of the image\n",
+ "print(ct_for_pet_nii.shape)\n",
+ "\n",
+ "# display the data type of the image\n",
+ "print(ct_for_pet_nii.get_data_dtype())\n",
+ "\n",
+ "# display the affine matrix for the image\n",
+ "print(ct_for_pet_nii.affine)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# print the header\n",
+ "print(ct_for_pet_nii.header)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### 2.2 Specifying the affine transform"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# print the matrix represented by the qform\n",
+ "print(ct_for_pet_nii.get_qform())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 3. Modifying NIfTI images and headers with NiBabel\n",
+ "\n",
+ "### 3.1. Changing the data type and qform code"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# read the image data of the Nifti1Image object\n",
+ "# it gets loaded as a numpy array and casted to float64\n",
+ "ct_for_pet_img = ct_for_pet_nii.get_fdata()\n",
+ "\n",
+ "# check the type of the array\n",
+ "print(type(ct_for_pet_img))\n",
+ "\n",
+ "# check the type of the array elements\n",
+ "print(ct_for_pet_img.dtype)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# change the data type to float32\n",
+ "ct_for_pet_nii.set_data_dtype(np.float32)\n",
+ "\n",
+ "# set the qform code to unkown (0)\n",
+ "ct_for_pet_nii.set_qform(None, code='unknown')\n",
+ "\n",
+ "# check the header has been updated\n",
+ "print(ct_for_pet_nii.header)\n",
+ "\n",
+ "# save the float32 image to disk\n",
+ "nib.save(ct_for_pet_nii, 'data/practical1/CT_for_PET_float32.nii.gz')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "#### Check data types with NiBabel"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# load the new image and check the data type is float32\n",
+ "ct_for_pet_float32_nii = nib.load(\"data/practical1/CT_for_PET_float32.nii.gz\")\n",
+ "print(ct_for_pet_float32_nii.get_data_dtype())\n",
+ "\n",
+ "# load the original image and check the data type is still int16\n",
+ "ct_for_pet_orig_nii = nib.load(\"data/practical1/CT_for_PET.nii.gz\")\n",
+ "print(ct_for_pet_orig_nii.get_data_dtype())"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### 3.2. Cropping images"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# create a new array containing a copy of the desired slices\n",
+ "x_first = 91\n",
+ "x_last = 390\n",
+ "y_first = 131\n",
+ "y_last = 375\n",
+ "z_first = 21\n",
+ "z_last = 155\n",
+ "ct_for_pet_cropped_img = ct_for_pet_img[x_first:x_last+1, y_first:y_last+1, z_first:z_last+1].copy()\n",
+ "\n",
+ "# check the size of the arrays containing the original image and the cropped image\n",
+ "print(ct_for_pet_img.shape)\n",
+ "print(ct_for_pet_cropped_img.shape)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# create a new Nifti1Image object using the header and affine from the uncropped image\n",
+ "ct_for_pet_cropped_nii = nib.nifti1.Nifti1Image(ct_for_pet_cropped_img, ct_for_pet_nii.affine, ct_for_pet_nii.header)\n",
+ "\n",
+ "# check the shape and header of the new Nifti1Image object\n",
+ "print(ct_for_pet_cropped_nii.shape)\n",
+ "print(ct_for_pet_cropped_nii.header)\n",
+ "\n",
+ "# save the cropped image\n",
+ "nib.save(ct_for_pet_cropped_nii, \"data/practical1/CT_for_PET_cropped.nii.gz\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# calculate the world coordinates of first voxel in the cropped image\n",
+ "cropped_origin = ct_for_pet_nii.affine @ np.array([x_first, y_first, z_first, 1])\n",
+ "print(cropped_origin)\n",
+ "\n",
+ "# use these to update the corresponding values in the affine matrix for the cropped image\n",
+ "ct_for_pet_cropped_nii.affine[0,3] = cropped_origin[0]\n",
+ "ct_for_pet_cropped_nii.affine[1,3] = cropped_origin[1]\n",
+ "ct_for_pet_cropped_nii.affine[2,3] = cropped_origin[2]\n",
+ "print(ct_for_pet_cropped_nii.affine)\n",
+ "\n",
+ "# save the cropped image - this also updates the sform values in the header using the updated affine\n",
+ "print(ct_for_pet_cropped_nii.header.get_sform())\n",
+ "nib.save(ct_for_pet_cropped_nii, \"data/practical1/CT_for_PET_cropped_aligned.nii.gz\")\n",
+ "print(ct_for_pet_cropped_nii.header.get_sform())\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# we can do the same as above using nibabel's slicer attribute\n",
+ "ct_for_pet_slicer_nii = ct_for_pet_nii.slicer[x_first:x_last+1, y_first:y_last+1, z_first:z_last+1]\n",
+ "print(ct_for_pet_slicer_nii.header)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### 3.3. Aligning images"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 61,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: Add you code here to align the centre of the inhale_BH_CT with the centre of CT_for_PET\n",
+ "\n",
+ "# find the centre of the ct_for_pet image in voxel coordinates\n",
+ "ct_for_pet_centre_vox = np.ones(4)\n",
+ "ct_for_pet_centre_vox[0] = (ct_for_pet_nii.shape[0] - 1) / 2\n",
+ "ct_for_pet_centre_vox[1] = (ct_for_pet_nii.shape[1] - 1) / 2\n",
+ "ct_for_pet_centre_vox[2] = (ct_for_pet_nii.shape[2] - 1) / 2\n",
+ "\n",
+ "# find the centre of the ct_for_pet image in world coordinates\n",
+ "ct_for_pet_centre_world = ct_for_pet_nii.affine @ ct_for_pet_centre_vox\n",
+ "\n",
+ "# find the centre of the inhale_ct image in voxel coordinates\n",
+ "inhale_ct_nii = nib.load('data/practical1/inhale_BH_CT.nii.gz')\n",
+ "inhale_ct_centre_vox = np.ones(4)\n",
+ "inhale_ct_centre_vox[0] = (inhale_ct_nii.shape[0] - 1) / 2\n",
+ "inhale_ct_centre_vox[1] = (inhale_ct_nii.shape[1] - 1) / 2\n",
+ "inhale_ct_centre_vox[2] = (inhale_ct_nii.shape[2] - 1) / 2\n",
+ "\n",
+ "# find the centre of the inhale_ct image in world coordinates\n",
+ "inhale_ct_centre_world = inhale_ct_nii.affine @ inhale_ct_centre_vox\n",
+ "\n",
+ "# find the translation between the centres of the images (in world coordinates)\n",
+ "translation = ct_for_pet_centre_world - inhale_ct_centre_world\n",
+ "\n",
+ "# add the translation to the cooresponding elements of the affine matrix for the inhale_ct image\n",
+ "inhale_ct_nii.affine[0,3] = inhale_ct_nii.affine[0,3] + translation[0]\n",
+ "inhale_ct_nii.affine[1,3] = inhale_ct_nii.affine[1,3] + translation[1]\n",
+ "inhale_ct_nii.affine[2,3] = inhale_ct_nii.affine[2,3] + translation[2]\n",
+ "\n",
+ "# save the aligned image - the updated affine will be used to update the header when it is saved\n",
+ "nib.save(inhale_ct_nii, \"data/practical1/inhale_BH_CT_aligned.nii.gz\")"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "ideas-reg",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.13.1"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/example_solutions/practical2_notebook_solution.ipynb b/example_solutions/practical2_notebook_solution.ipynb
new file mode 100644
index 00000000..51058dfb
--- /dev/null
+++ b/example_solutions/practical2_notebook_solution.ipynb
@@ -0,0 +1,577 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Practical 2: Affine transformations and resampling images\n",
+ "\n",
+ "## 1. Loading and displaying images"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 1,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# import the required packages\n",
+ "import numpy as np\n",
+ "import skimage.io\n",
+ "import matplotlib\n",
+ "import matplotlib.pyplot as plt\n",
+ "import utils as ut\n",
+ "\n",
+ "# the next lines make figures display in separate windows instead of in the notebook\n",
+ "# this is required for the animations to work correctly\n",
+ "matplotlib.use('TkAgg')\n",
+ "plt.ion()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "uint8\n",
+ "float64\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Load the 2D lung MRI image using the imread function from the scikit-image python library\n",
+ "img = skimage.io.imread('data/practical2/lung_MRI_slice.png')\n",
+ "\n",
+ "# Check the data type of the image\n",
+ "print(img.dtype)\n",
+ "\n",
+ "# convert data type to double to avoid errors when processing integers\n",
+ "img = np.double(img)\n",
+ "\n",
+ "# check new data type\n",
+ "print(img.dtype)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: Add your own code here to reorientate the image into 'standard orientation'\n",
+ "img = img.T\n",
+ "img = np.flip(img, 1)\n",
+ "\n",
+ "# display the image using the dispImage function, it should open in a separate window\n",
+ "ut.dispImage(img)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 2. Translating and resampling images"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[[ 1 0 10]\n",
+ " [ 0 1 20]\n",
+ " [ 0 0 1]]\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Create a 2D array representing a 2D affine matrix for a translation by 10 pixels in x and 20 pixels in y\n",
+ "# Note: numpy has a matrix class, but it recommends not to use it and use standard arrays instead, so arrays should be used in these exercises\n",
+ "# Matrix multiplication can be performed between two arrays using the @ operator or the numpy.matmul function\n",
+ "\n",
+ "# TODO: edit the array to represent a 2D affine matrix for a translation by 10 pixels in x and 20 pixels in y\n",
+ "T = np.array([[1, 0, 10], [0, 1, 20], [0, 0, 1]])\n",
+ "print(T)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: edit the code below to create a deformation field from the affine matrix\n",
+ "# and then use the deformation field to resample the image\n",
+ "# The function definitions in uitls.py explain what the inputs and outputs of the functions should be\n",
+ "num_pix_x, num_pix_y = img.shape\n",
+ "def_field = ut.defFieldFromAffineMatrix(T, num_pix_x, num_pix_y)\n",
+ "img_resampled = ut.resampImageWithDefField(img, def_field)\n",
+ "\n",
+ "# display the resampled image\n",
+ "ut.dispImage(img_resampled)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "nan\n"
+ ]
+ }
+ ],
+ "source": [
+ "# check the value of pixel 255,255 in the resampled image\n",
+ "print(img_resampled[255,255])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 6,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# TODO: add code to resample the image using nearest neighbour and splinef2d interpolation\n",
+ "img_resampled_nn = ut.resampImageWithDefField(img, def_field, interp_method='nearest')\n",
+ "img_resampled_sp = ut.resampImageWithDefField(img, def_field, interp_method='splinef2d')\n",
+ "\n",
+ "# TODO: display the results in separate windows\n",
+ "plt.figure()\n",
+ "ut.dispImage(img_resampled_nn)\n",
+ "plt.figure()\n",
+ "ut.dispImage(img_resampled_sp)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: display the difference images between the new results and the original image in separate windows\n",
+ "plt.figure()\n",
+ "ut.dispImage(img_resampled - img_resampled_nn)\n",
+ "plt.figure()\n",
+ "ut.dispImage(img_resampled - img_resampled_sp)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: repeat the above steps for a translation by 10.5 pixels in x and 20.5 pixels in y\n",
+ "T = np.array([[1, 0, 10.5], [0, 1, 20.5], [0, 0, 1]])\n",
+ "def_field = ut.defFieldFromAffineMatrix(T, num_pix_x, num_pix_y)\n",
+ "img_resampled = ut.resampImageWithDefField(img, def_field)\n",
+ "img_resampled_nn = ut.resampImageWithDefField(img, def_field, interp_method='nearest')\n",
+ "img_resampled_sp = ut.resampImageWithDefField(img, def_field, interp_method='splinef2d')\n",
+ "plt.figure()\n",
+ "ut.dispImage(img_resampled - img_resampled_nn)\n",
+ "plt.figure()\n",
+ "ut.dispImage(img_resampled - img_resampled_sp)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: display the difference images with intensity limits of [-20, 20]\n",
+ "plt.figure()\n",
+ "ut.dispImage(img_resampled - img_resampled_nn, int_lims=[-20, 20])\n",
+ "plt.figure()\n",
+ "ut.dispImage(img_resampled - img_resampled_sp, int_lims=[-20, 20])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 3. Rotating images"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# define a function to calculate the affine matrix for a rotation about a point\n",
+ "def affineMatrixForRotationAboutPoint(theta, p_coords):\n",
+ " \"\"\"\n",
+ " function to calculate the affine matrix corresponding to an anticlockwise\n",
+ " rotation about a point\n",
+ "\n",
+ " SYNTAX:\n",
+ " aff_mat = affineMatrixForRotationAboutPoint(theta, p_coords)\n",
+ " \n",
+ " INPUTS:\n",
+ " theta - the angle of the rotation, specified in degrees\n",
+ " p_coords - the 2D coordinates of the point that is the centre of rotation.\n",
+ " p_coords[0] is the x coordinate,\n",
+ " p_coords[1] is the y coordinate\n",
+ " \n",
+ " OUTPUTS:\n",
+ " aff_mat - a numpy array representing the 3 x 3 affine matrix\n",
+ " \"\"\"\n",
+ " \n",
+ " # TODO: implement the function\n",
+ " #convert theta to radians\n",
+ " theta = np.pi * theta / 180\n",
+ "\n",
+ " #form matrices for translation and rotation\n",
+ " T1 = np.array([[1, 0, -p_coords[0]],\n",
+ " [0, 1, -p_coords[1]],\n",
+ " [0,0,1]])\n",
+ " T2 = np.array([[1, 0, p_coords[0]],\n",
+ " [0, 1, p_coords[1]],\n",
+ " [0,0,1]])\n",
+ " R = np.array([[np.cos(theta), -np.sin(theta), 0],\n",
+ " [np.sin(theta), np.cos(theta), 0],\n",
+ " [0, 0, 1]])\n",
+ " \n",
+ " #compose matrices using matrix multiplication\n",
+ " aff_mat = T2 @ R @ T1\n",
+ "\n",
+ " # return the affine matrix\n",
+ " return aff_mat"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[[ 0.9961947 -0.08715574 11.59753319]\n",
+ " [ 0.08715574 0.9961947 -10.62718121]\n",
+ " [ 0. 0. 1. ]]\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Close any open figures before continuing\n",
+ "plt.close('all')\n",
+ "\n",
+ "# TODO: Use the above function to calculate the affine matrix representing an anticlockwise rotation\n",
+ "# of 5 degrees about the centre of the image\n",
+ "R = affineMatrixForRotationAboutPoint(5, [(num_pix_x - 1)/2, (num_pix_y - 1)/2])\n",
+ "print(R)\n",
+ "\n",
+ "# TODO: use this rotation to transform the original image\n",
+ "def_field = ut.defFieldFromAffineMatrix(R, num_pix_x, num_pix_y)\n",
+ "img_resampled = ut.resampImageWithDefField(img, def_field)\n",
+ "\n",
+ "# TODO: display the resampled image using the intensity limits of the original image\n",
+ "plt.figure()\n",
+ "int_lims_img = [np.min(img), np.max(img)]\n",
+ "ut.dispImage(img_resampled, int_lims=int_lims_img)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: apply the same transformation again to the resampled image and display the result. \n",
+ "# repeat this 71 times so that the image appears to rotate a full 360 degrees.\n",
+ "for n in range(71):\n",
+ " img_resampled = ut.resampImageWithDefField(img_resampled, def_field)\n",
+ " ut.dispImage(img_resampled, int_lims=int_lims_img)\n",
+ " plt.pause(0.05) # add a short pause so the figure display updates"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 31,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: repeat above code with 0 padding value\n",
+ "# need to first resample the original image and display it\n",
+ "img_resampled = ut.resampImageWithDefField(img, def_field, pad_value=0)\n",
+ "ut.dispImage(img_resampled, int_lims=int_lims_img)\n",
+ "plt.pause(0.05) # add a short pause so the figure display updates\n",
+ "for n in range(71):\n",
+ " img_resampled = ut.resampImageWithDefField(img_resampled, def_field, pad_value=0)\n",
+ " ut.dispImage(img_resampled, int_lims=int_lims_img)\n",
+ " plt.pause(0.05) # add a short pause so the figure display updates"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 32,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: repeat above code using nearest neighbour interpolation\n",
+ "img_resampled = ut.resampImageWithDefField(img, def_field, pad_value=0, interp_method='nearest')\n",
+ "ut.dispImage(img_resampled, int_lims=int_lims_img)\n",
+ "plt.pause(0.05) # add a short pause so the figure display updates\n",
+ "for n in range(71):\n",
+ " img_resampled = ut.resampImageWithDefField(img_resampled, def_field, pad_value=0, interp_method='nearest')\n",
+ " ut.dispImage(img_resampled, int_lims=int_lims_img)\n",
+ " plt.pause(0.05) # add a short pause so the figure display updates"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 34,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: repeat above code using splinef2d interpolation\n",
+ "img_resampled = ut.resampImageWithDefField(img, def_field, pad_value=0, interp_method='splinef2d')\n",
+ "ut.dispImage(img_resampled, int_lims=int_lims_img)\n",
+ "plt.pause(0.05) # add a short pause so the figure display updates\n",
+ "for n in range(71):\n",
+ " img_resampled = ut.resampImageWithDefField(img_resampled, def_field, pad_value=0, interp_method='splinef2d')\n",
+ " ut.dispImage(img_resampled, int_lims=int_lims_img)\n",
+ " plt.pause(0.05) # add a short pause so the figure display updates"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 4. Composing transformations"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "C:\\Users\\jmccl\\AppData\\Local\\Temp\\ipykernel_17752\\229672114.py:14: MatplotlibDeprecationWarning: The tostring_rgb function was deprecated in Matplotlib 3.8 and will be removed in 3.10. Use buffer_rgba instead.\n",
+ " frame = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')\n",
+ "C:\\Users\\jmccl\\AppData\\Local\\Temp\\ipykernel_17752\\229672114.py:25: MatplotlibDeprecationWarning: The tostring_rgb function was deprecated in Matplotlib 3.8 and will be removed in 3.10. Use buffer_rgba instead.\n",
+ " frame = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')\n"
+ ]
+ }
+ ],
+ "source": [
+ "# TODO: write code below that makes an animation of the rotating image as above (using linear interpolation)\n",
+ "# but composes the transformations to avoid multiple resamplings of the image\n",
+ "\n",
+ "#import imageio\n",
+ "#fig = plt.figure()\n",
+ "#frames = []\n",
+ "\n",
+ "R_current = R\n",
+ "def_field = ut.defFieldFromAffineMatrix(R_current, num_pix_x, num_pix_y)\n",
+ "img_resampled = ut.resampImageWithDefField(img, def_field, pad_value=0)\n",
+ "ut.dispImage(img_resampled, int_lims=int_lims_img)\n",
+ "plt.pause(0.05) # add a short pause so the figure display updates\n",
+ "\n",
+ "#frame = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')\n",
+ "#frame = frame.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n",
+ "#frames.append(frame)\n",
+ "\n",
+ "for n in range(71):\n",
+ " R_current = R_current @ R\n",
+ " def_field = ut.defFieldFromAffineMatrix(R_current, num_pix_x, num_pix_y)\n",
+ " img_resampled = ut.resampImageWithDefField(img, def_field, pad_value=0)\n",
+ " ut.dispImage(img_resampled, int_lims=int_lims_img)\n",
+ " plt.pause(0.05) # add a short pause so the figure display updates\n",
+ "\n",
+ "# frame = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')\n",
+ "# frame = frame.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n",
+ "# frames.append(frame)\n",
+ "\n",
+ "#imageio.mimsave('trans_rot_final_image_compose_linear.gif', frames, fps=10, loop=0)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "C:\\Users\\jmccl\\AppData\\Local\\Temp\\ipykernel_17752\\88696620.py:10: MatplotlibDeprecationWarning: The tostring_rgb function was deprecated in Matplotlib 3.8 and will be removed in 3.10. Use buffer_rgba instead.\n",
+ " frame = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')\n",
+ "C:\\Users\\jmccl\\AppData\\Local\\Temp\\ipykernel_17752\\88696620.py:21: MatplotlibDeprecationWarning: The tostring_rgb function was deprecated in Matplotlib 3.8 and will be removed in 3.10. Use buffer_rgba instead.\n",
+ " frame = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')\n",
+ "C:\\Users\\jmccl\\AppData\\Local\\Temp\\ipykernel_17752\\88696620.py:36: MatplotlibDeprecationWarning: The tostring_rgb function was deprecated in Matplotlib 3.8 and will be removed in 3.10. Use buffer_rgba instead.\n",
+ " frame = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')\n",
+ "C:\\Users\\jmccl\\AppData\\Local\\Temp\\ipykernel_17752\\88696620.py:47: MatplotlibDeprecationWarning: The tostring_rgb function was deprecated in Matplotlib 3.8 and will be removed in 3.10. Use buffer_rgba instead.\n",
+ " frame = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')\n"
+ ]
+ }
+ ],
+ "source": [
+ "# TODO: repeat the animation using nearest neighbour and splinef2d interpolation\n",
+ "#frames = []\n",
+ "\n",
+ "R_current = R\n",
+ "def_field = ut.defFieldFromAffineMatrix(R_current, num_pix_x, num_pix_y)\n",
+ "img_resampled = ut.resampImageWithDefField(img, def_field, pad_value=0, interp_method='nearest')\n",
+ "ut.dispImage(img_resampled, int_lims=int_lims_img)\n",
+ "plt.pause(0.05) # add a short pause so the figure display updates\n",
+ "\n",
+ "#frame = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')\n",
+ "#frame = frame.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n",
+ "#frames.append(frame)\n",
+ "\n",
+ "for n in range(71):\n",
+ " R_current = R_current @ R\n",
+ " def_field = ut.defFieldFromAffineMatrix(R_current, num_pix_x, num_pix_y)\n",
+ " img_resampled = ut.resampImageWithDefField(img, def_field, pad_value=0, interp_method='nearest')\n",
+ " ut.dispImage(img_resampled, int_lims=int_lims_img)\n",
+ " plt.pause(0.05) # add a short pause so the figure display updates\n",
+ "\n",
+ "# frame = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')\n",
+ "# frame = frame.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n",
+ "# frames.append(frame)\n",
+ "\n",
+ "#imageio.mimsave('trans_rot_final_image_compose_nearest.gif', frames, fps=10, loop=0)\n",
+ "\n",
+ "\n",
+ "#frames = []\n",
+ "\n",
+ "R_current = R\n",
+ "def_field = ut.defFieldFromAffineMatrix(R_current, num_pix_x, num_pix_y)\n",
+ "img_resampled = ut.resampImageWithDefField(img, def_field, pad_value=0, interp_method='splinef2d')\n",
+ "ut.dispImage(img_resampled, int_lims=int_lims_img)\n",
+ "plt.pause(0.05) # add a short pause so the figure display updates\n",
+ "\n",
+ "#frame = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')\n",
+ "#frame = frame.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n",
+ "#frames.append(frame)\n",
+ "\n",
+ "for n in range(71):\n",
+ " R_current = R_current @ R\n",
+ " def_field = ut.defFieldFromAffineMatrix(R_current, num_pix_x, num_pix_y)\n",
+ " img_resampled = ut.resampImageWithDefField(img, def_field, pad_value=0, interp_method='splinef2d')\n",
+ " ut.dispImage(img_resampled, int_lims=int_lims_img)\n",
+ " plt.pause(0.05) # add a short pause so the figure display updates\n",
+ "\n",
+ "# frame = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')\n",
+ "# frame = frame.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n",
+ "# frames.append(frame)\n",
+ "\n",
+ "#imageio.mimsave('trans_rot_final_image_compose_splinef2d.gif', frames, fps=10, loop=0)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 5. Push interpolation"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "C:\\Users\\jmccl\\AppData\\Local\\Temp\\ipykernel_17752\\3998973965.py:11: MatplotlibDeprecationWarning: The tostring_rgb function was deprecated in Matplotlib 3.8 and will be removed in 3.10. Use buffer_rgba instead.\n",
+ " frame = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')\n",
+ "C:\\Users\\jmccl\\AppData\\Local\\Temp\\ipykernel_17752\\3998973965.py:22: MatplotlibDeprecationWarning: The tostring_rgb function was deprecated in Matplotlib 3.8 and will be removed in 3.10. Use buffer_rgba instead.\n",
+ " frame = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')\n"
+ ]
+ }
+ ],
+ "source": [
+ "# TODO: copy and modify the above code to use push interpolation instead of pull interpolation\n",
+ "#fig = plt.figure()\n",
+ "#frames = []\n",
+ "\n",
+ "R_current = R\n",
+ "def_field = ut.defFieldFromAffineMatrix(R_current, num_pix_x, num_pix_y)\n",
+ "img_resampled = ut.resampImageWithDefFieldPushInterp(img, def_field)\n",
+ "ut.dispImage(img_resampled, int_lims=int_lims_img)\n",
+ "plt.pause(0.05) # add a short pause so the figure display updates\n",
+ "\n",
+ "#frame = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')\n",
+ "#frame = frame.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n",
+ "#frames.append(frame)\n",
+ "\n",
+ "for n in range(71):\n",
+ " R_current = R_current @ R\n",
+ " def_field = ut.defFieldFromAffineMatrix(R_current, num_pix_x, num_pix_y)\n",
+ " img_resampled = ut.resampImageWithDefFieldPushInterp(img, def_field)\n",
+ " ut.dispImage(img_resampled, int_lims=int_lims_img)\n",
+ " plt.pause(0.05) # add a short pause so the figure display updates\n",
+ "\n",
+ "# frame = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')\n",
+ "# frame = frame.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n",
+ "# frames.append(frame)\n",
+ "\n",
+ "#imageio.mimsave('trans_rot_final_image_push.gif', frames, fps=3, loop=0)"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "ipmi_reg",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.13.1"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/example_solutions/practical3_notebook_solution.ipynb b/example_solutions/practical3_notebook_solution.ipynb
new file mode 100644
index 00000000..ed6253db
--- /dev/null
+++ b/example_solutions/practical3_notebook_solution.ipynb
@@ -0,0 +1,497 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Practical 3: Exploring similarity measures for cost functions"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 1,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# import the required packages\n",
+ "import numpy as np\n",
+ "import skimage.io\n",
+ "import matplotlib\n",
+ "import matplotlib.pyplot as plt\n",
+ "import utils as ut\n",
+ "\n",
+ "# the next lines make figures display in separate windows instead of in the notebook\n",
+ "# this is required for the animations to work correctly\n",
+ "matplotlib.use('TkAgg')\n",
+ "plt.ion()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Inspect the images"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Load in the three 2D images required for these exercises using the imread function from the scikit-image python library\n",
+ "ct_img_int8 = skimage.io.imread('data/practical3/ct_slice_int8.png')\n",
+ "ct_img_int16 = skimage.io.imread('data/practical3/ct_slice_int16.png')\n",
+ "mr_img_int16 = skimage.io.imread('data/practical3/mr_slice_int16.png')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "uint8\n",
+ "uint16\n",
+ "uint16\n"
+ ]
+ }
+ ],
+ "source": [
+ "# TODO: Display the data type of each image\n",
+ "print(ct_img_int8.dtype)\n",
+ "print(ct_img_int16.dtype)\n",
+ "print(mr_img_int16.dtype)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: Convert all the images to double data type and reorient them into cartesian orientation\n",
+ "ct_img_int8 = np.double(ct_img_int8)\n",
+ "ct_img_int8 = ct_img_int8.T\n",
+ "ct_img_int8 = np.flip(ct_img_int8, 1)\n",
+ "ct_img_int16 = np.double(ct_img_int16)\n",
+ "ct_img_int16 = ct_img_int16.T\n",
+ "ct_img_int16 = np.flip(ct_img_int16, 1)\n",
+ "mr_img_int16 = np.double(mr_img_int16)\n",
+ "mr_img_int16 = mr_img_int16.T\n",
+ "mr_img_int16 = np.flip(mr_img_int16, 1)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: Display each image in a separate figure\n",
+ "plt.figure()\n",
+ "ut.dispImage(ct_img_int8)\n",
+ "plt.figure()\n",
+ "ut.dispImage(ct_img_int16)\n",
+ "plt.figure()\n",
+ "ut.dispImage(mr_img_int16)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 2. Moving images in and out of alignment"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Write code so that on each iteration of the loop the images are all rotated by the current value of theta about the point 10,10 \n",
+ "# and the resampled 8 bit CT image is displayed\n",
+ "theta = np.arange(-90, 91, 1)\n",
+ "num_pix_x, num_pix_y = ct_img_int8.shape\n",
+ "\n",
+ "for n in range(theta.size): \n",
+ " # create affine matrix and corresponding deformation field\n",
+ " aff_mat = ut.affineMatrixForRotationAboutPoint(theta[n], [10, 10])\n",
+ " def_field = ut.defFieldFromAffineMatrix(aff_mat, num_pix_x, num_pix_y)\n",
+ "\n",
+ " # resample the images\n",
+ " ct_img_int8_resamp = ut.resampImageWithDefField(ct_img_int8, def_field)\n",
+ " ct_img_int16_resamp = ut.resampImageWithDefField(ct_img_int16, def_field)\n",
+ " mr_img_int16_resamp = ut.resampImageWithDefField(mr_img_int16, def_field)\n",
+ " \n",
+ " # TODO: display the resampled 8 bit CT image\n",
+ " ut.dispImage(ct_img_int8_resamp) \n",
+ " \n",
+ " # add a short pause so the figure display updates\n",
+ " plt.pause(0.05)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "C:\\Users\\jmccl\\AppData\\Local\\Temp\\ipykernel_7312\\3448956577.py:22: MatplotlibDeprecationWarning: The tostring_rgb function was deprecated in Matplotlib 3.8 and will be removed in 3.10. Use buffer_rgba instead.\n",
+ " frame = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')\n"
+ ]
+ }
+ ],
+ "source": [
+ "# this cell saves a gif of the animation from the previous cell, used on the instructions website\n",
+ "# it will not be implemented by students and does not need to be run\n",
+ "\n",
+ "import imageio\n",
+ "plt.close('all')\n",
+ "fig = plt.figure()\n",
+ "frames = []\n",
+ "\n",
+ "for n in range(theta.size): \n",
+ " # create affine matrix and corresponding deformation field\n",
+ " aff_mat = ut.affineMatrixForRotationAboutPoint(theta[n], [10, 10])\n",
+ " def_field = ut.defFieldFromAffineMatrix(aff_mat, num_pix_x, num_pix_y)\n",
+ "\n",
+ " # resample the images\n",
+ " ct_img_int8_resamp = ut.resampImageWithDefField(ct_img_int8, def_field)\n",
+ " ct_img_int16_resamp = ut.resampImageWithDefField(ct_img_int16, def_field)\n",
+ " mr_img_int16_resamp = ut.resampImageWithDefField(mr_img_int16, def_field)\n",
+ " \n",
+ " # TODO: display the resampled 8 bit CT image\n",
+ " ut.dispImage(ct_img_int8_resamp) \n",
+ " \n",
+ " # add a short pause so the figure display updates\n",
+ " plt.pause(0.05)\n",
+ "\n",
+ " frame = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')\n",
+ " frame = frame.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n",
+ " frames.append(frame)\n",
+ "\n",
+ "imageio.mimsave('practical3-in-out-alignment-anim.gif', frames, fps=10, loop=0)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 3. Sum of Squared DIfferences (SSD) and Mean of Squared Differences (MSD)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# do the same as above but this time calculate the SSD between the original and resampled images\n",
+ "SSDs = np.zeros((theta.size, 4))\n",
+ "\n",
+ "for n in range(theta.size): \n",
+ " #TODO: copy your code from the previous cell to create the affine matrix and corresponding deformation field, and to resample the images.\n",
+ " # create affine matrix and corresponding deformation field\n",
+ " aff_mat = ut.affineMatrixForRotationAboutPoint(theta[n], [10, 10])\n",
+ " def_field = ut.defFieldFromAffineMatrix(aff_mat, num_pix_x, num_pix_y)\n",
+ "\n",
+ " # resample the images\n",
+ " ct_img_int8_resamp = ut.resampImageWithDefField(ct_img_int8, def_field)\n",
+ " ct_img_int16_resamp = ut.resampImageWithDefField(ct_img_int16, def_field)\n",
+ " mr_img_int16_resamp = ut.resampImageWithDefField(mr_img_int16, def_field)\n",
+ "\n",
+ " # Calculate the SSD values between the original and resampled images\n",
+ " # 1) The original 16-bit CT image and the resampled 16-bit CT image\n",
+ " # 2) The original 16-bit CT image and the resampled 8-bit CT image\n",
+ " # 3) The original 16-bit CT image and the resampled MR image\n",
+ " # 4) The original 8-bit CT image and the resampled 8-bit CT image\n",
+ " SSDs[n, 0] = ut.calcSSD(ct_img_int16, ct_img_int16_resamp)\n",
+ " SSDs[n, 1] = ut.calcSSD(ct_img_int16, ct_img_int8_resamp)\n",
+ " SSDs[n, 2] = ut.calcSSD(ct_img_int16, mr_img_int16_resamp)\n",
+ " SSDs[n, 3] = ut.calcSSD(ct_img_int8, ct_img_int8_resamp)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# plot the SSD values (y axis) against the angle theta (x axis)\n",
+ "plt.figure()\n",
+ "plt.subplot(2,2,1)\n",
+ "plt.plot(theta, SSDs[:, 0])\n",
+ "plt.title('SSD: 16-bit CT and 16-bit CT')\n",
+ "plt.subplot(2,2,2)\n",
+ "plt.plot(theta, SSDs[:, 1])\n",
+ "plt.title('SSD: 16-bit CT and 8-bit CT')\n",
+ "plt.subplot(2,2,3)\n",
+ "plt.plot(theta, SSDs[:, 2])\n",
+ "plt.title('SSD: 16-bit CT and 16-bit MR')\n",
+ "plt.subplot(2,2,4)\n",
+ "plt.plot(theta, SSDs[:, 3])\n",
+ "plt.title('SSD: 8-bit CT and 8-bit CT')\n",
+ "plt.tight_layout()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# do the same as above but this time calculate the MSD between the original and transformed images\n",
+ "MSDs = np.zeros((theta.size, 4))\n",
+ "\n",
+ "for n in range(theta.size):\n",
+ " #TODO: copy your code from the previous cell to create the affine matrix and corresponding deformation field, and to resample the images.\n",
+ " # create affine matrix and corresponding deformation field\n",
+ " aff_mat = ut.affineMatrixForRotationAboutPoint(theta[n], [10, 10])\n",
+ " def_field = ut.defFieldFromAffineMatrix(aff_mat, num_pix_x, num_pix_y)\n",
+ "\n",
+ " # resample the images\n",
+ " ct_img_int8_resamp = ut.resampImageWithDefField(ct_img_int8, def_field)\n",
+ " ct_img_int16_resamp = ut.resampImageWithDefField(ct_img_int16, def_field)\n",
+ " mr_img_int16_resamp = ut.resampImageWithDefField(mr_img_int16, def_field)\n",
+ " \n",
+ " # Calculate the MSD values between the original and resampled images\n",
+ " # 1) The original 16-bit CT image and the resampled 16-bit CT image\n",
+ " # 2) The original 16-bit CT image and the resampled 8-bit CT image\n",
+ " # 3) The original 16-bit CT image and the resampled MR image\n",
+ " # 4) The original 8-bit CT image and the resampled 8-bit CT image\n",
+ " MSDs[n, 0] = ut.calcMSD(ct_img_int16, ct_img_int16_resamp)\n",
+ " MSDs[n, 1] = ut.calcMSD(ct_img_int16, ct_img_int8_resamp)\n",
+ " MSDs[n, 2] = ut.calcMSD(ct_img_int16, mr_img_int16_resamp)\n",
+ " MSDs[n, 3] = ut.calcMSD(ct_img_int8, ct_img_int8_resamp)\n",
+ " "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# plot the MSD values (y axis) against the angle theta (x axis)\n",
+ "plt.figure()\n",
+ "plt.subplot(2,2,1)\n",
+ "plt.plot(theta, MSDs[:, 0])\n",
+ "plt.title('MSD: 16-bit CT and 16-bit CT')\n",
+ "plt.subplot(2,2,2)\n",
+ "plt.plot(theta, MSDs[:, 1])\n",
+ "plt.title('MSD: 16-bit CT and 8-bit CT')\n",
+ "plt.subplot(2,2,3)\n",
+ "plt.plot(theta, MSDs[:, 2])\n",
+ "plt.title('MSD: 16-bit CT and 16-bit MR')\n",
+ "plt.subplot(2,2,4)\n",
+ "plt.plot(theta, MSDs[:, 3])\n",
+ "plt.title('MSD: 8-bit CT and 8-bit CT')\n",
+ "plt.tight_layout()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 4. Normalized Cross Correlation (NCC) and Normalized Mutual Information (NMI)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 26,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Define a function to calculate the NCC between two images\n",
+ "def calcNCC(A, B):\n",
+ " \"\"\"\n",
+ " function to calculate the normalised cross correlation between two images\n",
+ "\n",
+ " SYNTAX:\n",
+ " NCC = calcNCC(A, B)\n",
+ "\n",
+ " INPUTS:\n",
+ " A - an image stored as a 2D array\n",
+ " B - an image stored as a 2D array. B must the the same size as A\n",
+ "\n",
+ " OUTPUTS:\n",
+ " NCC - the value of the normalised cross correlation\n",
+ "\n",
+ " NOTE\n",
+ " if either of the images contain NaN values these pixels should be\n",
+ " ignored when calculating the NCC.\n",
+ " \"\"\"\n",
+ "\n",
+ " # TODO: Remove pixels that contain NaN in either image\n",
+ " nan_inds = np.logical_or(np.isnan(A), np.isnan(B))\n",
+ " A = A[np.logical_not(nan_inds)]\n",
+ " B = B[np.logical_not(nan_inds)]\n",
+ "\n",
+ " # TODO: Calculate the mean and std of each image\n",
+ " mu_A = np.mean(A)\n",
+ " mu_B = np.mean(B)\n",
+ " sig_A = np.std(A)\n",
+ " sig_B = np.std(B)\n",
+ "\n",
+ " # TODO: Calculate and return the NCC\n",
+ " NCC = np.sum((A - mu_A) * (B - mu_B)) / (A.size * sig_A * sig_B)\n",
+ " return NCC\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 27,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# do the same as above but this time calculate the NCC, joint and marginal entropies between the original and transformed images\n",
+ "NCCs = np.zeros((theta.size, 4))\n",
+ "H_ABs = np.zeros((theta.size, 4))\n",
+ "H_As = np.zeros((theta.size, 4))\n",
+ "H_Bs = np.zeros((theta.size, 4))\n",
+ "\n",
+ "for n in range(theta.size): \n",
+ " #TODO: copy your code from the previous cell to create the affine matrix and corresponding deformation field, and to resample the images.\n",
+ " # create affine matrix and corresponding deformation field\n",
+ " aff_mat = ut.affineMatrixForRotationAboutPoint(theta[n], [10, 10])\n",
+ " def_field = ut.defFieldFromAffineMatrix(aff_mat, num_pix_x, num_pix_y)\n",
+ "\n",
+ " # resample the images\n",
+ " ct_img_int8_resamp = ut.resampImageWithDefField(ct_img_int8, def_field)\n",
+ " ct_img_int16_resamp = ut.resampImageWithDefField(ct_img_int16, def_field)\n",
+ " mr_img_int16_resamp = ut.resampImageWithDefField(mr_img_int16, def_field)\n",
+ "\n",
+ " # Calculate the NCC values\n",
+ " NCCs[n, 0] = calcNCC(ct_img_int16, ct_img_int16_resamp)\n",
+ " NCCs[n, 1] = calcNCC(ct_img_int16, ct_img_int8_resamp)\n",
+ " NCCs[n, 2] = calcNCC(ct_img_int16, mr_img_int16_resamp)\n",
+ " NCCs[n, 3] = calcNCC(ct_img_int8, ct_img_int8_resamp)\n",
+ " \n",
+ " # Calculate the joint and marginal entropies\n",
+ " H_ABs[n, 0], H_As[n, 0], H_Bs[n, 0] = ut.calcEntropies(ct_img_int16, ct_img_int16_resamp)\n",
+ " H_ABs[n, 1], H_As[n, 1], H_Bs[n, 1] = ut.calcEntropies(ct_img_int16, ct_img_int8_resamp)\n",
+ " H_ABs[n, 2], H_As[n, 2], H_Bs[n, 2] = ut.calcEntropies(ct_img_int16, mr_img_int16_resamp)\n",
+ " H_ABs[n, 3], H_As[n, 3], H_Bs[n, 3] = ut.calcEntropies(ct_img_int8, ct_img_int8_resamp)\n",
+ " \n",
+ "# Calculate the mutual information and normalised mutual information\n",
+ "MIs = H_As + H_Bs - H_ABs\n",
+ "NMIs = (H_As + H_Bs) / H_ABs"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 29,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# plot the results for NCC, H_AB, MI, and NMI in separate figures\n",
+ "plt.figure()\n",
+ "plt.subplot(2,2,1)\n",
+ "plt.plot(theta, NCCs[:, 0])\n",
+ "plt.title('NCC: 16-bit CT and 16-bit CT')\n",
+ "plt.subplot(2,2,2)\n",
+ "plt.plot(theta, NCCs[:, 1])\n",
+ "plt.title('NCC: 16-bit CT and 8-bit CT')\n",
+ "plt.subplot(2,2,3)\n",
+ "plt.plot(theta, NCCs[:, 2])\n",
+ "plt.title('NCC: 16-bit CT and 16-bit MR')\n",
+ "plt.subplot(2,2,4)\n",
+ "plt.plot(theta, NCCs[:, 3])\n",
+ "plt.title('NCC: 8-bit CT and 8-bit CT')\n",
+ "plt.tight_layout()\n",
+ "\n",
+ "plt.figure()\n",
+ "plt.subplot(2,2,1)\n",
+ "plt.plot(theta, H_ABs[:, 0])\n",
+ "plt.title('H_AB: 16-bit CT and 16-bit CT')\n",
+ "plt.subplot(2,2,2)\n",
+ "plt.plot(theta, H_ABs[:, 1])\n",
+ "plt.title('H_AB: 16-bit CT and 8-bit CT')\n",
+ "plt.subplot(2,2,3)\n",
+ "plt.plot(theta, H_ABs[:, 2])\n",
+ "plt.title('H_AB: 16-bit CT and 16-bit MR')\n",
+ "plt.subplot(2,2,4)\n",
+ "plt.plot(theta, H_ABs[:, 3])\n",
+ "plt.title('H_AB: 8-bit CT and 8-bit CT')\n",
+ "plt.tight_layout()\n",
+ "\n",
+ "plt.figure()\n",
+ "plt.subplot(2,2,1)\n",
+ "plt.plot(theta, MIs[:, 0])\n",
+ "plt.title('MI: 16-bit CT and 16-bit CT')\n",
+ "plt.subplot(2,2,2)\n",
+ "plt.plot(theta, MIs[:, 1])\n",
+ "plt.title('MI: 16-bit CT and 8-bit CT')\n",
+ "plt.subplot(2,2,3)\n",
+ "plt.plot(theta, MIs[:, 2])\n",
+ "plt.title('MI: 16-bit CT and 16-bit MR')\n",
+ "plt.subplot(2,2,4)\n",
+ "plt.plot(theta, MIs[:, 3])\n",
+ "plt.title('MI: 8-bit CT and 8-bit CT')\n",
+ "plt.tight_layout()\n",
+ "\n",
+ "plt.figure()\n",
+ "plt.subplot(2,2,1)\n",
+ "plt.plot(theta, NMIs[:, 0])\n",
+ "plt.title('NMI: 16-bit CT and 16-bit CT')\n",
+ "plt.subplot(2,2,2)\n",
+ "plt.plot(theta, NMIs[:, 1])\n",
+ "plt.title('NMI: 16-bit CT and 8-bit CT')\n",
+ "plt.subplot(2,2,3)\n",
+ "plt.plot(theta, NMIs[:, 2])\n",
+ "plt.title('NMI: 16-bit CT and 16-bit MR')\n",
+ "plt.subplot(2,2,4)\n",
+ "plt.plot(theta, NMIs[:, 3])\n",
+ "plt.title('NMI: 8-bit CT and 8-bit CT')\n",
+ "plt.tight_layout()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "ipmi_reg",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.13.1"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/example_solutions/practical4_notebook_solution.ipynb b/example_solutions/practical4_notebook_solution.ipynb
new file mode 100644
index 00000000..6e670f2b
--- /dev/null
+++ b/example_solutions/practical4_notebook_solution.ipynb
@@ -0,0 +1,377 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Practical 4: Exploring the Demons registration algorithm"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 2,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# import the required packages\n",
+ "import numpy as np\n",
+ "import skimage.io\n",
+ "import matplotlib\n",
+ "import matplotlib.pyplot as plt\n",
+ "import utils as ut\n",
+ "\n",
+ "# import the demonsReg function, this is imported from the demonsReg.py file which is separate than the usual utils.py file\n",
+ "from demonsReg import demonsReg\n",
+ "\n",
+ "# the next lines make figures display in separate windows instead of in the notebook\n",
+ "# this is required for the animations to work correctly\n",
+ "matplotlib.use('TkAgg')\n",
+ "plt.ion()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 1. Compare the images"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# load in the three 2D images required for these exercises using the imread function from the scikit-image python library\n",
+ "cine_MR_1 = skimage.io.imread('data/practical4/cine_MR_1.png') \n",
+ "cine_MR_2 = skimage.io.imread('data/practical4/cine_MR_2.png')\n",
+ "cine_MR_3 = skimage.io.imread('data/practical4/cine_MR_3.png')\n",
+ "\n",
+ "# TODO: Convert all the images to double data type and reorient them into cartesian orientation\n",
+ "cine_MR_1 = np.double(cine_MR_1)\n",
+ "cine_MR_1 = cine_MR_1.T\n",
+ "cine_MR_1 = np.flip(cine_MR_1, 1)\n",
+ "\n",
+ "cine_MR_2 = np.double(cine_MR_2)\n",
+ "cine_MR_2 = cine_MR_2.T\n",
+ "cine_MR_2 = np.flip(cine_MR_2, 1)\n",
+ "\n",
+ "cine_MR_3 = np.double(cine_MR_3)\n",
+ "cine_MR_3 = cine_MR_3.T\n",
+ "cine_MR_3 = np.flip(cine_MR_3, 1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: display each image in a separate figure\n",
+ "plt.figure()\n",
+ "ut.dispImage(cine_MR_1)\n",
+ "plt.figure()\n",
+ "ut.dispImage(cine_MR_2)\n",
+ "plt.figure()\n",
+ "ut.dispImage(cine_MR_3)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: display difference images\n",
+ "plt.figure()\n",
+ "ut.dispImage(cine_MR_1 - cine_MR_2)\n",
+ "plt.figure()\n",
+ "ut.dispImage(cine_MR_1 - cine_MR_3)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: use the dispImageFlip function to compare cine_MR_1 with cine_MR_2 and compare cine_MR_1 with cine_MR_3\n",
+ "ut.dispImageFlip(cine_MR_1, cine_MR_2)\n",
+ "ut.dispImageFlip(cine_MR_1, cine_MR_3)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 2. The `demonsReg` function"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run the demonsReg function on the cine_MR_1 and cine_MR_2 images\n",
+ "#plt.close('all')\n",
+ "warped_image, def_field = demonsReg(cine_MR_1, cine_MR_2)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 3. Regularisation"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg with sigma_elastic set to 0\n",
+ "plt.close('all')\n",
+ "warped_image, def_field = demonsReg(cine_MR_1, cine_MR_2, sigma_elastic=0)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg with sigma_fluid set to 0\n",
+ "warped_image, def_field = demonsReg(cine_MR_1, cine_MR_2, sigma_fluid=0)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg with several different values for sigma_elastic and sigma_fluid\n",
+ "warped_image, def_field = demonsReg(cine_MR_1, cine_MR_2, sigma_elastic=0.1, sigma_fluid=3)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 4. Resolution levels"
+ ]
+ },
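+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "`num_lev` sets the number of levels used in the multi-resolution scheme: at the lower levels the images are downsampled (by a factor of 2 per level), and the displacement field estimated at each level is upsampled to initialise the next level."
+ ]
+ },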
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg with the number of resolution levels set to 1\n",
+ "warped_image, def_field = demonsReg(cine_MR_1, cine_MR_2, num_lev=1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg with the number of resolution levels set to 6\n",
+ "warped_image, def_field = demonsReg(cine_MR_1, cine_MR_2, num_lev=6)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg varying both the number of levels and the regularisation weights\n",
+ "warped_image, def_field = demonsReg(cine_MR_1, cine_MR_2, num_lev=6, sigma_elastic=0.5, sigma_fluid=2)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 5. Folding and the determinant of the Jacobian matrix"
+ ]
+ },
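+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "`ut.calcJacobian` computes, for every pixel of the deformation field, the determinant of the 2 x 2 Jacobian matrix of partial derivatives. For a 2D deformation field with components $T_x(x,y)$ and $T_y(x,y)$ this determinant is\n",
+ "\n",
+ "$$J = \\frac{\\partial T_x}{\\partial x} \\frac{\\partial T_y}{\\partial y} - \\frac{\\partial T_x}{\\partial y} \\frac{\\partial T_y}{\\partial x}$$\n",
+ "\n",
+ "Folding corresponds to pixels where $J \\le 0$, i.e. where the transformation is not locally invertible."
+ ]
+ },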
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg with sigma_elastic set to 0.5\n",
+ "warped_image, def_field = demonsReg(cine_MR_1, cine_MR_2, sigma_elastic=0.5)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ ""
+ ]
+ },
+ "execution_count": 12,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# TODO: calculate the Jacobian map from the deformation field and display it, setting the colour map and adding a colour bar\n",
+ "J, J_Mat = ut.calcJacobian(def_field)\n",
+ "plt.figure()\n",
+ "ut.dispImage(J)\n",
+ "plt.set_cmap('jet')\n",
+ "plt.colorbar()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: display a binary image showing the pixels where folding has occurred\n",
+ "fold_map = J <= 0\n",
+ "plt.figure()\n",
+ "ut.dispImage(fold_map)"
+ ]
+ },
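+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Optional: a simple way to quantify the amount of folding is to count the pixels in `fold_map` where the determinant of the Jacobian is non-positive, e.g. using `np.count_nonzero` as in the cell below."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# optional: count the number of pixels where folding has occurred\n",
+ "num_folded = np.count_nonzero(fold_map)\n",
+ "print(f'{num_folded} of {fold_map.size} pixels have a Jacobian determinant <= 0')"
+ ]
+ },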
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 6. Composing the updates"
+ ]
+ },
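+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As described in the `demonsReg` docstring, with `use_composition=False` (the default) each update is added to the current transformation, as in the classical demons algorithm, whereas with `use_composition=True` each update is composed with the current transformation, as in the diffeomorphic demons algorithm."
+ ]
+ },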
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg with sigma_elastic set to 0.5 and use_composition=True\n",
+ "warped_image, def_field = demonsReg(cine_MR_1, cine_MR_2, sigma_elastic=0.5, use_composition=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg with parameters you previously found gave a good result, but now with use_composition=True\n",
+ "warped_image, def_field = demonsReg(cine_MR_1, cine_MR_2, num_lev=6, sigma_elastic=0.5, sigma_fluid=2, use_composition=True)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 7. Other parameters"
+ ]
+ },
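+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The parameters explored in this section are described in the `demonsReg` docstring: `check_MSD` controls whether the Mean Squared Difference (MSD) is checked for improvement at each iteration, `disp_freq` sets how often the displayed images are updated (0 disables the updates during registration), `max_it` is the maximum number of iterations, and `use_target_grad` selects whether the target image gradient, rather than the warped image gradient, is used when calculating the demons forces."
+ ]
+ },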
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg with check_MSE set to false and the display frequency set to 50\n",
+ "warped_image, def_field = demonsReg(cine_MR_1, cine_MR_2, check_MSD=False, disp_freq=50)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg with the same parameters as above but with max_it set to 500\n",
+ "warped_image, def_field = demonsReg(cine_MR_1, cine_MR_2, check_MSD=False, disp_freq=50, max_it=500)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg using the spatial gradients of target image instead of source image\n",
+ "warped_image, def_field = demonsReg(cine_MR_1, cine_MR_2, use_target_grad=True)\n",
+ "warped_image, def_field = demonsReg(cine_MR_1, cine_MR_2, use_target_grad=False)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 8. Registering the deep-breath image"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg with cine_MR_1 as source and cine_MR_3 as target\n",
+ "# TODO: experiment with different parameters to try and get a good result\n",
+ "warped_image, def_field = demonsReg(cine_MR_1, cine_MR_3, sigma_elastic=0.25, sigma_fluid=4, use_composition=True, num_lev=6)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# TODO: run demonsReg with cine_MR_3 as source and cine_MR_1 as target\n",
+ "# TODO: experiment with different parameters to try and get a good result\n",
+ "warped_image, def_field = demonsReg(cine_MR_3, cine_MR_1, sigma_elastic=0.6, sigma_fluid=0.5, use_composition=True, num_lev=6)"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "ipmi_reg",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.13.1"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/index.md b/index.md
index 89e087c6..e6787787 100644
--- a/index.md
+++ b/index.md
@@ -4,7 +4,23 @@ site: sandpaper::sandpaper_site
-Add course summary, lecture slides and timetable.
+These webpages provide all the material for the image registration practicals for the module:
+
+MPHY0025 - Information Processing in Medical Imaging
+
+run by:
+
+Jamie McClelland
+
+Dept. of Medical Physics and Biomedical Engineering
+
+UCL
+
+
+You should follow the instructions below **prior to the first practical session** to access the materials for the practicals, set up Python (used for all practicals), and install the ITK-SNAP software (used in the first practical).
+
+The instructions for each practical session can then be accessed using the links on the left.
+
[workbench]: https://carpentries.github.io/sandpaper-docs
diff --git a/learners/setup.md b/learners/setup.md
index 731b47c2..a95c9ddf 100644
--- a/learners/setup.md
+++ b/learners/setup.md
@@ -5,96 +5,46 @@ editor_options:
wrap: sentence
---
-````{=html}
-
-````
-
-## Course material
-
-Start by downloading the course material [here](https://liveuclac-my.sharepoint.com/:u:/g/personal/rmapcdr_ucl_ac_uk/ETtoAQ1qXedMiU69mRdFXOIBcnKQ-nxMQ1c74gsPm7a6Sg?download=1).
-Unzip it and make a note of where the folder is saved.
-
-In this folder, you will find:
-* A data directory which contains the datasets for each practical.
-* 6 Jupyter notebooks with extensions `.ipynb` for each practical.
-* 2 Python scripts with extensions `.py`.
-* The `python-environment.yml` file which we will use in the steps below.
-### Dataset
-We provide a summary of the different datasets for each practical here. Datasets are described at the beginning of each practical.
+## Accessing the materials for the practicals
+
+* Create a folder in which you will store all of the material for the IPMI registration practicals, e.g. `C:\ipmi_reg\`
+* If you haven't already, download the zip file containing the data used in the practicals from Moodle.
+* Extract the zip file in the folder you created above.
+ * This will create the following folders containing the data used in the exercises:
+ * `C:\ipmi_reg\data\practical1\CT_for_PET`
+ * `C:\ipmi_reg\data\practical1\exhale_BH_CT`
+ * `C:\ipmi_reg\data\practical1\inhale_BH_CT`
+ * `C:\ipmi_reg\data\practical1\PET`
+ * `C:\ipmi_reg\data\practical2`
+ * `C:\ipmi_reg\data\practical3`
+ * `C:\ipmi_reg\data\practical4`
+* Download the following Python files (right click on the links and select 'Save link as...') and save them in the folder you created above.
+ * [`practical1_notebook.ipynb`](files/practical1_notebook.ipynb)
+ * [`practical2_notebook.ipynb`](files/practical2_notebook.ipynb)
+ * [`practical3_notebook.ipynb`](files/practical3_notebook.ipynb)
+ * [`practical4_notebook.ipynb`](files/practical4_notebook.ipynb)
+ * [`utils.py`](files/utils.py) (used in practicals 2-4)
+ * [`demonsReg.py`](files/demonsReg.py) (used in practical 4)
-| Practical | Dataset |
-|-------------------------------|-----------------------------------------|
-| Practical 1 | [TCIA CT-vs-PET-Ventilation-Imaging](https://www.cancerimagingarchive.net/collection/ct-vs-pet-ventilation-imaging/) |
-| Practical 2 | Lung MRI slice |
-| Practical 3 | Head CT and MRI slices |
-| Practical 4 | Lung MRI slices |
-| Practical 5 | Dataset 5 |
-| Practical 6 | Dataset 6 |
## Python Setup
-### Installing Python
-
-For Python beginners, we recommend installing miniconda [here](https://www.anaconda.com/download/success).
-This provides a basic installation of Python and conda, which allows us to create virtual environments.
-Once this has been installed, you should be able to open a terminal.
-
-::::::::::::::::::::::::::::::::::::::::::: spoiler
-
-### Opening a Terminal
-
-* Windows: Click Start > Search for Anaconda Prompt > Click to Open
-* macOS: Launchpad > Other Application > Terminal
-* Linux: Open a terminal window
-
-:::::::::::::::::::::::::::::::::::::::::::
+Note: if you are taking the module MPHY0025 (IPMI), you are expected to have some familiarity and experience with Python. Anyone who has used Python for a while, or has taken some kind of introductory course, should be fine.
### Creating an environment
-Once you have opened the terminal, move to the location of the course files
-(use the `cd` command to move to the correct location - have a quick look [here](https://tutorials.codebar.io/command-line/introduction/tutorial.html) if you've never used the command line before).
-
-:::::::::::::::::::::::::: discussion
-### What does this look like?
-Say you have saved the extracted zip file on your Desktop.
-::::::::::::::::::::::::::
+We suggest creating a separate conda environment for the IPMI registration practicals. You can use the yml file [`ipmi_reg_python_env.yml`](files/ipmi_reg_python_env.yml) to create a new environment with the required libraries, e.g. in the Anaconda Prompt type:
-:::::::::::::::::::::::::::::::::::: solution
-### Windows
-```
-cd Desktop\ideas-reg\
-```
-Note: the above tutorial for the command line is for Mac/Linux users.
-In your Anaconda prompt in Windows, you can use `dir` instead of `ls` and `cd` without any arguments instead of `pwd`.
-::::::::::::::::::::::::::::::::::::
-
-:::::::::::::::::::::::::::::::::::: solution
-### Mac/Linux
-```bash
-cd Desktop/ideas-reg/
-```
-::::::::::::::::::::::::::::::::::::
-
-Create the environment from the existing `python-environment.yml` file.
-``` bash
-conda env create --name ideas-reg -f python-environment.yml
+```
+conda env create --name ipmi_reg -f ipmi_reg_python_env.yml
```
This environment has been tested on all the tutorials and should help avoiding issues when running them.
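+
+If you also want to work with the environment directly from the Anaconda Prompt (for example to install additional packages into it), it can be activated in the usual way:
+
+```
+conda activate ipmi_reg
+```
+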
### VSCode
-If you do not already have a Python IDE installed, we recommend using VSCode.
-It is very convenient as it allows us to open both Python scripts and Jupyter notebooks in the same interface.
+You are welcome to use any Python IDE that you are familiar with for the practical sessions, but we recommend VSCode: it is a popular, free IDE, and it is what we used to develop and test the provided Jupyter notebooks and other Python code.
You can download it [here](https://code.visualstudio.com/download).
We recommend looking at the following instructions to get started:
@@ -102,24 +52,30 @@ We recommend looking at the following instructions to get started:
* [Python tutorial](https://code.visualstudio.com/docs/python/python-tutorial)
* [Instructions for Jupyter notebooks](https://code.visualstudio.com/docs/datascience/jupyter-notebooks)
-#### Open the `ideas-reg` folder
-Start by opening the folder with the course content. Go on `File --> Open Folder` and navigate to the `ideas-reg` unzipped folder.
-
+#### Preparing VSCode for the practicals
+* Start by opening the folder with the course content. Click 'File -> Open Folder...' and navigate to the `ipmi_reg` folder.
+
+
-On the left, you can see the files available in the directory. Open one of the practicals (`.ipynb`). This is a Jupyter notebook.
-If this is the first time you open a Jupyter notebook in VSCode, you should be prompted to install some extensions (Python, Pylance, Jupyter and a few more).
-Install them.
+On the left, you can see the files available in the directory.
-#### Select the environment
-You now need to select the conda environment we created above. At the top right of the notebook, there should be a `Select Kernel` button.
-
+* Open one of the practicals (`.ipynb`). This is a Jupyter notebook.
+ * If this is the first time you have opened a Jupyter notebook in VSCode, you may be prompted to install some extensions (Python, Pylance, Jupyter and a few more). If so, install them.
+ * Note: you may not be prompted to install these extensions until you select the kernel (the next step).
-Click on Python Environment. You should see a dropdown with your existing environments. Select `ideas-reg`.
-
+* You now need to select the conda environment we created above. At the top right of the notebook, there should be a 'Select Kernel' button.
+ * When you click this, you may be prompted to install the extensions if you were not prompted when you opened the Jupyter notebook.
+
+ 
-## Software Setup
+ * Once the extensions are installed, clicking 'Select Kernel' will then list two options:
+ * 'Python Environments...'
+ * 'Existing Jupyter Server...'
+ * Click 'Python Environments...'. You should then see a dropdown menu with your existing environments. Select the `ipmi_reg` environment you created above.
+
+ 
-### ITK-SNAP
+## Installing ITK-SNAP
ITK-SNAP started in 1999 with SNAP (SNake Automatic Partitioning) and was first developed by Paul Yushkevich with the guidance of Guido Gerig.
It is open-source software distributed under the GNU General Public License.
It is written in C++ and leverages the [Insight Segmentation and Registration Toolkit (ITK)](https://itk.org/).
@@ -130,11 +86,11 @@ ITK-SNAP will be used in Practical 1.
* Once you have downloaded the installer you should run it to install ITK-SNAP.
* When you run the installer on Windows a window may pop up as shown below, with 'Don't run' as the only option given.
- 
+ 
* However, If you click on 'More info' a new 'Run anyway' button will appear which you can click on to run the installer.
- 
+ 
* Then all of the default options can be selected during installation.
diff --git a/src/mirc/__init__.py b/src/mirc/__init__.py
deleted file mode 100644
index 370211ff..00000000
--- a/src/mirc/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-"""mirc"""
-
-__version__ = "0.0.1"
diff --git a/src/mirc/examples/exampleSolution3.py b/src/mirc/examples/exampleSolution3.py
deleted file mode 100644
index 29cee332..00000000
--- a/src/mirc/examples/exampleSolution3.py
+++ /dev/null
@@ -1,90 +0,0 @@
-"""
-example solution for the image registration exercises 3 for module MPHY0025 (IPMI)
-
-Author: Jamie McClelland UCL
-Contributors: Zakaria Senousy and Miguel Xochicale UCL-ARC
-"""
-import matplotlib.pyplot as plt
-from src.mirc.utils.demonsReg import demonsReg
-from src.mirc.utils.utils3 import dispImage, dispDefField, calcJacobian
-import numpy as np
-import skimage.io
-#%matplotlib qt
-
-cine_MR_img_1 = skimage.io.imread('cine_MR_1.png')
-cine_MR_img_2 = skimage.io.imread('cine_MR_2.png')
-cine_MR_img_3 = skimage.io.imread('cine_MR_3.png')
-
-
-cine_MR_img_1 = np.double(cine_MR_img_1)
-cine_MR_img_2 = np.double(cine_MR_img_2)
-cine_MR_img_3 = np.double(cine_MR_img_3)
-
-cine_MR_img_1 = np.flip(cine_MR_img_1.T, 1)
-cine_MR_img_2 = np.flip(cine_MR_img_2.T, 1)
-cine_MR_img_3 = np.flip(cine_MR_img_3.T, 1)
-
-plt.figure()
-dispImage(cine_MR_img_1)
-plt.figure()
-dispImage(cine_MR_img_2)
-plt.figure()
-dispImage(cine_MR_img_3)
-plt.figure()
-dispImage(cine_MR_img_1 - cine_MR_img_2)
-plt.figure()
-dispImage(cine_MR_img_1 - cine_MR_img_3)
-
-plt.close('all')
-
-
-[warped_image_def, def_field_def] = demonsReg(cine_MR_img_1, cine_MR_img_2)
-[warped_image, def_field] = demonsReg(cine_MR_img_1, cine_MR_img_2, sigma_elastic=0)
-[warped_image, def_field] = demonsReg(cine_MR_img_1, cine_MR_img_2, sigma_fluid=0)
-
-plt.figure()
-dispImage(warped_image_def)
-plt.figure()
-dispDefField(def_field_def, spacing=2)
-
-[warped_image, def_field] = demonsReg(cine_MR_img_1, cine_MR_img_2, num_lev=1)
-
-[warped_image, def_field] = demonsReg(cine_MR_img_1, cine_MR_img_2, num_lev=6)
-
-[warped_image_add, def_field_add] = demonsReg(cine_MR_img_1, cine_MR_img_2, sigma_elastic=0.5)
-
-[J, J_Mat] = calcJacobian(def_field_add)
-plt.figure()
-dispImage(J)
-plt.set_cmap('jet')
-plt.colorbar()
-plt.figure()
-dispImage(J<=0)
-
-[warped_image_comp, def_field_comp] = demonsReg(cine_MR_img_1, cine_MR_img_2, sigma_elastic=0.5, use_composition=True)
-[J, J_Mat] = calcJacobian(def_field_comp)
-plt.figure()
-dispImage(J)
-plt.set_cmap('jet')
-plt.colorbar()
-plt.figure()
-dispImage(J<=0)
-plt.figure()
-dispImage(warped_image_add)
-
-
-[warped_image, def_field] = demonsReg(cine_MR_img_1, cine_MR_img_2, check_MSD=False, disp_freq=50)
-
-[warped_image, def_field] = demonsReg(cine_MR_img_1, cine_MR_img_2, check_MSD=False, disp_freq=50, max_it=500)
-
-[warped_image, def_field] = demonsReg(cine_MR_img_1, cine_MR_img_2, use_target_grad=True)
-
-plt.figure()
-dispImage(warped_image_def)
-plt.figure()
-dispDefField(def_field_def, spacing=2)
-
-[warped_image, def_field] = demonsReg(cine_MR_img_1, cine_MR_img_3, sigma_elastic=0, sigma_fluid=5, use_composition=True, num_lev=6, disp_freq=0)
-[warped_image, def_field] = demonsReg(cine_MR_img_3, cine_MR_img_1, sigma_elastic=0.6, sigma_fluid=0.5, use_composition=True, num_lev=6, disp_freq=0)
-
-
diff --git a/src/mirc/utils/demonsReg.py b/src/mirc/utils/demonsReg.py
deleted file mode 100644
index 0348faf2..00000000
--- a/src/mirc/utils/demonsReg.py
+++ /dev/null
@@ -1,351 +0,0 @@
-"""
-function to peform a registration between two 2D images using the demons algorithm
-
-provided for use in image registration exercises 3 for module MPHY0025 (IPMI)
-
-Authors: Jamie McClelland, UCL
-Contributors: Zakaria Senousy and Miguel Xochicale, UCL-ARC
-"""
-import matplotlib
-matplotlib.use('TkAgg')
-import matplotlib.pyplot as plt
-
-import numpy as np
-from skimage.transform import rescale, resize
-from scipy.ndimage import gaussian_filter
-from src.mirc.utils.utils3 import dispImage, resampImageWithDefField, calcMSD, dispDefField, calcJacobian
-
-
-def demonsReg(source, target, sigma_elastic=1, sigma_fluid=1, num_lev=3, use_composition=False,
- use_target_grad=False, max_it=1000, check_MSD=True, disp_freq=3, disp_spacing=2,
- scale_update_for_display=10, disp_method_df='grid', disp_method_up='arrows'):
- """
- SYNTAX:
- demonsReg(source, target)
- demonsReg(source, target, ..., variable=value, ...)
- warped_image = demonsReg(...)
- warped_image, def_field = demonsReg(...)
-
- DESCRIPTION:
- Perform a registration between the 2D source image and the 2D target
- image using the demons algorithm. The source image is warped (resampled)
- into the space of the target image.
-
- The final warped image and deformation field can be returned as outputs
- from the function.
-
- There are a number of optional parameters which affect the registration
- or how the results are displayed, which are explained below. These can be
- speficied using variable=value inputs.
- The default values are given after the parameter name
- sigma_elastic = 1
- sigma_fluid = 1
- the amount of elastic and fluid regularistion to apply. these values
- specify the standard deviation of the Gaussian used to smooth the
- update (fluid) or displacement field (elastic). a value of 0 means no
- smoothing is applied.
- num_lev = 3
- the number of levels to use in the multi-resolution scheme
- use_composition = false
- specifies whether the registration is performed using the classical
- demons algorithm, where the updates are added to the current
- transformation, or using the diffeomorphic demons algorithm, where
- the updates are composed with the current transformation. Set
- use_composition to true to compose the updates, or to false to add
- the updates.
- use_target_grad = false
- logical (true/false) value indicating whether the target image
- gradient or warped image gradient is used when calculating the
- demons forces.
- max_it = 1000
- the maximum number of iterations to perform.
- check_MSD = true
- logical value indicating if the Mean Squared Difference (MSD)
- should be checked for improvement at each iteration. If true, the
- MSD will be evaluated at each iteration, and if there is no
- improvement since the previous iteration the registration will move
- to the next resolution level or finish if it is on the final level.
- disp_freq = 3
- the frequency with which to update the displayed images. the images
- will be updated every disp_freq iterations. If disp_freq is set to
- 0 the images will not be updated during the registration
- disp_spacing = 2
- the spacing between the grid lines or arrows when displaying the
- deformation field and update.
- scale_update_for_display = 10
- the factor used to scale the update field for displaying
- disp_method_df = 'grid'
- the display method for the deformation field.
- can be 'grid' or 'arrows'
- disp_method_up = 'arrows'
- the display method for the update. can be 'grid' or 'arrows'
- """
-
- # make copies of full resolution images
- source_full = source;
- target_full = target;
-
- #Preparing figure for live update during registration process
- fig, axs = plt.subplots(1, 3, figsize=(12, 6))
- iteration_text = fig.text(0.5, 0.92, '', ha='center', va='top', fontsize=10, color='black')
-
- # loop over resolution levels
- for lev in range(1, num_lev + 1):
-
- # resample images if not final level
- if lev != num_lev:
- resamp_factor = np.power(2, num_lev - lev)
- target = rescale(target_full, 1.0 / resamp_factor, mode='edge', order=3, anti_aliasing=True)
- source = rescale(source_full, 1.0 / resamp_factor, mode='edge', order=3, anti_aliasing=True)
- else:
- target = target_full
- source = source_full
-
- # if first level initialise def_field and disp_field
- if lev == 1:
- [X, Y] = np.mgrid[0:target.shape[0], 0:target.shape[1]]
- def_field = np.zeros((X.shape[0], X.shape[1], 2))
- def_field[:, :, 0] = X
- def_field[:, :, 1] = Y
- disp_field_x = np.zeros(target.shape)
- disp_field_y = np.zeros(target.shape)
- else:
- # otherwise upsample disp_field from previous level
- disp_field_x = 2 * resize(disp_field_x, (target.shape[0], target.shape[1]), mode='edge', order=3)
- disp_field_y = 2 * resize(disp_field_y, (target.shape[0], target.shape[1]), mode='edge', order=3)
- # recalculate def_field for this level from disp_field
- X, Y = np.mgrid[0:target.shape[0], 0:target.shape[1]]
- def_field = np.zeros((X.shape[0], X.shape[1], 2)) # clear def_field from previous level
- def_field[:, :, 0] = X + disp_field_x
- def_field[:, :, 1] = Y + disp_field_y
-
- #initialise updates
- update_x = np.zeros(target.shape)
- update_y = np.zeros(target.shape)
- update_def_field = np.zeros(def_field.shape)
-
- # calculate the transformed image at the start of this level
- warped_image = resampImageWithDefField(source, def_field)
-
- # store the current def_field and MSD value to check for improvements at
- # end of iteration
- def_field_prev = def_field.copy()
- prev_MSD = calcMSD(target, warped_image)
-
- # if target image gradient is being used this can be calculated now as it will
- # not change during the registration
- if use_target_grad:
- [img_grad_x, img_grad_y] = np.gradient(target)
-
-
-
- # Function to update the display
- def live_update():
- # Clear the axes
- for ax in axs:
- ax.clear()
-
- # Plotting the warped image in the first subplot
- plt.sca(axs[0])
- dispImage(warped_image, title='Warped Image')
- x_lims = plt.xlim()
- y_lims = plt.ylim()
- # Plotting the deformation field in the second subplot
- plt.sca(axs[1])
- dispDefField(def_field, spacing=disp_spacing, plot_type=disp_method_df)
- axs[1].set_xlim(x_lims)
- axs[1].set_ylim(y_lims)
- axs[1].set_title('Deformation Field')
-
- # Plotting the updated deformation field in the third subplot
- plt.sca(axs[2])
- up_field_to_display = scale_update_for_display * np.dstack((update_x, update_y))
- up_field_to_display += np.dstack((X, Y))
- dispDefField(up_field_to_display, spacing=disp_spacing, plot_type=disp_method_up)
- axs[2].set_xlim(x_lims)
- axs[2].set_ylim(y_lims)
- axs[2].set_title('Update Field')
-
- # Update the iteration text
- iteration_text.set_text('Level {0:d}, Iteration {1:d}: MSD = {2:.6f}'.format(lev, it, prev_MSD))
-
- # Adjust layout for better spacing
- plt.tight_layout()
-
- # Redraw the figure
- fig.canvas.draw()
- fig.canvas.flush_events() # Ensure the figure updates immediately
-
- plt.pause(0.5)
-
-
- # main iterative loop - repeat until max number of iterations reached
- for it in range(max_it):
-
- # calculate update from demons forces
- #
- # if the warped image gradient is used (instead of the target image gradient)
- # this needs to be calculated
- if not use_target_grad:
- [img_grad_x, img_grad_y] = np.gradient(warped_image)
-
- # calculate difference image
- diff = target - warped_image
- # calculate denominator of demons forces
- denom = np.power(img_grad_x, 2) + np.power(img_grad_y, 2) + np.power(diff, 2)
- # calculate x and y components of numerator of demons forces
- numer_x = diff * img_grad_x
- numer_y = diff * img_grad_y
- # calculate the x and y components of the update
- #denom[denom < 0.01] = np.nan
- update_x = numer_x / denom
- update_y = numer_y / denom
-
- # set nan values to 0
- update_x[np.isnan(update_x)] = 0
- update_y[np.isnan(update_y)] = 0
-
- # if fluid like regularisation used smooth the update
- if sigma_fluid > 0:
- update_x = gaussian_filter(update_x, sigma_fluid, mode='nearest')
- update_y = gaussian_filter(update_y, sigma_fluid, mode='nearest')
-
- # update displacement field using addition (original demons) or
- # composition (diffeomorphic demons)
- if use_composition:
- # compose update with current transformation - this is done by
- # transforming (resampling) the current transformation using the
- # update. we can use the same function as used for resampling
- # images, and treat each component of the current deformation
- # field as an image
- # the update is a displacement field, but to resample an image
- # we need a deformation field, so need to calculate deformation
- # field corresponding to update.
- update_def_field[:, :, 0] = update_x + X
- update_def_field[:, :, 1] = update_y + Y
- # use this to resample the current deformation field, storing
- # the result in the same variable, i.e. we overwrite/update the
- # current deformation field with the composed transformation
- def_field = resampImageWithDefField(def_field, update_def_field)
- # calculate the displacement field from the composed deformation field
- disp_field_x = def_field[:, :, 0] - X
- disp_field_y = def_field[:, :, 1] - Y
- # replace nans in disp field with 0s
- disp_field_x[np.isnan(disp_field_x)] = 0
- disp_field_y[np.isnan(disp_field_y)] = 0
- else:
- # add the update to the current displacement field
- disp_field_x = disp_field_x + update_x
- disp_field_y = disp_field_y + update_y
-
-
- # if elastic like regularisation used smooth the displacement field
- if sigma_elastic > 0:
- disp_field_x = gaussian_filter(disp_field_x, sigma_elastic, mode='nearest')
- disp_field_y = gaussian_filter(disp_field_y, sigma_elastic, mode='nearest')
-
- # update deformation field from disp field
- def_field[:, :, 0] = disp_field_x + X
- def_field[:, :, 1] = disp_field_y + Y
-
- # transform the image using the updated deformation field
- warped_image = resampImageWithDefField(source, def_field)
-
- # update images if required for this iteration
- if disp_freq > 0 and it % disp_freq == 0:
- # Create a single figure with 3 subplots in one row and three columns
- live_update()
-
- # calculate MSD between target and warped image
- MSD = calcMSD(target, warped_image)
-
- # display numerical results
- print('Level {0:d}, Iteration {1:d}: MSD = {2:f}\n'.format(lev, it, MSD))
-
- # check for improvement in MSD if required
- if check_MSD and MSD >= prev_MSD:
- # restore previous results and finish level
- def_field = def_field_prev
- warped_image = resampImageWithDefField(source, def_field)
- print('No improvement in MSD')
- break
-
- # update previous values of def_field and MSD
- def_field_prev = def_field.copy()
- prev_MSD = MSD.copy()
-
- #plt.show()
-
- if lev == num_lev:
- # Initialise global variables for current index tracking
- current_image_index = [0]
- current_mode_index = [0]
-
- # Define the images and titles
- images = [source, target, warped_image]
- image_titles = ['Source Image', 'Target Image', 'Warped Image']
- modes = ['Deformation Field', 'Jacobian']
-
-
- def on_key(event):
- if event.key == 'right':
- current_image_index[0] = (current_image_index[0] + 1) % len(images)
- elif event.key == 'left':
- current_image_index[0] = (current_image_index[0] - 1) % len(images)
- elif event.key == 'up' or event.key == 'down':
- current_mode_index[0] = (current_mode_index[0] + 1) % len(modes)
- update_display()
-
-
-
- def update_display():
- axs_combined[0].clear()
- plt.sca(axs_combined[0])
- dispImage(images[current_image_index[0]], title=image_titles[current_image_index[0]])
-
- axs_combined[1].clear()
- plt.sca(axs_combined[1])
- if modes[current_mode_index[0]] == 'Deformation Field':
- dispDefField(def_field, spacing=disp_spacing, plot_type=disp_method_df)
- axs_combined[1].set_title('Deformation Field')
-
- else:
- [jacobian, _] = calcJacobian(def_field)
- dispImage(jacobian, title='Jacobian')
- plt.set_cmap('jet')
- #plt.colorbar()
-
- axs_combined[2].clear()
- plt.sca(axs_combined[2])
- diff_image = images[current_image_index[0]] - target
- dispImage(diff_image, title='Difference Image')
-
- fig_combined.canvas.draw()
-
-
- # Create a single figure with 3 subplots
- fig_combined, axs_combined = plt.subplots(1, 3, figsize=(12, 6))
-
- # Display initial images
- plt.sca(axs_combined[0])
- dispImage(images[current_image_index[0]], title=image_titles[current_image_index[0]])
-
- plt.sca(axs_combined[1])
- dispDefField(def_field, spacing=disp_spacing, plot_type=disp_method_df)
- axs_combined[1].set_title('Deformation Field')
-
- plt.sca(axs_combined[2])
- diff_image = images[current_image_index[0]] - target
- dispImage(diff_image, title='Difference Image')
-
- # Add instructions for navigating images
- fig_combined.text(0.5, 0.02, 'Press <- or -> to navigate between source, target and warped images, Press Up or Down to switch between deformation field and Jacobian', ha='center', va='top', fontsize=12, color='black')
-
- # Connect the key event handler to the figure
- fig_combined.canvas.mpl_connect('key_press_event', on_key)
-
- plt.tight_layout()
- plt.show()
-
- # return the transformed image and the deformation field
- return warped_image, def_field
diff --git a/src/mirc/utils/utils3.py b/src/mirc/utils/utils3.py
deleted file mode 100644
index 96c4695e..00000000
--- a/src/mirc/utils/utils3.py
+++ /dev/null
@@ -1,202 +0,0 @@
-"""
-utility functions for use in the image registration exercises 3 for module MPHY0025 (IPMI)
-
-Jamie McClelland
-UCL
-"""
-
-import numpy as np
-import matplotlib.pyplot as plt
-plt.style.use('default')
-import scipy.interpolate as scii
-
-def dispImage(img, int_lims = [], title = ''):
- """
- function to display a grey-scale image that is stored in 'standard
- orientation' with y-axis on the 2nd dimension and 0 at the bottom
-
- SYNTAX:
- dispImage(img)
- dispImage(img, int_lims)
-
- INPUTS:
- img - image to be displayed
- int_lims - the intensity limits to use when displaying the image
- int_lims(1) = min intensity to display
- int_lims(2) = max intensity to display
- default = [np.nanmin(img), np.nanmax(img)]
- """
-
- #check if intensity limits have been provided, and if not set to min and
- #max of image
- if not int_lims:
- int_lims = [np.nanmin(img), np.nanmax(img)]
- #check if min and max are same (i.e. all values in img are equal)
- if int_lims[0] == int_lims[1]:
- #add one to int_lims(2) and subtract one from int_lims(1), so that
- #int_lims(2) is larger than int_lims(1) as required by imagesc
- #function
- int_lims[0] -= 1
- int_lims[1] += 1
-
- # take transpose of image to switch x and y dimensions and display with
- # first pixel having coordinates 0,0
- img = img.T
- plt.gca().clear()
- plt.imshow(img, cmap = 'gray', vmin = int_lims[0], vmax = int_lims[1], origin='lower')
-
- # Set title for the subplot
- plt.title(title)
-
- #set axis to be scaled equally (assumes isotropic pixel dimensions), tight
- #around the image
- plt.axis('image')
- plt.tight_layout()
-
-def resampImageWithDefField(source_img, def_field, interp_method = 'linear', pad_value=np.NaN):
- """
- function to resample a 2D image with a 2D deformation field
-
- SYNTAX:
- resamp_img = resampImageWithDefField(source_img, def_field)
- resamp_img = resampImageWithDefField(source_img, def_field, interp_method)
- resamp_img = resampImageWithDefField(source_img, def_field, interp_method, pad_value)
-
- INPUTS:
- source_img - the source image to be resampled, as a 2D array
- def_field - the deformation field, as a 3D array
- inter_method - any of the interpolation methods accepted by interpn
- function ('linear', 'nearest' and 'splinef2d')
- default = 'linear'
- pad_value - the value to assign to pixels that are outside the source
- image
- default = NaN
-
- OUTPUTS:
- resamp_img - the resampled image
-
- NOTES:
- the deformation field should be a 3D array, where the size of the
- first two dimensions is the size of the resampled image, and the size
- of the 3rd dimension is 2. def_field(:,:,1) contains the x coordinates
- of the transformed pixels, def_field(:,:,2) contains the y coordinates
- of the transformed pixels.
- the origin of the source image is assumed to be the bottom left pixel
- """
- x_coords = np.arange(source_img.shape[0], dtype = 'float')
- y_coords = np.arange(source_img.shape[1], dtype = 'float')
-
- # resample image using interpn function
- return scii.interpn((x_coords, y_coords), source_img, def_field, bounds_error=False, fill_value=pad_value, method=interp_method)
-
-def calcMSD(A,B):
- """
- function to calculate the mean of squared differences between two images
-
- SYNTAX:
- MSD = calcMSD(A, B)
-
- INPUTS:
- A - an image stored as a 2D array
- B - an image stored as a 2D array. B must the the same size as A
-
- OUTPUTS:
- MSD - the value of the mean of squared differences
-
- NOTE:
- if either of the images contain NaN values these pixels should be
- ignored when calculating the SSD.
- """
- # use nansum function to find sum of squared differences ignoring NaNs
- return np.nanmean((A-B)*(A-B))
-
-def calcJacobian(def_field):
- """
- a function to calculate the Jacobian from a deformation field
-
- SYNTAX:
- [J, J_Mat] = calcJacobian(def_field)
-
- INPUTS:
- def_field - the deformation field as a 3D array
-
- OUTPUTS:
- J - the Jacobian determinant for each pixel in the deformation field
- J_Mat - the full Jacobian matrix for each pixel in the deformation
- field as a 4D array (last 2 dimensions contain 2 x 2 matrix for
- each pixel)
- """
- # calculate gradient of x component of deformation field
- [grad_x_x, grad_x_y] = np.gradient(def_field[:, :, 0])
- # calculate gradient of y component of deformation field
- [grad_y_x, grad_y_y] = np.gradient(def_field[:, :, 1])
-
- # initlaise outputs as 0s
- J = np.zeros_like(grad_x_x)
- if grad_x_x.ndim == 2:
- J_Mat = np.zeros((grad_x_x.shape[0], grad_x_x.shape[1], 2, 2))
- elif grad_x_x.ndim == 3:
- J_Mat = np.zeros((grad_x_x.shape[0], grad_x_x.shape[1], grad_x_x.shape[2], 2, 2))
-
- # loop over pixels in the deformation field
- for x in range(grad_x_x.shape[0]):
- for y in range(grad_x_x.shape[1]):
- # form the Jacobian matrix for this pixel
- J_Mat_this_pix = np.array([[grad_x_x[x, y], grad_x_y[x, y]], [grad_y_x[x, y], grad_y_y[x, y]]])
- # calculate and store the determinant
- J[x, y] = np.linalg.det(J_Mat_this_pix)
- # and store the full matrix
- J_Mat[x, y, :, :] = J_Mat_this_pix
- # note - more efficient to calculate determinant from matrix in
- # temporary variable rather than directly from values in J_Mat as
- # the matrix values for a pixel are not stored contiguously in
- # memory in J_Mat
- return J, J_Mat
-
-def dispDefField(def_field, spacing=5, plot_type='grid'):
- """
- function to display a deformation field
-
- SYNTAX:
- dispDefField(def_field)
- dispDefField(def_field, spacing)
- dispDefField(def_field, spacing, plot_type)
-
- INPUTS:
- def_field - the deformation field as a 3D array
- spacing - the spacing of the grids/arrows in pixels
- default = 5
- plot_type - the type of plot to use, 'grid' or 'arrows'
- default = 'grid'
- """
- # calculate coordinates for plotting grid-lines/arrows
- x_inds = np.arange(0, def_field.shape[0], spacing)
- y_inds = np.arange(0, def_field.shape[1], spacing)
-
- # check if plotting grids
- if plot_type == 'grid':
-
- # plot vertical lines
- plt.plot(def_field[x_inds, :, 0].T, def_field[x_inds, :, 1].T, 'k', linewidth=0.5)
- #plot horizontal lines
- plt.plot(def_field[:, y_inds, 0], def_field[:, y_inds, 1], 'k', linewidth=0.5)
-
- else:
-
- if plot_type == 'arrows':
-
- # calculate grids of coords for plotting
- [Xs, Ys] = np.meshgrid(x_inds, y_inds, indexing='ij')
- # calculate displacement field for plotting
- disp_field_x = def_field[Xs, Ys, 0] - Xs
- disp_field_y = def_field[Xs, Ys, 1] - Ys
-
- #plot displacements using quiver function
- plt.quiver(Xs, Ys, disp_field_x, disp_field_y, angles='xy', scale_units='xy', scale=1)
-
- else:
- print('Display type must be grid or arrows')
-
- plt.axis('image')
-
-
\ No newline at end of file