2 changes: 2 additions & 0 deletions .gitignore
@@ -167,3 +167,5 @@ tutorials/dataset-PixelPandemonium/*
#_*.py
dicom_select
examples
+slicerio_data
+*.nrrd
775 changes: 775 additions & 0 deletions TPTBox/core/internal/slicer_nrrd.py

Large diffs are not rendered by default.

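The new module's 775 lines are collapsed by the diff viewer, but its two entry points show up at the call sites in `nii_wrapper.py` below. A minimal usage sketch inferred from those call sites only — the argument names (including the misspelled `verbos`) are copied from the diff, and treating the return value as an `NII` is an assumption:

```python
# Sketch inferred from the call sites below; the module itself is not rendered.
from TPTBox.core.internal.slicer_nrrd import load_slicer_nrrd, save_slicer_nrrd

# Load a 3D Slicer NRRD segmentation as an NII (assumed return type).
nii = load_slicer_nrrd("case01.seg.nrrd", seg=True, verbos=False)

# Write it back out; keywords mirror the save_slicer_nrrd call in save_nrrd.
save_slicer_nrrd(nii, "out/case01.seg.nrrd", make_parents=True, verbose=True)
```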
175 changes: 6 additions & 169 deletions TPTBox/core/nii_wrapper.py
@@ -248,7 +248,7 @@ def load(cls, path: Image_Reference, seg, c_val=None)-> Self:
return nii

@classmethod
-def load_nrrd(cls, path: str | Path, seg: bool):
+def load_nrrd(cls, path: str | Path, seg: bool,verbos=False):
"""
Load an NRRD file and convert it into a Nifti1Image object.

@@ -272,76 +272,8 @@ def load_nrrd(cls, path: str | Path, seg: bool):
import nrrd # pip install pynrrd, if pynrrd is not already installed
except ModuleNotFoundError:
raise ImportError("The `pynrrd` package is required but not installed. Install it with `pip install pynrrd`.") from None
-_nrrd = nrrd.read(path)
-data = _nrrd[0]
-
-header = dict(_nrrd[1])
-#print(data.shape, header)
-#print(header)
-# Example print out: OrderedDict([
-# ('type', 'short'), ('dimension', 3), ('space', 'left-posterior-superior'),
-# ('sizes', array([512, 512, 1637])),
-# ('space directions', array([[0.9765625, 0. , 0. ],
-# [0. , 0.9765625, 0. ],
-# [0. , 0. , 0.6997555]])),
-# ('kinds', ['domain', 'domain', 'domain']), ('endian', 'little'),
-# ('encoding', 'gzip'),
-# ('space origin', array([-249.51171875, -392.51171875, 119.7]))])
-
-# Construct the affine transformation matrix
-#print(header)
-try:
-#print(header['space directions'])
-#print(header['space origin'])
-space_directions = np.array(header['space directions'])
-space_origin = np.array(header['space origin'])
-#space_directions = space_directions[~np.isnan(space_directions).any(axis=1)] #Filter NAN
-n = header['dimension']
-#print(data.shape)
-if space_directions.shape != (n, n):
-space_directions = space_directions[~np.isnan(space_directions).all(axis=1)]
-m = len(space_directions[0])
-if m != n:
-n=m
-data = data.sum(axis=0)
-space_directions = space_directions.T
-if space_directions.shape != (n, n):
-raise ValueError(f"Expected 'space directions' to be a nxn matrix. n = {n} is not {space_directions.shape}",space_directions)
-if space_origin.shape != (n,):
-raise ValueError("Expected 'space origin' to be a n-element vector. n = ", n, "is not",space_origin.shape )
-space = header.get("space","left-posterior-superior")
-affine = np.eye(n+1) # Initialize 4x4 identity matrix
-affine[:n, :n] = space_directions # Set rotation and scaling
-affine[:n, n] = space_origin # Set translation
-#print(affine,space)
-if space =="left-posterior-superior": #LPS (SITK-space)
-affine[0] *=-1
-affine[1] *=-1
-elif space == "right-posterior-superior": #RPS
-affine[0] *=-1
-elif space == "left-anterior-superior": #LAS
-affine[1] *=-1
-elif space == "right-anterior-superior": #RAS
-pass
-else:
-raise ValueError(space)
-#print(affine)
-
-except KeyError as e:
-raise KeyError(f"Missing expected header field: {e}") from None
-if len(data.shape) != n:
-raise ValueError(f"{len(data.shape)=} diffrent from n = ", n)
-ref_orientation = header.get("ref_orientation")
-for i in ["ref_orientation","dimension","space directions","space origin""space","type","endian"]:
-header.pop(i, None)
-for key in list(header.keys()):
-if "_Extent" in key:
-del header[key]
-nii = NII((data,affine,None),seg=seg,info = header)
-if ref_orientation is not None:
-nii.reorient_(ref_orientation)
-return nii
-
+from TPTBox.core.internal.slicer_nrrd import load_slicer_nrrd
+return load_slicer_nrrd(path,seg,verbos=verbos)
@classmethod
def load_bids(cls, nii_bids: bids_files.BIDS_FILE):
nifty = None
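For readers tracing the inline logic removed above: the core of it is building a NIfTI affine from the NRRD header and flipping into RAS. A standalone sketch, assuming a plain 3-D volume; the per-axis sign rule generalizes the LPS branch of the removed code, and the function name is illustrative:

```python
import numpy as np

def nrrd_header_to_ras_affine(header: dict) -> np.ndarray:
    """Distilled from the removed inline code; assumes a plain 3-D volume."""
    affine = np.eye(4)
    affine[:3, :3] = np.asarray(header["space directions"], dtype=float)  # rotation/scaling
    affine[:3, 3] = np.asarray(header["space origin"], dtype=float)       # translation
    space = header.get("space", "left-posterior-superior")
    # NIfTI wants RAS: negate every output axis that points the other way,
    # e.g. LPS (the SimpleITK/Slicer default) flips the first two rows.
    for i, direction in enumerate(space.split("-")):
        if direction in ("left", "posterior", "inferior"):
            affine[i] *= -1  # flip the whole row, translation included
    return affine
```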
@@ -1662,67 +1594,6 @@ def truncate_labels_beyond_reference(
):
return self.truncate_labels_beyond_reference_(idx,not_beyond,fill,axis,inclusion)

-def infect_conv(self: NII, reference_mask: NII, max_iters=100,inplace=False):
-"""
-Expands labels from self_mask into regions of reference_mask == 1 via breadth-first diffusion.
-
-Args:
-self_mask (ndarray): (H, W) or (D, H, W) integer-labeled array.
-reference_mask (ndarray): Binary array of same shape as self_mask.
-max_iters (int): Maximum number of propagation steps.
-
-Returns:
-ndarray: Updated label mask.
-"""
-from scipy.ndimage import convolve
-crop = reference_mask.compute_crop(0,1)
-self.assert_affine(reference_mask)
-self_mask = self.apply_crop(crop).get_seg_array().copy()
-ref_mask = np.clip(reference_mask.apply_crop(crop).get_seg_array(), 0, 1)
-
-ndim = len(self_mask.shape)
-
-# Define neighborhood kernel
-if ndim == 2:
-kernel = np.array([[0, 1, 0],
-[1, 0, 1],
-[0, 1, 0]], dtype=np.uint8)
-elif ndim == 3:
-kernel = np.zeros((3, 3, 3), dtype=np.uint8)
-kernel[1, 1, 0] = kernel[1, 1, 2] = 1
-kernel[1, 0, 1] = kernel[1, 2, 1] = 1
-kernel[0, 1, 1] = kernel[2, 1, 1] = 1
-else:
-raise NotImplementedError("Only 2D or 3D masks are supported.")
-try:
-from tqdm import tqdm
-r = tqdm(range(max_iters),desc="infect")
-except Exception:
-r = range(max_iters)
-for _ in r:
-unlabeled = (self_mask == 0) & (ref_mask == 1)
-updated = False
-
-for label in np_unique(self_mask):
-if label == 0:
-continue # skip background
-
-binary_label_mask = (self_mask == label).astype(np.uint8)
-neighbor_count = convolve(binary_label_mask, kernel, mode="constant", cval=0)
-
-# Find unlabeled voxels adjacent to current label
-new_voxels = (neighbor_count > 0) & unlabeled
-
-if np.any(new_voxels):
-self_mask[new_voxels] = label
-updated = True
-
-if not updated:
-break
-org = self.get_seg_array()
-org[crop] = self_mask
-return self.set_array(org,inplace=inplace)
-
def infect(self: NII, reference_mask: NII, inplace=False,verbose=True,axis:int|str|None=None):
"""
Expands labels from self_mask into regions of reference_mask == 1 via breadth-first diffusion.
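The deleted `infect_conv` grew labels one convolution pass at a time; the retained `infect` keeps the same contract. The same effect can also be had in a single pass with a distance transform — a sketch of that alternative (an illustration, not the library's implementation; it assigns by Euclidean nearness rather than by connectivity through the reference mask):

```python
import numpy as np
from scipy.ndimage import distance_transform_edt

def diffuse_labels(labels: np.ndarray, region: np.ndarray) -> np.ndarray:
    """Fill voxels where region == 1 with the nearest nonzero label."""
    # For every background voxel, find the index of the closest labeled voxel.
    idx = distance_transform_edt(labels == 0, return_distances=False, return_indices=True)
    out = labels.copy()
    fill = (labels == 0) & (region == 1)
    out[fill] = labels[tuple(ax[fill] for ax in idx)]
    return out
```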
@@ -1856,8 +1727,7 @@ def clone(self):
@secure_save
def save(self,file:str|Path,make_parents=True,verbose:logging=True, dtype = None):
if make_parents:
-Path(file).parent.mkdir(exist_ok=True,parents=True)
-
+Path(file).parent.mkdir(0o771,exist_ok=True,parents=True)
arr = self.get_array() if not self.seg else self.get_seg_array()
if isinstance(arr,np.floating) and self.seg:
self.set_dtype_("smallest_uint")
@@ -1894,43 +1764,10 @@ def save_nrrd(self:Self, file: str | Path|bids_files.BIDS_FILE,make_parents=True
raise ImportError("The `pynrrd` package is required but not installed. Install it with `pip install pynrrd`." ) from None
if isinstance(file, bids_files.BIDS_FILE):
file = file.file['nrrd']
if not str(file).endswith(".nrrd"):
file = str(file)+".nrrd"
-if make_parents:
-Path(file).parent.mkdir(exist_ok=True,parents=True)
-_header = {}
-#if self.orientation not in [("L","P","S")]: #,("R","P","S"),("R","A","S"),("L","A","S")
-# _header = {"ref_orientation": "".join(self.orientation)}
-# self = self.reorient(("P","L","S")) # Convert to LAS-SimpleITK # noqa: PLW0642
-# Slicer only allows LPS and flip of L and P axis
-ori = "left-posterior-superior"# "-".join([_dirction_name_itksnap_dict[i] for i in self.orientation])
-
-data = self.get_array()
-affine = self.affine.copy()
-affine[0] *=-1
-affine[1] *=-1
-# Extract header fields from the affine matrix
-n = affine.shape[0] - 1
-space_directions = affine[:n, :n]
-space_origin = affine[:n, n]
-_header["kinds"]= ['domain'] * n if "kinds" not in self.info else self.info["kinds"]
-header = {
-'type': str(data.dtype),
-'dimension': n,
-'space': ori,
-'sizes': data.shape,#(data.shape[1],data.shape[0],data.shape[2]),
-'space directions': space_directions.tolist(),
-'space origin': space_origin,
-'endian': 'little',
-'encoding': 'gzip',
-**_header,**self.info
-}
-header.pop("Segmentation_ConversionParameters", None)
-# Save NRRD file
+from TPTBox.core.internal.slicer_nrrd import save_slicer_nrrd
+save_slicer_nrrd(self,file,make_parents=make_parents,verbose=verbose,**args)
-
-log.print(f"Saveing {file}",verbose=verbose,ltype=Log_Type.SAVE,end='\r')
-nrrd.write(str(file), data=data, header=header,**args) # nrrd only acepts strings...
-log.print(f"Save {file} as {header['type']}",verbose=verbose,ltype=Log_Type.SAVE)

def __str__(self) -> str:
return f"{super().__str__()}, seg={self.seg}" # type: ignore
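Net effect of the `nii_wrapper.py` changes: both NRRD code paths now delegate to the new helper module. A hypothetical round-trip (file paths are illustrative; the top-level `NII` import is assumed):

```python
from TPTBox import NII  # assumed top-level export

nii = NII.load_nrrd("case01.seg.nrrd", seg=True)         # delegates to load_slicer_nrrd
nii.save_nrrd("out/case01.seg.nrrd", make_parents=True)  # delegates to save_slicer_nrrd
```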
24 changes: 12 additions & 12 deletions TPTBox/segmentation/VibeSeg/inference_nnunet.py
@@ -129,15 +129,15 @@ def run_inference_on_file(
og_nii = input_nii[0].copy()

try:
zoom_old = ds_info.get("spacing")
if idx not in [527] and zoom_old is not None:
zoom_old = zoom_old[::-1]
zoom = ds_info.get("spacing")
if idx not in [527] and zoom is not None:
zoom = zoom[::-1]

zoom_old = ds_info.get("resolution_range", zoom_old)
if zoom_old is None:
zoom = plans_info["configurations"]["3d_fullres"]["spacing"]
if all(zoom[0] == z for z in zoom):
zoom_old = zoom
zoom = ds_info.get("resolution_range", zoom)
if zoom is None:
zoom_ = plans_info["configurations"]["3d_fullres"]["spacing"]
if all(zoom[0] == z for z in zoom_):
zoom = zoom_
# order = plans_info["transpose_backward"]
## order2 = plans_info["transpose_forward"]
# zoom = [zoom[order[0]], zoom[order[1]], zoom[order[2]]][::-1]
Expand All @@ -150,7 +150,7 @@ def run_inference_on_file(

# zoom_old = zoom_old[::-1]

-zoom_old = [float(z) for z in zoom_old]
+zoom = [float(z) for z in zoom]
except Exception:
pass
assert len(ds_info["channel_names"]) == len(input_nii), (
Expand All @@ -163,9 +163,9 @@ def run_inference_on_file(
print("orientation", orientation, f"{orientation_ref=}") if verbose else None
input_nii = [i.reorient(orientation) for i in input_nii]

-if zoom_old is not None:
-print("rescale", zoom, f"{zoom_old=}") if verbose else None
-input_nii = [i.rescale_(zoom_old, mode=mode) for i in input_nii]
+if zoom is not None:
+print("rescale", input_nii[0].orientation, f"{zoom=}") if verbose else None
+input_nii = [i.rescale_(zoom, mode=mode) for i in input_nii]
print(input_nii)
print("squash to float16") if verbose else None
input_nii = [squash_so_it_fits_in_float16(i) for i in input_nii]
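The rename from `zoom_old` to `zoom` makes the fallback chain easier to follow. Isolated as a function, the hunk's logic reads as below — a sketch, not an exported API; the axis-order comment is an interpretation:

```python
def resolve_target_spacing(ds_info: dict, plans_info: dict, idx: int):
    """Sketch of the spacing-selection chain in run_inference_on_file."""
    zoom = ds_info.get("spacing")
    if idx not in [527] and zoom is not None:
        zoom = zoom[::-1]  # reverse axis order (assumption: stored z,y,x)
    zoom = ds_info.get("resolution_range", zoom)  # a resolution range takes priority
    if zoom is None:
        candidate = plans_info["configurations"]["3d_fullres"]["spacing"]
        if all(candidate[0] == z for z in candidate):  # only trust isotropic plans
            zoom = candidate
    return None if zoom is None else [float(z) for z in zoom]
```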
48 changes: 42 additions & 6 deletions TPTBox/spine/snapshot2D/snapshot_modular.py
@@ -30,7 +30,7 @@
v_idx2name,
v_idx_order,
)
-from TPTBox.mesh3D.mesh_colors import _color_map_in_row # vert_color_map
+from TPTBox.mesh3D.mesh_colors import _color_map_in_row, get_color_by_label

NII.suppress_dtype_change_printout_in_set_array(True)
"""
@@ -430,6 +430,8 @@ def make_isotropic2dpluscolor(arr3d, zms2d, msk=False):

def get_contrasting_stroke_color(rgb):
# Convert RGBA to RGB if necessary
+if isinstance(rgb, int):
+rgb = list(get_color_by_label(rgb).rgb / 255.0)
if len(rgb) == 4:
rgb = rgb[:3]
luminance = 0.299 * rgb[0] + 0.587 * rgb[1] + 0.114 * rgb[2]
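The new `isinstance(rgb, int)` branch lets callers pass a label ID instead of an RGB tuple. The function's tail is cut off by the hunk, so a self-contained sketch of the visible luminance test (ITU-R BT.601 weights) follows; the 0.5 threshold and the black/white return values are assumptions:

```python
def contrasting_stroke_color(rgb) -> str:
    """Pick a readable outline color for text drawn over `rgb` (0..1 floats)."""
    if len(rgb) == 4:  # RGBA -> RGB
        rgb = rgb[:3]
    # Perceived luminance, ITU-R BT.601 weights (as in the diff above).
    luminance = 0.299 * rgb[0] + 0.587 * rgb[1] + 0.114 * rgb[2]
    return "black" if luminance > 0.5 else "white"  # threshold assumed
```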
@@ -505,7 +507,15 @@ def plot_sag_centroids(
elif len(x) == 4:
v = (np.array(ctd[x[0], x[1]]) + np.array(ctd[x[2], x[3]])) / 2

-axs.add_patch(FancyArrow(v[1] * zms[1], v[0] * zms[0], c, d, color=cmap(color - 1 % LABEL_MAX % cmap.N)))
+axs.add_patch(
+FancyArrow(
+v[1] * zms[1],
+v[0] * zms[0],
+c,
+d,
+color=cmap(color - 1 % LABEL_MAX % cmap.N),
+)
+)
if "text_sag" in ctd.info:
for color, x in ctd.info["text_sag"]:
backgroundcolor = get_contrasting_stroke_color(color)
@@ -578,15 +588,29 @@ def plot_cor_centroids(
)
except Exception:
pass

if "line_segments_cor" in ctd.info:
for color, x, (c, d) in ctd.info["line_segments_cor"]:
+# if isinstance(color, int):
+# color = list(get_color_by_label(color).rgb / 255.0)
+
if len(x) == 2:
v = ctd[x]
elif len(x) == 4:
v = (np.array(ctd[x[0], x[1]]) + np.array(ctd[x[2], x[3]])) / 2
-axs.add_patch(FancyArrow(v[2] * zms[2], v[0] * zms[0], c, d, color=cmap(color - 1 % LABEL_MAX % cmap.N)))
+axs.add_patch(
+FancyArrow(
+v[2] * zms[2],
+v[0] * zms[0],
+c,
+d,
+color=cmap(color - 1 % LABEL_MAX % cmap.N),
+)
+)
if "text_cor" in ctd.info:
for color, x in ctd.info["text_cor"]:
+if isinstance(color, int):
+color = list(get_color_by_label(color).rgb / 255.0) # noqa: PLW2901
backgroundcolor = get_contrasting_stroke_color(color)
if isinstance(color, Sequence) and len(color) == 2:
color, curve_location = color # noqa: PLW2901
@@ -1038,9 +1062,21 @@ def create_snapshot( # noqa: C901
)

fig, axs = create_figure(dpi, img_list, has_title=frame.title is None)
-for ax, (img, msk, ctd, wdw, is_sag, alpha, cmap, zms, curve_location, poi_labelmap, hide_centroid_labels, title, frame) in zip(
-axs, frame_list
-):
+for ax, (
+img,
+msk,
+ctd,
+wdw,
+is_sag,
+alpha,
+cmap,
+zms,
+curve_location,
+poi_labelmap,
+hide_centroid_labels,
+title,
+frame,
+) in zip(axs, frame_list):
if title is not None:
ax.set_title(title, fontdict={"fontsize": 18, "color": "black"}, loc="center")
if img.ndim == 3:
2 changes: 1 addition & 1 deletion TPTBox/spine/spinestats/distances.py
@@ -31,7 +31,7 @@ def _compute_distance(
all_pois_computed = True
if not all_pois_computed:
poi = calc_poi_from_subreg_vert(vert, subreg, extend_to=poi, subreg_id=[l1, l2])
-poi.info[key] = poi.calculate_distances_poi_two_locations(l1, l2, keep_zoom=False)
+poi.info[key] = poi.calculate_distances_poi_across_regions(l1, l2, keep_zoom=False)
return poi

