From fcc09ad24f390422b6c785f9c5f693bfe3b08085 Mon Sep 17 00:00:00 2001 From: Timo Dickscheid Date: Wed, 19 Feb 2025 13:24:25 +0100 Subject: [PATCH 1/9] remove boundingbox dependcy from point module --- siibra/locations/point.py | 11 +---------- siibra/locations/pointcloud.py | 2 ++ 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/siibra/locations/point.py b/siibra/locations/point.py index ff04fd1ce..2ddd7caa7 100644 --- a/siibra/locations/point.py +++ b/siibra/locations/point.py @@ -14,7 +14,7 @@ # limitations under the License. """Singular coordinate defined on a space, possibly with an uncertainty.""" -from . import location, boundingbox, pointcloud +from . import location, pointcloud from ..commons import logger from ..retrieval.requests import HttpRequest @@ -115,8 +115,6 @@ def homogeneous(self): def intersection(self, other: location.Location) -> "Point": if isinstance(other, Point): return self if self == other else None - elif isinstance(other, pointcloud.PointCloud): - return self if self in other else None else: return self if other.intersection(self) else None @@ -305,13 +303,6 @@ def __getitem__(self, index): assert 0 <= index < 3 return self.coordinate[index] - @property - def boundingbox(self): - w = max(self.sigma or 0, 1e-6) # at least a micrometer - return boundingbox.BoundingBox( - self - w, self + w, self.space, self.sigma - ) - def bigbrain_section(self): """ Estimate the histological section number of BigBrain diff --git a/siibra/locations/pointcloud.py b/siibra/locations/pointcloud.py index 37cedfbae..c109cf374 100644 --- a/siibra/locations/pointcloud.py +++ b/siibra/locations/pointcloud.py @@ -122,6 +122,8 @@ def intersection(self, other: location.Location): if isinstance(other, PointCloud): intersecting_points = [p for p in self if p.coordinate in other.coordinates] + elif isinstance(other, point.Point): + return other if other in self else None else: intersecting_points = [p for p in self if p.intersects(other)] if len(intersecting_points) == 0: From 8bba09e93b972e71cf859467b859f01bfb1ac0dc Mon Sep 17 00:00:00 2001 From: Timo Dickscheid Date: Wed, 19 Feb 2025 17:06:18 +0100 Subject: [PATCH 2/9] WIP: make patch sampling a live query --- siibra/__init__.py | 4 +- siibra/experimental/__init__.py | 19 -- siibra/experimental/contour.py | 61 ----- .../experimental/cortical_profile_sampler.py | 57 ---- siibra/experimental/patch.py | 98 ------- siibra/features/image/__init__.py | 6 +- siibra/features/image/sections.py | 48 +++- siibra/features/image/volume_of_interest.py | 7 - siibra/livequeries/bigbrain.py | 258 ++++++++++++++---- siibra/locations/__init__.py | 3 +- siibra/locations/boundingbox.py | 2 +- .../plane3d.py => locations/experimental.py} | 108 +++++++- siibra/locations/pointcloud.py | 43 +++ 13 files changed, 408 insertions(+), 306 deletions(-) delete mode 100644 siibra/experimental/__init__.py delete mode 100644 siibra/experimental/contour.py delete mode 100644 siibra/experimental/cortical_profile_sampler.py delete mode 100644 siibra/experimental/patch.py rename siibra/{experimental/plane3d.py => locations/experimental.py} (73%) diff --git a/siibra/__init__.py b/siibra/__init__.py index a43a0782b..053add948 100644 --- a/siibra/__init__.py +++ b/siibra/__init__.py @@ -36,10 +36,9 @@ from .retrieval.cache import Warmup, WarmupLevel from . import configuration -from . import experimental from .configuration import factory from . 
import features, livequeries -from siibra.locations import Point, PointCloud +from siibra.locations import Point, PointCloud, Plane, BoundingBox import os as _os logger.info(f"Version: {__version__}") @@ -151,6 +150,7 @@ def __dir__(): "MapType", "Point", "PointCloud", + "BoundingBox", "QUIET", "VERBOSE", "fetch_ebrains_token", diff --git a/siibra/experimental/__init__.py b/siibra/experimental/__init__.py deleted file mode 100644 index 488da89d1..000000000 --- a/siibra/experimental/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2018-2025 -# Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from .plane3d import Plane3D -from .contour import Contour -from .cortical_profile_sampler import CorticalProfileSampler -from .patch import Patch diff --git a/siibra/experimental/contour.py b/siibra/experimental/contour.py deleted file mode 100644 index 36c636eb5..000000000 --- a/siibra/experimental/contour.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright 2018-2025 -# Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from ..locations import point, pointcloud, boundingbox - -import numpy as np - - -class Contour(pointcloud.PointCloud): - """ - A PointCloud that represents a contour line. - The only difference is that the point order is relevant, - and consecutive points are thought as being connected by an edge. - - In fact, PointCloud assumes order as well, but no connections between points. - """ - - def __init__(self, coordinates, space=None, sigma_mm=0, labels: list = None): - pointcloud.PointCloud.__init__(self, coordinates, space, sigma_mm, labels) - - def crop(self, voi: boundingbox.BoundingBox): - """ - Crop the contour with a volume of interest. - Since the contour might be split from the cropping, - returns a set of contour segments. - """ - segments = [] - - # set the contour point labels to a linear numbering - # so we can use them after the intersection to detect splits. 
- old_labels = self.labels - self.labels = list(range(len(self))) - cropped = self.intersection(voi) - - if cropped is not None and not isinstance(cropped, point.Point): - assert isinstance(cropped, pointcloud.PointCloud) - # Identify contour splits are by discontinuouities ("jumps") - # of their labels, which denote positions in the original contour - jumps = np.diff([self.labels.index(lb) for lb in cropped.labels]) - splits = [0] + list(np.where(jumps > 1)[0] + 1) + [len(cropped)] - for i, j in zip(splits[:-1], splits[1:]): - segments.append( - self.__class__(cropped.coordinates[i:j, :], space=cropped.space) - ) - - # reset labels of the input contour points. - self.labels = old_labels - - return segments diff --git a/siibra/experimental/cortical_profile_sampler.py b/siibra/experimental/cortical_profile_sampler.py deleted file mode 100644 index a9856f504..000000000 --- a/siibra/experimental/cortical_profile_sampler.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2018-2025 -# Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from . import contour -from ..locations import point -from ..core import parcellation - -import numpy as np - - -class CorticalProfileSampler: - """Samples cortical profiles from the cortical layer maps.""" - - def __init__(self): - self.layermap = parcellation.Parcellation.get_instance( - "cortical layers" - ).get_map(space="bigbrain", maptype="labelled") - - def query(self, query_point: point.Point): - q = query_point.warp(self.layermap.space) - smallest_dist = np.inf - best_match = None - for layername in self.layermap.regions: - vertices = self.layermap.fetch(region=layername, format="mesh")["verts"] - dists = np.sqrt(((vertices - q.coordinate) ** 2).sum(1)) - best = np.argmin(dists) - if dists[best] < smallest_dist: - best_match = (layername, best) - smallest_dist = dists[best] - - best_vertex = best_match[1] - hemisphere = "left" if "left" in best_match[0] else "right" - print(f"Best match is vertex #{best_match[1]} in {best_match[0]}.") - - profile = [ - (_, self.layermap.fetch(region=_, format="mesh")["verts"][best_vertex]) - for _ in self.layermap.regions - if hemisphere in _ - ] - - return contour.Contour( - [p[1] for p in profile], - space=self.layermap.space, - labels=[p[0] for p in profile], - ) diff --git a/siibra/experimental/patch.py b/siibra/experimental/patch.py deleted file mode 100644 index 47e37bd04..000000000 --- a/siibra/experimental/patch.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright 2018-2025 -# Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from ..volumes import volume -from ..locations import pointcloud, boundingbox -from ..commons import translation_matrix, y_rotation_matrix - -import numpy as np -import math -from nilearn import image - - -class Patch: - - def __init__(self, corners: pointcloud.PointCloud): - """Construct a patch in physical coordinates. - As of now, only patches aligned in the y plane of the physical space - are supported.""" - # TODO: need to ensure that the points are planar, if more than 3 - assert len(corners) == 4 - assert len(np.unique(corners.coordinates[:, 1])) == 1 - self.corners = corners - - @property - def space(self): - return self.corners.space - - def flip(self): - """Flips the patch. """ - self.corners._coordinates = self.corners.coordinates[[2, 3, 0, 1]] - - def extract_volume(self, image_volume: volume.Volume, resolution_mm: float): - """ - fetches image data in a planar patch. - TODO The current implementation only covers patches which are strictly - define in the y plane. A future implementation should accept arbitrary - oriented patches.accept arbitrary oriented patches. - """ - assert image_volume.space == self.space - - # Extend the 2D patch into a 3D structure - # this is only valid if the patch plane lies within the image canvas. - canvas = image_volume.get_boundingbox() - assert canvas.minpoint[1] <= self.corners.coordinates[0, 1] - assert canvas.maxpoint[1] >= self.corners.coordinates[0, 1] - XYZ = self.corners.coordinates - voi = boundingbox.BoundingBox( - XYZ.min(0)[:3], XYZ.max(0)[:3], space=image_volume.space - ) - # enforce the patch to have the same y dimensions - voi.minpoint[1] = canvas.minpoint[1] - voi.maxpoint[1] = canvas.maxpoint[1] - patch = image_volume.fetch(voi=voi, resolution_mm=resolution_mm) - assert patch is not None - - # patch rotation defined in physical space - vx, vy, vz = XYZ[1] - XYZ[0] - alpha = -math.atan2(-vz, -vx) - cx, cy, cz = XYZ.mean(0) - rot_phys = np.linalg.multi_dot( - [ - translation_matrix(cx, cy, cz), - y_rotation_matrix(alpha), - translation_matrix(-cx, -cy, -cz), - ] - ) - - # rotate the patch in physical space - affine_rot = np.dot(rot_phys, patch.affine) - - # crop in the rotated space - pixels = ( - np.dot(np.linalg.inv(affine_rot), self.corners.homogeneous.T) - .astype("int") - .T - ) - # keep a pixel distance to avoid black border pixels - xmin, ymin, zmin = pixels.min(0)[:3] + 1 - xmax, ymax, zmax = pixels.max(0)[:3] - 1 - h, w = xmax - xmin, zmax - zmin - affine = np.dot(affine_rot, translation_matrix(xmin, 0, zmin)) - return volume.from_nifti( - image.resample_img(patch, target_affine=affine, target_shape=[h, 1, w]), - space=image_volume.space, - name=f"Rotated patch with corner points {self.corners} sampled from {image_volume.name}", - ) diff --git a/siibra/features/image/__init__.py b/siibra/features/image/__init__.py index b48b4aa3e..9463a0c70 100644 --- a/siibra/features/image/__init__.py +++ b/siibra/features/image/__init__.py @@ -22,6 +22,8 @@ XPCTVolumeOfInterest, LSFMVolumeOfInterest, DTIVolumeOfInterest - # SegmentedVolumeOfInterest ) -from .sections import CellbodyStainedSection +from .sections import ( + 
CellbodyStainedSection, + BigBrain1MicronPatch +) diff --git a/siibra/features/image/sections.py b/siibra/features/image/sections.py index 9a4f06e87..18e9cfcdb 100644 --- a/siibra/features/image/sections.py +++ b/siibra/features/image/sections.py @@ -15,12 +15,56 @@ """Multimodal data features in 2D section.""" from . import image +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ...locations import Patch + from ...features.anchor import AnatomicalAnchor class CellbodyStainedSection( image.Image, - configuration_folder='features/images/sections/cellbody', - category="cellular" + configuration_folder="features/images/sections/cellbody", + category="cellular", ): def __init__(self, **kwargs): image.Image.__init__(self, **kwargs, modality="cell body staining") + + +class BigBrain1MicronPatch(image.Image, category="cellular"): + def __init__( + self, + patch: "Patch", + section: CellbodyStainedSection, + relevance: float, + anchor: "AnatomicalAnchor", + **kwargs + ): + self._patch = patch + self._section = section + self.relevance = relevance + image.Image.__init__( + self, + name=f"Cortical patch in {section.name}", + modality=section.modality, + space_spec=section._space_spec, + providers=list(section._providers.values()), + region=None, + datasets=section.datasets, + bbox=patch.corners.boundingbox, + id=None + ) + self._anchor_cached = anchor + + def fetch(self, flip=False, **kwargs): + assert "voi" not in kwargs + res = kwargs.get("resolution_mm", -1) + mb = kwargs.get("max_bytes") + if flip: + return self._patch.flip().extract_volume( + self._section, resolution_mm=res, max_bytes=mb + ) + else: + return self._patch.extract_volume( + self._section, resolution_mm=res, max_bytes=mb + ) diff --git a/siibra/features/image/volume_of_interest.py b/siibra/features/image/volume_of_interest.py index a9cffed1b..921b20184 100644 --- a/siibra/features/image/volume_of_interest.py +++ b/siibra/features/image/volume_of_interest.py @@ -79,10 +79,3 @@ class LSFMVolumeOfInterest( def __init__(self, modality, **kwargs): image.Image.__init__(self, **kwargs, modality=modality) -# class SegmentedVolumeOfInterest( -# image.Image, -# configuration_folder="features/images/vois/segmentation", -# category="segmentation" -# ): -# def __init__(self, **kwargs): -# image.Image.__init__(self, **kwargs, modality="segmentation") diff --git a/siibra/livequeries/bigbrain.py b/siibra/livequeries/bigbrain.py index f699260d6..62d1f4c71 100644 --- a/siibra/livequeries/bigbrain.py +++ b/siibra/livequeries/bigbrain.py @@ -16,17 +16,26 @@ from . 
import query -from ..features.tabular import bigbrain_intensity_profile, layerwise_bigbrain_intensities +from ..features.tabular import ( + bigbrain_intensity_profile, + layerwise_bigbrain_intensities, +) +from ..features.image import volume_of_interest, CellbodyStainedSection +from ..features.image.sections import BigBrain1MicronPatch from ..features import anchor as _anchor -from ..commons import logger -from ..locations import point, pointcloud, location +from ..commons import logger, siibra_tqdm +from ..locations import point, pointcloud, location, experimental from ..core import structure +from ..core.concept import get_registry from ..retrieval import requests, cache from ..retrieval.datasets import GenericDataset +from ..volumes import Volume + import numpy as np from typing import List from os import path +from scipy.spatial import KDTree class WagstylProfileLoader: @@ -41,23 +50,23 @@ class WagstylProfileLoader: DATASET = GenericDataset( name="HIBALL workshop on cortical layers", contributors=[ - 'Konrad Wagstyl', - 'Stéphanie Larocque', - 'Guillem Cucurull', - 'Claude Lepage', - 'Joseph Paul Cohen', - 'Sebastian Bludau', - 'Nicola Palomero-Gallagher', - 'Lindsay B. Lewis', - 'Thomas Funck', - 'Hannah Spitzer', - 'Timo Dickscheid', - 'Paul C. Fletcher', - 'Adriana Romero', - 'Karl Zilles', - 'Katrin Amunts', - 'Yoshua Bengio', - 'Alan C. Evans' + "Konrad Wagstyl", + "Stéphanie Larocque", + "Guillem Cucurull", + "Claude Lepage", + "Joseph Paul Cohen", + "Sebastian Bludau", + "Nicola Palomero-Gallagher", + "Lindsay B. Lewis", + "Thomas Funck", + "Hannah Spitzer", + "Timo Dickscheid", + "Paul C. Fletcher", + "Adriana Romero", + "Karl Zilles", + "Katrin Amunts", + "Yoshua Bengio", + "Alan C. Evans", ], url="https://github.com/kwagstyl/cortical_layers_tutorial/", description="Cortical profiles of BigBrain staining intensities computed by Konrad Wagstyl, " @@ -67,7 +76,7 @@ class WagstylProfileLoader: "http://dx.doi.org/10.1371/journal.pbio.3000678." "The data is taken from the tutorial at " "https://github.com/kwagstyl/cortical_layers_tutorial. Each vertex is " - "assigned to the regional map when queried." + "assigned to the regional map when queried.", ) def __init__(self): @@ -76,15 +85,22 @@ def __init__(self): @property def profile_labels(self): - return np.arange(0., 1., 1. 
/ self._profiles.shape[1]) + return np.arange(0.0, 1.0, 1.0 / self._profiles.shape[1]) @classmethod def _load(cls): # read thicknesses, in mm, and normalize by their last column which is the total thickness - thickness = requests.HttpRequest(f"{cls.REPO}/{cls.THICKNESSES_FILE_LEFT}").data.T - total_thickness = thickness[:, :-1].sum(1) # last column is the computed total thickness + thickness = requests.HttpRequest( + f"{cls.REPO}/{cls.THICKNESSES_FILE_LEFT}" + ).data.T + total_thickness = thickness[:, :-1].sum( + 1 + ) # last column is the computed total thickness valid = np.where(total_thickness > 0)[0] - cls._boundary_depths = np.c_[np.zeros_like(valid), (thickness[valid, :] / total_thickness[valid, None]).cumsum(1)] + cls._boundary_depths = np.c_[ + np.zeros_like(valid), + (thickness[valid, :] / total_thickness[valid, None]).cumsum(1), + ] cls._boundary_depths[:, -1] = 1 # account for float calculation errors # find profiles with valid thickness @@ -112,15 +128,23 @@ def __len__(self): cache.Warmup.register_warmup_fn()(lambda: WagstylProfileLoader._load()) -class BigBrainProfileQuery(query.LiveQuery, args=[], FeatureType=bigbrain_intensity_profile.BigBrainIntensityProfile): +class BigBrainProfileQuery( + query.LiveQuery, + args=[], + FeatureType=bigbrain_intensity_profile.BigBrainIntensityProfile, +): def __init__(self): query.LiveQuery.__init__(self) - def query(self, concept: structure.BrainStructure, **kwargs) -> List[bigbrain_intensity_profile.BigBrainIntensityProfile]: + def query( + self, concept: structure.BrainStructure, **kwargs + ) -> List[bigbrain_intensity_profile.BigBrainIntensityProfile]: loader = WagstylProfileLoader() - mesh_vertices = pointcloud.PointCloud(loader._vertices, space='bigbrain') - matched = concept.intersection(mesh_vertices) # returns a reduced PointCloud with og indices as labels + mesh_vertices = pointcloud.PointCloud(loader._vertices, space="bigbrain") + matched = concept.intersection( + mesh_vertices + ) # returns a reduced PointCloud with og indices as labels if matched is None: return [] if isinstance(matched, point.Point): @@ -134,21 +158,21 @@ def query(self, concept: structure.BrainStructure, **kwargs) -> List[bigbrain_in features = [] for i in indices: anchor = _anchor.AnatomicalAnchor( - location=point.Point(loader._vertices[i], space='bigbrain'), + location=point.Point(loader._vertices[i], space="bigbrain"), region=str(concept), - species='Homo sapiens' + species="Homo sapiens", ) prof = bigbrain_intensity_profile.BigBrainIntensityProfile( anchor=anchor, depths=loader.profile_labels, values=loader._profiles[i], - boundaries=loader._boundary_depths[i] + boundaries=loader._boundary_depths[i], ) prof.anchor._assignments[concept] = _anchor.AnatomicalAssignment( query_structure=concept, assigned_structure=concept, qualification=_anchor.Qualification.CONTAINED, - explanation=f"Surface vertex of BigBrain cortical profile was filtered using {concept}" + explanation=f"Surface vertex of BigBrain cortical profile was filtered using {concept}", ) prof.datasets = [WagstylProfileLoader.DATASET] features.append(prof) @@ -156,16 +180,24 @@ def query(self, concept: structure.BrainStructure, **kwargs) -> List[bigbrain_in return features -class LayerwiseBigBrainIntensityQuery(query.LiveQuery, args=[], FeatureType=layerwise_bigbrain_intensities.LayerwiseBigBrainIntensities): +class LayerwiseBigBrainIntensityQuery( + query.LiveQuery, + args=[], + FeatureType=layerwise_bigbrain_intensities.LayerwiseBigBrainIntensities, +): def __init__(self): 
query.LiveQuery.__init__(self) - def query(self, concept: structure.BrainStructure, **kwargs) -> List[layerwise_bigbrain_intensities.LayerwiseBigBrainIntensities]: + def query( + self, concept: structure.BrainStructure, **kwargs + ) -> List[layerwise_bigbrain_intensities.LayerwiseBigBrainIntensities]: loader = WagstylProfileLoader() - mesh_vertices = pointcloud.PointCloud(loader._vertices, space='bigbrain') - matched = concept.intersection(mesh_vertices) # returns a reduced PointCloud with og indices as labels if the concept is a region + mesh_vertices = pointcloud.PointCloud(loader._vertices, space="bigbrain") + matched = concept.intersection( + mesh_vertices + ) # returns a reduced PointCloud with og indices as labels if the concept is a region if matched is None: return [] if isinstance(matched, point.Point): @@ -180,27 +212,161 @@ def query(self, concept: structure.BrainStructure, **kwargs) -> List[layerwise_b # compute array of layer labels for all coefficients in profiles_left N = matched_profiles.shape[1] prange = np.arange(N) - layer_labels = 7 - np.array([ - [np.array([[(prange < T) * 1] for i, T in enumerate((b * N).astype('int'))]).squeeze().sum(0)] - for b in boundary_depths - ]).reshape((-1, 200)) + layer_labels = 7 - np.array( + [ + [ + np.array( + [ + [(prange < T) * 1] + for i, T in enumerate((b * N).astype("int")) + ] + ) + .squeeze() + .sum(0) + ] + for b in boundary_depths + ] + ).reshape((-1, 200)) anchor = _anchor.AnatomicalAnchor( - location=pointcloud.PointCloud(loader._vertices[indices, :], space='bigbrain'), + location=pointcloud.PointCloud( + loader._vertices[indices, :], space="bigbrain" + ), region=str(concept), - species='Homo sapiens' + species="Homo sapiens", ) result = layerwise_bigbrain_intensities.LayerwiseBigBrainIntensities( anchor=anchor, - means=[matched_profiles[layer_labels == layer].mean() for layer in range(1, 7)], - stds=[matched_profiles[layer_labels == layer].std() for layer in range(1, 7)], + means=[ + matched_profiles[layer_labels == layer].mean() for layer in range(1, 7) + ], + stds=[ + matched_profiles[layer_labels == layer].std() for layer in range(1, 7) + ], ) result.anchor._assignments[concept] = _anchor.AnatomicalAssignment( query_structure=concept, assigned_structure=concept, qualification=_anchor.Qualification.CONTAINED, - explanation=f"Surface vertices of BigBrain cortical profiles were filtered using {concept}" + explanation=f"Surface vertices of BigBrain cortical profiles were filtered using {concept}", ) result.datasets = [WagstylProfileLoader.DATASET] return [result] + + +class BigBrain1MicronPatchQuery( + query.LiveQuery, args=[], FeatureType=BigBrain1MicronPatch +): + + def __init__(self): + self.layermap = get_registry("Map").get("cortical layers bigbrain") + query.LiveQuery.__init__(self) + + def _get_closest_profile(self, query_point: point.Point, hemisphere: str): + q = query_point.warp(self.layermap.space) + smallest_dist = np.inf + best_match = None + for layername in self.layermap.regions: + if hemisphere not in layername: + continue + vertices = self.layermap.fetch(region=layername, format="mesh")["verts"] + dists = np.sqrt(((vertices - q.coordinate) ** 2).sum(1)) + best = np.argmin(dists) + if dists[best] < smallest_dist: + best_match = (layername, best) + smallest_dist = dists[best] + + best_vertex = best_match[1] + logger.debug(f"Best match is vertex #{best_match[1]} in {best_match[0]}.") + + profile = [ + (_, self.layermap.fetch(region=_, format="mesh")["verts"][best_vertex]) + for _ in self.layermap.regions + if 
hemisphere in _ + ] + + return pointcloud.Contour( + [p[1] for p in profile], + space=self.layermap.space, + labels=[p[0] for p in profile], + ) + + def query( + self, concept: structure.BrainStructure, **kwargs + ) -> List[BigBrain1MicronPatch]: + + if not isinstance(concept, Volume): + logger.warning( + "Querying BigBrain1MicronPatch features requires to " + "query with an image volume." + ) + return [] + + features = [] + sections = [ + s + for s in CellbodyStainedSection._get_instances() + if s.get_boundingbox().intersects(concept) + ] + if not sections: + return [] + lower_threshold = kwargs.get("lower_threshold", 0) + bb_bbox = concept.get_boundingbox().warp('bigbrain') + + for hemisphere in ["left", "right"]: + print("considering hemisphere", hemisphere) + l4 = self.layermap.parcellation.get_region("4 " + hemisphere) + l4mesh = self.layermap.fetch(l4, format="mesh") + layerverts = { + n: self.layermap.fetch(region=n, format="mesh")["verts"] + for n in self.layermap.regions if hemisphere in n + } + assert l4.name in layerverts + l4verts = pointcloud.PointCloud(layerverts[l4.name], "bigbrain") + if not l4verts.boundingbox.intersects(bb_bbox): + continue + print("hemisphere", hemisphere, "good") + vertex_tree = KDTree(layerverts[l4.name]) + for s in siibra_tqdm(sections, unit="sections", desc="Testing sections"): + imgplane = experimental.Plane.from_image(s) + try: + contour = imgplane.intersect_mesh(l4mesh) + except AssertionError: + logger.error(f"Could not intersect with layer 4 mesh: {s.name}") + continue + all_points = pointcloud.from_points(sum(map(list, contour), [])) + all_probs = concept.evaluate_points(all_points) + points = { + pt.coordinate: prob + for pt, prob in zip(all_points, all_probs) + if prob >= lower_threshold + } + _, indices = vertex_tree.query(np.array(list(points.keys()))) + for prob, nnb in zip(points.values(), indices): + prof = pointcloud.Contour( + [ + layerverts[_][nnb] + for _ in self.layermap.regions + if hemisphere in _ + ], + space=self.layermap.space, + ) + patch = imgplane.get_enclosing_patch(prof) + if patch is None: + continue + anchor = _anchor.AnatomicalAnchor( + location=patch.corners, species="Homo sapiens" + ) + anchor._assignments[concept] = _anchor.AnatomicalAssignment( + query_structure=concept, + assigned_structure=s.anchor.volume, + qualification=_anchor.Qualification.CONTAINED + ) + features.append( + BigBrain1MicronPatch( + patch=patch, section=s, relevance=prob, anchor=anchor + ) + ) + + return features diff --git a/siibra/locations/__init__.py b/siibra/locations/__init__.py index 1bf902b88..44c7ddf82 100644 --- a/siibra/locations/__init__.py +++ b/siibra/locations/__init__.py @@ -16,7 +16,8 @@ from .location import Location from .point import Point -from .pointcloud import PointCloud, from_points +from .pointcloud import PointCloud, Contour, from_points +from .experimental import Patch, Plane from .boundingbox import BoundingBox diff --git a/siibra/locations/boundingbox.py b/siibra/locations/boundingbox.py index ae986e18d..ce51773d3 100644 --- a/siibra/locations/boundingbox.py +++ b/siibra/locations/boundingbox.py @@ -85,7 +85,7 @@ def __init__( self.maxpoint[d] = self.minpoint[d] + minsize if self.volume == 0: - logger.warning(f"Zero-volume bounding box from points {point1} and {point2} in {self.space} space.") + logger.debug(f"Zero-volume bounding box from points {point1} and {point2} in {self.space} space.") @property def id(self) -> str: diff --git a/siibra/experimental/plane3d.py b/siibra/locations/experimental.py similarity 
index 73% rename from siibra/experimental/plane3d.py rename to siibra/locations/experimental.py index ea4677e2e..41d6c9432 100644 --- a/siibra/experimental/plane3d.py +++ b/siibra/locations/experimental.py @@ -13,15 +13,98 @@ # See the License for the specific language governing permissions and # limitations under the License. -from . import contour -from . import patch -from ..locations import point, pointcloud from ..volumes import volume +from . import point, pointcloud, boundingbox +from ..commons import translation_matrix, y_rotation_matrix, SIIBRA_MAX_FETCH_SIZE_GIB import numpy as np +import math +from nilearn import image -class Plane3D: +class Patch: + + def __init__(self, corners: pointcloud.PointCloud): + """Construct a patch in physical coordinates. + As of now, only patches aligned in the y plane of the physical space + are supported.""" + # TODO: need to ensure that the points are planar, if more than 3 + assert len(corners) == 4 + assert len(np.unique(corners.coordinates[:, 1])) == 1 + self.corners = corners + + @property + def space(self): + return self.corners.space + + def flip(self): + """Returns a flipped version of the patch.""" + new_corners = self.corners.coordinates.copy()[[2, 3, 0, 1]] + return Patch(pointcloud.PointCloud(new_corners, self.space)) + + def extract_volume( + self, + image_volume: volume.Volume, + resolution_mm: float, + max_bytes: float = SIIBRA_MAX_FETCH_SIZE_GIB, + ): + """ + fetches image data in a planar patch. + TODO The current implementation only covers patches which are strictly + define in the y plane. A future implementation should accept arbitrary + oriented patches.accept arbitrary oriented patches. + """ + assert image_volume.space == self.space + + # Extend the 2D patch into a 3D structure + # this is only valid if the patch plane lies within the image canvas. + canvas = image_volume.get_boundingbox() + assert canvas.minpoint[1] <= self.corners.coordinates[0, 1] + assert canvas.maxpoint[1] >= self.corners.coordinates[0, 1] + XYZ = self.corners.coordinates + voi = boundingbox.BoundingBox( + XYZ.min(0)[:3], XYZ.max(0)[:3], space=image_volume.space + ) + # enforce the patch to have the same y dimensions + voi.minpoint[1] = canvas.minpoint[1] + voi.maxpoint[1] = canvas.maxpoint[1] + patch = image_volume.fetch(voi=voi, resolution_mm=resolution_mm, max_bytes=max_bytes) + assert patch is not None + + # patch rotation defined in physical space + vx, vy, vz = XYZ[1] - XYZ[0] + alpha = -math.atan2(-vz, -vx) + cx, cy, cz = XYZ.mean(0) + rot_phys = np.linalg.multi_dot( + [ + translation_matrix(cx, cy, cz), + y_rotation_matrix(alpha), + translation_matrix(-cx, -cy, -cz), + ] + ) + + # rotate the patch in physical space + affine_rot = np.dot(rot_phys, patch.affine) + + # crop in the rotated space + pixels = ( + np.dot(np.linalg.inv(affine_rot), self.corners.homogeneous.T) + .astype("int") + .T + ) + # keep a pixel distance to avoid black border pixels + xmin, ymin, zmin = pixels.min(0)[:3] + 1 + xmax, ymax, zmax = pixels.max(0)[:3] - 1 + h, w = xmax - xmin, zmax - zmin + affine = np.dot(affine_rot, translation_matrix(xmin, 0, zmin)) + return volume.from_nifti( + image.resample_img(patch, target_affine=affine, target_shape=[h, 1, w]), + space=image_volume.space, + name=f"Rotated patch with corner points {self.corners} sampled from {image_volume.name}", + ) + + +class Plane: """ A 3D plane in reference space. 
This shall eventually be derived from siibra.Location @@ -64,8 +147,6 @@ def intersect_line_segments(self, startpoints: np.ndarray, endpoints: np.ndarray Returns the set of intersection points. The line segments are given by two Nx3 arrays of their start- and endpoints. The result is an Nx3 list of intersection coordinates. - TODO This returns an intersection even if the line segment intersects the plane, - """ directions = endpoints - startpoints lengths = np.linalg.norm(directions, axis=1) @@ -148,7 +229,7 @@ def intersect_mesh(self, mesh: dict): # should include the exact same set of points. Verify this now. sortrows = lambda A: A[np.lexsort(A.T[::-1]), :] err = (sortrows(fwd_intersections) - sortrows(bwd_intersections)).sum() - assert err == 0 + assert err == 0, f"intersection inconsistency: {err}" # Due to the above property, we can construct closed contours in the # intersection plane by following the interleaved fwd/bwd roles of intersection @@ -175,7 +256,7 @@ def intersect_mesh(self, mesh: dict): # finish the current contour. result.append( - contour.Contour(np.array(points), labels=labels, space=self.space) + pointcloud.Contour(np.array(points), labels=labels, space=self.space) ) if len(face_indices) > 0: # prepare to process another contour segment @@ -236,8 +317,15 @@ def get_enclosing_patch(self, points: pointcloud.PointCloud, margin=[0.5, 0.5]): ) err = (self.project_points(corners).coordinates - corners.coordinates).sum() if err > 1e-5: - print(f"WARNING: patch coordinates were not exactly in-plane (error={err}).") - return patch.Patch(self.project_points(corners)) + print( + f"WARNING: patch coordinates were not exactly in-plane (error={err})." + ) + + try: + patch = Patch(self.project_points(corners)) + except AssertionError: + patch = None + return patch @classmethod def from_image(cls, image: volume.Volume): diff --git a/siibra/locations/pointcloud.py b/siibra/locations/pointcloud.py index c109cf374..c33e7bb8d 100644 --- a/siibra/locations/pointcloud.py +++ b/siibra/locations/pointcloud.py @@ -344,3 +344,46 @@ def label_colors(self): logger.error("Matplotlib is not available. Label colors is disabled.") return None return colormaps.rainbow(np.linspace(0, 1, max(self.labels) + 1)) + + +class Contour(PointCloud): + """ + A PointCloud that represents a contour line. + The only difference is that the point order is relevant, + and consecutive points are thought as being connected by an edge. + + In fact, PointCloud assumes order as well, but no connections between points. + """ + + def __init__(self, coordinates, space=None, sigma_mm=0, labels: list = None): + PointCloud.__init__(self, coordinates, space, sigma_mm, labels) + + def crop(self, voi: "_boundingbox.BoundingBox"): + """ + Crop the contour with a volume of interest. + Since the contour might be split from the cropping, + returns a set of contour segments. + """ + segments = [] + + # set the contour point labels to a linear numbering + # so we can use them after the intersection to detect splits. 
+ old_labels = self.labels + self.labels = list(range(len(self))) + cropped = self.intersection(voi) + + if cropped is not None and not isinstance(cropped, point.Point): + assert isinstance(cropped, PointCloud) + # Identify contour splits are by discontinuouities ("jumps") + # of their labels, which denote positions in the original contour + jumps = np.diff([self.labels.index(lb) for lb in cropped.labels]) + splits = [0] + list(np.where(jumps > 1)[0] + 1) + [len(cropped)] + for i, j in zip(splits[:-1], splits[1:]): + segments.append( + self.__class__(cropped.coordinates[i:j, :], space=cropped.space) + ) + + # reset labels of the input contour points. + self.labels = old_labels + + return segments From 2bd32c76290dde4d7aa329cf1ad245ef47fda8c4 Mon Sep 17 00:00:00 2001 From: Timo Dickscheid Date: Wed, 19 Feb 2025 18:00:37 +0100 Subject: [PATCH 3/9] fetching a patch worked, see examples/tutorials/2025-paper-fig5.py --- examples/tutorials/2025-paper-fig5.py | 26 ++++++++++++++++++++ siibra/features/image/sections.py | 13 +++++++--- siibra/livequeries/bigbrain.py | 13 +++++----- siibra/locations/experimental.py | 35 +++++++++++++++------------ 4 files changed, 62 insertions(+), 25 deletions(-) create mode 100644 examples/tutorials/2025-paper-fig5.py diff --git a/examples/tutorials/2025-paper-fig5.py b/examples/tutorials/2025-paper-fig5.py new file mode 100644 index 000000000..87db679f3 --- /dev/null +++ b/examples/tutorials/2025-paper-fig5.py @@ -0,0 +1,26 @@ +# %% +import siibra +assert siibra.__version__ >= "1.0.1" +import matplotlib.pyplot as plt +import numpy as np +from nilearn import plotting + +# %% +# 1: get a region map +area, hemisphere = '4p', 'right' +parc = siibra.parcellations.get('julich 3.0.3') +region = parc.get_region(f"{area} {hemisphere}") +pmap = parc.get_map('mni152', 'statistical').get_volume(region) + +# %% +# 2: find a corresponding brain section +patches = siibra.features.get(pmap, "BigBrain1MicronPatch", lower_threshold=0.8) + +# %% +print(len(patches)) + +# %% +p1 = patches[0] +# %% +p1.fetch() +# %% diff --git a/siibra/features/image/sections.py b/siibra/features/image/sections.py index 18e9cfcdb..707b74cd0 100644 --- a/siibra/features/image/sections.py +++ b/siibra/features/image/sections.py @@ -51,20 +51,25 @@ def __init__( providers=list(section._providers.values()), region=None, datasets=section.datasets, - bbox=patch.corners.boundingbox, + bbox=patch.boundingbox, id=None ) self._anchor_cached = anchor + def __repr__(self): + return ( + f"<{self.__class__.__name__}(space_spec={self._space_spec}, " + f"name='{self.name}', patch='{self._patch}', providers={self._providers})>" + ) + def fetch(self, flip=False, **kwargs): assert "voi" not in kwargs res = kwargs.get("resolution_mm", -1) - mb = kwargs.get("max_bytes") if flip: return self._patch.flip().extract_volume( - self._section, resolution_mm=res, max_bytes=mb + self._section, resolution_mm=res ) else: return self._patch.extract_volume( - self._section, resolution_mm=res, max_bytes=mb + self._section, resolution_mm=res ) diff --git a/siibra/livequeries/bigbrain.py b/siibra/livequeries/bigbrain.py index 62d1f4c71..aacf6be70 100644 --- a/siibra/livequeries/bigbrain.py +++ b/siibra/livequeries/bigbrain.py @@ -259,8 +259,9 @@ class BigBrain1MicronPatchQuery( query.LiveQuery, args=[], FeatureType=BigBrain1MicronPatch ): - def __init__(self): + def __init__(self, lower_threshold=0.): self.layermap = get_registry("Map").get("cortical layers bigbrain") + self.lower_threshold = lower_threshold 
query.LiveQuery.__init__(self) def _get_closest_profile(self, query_point: point.Point, hemisphere: str): @@ -311,7 +312,6 @@ def query( ] if not sections: return [] - lower_threshold = kwargs.get("lower_threshold", 0) bb_bbox = concept.get_boundingbox().warp('bigbrain') for hemisphere in ["left", "right"]: @@ -326,9 +326,8 @@ def query( l4verts = pointcloud.PointCloud(layerverts[l4.name], "bigbrain") if not l4verts.boundingbox.intersects(bb_bbox): continue - print("hemisphere", hemisphere, "good") vertex_tree = KDTree(layerverts[l4.name]) - for s in siibra_tqdm(sections, unit="sections", desc="Testing sections"): + for i, s in enumerate(siibra_tqdm(sections, unit="sections", desc="Testing sections")): imgplane = experimental.Plane.from_image(s) try: contour = imgplane.intersect_mesh(l4mesh) @@ -340,8 +339,10 @@ def query( points = { pt.coordinate: prob for pt, prob in zip(all_points, all_probs) - if prob >= lower_threshold + if prob >= self.lower_threshold } + if len(points) == 0: + continue _, indices = vertex_tree.query(np.array(list(points.keys()))) for prob, nnb in zip(points.values(), indices): prof = pointcloud.Contour( @@ -356,7 +357,7 @@ def query( if patch is None: continue anchor = _anchor.AnatomicalAnchor( - location=patch.corners, species="Homo sapiens" + location=patch, species="Homo sapiens" ) anchor._assignments[concept] = _anchor.AnatomicalAssignment( query_structure=concept, diff --git a/siibra/locations/experimental.py b/siibra/locations/experimental.py index 41d6c9432..0c4398a0f 100644 --- a/siibra/locations/experimental.py +++ b/siibra/locations/experimental.py @@ -15,14 +15,14 @@ from ..volumes import volume from . import point, pointcloud, boundingbox -from ..commons import translation_matrix, y_rotation_matrix, SIIBRA_MAX_FETCH_SIZE_GIB +from ..commons import translation_matrix, y_rotation_matrix import numpy as np import math from nilearn import image -class Patch: +class Patch(pointcloud.PointCloud): def __init__(self, corners: pointcloud.PointCloud): """Construct a patch in physical coordinates. @@ -31,22 +31,27 @@ def __init__(self, corners: pointcloud.PointCloud): # TODO: need to ensure that the points are planar, if more than 3 assert len(corners) == 4 assert len(np.unique(corners.coordinates[:, 1])) == 1 - self.corners = corners - - @property - def space(self): - return self.corners.space + pointcloud.PointCloud.__init__( + self, + coordinates=corners.coordinates, + space=corners.space, + sigma_mm=corners.sigma_mm, + labels=corners.labels + ) + # self.corners = corners + + def __str__(self): + return f"Patch with boundingbox {self.boundingbox}" def flip(self): """Returns a flipped version of the patch.""" - new_corners = self.corners.coordinates.copy()[[2, 3, 0, 1]] + new_corners = self.coordinates.copy()[[2, 3, 0, 1]] return Patch(pointcloud.PointCloud(new_corners, self.space)) def extract_volume( self, image_volume: volume.Volume, resolution_mm: float, - max_bytes: float = SIIBRA_MAX_FETCH_SIZE_GIB, ): """ fetches image data in a planar patch. @@ -59,16 +64,16 @@ def extract_volume( # Extend the 2D patch into a 3D structure # this is only valid if the patch plane lies within the image canvas. 
canvas = image_volume.get_boundingbox() - assert canvas.minpoint[1] <= self.corners.coordinates[0, 1] - assert canvas.maxpoint[1] >= self.corners.coordinates[0, 1] - XYZ = self.corners.coordinates + assert canvas.minpoint[1] <= self.coordinates[0, 1] + assert canvas.maxpoint[1] >= self.coordinates[0, 1] + XYZ = self.coordinates voi = boundingbox.BoundingBox( XYZ.min(0)[:3], XYZ.max(0)[:3], space=image_volume.space ) # enforce the patch to have the same y dimensions voi.minpoint[1] = canvas.minpoint[1] voi.maxpoint[1] = canvas.maxpoint[1] - patch = image_volume.fetch(voi=voi, resolution_mm=resolution_mm, max_bytes=max_bytes) + patch = image_volume.fetch(voi=voi, resolution_mm=resolution_mm) assert patch is not None # patch rotation defined in physical space @@ -88,7 +93,7 @@ def extract_volume( # crop in the rotated space pixels = ( - np.dot(np.linalg.inv(affine_rot), self.corners.homogeneous.T) + np.dot(np.linalg.inv(affine_rot), self.homogeneous.T) .astype("int") .T ) @@ -100,7 +105,7 @@ def extract_volume( return volume.from_nifti( image.resample_img(patch, target_affine=affine, target_shape=[h, 1, w]), space=image_volume.space, - name=f"Rotated patch with corner points {self.corners} sampled from {image_volume.name}", + name=f"Rotated patch with corner points {self.coordinates} sampled from {image_volume.name}", ) From f554f2c00378658a776f77f25ac8ccc047cfc4e7 Mon Sep 17 00:00:00 2001 From: Timo Dickscheid Date: Thu, 20 Feb 2025 07:20:32 +0100 Subject: [PATCH 4/9] Cleanup and document the BigBrain1MicronPatchQuery --- siibra/livequeries/bigbrain.py | 86 +++++++++++++++++++--------------- 1 file changed, 48 insertions(+), 38 deletions(-) diff --git a/siibra/livequeries/bigbrain.py b/siibra/livequeries/bigbrain.py index aacf6be70..8732b551a 100644 --- a/siibra/livequeries/bigbrain.py +++ b/siibra/livequeries/bigbrain.py @@ -29,7 +29,7 @@ from ..core.concept import get_registry from ..retrieval import requests, cache from ..retrieval.datasets import GenericDataset -from ..volumes import Volume +from ..volumes import Volume, from_array import numpy as np @@ -258,84 +258,86 @@ def query( class BigBrain1MicronPatchQuery( query.LiveQuery, args=[], FeatureType=BigBrain1MicronPatch ): + """ + Sample approximately orthogonal cortical image patches + from BigBrain 1 micron sections, guided by an image volume + in a supported reference space providing. The image + volume is used as a weighted mask to extract patches + along the cortical midsurface with nonzero weights in the + input image. + An optional lower_threshold can be used to narrow down + the search + The weight is stored with the resulting features. 
+ """ def __init__(self, lower_threshold=0.): self.layermap = get_registry("Map").get("cortical layers bigbrain") self.lower_threshold = lower_threshold query.LiveQuery.__init__(self) - def _get_closest_profile(self, query_point: point.Point, hemisphere: str): - q = query_point.warp(self.layermap.space) - smallest_dist = np.inf - best_match = None - for layername in self.layermap.regions: - if hemisphere not in layername: - continue - vertices = self.layermap.fetch(region=layername, format="mesh")["verts"] - dists = np.sqrt(((vertices - q.coordinate) ** 2).sum(1)) - best = np.argmin(dists) - if dists[best] < smallest_dist: - best_match = (layername, best) - smallest_dist = dists[best] - - best_vertex = best_match[1] - logger.debug(f"Best match is vertex #{best_match[1]} in {best_match[0]}.") - - profile = [ - (_, self.layermap.fetch(region=_, format="mesh")["verts"][best_vertex]) - for _ in self.layermap.regions - if hemisphere in _ - ] - - return pointcloud.Contour( - [p[1] for p in profile], - space=self.layermap.space, - labels=[p[0] for p in profile], - ) - def query( self, concept: structure.BrainStructure, **kwargs ) -> List[BigBrain1MicronPatch]: + # make sure input is an image volume + # TODO function should be extended to deal with other concepts as well if not isinstance(concept, Volume): logger.warning( "Querying BigBrain1MicronPatch features requires to " "query with an image volume." ) return [] - - features = [] + + # threshold image volume, if requested + if self.lower_threshold > 0: + img = concept.fetch() + arr = img.get_fdata() + arr[arr < self.lower_threshold] = 0 + vol = from_array(arr, img.affine, space=concept.space, name="filtered volume") + else: + vol = concept + bb_bbox = vol.get_boundingbox().warp('bigbrain') + + # find 1 micron BigBrain sections intersecting the thresholded volume sections = [ - s - for s in CellbodyStainedSection._get_instances() + s for s in CellbodyStainedSection._get_instances() if s.get_boundingbox().intersects(concept) ] if not sections: return [] - bb_bbox = concept.get_boundingbox().warp('bigbrain') + # extract relevant patches + features = [] for hemisphere in ["left", "right"]: - print("considering hemisphere", hemisphere) + + # get layer 4 mesh in the hemisphere l4 = self.layermap.parcellation.get_region("4 " + hemisphere) l4mesh = self.layermap.fetch(l4, format="mesh") layerverts = { n: self.layermap.fetch(region=n, format="mesh")["verts"] for n in self.layermap.regions if hemisphere in n } - assert l4.name in layerverts l4verts = pointcloud.PointCloud(layerverts[l4.name], "bigbrain") if not l4verts.boundingbox.intersects(bb_bbox): continue + + # for each relevant BigBrain 1 micron section, intersect layer IV mesh + # to obtain midcortex-locations, and build their orthogonal patches. + # store the concept's value with the patch. 
vertex_tree = KDTree(layerverts[l4.name]) for i, s in enumerate(siibra_tqdm(sections, unit="sections", desc="Testing sections")): + + # compute layer IV contour in the image plane imgplane = experimental.Plane.from_image(s) try: contour = imgplane.intersect_mesh(l4mesh) except AssertionError: logger.error(f"Could not intersect with layer 4 mesh: {s.name}") continue + + # score the contour points with the query image volume all_points = pointcloud.from_points(sum(map(list, contour), [])) - all_probs = concept.evaluate_points(all_points) + all_probs = vol.evaluate_points(all_points) points = { pt.coordinate: prob for pt, prob in zip(all_points, all_probs) @@ -343,8 +345,15 @@ def query( } if len(points) == 0: continue + + # For each contour point, + # - find the closest BigBrain layer surface vertex, + # - build the profile of corresponding vertices across all layers + # - project the profile to the image section + # - determine the oriented patch along the profile _, indices = vertex_tree.query(np.array(list(points.keys()))) for prob, nnb in zip(points.values(), indices): + prof = pointcloud.Contour( [ layerverts[_][nnb] @@ -356,6 +365,7 @@ def query( patch = imgplane.get_enclosing_patch(prof) if patch is None: continue + anchor = _anchor.AnatomicalAnchor( location=patch, species="Homo sapiens" ) From 17dda04b76d7cb94b36b7ddb614cbf0ab40deefa Mon Sep 17 00:00:00 2001 From: Timo Dickscheid Date: Thu, 20 Feb 2025 07:25:44 +0100 Subject: [PATCH 5/9] Rename Patch to AxisAlignedPatch, apply threshold for section search in BigBrain patch sampling --- siibra/features/image/sections.py | 4 ++-- siibra/livequeries/bigbrain.py | 6 +++++- siibra/locations/__init__.py | 2 +- siibra/locations/experimental.py | 12 ++++++------ 4 files changed, 14 insertions(+), 10 deletions(-) diff --git a/siibra/features/image/sections.py b/siibra/features/image/sections.py index 707b74cd0..65e940248 100644 --- a/siibra/features/image/sections.py +++ b/siibra/features/image/sections.py @@ -18,7 +18,7 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - from ...locations import Patch + from ...locations import AxisAlignedPatch from ...features.anchor import AnatomicalAnchor @@ -34,7 +34,7 @@ def __init__(self, **kwargs): class BigBrain1MicronPatch(image.Image, category="cellular"): def __init__( self, - patch: "Patch", + patch: "AxisAlignedPatch", section: CellbodyStainedSection, relevance: float, anchor: "AnatomicalAnchor", diff --git a/siibra/livequeries/bigbrain.py b/siibra/livequeries/bigbrain.py index 8732b551a..41608d18a 100644 --- a/siibra/livequeries/bigbrain.py +++ b/siibra/livequeries/bigbrain.py @@ -290,6 +290,10 @@ def query( # threshold image volume, if requested if self.lower_threshold > 0: + logger.info( + f"Applying lower threshold of {self.lower_threshold} " + "for BigBrain 1 micron patch query." 
+ ) img = concept.fetch() arr = img.get_fdata() arr[arr < self.lower_threshold] = 0 @@ -301,7 +305,7 @@ def query( # find 1 micron BigBrain sections intersecting the thresholded volume sections = [ s for s in CellbodyStainedSection._get_instances() - if s.get_boundingbox().intersects(concept) + if s.get_boundingbox().intersects(vol) ] if not sections: return [] diff --git a/siibra/locations/__init__.py b/siibra/locations/__init__.py index 44c7ddf82..9b0913b80 100644 --- a/siibra/locations/__init__.py +++ b/siibra/locations/__init__.py @@ -17,7 +17,7 @@ from .location import Location from .point import Point from .pointcloud import PointCloud, Contour, from_points -from .experimental import Patch, Plane +from .experimental import AxisAlignedPatch, Plane from .boundingbox import BoundingBox diff --git a/siibra/locations/experimental.py b/siibra/locations/experimental.py index 0c4398a0f..d74206d4b 100644 --- a/siibra/locations/experimental.py +++ b/siibra/locations/experimental.py @@ -22,7 +22,7 @@ from nilearn import image -class Patch(pointcloud.PointCloud): +class AxisAlignedPatch(pointcloud.PointCloud): def __init__(self, corners: pointcloud.PointCloud): """Construct a patch in physical coordinates. @@ -32,12 +32,12 @@ def __init__(self, corners: pointcloud.PointCloud): assert len(corners) == 4 assert len(np.unique(corners.coordinates[:, 1])) == 1 pointcloud.PointCloud.__init__( - self, + self, coordinates=corners.coordinates, space=corners.space, sigma_mm=corners.sigma_mm, labels=corners.labels - ) + ) # self.corners = corners def __str__(self): @@ -46,7 +46,7 @@ def __str__(self): def flip(self): """Returns a flipped version of the patch.""" new_corners = self.coordinates.copy()[[2, 3, 0, 1]] - return Patch(pointcloud.PointCloud(new_corners, self.space)) + return AxisAlignedPatch(pointcloud.PointCloud(new_corners, self.space)) def extract_volume( self, @@ -112,7 +112,7 @@ def extract_volume( class Plane: """ A 3D plane in reference space. 
- This shall eventually be derived from siibra.Location + TODO This shall eventually be derived from siibra.Location """ def __init__(self, point1: point.Point, point2: point.Point, point3: point.Point): @@ -327,7 +327,7 @@ def get_enclosing_patch(self, points: pointcloud.PointCloud, margin=[0.5, 0.5]): ) try: - patch = Patch(self.project_points(corners)) + patch = AxisAlignedPatch(self.project_points(corners)) except AssertionError: patch = None return patch From 7602e189d4f2d4cab5311127883c582a2c499967 Mon Sep 17 00:00:00 2001 From: Timo Dickscheid Date: Thu, 20 Feb 2025 07:46:49 +0100 Subject: [PATCH 6/9] Add bigbrain_section and vertex number to bigbrain patch features --- examples/tutorials/2025-paper-fig5.py | 17 +++++++++++------ siibra/features/image/sections.py | 13 ++++++++++--- siibra/livequeries/bigbrain.py | 10 +++++++--- 3 files changed, 28 insertions(+), 12 deletions(-) diff --git a/examples/tutorials/2025-paper-fig5.py b/examples/tutorials/2025-paper-fig5.py index 87db679f3..01ffe43f5 100644 --- a/examples/tutorials/2025-paper-fig5.py +++ b/examples/tutorials/2025-paper-fig5.py @@ -9,18 +9,23 @@ # 1: get a region map area, hemisphere = '4p', 'right' parc = siibra.parcellations.get('julich 3.0.3') -region = parc.get_region(f"{area} {hemisphere}") +region = parc.get_region("4p right") pmap = parc.get_map('mni152', 'statistical').get_volume(region) # %% # 2: find a corresponding brain section patches = siibra.features.get(pmap, "BigBrain1MicronPatch", lower_threshold=0.8) +print(f"Found {len(patches)} patches.") # %% -print(len(patches)) +N = 5 +f, axs = plt.subplots(1, N) +for n, (patch, ax) in enumerate(zip(patches[:N], axs.ravel())): + patchimg = patch.fetch() + patchdata = patchimg.get_fdata().squeeze() + ax.imshow(patchdata, cmap='gray', vmin=0, vmax=2**16) + ax.axis('off') + ax.set_title(f"Patch {n} - section {patch.bigbrain_section}") + -# %% -p1 = patches[0] -# %% -p1.fetch() # %% diff --git a/siibra/features/image/sections.py b/siibra/features/image/sections.py index 65e940248..4b1a53fff 100644 --- a/siibra/features/image/sections.py +++ b/siibra/features/image/sections.py @@ -36,12 +36,14 @@ def __init__( self, patch: "AxisAlignedPatch", section: CellbodyStainedSection, + vertex: int, relevance: float, anchor: "AnatomicalAnchor", **kwargs ): self._patch = patch self._section = section + self.vertex = vertex self.relevance = relevance image.Image.__init__( self, @@ -59,17 +61,22 @@ def __init__( def __repr__(self): return ( f"<{self.__class__.__name__}(space_spec={self._space_spec}, " - f"name='{self.name}', patch='{self._patch}', providers={self._providers})>" + f"name='{self.name}', section='{self.bigbrain_section}', vertex='{self.vertex}', " + f"providers={self._providers})>" ) + @property + def bigbrain_section(self): + return self.get_boundingbox().minpoint.bigbrain_section() + def fetch(self, flip=False, **kwargs): assert "voi" not in kwargs res = kwargs.get("resolution_mm", -1) if flip: return self._patch.flip().extract_volume( self._section, resolution_mm=res - ) + ).fetch() else: return self._patch.extract_volume( self._section, resolution_mm=res - ) + ).fetch() diff --git a/siibra/livequeries/bigbrain.py b/siibra/livequeries/bigbrain.py index 41608d18a..7c1321400 100644 --- a/siibra/livequeries/bigbrain.py +++ b/siibra/livequeries/bigbrain.py @@ -329,7 +329,9 @@ def query( # to obtain midcortex-locations, and build their orthogonal patches. # store the concept's value with the patch. 
vertex_tree = KDTree(layerverts[l4.name]) - for i, s in enumerate(siibra_tqdm(sections, unit="sections", desc="Testing sections")): + for s in siibra_tqdm( + sections, unit="sections", desc=f"Sampling patches in {hemisphere} hemisphere" + ): # compute layer IV contour in the image plane imgplane = experimental.Plane.from_image(s) @@ -380,8 +382,10 @@ def query( ) features.append( BigBrain1MicronPatch( - patch=patch, section=s, relevance=prob, anchor=anchor + patch=patch, section=s, vertex=nnb, relevance=prob, anchor=anchor ) ) - return features + # return the patches sorted by relevance (ie. probability) + return sorted(features, key=lambda p: p.relevance, reverse=True) + From 69d4892e37e60c30597aa84ca62790227cd07e96 Mon Sep 17 00:00:00 2001 From: Timo Dickscheid Date: Thu, 20 Feb 2025 07:49:34 +0100 Subject: [PATCH 7/9] remove recursion in string representation --- siibra/features/image/sections.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/siibra/features/image/sections.py b/siibra/features/image/sections.py index 4b1a53fff..a446c29b5 100644 --- a/siibra/features/image/sections.py +++ b/siibra/features/image/sections.py @@ -61,8 +61,9 @@ def __init__( def __repr__(self): return ( f"<{self.__class__.__name__}(space_spec={self._space_spec}, " - f"name='{self.name}', section='{self.bigbrain_section}', vertex='{self.vertex}', " - f"providers={self._providers})>" + f"name='{self.name}', " + f"section='{self._section.get_boundingbox().minpoint.bigbrain_section()}', " + f"vertex='{self.vertex}', providers={self._providers})>" ) @property From 659738186648577c30c4427f627e99e8f4f6aec1 Mon Sep 17 00:00:00 2001 From: Timo Dickscheid Date: Thu, 20 Feb 2025 08:06:56 +0100 Subject: [PATCH 8/9] comparin fig5 tutorial with paper figure --- examples/tutorials/2025-paper-fig5.py | 15 +++++++-------- siibra/locations/experimental.py | 7 ++++++- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/examples/tutorials/2025-paper-fig5.py b/examples/tutorials/2025-paper-fig5.py index 01ffe43f5..379280db8 100644 --- a/examples/tutorials/2025-paper-fig5.py +++ b/examples/tutorials/2025-paper-fig5.py @@ -14,18 +14,17 @@ # %% # 2: find a corresponding brain section -patches = siibra.features.get(pmap, "BigBrain1MicronPatch", lower_threshold=0.8) +patches = siibra.features.get(pmap, "BigBrain1MicronPatch", lower_threshold=0.7) print(f"Found {len(patches)} patches.") # %% -N = 5 -f, axs = plt.subplots(1, N) -for n, (patch, ax) in enumerate(zip(patches[:N], axs.ravel())): +section = 3556 +candidates = [p for p in patches if p.bigbrain_section == 3556] +N = len(candidates) +f, axs = plt.subplots(N, 1) +for n, (patch, ax) in enumerate(zip(candidates, axs.ravel())): patchimg = patch.fetch() patchdata = patchimg.get_fdata().squeeze() ax.imshow(patchdata, cmap='gray', vmin=0, vmax=2**16) ax.axis('off') - ax.set_title(f"Patch {n} - section {patch.bigbrain_section}") - - -# %% + ax.set_title(f"#{n}/{patch.vertex}") diff --git a/siibra/locations/experimental.py b/siibra/locations/experimental.py index d74206d4b..d39c1af70 100644 --- a/siibra/locations/experimental.py +++ b/siibra/locations/experimental.py @@ -103,7 +103,12 @@ def extract_volume( h, w = xmax - xmin, zmax - zmin affine = np.dot(affine_rot, translation_matrix(xmin, 0, zmin)) return volume.from_nifti( - image.resample_img(patch, target_affine=affine, target_shape=[h, 1, w]), + image.resample_img( + patch, + target_affine=affine, + target_shape=[h, 1, w], + force_resample=True + ), space=image_volume.space, name=f"Rotated 
patch with corner points {self.coordinates} sampled from {image_volume.name}", ) From fa589dbaaaa8ad4cc5a9d030da0b3ec3a2349055 Mon Sep 17 00:00:00 2001 From: Timo Dickscheid Date: Thu, 20 Feb 2025 08:20:38 +0100 Subject: [PATCH 9/9] fig5 example produces the same patch as in the manuscript! --- examples/tutorials/2025-paper-fig5.py | 25 ++++++++++++------------- siibra/livequeries/bigbrain.py | 8 ++++---- 2 files changed, 16 insertions(+), 17 deletions(-) diff --git a/examples/tutorials/2025-paper-fig5.py b/examples/tutorials/2025-paper-fig5.py index 379280db8..8f760db10 100644 --- a/examples/tutorials/2025-paper-fig5.py +++ b/examples/tutorials/2025-paper-fig5.py @@ -2,29 +2,28 @@ import siibra assert siibra.__version__ >= "1.0.1" import matplotlib.pyplot as plt -import numpy as np -from nilearn import plotting # %% -# 1: get a region map -area, hemisphere = '4p', 'right' -parc = siibra.parcellations.get('julich 3.0.3') +# 1: Retrieve probability map of a motor area in Julich-Brain +parc = siibra.parcellations.get('julich 3.1') region = parc.get_region("4p right") pmap = parc.get_map('mni152', 'statistical').get_volume(region) # %% -# 2: find a corresponding brain section +# 2: Extract BigBrain 1 micron patches with high probability in this area patches = siibra.features.get(pmap, "BigBrain1MicronPatch", lower_threshold=0.7) print(f"Found {len(patches)} patches.") # %% +# 3: Display highly rated samples, here further reduced to a predefined section section = 3556 -candidates = [p for p in patches if p.bigbrain_section == 3556] -N = len(candidates) -f, axs = plt.subplots(N, 1) -for n, (patch, ax) in enumerate(zip(candidates, axs.ravel())): - patchimg = patch.fetch() - patchdata = patchimg.get_fdata().squeeze() +candidates = filter(lambda p: p.bigbrain_section == 3556, patches) +f, axs = plt.subplots(1, 3, figsize=(8, 24)) +for patch, ax in zip(list(candidates)[:3], axs.ravel()): + patchdata = patch.fetch().get_fdata().squeeze() ax.imshow(patchdata, cmap='gray', vmin=0, vmax=2**16) ax.axis('off') - ax.set_title(f"#{n}/{patch.vertex}") + ax.set_title(f"#{section} - {patch.vertex}", fontsize=10) + + +# %% diff --git a/siibra/livequeries/bigbrain.py b/siibra/livequeries/bigbrain.py index 7c1321400..719d6fd2e 100644 --- a/siibra/livequeries/bigbrain.py +++ b/siibra/livequeries/bigbrain.py @@ -20,7 +20,7 @@ bigbrain_intensity_profile, layerwise_bigbrain_intensities, ) -from ..features.image import volume_of_interest, CellbodyStainedSection +from ..features.image import CellbodyStainedSection from ..features.image.sections import BigBrain1MicronPatch from ..features import anchor as _anchor from ..commons import logger, siibra_tqdm @@ -287,7 +287,7 @@ def query( "query with an image volume." ) return [] - + # threshold image volume, if requested if self.lower_threshold > 0: logger.info( @@ -302,7 +302,7 @@ def query( vol = concept bb_bbox = vol.get_boundingbox().warp('bigbrain') - # find 1 micron BigBrain sections intersecting the thresholded volume + # find 1 micron BigBrain sections intersecting the thresholded volume sections = [ s for s in CellbodyStainedSection._get_instances() if s.get_boundingbox().intersects(vol) @@ -388,4 +388,4 @@ def query( # return the patches sorted by relevance (ie. probability) return sorted(features, key=lambda p: p.relevance, reverse=True) - +
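
For reference, the complete workflow enabled by this patch series is the one exercised in the tutorial it adds (examples/tutorials/2025-paper-fig5.py). The sketch below condenses that flow into a single script: build a Julich-Brain probability map, run the new "BigBrain1MicronPatch" live query against it, and fetch the top-ranked patch. The parcellation spec, region, threshold, and display settings are copied from the tutorial and are illustrative choices, not additional API requirements.

    # Condensed from examples/tutorials/2025-paper-fig5.py; values are illustrative.
    import siibra
    import matplotlib.pyplot as plt

    # 1: Probability map of motor area 4p (right hemisphere) in Julich-Brain
    parc = siibra.parcellations.get("julich 3.1")
    region = parc.get_region("4p right")
    pmap = parc.get_map("mni152", "statistical").get_volume(region)

    # 2: Sample BigBrain 1 micron patches where the map value is at least 0.7.
    # The live query returns the patches sorted by relevance (map probability).
    patches = siibra.features.get(pmap, "BigBrain1MicronPatch", lower_threshold=0.7)
    print(f"Found {len(patches)} patches.")

    # 3: Fetch and display the highest-ranked patch; fetch(flip=True) would
    # return the mirrored patch instead.
    best = patches[0]
    patchdata = best.fetch().get_fdata().squeeze()
    plt.imshow(patchdata, cmap="gray", vmin=0, vmax=2**16)
    plt.title(f"section {best.bigbrain_section} / vertex {best.vertex}")
    plt.axis("off")
    plt.show()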