[1]:
from brainlit.utils.session import NeuroglancerSession
import napari
from napari.utils import nbscreenshot
%gui qt
---------------------------------------------------------------------------
ModuleNotFoundError                       Traceback (most recent call last)
<ipython-input-1-a0e52f9228da> in <module>
----> 1 from brainlit.utils.session import NeuroglancerSession
      2 import napari
      3 from napari.utils import nbscreenshot
      4 get_ipython().run_line_magic('gui', 'qt')

ModuleNotFoundError: No module named 'brainlit'

Downloading Brain data tutorial

We have prepared 2 brain volumes, as well as axon segment labels, at the S3 URLs below (see `uploading_brains.ipynb`). The method demonstrated below pulls a region of the volume around an annotated axon point selected by the user.

1) Define Variables

  • mip ranges from higher resolution (0) to lower resolution (1).

  • v_id is a vertex id, ranging from the soma (0) to the end of the axon (1649).

  • radius is the radius to pull around the selected point, in voxels.

[2]:
# NOTE(review): this cell is wrapped in a triple-quoted string, so none of it
# executes — presumably it was disabled because the `brainlit` import in cell 1
# failed (ModuleNotFoundError above). TODO: confirm, and remove the quotes to
# restore the runnable tutorial once `brainlit` is installed.
# NOTE(review): `dir` shadows the Python builtin `dir()`; consider renaming
# (e.g. `url`) when re-enabling this cell.
# Config constants for the tutorial: S3 source URLs for the two brain volumes
# and their segment annotations, plus the resolution level (mip), the vertex id
# along the axon to center on (v_id), and the pull radius in voxels.
"""
dir = "s3://open-neurodata/brainlit/brain1"
dir_segments = "s3://open-neurodata/brainlit/brain1_segments"
dir_2 = "s3://open-neurodata/brainlit/brain2"
dir_2_segments = "s3://open-neurodata/brainlit/brain2_segments"
mip = 0
v_id = 0
radius = 75
"""
[2]:
'\ndir = "s3://open-neurodata/brainlit/brain1"\ndir_segments = "s3://open-neurodata/brainlit/brain1_segments"\ndir_2 = "s3://open-neurodata/brainlit/brain2"\ndir_2_segments = "s3://open-neurodata/brainlit/brain2_segments"\nmip = 0\nv_id = 0\nradius = 75\n'

2) Create a NeuroglancerSession instance and download the volume.

[3]:
# NOTE(review): cell is string-wrapped (dead code) — presumably disabled after
# the failed `brainlit` import in cell 1. TODO: unquote to re-enable.
# Creates a NeuroglancerSession at the chosen mip for brain1 and downloads a
# cube of side 2*radius+1 voxels centered on vertex v_id of segment 2.
# Presumably pull_voxel returns (image volume, bounding box, center voxel) —
# verify against brainlit's NeuroglancerSession docs.
"""
# get image and center point
ngl_sess = NeuroglancerSession(mip = mip, url = dir, url_segments=dir_segments)
img, bbox, vox = ngl_sess.pull_voxel(2, v_id, radius)
print(f"\n\nDownloaded volume is of shape {img.shape}, with total intensity {sum(sum(sum(img)))}.")
"""
[3]:
'\n# get image and center point\nngl_sess = NeuroglancerSession(mip = mip, url = dir, url_segments=dir_segments)\nimg, bbox, vox = ngl_sess.pull_voxel(2, v_id, radius)\nprint(f"\n\nDownloaded volume is of shape {img.shape}, with total intensity {sum(sum(sum(img)))}.")\n'

3) Generate a graph from the segment data within the volume, and convert it to paths.

[4]:
# NOTE(review): cell is string-wrapped (dead code) — presumably disabled after
# the failed `brainlit` import in cell 1. TODO: unquote to re-enable.
# Restricts the annotation graph of segment 2 to the downloaded bounding box;
# get_segments appears to return (subgraph, paths) as a 2-element sequence —
# confirm the return contract against brainlit's API before relying on it.
"""
G_paths = ngl_sess.get_segments(2, bbox)
G_sub = G_paths[0]
paths = G_paths[1]

print(f"Selected volume contains {G_sub.number_of_nodes()} nodes and {len(paths)} paths")
"""
[4]:
'\nG_paths = ngl_sess.get_segments(2, bbox)\nG_sub = G_paths[0]\npaths = G_paths[1]\n\nprint(f"Selected volume contains {G_sub.number_of_nodes()} nodes and {len(paths)} paths")\n'

4) View the volume with paths overlaid via napari.

[5]:
# NOTE(review): cell is string-wrapped (dead code) — presumably disabled after
# the failed `brainlit` import in cell 1. TODO: unquote to re-enable.
# Opens a 3D napari viewer showing the downloaded image volume, the axon paths
# as blue path shapes, and the center voxel as a point layer, then captures an
# inline screenshot for the notebook. Requires the `%gui qt` event loop from
# cell 1 to be active.
"""
viewer = napari.Viewer(ndisplay=3)
viewer.add_image(img)
viewer.add_shapes(data=paths, shape_type='path', edge_width=0.1, edge_color='blue', opacity=0.1)
viewer.add_points(vox, size=1, opacity=0.5)
nbscreenshot(viewer)
"""
[5]:
"\nviewer = napari.Viewer(ndisplay=3)\nviewer.add_image(img)\nviewer.add_shapes(data=paths, shape_type='path', edge_width=0.1, edge_color='blue', opacity=0.1)\nviewer.add_points(vox, size=1, opacity=0.5)\nnbscreenshot(viewer)\n"
[ ]: