Skip to content

Mcir cil #204

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Draft
wants to merge 15 commits into
base: master
Choose a base branch
from
6 changes: 6 additions & 0 deletions notebooks/MR/README.md
Original file line number Diff line number Diff line change
@@ -19,6 +19,12 @@ Jupyter notebooks for the MR exercises. Recommended order:

3. [f_create_undersampled_kspace](f_create_undersampled_kspace.ipynb) demonstrates a retrospective data under-sampling.

## Advanced topics

1. [g_non_cartesian_reconstruction](g_non_cartesian_reconstruction.ipynb) shows how to use optimisation approaches from CIL to reconstruct a 3D non-Cartesian dataset acquired with a Golden radial phase encoding trajectory.

2. [h_mr_mcir_grpe](h_mr_mcir_grpe.ipynb) gives an overview of all necessary steps for a motion corrected MR image reconstruction: obtaining a motion surrogate to identify which k-space point was acquired in which motion state, reconstructing motion resolved images, estimating non-rigid motion fields describing the spatial transformation between the different motion states and finally utilising these motion fields and motion surrogates to carry out a motion corrected image reconstruction.


## Feel free to ignore

547 changes: 547 additions & 0 deletions notebooks/MR/g_non_cartesian_reconstruction.ipynb

Large diffs are not rendered by default.

703 changes: 703 additions & 0 deletions notebooks/MR/h_mr_mcir_grpe.ipynb

Large diffs are not rendered by default.

402 changes: 402 additions & 0 deletions notebooks/PET/MCIR_CIL.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,402 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#%% Initial imports etc\n",
"import numpy\n",
"from numpy.linalg import norm\n",
"import matplotlib.pyplot as plt\n",
"import matplotlib.animation as animation\n",
"import os\n",
"import sys\n",
"import shutil\n",
"\n",
"from ccpi.utilities.jupyter import *\n",
"from ccpi.utilities.display import *\n",
"\n",
"#%% Use the 'pet' prefix for all STIR-based SIRF functions\n",
"# This is done here to explicitly differentiate between SIRF pet functions and \n",
"# anything else.\n",
"import sirf.STIR as pet\n",
"from sirf.Utilities import examples_data_path\n",
"\n",
"pet.AcquisitionData.set_storage_scheme('memory')\n",
"\n",
"#%% Go to directory with input files\n",
"# Adapt this path to your situation (or start everything in the relevant directory)\n",
"os.chdir(examples_data_path('PET'))\n",
"\n",
"#%% Copy files to a working folder and change directory to where these files are.\n",
"# We do this to avoid cluttering your SIRF files. This way, you can delete \n",
"# working_folder and start from scratch.\n",
"if False:\n",
" shutil.rmtree('working_folder/brain',True)\n",
" shutil.copytree('brain','working_folder/brain')\n",
"os.chdir('working_folder/brain')\n",
"\n",
"#%% Read in images\n",
"# Here we will read some images provided with the demo using the ImageData class.\n",
"# These are in Interfile format. (A text header pointing to a .v file with the binary data).\n",
"image = pet.ImageData('emission.hv')\n",
"mu_map = pet.ImageData('attenuation.hv')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"direction = 0\n",
"islicer(image,direction, cmap='viridis')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#%% Create a SIRF acquisition model\n",
"# We will use the ray-tracing matrix here as our simple PET model.\n",
"# There is more to the acquisition model, but that's for another demo.\n",
"am = pet.AcquisitionModelUsingRayTracingMatrix()\n",
"# Ask STIR to use 5 LORs per sinogram-element\n",
"am.set_num_tangential_LORs(5)\n",
"\n",
"#%% Specify sinogram dimensions\n",
"# We need to say what scanner to use, what dimensions etc.\n",
"# You do this by using existing PET data as a 'template'. \n",
"# Here, we read a file supplied with the demo as an AcquisitionData object\n",
"templ = pet.AcquisitionData('template_sinogram.hs')\n",
"# Now set-up our acquisition model with all information that it needs about the data and image.\n",
"am.set_up(templ,image)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# ADD NOISE\n",
"\n",
"sino = am.direct(image)\n",
"\n",
"def add_noise(counts, sinogram):\n",
" sino_arr = sinogram.as_array()\n",
" minmax = (sino_arr.min(), sino_arr.max())\n",
" if counts > 0 and counts <= 1:\n",
" counts = counts * (minmax[1] - minmax[0])\n",
" elif isinstance (counts, int):\n",
" pass\n",
" \n",
" sino_arr = counts * ((sino_arr -minmax[0]) / (minmax[1]-minmax[0]))\n",
" noisy_counts = sinogram * 0.\n",
" noisy_counts.fill( numpy.random.poisson(sino_arr) )\n",
" \n",
" return noisy_counts\n",
"\n",
"\n",
"minmax = sino.as_array().min(), sino.as_array().max()\n",
"\n",
"noisy_counts = add_noise(1, sino)\n",
"\n",
"s0 = islicer(noisy_counts.as_array()[0], 0, cmap='inferno_r')\n",
"s1 = islicer(sino.as_array()[0], 0, cmap='inferno_r')\n",
"link_islicer(s0,s1)\n",
"\n",
"#del sino"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import sirf.Reg as Reg\n",
"from scipy.spatial.transform import Rotation as R\n",
"\n",
"def get_resampler(directions, angles, degrees=True ):\n",
" '''example input 'zy', [87,13], degrees=True'''\n",
" r = R.from_euler(directions, angles, degrees=degrees)\n",
"\n",
" mat = r.as_dcm()\n",
"\n",
"\n",
"\n",
" tm = Reg.AffineTransformation()\n",
" mat4 = tm.as_array()\n",
"\n",
" for i in range(3):\n",
" for j in range(3):\n",
" mat4[i][j] = mat[i][j]\n",
"\n",
" tm = Reg.AffineTransformation(mat4)\n",
"\n",
" mat = tm.as_array()\n",
"\n",
" resampler = Reg.NiftyResample()\n",
" resampler.set_reference_image(image)\n",
" resampler.set_floating_image(image)\n",
" resampler.add_transformation(tm)\n",
" resampler.set_padding_value(0)\n",
" resampler.set_interpolation_type_to_linear()\n",
" \n",
" return resampler"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# create different motion state\n",
"rotations = [[-1.2,3.],[1.2,-3.],[0.,-5.], [.2,2.]]\n",
"rotations = [ [10 * rot[0],rot[1]] for rot in rotations ]\n",
"\n",
"resamplers = [ get_resampler('zy', rot, degrees=True) for rot in rotations ]\n",
"\n",
"# create the new AcquisitionData for the motion states\n",
"rotated_sinos = []\n",
"\n",
"for rot, resampler in zip(*(rotations, resamplers)):\n",
" # new ImageData\n",
" out = resampler.direct(image)\n",
" # new AcquisitionData\n",
" rs = am.direct(out)\n",
" # add noise\n",
" rs = add_noise(1,rs)\n",
" rotated_sinos.append(rs)\n",
"\n",
"del out, rs\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#s0 = islicer(acquired_data.as_array()[0], 0, cmap='viridis')\n",
"\n",
"a = rotated_sinos[0]-rotated_sinos[1]\n",
"print (type(a))\n",
"\n",
"s1 = islicer(resamplers[0].direct(image).as_array(), 0, cmap='viridis_r')\n",
"s2 = islicer(resamplers[1].direct(image).as_array(), 0, cmap='viridis_r')\n",
"s3 = islicer((rotated_sinos[0]-rotated_sinos[1]).as_array()[0],0,cmap='viridis')\n",
"link_islicer(s1,s2)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from ccpi.optimisation.operators import CompositionOperator, BlockOperator, LinearOperator\n",
"\n",
"\n",
"C = [ CompositionOperator(am, resampler, preallocate=True) for resampler in resamplers ]\n",
"# C = [ am for _ in resamplers ]\n",
"# norms = [ LinearOperator.PowerMethod(op, 25)[0] for op in C ]\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# n = [nn[0] for nn in norms]\n",
"# norms = n\n",
"# print (norms, sum(norms))\n",
"\n",
"from ccpi.plugins.regularisers import FGP_TV\n",
"#FGP_TV??"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from ccpi.optimisation.algorithms import PDHG\n",
"from ccpi.optimisation.functions import KullbackLeibler, IndicatorBox, BlockFunction\n",
"from ccpi.optimisation.operators import BlockOperator\n",
"from ccpi.plugins.regularisers import FGP_TV\n",
"\n",
"#regularisation parameters for TV\n",
"# \n",
"r_alpha = 5e-1\n",
"r_iterations = 500\n",
"r_tolerance = 1e-7\n",
"r_iso = 0\n",
"r_nonneg = 1\n",
"r_printing = 0\n",
"\n",
"TV = FGP_TV(r_alpha, r_iterations, r_tolerance, r_iso,r_nonneg,r_printing,'cpu')\n",
"\n",
"motion = True\n",
"if motion:\n",
" #noisy_counts is the GT forward projected + noise\n",
" kl = [ KullbackLeibler(b=rotated_sino, eta=(rotated_sino * 0 + 1e-5)) for rotated_sino in rotated_sinos ] \n",
" f = BlockFunction(*kl)\n",
" K = BlockOperator(*C)\n",
" normK = K.norm(iterations=2)\n",
" #normK = numpy.sqrt(sum( norms ))\n",
"else:\n",
" f = KullbackLeibler(b=noisy_counts, eta=(noisy_counts * 0 + 1e-5))\n",
" K = am\n",
" normK = LinearOperator.PowerMethod(am, 25)[0]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"rotated_sino.shape"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"f(K.direct(image))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"sigma = 1/normK\n",
"tau = 1/normK \n",
"\n",
"G = IndicatorBox(lower=0)\n",
"# G = TV\n",
"# print (f(acquired_data*0.+1e-5))\n",
"# print (f(acquired_data*0.))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def do_nothing(self):\n",
" return 0.\n",
"setattr(PDHG, 'update_objective', do_nothing)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Setup and run PDHG\n",
"\n",
"\n",
"pdhg = PDHG(f = f, g = G, operator = K, sigma = sigma, tau = tau, \n",
" max_iteration = 1000,\n",
" update_objective_interval = 4)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"na = numpy.zeros((2,2))\n",
"a = [na, na]\n",
"na += 1\n",
"\n",
"print (a)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pdhg.run(8, verbose=False)\n",
"\n",
"pdhg_recon = pdhg.get_output() "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pdhg.max_iteration = 2000\n",
"#pdhg.run()\n",
"\n",
"pdhg_l1_recon = pdhg.get_output() \n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pdhg_l1_recon = pdhg.get_output()\n",
"iM, im = image.as_array().max(), image.as_array().min()\n",
"rM, rm = pdhg_l1_recon.as_array().max(), pdhg_l1_recon.as_array().min()\n",
"\n",
"i_scaled = ((image -im) / (iM-im))\n",
"r_scaled = ((pdhg_l1_recon -rm) / (rM-rm))\n",
"s0 = islicer(i_scaled, 0, cmap='inferno')\n",
"s1 = islicer(r_scaled, 0, cmap='inferno')\n",
"\n",
"link_islicer(s0, s1)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#pdhg.get_output().write('PDHG_MCIR_noNoise_Motion_1000it_TV0.h')"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython"
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
427 changes: 427 additions & 0 deletions notebooks/PET/MemoryTest.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,427 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#%% Initial imports etc\n",
"import numpy\n",
"from numpy.linalg import norm\n",
"import matplotlib.pyplot as plt\n",
"import matplotlib.animation as animation\n",
"import os\n",
"import sys\n",
"import shutil\n",
"\n",
"import sirf.pyiutilities as pyiutil\n",
"\n",
"from ccpi.utilities.jupyter import *\n",
"from ccpi.utilities.display import *\n",
"\n",
"#%% Use the 'pet' prefix for all STIR-based SIRF functions\n",
"# This is done here to explicitly differentiate between SIRF pet functions and \n",
"# anything else.\n",
"import sirf.STIR as pet\n",
"from sirf.Utilities import examples_data_path\n",
"\n",
"import os\n",
"import psutil\n",
"\n",
"import sirf.Reg as Reg\n",
"from sirf import SIRF\n",
"\n",
"process = psutil.Process(os.getpid())\n",
"\n",
"# schemes: file, memory\n",
"pet.AcquisitionData.set_storage_scheme('file')\n",
"\n",
"#%% Go to directory with input files\n",
"# Adapt this path to your situation (or start everything in the relevant directory)\n",
"os.chdir(examples_data_path('PET'))\n",
"\n",
"#%% Copy files to a working folder and change directory to where these files are.\n",
"# We do this to avoid cluttering your SIRF files. This way, you can delete \n",
"# working_folder and start from scratch.\n",
"if True:\n",
" shutil.rmtree('working_folder/memorytest',True)\n",
" shutil.copytree('brain','working_folder/memorytest')\n",
"os.chdir('working_folder/memorytest')\n",
"\n",
"#%% Read in images\n",
"# Here we will read some images provided with the demo using the ImageData class.\n",
"# These are in Interfile format. (A text header pointing to a .v file with the binary data).\n",
"image = pet.ImageData('emission.hv')\n",
"\n",
"#%% Create a SIRF acquisition model\n",
"# We will use the ray-tracing matrix here as our simple PET model.\n",
"# There is more to the acquisition model, but that's for another demo.\n",
"am = pet.AcquisitionModelUsingRayTracingMatrix()\n",
"# Ask STIR to use 5 LORs per sinogram-element\n",
"am.set_num_tangential_LORs(5)\n",
"\n",
"#%% Specify sinogram dimensions\n",
"# We need to say what scanner to use, what dimensions etc.\n",
"# You do this by using existing PET data as a 'template'. \n",
"# Here, we read a file supplied with the demo as an AcquisitionData object\n",
"templ = pet.AcquisitionData('template_sinogram.hs')\n",
"# Now set-up our acquisition model with all information that it needs about the data and image.\n",
"am.set_up(templ,image)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"i2 = image.copy()\n",
"\n",
"i2 = 0"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"\n",
"mat4 = numpy.eye(4)\n",
"tm = Reg.AffineTransformation(mat4)\n",
"\n",
"resampler = Reg.NiftyResample()\n",
"resampler.set_reference_image(image)\n",
"resampler.set_floating_image(image)\n",
"resampler.add_transformation(tm)\n",
"resampler.set_padding_value(0)\n",
"resampler.set_interpolation_type_to_linear()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print ( issubclass(image, SIRF.DataContainer) )\n",
"\n",
"direct = resampler.direct(image)\n",
"\n",
"direct = 0\n",
"\n",
"adjoint = resampler.adjoint(image)\n",
"adjoint = 0"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"memocc = []\n",
"direct = resampler.direct(image)\n",
"adjoint = resampler.adjoint(image)\n",
"for i in range(100):\n",
" # new ImageData\n",
" resampler.direct(image, out=direct)\n",
" # new AcquisitionData\n",
" resampler.adjoint(direct, out=adjoint)\n",
"# f(image)\n",
" if i % 10 == 0 and i > 0:\n",
" memocc.append(process.memory_percent())\n",
" print(i, memocc[-1])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"from scipy.spatial.transform import Rotation as R\n",
"\n",
"def get_resampler(directions, angles, image, degrees=True ):\n",
" '''example input 'zy', [87,13], degrees=True'''\n",
" r = R.from_euler(directions, angles, degrees=degrees)\n",
"\n",
" mat = r.as_dcm()\n",
"\n",
" mat4 = numpy.eye(4)\n",
" print (mat4.shape, mat4)\n",
"\n",
" for i in range(3):\n",
" for j in range(3):\n",
" mat4[i][j] = mat[i][j]\n",
"\n",
" tm = Reg.AffineTransformation(mat4)\n",
"\n",
" mat = tm.as_array()\n",
"\n",
" resampler = Reg.NiftyResample()\n",
" print (id(resampler), resampler.handle)\n",
" resampler.set_reference_image(image)\n",
" resampler.set_floating_image(image)\n",
" resampler.add_transformation(tm)\n",
" resampler.set_padding_value(0)\n",
" resampler.set_interpolation_type_to_linear()\n",
" \n",
" return resampler"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# create different motion state\n",
"rotations = [[-1.2,3.],[1.2,-3.],[0.,-5.], [.2,2.]]\n",
"rotations = [ [10 * rot[0],rot[1]] for rot in rotations ]\n",
"\n",
"# resamplers = [ get_resampler('zy', rot, image, degrees=True) for rot in rotations ]\n",
"resampler = get_resampler('zy', [-12.,30.], image, degrees=True) \n",
"# create the new AcquisitionData for the motion states\n",
"memocc = []\n",
"\n",
"\n",
"def f(image):\n",
" step = resampler.direct(image)\n",
" out = resampler.adjoint(step)\n",
" return out\n",
"\n",
"for i in range(100):\n",
" # new ImageData\n",
" # step = resampler.direct(image)\n",
" # new AcquisitionData\n",
" out = resampler.adjoint(step)\n",
"# f(image)\n",
" if i % 10 == 0 and i > 0:\n",
" memocc.append(process.memory_percent())\n",
" print(i, memocc[-1], memocc[-1]/float(i))\n",
"# del out\n",
"\n",
"\n",
"id_old = None\n",
"handle_old = None"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"for i in range(2):\n",
" out = f(image)\n",
"print (resampler.handle)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print (type(step), type(image))\n",
"\n",
"import sirf\n",
"\n",
"print (isinstance (step, sirf.SIRF.ImageData))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import sys\n",
"\n",
"step = resampler.direct(image)\n",
"ids.append((id(step), step.handle))\n",
"for i in ids:\n",
" print (i)\n",
"\n",
"\n",
"print (sys.getrefcount(step))\n",
"#out = resampler.adjoint (step)\n",
" \n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"for i, handle in ids:\n",
" print (\"trying to delete handle\", handle)\n",
" #pyiutil.deleteDataHandle(handle)\n",
" step.__del__()\n",
"# out = resampler.adjoint(step)\n",
"# print (id (out))\n",
"# print (out.handle)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"memocc2 = []\n",
"step = am.direct(image)\n",
"out = am.adjoint(step)\n",
"\n",
"for i in range(101):\n",
" # new ImageData\n",
" am.direct(image, out=step)\n",
" # new AcquisitionData\n",
" am.adjoint(step, out=out)\n",
" if i % 100 == 0 and i > 0:\n",
" memocc2.append(process.memory_percent())\n",
" print(i, memocc2[-1], (memocc2[-1])/float(i))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy\n",
"\n",
"class A(object):\n",
" def __init__(self,array,parent):\n",
" self.array = array.copy()\n",
" self.parent = parent\n",
" print (\"calling __init__ {}\".format(self.__class__.__name__))\n",
" \n",
" def __del__(self):\n",
" print (\"calling __del__ {}\".format(self.__class__.__name__))\n",
" \n",
" def direct(self,x):\n",
" return type(self)(self.array + x, id(self.array))\n",
" \n",
"class B(A):\n",
" def __init__(self, array):\n",
" super(B,self).__init__(array)\n",
" self.a = A(array,id(array))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"aa = A(numpy.zeros(shape=(10,10)), None)\n",
"x = numpy.ones(shape = (10,10))\n",
"\n",
"bb = B(numpy.zeros(shape=(10,10)),1)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"b = aa.direct(x)\n",
"c = bb.direct(x)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import weakref\n",
"class FooType(object):\n",
" def __init__(self, did, parent):\n",
" self.id = did\n",
" self.parent = parent\n",
" print 'Foo', self.id, 'born'\n",
"\n",
" def __del__(self):\n",
" print 'Foo', self.id, 'died'\n",
"\n",
"\n",
"class BarType(object):\n",
" def __init__(self, did):\n",
" self.id = did\n",
" #self.foo = weakref.ref( FooType(did, self) )\n",
" self.foo = FooType(id, self)\n",
" print 'Bar', self.id, 'born'\n",
"\n",
" def __del__(self):\n",
" print 'Bar', self.id, 'died'\n",
" def __str__(self):\n",
" return \"{} id {}\".format(self.__class__.__name__ , self.id)\n",
"\n",
"\n",
"b = BarType(12)\n",
"#b=0\n",
"print (b)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print (b)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"image"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"i2 = image.copy()\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython"
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
156 changes: 156 additions & 0 deletions notebooks/PET/ViewResults.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,156 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#%% Initial imports etc\n",
"import numpy\n",
"from numpy.linalg import norm\n",
"import matplotlib.pyplot as plt\n",
"import matplotlib.animation as animation\n",
"import os\n",
"import sys\n",
"import shutil\n",
"\n",
"from ccpi.utilities.jupyter import *\n",
"from ccpi.utilities.display import *\n",
"\n",
"#%% Use the 'pet' prefix for all STIR-based SIRF functions\n",
"# This is done here to explicitly differentiate between SIRF pet functions and \n",
"# anything else.\n",
"import sirf.STIR as pet\n",
"from sirf.Utilities import examples_data_path\n",
"\n",
"import sirf.Reg as Reg\n",
"from scipy.spatial.transform import Rotation as R\n",
"from ccpi.optimisation.operators import CompositionOperator, BlockOperator, LinearOperator\n",
"\n",
"from ccpi.plugins.regularisers import FGP_TV\n",
"\n",
"\n",
"def add_noise(counts, sinogram):\n",
" sino_arr = sinogram.as_array()\n",
" minmax = (sino_arr.min(), sino_arr.max())\n",
"\n",
" counts = 300\n",
" sino_arr = counts * (sino_arr / minmax[1])\n",
" noisy_counts = sinogram * 0.\n",
" noisy_counts.fill( numpy.random.poisson(sino_arr) )\n",
" \n",
" return noisy_counts\n",
"\n",
"\n",
"pet.AcquisitionData.set_storage_scheme('memory')\n",
"\n",
"#%% Go to directory with input files\n",
"# Adapt this path to your situation (or start everything in the relevant directory)\n",
"os.chdir(examples_data_path('PET'))\n",
"\n",
"#%% Copy files to a working folder and change directory to where these files are.\n",
"# We do this to avoid cluttering your SIRF files. This way, you can delete \n",
"# working_folder and start from scratch.\n",
"if False:\n",
" shutil.rmtree('working_folder/brain',True)\n",
" shutil.copytree('brain','working_folder/brain')\n",
"os.chdir('working_folder/brain')\n",
"\n",
"#%% Read in images\n",
"# Here we will read some images provided with the demo using the ImageData class.\n",
"# These are in Interfile format. (A text header pointing to a .v file with the binary data).\n",
"image = pet.ImageData('emission.hv')\n",
"mu_map = pet.ImageData('attenuation.hv')\n",
"\n",
"#%% Create a SIRF acquisition model\n",
"# We will use the ray-tracing matrix here as our simple PET model.\n",
"# There is more to the acquisition model, but that's for another demo.\n",
"am = pet.AcquisitionModelUsingRayTracingMatrix()\n",
"# Ask STIR to use 5 LORs per sinogram-element\n",
"am.set_num_tangential_LORs(5)\n",
"\n",
"#%% Specify sinogram dimensions\n",
"# We need to say what scanner to use, what dimensions etc.\n",
"# You do this by using existing PET data as a 'template'. \n",
"# Here, we read a file supplied with the demo as an AcquisitionData object\n",
"templ = pet.AcquisitionData('template_sinogram.hs')\n",
"# Now set-up our acquisition model with all information that it needs about the data and image.\n",
"am.set_up(templ,image)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%ls\n",
"#plotter2D??"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"mcir_no_TV = pet.ImageData('PDHG_MCIR_noNoise_Motion_1000it_TV0.hv')\n",
"mcir_TV = pet.ImageData('PDHG_MCIR_noNoise_Motion_1000it_TV05.hv')\n",
"no_mcir = pet.ImageData('PDHG_noMCIR_noNoise_Motion_1000it_TV0.hv')\n",
"no_motion = pet.ImageData('PDHG_noMCIR_noNoise_noMotion_1000it_TV0.hv')\n",
"\n",
"print (image.shape)\n",
"%matplotlib inline\n",
"plotter2D([vv.as_array()[12] for vv in [image, no_motion, mcir_no_TV, mcir_TV, no_mcir ] ] , \n",
" titles=['Ground Truth', 'No Motion' , 'MCIR Positivity Constraint', 'MCIR FGP TV', 'No MCIR' ] , cmap = 'inferno')\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import matplotlib.pyplot as plt\n",
"%matplotlib inline\n",
"lines = [vv.as_array()[5][100] for vv in [image, no_motion, mcir_no_TV, mcir_TV, no_mcir ] ]\n",
"#print (lines[0])\n",
"plt.plot(lines[0], label=\"Ground Truth\")\n",
"plt.plot(lines[1], label=\"No motion\")\n",
"plt.plot(lines[2], label=\"MCIR G>0\")\n",
"plt.plot(lines[3], label=\"MCIR TV\")\n",
"plt.plot(lines[4], label=\"No MCIR\")\n",
"plt.legend()\n",
"#plt.label('Ground Truth')\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython"
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
13 changes: 12 additions & 1 deletion scripts/download_data.sh
Original file line number Diff line number Diff line change
@@ -166,7 +166,7 @@ then
pushd "$DOWNLOAD_DIR"
echo Downloading MR data

# Get Zenodo dataset
# Get Zenodo datasets
URL=https://zenodo.org/record/2633785/files/
filenameGRAPPA=PTB_ACRPhantom_GRAPPA.zip
# (re)download md5 checksum
@@ -178,6 +178,17 @@ then
pushd "${DATA_PATH}/MR"
echo "Unpacking ${filenameGRAPPA}"
unzip -o "${DOWNLOAD_DIR}/${filenameGRAPPA}"

URL=https://zenodo.org/record/7903282/files/
filenameGRPE=3D_GRPE_no_motion.h5
# (re)download md5 checksum
echo "82aa7000fb6c1d42f138f81473aa671e ${filenameGRPE}" > "${filenameGRPE}.md5"
download "$filenameGRPE" "$URL"

filenameGRPE_motion=3D_GRPE_motion.h5
# (re)download md5 checksum
echo "111cfdb05c2e9d1ef75f69ebe58931ed ${filenameGRPE_motion}" > "${filenameGRPE_motion}.md5"
download "$filenameGRPE_motion" "$URL"
popd
else
echo "MR data NOT downloaded. If you need it, rerun this script with the -h option to get help."