authorEdoardo Pasca <edo.paskino@gmail.com>2019-03-14 11:49:32 +0000
committerEdoardo Pasca <edo.paskino@gmail.com>2019-03-14 11:49:32 +0000
commitc9748b96e531a64c4e56909ab19a0b82fc01eb45 (patch)
treeb87d39e1885768b92ecf908c567e4d89c6c62a99
parent2dabf103a7d56de9afcf252dab881b097a9cd259 (diff)
parent9080a553104e466da8d38024df141a298408677a (diff)
Merge branch 'composite_operator_datacontainer' into block_function
-rwxr-xr-x  Wrappers/Python/ccpi/framework/BlockDataContainer.py             |    1
-rwxr-xr-x  Wrappers/Python/ccpi/framework/framework.py                      |   12
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py        |    7
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py  |   12
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/funcs.py                       |   45
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py     |    2
-rwxr-xr-x  Wrappers/Python/ccpi/optimisation/ops.py                         |   11
-rwxr-xr-x  Wrappers/Python/ccpi/processors.py                               | 1026
-rw-r--r--  Wrappers/Python/wip/CGLS_tikhonov.py                             |   25
-rw-r--r--  Wrappers/Python/wip/CreatePhantom.py                             |  242
10 files changed, 827 insertions(+), 556 deletions(-)
diff --git a/Wrappers/Python/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/ccpi/framework/BlockDataContainer.py
index d509d25..b9f5c5f 100755
--- a/Wrappers/Python/ccpi/framework/BlockDataContainer.py
+++ b/Wrappers/Python/ccpi/framework/BlockDataContainer.py
@@ -96,7 +96,6 @@ class BlockDataContainer(object):
shape=self.shape)
def multiply(self, other, *args, **kwargs):
- print ("BlockDataContainer" , other)
self.is_compatible(other)
out = kwargs.get('out', None)
if isinstance(other, Number):
diff --git a/Wrappers/Python/ccpi/framework/framework.py b/Wrappers/Python/ccpi/framework/framework.py
index 1413e21..029a80d 100755
--- a/Wrappers/Python/ccpi/framework/framework.py
+++ b/Wrappers/Python/ccpi/framework/framework.py
@@ -54,7 +54,8 @@ class ImageGeometry(object):
center_x=0,
center_y=0,
center_z=0,
- channels=1):
+ channels=1,
+ dimension_labels=None):
self.voxel_num_x = voxel_num_x
self.voxel_num_y = voxel_num_y
@@ -66,6 +67,7 @@ class ImageGeometry(object):
self.center_y = center_y
self.center_z = center_z
self.channels = channels
+ self.dimension_labels = dimension_labels
def get_min_x(self):
return self.center_x - 0.5*self.voxel_num_x*self.voxel_size_x
@@ -113,6 +115,8 @@ class ImageGeometry(object):
return repres
def allocate(self, value=0, dimension_labels=None):
'''allocates an ImageData according to the size expressed in the instance'''
+ if dimension_labels is None:
+ dimension_labels = self.dimension_labels
out = ImageData(geometry=self, dimension_labels=dimension_labels)
if value != 0:
out += value
@@ -130,7 +134,8 @@ class AcquisitionGeometry(object):
dist_source_center=None,
dist_center_detector=None,
channels=1,
- angle_unit='degree'
+ angle_unit='degree',
+ dimension_labels=None
):
"""
General inputs for standard type projection geometries
@@ -171,6 +176,7 @@ class AcquisitionGeometry(object):
self.pixel_size_v = pixel_size_v
self.channels = channels
+ self.dimension_labels = dimension_labels
def clone(self):
'''returns a copy of the AcquisitionGeometry'''
@@ -198,6 +204,8 @@ class AcquisitionGeometry(object):
return repres
def allocate(self, value=0, dimension_labels=None):
'''allocates an AcquisitionData according to the size expressed in the instance'''
+ if dimension_labels is None:
+ dimension_labels = self.dimension_labels
out = AcquisitionData(geometry=self, dimension_labels=dimension_labels)
if value != 0:
out += value
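
Both ImageGeometry and AcquisitionGeometry now accept a dimension_labels argument, and allocate() falls back to the labels stored on the geometry when the caller passes none. A minimal usage sketch; the label values are illustrative, taken from the axis names used elsewhere in this repository:

```python
from ccpi.framework import ImageGeometry

# store a preferred axis ordering on the geometry itself
ig = ImageGeometry(voxel_num_x=64, voxel_num_y=64,
                   dimension_labels=['horizontal_y', 'horizontal_x'])

# allocate() now reuses the geometry's labels when the caller omits them
x = ig.allocate()

# an explicit argument still takes precedence over the stored default
y = ig.allocate(value=1, dimension_labels=['horizontal_x', 'horizontal_y'])
```
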
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py b/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py
index 680b268..cc99344 100755
--- a/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/Algorithm.py
@@ -140,7 +140,7 @@ class Algorithm(object):
raise ValueError('Update objective interval must be an integer >= 1')
else:
raise ValueError('Update objective interval must be an integer >= 1')
- def run(self, iterations, verbose=False, callback=None):
+ def run(self, iterations, verbose=True, callback=None):
'''run n iterations and update the user with the callback if specified'''
if self.should_stop():
print ("Stop cryterion has been reached.")
@@ -149,8 +149,9 @@ class Algorithm(object):
if verbose:
print ("Iteration {}/{}, objective {}".format(self.iteration,
self.max_iteration, self.get_last_objective()) )
- if callback is not None:
- callback(self.iteration, self.get_last_objective())
+ else:
+ if callback is not None:
+ callback(self.iteration, self.get_last_objective())
i += 1
if i == iterations:
break
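
Note the behavioural change in run(): verbose now defaults to True and, per the new else branch, the callback only fires when verbose is False. A hedged sketch of both modes, assuming `algo` is any Algorithm subclass that has already been set up:

```python
# verbose run: prints "Iteration i/max, objective ..." at each update interval
algo.run(10)

# quiet run: the callback receives (iteration, last_objective) instead
algo.run(10, verbose=False,
         callback=lambda it, obj: print("it {} obj {}".format(it, obj)))
```
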
diff --git a/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py b/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py
index 7794b4d..f1e4132 100755
--- a/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py
+++ b/Wrappers/Python/ccpi/optimisation/algorithms/GradientDescent.py
@@ -51,13 +51,17 @@ class GradientDescent(Algorithm):
def set_up(self, x_init, objective_function, rate):
'''initialisation of the algorithm'''
self.x = x_init.copy()
- if self.memopt:
- self.x_update = x_init.copy()
self.objective_function = objective_function
self.rate = rate
self.loss.append(objective_function(x_init))
self.iteration = 0
-
+ try:
+ self.memopt = self.objective_function.memopt
+ except AttributeError as ae:
+ self.memopt = False
+ if self.memopt:
+ self.x_update = x_init.copy()
+
def update(self):
'''Single iteration'''
if self.memopt:
@@ -65,7 +69,7 @@ class GradientDescent(Algorithm):
self.x_update *= -self.rate
self.x += self.x_update
else:
- self.x += -self.rate * self.objective_function.grad(self.x)
+ self.x += -self.rate * self.objective_function.gradient(self.x)
def update_objective(self):
self.loss.append(self.objective_function(self.x))
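
GradientDescent now reads memopt from the objective function (falling back to False when the attribute is missing) and calls the new gradient() name instead of the deprecated grad(). The pattern used by the wip scripts in this commit looks like the sketch below; A, b and x_init are assumed to be set up as in wip/CreatePhantom.py:

```python
from ccpi.optimisation.funcs import Norm2sq
from ccpi.optimisation.algorithms import GradientDescent

f = Norm2sq(A, b, memopt=True)   # the memopt flag now lives on the objective
f.L = 0.00003                    # used directly as the descent rate below

gd = GradientDescent(x_init=x_init, objective_function=f, rate=f.L)
gd.max_iteration = 5
gd.run(5, verbose=True)
```
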
diff --git a/Wrappers/Python/ccpi/optimisation/funcs.py b/Wrappers/Python/ccpi/optimisation/funcs.py
index 99af275..4f84889 100755
--- a/Wrappers/Python/ccpi/optimisation/funcs.py
+++ b/Wrappers/Python/ccpi/optimisation/funcs.py
@@ -20,6 +20,7 @@
from ccpi.optimisation.ops import Identity, FiniteDiff2D
import numpy
from ccpi.framework import DataContainer
+import warnings
def isSizeCorrect(data1 ,data2):
@@ -40,8 +41,12 @@ class Function(object):
def __init__(self):
self.L = None
def __call__(self,x, out=None): raise NotImplementedError
- def grad(self, x): raise NotImplementedError
- def prox(self, x, tau): raise NotImplementedError
+ def grad(self, x):
+ warnings.warn("grad method is deprecated. use gradient instead", DeprecationWarning)
+ return self.gradient(x, out=None)
+ def prox(self, x, tau):
+ warnings.warn("prox method is deprecated. use proximal instead", DeprecationWarning)
+ return self.proximal(x,tau,out=None)
def gradient(self, x, out=None): raise NotImplementedError
def proximal(self, x, tau, out=None): raise NotImplementedError
@@ -141,12 +146,20 @@ class Norm2sq(Function):
self.A = A # Should be an operator, default identity
self.b = b # Default zero DataSet?
self.c = c # Default 1.
- self.memopt = memopt
if memopt:
- #self.direct_placehold = A.adjoint(b)
- self.direct_placehold = A.allocate_direct()
- self.adjoint_placehold = A.allocate_adjoint()
-
+ try:
+ self.adjoint_placehold = A.range_geometry().allocate()
+ self.direct_placehold = A.domain_geometry().allocate()
+ self.memopt = True
+ except NameError as ne:
+ warnings.warn(str(ne))
+ self.memopt = False
+ except NotImplementedError as nie:
+ print (nie)
+ warnings.warn(str(nie))
+ self.memopt = False
+ else:
+ self.memopt = False
# Compute the Lipschitz parameter from the operator if possible
# Leave it initialised to None otherwise
@@ -157,10 +170,9 @@ class Norm2sq(Function):
except NotImplementedError as noe:
pass
- def grad(self,x):
- #return 2*self.c*self.A.adjoint( self.A.direct(x) - self.b )
- return (2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b )
-
+ #def grad(self,x):
+ # return self.gradient(x, out=None)
+
def __call__(self,x):
#return self.c* np.sum(np.square((self.A.direct(x) - self.b).ravel()))
#if out is None:
@@ -178,12 +190,13 @@ class Norm2sq(Function):
self.A.direct(x, out=self.adjoint_placehold)
self.adjoint_placehold.__isub__( self.b )
self.A.adjoint(self.adjoint_placehold, out=self.direct_placehold)
- self.direct_placehold.__imul__(2.0 * self.c)
- # can this be avoided?
- out.fill(self.direct_placehold)
+ #self.direct_placehold.__imul__(2.0 * self.c)
+ ## can this be avoided?
+ #out.fill(self.direct_placehold)
+ self.direct_placehold.multiply(2.0*self.c, out=out)
else:
- return self.grad(x)
-
+ return (2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b )
+
class ZeroFun(Function):
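
grad() and prox() on the Function base class are now thin deprecation shims that forward to gradient() and proximal(). The pattern is mirrored standalone below; these toy classes are illustrative, not the ccpi ones:

```python
import warnings

class Function(object):
    def gradient(self, x, out=None):
        raise NotImplementedError
    def proximal(self, x, tau, out=None):
        raise NotImplementedError
    def grad(self, x):
        warnings.warn("grad method is deprecated. use gradient instead",
                      DeprecationWarning)
        return self.gradient(x, out=None)
    def prox(self, x, tau):
        warnings.warn("prox method is deprecated. use proximal instead",
                      DeprecationWarning)
        return self.proximal(x, tau, out=None)

class Square(Function):
    '''toy f(x) = x^2 so the shim has something to forward to'''
    def gradient(self, x, out=None):
        return 2.0 * x

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    assert Square().grad(3.0) == 6.0                            # old name still works...
    assert issubclass(caught[-1].category, DeprecationWarning)  # ...but warns
```
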
diff --git a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
index 8298c03..21ea104 100755
--- a/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
+++ b/Wrappers/Python/ccpi/optimisation/operators/BlockOperator.py
@@ -91,7 +91,7 @@ class BlockOperator(Operator):
Raises: ValueError if the contained Operators are not linear
'''
- if not functools.reduce(lambda x,y: x and y, self.operators.is_linear(), True):
+ if not functools.reduce(lambda x, y: x and y.is_linear(), self.operators, True):
raise ValueError('Not all operators in Block are linear.')
shape = self.get_output_shape(x.shape, adjoint=True)
res = []
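
The fix to the linearity guard matters: the old expression called is_linear() on the list of operators itself, which would raise AttributeError, whereas the corrected reduce asks each contained operator in turn. A standalone check of the corrected expression:

```python
import functools

class Linear:
    def is_linear(self): return True

class Nonlinear:
    def is_linear(self): return False

def all_linear(operators):
    # same expression as the fixed guard in BlockOperator.adjoint
    return functools.reduce(lambda x, y: x and y.is_linear(), operators, True)

print(all_linear([Linear(), Linear()]))      # True
print(all_linear([Linear(), Nonlinear()]))   # False
```
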
diff --git a/Wrappers/Python/ccpi/optimisation/ops.py b/Wrappers/Python/ccpi/optimisation/ops.py
index e9e7f44..6afb97a 100755
--- a/Wrappers/Python/ccpi/optimisation/ops.py
+++ b/Wrappers/Python/ccpi/optimisation/ops.py
@@ -49,9 +49,9 @@ class Operator(object):
def allocate_adjoint(self):
'''Allocates memory on the X space'''
raise NotImplementedError
- def range_dim(self):
+ def range_geometry(self):
raise NotImplementedError
- def domain_dim(self):
+ def domain_geometry(self):
raise NotImplementedError
def __rmul__(self, other):
'''reverse multiplication of Operator with number sets the variable scalar in the Operator'''
@@ -97,7 +97,8 @@ class TomoIdentity(Operator):
self.s1 = 1.0
self.geometry = geometry
-
+ def is_linear(self):
+ return True
def direct(self,x,out=None):
if out is None:
@@ -128,6 +129,10 @@ class TomoIdentity(Operator):
raise ValueError("Wrong geometry type: expected ImageGeometry of AcquisitionGeometry, got ", type(self.geometry))
def allocate_adjoint(self):
return self.allocate_direct()
+ def range_geometry(self):
+ return self.geometry
+ def domain_geometry(self):
+ return self.geometry
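
Renaming range_dim/domain_dim to range_geometry/domain_geometry (and implementing them on TomoIdentity) is what lets the reworked Norm2sq allocate its memopt scratch space straight from the operator's geometries. A sketch of how the pieces connect, using the classes touched by this commit; the geometry sizes are illustrative:

```python
from ccpi.framework import ImageGeometry
from ccpi.optimisation.ops import TomoIdentity
from ccpi.optimisation.funcs import Norm2sq

ig = ImageGeometry(voxel_num_x=32, voxel_num_y=32)
A = TomoIdentity(geometry=ig)
b = ig.allocate(value=1)

# Norm2sq(memopt=True) now calls A.range_geometry().allocate() and
# A.domain_geometry().allocate(); if the operator lacks these methods
# it falls back to memopt=False with a warning instead of failing.
f = Norm2sq(A, b, memopt=True)
print(f.memopt)   # expected True for TomoIdentity after this commit
```
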
diff --git a/Wrappers/Python/ccpi/processors.py b/Wrappers/Python/ccpi/processors.py
index 3a3671a..ccef410 100755
--- a/Wrappers/Python/ccpi/processors.py
+++ b/Wrappers/Python/ccpi/processors.py
@@ -1,514 +1,514 @@
-# -*- coding: utf-8 -*-
-# This work is part of the Core Imaging Library developed by
-# Visual Analytics and Imaging System Group of the Science Technology
-# Facilities Council, STFC
-
-# Copyright 2018 Edoardo Pasca
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License
-
-from ccpi.framework import DataProcessor, DataContainer, AcquisitionData,\
- AcquisitionGeometry, ImageGeometry, ImageData
-from ccpi.reconstruction.parallelbeam import alg as pbalg
-import numpy
-from scipy import ndimage
-
-import matplotlib.pyplot as plt
-
-
-class Normalizer(DataProcessor):
- '''Normalization based on flat and dark
-
- This processor read in a AcquisitionData and normalises it based on
- the instrument reading with and without incident photons or neutrons.
-
- Input: AcquisitionData
- Parameter: 2D projection with flat field (or stack)
- 2D projection with dark field (or stack)
- Output: AcquisitionDataSetn
- '''
-
- def __init__(self, flat_field = None, dark_field = None, tolerance = 1e-5):
- kwargs = {
- 'flat_field' : flat_field,
- 'dark_field' : dark_field,
- # very small number. Used when there is a division by zero
- 'tolerance' : tolerance
- }
-
- #DataProcessor.__init__(self, **kwargs)
- super(Normalizer, self).__init__(**kwargs)
- if not flat_field is None:
- self.set_flat_field(flat_field)
- if not dark_field is None:
- self.set_dark_field(dark_field)
-
- def check_input(self, dataset):
- if dataset.number_of_dimensions == 3 or\
- dataset.number_of_dimensions == 2:
- return True
- else:
- raise ValueError("Expected input dimensions is 2 or 3, got {0}"\
- .format(dataset.number_of_dimensions))
-
- def set_dark_field(self, df):
- if type(df) is numpy.ndarray:
- if len(numpy.shape(df)) == 3:
- raise ValueError('Dark Field should be 2D')
- elif len(numpy.shape(df)) == 2:
- self.dark_field = df
- elif issubclass(type(df), DataContainer):
- self.dark_field = self.set_dark_field(df.as_array())
-
- def set_flat_field(self, df):
- if type(df) is numpy.ndarray:
- if len(numpy.shape(df)) == 3:
- raise ValueError('Flat Field should be 2D')
- elif len(numpy.shape(df)) == 2:
- self.flat_field = df
- elif issubclass(type(df), DataContainer):
- self.flat_field = self.set_flat_field(df.as_array())
-
- @staticmethod
- def normalize_projection(projection, flat, dark, tolerance):
- a = (projection - dark)
- b = (flat-dark)
- with numpy.errstate(divide='ignore', invalid='ignore'):
- c = numpy.true_divide( a, b )
- c[ ~ numpy.isfinite( c )] = tolerance # set to not zero if 0/0
- return c
-
- @staticmethod
- def estimate_normalised_error(projection, flat, dark, delta_flat, delta_dark):
- '''returns the estimated relative error of the normalised projection
-
- n = (projection - dark) / (flat - dark)
- Dn/n = (flat-dark + projection-dark)/((flat-dark)*(projection-dark))*(Df/f + Dd/d)
- '''
- a = (projection - dark)
- b = (flat-dark)
- df = delta_flat / flat
- dd = delta_dark / dark
- rel_norm_error = (b + a) / (b * a) * (df + dd)
- return rel_norm_error
-
- def process(self, out=None):
-
- projections = self.get_input()
- dark = self.dark_field
- flat = self.flat_field
-
- if projections.number_of_dimensions == 3:
- if not (projections.shape[1:] == dark.shape and \
- projections.shape[1:] == flat.shape):
- raise ValueError('Flats/Dark and projections size do not match.')
-
-
- a = numpy.asarray(
- [ Normalizer.normalize_projection(
- projection, flat, dark, self.tolerance) \
- for projection in projections.as_array() ]
- )
- elif projections.number_of_dimensions == 2:
- a = Normalizer.normalize_projection(projections.as_array(),
- flat, dark, self.tolerance)
- y = type(projections)( a , True,
- dimension_labels=projections.dimension_labels,
- geometry=projections.geometry)
- return y
-
-
-class CenterOfRotationFinder(DataProcessor):
- '''Processor to find the center of rotation in a parallel beam experiment
-
- This processor read in a AcquisitionDataSet and finds the center of rotation
- based on Nghia Vo's method. https://doi.org/10.1364/OE.22.019078
-
- Input: AcquisitionDataSet
-
- Output: float. center of rotation in pixel coordinate
- '''
-
- def __init__(self):
- kwargs = {
-
- }
-
- #DataProcessor.__init__(self, **kwargs)
- super(CenterOfRotationFinder, self).__init__(**kwargs)
-
- def check_input(self, dataset):
- if dataset.number_of_dimensions == 3:
- if dataset.geometry.geom_type == 'parallel':
- return True
- else:
- raise ValueError('{0} is suitable only for parallel beam geometry'\
- .format(self.__class__.__name__))
- else:
- raise ValueError("Expected input dimensions is 3, got {0}"\
- .format(dataset.number_of_dimensions))
-
-
- # #########################################################################
- # Copyright (c) 2015, UChicago Argonne, LLC. All rights reserved. #
- # #
- # Copyright 2015. UChicago Argonne, LLC. This software was produced #
- # under U.S. Government contract DE-AC02-06CH11357 for Argonne National #
- # Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #
- # U.S. Department of Energy. The U.S. Government has rights to use, #
- # reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #
- # UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #
- # ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #
- # modified to produce derivative works, such modified software should #
- # be clearly marked, so as not to confuse it with the version available #
- # from ANL. #
- # #
- # Additionally, redistribution and use in source and binary forms, with #
- # or without modification, are permitted provided that the following #
- # conditions are met: #
- # #
- # * Redistributions of source code must retain the above copyright #
- # notice, this list of conditions and the following disclaimer. #
- # #
- # * Redistributions in binary form must reproduce the above copyright #
- # notice, this list of conditions and the following disclaimer in #
- # the documentation and/or other materials provided with the #
- # distribution. #
- # #
- # * Neither the name of UChicago Argonne, LLC, Argonne National #
- # Laboratory, ANL, the U.S. Government, nor the names of its #
- # contributors may be used to endorse or promote products derived #
- # from this software without specific prior written permission. #
- # #
- # THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #
- # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
- # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
- # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #
- # Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
- # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
- # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
- # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
- # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
- # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
- # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
- # POSSIBILITY OF SUCH DAMAGE. #
- # #########################################################################
-
- @staticmethod
- def as_ndarray(arr, dtype=None, copy=False):
- if not isinstance(arr, numpy.ndarray):
- arr = numpy.array(arr, dtype=dtype, copy=copy)
- return arr
-
- @staticmethod
- def as_dtype(arr, dtype, copy=False):
- if not arr.dtype == dtype:
- arr = numpy.array(arr, dtype=dtype, copy=copy)
- return arr
-
- @staticmethod
- def as_float32(arr):
- arr = CenterOfRotationFinder.as_ndarray(arr, numpy.float32)
- return CenterOfRotationFinder.as_dtype(arr, numpy.float32)
-
-
-
-
- @staticmethod
- def find_center_vo(tomo, ind=None, smin=-40, smax=40, srad=10, step=0.5,
- ratio=2., drop=20):
- """
- Find rotation axis location using Nghia Vo's method. :cite:`Vo:14`.
-
- Parameters
- ----------
- tomo : ndarray
- 3D tomographic data.
- ind : int, optional
- Index of the slice to be used for reconstruction.
- smin, smax : int, optional
- Reference to the horizontal center of the sinogram.
- srad : float, optional
- Fine search radius.
- step : float, optional
- Step of fine searching.
- ratio : float, optional
- The ratio between the FOV of the camera and the size of object.
- It's used to generate the mask.
- drop : int, optional
- Drop lines around vertical center of the mask.
-
- Returns
- -------
- float
- Rotation axis location.
-
- Notes
- -----
- The function may not yield a correct estimate, if:
-
- - the sample size is bigger than the field of view of the camera.
- In this case the ``ratio`` argument need to be set larger
- than the default of 2.0.
-
- - there is distortion in the imaging hardware. If there's
- no correction applied, the center of the projection image may
- yield a better estimate.
-
- - the sample contrast is weak. Paganin's filter need to be applied
- to overcome this.
-
- - the sample was changed during the scan.
- """
- tomo = CenterOfRotationFinder.as_float32(tomo)
-
- if ind is None:
- ind = tomo.shape[1] // 2
- _tomo = tomo[:, ind, :]
-
-
-
- # Reduce noise by smooth filters. Use different filters for coarse and fine search
- _tomo_cs = ndimage.filters.gaussian_filter(_tomo, (3, 1))
- _tomo_fs = ndimage.filters.median_filter(_tomo, (2, 2))
-
- # Coarse and fine searches for finding the rotation center.
- if _tomo.shape[0] * _tomo.shape[1] > 4e6: # If data is large (>2kx2k)
- #_tomo_coarse = downsample(numpy.expand_dims(_tomo_cs,1), level=2)[:, 0, :]
- #init_cen = _search_coarse(_tomo_coarse, smin, smax, ratio, drop)
- #fine_cen = _search_fine(_tomo_fs, srad, step, init_cen*4, ratio, drop)
- init_cen = CenterOfRotationFinder._search_coarse(_tomo_cs, smin,
- smax, ratio, drop)
- fine_cen = CenterOfRotationFinder._search_fine(_tomo_fs, srad,
- step, init_cen,
- ratio, drop)
- else:
- init_cen = CenterOfRotationFinder._search_coarse(_tomo_cs,
- smin, smax,
- ratio, drop)
- fine_cen = CenterOfRotationFinder._search_fine(_tomo_fs, srad,
- step, init_cen,
- ratio, drop)
-
- #logger.debug('Rotation center search finished: %i', fine_cen)
- return fine_cen
-
-
- @staticmethod
- def _search_coarse(sino, smin, smax, ratio, drop):
- """
- Coarse search for finding the rotation center.
- """
- (Nrow, Ncol) = sino.shape
- centerfliplr = (Ncol - 1.0) / 2.0
-
- # Copy the sinogram and flip left right, the purpose is to
- # make a full [0;2Pi] sinogram
- _copy_sino = numpy.fliplr(sino[1:])
-
- # This image is used for compensating the shift of sinogram 2
- temp_img = numpy.zeros((Nrow - 1, Ncol), dtype='float32')
- temp_img[:] = sino[-1]
-
- # Start coarse search in which the shift step is 1
- listshift = numpy.arange(smin, smax + 1)
- listmetric = numpy.zeros(len(listshift), dtype='float32')
- mask = CenterOfRotationFinder._create_mask(2 * Nrow - 1, Ncol,
- 0.5 * ratio * Ncol, drop)
- for i in listshift:
- _sino = numpy.roll(_copy_sino, i, axis=1)
- if i >= 0:
- _sino[:, 0:i] = temp_img[:, 0:i]
- else:
- _sino[:, i:] = temp_img[:, i:]
- listmetric[i - smin] = numpy.sum(numpy.abs(numpy.fft.fftshift(
- #pyfftw.interfaces.numpy_fft.fft2(
- # numpy.vstack((sino, _sino)))
- numpy.fft.fft2(numpy.vstack((sino, _sino)))
- )) * mask)
- minpos = numpy.argmin(listmetric)
- return centerfliplr + listshift[minpos] / 2.0
-
- @staticmethod
- def _search_fine(sino, srad, step, init_cen, ratio, drop):
- """
- Fine search for finding the rotation center.
- """
- Nrow, Ncol = sino.shape
- centerfliplr = (Ncol + 1.0) / 2.0 - 1.0
- # Use to shift the sinogram 2 to the raw CoR.
- shiftsino = numpy.int16(2 * (init_cen - centerfliplr))
- _copy_sino = numpy.roll(numpy.fliplr(sino[1:]), shiftsino, axis=1)
- if init_cen <= centerfliplr:
- lefttake = numpy.int16(numpy.ceil(srad + 1))
- righttake = numpy.int16(numpy.floor(2 * init_cen - srad - 1))
- else:
- lefttake = numpy.int16(numpy.ceil(
- init_cen - (Ncol - 1 - init_cen) + srad + 1))
- righttake = numpy.int16(numpy.floor(Ncol - 1 - srad - 1))
- Ncol1 = righttake - lefttake + 1
- mask = CenterOfRotationFinder._create_mask(2 * Nrow - 1, Ncol1,
- 0.5 * ratio * Ncol, drop)
- numshift = numpy.int16((2 * srad) / step) + 1
- listshift = numpy.linspace(-srad, srad, num=numshift)
- listmetric = numpy.zeros(len(listshift), dtype='float32')
- factor1 = numpy.mean(sino[-1, lefttake:righttake])
- num1 = 0
- for i in listshift:
- _sino = ndimage.interpolation.shift(
- _copy_sino, (0, i), prefilter=False)
- factor2 = numpy.mean(_sino[0,lefttake:righttake])
- _sino = _sino * factor1 / factor2
- sinojoin = numpy.vstack((sino, _sino))
- listmetric[num1] = numpy.sum(numpy.abs(numpy.fft.fftshift(
- #pyfftw.interfaces.numpy_fft.fft2(
- # sinojoin[:, lefttake:righttake + 1])
- numpy.fft.fft2(sinojoin[:, lefttake:righttake + 1])
- )) * mask)
- num1 = num1 + 1
- minpos = numpy.argmin(listmetric)
- return init_cen + listshift[minpos] / 2.0
-
- @staticmethod
- def _create_mask(nrow, ncol, radius, drop):
- du = 1.0 / ncol
- dv = (nrow - 1.0) / (nrow * 2.0 * numpy.pi)
- centerrow = numpy.ceil(nrow / 2) - 1
- centercol = numpy.ceil(ncol / 2) - 1
- # added by Edoardo Pasca
- centerrow = int(centerrow)
- centercol = int(centercol)
- mask = numpy.zeros((nrow, ncol), dtype='float32')
- for i in range(nrow):
- num1 = numpy.round(((i - centerrow) * dv / radius) / du)
- (p1, p2) = numpy.int16(numpy.clip(numpy.sort(
- (-num1 + centercol, num1 + centercol)), 0, ncol - 1))
- mask[i, p1:p2 + 1] = numpy.ones(p2 - p1 + 1, dtype='float32')
- if drop < centerrow:
- mask[centerrow - drop:centerrow + drop + 1,
- :] = numpy.zeros((2 * drop + 1, ncol), dtype='float32')
- mask[:,centercol-1:centercol+2] = numpy.zeros((nrow, 3), dtype='float32')
- return mask
-
- def process(self, out=None):
-
- projections = self.get_input()
-
- cor = CenterOfRotationFinder.find_center_vo(projections.as_array())
-
- return cor
-
-
-class AcquisitionDataPadder(DataProcessor):
- '''Normalization based on flat and dark
-
- This processor read in a AcquisitionData and normalises it based on
- the instrument reading with and without incident photons or neutrons.
-
- Input: AcquisitionData
- Parameter: 2D projection with flat field (or stack)
- 2D projection with dark field (or stack)
- Output: AcquisitionDataSetn
- '''
-
- def __init__(self,
- center_of_rotation = None,
- acquisition_geometry = None,
- pad_value = 1e-5):
- kwargs = {
- 'acquisition_geometry' : acquisition_geometry,
- 'center_of_rotation' : center_of_rotation,
- 'pad_value' : pad_value
- }
-
- super(AcquisitionDataPadder, self).__init__(**kwargs)
-
- def check_input(self, dataset):
- if self.acquisition_geometry is None:
- self.acquisition_geometry = dataset.geometry
- if dataset.number_of_dimensions == 3:
- return True
- else:
- raise ValueError("Expected input dimensions is 2 or 3, got {0}"\
- .format(dataset.number_of_dimensions))
-
- def process(self, out=None):
- projections = self.get_input()
- w = projections.get_dimension_size('horizontal')
- delta = w - 2 * self.center_of_rotation
-
- padded_width = int (
- numpy.ceil(abs(delta)) + w
- )
- delta_pix = padded_width - w
-
- voxel_per_pixel = 1
- geom = pbalg.pb_setup_geometry_from_acquisition(projections.as_array(),
- self.acquisition_geometry.angles,
- self.center_of_rotation,
- voxel_per_pixel )
-
- padded_geometry = self.acquisition_geometry.clone()
-
- padded_geometry.pixel_num_h = geom['n_h']
- padded_geometry.pixel_num_v = geom['n_v']
-
- delta_pix_h = padded_geometry.pixel_num_h - self.acquisition_geometry.pixel_num_h
- delta_pix_v = padded_geometry.pixel_num_v - self.acquisition_geometry.pixel_num_v
-
- if delta_pix_h == 0:
- delta_pix_h = delta_pix
- padded_geometry.pixel_num_h = padded_width
- #initialize a new AcquisitionData with values close to 0
- out = AcquisitionData(geometry=padded_geometry)
- out = out + self.pad_value
-
-
- #pad in the horizontal-vertical plane -> slice on angles
- if delta > 0:
- #pad left of middle
- command = "out.array["
- for i in range(out.number_of_dimensions):
- if out.dimension_labels[i] == 'horizontal':
- value = '{0}:{1}'.format(delta_pix_h, delta_pix_h+w)
- command = command + str(value)
- else:
- if out.dimension_labels[i] == 'vertical' :
- value = '{0}:'.format(delta_pix_v)
- command = command + str(value)
- else:
- command = command + ":"
- if i < out.number_of_dimensions -1:
- command = command + ','
- command = command + '] = projections.array'
- #print (command)
- else:
- #pad right of middle
- command = "out.array["
- for i in range(out.number_of_dimensions):
- if out.dimension_labels[i] == 'horizontal':
- value = '{0}:{1}'.format(0, w)
- command = command + str(value)
- else:
- if out.dimension_labels[i] == 'vertical' :
- value = '{0}:'.format(delta_pix_v)
- command = command + str(value)
- else:
- command = command + ":"
- if i < out.number_of_dimensions -1:
- command = command + ','
- command = command + '] = projections.array'
- #print (command)
- #cleaned = eval(command)
- exec(command)
+# -*- coding: utf-8 -*-
+# This work is part of the Core Imaging Library developed by
+# Visual Analytics and Imaging System Group of the Science Technology
+# Facilities Council, STFC
+
+# Copyright 2018 Edoardo Pasca
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+from ccpi.framework import DataProcessor, DataContainer, AcquisitionData,\
+ AcquisitionGeometry, ImageGeometry, ImageData
+from ccpi.reconstruction.parallelbeam import alg as pbalg
+import numpy
+from scipy import ndimage
+
+import matplotlib.pyplot as plt
+
+
+class Normalizer(DataProcessor):
+ '''Normalization based on flat and dark
+
+ This processor read in a AcquisitionData and normalises it based on
+ the instrument reading with and without incident photons or neutrons.
+
+ Input: AcquisitionData
+ Parameter: 2D projection with flat field (or stack)
+ 2D projection with dark field (or stack)
+ Output: AcquisitionDataSetn
+ '''
+
+ def __init__(self, flat_field = None, dark_field = None, tolerance = 1e-5):
+ kwargs = {
+ 'flat_field' : flat_field,
+ 'dark_field' : dark_field,
+ # very small number. Used when there is a division by zero
+ 'tolerance' : tolerance
+ }
+
+ #DataProcessor.__init__(self, **kwargs)
+ super(Normalizer, self).__init__(**kwargs)
+ if not flat_field is None:
+ self.set_flat_field(flat_field)
+ if not dark_field is None:
+ self.set_dark_field(dark_field)
+
+ def check_input(self, dataset):
+ if dataset.number_of_dimensions == 3 or\
+ dataset.number_of_dimensions == 2:
+ return True
+ else:
+ raise ValueError("Expected input dimensions is 2 or 3, got {0}"\
+ .format(dataset.number_of_dimensions))
+
+ def set_dark_field(self, df):
+ if type(df) is numpy.ndarray:
+ if len(numpy.shape(df)) == 3:
+ raise ValueError('Dark Field should be 2D')
+ elif len(numpy.shape(df)) == 2:
+ self.dark_field = df
+ elif issubclass(type(df), DataContainer):
+ self.dark_field = self.set_dark_field(df.as_array())
+
+ def set_flat_field(self, df):
+ if type(df) is numpy.ndarray:
+ if len(numpy.shape(df)) == 3:
+ raise ValueError('Flat Field should be 2D')
+ elif len(numpy.shape(df)) == 2:
+ self.flat_field = df
+ elif issubclass(type(df), DataContainer):
+ self.flat_field = self.set_flat_field(df.as_array())
+
+ @staticmethod
+ def normalize_projection(projection, flat, dark, tolerance):
+ a = (projection - dark)
+ b = (flat-dark)
+ with numpy.errstate(divide='ignore', invalid='ignore'):
+ c = numpy.true_divide( a, b )
+ c[ ~ numpy.isfinite( c )] = tolerance # set to not zero if 0/0
+ return c
+
+ @staticmethod
+ def estimate_normalised_error(projection, flat, dark, delta_flat, delta_dark):
+ '''returns the estimated relative error of the normalised projection
+
+ n = (projection - dark) / (flat - dark)
+ Dn/n = (flat-dark + projection-dark)/((flat-dark)*(projection-dark))*(Df/f + Dd/d)
+ '''
+ a = (projection - dark)
+ b = (flat-dark)
+ df = delta_flat / flat
+ dd = delta_dark / dark
+ rel_norm_error = (b + a) / (b * a) * (df + dd)
+ return rel_norm_error
+
+ def process(self, out=None):
+
+ projections = self.get_input()
+ dark = self.dark_field
+ flat = self.flat_field
+
+ if projections.number_of_dimensions == 3:
+ if not (projections.shape[1:] == dark.shape and \
+ projections.shape[1:] == flat.shape):
+ raise ValueError('Flats/Dark and projections size do not match.')
+
+
+ a = numpy.asarray(
+ [ Normalizer.normalize_projection(
+ projection, flat, dark, self.tolerance) \
+ for projection in projections.as_array() ]
+ )
+ elif projections.number_of_dimensions == 2:
+ a = Normalizer.normalize_projection(projections.as_array(),
+ flat, dark, self.tolerance)
+ y = type(projections)( a , True,
+ dimension_labels=projections.dimension_labels,
+ geometry=projections.geometry)
+ return y
+
+
+class CenterOfRotationFinder(DataProcessor):
+ '''Processor to find the center of rotation in a parallel beam experiment
+
+ This processor read in a AcquisitionDataSet and finds the center of rotation
+ based on Nghia Vo's method. https://doi.org/10.1364/OE.22.019078
+
+ Input: AcquisitionDataSet
+
+ Output: float. center of rotation in pixel coordinate
+ '''
+
+ def __init__(self):
+ kwargs = {
+
+ }
+
+ #DataProcessor.__init__(self, **kwargs)
+ super(CenterOfRotationFinder, self).__init__(**kwargs)
+
+ def check_input(self, dataset):
+ if dataset.number_of_dimensions == 3:
+ if dataset.geometry.geom_type == 'parallel':
+ return True
+ else:
+ raise ValueError('{0} is suitable only for parallel beam geometry'\
+ .format(self.__class__.__name__))
+ else:
+ raise ValueError("Expected input dimensions is 3, got {0}"\
+ .format(dataset.number_of_dimensions))
+
+
+ # #########################################################################
+ # Copyright (c) 2015, UChicago Argonne, LLC. All rights reserved. #
+ # #
+ # Copyright 2015. UChicago Argonne, LLC. This software was produced #
+ # under U.S. Government contract DE-AC02-06CH11357 for Argonne National #
+ # Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #
+ # U.S. Department of Energy. The U.S. Government has rights to use, #
+ # reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #
+ # UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #
+ # ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #
+ # modified to produce derivative works, such modified software should #
+ # be clearly marked, so as not to confuse it with the version available #
+ # from ANL. #
+ # #
+ # Additionally, redistribution and use in source and binary forms, with #
+ # or without modification, are permitted provided that the following #
+ # conditions are met: #
+ # #
+ # * Redistributions of source code must retain the above copyright #
+ # notice, this list of conditions and the following disclaimer. #
+ # #
+ # * Redistributions in binary form must reproduce the above copyright #
+ # notice, this list of conditions and the following disclaimer in #
+ # the documentation and/or other materials provided with the #
+ # distribution. #
+ # #
+ # * Neither the name of UChicago Argonne, LLC, Argonne National #
+ # Laboratory, ANL, the U.S. Government, nor the names of its #
+ # contributors may be used to endorse or promote products derived #
+ # from this software without specific prior written permission. #
+ # #
+ # THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #
+ # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
+ # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
+ # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #
+ # Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
+ # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
+ # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
+ # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
+ # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
+ # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
+ # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
+ # POSSIBILITY OF SUCH DAMAGE. #
+ # #########################################################################
+
+ @staticmethod
+ def as_ndarray(arr, dtype=None, copy=False):
+ if not isinstance(arr, numpy.ndarray):
+ arr = numpy.array(arr, dtype=dtype, copy=copy)
+ return arr
+
+ @staticmethod
+ def as_dtype(arr, dtype, copy=False):
+ if not arr.dtype == dtype:
+ arr = numpy.array(arr, dtype=dtype, copy=copy)
+ return arr
+
+ @staticmethod
+ def as_float32(arr):
+ arr = CenterOfRotationFinder.as_ndarray(arr, numpy.float32)
+ return CenterOfRotationFinder.as_dtype(arr, numpy.float32)
+
+
+
+
+ @staticmethod
+ def find_center_vo(tomo, ind=None, smin=-40, smax=40, srad=10, step=0.5,
+ ratio=2., drop=20):
+ """
+ Find rotation axis location using Nghia Vo's method. :cite:`Vo:14`.
+
+ Parameters
+ ----------
+ tomo : ndarray
+ 3D tomographic data.
+ ind : int, optional
+ Index of the slice to be used for reconstruction.
+ smin, smax : int, optional
+ Reference to the horizontal center of the sinogram.
+ srad : float, optional
+ Fine search radius.
+ step : float, optional
+ Step of fine searching.
+ ratio : float, optional
+ The ratio between the FOV of the camera and the size of object.
+ It's used to generate the mask.
+ drop : int, optional
+ Drop lines around vertical center of the mask.
+
+ Returns
+ -------
+ float
+ Rotation axis location.
+
+ Notes
+ -----
+ The function may not yield a correct estimate, if:
+
+ - the sample size is bigger than the field of view of the camera.
+ In this case the ``ratio`` argument need to be set larger
+ than the default of 2.0.
+
+ - there is distortion in the imaging hardware. If there's
+ no correction applied, the center of the projection image may
+ yield a better estimate.
+
+ - the sample contrast is weak. Paganin's filter need to be applied
+ to overcome this.
+
+ - the sample was changed during the scan.
+ """
+ tomo = CenterOfRotationFinder.as_float32(tomo)
+
+ if ind is None:
+ ind = tomo.shape[1] // 2
+ _tomo = tomo[:, ind, :]
+
+
+
+ # Reduce noise by smooth filters. Use different filters for coarse and fine search
+ _tomo_cs = ndimage.filters.gaussian_filter(_tomo, (3, 1))
+ _tomo_fs = ndimage.filters.median_filter(_tomo, (2, 2))
+
+ # Coarse and fine searches for finding the rotation center.
+ if _tomo.shape[0] * _tomo.shape[1] > 4e6: # If data is large (>2kx2k)
+ #_tomo_coarse = downsample(numpy.expand_dims(_tomo_cs,1), level=2)[:, 0, :]
+ #init_cen = _search_coarse(_tomo_coarse, smin, smax, ratio, drop)
+ #fine_cen = _search_fine(_tomo_fs, srad, step, init_cen*4, ratio, drop)
+ init_cen = CenterOfRotationFinder._search_coarse(_tomo_cs, smin,
+ smax, ratio, drop)
+ fine_cen = CenterOfRotationFinder._search_fine(_tomo_fs, srad,
+ step, init_cen,
+ ratio, drop)
+ else:
+ init_cen = CenterOfRotationFinder._search_coarse(_tomo_cs,
+ smin, smax,
+ ratio, drop)
+ fine_cen = CenterOfRotationFinder._search_fine(_tomo_fs, srad,
+ step, init_cen,
+ ratio, drop)
+
+ #logger.debug('Rotation center search finished: %i', fine_cen)
+ return fine_cen
+
+
+ @staticmethod
+ def _search_coarse(sino, smin, smax, ratio, drop):
+ """
+ Coarse search for finding the rotation center.
+ """
+ (Nrow, Ncol) = sino.shape
+ centerfliplr = (Ncol - 1.0) / 2.0
+
+ # Copy the sinogram and flip left right, the purpose is to
+ # make a full [0;2Pi] sinogram
+ _copy_sino = numpy.fliplr(sino[1:])
+
+ # This image is used for compensating the shift of sinogram 2
+ temp_img = numpy.zeros((Nrow - 1, Ncol), dtype='float32')
+ temp_img[:] = sino[-1]
+
+ # Start coarse search in which the shift step is 1
+ listshift = numpy.arange(smin, smax + 1)
+ listmetric = numpy.zeros(len(listshift), dtype='float32')
+ mask = CenterOfRotationFinder._create_mask(2 * Nrow - 1, Ncol,
+ 0.5 * ratio * Ncol, drop)
+ for i in listshift:
+ _sino = numpy.roll(_copy_sino, i, axis=1)
+ if i >= 0:
+ _sino[:, 0:i] = temp_img[:, 0:i]
+ else:
+ _sino[:, i:] = temp_img[:, i:]
+ listmetric[i - smin] = numpy.sum(numpy.abs(numpy.fft.fftshift(
+ #pyfftw.interfaces.numpy_fft.fft2(
+ # numpy.vstack((sino, _sino)))
+ numpy.fft.fft2(numpy.vstack((sino, _sino)))
+ )) * mask)
+ minpos = numpy.argmin(listmetric)
+ return centerfliplr + listshift[minpos] / 2.0
+
+ @staticmethod
+ def _search_fine(sino, srad, step, init_cen, ratio, drop):
+ """
+ Fine search for finding the rotation center.
+ """
+ Nrow, Ncol = sino.shape
+ centerfliplr = (Ncol + 1.0) / 2.0 - 1.0
+ # Use to shift the sinogram 2 to the raw CoR.
+ shiftsino = numpy.int16(2 * (init_cen - centerfliplr))
+ _copy_sino = numpy.roll(numpy.fliplr(sino[1:]), shiftsino, axis=1)
+ if init_cen <= centerfliplr:
+ lefttake = numpy.int16(numpy.ceil(srad + 1))
+ righttake = numpy.int16(numpy.floor(2 * init_cen - srad - 1))
+ else:
+ lefttake = numpy.int16(numpy.ceil(
+ init_cen - (Ncol - 1 - init_cen) + srad + 1))
+ righttake = numpy.int16(numpy.floor(Ncol - 1 - srad - 1))
+ Ncol1 = righttake - lefttake + 1
+ mask = CenterOfRotationFinder._create_mask(2 * Nrow - 1, Ncol1,
+ 0.5 * ratio * Ncol, drop)
+ numshift = numpy.int16((2 * srad) / step) + 1
+ listshift = numpy.linspace(-srad, srad, num=numshift)
+ listmetric = numpy.zeros(len(listshift), dtype='float32')
+ factor1 = numpy.mean(sino[-1, lefttake:righttake])
+ num1 = 0
+ for i in listshift:
+ _sino = ndimage.interpolation.shift(
+ _copy_sino, (0, i), prefilter=False)
+ factor2 = numpy.mean(_sino[0,lefttake:righttake])
+ _sino = _sino * factor1 / factor2
+ sinojoin = numpy.vstack((sino, _sino))
+ listmetric[num1] = numpy.sum(numpy.abs(numpy.fft.fftshift(
+ #pyfftw.interfaces.numpy_fft.fft2(
+ # sinojoin[:, lefttake:righttake + 1])
+ numpy.fft.fft2(sinojoin[:, lefttake:righttake + 1])
+ )) * mask)
+ num1 = num1 + 1
+ minpos = numpy.argmin(listmetric)
+ return init_cen + listshift[minpos] / 2.0
+
+ @staticmethod
+ def _create_mask(nrow, ncol, radius, drop):
+ du = 1.0 / ncol
+ dv = (nrow - 1.0) / (nrow * 2.0 * numpy.pi)
+ centerrow = numpy.ceil(nrow / 2) - 1
+ centercol = numpy.ceil(ncol / 2) - 1
+ # added by Edoardo Pasca
+ centerrow = int(centerrow)
+ centercol = int(centercol)
+ mask = numpy.zeros((nrow, ncol), dtype='float32')
+ for i in range(nrow):
+ num1 = numpy.round(((i - centerrow) * dv / radius) / du)
+ (p1, p2) = numpy.int16(numpy.clip(numpy.sort(
+ (-num1 + centercol, num1 + centercol)), 0, ncol - 1))
+ mask[i, p1:p2 + 1] = numpy.ones(p2 - p1 + 1, dtype='float32')
+ if drop < centerrow:
+ mask[centerrow - drop:centerrow + drop + 1,
+ :] = numpy.zeros((2 * drop + 1, ncol), dtype='float32')
+ mask[:,centercol-1:centercol+2] = numpy.zeros((nrow, 3), dtype='float32')
+ return mask
+
+ def process(self, out=None):
+
+ projections = self.get_input()
+
+ cor = CenterOfRotationFinder.find_center_vo(projections.as_array())
+
+ return cor
+
+
+class AcquisitionDataPadder(DataProcessor):
+ '''Normalization based on flat and dark
+
+ This processor read in a AcquisitionData and normalises it based on
+ the instrument reading with and without incident photons or neutrons.
+
+ Input: AcquisitionData
+ Parameter: 2D projection with flat field (or stack)
+ 2D projection with dark field (or stack)
+ Output: AcquisitionDataSetn
+ '''
+
+ def __init__(self,
+ center_of_rotation = None,
+ acquisition_geometry = None,
+ pad_value = 1e-5):
+ kwargs = {
+ 'acquisition_geometry' : acquisition_geometry,
+ 'center_of_rotation' : center_of_rotation,
+ 'pad_value' : pad_value
+ }
+
+ super(AcquisitionDataPadder, self).__init__(**kwargs)
+
+ def check_input(self, dataset):
+ if self.acquisition_geometry is None:
+ self.acquisition_geometry = dataset.geometry
+ if dataset.number_of_dimensions == 3:
+ return True
+ else:
+ raise ValueError("Expected input dimensions is 2 or 3, got {0}"\
+ .format(dataset.number_of_dimensions))
+
+ def process(self, out=None):
+ projections = self.get_input()
+ w = projections.get_dimension_size('horizontal')
+ delta = w - 2 * self.center_of_rotation
+
+ padded_width = int (
+ numpy.ceil(abs(delta)) + w
+ )
+ delta_pix = padded_width - w
+
+ voxel_per_pixel = 1
+ geom = pbalg.pb_setup_geometry_from_acquisition(projections.as_array(),
+ self.acquisition_geometry.angles,
+ self.center_of_rotation,
+ voxel_per_pixel )
+
+ padded_geometry = self.acquisition_geometry.clone()
+
+ padded_geometry.pixel_num_h = geom['n_h']
+ padded_geometry.pixel_num_v = geom['n_v']
+
+ delta_pix_h = padded_geometry.pixel_num_h - self.acquisition_geometry.pixel_num_h
+ delta_pix_v = padded_geometry.pixel_num_v - self.acquisition_geometry.pixel_num_v
+
+ if delta_pix_h == 0:
+ delta_pix_h = delta_pix
+ padded_geometry.pixel_num_h = padded_width
+ #initialize a new AcquisitionData with values close to 0
+ out = AcquisitionData(geometry=padded_geometry)
+ out = out + self.pad_value
+
+
+ #pad in the horizontal-vertical plane -> slice on angles
+ if delta > 0:
+ #pad left of middle
+ command = "out.array["
+ for i in range(out.number_of_dimensions):
+ if out.dimension_labels[i] == 'horizontal':
+ value = '{0}:{1}'.format(delta_pix_h, delta_pix_h+w)
+ command = command + str(value)
+ else:
+ if out.dimension_labels[i] == 'vertical' :
+ value = '{0}:'.format(delta_pix_v)
+ command = command + str(value)
+ else:
+ command = command + ":"
+ if i < out.number_of_dimensions -1:
+ command = command + ','
+ command = command + '] = projections.array'
+ #print (command)
+ else:
+ #pad right of middle
+ command = "out.array["
+ for i in range(out.number_of_dimensions):
+ if out.dimension_labels[i] == 'horizontal':
+ value = '{0}:{1}'.format(0, w)
+ command = command + str(value)
+ else:
+ if out.dimension_labels[i] == 'vertical' :
+ value = '{0}:'.format(delta_pix_v)
+ command = command + str(value)
+ else:
+ command = command + ":"
+ if i < out.number_of_dimensions -1:
+ command = command + ','
+ command = command + '] = projections.array'
+ #print (command)
+ #cleaned = eval(command)
+ exec(command)
+        return out
\ No newline at end of file
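
The processors.py hunk removes and re-adds the file with what appears to be identical content, so this is most likely a whitespace/line-ending normalisation rather than a functional change. For reference, the flat/dark correction that Normalizer applies to each projection reduces to the following numpy computation, mirrored here standalone:

```python
import numpy

def normalize_projection(projection, flat, dark, tolerance=1e-5):
    # (projection - dark) / (flat - dark), with non-finite results
    # (0/0 or x/0 from dead pixels) replaced by a small tolerance
    a = projection - dark
    b = flat - dark
    with numpy.errstate(divide='ignore', invalid='ignore'):
        c = numpy.true_divide(a, b)
    c[~numpy.isfinite(c)] = tolerance
    return c

proj = numpy.array([[2.0, 4.0], [6.0, 3.0]])
dark = numpy.zeros((2, 2))
flat = numpy.array([[4.0, 4.0], [0.0, 6.0]])   # one dead pixel: flat == dark
print(normalize_projection(proj, flat, dark))
```
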
diff --git a/Wrappers/Python/wip/CGLS_tikhonov.py b/Wrappers/Python/wip/CGLS_tikhonov.py
index f247896..e9bbcd9 100644
--- a/Wrappers/Python/wip/CGLS_tikhonov.py
+++ b/Wrappers/Python/wip/CGLS_tikhonov.py
@@ -11,8 +11,7 @@ import matplotlib.pyplot as plt
import numpy
from ccpi.framework import BlockDataContainer
from ccpi.optimisation.operators import BlockOperator
-from ccpi.optimisation.operators.BlockOperator import BlockLinearOperator
-
+
# Set up phantom size N x N x vert by creating ImageGeometry, initialising the
# ImageData object with this geometry and empty array and finally put some
# data into its array, and display one slice as image.
@@ -128,26 +127,26 @@ simplef.L = 0.00003
gd = GradientDescent( x_init=x_init, objective_function=simplef,
rate=simplef.L)
-gd.max_iteration = 10
+gd.max_iteration = 50
Kbig.direct(X_init)
Kbig.adjoint(B)
cg = CGLS()
cg.set_up(X_init, Kbig, B )
-cg.max_iteration = 5
+cg.max_iteration = 10
cgsmall = CGLS()
cgsmall.set_up(X_init, Ksmall, B )
-cgsmall.max_iteration = 5
+cgsmall.max_iteration = 10
cgs = CGLS()
cgs.set_up(x_init, A, b )
-cgs.max_iteration = 6
+cgs.max_iteration = 10
cgok = CGLS()
cgok.set_up(X_init, Kok, B )
-cgok.max_iteration = 6
+cgok.max_iteration = 10
# #
#out.__isub__(B)
#out2 = K.adjoint(out)
@@ -176,22 +175,22 @@ cgok.run(10, verbose=True)
# print ("iteration {} {}".format(cgs.iteration, cgs.get_current_loss()))
# #
fig = plt.figure()
-plt.subplot(1,6,1)
+plt.subplot(2,3,1)
plt.imshow(Phantom.subset(vertical=0).as_array())
plt.title('Simulated Phantom')
-plt.subplot(1,6,2)
+plt.subplot(2,3,2)
plt.imshow(gd.get_output().subset(vertical=0).as_array())
plt.title('Simple Gradient Descent')
-plt.subplot(1,6,3)
+plt.subplot(2,3,3)
plt.imshow(cgs.get_output().subset(vertical=0).as_array())
plt.title('Simple CGLS')
-plt.subplot(1,6,4)
+plt.subplot(2,3,5)
plt.imshow(cg.get_output().get_item(0).subset(vertical=0).as_array())
plt.title('Composite CGLS\nbig lambda')
-plt.subplot(1,6,5)
+plt.subplot(2,3,6)
plt.imshow(cgsmall.get_output().get_item(0).subset(vertical=0).as_array())
plt.title('Composite CGLS\nsmall lambda')
-plt.subplot(1,6,6)
+plt.subplot(2,3,4)
plt.imshow(cgok.get_output().get_item(0).subset(vertical=0).as_array())
plt.title('Composite CGLS\nok lambda')
plt.show()
diff --git a/Wrappers/Python/wip/CreatePhantom.py b/Wrappers/Python/wip/CreatePhantom.py
new file mode 100644
index 0000000..4bf6ea4
--- /dev/null
+++ b/Wrappers/Python/wip/CreatePhantom.py
@@ -0,0 +1,242 @@
+import numpy
+import tomophantom
+from tomophantom import TomoP3D
+from tomophantom.supp.artifacts import ArtifactsClass as Artifact
+import os
+
+import matplotlib.pyplot as plt
+
+from ccpi.optimisation.algorithms import CGLS
+from ccpi.plugins.ops import CCPiProjectorSimple
+from ccpi.optimisation.ops import PowerMethodNonsquare
+from ccpi.optimisation.ops import TomoIdentity
+from ccpi.optimisation.funcs import Norm2sq, Norm1
+from ccpi.framework import ImageGeometry, AcquisitionGeometry, ImageData, AcquisitionData
+from ccpi.optimisation.algorithms import GradientDescent
+from ccpi.framework import BlockDataContainer
+from ccpi.optimisation.operators import BlockOperator
+
+
+model = 13 # select a model number from tomophantom library
+N_size = 64 # Define phantom dimensions using a scalar value (cubic phantom)
+path = os.path.dirname(tomophantom.__file__)
+path_library3D = os.path.join(path, "Phantom3DLibrary.dat")
+
+#This will generate a N_size x N_size x N_size phantom (3D)
+phantom_tm = TomoP3D.Model(model, N_size, path_library3D)
+
+# detector column count (horizontal)
+detector_horiz = int(numpy.sqrt(2)*N_size)
+# detector row count (vertical) (no reason for it to be > N)
+detector_vert = N_size
+# number of projection angles
+angles_num = int(0.5*numpy.pi*N_size)
+# angles are expressed in degrees
+angles = numpy.linspace(0.0, 179.9, angles_num, dtype='float32')
+
+
+acquisition_data_array = TomoP3D.ModelSino(model, N_size,
+ detector_horiz, detector_vert,
+ angles,
+ path_library3D)
+
+tomophantom_acquisition_axes_order = ['vertical', 'angle', 'horizontal']
+
+artifacts = Artifact(acquisition_data_array)
+
+
+tp_acq_data = AcquisitionData(artifacts.noise(0.2, 'Gaussian'),
+ dimension_labels=tomophantom_acquisition_axes_order)
+#print ("size", acquisition_data.shape)
+print ("horiz", detector_horiz)
+print ("vert", detector_vert)
+print ("angles", angles_num)
+
+tp_acq_geometry = AcquisitionGeometry(geom_type='parallel', dimension='3D',
+ angles=angles,
+ pixel_num_h=detector_horiz,
+ pixel_num_v=detector_vert,
+ channels=1,
+ )
+
+acq_data = tp_acq_geometry.allocate()
+#print (tp_acq_geometry)
+print ("AcquisitionData", acq_data.shape)
+print ("TomoPhantom", tp_acq_data.shape, tp_acq_data.dimension_labels)
+
+default_acquisition_axes_order = ['angle', 'vertical', 'horizontal']
+
+acq_data2 = tp_acq_data.subset(dimensions=default_acquisition_axes_order)
+print ("AcquisitionData", acq_data2.shape, acq_data2.dimension_labels)
+print ("AcquisitionData {} TomoPhantom {}".format(id(acq_data2.as_array()),
+ id(acquisition_data_array)))
+
+fig = plt.figure()
+plt.subplot(1,2,1)
+plt.imshow(acquisition_data_array[20])
+plt.title('Sinogram')
+plt.subplot(1,2,2)
+plt.imshow(tp_acq_data.as_array()[20])
+plt.title('Sinogram + noise')
+plt.show()
+
+# Set up Operator object combining the ImageGeometry and AcquisitionGeometry
+# wrapping calls to CCPi projector.
+
+ig = ImageGeometry(voxel_num_x=detector_horiz,
+ voxel_num_y=detector_horiz,
+ voxel_num_z=detector_vert)
+A = CCPiProjectorSimple(ig, tp_acq_geometry)
+# Forward and backprojection are available as methods direct and adjoint. Here
+# generate test data b and some noise
+
+#b = A.direct(Phantom)
+b = acq_data2
+
+#z = A.adjoint(b)
+
+
+# Using the test data b, different reconstruction methods can now be set up as
+# demonstrated in the rest of this file. In general all methods need an initial
+# guess and some algorithm options to be set. Note that 100 iterations for
+# some of the methods is a very low number and 1000 or 10000 iterations may be
+# needed if one wants to obtain a converged solution.
+x_init = ImageData(geometry=ig,
+ dimension_labels=['horizontal_x','horizontal_y','vertical'])
+X_init = BlockDataContainer(x_init)
+B = BlockDataContainer(b,
+ ImageData(geometry=ig, dimension_labels=['horizontal_x','horizontal_y','vertical']))
+
+# setup a tomo identity
+Ibig = 4e1 * TomoIdentity(geometry=ig)
+Ismall = 1e-3 * TomoIdentity(geometry=ig)
+Iok = 7.6e0 * TomoIdentity(geometry=ig)
+
+# composite operator
+Kbig = BlockOperator(A, Ibig, shape=(2,1))
+Ksmall = BlockOperator(A, Ismall, shape=(2,1))
+Kok = BlockOperator(A, Iok, shape=(2,1))
+
+#out = K.direct(X_init)
+#x0 = x_init.copy()
+#x0.fill(numpy.random.randn(*x0.shape))
+#lipschitz = PowerMethodNonsquare(A, 5, x0)
+#print("lipschitz", lipschitz)
+
+#%%
+
+simplef = Norm2sq(A, b, memopt=False)
+#simplef.L = lipschitz[0]/3000.
+simplef.L = 0.00003
+
+f = Norm2sq(Kbig,B)
+f.L = 0.00003
+
+fsmall = Norm2sq(Ksmall,B)
+fsmall.L = 0.00003
+
+fok = Norm2sq(Kok,B)
+fok.L = 0.00003
+
+print("setup gradient descent")
+gd = GradientDescent( x_init=x_init, objective_function=simplef,
+ rate=simplef.L)
+gd.max_iteration = 5
+simplef2 = Norm2sq(A, b, memopt=True)
+#simplef.L = lipschitz[0]/3000.
+simplef2.L = 0.00003
+print("setup gradient descent")
+gd2 = GradientDescent( x_init=x_init, objective_function=simplef2,
+ rate=simplef2.L)
+gd2.max_iteration = 5
+
+Kbig.direct(X_init)
+Kbig.adjoint(B)
+print("setup CGLS")
+cg = CGLS()
+cg.set_up(X_init, Kbig, B )
+cg.max_iteration = 10
+
+print("setup CGLS")
+cgsmall = CGLS()
+cgsmall.set_up(X_init, Ksmall, B )
+cgsmall.max_iteration = 10
+
+
+print("setup CGLS")
+cgs = CGLS()
+cgs.set_up(x_init, A, b )
+cgs.max_iteration = 10
+
+print("setup CGLS")
+cgok = CGLS()
+cgok.set_up(X_init, Kok, B )
+cgok.max_iteration = 10
+# #
+#out.__isub__(B)
+#out2 = K.adjoint(out)
+
+#(2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b )
+
+
+for _ in gd:
+ print ("GradientDescent iteration {} {}".format(gd.iteration, gd.get_last_loss()))
+#gd2.run(5,verbose=True)
+print("CGLS block lambda big")
+cg.run(10, lambda it,val: print ("CGLS big iteration {} objective {}".format(it,val)))
+
+print("CGLS standard")
+cgs.run(10, lambda it,val: print ("CGLS standard iteration {} objective {}".format(it,val)))
+
+print("CGLS block lambda small")
+cgsmall.run(10, lambda it,val: print ("CGLS small iteration {} objective {}".format(it,val)))
+print("CGLS block lambdaok")
+cgok.run(10, verbose=True)
+# # for _ in cg:
+# print ("iteration {} {}".format(cg.iteration, cg.get_current_loss()))
+# #
+# # fig = plt.figure()
+# # plt.imshow(cg.get_output().get_item(0,0).subset(vertical=0).as_array())
+# # plt.title('Composite CGLS')
+# # plt.show()
+# #
+# # for _ in cgs:
+# print ("iteration {} {}".format(cgs.iteration, cgs.get_current_loss()))
+# #
+Phantom = ImageData(phantom_tm)
+
+theslice=40
+
+fig = plt.figure()
+plt.subplot(2,3,1)
+plt.imshow(numpy.flip(Phantom.subset(vertical=theslice).as_array(),axis=0), cmap='gray')
+plt.clim(0,0.7)
+plt.title('Simulated Phantom')
+plt.subplot(2,3,2)
+plt.imshow(gd.get_output().subset(vertical=theslice).as_array(), cmap='gray')
+plt.clim(0,0.7)
+plt.title('Simple Gradient Descent')
+plt.subplot(2,3,3)
+plt.imshow(cgs.get_output().subset(vertical=theslice).as_array(), cmap='gray')
+plt.clim(0,0.7)
+plt.title('Simple CGLS')
+plt.subplot(2,3,5)
+plt.imshow(cg.get_output().get_item(0).subset(vertical=theslice).as_array(), cmap='gray')
+plt.clim(0,0.7)
+plt.title('Composite CGLS\nbig lambda')
+plt.subplot(2,3,6)
+plt.imshow(cgsmall.get_output().get_item(0).subset(vertical=theslice).as_array(), cmap='gray')
+plt.clim(0,0.7)
+plt.title('Composite CGLS\nsmall lambda')
+plt.subplot(2,3,4)
+plt.imshow(cgok.get_output().get_item(0).subset(vertical=theslice).as_array(), cmap='gray')
+plt.clim(0,0.7)
+plt.title('Composite CGLS\nok lambda')
+plt.show()
+
+
+#Ibig = 7e1 * TomoIdentity(geometry=ig)
+#Kbig = BlockOperator(A, Ibig, shape=(2,1))
+#cg2 = CGLS(x_init=X_init, operator=Kbig, data=B)
+#cg2.max_iteration = 10
+#cg2.run(10, verbose=True)